From noreply at buildbot.pypy.org Sat Feb 1 03:01:52 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 1 Feb 2014 03:01:52 +0100 (CET) Subject: [pypy-commit] pypy default: move low-level stuff out of unaryop Message-ID: <20140201020152.3EC8F1C0153@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r69040:2ad0b1147d25 Date: 2014-02-01 02:01 +0000 http://bitbucket.org/pypy/pypy/changeset/2ad0b1147d25/ Log: move low-level stuff out of unaryop diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -4,7 +4,6 @@ from __future__ import absolute_import -from types import MethodType from rpython.flowspace.operation import op from rpython.annotator.model import (SomeObject, SomeInteger, SomeBool, SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, @@ -757,63 +756,6 @@ # This should probably never happen raise AnnotatorError("Cannot call len on a pbc") -# annotation of low-level types -from rpython.rtyper.llannotation import ( - SomePtr, SomeLLADTMeth, ll_to_annotation, lltype_to_annotation, - annotation_to_lltype) - -class __extend__(SomePtr): - - def getattr(self, s_attr): - assert s_attr.is_constant(), "getattr on ptr %r with non-constant field-name" % self.ll_ptrtype - example = self.ll_ptrtype._example() - try: - v = example._lookup_adtmeth(s_attr.const) - except AttributeError: - v = getattr(example, s_attr.const) - return ll_to_annotation(v) - else: - if isinstance(v, MethodType): - from rpython.rtyper.lltypesystem import lltype - ll_ptrtype = lltype.typeOf(v.im_self) - assert isinstance(ll_ptrtype, (lltype.Ptr, lltype.InteriorPtr)) - return SomeLLADTMeth(ll_ptrtype, v.im_func) - return getbookkeeper().immutablevalue(v) - getattr.can_only_throw = [] - - def len(self): - length = self.ll_ptrtype._example()._fixedlength() - if length is None: - return SomeObject.len(self) - else: - return immutablevalue(length) - - def setattr(self, s_attr, s_value): # just doing checking - assert s_attr.is_constant(), "setattr on ptr %r with non-constant field-name" % self.ll_ptrtype - example = self.ll_ptrtype._example() - if getattr(example, s_attr.const) is not None: # ignore Void s_value - v_lltype = annotation_to_lltype(s_value) - setattr(example, s_attr.const, v_lltype._defl()) - - def call(self, args): - args_s, kwds_s = args.unpack() - if kwds_s: - raise Exception("keyword arguments to call to a low-level fn ptr") - info = 'argument to ll function pointer call' - llargs = [annotation_to_lltype(s_arg,info)._defl() for s_arg in args_s] - v = self.ll_ptrtype._example()(*llargs) - return ll_to_annotation(v) - - def bool(self): - return s_Bool - -class __extend__(SomeLLADTMeth): - - def call(self, args): - bookkeeper = getbookkeeper() - s_func = bookkeeper.immutablevalue(self.func) - return s_func.call(args.prepend(lltype_to_annotation(self.ll_ptrtype))) - #_________________________________________ # weakrefs diff --git a/rpython/rtyper/llannotation.py b/rpython/rtyper/llannotation.py --- a/rpython/rtyper/llannotation.py +++ b/rpython/rtyper/llannotation.py @@ -1,6 +1,7 @@ """ Code for annotating low-level thingies. 
""" +from types import MethodType from rpython.tool.pairtype import pair, pairtype from rpython.annotator.model import ( SomeObject, SomeSingleFloat, SomeFloat, SomeLongFloat, SomeChar, @@ -108,6 +109,54 @@ def can_be_none(self): return False + def getattr(self, s_attr): + from rpython.annotator.bookkeeper import getbookkeeper + if not s_attr.is_constant(): + raise AnnotatorError("getattr on ptr %r with non-constant " + "field-name" % self.ll_ptrtype) + example = self.ll_ptrtype._example() + try: + v = example._lookup_adtmeth(s_attr.const) + except AttributeError: + v = getattr(example, s_attr.const) + return ll_to_annotation(v) + else: + if isinstance(v, MethodType): + ll_ptrtype = lltype.typeOf(v.im_self) + assert isinstance(ll_ptrtype, (lltype.Ptr, lltype.InteriorPtr)) + return SomeLLADTMeth(ll_ptrtype, v.im_func) + return getbookkeeper().immutablevalue(v) + getattr.can_only_throw = [] + + def len(self): + from rpython.annotator.bookkeeper import getbookkeeper + length = self.ll_ptrtype._example()._fixedlength() + if length is None: + return SomeObject.len(self) + else: + return getbookkeeper().immutablevalue(length) + + def setattr(self, s_attr, s_value): # just doing checking + if not s_attr.is_constant(): + raise AnnotatorError("setattr on ptr %r with non-constant " + "field-name" % self.ll_ptrtype) + example = self.ll_ptrtype._example() + if getattr(example, s_attr.const) is not None: # ignore Void s_value + v_lltype = annotation_to_lltype(s_value) + setattr(example, s_attr.const, v_lltype._defl()) + + def call(self, args): + args_s, kwds_s = args.unpack() + if kwds_s: + raise Exception("keyword arguments to call to a low-level fn ptr") + info = 'argument to ll function pointer call' + llargs = [annotation_to_lltype(s_arg, info)._defl() for s_arg in args_s] + v = self.ll_ptrtype._example()(*llargs) + return ll_to_annotation(v) + + def bool(self): + return s_Bool + class SomeInteriorPtr(SomePtr): def __init__(self, ll_ptrtype): @@ -125,6 +174,13 @@ def can_be_none(self): return False + def call(self, args): + from rpython.annotator.bookkeeper import getbookkeeper + bookkeeper = getbookkeeper() + s_func = bookkeeper.immutablevalue(self.func) + return s_func.call(args.prepend(lltype_to_annotation(self.ll_ptrtype))) + + class __extend__(pairtype(SomePtr, SomePtr)): def union((p1, p2)): if p1.ll_ptrtype != p2.ll_ptrtype: From noreply at buildbot.pypy.org Sat Feb 1 18:28:38 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 1 Feb 2014 18:28:38 +0100 (CET) Subject: [pypy-commit] pypy NonConstant: Simply use not_const() to annotate NonConstant Message-ID: <20140201172838.48EC31C087E@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: NonConstant Changeset: r69041:d1fc613a6520 Date: 2014-02-01 17:27 +0000 http://bitbucket.org/pypy/pypy/changeset/d1fc613a6520/ Log: Simply use not_const() to annotate NonConstant diff --git a/rpython/rlib/nonconst.py b/rpython/rlib/nonconst.py --- a/rpython/rlib/nonconst.py +++ b/rpython/rlib/nonconst.py @@ -4,6 +4,7 @@ from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.flowspace.model import Constant +from rpython.annotator.model import not_const class NonConstant(object): def __init__(self, _constant): @@ -33,11 +34,8 @@ class EntryNonConstant(ExtRegistryEntry): _about_ = NonConstant - def compute_result_annotation(self, arg): - if hasattr(arg, 'const'): - return self.bookkeeper.immutablevalue(arg.const, False) - else: - return arg + def compute_result_annotation(self, s_arg): + return not_const(s_arg) def specialize_call(self, 
hop): hop.exception_cannot_occur() From noreply at buildbot.pypy.org Sat Feb 1 18:56:40 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 1 Feb 2014 18:56:40 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: move Makefile tests Message-ID: <20140201175640.7D1D71C13DF@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r69042:0b9dc910cbd5 Date: 2014-01-29 23:39 +0200 http://bitbucket.org/pypy/pypy/changeset/0b9dc910cbd5/ Log: move Makefile tests diff --git a/rpython/translator/platform/test/test_makefile.py b/rpython/translator/platform/test/test_makefile.py --- a/rpython/translator/platform/test/test_makefile.py +++ b/rpython/translator/platform/test/test_makefile.py @@ -1,7 +1,10 @@ from rpython.translator.platform.posix import GnuMakefile as Makefile +from rpython.translator.platform import host +from rpython.tool.udir import udir +from rpython.translator.tool.cbuild import ExternalCompilationInfo from StringIO import StringIO -import re +import re, sys def test_simple_makefile(): m = Makefile() @@ -29,3 +32,106 @@ val = s.getvalue() assert not re.search('CC += +xxx', val, re.M) assert re.search('CC += +yyy', val, re.M) + +class TestMakefile(object): + platform = host + strict_on_stderr = True + + def check_res(self, res, expected='42\n'): + assert res.out == expected + if self.strict_on_stderr: + assert res.err == '' + assert res.returncode == 0 + + def test_900_files(self): + txt = '#include \n' + for i in range(900): + txt += 'int func%03d();\n' % i + txt += 'int main() {\n int j=0;' + for i in range(900): + txt += ' j += func%03d();\n' % i + txt += ' printf("%d\\n", j);\n' + txt += ' return 0;};\n' + cfile = udir.join('test_900_files.c') + cfile.write(txt) + cfiles = [cfile] + for i in range(900): + cfile2 = udir.join('implement%03d.c' %i) + cfile2.write(''' + int func%03d() + { + return %d; + } + ''' % (i, i)) + cfiles.append(cfile2) + mk = self.platform.gen_makefile(cfiles, ExternalCompilationInfo(), path=udir) + mk.write() + self.platform.execute_makefile(mk) + res = self.platform.execute(udir.join('test_900_files')) + self.check_res(res, '%d\n' %sum(range(900))) + + def test_precompiled_headers(self): + import time + tmpdir = udir.join('precompiled_headers').ensure(dir=1) + # Create an eci that should not use precompiled headers + eci = ExternalCompilationInfo(include_dirs=[tmpdir]) + main_c = tmpdir.join('main_no_pch.c') + eci.separate_module_files = [main_c] + ncfiles = 10 + nprecompiled_headers = 20 + txt = '' + for i in range(ncfiles): + txt += "int func%03d();\n" % i + txt += "\nint main(int argc, char * argv[])\n" + txt += "{\n int i=0;\n" + for i in range(ncfiles): + txt += " i += func%03d();\n" % i + txt += ' printf("%d\\n", i);\n' + txt += " return 0;\n};\n" + main_c.write(txt) + # Create some large headers with dummy functions to be precompiled + cfiles_precompiled_headers = [] + for i in range(nprecompiled_headers): + pch_name =tmpdir.join('pcheader%03d.h' % i) + txt = '' + for j in range(3000): + txt += "int pcfunc%03d_%03d();\n" %(i, j) + pch_name.write(txt) + cfiles_precompiled_headers.append(pch_name) + # Create some cfiles with headers we want precompiled + cfiles = [] + for i in range(ncfiles): + c_name =tmpdir.join('implement%03d.c' % i) + txt = '' + for pch_name in cfiles_precompiled_headers: + txt += '#include "%s"\n' % pch_name + txt += "int func%03d(){ return %d;};\n" % (i, i) + c_name.write(txt) + cfiles.append(c_name) + mk = self.platform.gen_makefile(cfiles, eci, path=udir, + 
cfile_precompilation=cfiles_precompiled_headers) + if sys.platform == 'win32': + clean = ('clean', '', 'for %f in ( $(OBJECTS) $(TARGET) ) do @if exist %f del /f %f') + else: + clean = ('clean', '', 'rm -f $(OBJECTS) $(TARGET) ') + mk.rule(*clean) + mk.write() + t0 = time.clock() + self.platform.execute_makefile(mk) + t1 = time.clock() + t_precompiled = t1 - t0 + res = self.platform.execute(mk.exe_name) + self.check_res(res, '%d\n' %sum(range(ncfiles))) + self.platform.execute_makefile(mk, extra_opts=['clean']) + #Rewrite a non-precompiled header makefile + mk = self.platform.gen_makefile(cfiles, eci, path=udir) + mk.rule(*clean) + mk.write() + t0 = time.clock() + self.platform.execute_makefile(mk) + t1 = time.clock() + t_normal = t1 - t0 + print "precompiled haeder 'make' time %.2f, non-precompiled header time %.2f" %(t_precompiled, t_normal) + assert t_precompiled < t_normal * 0.5 + + diff --git a/rpython/translator/platform/test/test_platform.py b/rpython/translator/platform/test/test_platform.py --- a/rpython/translator/platform/test/test_platform.py +++ b/rpython/translator/platform/test/test_platform.py @@ -59,97 +59,6 @@ res = self.platform.execute(executable) self.check_res(res) - def test_900_files(self): - txt = '#include \n' - for i in range(900): - txt += 'int func%03d();\n' % i - txt += 'int main() {\n int j=0;' - for i in range(900): - txt += ' j += func%03d();\n' % i - txt += ' printf("%d\\n", j);\n' - txt += ' return 0;};\n' - cfile = udir.join('test_900_files.c') - cfile.write(txt) - cfiles = [cfile] - for i in range(900): - cfile2 = udir.join('implement%03d.c' %i) - cfile2.write(''' - int func%03d() - { - return %d; - } - ''' % (i, i)) - cfiles.append(cfile2) - mk = self.platform.gen_makefile(cfiles, ExternalCompilationInfo(), path=udir) - mk.write() - self.platform.execute_makefile(mk) - res = self.platform.execute(udir.join('test_900_files')) - self.check_res(res, '%d\n' %sum(range(900))) - - def test_precompiled_headers(self): - import time - tmpdir = udir.join('precompiled_headers').ensure(dir=1) - # Create an eci that should not use precompiled headers - eci = ExternalCompilationInfo(include_dirs=[tmpdir]) - main_c = tmpdir.join('main_no_pch.c') - eci.separate_module_files = [main_c] - ncfiles = 10 - nprecompiled_headers = 20 - txt = '' - for i in range(ncfiles): - txt += "int func%03d();\n" % i - txt += "\nint main(int argc, char * argv[])\n" - txt += "{\n int i=0;\n" - for i in range(ncfiles): - txt += " i += func%03d();\n" % i - txt += ' printf("%d\\n", i);\n' - txt += " return 0;\n};\n" - main_c.write(txt) - # Create some large headers with dummy functions to be precompiled - cfiles_precompiled_headers = [] - for i in range(nprecompiled_headers): - pch_name =tmpdir.join('pcheader%03d.h' % i) - txt = '' - for j in range(3000): - txt += "int pcfunc%03d_%03d();\n" %(i, j) - pch_name.write(txt) - cfiles_precompiled_headers.append(pch_name) - # Create some cfiles with headers we want precompiled - cfiles = [] - for i in range(ncfiles): - c_name =tmpdir.join('implement%03d.c' % i) - txt = '' - for pch_name in cfiles_precompiled_headers: - txt += '#include "%s"\n' % pch_name - txt += "int func%03d(){ return %d;};\n" % (i, i) - c_name.write(txt) - cfiles.append(c_name) - mk = self.platform.gen_makefile(cfiles, eci, path=udir, - cfile_precompilation=cfiles_precompiled_headers) - if sys.platform == 'win32': - clean = ('clean', '', 'for %f in ( $(OBJECTS) $(TARGET) ) do @if exist %f del /f %f') - else: - clean = ('clean', '', 'rm -f $(OBJECTS) $(TARGET) ') - mk.rule(*clean) 
- mk.write() - t0 = time.clock() - self.platform.execute_makefile(mk) - t1 = time.clock() - t_precompiled = t1 - t0 - res = self.platform.execute(mk.exe_name) - self.check_res(res, '%d\n' %sum(range(ncfiles))) - self.platform.execute_makefile(mk, extra_opts=['clean']) - #Rewrite a non-precompiled header makefile - mk = self.platform.gen_makefile(cfiles, eci, path=udir) - mk.rule(*clean) - mk.write() - t0 = time.clock() - self.platform.execute_makefile(mk) - t1 = time.clock() - t_normal = t1 - t0 - print "precompiled haeder 'make' time %.2f, non-precompiled header time %.2f" %(t_precompiled, t_normal) - assert t_precompiled < t_normal * 0.5 - def test_nice_errors(self): cfile = udir.join('test_nice_errors.c') cfile.write('') From noreply at buildbot.pypy.org Sat Feb 1 18:56:41 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 1 Feb 2014 18:56:41 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: implement precompiled header use for windows Message-ID: <20140201175641.963A31C13DF@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r69043:d74d5f697984 Date: 2014-01-31 14:56 +0200 http://bitbucket.org/pypy/pypy/changeset/d74d5f697984/ Log: implement precompiled header use for windows diff --git a/rpython/translator/platform/test/test_makefile.py b/rpython/translator/platform/test/test_makefile.py --- a/rpython/translator/platform/test/test_makefile.py +++ b/rpython/translator/platform/test/test_makefile.py @@ -93,9 +93,10 @@ cfiles_precompiled_headers = [] for i in range(nprecompiled_headers): pch_name =tmpdir.join('pcheader%03d.h' % i) - txt = '' + txt = '#ifndef PCHEADER%03d_H\n#define PCHEADER%03d_H\n' %(i, i) for j in range(3000): txt += "int pcfunc%03d_%03d();\n" %(i, j) + txt += '#endif' pch_name.write(txt) cfiles_precompiled_headers.append(pch_name) # Create some cfiles with headers we want precompiled @@ -108,12 +109,22 @@ txt += "int func%03d(){ return %d;};\n" % (i, i) c_name.write(txt) cfiles.append(c_name) - mk = self.platform.gen_makefile(cfiles, eci, path=udir, - cfile_precompilation=cfiles_precompiled_headers) if sys.platform == 'win32': clean = ('clean', '', 'for %f in ( $(OBJECTS) $(TARGET) ) do @if exist %f del /f %f') else: clean = ('clean', '', 'rm -f $(OBJECTS) $(TARGET) ') + #write a non-precompiled header makefile + mk = self.platform.gen_makefile(cfiles, eci, path=tmpdir) + mk.rule(*clean) + mk.write() + t0 = time.clock() + self.platform.execute_makefile(mk) + t1 = time.clock() + t_normal = t1 - t0 + self.platform.execute_makefile(mk, extra_opts=['clean']) + # Write a super-duper makefile with precompiled headers + mk = self.platform.gen_makefile(cfiles, eci, path=tmpdir, + cfile_precompilation=cfiles_precompiled_headers,) mk.rule(*clean) mk.write() t0 = time.clock() @@ -122,15 +133,6 @@ t_precompiled = t1 - t0 res = self.platform.execute(mk.exe_name) self.check_res(res, '%d\n' %sum(range(ncfiles))) - self.platform.execute_makefile(mk, extra_opts=['clean']) - #Rewrite a non-precompiled header makefile - mk = self.platform.gen_makefile(cfiles, eci, path=udir) - mk.rule(*clean) - mk.write() - t0 = time.clock() - self.platform.execute_makefile(mk) - t1 = time.clock() - t_normal = t1 - t0 print "precompiled haeder 'make' time %.2f, non-precompiled header time %.2f" %(t_precompiled, t_normal) assert t_precompiled < t_normal * 0.5 diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -318,15 
+318,38 @@ if self.x64: definitions.append(('_WIN64', '1')) + rules = [ + ('.asm.obj', '', '$(MASM) /nologo /Fo$@ /c $< $(INCLUDEDIRS)'), + ] + + if cfile_precompilation: + stdafx_h = path.join('stdafx.h') + txt = '#ifndef PYPY_STDAFX_H\n' + txt += '#define PYPY_STDAFX_H\n' + txt += '\n'.join(['#include "' + m.pathrel(c) + '"' for c in cfile_precompilation]) + txt += '\n#endif\n' + stdafx_h.write(txt) + stdafx_c = path.join('stdafx.c') + stdafx_c.write('#include "stdafx.h"\n') + definitions.append(('CREATE_PCH', '/Ycstdafx.h /Fpstdafx.pch /FIstdafx.h')) + definitions.append(('USE_PCH', '/Yustdafx.h /Fpstdafx.pch /FIstdafx.h')) + rules.append(('all', 'stdafx.pch $(DEFAULT_TARGET)', [])) + rules.append(('stdafx.pch', '', + '$(CC) stdafx.c /c /nologo $(CFLAGS) $(CFLAGSEXTRA) $(CREATE_PCH) $(INCLUDEDIRS)')) + rules.append(('.c.obj', '', + '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) $(USE_PCH) /Fo$@ /c $< $(INCLUDEDIRS)')) + + target_deps = 'stdafx.obj $(OBJECTS)' + else: + rules.append(('all', '$(DEFAULT_TARGET)', [])) + rules.append(('.c.obj', '', + '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) /Fo$@ /c $< $(INCLUDEDIRS)')) + target_deps = '$(OBJECTS)' + + for args in definitions: m.definition(*args) - rules = [ - ('all', '$(DEFAULT_TARGET)', []), - ('.c.obj', '', '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) /Fo$@ /c $< $(INCLUDEDIRS)'), - ('.asm.obj', '', '$(MASM) /nologo /Fo$@ /c $< $(INCLUDEDIRS)'), - ] - for rule in rules: m.rule(*rule) @@ -343,12 +366,12 @@ rel_ofiles[-1]) objects = ' @obj_names.rsp' if self.version < 80: - m.rule('$(TARGET)', '$(OBJECTS)', + m.rule('$(TARGET)', target_deps, create_obj_response_file + [\ '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA)' + objects + ' /out:$@ $(LIBDIRS) $(LIBS)', ]) else: - m.rule('$(TARGET)', '$(OBJECTS)', + m.rule('$(TARGET)', target_deps, create_obj_response_file + [\ '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA)' + objects + ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS) /MANIFEST /MANIFESTFILE:$*.manifest', 'mt.exe -nologo -manifest $*.manifest -outputresource:$@;1', From noreply at buildbot.pypy.org Sat Feb 1 18:56:42 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sat, 1 Feb 2014 18:56:42 +0100 (CET) Subject: [pypy-commit] pypy default: windows - prevent dreaded dialog box on failure tests Message-ID: <20140201175642.A7A151C13DF@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r69044:565d143e0975 Date: 2014-02-01 19:55 +0200 http://bitbucket.org/pypy/pypy/changeset/565d143e0975/ Log: windows - prevent dreaded dialog box on failure tests diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -16,6 +16,20 @@ from rpython.conftest import cdir from rpython.conftest import option +def setup_module(module): + if os.name == 'nt': + # Do not open dreaded dialog box on segfault + import ctypes + SEM_NOGPFAULTERRORBOX = 0x0002 # From MSDN + old_err_mode = ctypes.windll.kernel32.GetErrorMode() + new_err_mode = old_err_mode | SEM_NOGPFAULTERRORBOX + ctypes.windll.kernel32.SetErrorMode(new_err_mode) + module.old_err_mode = old_err_mode + +def teardown_module(module): + if os.name == 'nt': + import ctypes + ctypes.windll.kernel32.SetErrorMode(module.old_err_mode) class StandaloneTests(object): config = None From noreply at buildbot.pypy.org Sat Feb 1 21:10:32 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 1 Feb 2014 21:10:32 +0100 (CET) Subject: [pypy-commit] pypy default: 
unused import Message-ID: <20140201201032.9F9BC1D24FA@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r69045:186bbfd2aad0 Date: 2014-02-01 12:09 -0800 http://bitbucket.org/pypy/pypy/changeset/186bbfd2aad0/ Log: unused import diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py --- a/rpython/rlib/rarithmetic.py +++ b/rpython/rlib/rarithmetic.py @@ -690,9 +690,8 @@ characters of 's'. Raises ParseStringError in case of error. Raises ParseStringOverflowError in case the result does not fit. """ - from rpython.rlib.rstring import NumberStringParser, \ - ParseStringOverflowError, \ - ParseStringError, strip_spaces + from rpython.rlib.rstring import ( + NumberStringParser, ParseStringOverflowError, strip_spaces) s = literal = strip_spaces(s) p = NumberStringParser(s, literal, base, 'int') base = p.base From noreply at buildbot.pypy.org Sat Feb 1 21:35:05 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 1 Feb 2014 21:35:05 +0100 (CET) Subject: [pypy-commit] pypy default: remove unnecessary import Message-ID: <20140201203506.00DEA1D248D@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r69046:180ede03fc14 Date: 2014-02-01 20:32 +0000 http://bitbucket.org/pypy/pypy/changeset/180ede03fc14/ Log: remove unnecessary import diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -24,7 +24,6 @@ def __init__(self, translator=None, policy=None, bookkeeper=None): import rpython.rtyper.extfuncregistry # has side effects - import rpython.rlib.nonconst # has side effects if translator is None: # interface for tests From noreply at buildbot.pypy.org Sun Feb 2 23:25:31 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sun, 2 Feb 2014 23:25:31 +0100 (CET) Subject: [pypy-commit] pypy default: describe invalid source strings at the pypy level vs rpython. simplifies py3k Message-ID: <20140202222531.C1B031D2482@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r69047:3c0908d6f8ad Date: 2014-02-02 14:08 -0800 http://bitbucket.org/pypy/pypy/changeset/3c0908d6f8ad/ Log: describe invalid source strings at the pypy level vs rpython. 
simplifies py3k and improves error messages diff --git a/pypy/module/_csv/interp_reader.py b/pypy/module/_csv/interp_reader.py --- a/pypy/module/_csv/interp_reader.py +++ b/pypy/module/_csv/interp_reader.py @@ -39,6 +39,7 @@ field_builder.append(c) def save_field(self, field_builder): + space = self.space field = field_builder.build() if self.numeric_field: from rpython.rlib.rstring import ParseStringError @@ -46,12 +47,12 @@ self.numeric_field = False try: ff = string_to_float(field) - except ParseStringError, e: - raise OperationError(self.space.w_ValueError, - self.space.wrap(e.msg)) - w_obj = self.space.wrap(ff) + except ParseStringError as e: + from pypy.objspace.std.inttype import wrap_parsestringerror + raise wrap_parsestringerror(space, e, space.wrap(field)) + w_obj = space.wrap(ff) else: - w_obj = self.space.wrap(field) + w_obj = space.wrap(field) self.fields_w.append(w_obj) def next_w(self): diff --git a/pypy/objspace/std/floattype.py b/pypy/objspace/std/floattype.py --- a/pypy/objspace/std/floattype.py +++ b/pypy/objspace/std/floattype.py @@ -34,20 +34,11 @@ value = space.float_w(w_obj) elif (space.isinstance_w(w_value, space.w_str) or space.isinstance_w(w_value, space.w_bytearray)): - strvalue = space.bufferstr_w(w_value) - try: - value = rfloat.string_to_float(strvalue) - except ParseStringError, e: - raise OperationError(space.w_ValueError, - space.wrap(e.msg)) + value = _string_to_float(space, w_value, space.bufferstr_w(w_value)) elif space.isinstance_w(w_value, space.w_unicode): from unicodeobject import unicode_to_decimal_w - strvalue = unicode_to_decimal_w(space, w_value) - try: - value = rfloat.string_to_float(strvalue) - except ParseStringError, e: - raise OperationError(space.w_ValueError, - space.wrap(e.msg)) + value = _string_to_float(space, w_value, + unicode_to_decimal_w(space, w_value)) else: value = space.float_w(w_x) w_obj = space.allocate_instance(W_FloatObject, w_floattype) @@ -55,6 +46,14 @@ return w_obj +def _string_to_float(space, w_source, string): + try: + return rfloat.string_to_float(string) + except ParseStringError as e: + from pypy.objspace.std.inttype import wrap_parsestringerror + raise wrap_parsestringerror(space, e, w_source) + + def detect_floatformat(): from rpython.rtyper.lltypesystem import rffi, lltype buf = lltype.malloc(rffi.CCHARP.TO, 8, flavor='raw') diff --git a/pypy/objspace/std/inttype.py b/pypy/objspace/std/inttype.py --- a/pypy/objspace/std/inttype.py +++ b/pypy/objspace/std/inttype.py @@ -9,7 +9,8 @@ from rpython.rlib.rarithmetic import r_uint, string_to_int from rpython.rlib.objectmodel import instantiate from rpython.rlib.rbigint import rbigint -from rpython.rlib.rstring import ParseStringError, ParseStringOverflowError +from rpython.rlib.rstring import ( + InvalidBaseError, ParseStringError, ParseStringOverflowError) from rpython.rlib import jit # ____________________________________________________________ @@ -63,27 +64,33 @@ # ____________________________________________________________ @jit.elidable -def string_to_int_or_long(space, string, base=10): +def string_to_int_or_long(space, w_source, string, base=10): w_longval = None value = 0 try: value = string_to_int(string, base) - except ParseStringError, e: - raise OperationError(space.w_ValueError, - space.wrap(e.msg)) + except ParseStringError as e: + raise wrap_parsestringerror(space, e, w_source) except ParseStringOverflowError, e: - w_longval = retry_to_w_long(space, e.parser) + w_longval = retry_to_w_long(space, e.parser, w_source) return value, w_longval -def 
retry_to_w_long(space, parser): +def retry_to_w_long(space, parser, w_source): parser.rewind() try: bigint = rbigint._from_numberstring_parser(parser) - except ParseStringError, e: - raise OperationError(space.w_ValueError, - space.wrap(e.msg)) + except ParseStringError as e: + raise wrap_parsestringerror(space, e, w_source) return space.newlong_from_rbigint(bigint) +def wrap_parsestringerror(space, e, w_source): + if isinstance(e, InvalidBaseError): + w_msg = space.wrap(e.msg) + else: + w_msg = space.wrap('%s: %s' % (e.msg, + space.str_w(space.repr(w_source)))) + return OperationError(space.w_ValueError, w_msg) + @unwrap_spec(w_x = WrappedDefault(0)) def descr__new__(space, w_inttype, w_x, w_base=None): from pypy.objspace.std.intobject import W_IntObject @@ -110,11 +117,12 @@ # an overflowing long value = space.int_w(w_obj) elif space.isinstance_w(w_value, space.w_str): - value, w_longval = string_to_int_or_long(space, space.str_w(w_value)) + value, w_longval = string_to_int_or_long(space, w_value, + space.str_w(w_value)) elif space.isinstance_w(w_value, space.w_unicode): from pypy.objspace.std.unicodeobject import unicode_to_decimal_w string = unicode_to_decimal_w(space, w_value) - value, w_longval = string_to_int_or_long(space, string) + value, w_longval = string_to_int_or_long(space, w_value, string) else: # If object supports the buffer interface try: @@ -127,7 +135,8 @@ w_value) else: buf = space.interp_w(Buffer, w_buffer) - value, w_longval = string_to_int_or_long(space, buf.as_str()) + value, w_longval = string_to_int_or_long(space, w_value, + buf.as_str()) else: base = space.int_w(w_base) @@ -142,7 +151,7 @@ space.wrap("int() can't convert non-string " "with explicit base")) - value, w_longval = string_to_int_or_long(space, s, base) + value, w_longval = string_to_int_or_long(space, w_value, s, base) if w_longval is not None: if not space.is_w(w_inttype, space.w_int): diff --git a/pypy/objspace/std/longtype.py b/pypy/objspace/std/longtype.py --- a/pypy/objspace/std/longtype.py +++ b/pypy/objspace/std/longtype.py @@ -41,10 +41,11 @@ w_obj = space.int(w_obj) return newbigint(space, w_longtype, space.bigint_w(w_obj)) elif space.isinstance_w(w_value, space.w_str): - return string_to_w_long(space, w_longtype, space.str_w(w_value)) + return string_to_w_long(space, w_longtype, w_value, + space.str_w(w_value)) elif space.isinstance_w(w_value, space.w_unicode): from pypy.objspace.std.unicodeobject import unicode_to_decimal_w - return string_to_w_long(space, w_longtype, + return string_to_w_long(space, w_longtype, w_value, unicode_to_decimal_w(space, w_value)) else: try: @@ -57,7 +58,8 @@ w_value) else: buf = space.interp_w(Buffer, w_buffer) - return string_to_w_long(space, w_longtype, buf.as_str()) + return string_to_w_long(space, w_longtype, w_value, + buf.as_str()) else: base = space.int_w(w_base) @@ -71,15 +73,15 @@ raise OperationError(space.w_TypeError, space.wrap("long() can't convert non-string " "with explicit base")) - return string_to_w_long(space, w_longtype, s, base) + return string_to_w_long(space, w_longtype, w_value, s, base) -def string_to_w_long(space, w_longtype, s, base=10): +def string_to_w_long(space, w_longtype, w_source, string, base=10): try: - bigint = rbigint.fromstr(s, base) - except ParseStringError, e: - raise OperationError(space.w_ValueError, - space.wrap(e.msg)) + bigint = rbigint.fromstr(string, base) + except ParseStringError as e: + from pypy.objspace.std.inttype import wrap_parsestringerror + raise wrap_parsestringerror(space, e, w_source) return 
newbigint(space, w_longtype, bigint) string_to_w_long._dont_inline_ = True diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -1,3 +1,4 @@ +# encoding: utf-8 import py import sys from pypy.objspace.std import intobject as iobj @@ -517,6 +518,18 @@ assert str(e.value) == ( "int() argument must be a string or a number, not 'list'") + def test_invalid_literal_message(self): + import sys + if '__pypy__' not in sys.builtin_module_names: + skip('PyPy 2.x/CPython 3.4 only') + for value in b' 1j ', u' 1٢٣٤j ': + try: + int(value) + except ValueError as e: + assert repr(value) in str(e) + else: + assert False, value + class AppTestIntOptimizedAdd(AppTestInt): spaceconfig = {"objspace.std.optimized_int_add": True} diff --git a/rpython/rlib/rfloat.py b/rpython/rlib/rfloat.py --- a/rpython/rlib/rfloat.py +++ b/rpython/rlib/rfloat.py @@ -25,6 +25,8 @@ globals().update(rffi_platform.configure(CConfig)) +INVALID_MSG = "invalid literal for float()" + def string_to_float(s): """ Conversion of string to float. @@ -36,10 +38,8 @@ from rpython.rlib.rstring import strip_spaces, ParseStringError s = strip_spaces(s) - if not s: - raise ParseStringError("empty string for float()") - + raise ParseStringError(INVALID_MSG) low = s.lower() if low == "-inf" or low == "-infinity": @@ -56,7 +56,7 @@ try: return rstring_to_float(s) except ValueError: - raise ParseStringError("invalid literal for float(): '%s'" % s) + raise ParseStringError(INVALID_MSG) def rstring_to_float(s): from rpython.rlib.rdtoa import strtod diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py --- a/rpython/rlib/rstring.py +++ b/rpython/rlib/rstring.py @@ -278,6 +278,9 @@ def __init__(self, msg): self.msg = msg +class InvalidBaseError(ParseStringError): + """Signals an invalid base argument""" + class ParseStringOverflowError(Exception): def __init__(self, parser): self.parser = parser @@ -286,11 +289,10 @@ class NumberStringParser: def error(self): - raise ParseStringError("invalid literal for %s() with base %d: '%s'" % - (self.fname, self.original_base, self.literal)) + raise ParseStringError("invalid literal for %s() with base %d" % + (self.fname, self.original_base)) def __init__(self, s, literal, base, fname): - self.literal = literal self.fname = fname sign = 1 if s.startswith('-'): @@ -311,7 +313,7 @@ else: base = 10 elif base < 2 or base > 36: - raise ParseStringError, "%s() base must be >= 2 and <= 36" % (fname,) + raise InvalidBaseError("%s() base must be >= 2 and <= 36" % fname) self.base = base if base == 16 and (s.startswith('0x') or s.startswith('0X')): From noreply at buildbot.pypy.org Mon Feb 3 05:19:39 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 3 Feb 2014 05:19:39 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: merge default Message-ID: <20140203041939.30F2B1C1178@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r69048:0e80c730535a Date: 2014-02-02 14:51 -0800 http://bitbucket.org/pypy/pypy/changeset/0e80c730535a/ Log: merge default diff too long, truncating to 2000 out of 23372 lines diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -166,11 +166,18 @@ self.assertTrue(m[0:6] == m[:]) self.assertFalse(m[0:5] == m) - # Comparison with objects which don't support the buffer API - 
self.assertFalse(m == u"abcdef") - self.assertTrue(m != u"abcdef") - self.assertFalse(u"abcdef" == m) - self.assertTrue(u"abcdef" != m) + if test_support.check_impl_detail(cpython=True): + # what is supported and what is not supported by memoryview is + # very inconsisten on CPython. In PyPy, memoryview supports + # the buffer interface, and thus the following comparison + # succeeds. See also the comment in + # pypy.modules.__builtin__.interp_memoryview.W_MemoryView.descr_buffer + # + # Comparison with objects which don't support the buffer API + self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) + self.assertTrue(m != u"abcdef") + self.assertFalse(u"abcdef" == m) + self.assertTrue(u"abcdef" != m) # Unordered comparisons are unimplemented, and therefore give # arbitrary results (they raise a TypeError in py3k) diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -993,7 +993,7 @@ try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True) + try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False) diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -109,7 +109,7 @@ RegrTest('test_asynchat.py', usemodules='select fcntl'), RegrTest('test_asyncore.py', usemodules='select fcntl'), RegrTest('test_atexit.py', core=True), - RegrTest('test_audioop.py', skip="unsupported extension module"), + RegrTest('test_audioop.py', skip="incomplete module"), RegrTest('test_augassign.py', core=True), RegrTest('test_base64.py', usemodules='struct'), RegrTest('test_bastion.py'), diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt as _ffi import _rawffi from _ctypes.basics import _CData, cdata_from_address, _CDataMeta, sizeof @@ -20,10 +20,13 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) - for i in range(len(val)): - self[i] = val[i] + if isinstance(val, str): + _rawffi.rawstring2charp(self._buffer.buffer, val) + else: + for i in range(len(val)): + self[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + self._buffer[len(val)] = '\x00' res.value = property(getvalue, setvalue) def getraw(self): @@ -33,8 +36,7 @@ def setraw(self, buffer): if len(buffer) > self._length_: raise ValueError("%r too long" % (buffer,)) - for i in range(len(buffer)): - self[i] = buffer[i] + _rawffi.rawstring2charp(self._buffer.buffer, buffer) res.raw = property(getraw, setraw) elif subletter == 'u': def getvalue(self): @@ -45,10 +47,14 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) + if isinstance(val, unicode): + target = self._buffer + else: + target = self for i in range(len(val)): - self[i] = val[i] + target[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + target[len(val)] = u'\x00' res.value = property(getvalue, setvalue) if '_length_' in typedict: diff --git a/lib_pypy/_ctypes/basics.py 
b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -1,6 +1,6 @@ import _rawffi -import _ffi +from _rawffi import alt as _ffi import sys try: from __pypy__ import builtinify diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -5,7 +5,7 @@ from _ctypes.basics import is_struct_shape from _ctypes.builtin import get_errno, set_errno, get_last_error, set_last_error import _rawffi -import _ffi +from _rawffi import alt as _ffi import sys import traceback diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -1,6 +1,6 @@ import _rawffi -import _ffi +from _rawffi import alt as _ffi from _ctypes.basics import _CData, _CDataMeta, cdata_from_address, ArgumentError from _ctypes.basics import keepalive_key, store_reference, ensure_objects from _ctypes.basics import sizeof, byref, as_ffi_pointer diff --git a/lib_pypy/_ctypes/primitive.py b/lib_pypy/_ctypes/primitive.py --- a/lib_pypy/_ctypes/primitive.py +++ b/lib_pypy/_ctypes/primitive.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt as _ffi import _rawffi import weakref import sys diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -2,6 +2,8 @@ import _rawffi from _ctypes.basics import _CData, _CDataMeta, keepalive_key,\ store_reference, ensure_objects, CArgObject +from _ctypes.array import Array +from _ctypes.pointer import _Pointer import inspect def names_and_fields(self, _fields_, superclass, anonymous_fields=None): @@ -104,8 +106,11 @@ def __set__(self, obj, value): fieldtype = self.ctype cobj = fieldtype.from_param(value) - if ensure_objects(cobj) is not None: - key = keepalive_key(self.num) + key = keepalive_key(self.num) + if issubclass(fieldtype, _Pointer) and isinstance(cobj, Array): + # if our value is an Array we need the whole thing alive + store_reference(obj, key, cobj) + elif ensure_objects(cobj) is not None: store_reference(obj, key, cobj._objects) arg = cobj._get_buffer_value() if fieldtype._fficompositesize is not None: diff --git a/lib_pypy/_ffi.py b/lib_pypy/_ffi.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_ffi.py @@ -0,0 +1,2 @@ +# Backward compatibility hack +from _rawffi.alt import * diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -330,6 +330,14 @@ # SQLite version information sqlite_version = str(_ffi.string(_lib.sqlite3_libversion()).decode('ascii')) +_STMT_TYPE_UPDATE = 0 +_STMT_TYPE_DELETE = 1 +_STMT_TYPE_INSERT = 2 +_STMT_TYPE_REPLACE = 3 +_STMT_TYPE_OTHER = 4 +_STMT_TYPE_SELECT = 5 +_STMT_TYPE_INVALID = 6 + class Error(StandardError): pass @@ -992,13 +1000,18 @@ self.__statement = self.__connection._statement_cache.get(sql) if self.__connection._isolation_level is not None: - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if not self.__connection._in_transaction: self.__connection._begin() - elif self.__statement._type == "OTHER": + elif self.__statement._type == _STMT_TYPE_OTHER: if self.__connection._in_transaction: self.__connection.commit() - elif self.__statement._type == "SELECT": + elif self.__statement._type == _STMT_TYPE_SELECT: if multiple: raise ProgrammingError("You 
cannot execute SELECT " "statements in executemany().") @@ -1021,12 +1034,17 @@ self.__statement._reset() raise self.__connection._get_exception(ret) - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if self.__rowcount == -1: self.__rowcount = 0 self.__rowcount += _lib.sqlite3_changes(self.__connection._db) - if not multiple and self.__statement._type == "INSERT": + if not multiple and self.__statement._type == _STMT_TYPE_INSERT: self.__lastrowid = _lib.sqlite3_last_insert_rowid(self.__connection._db) else: self.__lastrowid = None @@ -1176,11 +1194,19 @@ first_word = sql.lstrip().split(" ")[0].upper() if first_word == "": - self._type = "INVALID" - elif first_word in ("SELECT", "INSERT", "UPDATE", "DELETE", "REPLACE"): - self._type = first_word + self._type = _STMT_TYPE_INVALID + elif first_word == "SELECT": + self._type = _STMT_TYPE_SELECT + elif first_word == "INSERT": + self._type = _STMT_TYPE_INSERT + elif first_word == "UPDATE": + self._type = _STMT_TYPE_UPDATE + elif first_word == "DELETE": + self._type = _STMT_TYPE_DELETE + elif first_word == "REPLACE": + self._type = _STMT_TYPE_REPLACE else: - self._type = "OTHER" + self._type = _STMT_TYPE_OTHER if isinstance(sql, unicode): sql = sql.encode('utf-8') @@ -1193,7 +1219,7 @@ if ret == _lib.SQLITE_OK and not self._statement: # an empty statement, work around that, as it's the least trouble - self._type = "SELECT" + self._type = _STMT_TYPE_SELECT c_sql = _ffi.new("char[]", b"select 42") ret = _lib.sqlite3_prepare_v2(self.__con._db, c_sql, -1, statement_star, next_char) @@ -1312,7 +1338,12 @@ raise ValueError("parameters are of unsupported type") def _get_description(self): - if self._type in ("INSERT", "UPDATE", "DELETE", "REPLACE"): + if self._type in ( + _STMT_TYPE_INSERT, + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_REPLACE + ): return None desc = [] for i in xrange(_lib.sqlite3_column_count(self._statement)): diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py new file mode 100644 --- /dev/null +++ b/lib_pypy/audioop.py @@ -0,0 +1,29 @@ + +import struct + + +class error(Exception): + pass + + +def _check_size(size): + if size != 1 and size != 2 and size != 4: + raise error("Size should be 1, 2 or 4") + + +def _check_params(length, size): + _check_size(size) + if length % size != 0: + raise error("not a whole number of frames") + + +def getsample(cp, size, i): + _check_params(len(cp), size) + if not (0 <= i < len(cp) / size): + raise error("Index out of range") + if size == 1: + return struct.unpack_from("B", buffer(cp)[i:])[0] + elif size == 2: + return struct.unpack_from("H", buffer(cp)[i * 2:])[0] + elif size == 4: + return struct.unpack_from("I", buffer(cp)[i * 4:])[0] diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -34,14 +34,14 @@ "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO", "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', - "_collections", "_multibytecodec", "micronumpy", "_ffi", + "_collections", "_multibytecodec", "micronumpy", "_continuation", "_cffi_backend", "_csv", "cppyy", "_pypyjson"] )) translation_modules = default_modules.copy() translation_modules.update(dict.fromkeys( ["fcntl", "rctime", "select", "signal", "_rawffi", "zlib", - "struct", "_md5", "cStringIO", "array", "_ffi", + "struct", "_md5", 
"cStringIO", "array", "binascii", # the following are needed for pyrepl (and hence for the # interactive prompt/pdb) @@ -96,7 +96,6 @@ # no _rawffi if importing rpython.rlib.clibffi raises ImportError # or CompilationError or py.test.skip.Exception "_rawffi" : ["rpython.rlib.clibffi"], - "_ffi" : ["rpython.rlib.clibffi"], "zlib" : ["rpython.rlib.rzlib"], "bz2" : ["pypy.module.bz2.interp_bz2"], diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -109,6 +109,4 @@ .. _`rpython/translator/c/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/ .. _`rpython/translator/c/src/stacklet/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/src/stacklet/ .. _`rpython/translator/c/src/stacklet/stacklet.h`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/src/stacklet/stacklet.h -.. _`rpython/translator/cli/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/cli/ -.. _`rpython/translator/jvm/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/jvm/ .. _`rpython/translator/tool/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/tool/ diff --git a/pypy/doc/config/translation.lldebug0.txt b/pypy/doc/config/translation.lldebug0.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/translation.lldebug0.txt @@ -0,0 +1,1 @@ +Like lldebug, but in addition compile C files with -O0 diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -426,25 +426,12 @@ Could we use LLVM? ------------------ -In theory yes. But we tried to use it 5 or 6 times already, as a -translation backend or as a JIT backend --- and failed each time. +There is a (static) translation backend using LLVM in the branch +``llvm-translation-backend``. It can translate PyPy with or without the JIT on +Linux. -In more details: using LLVM as a (static) translation backend is -pointless nowadays because you can generate C code and compile it with -clang. (Note that compiling PyPy with clang gives a result that is not -faster than compiling it with gcc.) We might in theory get extra -benefits from LLVM's GC integration, but this requires more work on the -LLVM side before it would be remotely useful. Anyway, it could be -interfaced via a custom primitive in the C code. - -On the other hand, using LLVM as our JIT backend looks interesting as -well --- but again we made an attempt, and it failed: LLVM has no way to -patch the generated machine code. - -So the position of the core PyPy developers is that if anyone wants to -make an N+1'th attempt with LLVM, they are welcome, and will be happy to -provide help in the IRC channel, but they are left with the burden of proof -that (a) it works and (b) it gives important benefits. +Using LLVM as our JIT backend looks interesting as well -- we made an attempt, +but it failed: LLVM has no way to patch the generated machine code. ---------------------- How do I compile PyPy? diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -210,4 +210,12 @@ are preserved. If the object dies then the pre-reserved location becomes free garbage, to be collected at the next major collection. +The exact name of this GC is either `minimark` or `incminimark`. The +latter is a version that does major collections incrementally (i.e. 
one +major collection is split along some number of minor collections, rather +than being done all at once after a specific minor collection). The +default is `incminimark`, as it seems to have a very minimal impact on +performance and memory usage at the benefit of avoiding the long pauses +of `minimark`. + .. include:: _ref.txt diff --git a/pypy/doc/gc_info.rst b/pypy/doc/gc_info.rst --- a/pypy/doc/gc_info.rst +++ b/pypy/doc/gc_info.rst @@ -6,7 +6,7 @@ Minimark -------- -PyPy's default ``minimark`` garbage collector is configurable through +PyPy's default ``incminimark`` garbage collector is configurable through several environment variables: ``PYPY_GC_NURSERY`` @@ -14,6 +14,17 @@ Defaults to 1/2 of your cache or ``4M``. Small values (like 1 or 1KB) are useful for debugging. +``PYPY_GC_NURSERY_CLEANUP`` + The interval at which nursery is cleaned up. Must + be smaller than the nursery size and bigger than the + biggest object we can allotate in the nursery. + +``PYPY_GC_INCREMENT_STEP`` + The size of memory marked during the marking step. Default is size of + nursery times 2. If you mark it too high your GC is not incremental at + all. The minimum is set to size that survives minor collection times + 1.5 so we reclaim anything all the time. + ``PYPY_GC_MAJOR_COLLECT`` Major collection memory factor. Default is ``1.82``, which means trigger a major collection when the diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -74,6 +74,10 @@ The actual details would be rather differen in PyPy, but we would like to have the same optimization implemented. +Or maybe not. We can also play around with the idea of using a single +representation: as a byte string in utf-8. (This idea needs some extra logic +for efficient indexing, like a cache.) + .. _`optimized unicode representation`: http://www.python.org/dev/peps/pep-0393/ Translation Toolchain diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -41,3 +41,19 @@ Fix 3 broken links on PyPy published papers in docs. .. branch: jit-ordereddict + +.. branch: refactor-str-types +Remove multimethods on str/unicode/bytearray and make the implementations share code. + +.. branch: remove-del-from-generatoriterator +Speed up generators that don't yield inside try or wait blocks by skipping +unnecessary cleanup. + +.. branch: annotator +Remove FlowObjSpace. +Improve cohesion between rpython.flowspace and rpython.annotator. + +.. branch: detect-immutable-fields +mapdicts keep track of whether or not an attribute is every assigned to +multiple times. If it's only assigned once then an elidable lookup is used when +possible. 
diff --git a/pypy/goal/getnightly.py b/pypy/goal/getnightly.py --- a/pypy/goal/getnightly.py +++ b/pypy/goal/getnightly.py @@ -26,7 +26,12 @@ if branch == 'default': branch = 'trunk' -filename = 'pypy-c-jit-latest-%s.tar.bz2' % arch +if '--nojit' in sys.argv: + kind = 'nojit' +else: + kind = 'jit' + +filename = 'pypy-c-%s-latest-%s.tar.bz2' % (kind, arch) url = 'http://buildbot.pypy.org/nightly/%s/%s' % (branch, filename) tmp = py.path.local.mkdtemp() mydir = tmp.chdir() diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py --- a/pypy/interpreter/astcompiler/codegen.py +++ b/pypy/interpreter/astcompiler/codegen.py @@ -1234,6 +1234,8 @@ flags |= consts.CO_NESTED if scope.is_generator: flags |= consts.CO_GENERATOR + if scope.has_yield_inside_try: + flags |= consts.CO_YIELD_INSIDE_TRY if scope.has_variable_arg: flags |= consts.CO_VARARGS if scope.has_keywords_arg: diff --git a/pypy/interpreter/astcompiler/consts.py b/pypy/interpreter/astcompiler/consts.py --- a/pypy/interpreter/astcompiler/consts.py +++ b/pypy/interpreter/astcompiler/consts.py @@ -17,6 +17,7 @@ CO_FUTURE_UNICODE_LITERALS = 0x20000 #pypy specific: CO_KILL_DOCSTRING = 0x100000 +CO_YIELD_INSIDE_TRY = 0x200000 PyCF_SOURCE_IS_UTF8 = 0x0100 PyCF_DONT_IMPLY_DEDENT = 0x0200 diff --git a/pypy/interpreter/astcompiler/symtable.py b/pypy/interpreter/astcompiler/symtable.py --- a/pypy/interpreter/astcompiler/symtable.py +++ b/pypy/interpreter/astcompiler/symtable.py @@ -43,6 +43,7 @@ self.child_has_free = False self.nested = False self.doc_removable = False + self._in_try_body_depth = 0 def lookup(self, name): """Find the scope of identifier 'name'.""" @@ -75,6 +76,14 @@ self.varnames.append(mangled) return mangled + def note_try_start(self, try_node): + """Called when a try is found, before visiting the body.""" + self._in_try_body_depth += 1 + + def note_try_end(self, try_node): + """Called after visiting a try body.""" + self._in_try_body_depth -= 1 + def note_yield(self, yield_node): """Called when a yield is found.""" raise SyntaxError("'yield' outside function", yield_node.lineno, @@ -210,6 +219,7 @@ self.has_variable_arg = False self.has_keywords_arg = False self.is_generator = False + self.has_yield_inside_try = False self.optimized = True self.return_with_value = False self.import_star = None @@ -220,6 +230,8 @@ raise SyntaxError("'return' with argument inside generator", self.ret.lineno, self.ret.col_offset) self.is_generator = True + if self._in_try_body_depth > 0: + self.has_yield_inside_try = True def note_return(self, ret): if ret.value: @@ -463,7 +475,12 @@ self.scope.new_temporary_name() if wih.optional_vars: self.scope.new_temporary_name() - ast.GenericASTVisitor.visit_With(self, wih) + wih.context_expr.walkabout(self) + if wih.optional_vars: + wih.optional_vars.walkabout(self) + self.scope.note_try_start(wih) + self.visit_sequence(wih.body) + self.scope.note_try_end(wih) def visit_arguments(self, arguments): scope = self.scope @@ -505,3 +522,16 @@ else: role = SYM_ASSIGNED self.note_symbol(name.id, role) + + def visit_TryExcept(self, node): + self.scope.note_try_start(node) + self.visit_sequence(node.body) + self.scope.note_try_end(node) + self.visit_sequence(node.handlers) + self.visit_sequence(node.orelse) + + def visit_TryFinally(self, node): + self.scope.note_try_start(node) + self.visit_sequence(node.body) + self.scope.note_try_end(node) + self.visit_sequence(node.finalbody) diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py 
b/pypy/interpreter/astcompiler/test/test_compiler.py --- a/pypy/interpreter/astcompiler/test/test_compiler.py +++ b/pypy/interpreter/astcompiler/test/test_compiler.py @@ -1,4 +1,4 @@ -import py +import py, sys from pypy.interpreter.astcompiler import codegen, astbuilder, symtable, optimize from pypy.interpreter.pyparser import pyparse from pypy.interpreter.pyparser.test import expressions @@ -867,6 +867,9 @@ class AppTestCompiler: + def setup_class(cls): + cls.w_maxunicode = cls.space.wrap(sys.maxunicode) + def test_docstring_not_loaded(self): import StringIO, dis, sys ns = {} @@ -911,7 +914,17 @@ l = [a for a in Foo()] assert hint_called[0] assert l == list(range(5)) - + + def test_unicode_in_source(self): + import sys + d = {} + exec '# -*- coding: utf-8 -*-\n\nu = u"\xf0\x9f\x92\x8b"' in d + if sys.maxunicode > 65535 and self.maxunicode > 65535: + expected_length = 1 + else: + expected_length = 2 + assert len(d['u']) == expected_length + class TestOptimizations: def count_instructions(self, source): diff --git a/pypy/interpreter/astcompiler/test/test_symtable.py b/pypy/interpreter/astcompiler/test/test_symtable.py --- a/pypy/interpreter/astcompiler/test/test_symtable.py +++ b/pypy/interpreter/astcompiler/test/test_symtable.py @@ -346,6 +346,25 @@ assert exc.msg == "'return' with argument inside generator" scp = self.func_scope("def f():\n return\n yield x") + def test_yield_inside_try(self): + scp = self.func_scope("def f(): yield x") + assert not scp.has_yield_inside_try + scp = self.func_scope("def f():\n try:\n yield x\n except: pass") + assert scp.has_yield_inside_try + scp = self.func_scope("def f():\n try:\n yield x\n finally: pass") + assert scp.has_yield_inside_try + scp = self.func_scope("def f():\n with x: yield y") + assert scp.has_yield_inside_try + + def test_yield_outside_try(self): + for input in ("try: pass\n except: pass", + "try: pass\n except: yield y", + "try: pass\n finally: pass", + "try: pass\n finally: yield y", + "with x: pass"): + input = "def f():\n yield y\n %s\n yield y" % (input,) + assert not self.func_scope(input).has_yield_inside_try + def test_return(self): for input in ("class x: return", "return"): exc = py.test.raises(SyntaxError, self.func_scope, input).value diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -231,6 +231,10 @@ msg = "__int__ returned non-int (type '%T')" raise operationerrfmt(space.w_TypeError, msg, w_result) + def ord(self, space): + msg = "ord() expected string of length 1, but %T found" + raise operationerrfmt(space.w_TypeError, msg, self) + def __spacebind__(self, space): return self @@ -905,7 +909,7 @@ """ return self.unpackiterable(w_iterable, expected_length) - def listview_str(self, w_list): + def listview_bytes(self, w_list): """ Return a list of unwrapped strings out of a list of strings. If the argument is not a list or does not contain only strings, return None. May return None anyway. @@ -939,7 +943,7 @@ """ return (None, None) - def newlist_str(self, list_s): + def newlist_bytes(self, list_s): return self.newlist([self.wrap(s) for s in list_s]) def newlist_unicode(self, list_u): @@ -1396,6 +1400,9 @@ # This is here mostly just for gateway.int_unwrapping_space_method(). return bool(self.int_w(w_obj)) + def ord(self, w_obj): + return w_obj.ord(self) + # This is all interface for gateway.py. 
def gateway_int_w(self, w_obj): if self.isinstance_w(w_obj, self.w_float): diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -6,7 +6,7 @@ from errno import EINTR from rpython.rlib import jit -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, specialize from pypy.interpreter import debug @@ -40,12 +40,11 @@ self.debug_excs = [] def clear(self, space): - # for sys.exc_clear() - self.w_type = space.w_None - self._w_value = space.w_None - self._application_traceback = None - if not we_are_translated(): - del self.debug_excs[:] + # XXX remove this method. The point is that we cannot always + # hack at 'self' to clear w_type and _w_value, because in some + # corner cases the OperationError will be used again: see + # test_interpreter.py:test_with_statement_and_sys_clear. + pass def match(self, space, w_check_class): "Check if this application-level exception matches 'w_check_class'." @@ -300,6 +299,10 @@ """ self._application_traceback = traceback + at specialize.memo() +def get_cleared_operation_error(space): + return OperationError(space.w_None, space.w_None) + # ____________________________________________________________ # optimization only: avoid the slowest operation -- the string # formatting with '%' -- in the common case were we don't @@ -371,8 +374,8 @@ class OpErrFmtNoArgs(OperationError): def __init__(self, w_type, value): + self._value = value self.setup(w_type) - self._value = value def get_w_value(self, space): w_value = self._w_value diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -1,5 +1,5 @@ import sys -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, get_cleared_operation_error from rpython.rlib.unroll import unrolling_iterable from rpython.rlib import jit @@ -217,6 +217,17 @@ if frame: # else, the exception goes nowhere and is lost frame.last_exception = operror + def clear_sys_exc_info(self): + # Find the frame out of which sys_exc_info() would return its result, + # and hack this frame's last_exception to become the cleared + # OperationError (which is different from None!). + frame = self.gettopframe_nohidden() + while frame: + if frame.last_exception is not None: + frame.last_exception = get_cleared_operation_error(self.space) + break + frame = self.getnextframe_nohidden(frame) + @jit.dont_look_inside def settrace(self, w_func): """Set the global trace function.""" diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -520,12 +520,13 @@ # When a BuiltinCode is stored in a Function object, # you get the functionality of CPython's built-in function type. - def __init__(self, func, unwrap_spec=None, self_type=None, descrmismatch=None): + def __init__(self, func, unwrap_spec=None, self_type=None, + descrmismatch=None, doc=None): "NOT_RPYTHON" # 'implfunc' is the interpreter-level function. # Note that this uses a lot of (construction-time) introspection. 
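clear_sys_exc_info() in the executioncontext.py hunk above walks the frame chain and replaces the innermost frame's last_exception with a dedicated "cleared" OperationError rather than None, so exc_info() can later tell "cleared" apart from "never set". A rough standalone model of that walk, using an invented Frame class and a plain sentinel in place of get_cleared_operation_error():

    CLEARED = object()   # plays the role of the memoized cleared OperationError

    class Frame(object):
        def __init__(self, last_exception=None):
            self.last_exception = last_exception

    def clear_sys_exc_info(frames):
        # frames are ordered innermost first, like gettopframe_nohidden()
        # followed by repeated getnextframe_nohidden() in the real code
        for frame in frames:
            if frame.last_exception is not None:
                frame.last_exception = CLEARED
                break

    frames = [Frame(), Frame(ZeroDivisionError()), Frame(NameError())]
    clear_sys_exc_info(frames)
    assert frames[1].last_exception is CLEARED
    assert isinstance(frames[2].last_exception, NameError)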
Code.__init__(self, func.__name__) - self.docstring = func.__doc__ + self.docstring = doc or func.__doc__ self.identifier = "%s-%s-%s" % (func.__module__, func.__name__, getattr(self_type, '__name__', '*')) @@ -832,7 +833,7 @@ instancecache = {} def __new__(cls, f, app_name=None, unwrap_spec=None, descrmismatch=None, - as_classmethod=False): + as_classmethod=False, doc=None): "NOT_RPYTHON" # f must be a function whose name does NOT start with 'app_' @@ -861,7 +862,8 @@ cls.instancecache[key] = self self._code = BuiltinCode(f, unwrap_spec=unwrap_spec, self_type=self_type, - descrmismatch=descrmismatch) + descrmismatch=descrmismatch, + doc=doc) self.__name__ = f.func_name self.name = app_name self.as_classmethod = as_classmethod diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py --- a/pypy/interpreter/generator.py +++ b/pypy/interpreter/generator.py @@ -155,20 +155,6 @@ code_name = self.pycode.co_name return space.wrap(code_name) - def __del__(self): - # Only bother enqueuing self to raise an exception if the frame is - # still not finished and finally or except blocks are present. - self.clear_all_weakrefs() - if self.frame is not None: - block = self.frame.lastblock - while block is not None: - if not isinstance(block, LoopBlock): - self.enqueue_for_destruction(self.space, - GeneratorIterator.descr_close, - "interrupting generator of ") - break - block = block.previous - # Results can be either an RPython list of W_Root, or it can be an # app-level W_ListObject, which also has an append() method, that's why we # generate 2 versions of the function and 2 jit drivers. @@ -211,3 +197,20 @@ return unpack_into unpack_into = _create_unpack_into() unpack_into_w = _create_unpack_into() + + +class GeneratorIteratorWithDel(GeneratorIterator): + + def __del__(self): + # Only bother enqueuing self to raise an exception if the frame is + # still not finished and finally or except blocks are present. + self.clear_all_weakrefs() + if self.frame is not None: + block = self.frame.lastblock + while block is not None: + if not isinstance(block, LoopBlock): + self.enqueue_for_destruction(self.space, + GeneratorIterator.descr_close, + "interrupting generator of ") + break + block = block.previous diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py --- a/pypy/interpreter/pycode.py +++ b/pypy/interpreter/pycode.py @@ -12,7 +12,7 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.interpreter.astcompiler.consts import ( CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED, - CO_GENERATOR, CO_KILL_DOCSTRING) + CO_GENERATOR, CO_KILL_DOCSTRING, CO_YIELD_INSIDE_TRY) from pypy.tool.stdlib_opcode import opcodedesc, HAVE_ARGUMENT from rpython.rlib.rarithmetic import intmask from rpython.rlib.objectmodel import compute_hash @@ -31,7 +31,7 @@ # Magic numbers for the bytecode version in code objects. # See comments in pypy/module/imp/importing. cpython_magic, = struct.unpack("<195><164>" may become "\u005c\U000000E4" (16 bytes) + while ps < end: + if s[ps] == '\\': + lis.append(s[ps]) + ps += 1 + if ord(s[ps]) & 0x80: + # A multibyte sequence will follow, it will be + # escaped like \u1234. To avoid confusion with + # the backslash we just wrote, we emit "\u005c" + # instead. 
+ lis.append("u005c") + if ord(s[ps]) & 0x80: # XXX inefficient + w, ps = decode_utf8(space, s, ps, end, "utf-32-be") + rn = len(w) + assert rn % 4 == 0 + for i in range(0, rn, 4): + lis.append('\\U') + lis.append(hexbyte(ord(w[i]))) + lis.append(hexbyte(ord(w[i+1]))) + lis.append(hexbyte(ord(w[i+2]))) + lis.append(hexbyte(ord(w[i+3]))) + else: + lis.append(s[ps]) + ps += 1 + return ''.join(lis) + def PyString_DecodeEscape(space, s, recode_encoding): """ Unescape a backslash-escaped string. If recode_encoding is non-zero, diff --git a/pypy/interpreter/pyparser/test/test_parsestring.py b/pypy/interpreter/pyparser/test/test_parsestring.py --- a/pypy/interpreter/pyparser/test/test_parsestring.py +++ b/pypy/interpreter/pyparser/test/test_parsestring.py @@ -1,10 +1,10 @@ from pypy.interpreter.pyparser import parsestring -import py +import py, sys class TestParsetring: - def parse_and_compare(self, literal, value): + def parse_and_compare(self, literal, value, encoding=None): space = self.space - w_ret = parsestring.parsestr(space, None, literal) + w_ret = parsestring.parsestr(space, encoding, literal) if isinstance(value, str): assert space.type(w_ret) == space.w_str assert space.str_w(w_ret) == value @@ -91,3 +91,18 @@ input = ["'", 'x', ' ', chr(0xc3), chr(0xa9), ' ', chr(92), 'n', "'"] w_ret = parsestring.parsestr(space, 'utf8', ''.join(input)) assert space.str_w(w_ret) == ''.join(expected) + + def test_wide_unicode_in_source(self): + if sys.maxunicode == 65535: + py.test.skip("requires a wide-unicode host") + self.parse_and_compare('u"\xf0\x9f\x92\x8b"', + unichr(0x1f48b), + encoding='utf-8') + + def test_decode_unicode_utf8(self): + buf = parsestring.decode_unicode_utf8(self.space, + 'u"\xf0\x9f\x92\x8b"', 2, 6) + if sys.maxunicode == 65535: + assert buf == r"\U0000d83d\U0000dc8b" + else: + assert buf == r"\U0001f48b" diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -708,6 +708,18 @@ never_called py.test.raises(AssertionError, space.wrap, gateway.interp2app_temp(g)) + def test_interp2app_doc(self): + space = self.space + def f(space, w_x): + """foo""" + w_f = space.wrap(gateway.interp2app_temp(f)) + assert space.unwrap(space.getattr(w_f, space.wrap('__doc__'))) == 'foo' + # + def g(space, w_x): + never_called + w_g = space.wrap(gateway.interp2app_temp(g, doc='bar')) + assert space.unwrap(space.getattr(w_g, space.wrap('__doc__'))) == 'bar' + class AppTestPyTestMark: @py.test.mark.unlikely_to_exist diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py --- a/pypy/interpreter/test/test_interpreter.py +++ b/pypy/interpreter/test/test_interpreter.py @@ -311,3 +311,73 @@ assert str(e) == "maximum recursion depth exceeded" else: assert 0, "should have raised!" 
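The decode_unicode_utf8 hunk above, together with the test above expecting r"\U0001f48b", rewrites each multibyte UTF-8 run as \UXXXXXXXX escapes by going through utf-32-be, after emitting \u005c for a literal backslash. A rough standalone illustration of the re-escaping step only, not the PyPy helper itself; it assumes a host whose utf-32 codec sees whole code points, so on a narrow-unicode host the real helper produces the surrogate-pair form shown in the test instead:

    def escape_utf8_run(utf8_bytes):
        # re-encode the multibyte run as UTF-32-BE and emit one \UXXXXXXXX
        # escape per 4 bytes, keeping the resulting literal pure ASCII
        w = utf8_bytes.decode('utf-8').encode('utf-32-be')
        assert len(w) % 4 == 0
        parts = []
        for i in range(0, len(w), 4):
            parts.append('\\U' + ''.join('%02x' % ord(c) for c in w[i:i+4]))
        return ''.join(parts)

    assert escape_utf8_run('\xc3\xa4') == r'\U000000e4'          # U+00E4
    assert escape_utf8_run('\xf0\x9f\x92\x8b') == r'\U0001f48b'  # U+1F48B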
+ + def test_with_statement_and_sys_clear(self): + import sys + class CM(object): + def __enter__(self): + return self + def __exit__(self, exc_type, exc_value, tb): + sys.exc_clear() + try: + with CM(): + 1 / 0 + raise AssertionError("should not be reached") + except ZeroDivisionError: + pass + + def test_sys_clear_while_handling_exception(self): + import sys + def f(): + try: + some_missing_name + except NameError: + g() + assert sys.exc_info()[0] is NameError + def g(): + assert sys.exc_info()[0] is NameError + try: + 1 / 0 + except ZeroDivisionError: + assert sys.exc_info()[0] is ZeroDivisionError + sys.exc_clear() + assert sys.exc_info()[0] is None + h() + assert sys.exc_info()[0] is None + def h(): + assert sys.exc_info()[0] is None + f() + + def test_sys_clear_while_handling_exception_nested(self): + import sys + def f(): + try: + some_missing_name + except NameError: + g() + assert sys.exc_info()[0] is NameError + def g(): + assert sys.exc_info()[0] is NameError + try: + 1 / 0 + except ZeroDivisionError: + assert sys.exc_info()[0] is ZeroDivisionError + h1() + assert sys.exc_info()[0] is None + h() + assert sys.exc_info()[0] is None + def h(): + assert sys.exc_info()[0] is None + def h1(): + sys.exc_clear() + f() + + def test_sys_clear_reraise(self): + import sys + def f(): + try: + 1 / 0 + except ZeroDivisionError: + sys.exc_clear() + raise + raises(TypeError, f) diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py --- a/pypy/interpreter/unicodehelper.py +++ b/pypy/interpreter/unicodehelper.py @@ -30,7 +30,7 @@ # ____________________________________________________________ def encode(space, w_data, encoding=None, errors='strict'): - from pypy.objspace.std.unicodetype import encode_object + from pypy.objspace.std.unicodeobject import encode_object return encode_object(space, w_data, encoding, errors) # These functions take and return unwrapped rpython strings and unicodes diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py --- a/pypy/module/__builtin__/interp_memoryview.py +++ b/pypy/module/__builtin__/interp_memoryview.py @@ -68,10 +68,14 @@ return W_MemoryView(buf) def descr_buffer(self, space): - """Note that memoryview() objects in PyPy support buffer(), whereas - not in CPython; but CPython supports passing memoryview() to most - built-in functions that accept buffers, with the notable exception - of the buffer() built-in.""" + """ + Note that memoryview() is very inconsistent in CPython: it does not + support the buffer interface but does support the new buffer + interface: as a result, it is possible to pass memoryview to + e.g. socket.send() but not to file.write(). For simplicity and + consistency, in PyPy memoryview DOES support buffer(), which means + that it is accepted in more places than CPython. 
+ """ return space.wrap(self.buf) def descr_tobytes(self, space): diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -1061,14 +1061,14 @@ assert (D() >= A()) == 'D:A.ge' -class AppTestOldStyleClassStrDict(object): +class AppTestOldStyleClassBytesDict(object): def setup_class(cls): if cls.runappdirect: py.test.skip("can only be run on py.py") def is_strdict(space, w_class): - from pypy.objspace.std.dictmultiobject import StringDictStrategy + from pypy.objspace.std.dictmultiobject import BytesDictStrategy w_d = w_class.getdict(space) - return space.wrap(isinstance(w_d.strategy, StringDictStrategy)) + return space.wrap(isinstance(w_d.strategy, BytesDictStrategy)) cls.w_is_strdict = cls.space.wrap(gateway.interp2app(is_strdict)) diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -3,7 +3,7 @@ from rpython.rlib.objectmodel import we_are_translated from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.typeobject import MethodCache -from pypy.objspace.std.mapdict import IndexCache +from pypy.objspace.std.mapdict import MapAttrCache from rpython.rlib import rposix, rgc @@ -35,7 +35,7 @@ cache.misses = {} cache.hits = {} if space.config.objspace.std.withmapdict: - cache = space.fromcache(IndexCache) + cache = space.fromcache(MapAttrCache) cache.misses = {} cache.hits = {} @@ -45,7 +45,7 @@ in the mapdict cache with the given attribute name.""" assert space.config.objspace.std.withmethodcachecounter assert space.config.objspace.std.withmapdict - cache = space.fromcache(IndexCache) + cache = space.fromcache(MapAttrCache) return space.newtuple([space.newint(cache.hits.get(name, 0)), space.newint(cache.misses.get(name, 0))]) diff --git a/pypy/module/__pypy__/test/test_special.py b/pypy/module/__pypy__/test/test_special.py --- a/pypy/module/__pypy__/test/test_special.py +++ b/pypy/module/__pypy__/test/test_special.py @@ -51,7 +51,9 @@ l = [1, 2, 3] assert list_strategy(l) == "int" l = ["a", "b", "c"] - assert list_strategy(l) == "str" + assert list_strategy(l) == "bytes" + l = [u"a", u"b", u"c"] + assert list_strategy(l) == "unicode" l = [1.1, 2.2, 3.3] assert list_strategy(l) == "float" l = range(3) diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -5,7 +5,7 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr -from rpython.rlib import objectmodel, rgc +from rpython.rlib import rgc from rpython.rlib.objectmodel import keepalive_until_here, specialize from rpython.rtyper.lltypesystem import lltype, rffi from rpython.tool.sourcetools import func_with_new_name diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -33,7 +33,7 @@ if self.fields_dict is None: space = self.space raise operationerrfmt(w_errorcls or space.w_TypeError, - "'%s' is not completed yet", self.name) + "'%s' is opaque or not completed yet", self.name) def _alignof(self): self.check_complete(w_errorcls=self.space.w_ValueError) diff --git a/pypy/module/_cffi_backend/handle.py 
b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -2,58 +2,13 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec from pypy.module._cffi_backend import ctypeobj, ctypeptr, cdataobj -from pypy.module._weakref.interp__weakref import dead_ref from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib import rweaklist -def reduced_value(s): - while True: - divide = s & 1 - s >>= 1 - if not divide: - return s - -# ____________________________________________________________ - - -class CffiHandles: +class CffiHandles(rweaklist.RWeakListMixin): def __init__(self, space): - self.handles = [] - self.look_distance = 0 - - def reserve_next_handle_index(self): - # The reservation ordering done here is tweaked for pypy's - # memory allocator. We look from index 'look_distance'. - # Look_distance increases from 0. But we also look at - # "look_distance/2" or "/4" or "/8", etc. If we find that one - # of these secondary locations is free, we assume it's because - # there was recently a minor collection; so we reset - # look_distance to 0 and start again from the lowest locations. - length = len(self.handles) - for d in range(self.look_distance, length): - if self.handles[d]() is None: - self.look_distance = d + 1 - return d - s = reduced_value(d) - if self.handles[s]() is None: - break - # restart from the beginning - for d in range(0, length): - if self.handles[d]() is None: - self.look_distance = d + 1 - return d - # full! extend, but don't use '+=' here - self.handles = self.handles + [dead_ref] * (length // 3 + 5) - self.look_distance = length + 1 - return length - - def store_handle(self, index, content): - self.handles[index] = weakref.ref(content) - - def fetch_handle(self, index): - if 0 <= index < len(self.handles): - return self.handles[index]() - return None + self.initialize() def get(space): return space.fromcache(CffiHandles) diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -118,6 +118,7 @@ SF_MSVC_BITFIELDS = 1 SF_GCC_ARM_BITFIELDS = 2 SF_GCC_BIG_ENDIAN = 4 +SF_PACKED = 8 if sys.platform == 'win32': DEFAULT_SFLAGS = SF_MSVC_BITFIELDS @@ -190,8 +191,8 @@ boffset = 0 # reset each field at offset 0 # # update the total alignment requirement, but skip it if the - # field is an anonymous bitfield - falign = ftype.alignof() + # field is an anonymous bitfield or if SF_PACKED + falign = 1 if sflags & SF_PACKED else ftype.alignof() do_align = True if (sflags & SF_GCC_ARM_BITFIELDS) == 0 and fbitsize >= 0: if (sflags & SF_MSVC_BITFIELDS) == 0: @@ -305,6 +306,12 @@ if bits_already_occupied + fbitsize > 8 * ftype.size: # it would not fit, we need to start at the next # allowed position + if ((sflags & SF_PACKED) != 0 and + (bits_already_occupied & 7) != 0): + raise operationerrfmt(space.w_NotImplementedError, + "with 'packed', gcc would compile field " + "'%s.%s' to reuse some bits in the previous " + "field", w_ctype.name, fname) field_offset_bytes += falign assert boffset < field_offset_bytes * 8 boffset = field_offset_bytes * 8 diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -3137,6 +3137,44 @@ p = newp(BArray, None) assert sizeof(p[2:9]) == 
7 * sizeof(BInt) +def test_packed(): + BLong = new_primitive_type("long") + BChar = new_primitive_type("char") + BShort = new_primitive_type("short") + BStruct = new_struct_type("struct foo") + complete_struct_or_union(BStruct, [('a1', BLong, -1), + ('a2', BChar, -1), + ('a3', BShort, -1)], + None, -1, -1, 8) # SF_PACKED==8 + d = BStruct.fields + assert len(d) == 3 + assert d[0][0] == 'a1' + assert d[0][1].type is BLong + assert d[0][1].offset == 0 + assert d[0][1].bitshift == -1 + assert d[0][1].bitsize == -1 + assert d[1][0] == 'a2' + assert d[1][1].type is BChar + assert d[1][1].offset == sizeof(BLong) + assert d[1][1].bitshift == -1 + assert d[1][1].bitsize == -1 + assert d[2][0] == 'a3' + assert d[2][1].type is BShort + assert d[2][1].offset == sizeof(BLong) + sizeof(BChar) + assert d[2][1].bitshift == -1 + assert d[2][1].bitsize == -1 + assert sizeof(BStruct) == sizeof(BLong) + sizeof(BChar) + sizeof(BShort) + assert alignof(BStruct) == 1 + +def test_packed_with_bitfields(): + BLong = new_primitive_type("long") + BChar = new_primitive_type("char") + BStruct = new_struct_type("struct foo") + py.test.raises(NotImplementedError, + complete_struct_or_union, + BStruct, [('a1', BLong, 30), + ('a2', BChar, 5)], + None, -1, -1, 8) # SF_PACKED==8 def test_version(): # this test is here mostly for PyPy diff --git a/pypy/module/_cffi_backend/test/test_handle.py b/pypy/module/_cffi_backend/test/test_handle.py --- a/pypy/module/_cffi_backend/test/test_handle.py +++ b/pypy/module/_cffi_backend/test/test_handle.py @@ -1,20 +1,5 @@ import random -from pypy.module._cffi_backend.handle import CffiHandles, reduced_value - - -def test_reduced_value(): - assert reduced_value(0) == 0 - assert reduced_value(1) == 0 - assert reduced_value(2) == 1 - assert reduced_value(3) == 0 - assert reduced_value(4) == 2 - assert reduced_value(5) == 1 - assert reduced_value(6) == 3 - assert reduced_value(7) == 0 - assert reduced_value(8) == 4 - assert reduced_value(9) == 2 - assert reduced_value(10) == 5 - assert reduced_value(11) == 1 +from pypy.module._cffi_backend.handle import CffiHandles class PseudoWeakRef(object): diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -709,7 +709,7 @@ @unwrap_spec(data=str, errors='str_or_None') def escape_encode(space, data, errors='strict'): - from pypy.objspace.std.stringobject import string_escape_encode + from pypy.objspace.std.bytesobject import string_escape_encode result = string_escape_encode(data, quote="'") start = 1 end = len(result) - 1 diff --git a/pypy/module/_csv/interp_reader.py b/pypy/module/_csv/interp_reader.py --- a/pypy/module/_csv/interp_reader.py +++ b/pypy/module/_csv/interp_reader.py @@ -39,6 +39,7 @@ field_builder.append(c) def save_field(self, field_builder): + space = self.space field = field_builder.build() if self.numeric_field: from rpython.rlib.rstring import ParseStringError @@ -46,12 +47,12 @@ self.numeric_field = False try: ff = string_to_float(field) - except ParseStringError, e: - raise OperationError(self.space.w_ValueError, - self.space.wrap(e.msg)) - w_obj = self.space.wrap(ff) + except ParseStringError as e: + from pypy.objspace.std.intobject import wrap_parsestringerror + raise wrap_parsestringerror(space, e, space.wrap(field)) + w_obj = space.wrap(ff) else: - w_obj = self.space.wrap(field) + w_obj = space.wrap(field) self.fields_w.append(w_obj) def next_w(self): diff --git a/pypy/module/_io/interp_bytesio.py 
b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -11,7 +11,7 @@ class W_BytesIO(RStringIO, W_BufferedIOBase): def __init__(self, space): - W_BufferedIOBase.__init__(self, space) + W_BufferedIOBase.__init__(self, space, add_to_autoflusher=False) self.init() def descr_init(self, space, w_initial_bytes=None): diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -5,7 +5,7 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.rstring import StringBuilder -from rpython.rlib import rweakref +from rpython.rlib import rweakref, rweaklist DEFAULT_BUFFER_SIZE = 8192 @@ -44,15 +44,15 @@ class W_IOBase(W_Root): - def __init__(self, space): + def __init__(self, space, add_to_autoflusher=True): # XXX: IOBase thinks it has to maintain its own internal state in # `__IOBase_closed` and call flush() by itself, but it is redundant # with whatever behaviour a non-trivial derived class will implement. self.space = space self.w_dict = space.newdict() self.__IOBase_closed = False - self.streamholder = None # needed by AutoFlusher - get_autoflusher(space).add(self) + if add_to_autoflusher: + get_autoflusher(space).add(self) def getdict(self, space): return self.w_dict @@ -114,7 +114,6 @@ space.call_method(self, "flush") finally: self.__IOBase_closed = True - get_autoflusher(space).remove(self) def flush_w(self, space): if self._CLOSED(): @@ -338,55 +337,35 @@ # functions to make sure that all streams are flushed on exit # ------------------------------------------------------------ -class StreamHolder(object): - def __init__(self, w_iobase): - self.w_iobase_ref = rweakref.ref(w_iobase) - w_iobase.autoflusher = self - def autoflush(self, space): - w_iobase = self.w_iobase_ref() - if w_iobase is not None: - try: - space.call_method(w_iobase, 'flush') - except OperationError: - # Silencing all errors is bad, but getting randomly - # interrupted here is equally as bad, and potentially - # more frequent (because of shutdown issues). - pass - - -class AutoFlusher(object): +class AutoFlusher(rweaklist.RWeakListMixin): def __init__(self, space): - self.streams = {} + self.initialize() def add(self, w_iobase): - assert w_iobase.streamholder is None if rweakref.has_weakref_support(): - holder = StreamHolder(w_iobase) - w_iobase.streamholder = holder - self.streams[holder] = None + self.add_handle(w_iobase) #else: # no support for weakrefs, so ignore and we # will not get autoflushing - def remove(self, w_iobase): - holder = w_iobase.streamholder - if holder is not None: - try: - del self.streams[holder] - except KeyError: - # this can happen in daemon threads - pass - def flush_all(self, space): - while self.streams: - for streamholder in self.streams.keys(): + while True: + handles = self.get_all_handles() + if len(handles) == 0: + break + self.initialize() # reset the state here + for wr in handles: + w_iobase = wr() + if w_iobase is None: + continue try: - del self.streams[streamholder] - except KeyError: - pass # key was removed in the meantime - else: - streamholder.autoflush(space) + space.call_method(w_iobase, 'flush') + except OperationError: + # Silencing all errors is bad, but getting randomly + # interrupted here is equally as bad, and potentially + # more frequent (because of shutdown issues). 
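Both the CffiHandles rewrite earlier in this series and the AutoFlusher rewrite above drop their hand-rolled weakref bookkeeping in favour of rpython.rlib.rweaklist.RWeakListMixin; initialize(), add_handle() and get_all_handles() are the calls visible in the diffs. A pure-Python stand-in for that protocol, to make the new flush_all() control flow easier to follow; the fetch-by-index method below is an assumption added for symmetry, everything else is taken from what the patch uses:

    import weakref

    class WeakListSketch(object):
        # minimal stand-in for RWeakListMixin, without the RPython-specific
        # free-slot reuse of the real mixin
        def initialize(self):
            self.handles = []
        def add_handle(self, obj):
            index = len(self.handles)
            self.handles.append(weakref.ref(obj))
            return index
        def fetch_handle(self, index):          # assumed helper, for symmetry
            if 0 <= index < len(self.handles):
                return self.handles[index]()
            return None
        def get_all_handles(self):
            return self.handles

    class Stream(object):
        def flush(self):
            self.flushed = True

    registry = WeakListSketch()
    registry.initialize()
    s = Stream()
    registry.add_handle(s)
    # flush_all-style loop: resolve each weakref and flush what is still alive
    for wr in registry.get_all_handles():
        obj = wr()
        if obj is not None:
            obj.flush()
    assert s.flushed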
+ pass def get_autoflusher(space): return space.fromcache(AutoFlusher) diff --git a/pypy/module/_multiprocessing/test/test_memory.py b/pypy/module/_multiprocessing/test/test_memory.py --- a/pypy/module/_multiprocessing/test/test_memory.py +++ b/pypy/module/_multiprocessing/test/test_memory.py @@ -1,6 +1,6 @@ class AppTestMemory: spaceconfig = dict(usemodules=('_multiprocessing', 'mmap', - '_rawffi', '_ffi', 'itertools')) + '_rawffi', 'itertools')) def test_address_of(self): import _multiprocessing diff --git a/pypy/module/_pickle_support/maker.py b/pypy/module/_pickle_support/maker.py --- a/pypy/module/_pickle_support/maker.py +++ b/pypy/module/_pickle_support/maker.py @@ -5,7 +5,7 @@ from pypy.interpreter.module import Module from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.pytraceback import PyTraceback -from pypy.interpreter.generator import GeneratorIterator +from pypy.interpreter.generator import GeneratorIteratorWithDel from rpython.rlib.objectmodel import instantiate from pypy.interpreter.gateway import unwrap_spec from pypy.objspace.std.iterobject import W_SeqIterObject, W_ReverseSeqIterObject @@ -60,7 +60,7 @@ return space.wrap(tb) def generator_new(space): - new_generator = instantiate(GeneratorIterator) + new_generator = instantiate(GeneratorIteratorWithDel) return space.wrap(new_generator) @unwrap_spec(current=int, remaining=int, step=int) diff --git a/pypy/module/_rawffi/__init__.py b/pypy/module/_rawffi/__init__.py --- a/pypy/module/_rawffi/__init__.py +++ b/pypy/module/_rawffi/__init__.py @@ -2,6 +2,7 @@ """ from pypy.interpreter.mixedmodule import MixedModule +from pypy.module._rawffi import alt class Module(MixedModule): interpleveldefs = { @@ -19,6 +20,7 @@ 'wcharp2unicode' : 'interp_rawffi.wcharp2unicode', 'charp2rawstring' : 'interp_rawffi.charp2rawstring', 'wcharp2rawunicode' : 'interp_rawffi.wcharp2rawunicode', + 'rawstring2charp' : 'interp_rawffi.rawstring2charp', 'CallbackPtr' : 'callback.W_CallbackPtr', '_num_of_allocated_objects' : 'tracker.num_of_allocated_objects', 'get_libc' : 'interp_rawffi.get_libc', @@ -32,6 +34,10 @@ appleveldefs = { } + submodules = { + 'alt': alt.Module, + } + def buildloaders(cls): from pypy.module._rawffi import interp_rawffi diff --git a/pypy/module/_ffi/__init__.py b/pypy/module/_rawffi/alt/__init__.py rename from pypy/module/_ffi/__init__.py rename to pypy/module/_rawffi/alt/__init__.py diff --git a/pypy/module/_ffi/app_struct.py b/pypy/module/_rawffi/alt/app_struct.py rename from pypy/module/_ffi/app_struct.py rename to pypy/module/_rawffi/alt/app_struct.py --- a/pypy/module/_ffi/app_struct.py +++ b/pypy/module/_rawffi/alt/app_struct.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt class MetaStructure(type): @@ -11,7 +11,7 @@ fields = dic.get('_fields_') if fields is None: return - struct_descr = _ffi._StructDescr(name, fields) + struct_descr = alt._StructDescr(name, fields) for field in fields: dic[field.name] = field dic['_struct_'] = struct_descr diff --git a/pypy/module/_ffi/interp_ffitype.py b/pypy/module/_rawffi/alt/interp_ffitype.py rename from pypy/module/_ffi/interp_ffitype.py rename to pypy/module/_rawffi/alt/interp_ffitype.py --- a/pypy/module/_ffi/interp_ffitype.py +++ b/pypy/module/_rawffi/alt/interp_ffitype.py @@ -116,7 +116,7 @@ types = [ # note: most of the type name directly come from the C equivalent, # with the exception of bytes: in C, ubyte and char are equivalent, - # but for _ffi the first expects a number while the second a 1-length + # but for here the first expects a number while 
the second a 1-length # string W_FFIType('slong', libffi.types.slong), W_FFIType('sint', libffi.types.sint), diff --git a/pypy/module/_ffi/interp_funcptr.py b/pypy/module/_rawffi/alt/interp_funcptr.py rename from pypy/module/_ffi/interp_funcptr.py rename to pypy/module/_rawffi/alt/interp_funcptr.py --- a/pypy/module/_ffi/interp_funcptr.py +++ b/pypy/module/_rawffi/alt/interp_funcptr.py @@ -3,7 +3,7 @@ operationerrfmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef -from pypy.module._ffi.interp_ffitype import W_FFIType +from pypy.module._rawffi.alt.interp_ffitype import W_FFIType # from rpython.rtyper.lltypesystem import lltype, rffi # @@ -13,7 +13,7 @@ from rpython.rlib.rdynload import DLOpenError from rpython.rlib.rarithmetic import r_uint from rpython.rlib.objectmodel import we_are_translated -from pypy.module._ffi.type_converter import FromAppLevelConverter, ToAppLevelConverter +from pypy.module._rawffi.alt.type_converter import FromAppLevelConverter, ToAppLevelConverter from pypy.module._rawffi.interp_rawffi import got_libffi_error, wrap_dlopenerror import os @@ -302,7 +302,7 @@ W_FuncPtr.typedef = TypeDef( - '_ffi.FuncPtr', + '_rawffi.alt.FuncPtr', __call__ = interp2app(W_FuncPtr.call), getaddr = interp2app(W_FuncPtr.getaddr), free_temp_buffers = interp2app(W_FuncPtr.free_temp_buffers), @@ -346,7 +346,7 @@ W_CDLL.typedef = TypeDef( - '_ffi.CDLL', + '_rawffi.alt.CDLL', __new__ = interp2app(descr_new_cdll), getfunc = interp2app(W_CDLL.getfunc), getaddressindll = interp2app(W_CDLL.getaddressindll), @@ -363,7 +363,7 @@ W_WinDLL.typedef = TypeDef( - '_ffi.WinDLL', + '_rawffi.alt.WinDLL', __new__ = interp2app(descr_new_windll), getfunc = interp2app(W_WinDLL.getfunc), getaddressindll = interp2app(W_WinDLL.getaddressindll), diff --git a/pypy/module/_ffi/interp_struct.py b/pypy/module/_rawffi/alt/interp_struct.py rename from pypy/module/_ffi/interp_struct.py rename to pypy/module/_rawffi/alt/interp_struct.py --- a/pypy/module/_ffi/interp_struct.py +++ b/pypy/module/_rawffi/alt/interp_struct.py @@ -8,8 +8,8 @@ from pypy.interpreter.typedef import TypeDef, interp_attrproperty from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.error import operationerrfmt -from pypy.module._ffi.interp_ffitype import W_FFIType -from pypy.module._ffi.type_converter import FromAppLevelConverter, ToAppLevelConverter +from pypy.module._rawffi.alt.interp_ffitype import W_FFIType +from pypy.module._rawffi.alt.type_converter import FromAppLevelConverter, ToAppLevelConverter class W_Field(W_Root): diff --git a/pypy/module/_ffi/test/__init__.py b/pypy/module/_rawffi/alt/test/__init__.py rename from pypy/module/_ffi/test/__init__.py rename to pypy/module/_rawffi/alt/test/__init__.py diff --git a/pypy/module/_ffi/test/test_ffitype.py b/pypy/module/_rawffi/alt/test/test_ffitype.py rename from pypy/module/_ffi/test/test_ffitype.py rename to pypy/module/_rawffi/alt/test/test_ffitype.py --- a/pypy/module/_ffi/test/test_ffitype.py +++ b/pypy/module/_rawffi/alt/test/test_ffitype.py @@ -1,21 +1,21 @@ -from pypy.module._ffi.test.test_funcptr import BaseAppTestFFI +from pypy.module._rawffi.alt.test.test_funcptr import BaseAppTestFFI class AppTestFFIType(BaseAppTestFFI): def test_simple_types(self): - from _ffi import types + from _rawffi.alt import types assert str(types.sint) == "" assert str(types.uint) == "" assert types.sint.name == 'sint' assert types.uint.name == 'uint' def test_sizeof(self): - from _ffi import types + from _rawffi.alt 
import types assert types.sbyte.sizeof() == 1 assert types.sint.sizeof() == 4 def test_typed_pointer(self): - from _ffi import types + from _rawffi.alt import types intptr = types.Pointer(types.sint) # create a typed pointer to sint assert intptr.deref_pointer() is types.sint assert str(intptr) == '' @@ -23,7 +23,7 @@ raises(TypeError, "types.Pointer(42)") def test_pointer_identity(self): - from _ffi import types + from _rawffi.alt import types x = types.Pointer(types.slong) y = types.Pointer(types.slong) z = types.Pointer(types.char) @@ -31,7 +31,7 @@ assert x is not z def test_char_p_cached(self): - from _ffi import types + from _rawffi.alt import types x = types.Pointer(types.char) assert x is types.char_p x = types.Pointer(types.unichar) diff --git a/pypy/module/_ffi/test/test_funcptr.py b/pypy/module/_rawffi/alt/test/test_funcptr.py rename from pypy/module/_ffi/test/test_funcptr.py rename to pypy/module/_rawffi/alt/test/test_funcptr.py --- a/pypy/module/_ffi/test/test_funcptr.py +++ b/pypy/module/_rawffi/alt/test/test_funcptr.py @@ -7,7 +7,7 @@ import sys, py class BaseAppTestFFI(object): - spaceconfig = dict(usemodules=('_ffi', '_rawffi')) + spaceconfig = dict(usemodules=('_rawffi',)) @classmethod def prepare_c_example(cls): @@ -62,17 +62,17 @@ cls.w_f_12_34_plus_56_78 = space.wrap(f_result) def test_libload(self): - import _ffi - _ffi.CDLL(self.libc_name) + import _rawffi.alt + _rawffi.alt.CDLL(self.libc_name) def test_libload_fail(self): - import _ffi - raises(OSError, _ffi.CDLL, "xxxxx_this_name_does_not_exist_xxxxx") + import _rawffi.alt + raises(OSError, _rawffi.alt.CDLL, "xxxxx_this_name_does_not_exist_xxxxx") def test_libload_None(self): if self.iswin32: skip("unix specific") - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types # this should return *all* loaded libs, dlopen(NULL) dll = CDLL(None) # libm should be loaded @@ -80,20 +80,20 @@ assert res == 1.0 def test_callfunc(self): - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libm = CDLL(self.libm_name) pow = libm.getfunc('pow', [types.double, types.double], types.double) assert pow(2, 3) == 8 def test_getaddr(self): - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libm = CDLL(self.libm_name) pow = libm.getfunc('pow', [types.double, types.double], types.double) assert pow.getaddr() == self.pow_addr def test_getaddressindll(self): import sys - from _ffi import CDLL + from _rawffi.alt import CDLL libm = CDLL(self.libm_name) pow_addr = libm.getaddressindll('pow') fff = sys.maxint*2-1 @@ -102,7 +102,7 @@ assert pow_addr == self.pow_addr & fff def test_func_fromaddr(self): - from _ffi import CDLL, types, FuncPtr + from _rawffi.alt import CDLL, types, FuncPtr libm = CDLL(self.libm_name) pow_addr = libm.getaddressindll('pow') pow = FuncPtr.fromaddr(pow_addr, 'pow', [types.double, types.double], @@ -117,7 +117,7 @@ } """ import sys - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy', [types.sint, types.sint], types.sint) assert sum_xy(30, 12) == 42 @@ -129,7 +129,7 @@ DLLEXPORT void set_dummy(int val) { dummy = val; } DLLEXPORT int get_dummy() { return dummy; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) set_dummy = libfoo.getfunc('set_dummy', [types.sint], types.void) get_dummy = libfoo.getfunc('get_dummy', [], types.sint) @@ -144,7 +144,7 @@ DLLEXPORT int* get_dummy_ptr() { return &dummy; } DLLEXPORT void set_val_to_ptr(int* ptr, int 
val) { *ptr = val; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) get_dummy = libfoo.getfunc('get_dummy', [], types.sint) get_dummy_ptr = libfoo.getfunc('get_dummy_ptr', [], types.void_p) @@ -163,7 +163,7 @@ DLLEXPORT int* get_dummy_ptr(); // defined in test_pointer_args DLLEXPORT void set_val_to_ptr(int* ptr, int val); // ditto """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types class MyPointerWrapper(object): def __init__(self, value): @@ -197,7 +197,7 @@ return len; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types import _rawffi libfoo = CDLL(self.libfoo_name) mystrlen = libfoo.getfunc('mystrlen', [types.char_p], types.slong) @@ -223,7 +223,7 @@ return len; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types import _rawffi libfoo = CDLL(self.libfoo_name) mystrlen = libfoo.getfunc('mystrlen_u', [types.unichar_p], types.slong) @@ -247,7 +247,7 @@ return s; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types import _rawffi libfoo = CDLL(self.libfoo_name) do_nothing = libfoo.getfunc('do_nothing', [types.char_p], types.char_p) @@ -264,7 +264,7 @@ DLLEXPORT int* get_dummy_ptr(); // defined in test_pointer_args DLLEXPORT void set_val_to_ptr(int* ptr, int val); // ditto """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) intptr = types.Pointer(types.sint) @@ -283,7 +283,7 @@ DLLEXPORT long is_null_ptr(void* ptr) { return ptr == NULL; } """ import sys - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) is_null_ptr = libfoo.getfunc('is_null_ptr', [types.void_p], types.ulong) assert not is_null_ptr(sys.maxint+1) @@ -296,7 +296,7 @@ } """ import sys - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy_ul', [types.ulong, types.ulong], types.ulong) @@ -313,7 +313,7 @@ return x+y; } """ - from _ffi import CDLL, types + from _rawffi.alt import CDLL, types libfoo = CDLL(self.libfoo_name) sum_xy = libfoo.getfunc('sum_xy_us', [types.ushort, types.ushort], types.ushort) @@ -327,7 +327,7 @@ From noreply at buildbot.pypy.org Mon Feb 3 05:19:40 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 3 Feb 2014 05:19:40 +0100 (CET) Subject: [pypy-commit] pypy default: fix per 3c0908d Message-ID: <20140203041940.59C181C1178@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r69049:06e579dd1bbe Date: 2014-02-02 20:18 -0800 http://bitbucket.org/pypy/pypy/changeset/06e579dd1bbe/ Log: fix per 3c0908d diff --git a/rpython/rlib/test/test_rarithmetic.py b/rpython/rlib/test/test_rarithmetic.py --- a/rpython/rlib/test/test_rarithmetic.py +++ b/rpython/rlib/test/test_rarithmetic.py @@ -492,9 +492,9 @@ py.test.raises(ParseStringError, string_to_int, '-0x', 16) exc = py.test.raises(ParseStringError, string_to_int, '') - assert exc.value.msg == "invalid literal for int() with base 10: ''" + assert exc.value.msg == "invalid literal for int() with base 10" exc = py.test.raises(ParseStringError, string_to_int, '', 0) - assert exc.value.msg == "invalid literal for int() with base 0: ''" + assert exc.value.msg == "invalid literal for int() with base 0" def test_string_to_int_overflow(self): import sys From noreply at buildbot.pypy.org Mon Feb 3 06:27:03 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Mon, 3 Feb 2014 06:27:03 +0100 (CET) Subject: [pypy-commit] pypy 
default: add test file that went missing in 083be7f23e9b Message-ID: <20140203052703.D56AF1D23CF@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r69050:6272b9da0c74 Date: 2014-02-03 05:18 +0000 http://bitbucket.org/pypy/pypy/changeset/6272b9da0c74/ Log: add test file that went missing in 083be7f23e9b diff --git a/rpython/rtyper/test/test_llannotation.py b/rpython/rtyper/test/test_llannotation.py new file mode 100644 --- /dev/null +++ b/rpython/rtyper/test/test_llannotation.py @@ -0,0 +1,89 @@ +import py.test +from rpython.annotator.model import ( + SomeInteger, SomeBool, SomeChar, unionof, SomeImpossibleValue, + UnionError, SomeInstance, SomeSingleFloat) +from rpython.rlib.rarithmetic import r_uint, r_singlefloat +from rpython.rtyper.llannotation import ( + SomePtr, annotation_to_lltype, ll_to_annotation) +from rpython.rtyper.typesystem import lltype +import rpython.rtyper.rtyper # make sure to import the world + +class C(object): + pass + +class DummyClassDef: + def __init__(self, cls=C): + self.cls = cls + self.name = cls.__name__ + +def test_ll_to_annotation(): + s_z = ll_to_annotation(lltype.Signed._defl()) + s_s = SomeInteger() + s_u = SomeInteger(nonneg=True, unsigned=True) + assert s_z.contains(s_s) + assert not s_z.contains(s_u) + s_uz = ll_to_annotation(lltype.Unsigned._defl()) + assert s_uz.contains(s_u) + assert ll_to_annotation(lltype.Bool._defl()).contains(SomeBool()) + assert ll_to_annotation(lltype.Char._defl()).contains(SomeChar()) + S = lltype.GcStruct('s') + A = lltype.GcArray() + s_p = ll_to_annotation(lltype.malloc(S)) + assert isinstance(s_p, SomePtr) and s_p.ll_ptrtype == lltype.Ptr(S) + s_p = ll_to_annotation(lltype.malloc(A, 0)) + assert isinstance(s_p, SomePtr) and s_p.ll_ptrtype == lltype.Ptr(A) + +def test_annotation_to_lltype(): + s_i = SomeInteger() + s_pos = SomeInteger(nonneg=True) + s_1 = SomeInteger(nonneg=True) + s_1.const = 1 + s_m1 = SomeInteger(nonneg=False) + s_m1.const = -1 + s_u = SomeInteger(nonneg=True, unsigned=True) + s_u1 = SomeInteger(nonneg=True, unsigned=True) + s_u1.const = r_uint(1) + assert annotation_to_lltype(s_i) == lltype.Signed + assert annotation_to_lltype(s_pos) == lltype.Signed + assert annotation_to_lltype(s_1) == lltype.Signed + assert annotation_to_lltype(s_m1) == lltype.Signed + assert annotation_to_lltype(s_u) == lltype.Unsigned + assert annotation_to_lltype(s_u1) == lltype.Unsigned + assert annotation_to_lltype(SomeBool()) == lltype.Bool + assert annotation_to_lltype(SomeChar()) == lltype.Char + PS = lltype.Ptr(lltype.GcStruct('s')) + s_p = SomePtr(ll_ptrtype=PS) + assert annotation_to_lltype(s_p) == PS + si0 = SomeInstance(DummyClassDef(), True) + with py.test.raises(ValueError): + annotation_to_lltype(si0) + s_singlefloat = SomeSingleFloat() + s_singlefloat.const = r_singlefloat(0.0) + assert annotation_to_lltype(s_singlefloat) == lltype.SingleFloat + +def test_ll_union(): + PS1 = lltype.Ptr(lltype.GcStruct('s')) + PS2 = lltype.Ptr(lltype.GcStruct('s')) + PS3 = lltype.Ptr(lltype.GcStruct('s3')) + PA1 = lltype.Ptr(lltype.GcArray()) + PA2 = lltype.Ptr(lltype.GcArray()) + + assert unionof(SomePtr(PS1), SomePtr(PS1)) == SomePtr(PS1) + assert unionof(SomePtr(PS1), SomePtr(PS2)) == SomePtr(PS2) + assert unionof(SomePtr(PS1), SomePtr(PS2)) == SomePtr(PS1) + + assert unionof(SomePtr(PA1), SomePtr(PA1)) == SomePtr(PA1) + assert unionof(SomePtr(PA1), SomePtr(PA2)) == SomePtr(PA2) + assert unionof(SomePtr(PA1), SomePtr(PA2)) == SomePtr(PA1) + + assert unionof(SomePtr(PS1), SomeImpossibleValue()) == SomePtr(PS1) + 
assert unionof(SomeImpossibleValue(), SomePtr(PS1)) == SomePtr(PS1) + + with py.test.raises(UnionError): + unionof(SomePtr(PA1), SomePtr(PS1)) + with py.test.raises(UnionError): + unionof(SomePtr(PS1), SomePtr(PS3)) + with py.test.raises(UnionError): + unionof(SomePtr(PS1), SomeInteger()) + with py.test.raises(UnionError): + unionof(SomeInteger(), SomePtr(PS1)) From noreply at buildbot.pypy.org Mon Feb 3 06:43:26 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 3 Feb 2014 06:43:26 +0100 (CET) Subject: [pypy-commit] pypy py3k: rekill str_w which snuck back in during a merge Message-ID: <20140203054326.946D91C087E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69051:f2754640183a Date: 2014-01-31 11:04 -0800 http://bitbucket.org/pypy/pypy/changeset/f2754640183a/ Log: rekill str_w which snuck back in during a merge diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -57,9 +57,6 @@ return None return space.wrap(compute_unique_id(space.unicode_w(self))) - def str_w(self, space): - return space.str_w(space.str(self)) - def unicode_w(self, space): return self._value From noreply at buildbot.pypy.org Mon Feb 3 06:43:28 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 3 Feb 2014 06:43:28 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140203054328.462D11C087E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69052:220c588360e5 Date: 2014-02-02 16:35 -0800 http://bitbucket.org/pypy/pypy/changeset/220c588360e5/ Log: merge default diff --git a/pypy/module/_cffi_backend/test/test_handle.py b/pypy/module/_cffi_backend/test/test_handle.py --- a/pypy/module/_cffi_backend/test/test_handle.py +++ b/pypy/module/_cffi_backend/test/test_handle.py @@ -1,20 +1,5 @@ import random -from pypy.module._cffi_backend.handle import CffiHandles, reduced_value - - -def test_reduced_value(): - assert reduced_value(0) == 0 - assert reduced_value(1) == 0 - assert reduced_value(2) == 1 - assert reduced_value(3) == 0 - assert reduced_value(4) == 2 - assert reduced_value(5) == 1 - assert reduced_value(6) == 3 - assert reduced_value(7) == 0 - assert reduced_value(8) == 4 - assert reduced_value(9) == 2 - assert reduced_value(10) == 5 - assert reduced_value(11) == 1 +from pypy.module._cffi_backend.handle import CffiHandles class PseudoWeakRef(object): diff --git a/pypy/module/_csv/interp_reader.py b/pypy/module/_csv/interp_reader.py --- a/pypy/module/_csv/interp_reader.py +++ b/pypy/module/_csv/interp_reader.py @@ -39,6 +39,7 @@ field_builder.append(c) def save_field(self, field_builder): + space = self.space field = field_builder.build() if self.numeric_field: from rpython.rlib.rstring import ParseStringError @@ -46,12 +47,12 @@ self.numeric_field = False try: ff = string_to_float(field) - except ParseStringError, e: - raise OperationError(self.space.w_ValueError, - self.space.wrap(e.msg)) - w_obj = self.space.wrap(ff) + except ParseStringError as e: + from pypy.objspace.std.inttype import wrap_parsestringerror + raise wrap_parsestringerror(space, e, space.wrap(field)) + w_obj = space.wrap(ff) else: - w_obj = self.space.wrap(field) + w_obj = space.wrap(field) self.fields_w.append(w_obj) def next_w(self): diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -251,6 +251,10 @@ value 
= space.is_true(self) return get_dtype_cache(space).w_booldtype.box(value) + def descr_zero(self, space): + from pypy.module.micronumpy.interp_dtype import get_dtype_cache + return get_dtype_cache(space).w_longdtype.box(0) + def descr_ravel(self, space): from pypy.module.micronumpy.base import convert_to_array w_values = space.newtuple([self]) @@ -582,6 +586,12 @@ __hash__ = interp2app(W_GenericBox.descr_hash), tolist = interp2app(W_GenericBox.item), + min = interp2app(W_GenericBox.descr_self), + max = interp2app(W_GenericBox.descr_self), + argmin = interp2app(W_GenericBox.descr_zero), + argmax = interp2app(W_GenericBox.descr_zero), + sum = interp2app(W_GenericBox.descr_self), + prod = interp2app(W_GenericBox.descr_self), any = interp2app(W_GenericBox.descr_any), all = interp2app(W_GenericBox.descr_all), ravel = interp2app(W_GenericBox.descr_ravel), diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -102,6 +102,16 @@ assert b == a assert b is not a + def test_methods(self): + import numpy as np + for a in [np.int32(2), np.float64(2.0), np.complex64(42)]: + for op in ['min', 'max', 'sum', 'prod']: + assert getattr(a, op)() == a + for op in ['argmin', 'argmax']: + b = getattr(a, op)() + assert type(b) is np.int_ + assert b == 0 + def test_buffer(self): import numpy as np a = np.int32(123) diff --git a/pypy/objspace/std/floattype.py b/pypy/objspace/std/floattype.py --- a/pypy/objspace/std/floattype.py +++ b/pypy/objspace/std/floattype.py @@ -34,20 +34,11 @@ value = space.float_w(w_obj) elif (space.isinstance_w(w_value, space.w_str) or space.isinstance_w(w_value, space.w_bytearray)): - strvalue = space.bufferstr_w(w_value) - try: - value = rfloat.string_to_float(strvalue.decode('latin-1')) - except ParseStringError, e: - raise OperationError(space.w_ValueError, - space.wrap(e.msg)) + value = _string_to_float(space, w_value, space.bufferstr_w(w_value)) elif space.isinstance_w(w_value, space.w_unicode): from unicodeobject import unicode_to_decimal_w - strvalue = unicode_to_decimal_w(space, w_value) - try: - value = rfloat.string_to_float(strvalue) - except ParseStringError, e: - raise OperationError(space.w_ValueError, - space.wrap(e.msg)) + value = _string_to_float(space, w_value, + unicode_to_decimal_w(space, w_value)) else: value = space.float_w(w_x) w_obj = space.allocate_instance(W_FloatObject, w_floattype) @@ -55,6 +46,14 @@ return w_obj +def _string_to_float(space, w_source, string): + try: + return rfloat.string_to_float(string) + except ParseStringError as e: + from pypy.objspace.std.inttype import wrap_parsestringerror + raise wrap_parsestringerror(space, e, w_source) + + def detect_floatformat(): from rpython.rtyper.lltypesystem import rffi, lltype buf = lltype.malloc(rffi.CCHARP.TO, 8, flavor='raw') diff --git a/pypy/objspace/std/inttype.py b/pypy/objspace/std/inttype.py --- a/pypy/objspace/std/inttype.py +++ b/pypy/objspace/std/inttype.py @@ -9,7 +9,8 @@ from rpython.rlib.rarithmetic import r_uint, string_to_int from rpython.rlib.objectmodel import instantiate from rpython.rlib.rbigint import rbigint -from rpython.rlib.rstring import ParseStringError, ParseStringOverflowError +from rpython.rlib.rstring import ( + InvalidBaseError, ParseStringError, ParseStringOverflowError) from rpython.rlib import jit # ____________________________________________________________ @@ -70,27 +71,33 @@ # 
____________________________________________________________ ## @jit.elidable -## def string_to_int_or_long(space, string, base=10): +## def string_to_int_or_long(space, w_source, string, base=10): ## w_longval = None ## value = 0 ## try: ## value = string_to_int(string, base) -## except ParseStringError, e: -## raise OperationError(space.w_ValueError, -## space.wrap(e.msg)) +## except ParseStringError as e: +## raise wrap_parsestringerror(space, e, w_source) ## except ParseStringOverflowError, e: -## w_longval = retry_to_w_long(space, e.parser) +## w_longval = retry_to_w_long(space, e.parser, w_source) ## return value, w_longval -## def retry_to_w_long(space, parser): +## def retry_to_w_long(space, parser, w_source): ## parser.rewind() ## try: ## bigint = rbigint._from_numberstring_parser(parser) -## except ParseStringError, e: -## raise OperationError(space.w_ValueError, -## space.wrap(e.msg)) +## except ParseStringError as e: +## raise wrap_parsestringerror(space, e, w_source) ## return space.newlong_from_rbigint(bigint) +def wrap_parsestringerror(space, e, w_source): + if isinstance(e, InvalidBaseError): + w_msg = space.wrap(e.msg) + else: + w_msg = space.wrap('%s: %s' % (e.msg, + space.str_w(space.repr(w_source)))) + return OperationError(space.w_ValueError, w_msg) + ## @unwrap_spec(w_x = WrappedDefault(0)) ## def descr__new__(space, w_inttype, w_x, w_base=None): ## from pypy.objspace.std.intobject import W_IntObject @@ -117,11 +124,12 @@ ## # an overflowing long ## value = space.int_w(w_obj) ## elif space.isinstance_w(w_value, space.w_str): -## value, w_longval = string_to_int_or_long(space, space.str_w(w_value)) +## value, w_longval = string_to_int_or_long(space, w_value, +## space.str_w(w_value)) ## elif space.isinstance_w(w_value, space.w_unicode): ## from pypy.objspace.std.unicodeobject import unicode_to_decimal_w ## string = unicode_to_decimal_w(space, w_value) -## value, w_longval = string_to_int_or_long(space, string) +## value, w_longval = string_to_int_or_long(space, w_value, string) ## else: ## # If object supports the buffer interface ## try: @@ -134,7 +142,8 @@ ## w_value) ## else: ## buf = space.interp_w(Buffer, w_buffer) -## value, w_longval = string_to_int_or_long(space, buf.as_str()) +## value, w_longval = string_to_int_or_long(space, w_value, +## buf.as_str()) ## else: ## base = space.int_w(w_base) @@ -149,7 +158,7 @@ ## space.wrap("int() can't convert non-string " ## "with explicit base")) -## value, w_longval = string_to_int_or_long(space, s, base) +## value, w_longval = string_to_int_or_long(space, w_value, s, base) ## if w_longval is not None: ## if not space.is_w(w_inttype, space.w_int): diff --git a/pypy/objspace/std/longtype.py b/pypy/objspace/std/longtype.py --- a/pypy/objspace/std/longtype.py +++ b/pypy/objspace/std/longtype.py @@ -35,12 +35,12 @@ return _from_intlike(space, w_longtype, w_obj) elif space.isinstance_w(w_value, space.w_unicode): from pypy.objspace.std.unicodeobject import unicode_to_decimal_w - return string_to_w_long(space, w_longtype, + return string_to_w_long(space, w_longtype, w_value, unicode_to_decimal_w(space, w_value)) elif (space.isinstance_w(w_value, space.w_bytearray) or space.isinstance_w(w_value, space.w_bytes)): - strvalue = space.bufferstr_w(w_value) - return string_to_w_long(space, w_longtype, strvalue.decode('latin-1')) + return string_to_w_long(space, w_longtype, w_value, + space.bufferstr_w(w_value)) else: try: w_buffer = space.buffer(w_value) @@ -52,8 +52,8 @@ w_value) else: buf = space.interp_w(Buffer, w_buffer) - return 
string_to_w_long(space, w_longtype, - buf.as_str().decode('latin-1')) + return string_to_w_long(space, w_longtype, w_value, + buf.as_str()) else: try: base = space.int_w(w_base) @@ -73,7 +73,7 @@ raise OperationError(space.w_TypeError, space.wrap("int() can't convert non-string " "with explicit base")) - return string_to_w_long(space, w_longtype, s, base) + return string_to_w_long(space, w_longtype, w_value, s, base) def _from_intlike(space, w_longtype, w_intlike): @@ -83,12 +83,13 @@ return newbigint(space, w_longtype, space.bigint_w(w_obj)) -def string_to_w_long(space, w_longtype, s, base=10): +def string_to_w_long(space, w_longtype, w_source, string, base=10): try: - bigint = rbigint.fromstr(s, base, ignore_l_suffix=True, fname=u'int') - except ParseStringError, e: - raise OperationError(space.w_ValueError, - space.wrap(e.msg)) + bigint = rbigint.fromstr(string, base, ignore_l_suffix=True, + fname=u'int') + except ParseStringError as e: + from pypy.objspace.std.inttype import wrap_parsestringerror + raise wrap_parsestringerror(space, e, w_source) return newbigint(space, w_longtype, bigint) string_to_w_long._dont_inline_ = True diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -57,7 +57,7 @@ return True def delete(self, obj, selector): - return None + pass def find_map_attr(self, selector): if jit.we_are_jitted(): @@ -291,6 +291,7 @@ def delete(self, obj, selector): if selector == self.selector: # ok, attribute is deleted + self.ever_mutated = True return self.back.copy(obj) new_obj = self.back.delete(obj, selector) if new_obj is not None: diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -1,3 +1,4 @@ +# encoding: utf-8 import py import sys from pypy.objspace.std import intobject as iobj @@ -492,6 +493,18 @@ assert str(e.value) == ( "int() argument must be a string or a number, not 'list'") + def test_invalid_literal_message(self): + import sys + if '__pypy__' not in sys.builtin_module_names: + skip('PyPy 2.x/CPython 3.4 only') + for value in b' 1j ', u' 1٢٣٤j ': + try: + int(value) + except ValueError as e: + assert repr(value) in str(e) + else: + assert False, value + class AppTestIntOptimizedAdd(AppTestInt): spaceconfig = {"objspace.std.optimized_int_add": True} diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -144,7 +144,15 @@ assert obj2.map.back.ever_mutated == True assert obj2.map is obj.map - +def test_attr_immutability_delete(monkeypatch): + cls = Class() + obj = cls.instantiate() + obj.setdictvalue(space, "a", 10) + map1 = obj.map + obj.deldictvalue(space, "a") + obj.setdictvalue(space, "a", 20) + assert obj.map.ever_mutated == True + assert obj.map is map1 def test_delete(): for i, dattr in enumerate(["a", "b", "c"]): diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -24,7 +24,6 @@ def __init__(self, translator=None, policy=None, bookkeeper=None): import rpython.rtyper.extfuncregistry # has side effects - import rpython.rlib.nonconst # has side effects if translator is None: # interface for tests diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- 
a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -4,7 +4,6 @@ from __future__ import absolute_import -from types import MethodType from rpython.flowspace.operation import op from rpython.annotator.model import (SomeObject, SomeInteger, SomeBool, SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, @@ -757,63 +756,6 @@ # This should probably never happen raise AnnotatorError("Cannot call len on a pbc") -# annotation of low-level types -from rpython.rtyper.llannotation import ( - SomePtr, SomeLLADTMeth, ll_to_annotation, lltype_to_annotation, - annotation_to_lltype) - -class __extend__(SomePtr): - - def getattr(self, s_attr): - assert s_attr.is_constant(), "getattr on ptr %r with non-constant field-name" % self.ll_ptrtype - example = self.ll_ptrtype._example() - try: - v = example._lookup_adtmeth(s_attr.const) - except AttributeError: - v = getattr(example, s_attr.const) - return ll_to_annotation(v) - else: - if isinstance(v, MethodType): - from rpython.rtyper.lltypesystem import lltype - ll_ptrtype = lltype.typeOf(v.im_self) - assert isinstance(ll_ptrtype, (lltype.Ptr, lltype.InteriorPtr)) - return SomeLLADTMeth(ll_ptrtype, v.im_func) - return getbookkeeper().immutablevalue(v) - getattr.can_only_throw = [] - - def len(self): - length = self.ll_ptrtype._example()._fixedlength() - if length is None: - return SomeObject.len(self) - else: - return immutablevalue(length) - - def setattr(self, s_attr, s_value): # just doing checking - assert s_attr.is_constant(), "setattr on ptr %r with non-constant field-name" % self.ll_ptrtype - example = self.ll_ptrtype._example() - if getattr(example, s_attr.const) is not None: # ignore Void s_value - v_lltype = annotation_to_lltype(s_value) - setattr(example, s_attr.const, v_lltype._defl()) - - def call(self, args): - args_s, kwds_s = args.unpack() - if kwds_s: - raise Exception("keyword arguments to call to a low-level fn ptr") - info = 'argument to ll function pointer call' - llargs = [annotation_to_lltype(s_arg,info)._defl() for s_arg in args_s] - v = self.ll_ptrtype._example()(*llargs) - return ll_to_annotation(v) - - def bool(self): - return s_Bool - -class __extend__(SomeLLADTMeth): - - def call(self, args): - bookkeeper = getbookkeeper() - s_func = bookkeeper.immutablevalue(self.func) - return s_func.call(args.prepend(lltype_to_annotation(self.ll_ptrtype))) - #_________________________________________ # weakrefs diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py --- a/rpython/rlib/rarithmetic.py +++ b/rpython/rlib/rarithmetic.py @@ -691,9 +691,8 @@ characters of 's'. Raises ParseStringError in case of error. Raises ParseStringOverflowError in case the result does not fit. """ - from rpython.rlib.rstring import NumberStringParser, \ - ParseStringOverflowError, \ - ParseStringError, strip_spaces + from rpython.rlib.rstring import ( + NumberStringParser, ParseStringOverflowError, strip_spaces) s = literal = strip_spaces(s) p = NumberStringParser(s, literal, base, u'int') base = p.base diff --git a/rpython/rlib/rfloat.py b/rpython/rlib/rfloat.py --- a/rpython/rlib/rfloat.py +++ b/rpython/rlib/rfloat.py @@ -25,7 +25,8 @@ globals().update(rffi_platform.configure(CConfig)) - at objectmodel.enforceargs(unicode) +INVALID_MSG = "invalid literal for float()" + def string_to_float(s): """ Conversion of string to float. 
@@ -37,10 +38,8 @@ from rpython.rlib.rstring import strip_spaces, ParseStringError s = strip_spaces(s) - if not s: - raise ParseStringError(u"empty string for float()") - + raise ParseStringError(INVALID_MSG) try: ascii_s = s.encode('ascii') @@ -72,9 +71,7 @@ try: return rstring_to_float(ascii_s) except ValueError: - # note that we still put the original unicode string in the error - # message, not ascii_s - raise ParseStringError(u"invalid literal for float(): '%s'" % s) + raise ParseStringError(INVALID_MSG) def rstring_to_float(s): from rpython.rlib.rdtoa import strtod diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py --- a/rpython/rlib/rstring.py +++ b/rpython/rlib/rstring.py @@ -282,6 +282,9 @@ def __init__(self, msg): self.msg = msg +class InvalidBaseError(ParseStringError): + """Signals an invalid base argument""" + class ParseStringOverflowError(Exception): def __init__(self, parser): self.parser = parser @@ -290,13 +293,12 @@ class NumberStringParser: def error(self): - raise ParseStringError(u"invalid literal for %s() with base %d: '%s'" % - (self.fname, self.original_base, self.literal)) + raise ParseStringError("invalid literal for %s() with base %d" % + (self.fname, self.original_base)) @enforceargs(None, unicode, unicode, int, unicode) @with_unicode_literals def __init__(self, s, literal, base, fname): - self.literal = literal self.fname = fname sign = 1 if s.startswith('-'): @@ -317,7 +319,7 @@ else: base = 10 elif base < 2 or base > 36: - raise ParseStringError, u"%s() base must be >= 2 and <= 36" % (fname,) + raise InvalidBaseError("%s() base must be >= 2 and <= 36" % fname) self.base = base if base == 16 and (s.startswith('0x') or s.startswith('0X')): diff --git a/rpython/rlib/test/test_rweaklist.py b/rpython/rlib/test/test_rweaklist.py --- a/rpython/rlib/test/test_rweaklist.py +++ b/rpython/rlib/test/test_rweaklist.py @@ -1,5 +1,20 @@ import gc -from rpython.rlib.rweaklist import RWeakListMixin +from rpython.rlib.rweaklist import RWeakListMixin, _reduced_value as reduced_value + + +def test_reduced_value(): + assert reduced_value(0) == 0 + assert reduced_value(1) == 0 + assert reduced_value(2) == 1 + assert reduced_value(3) == 0 + assert reduced_value(4) == 2 + assert reduced_value(5) == 1 + assert reduced_value(6) == 3 + assert reduced_value(7) == 0 + assert reduced_value(8) == 4 + assert reduced_value(9) == 2 + assert reduced_value(10) == 5 + assert reduced_value(11) == 1 class A(object): diff --git a/rpython/rtyper/llannotation.py b/rpython/rtyper/llannotation.py --- a/rpython/rtyper/llannotation.py +++ b/rpython/rtyper/llannotation.py @@ -1,6 +1,7 @@ """ Code for annotating low-level thingies. 
""" +from types import MethodType from rpython.tool.pairtype import pair, pairtype from rpython.annotator.model import ( SomeObject, SomeSingleFloat, SomeFloat, SomeLongFloat, SomeChar, @@ -108,6 +109,54 @@ def can_be_none(self): return False + def getattr(self, s_attr): + from rpython.annotator.bookkeeper import getbookkeeper + if not s_attr.is_constant(): + raise AnnotatorError("getattr on ptr %r with non-constant " + "field-name" % self.ll_ptrtype) + example = self.ll_ptrtype._example() + try: + v = example._lookup_adtmeth(s_attr.const) + except AttributeError: + v = getattr(example, s_attr.const) + return ll_to_annotation(v) + else: + if isinstance(v, MethodType): + ll_ptrtype = lltype.typeOf(v.im_self) + assert isinstance(ll_ptrtype, (lltype.Ptr, lltype.InteriorPtr)) + return SomeLLADTMeth(ll_ptrtype, v.im_func) + return getbookkeeper().immutablevalue(v) + getattr.can_only_throw = [] + + def len(self): + from rpython.annotator.bookkeeper import getbookkeeper + length = self.ll_ptrtype._example()._fixedlength() + if length is None: + return SomeObject.len(self) + else: + return getbookkeeper().immutablevalue(length) + + def setattr(self, s_attr, s_value): # just doing checking + if not s_attr.is_constant(): + raise AnnotatorError("setattr on ptr %r with non-constant " + "field-name" % self.ll_ptrtype) + example = self.ll_ptrtype._example() + if getattr(example, s_attr.const) is not None: # ignore Void s_value + v_lltype = annotation_to_lltype(s_value) + setattr(example, s_attr.const, v_lltype._defl()) + + def call(self, args): + args_s, kwds_s = args.unpack() + if kwds_s: + raise Exception("keyword arguments to call to a low-level fn ptr") + info = 'argument to ll function pointer call' + llargs = [annotation_to_lltype(s_arg, info)._defl() for s_arg in args_s] + v = self.ll_ptrtype._example()(*llargs) + return ll_to_annotation(v) + + def bool(self): + return s_Bool + class SomeInteriorPtr(SomePtr): def __init__(self, ll_ptrtype): @@ -125,6 +174,13 @@ def can_be_none(self): return False + def call(self, args): + from rpython.annotator.bookkeeper import getbookkeeper + bookkeeper = getbookkeeper() + s_func = bookkeeper.immutablevalue(self.func) + return s_func.call(args.prepend(lltype_to_annotation(self.ll_ptrtype))) + + class __extend__(pairtype(SomePtr, SomePtr)): def union((p1, p2)): if p1.ll_ptrtype != p2.ll_ptrtype: diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -16,6 +16,20 @@ from rpython.conftest import cdir from rpython.conftest import option +def setup_module(module): + if os.name == 'nt': + # Do not open dreaded dialog box on segfault + import ctypes + SEM_NOGPFAULTERRORBOX = 0x0002 # From MSDN + old_err_mode = ctypes.windll.kernel32.GetErrorMode() + new_err_mode = old_err_mode | SEM_NOGPFAULTERRORBOX + ctypes.windll.kernel32.SetErrorMode(new_err_mode) + module.old_err_mode = old_err_mode + +def teardown_module(module): + if os.name == 'nt': + import ctypes + ctypes.windll.kernel32.SetErrorMode(module.old_err_mode) class StandaloneTests(object): config = None From noreply at buildbot.pypy.org Mon Feb 3 06:43:29 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 3 Feb 2014 06:43:29 +0100 (CET) Subject: [pypy-commit] pypy py3k: sync with upstream rpython Message-ID: <20140203054329.7DD241C087E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69053:9d1738eabe37 Date: 2014-02-02 21:38 
-0800 http://bitbucket.org/pypy/pypy/changeset/9d1738eabe37/ Log: sync with upstream rpython diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py --- a/rpython/rlib/rarithmetic.py +++ b/rpython/rlib/rarithmetic.py @@ -684,7 +684,6 @@ # String parsing support # --------------------------- - at objectmodel.enforceargs(unicode, None) def string_to_int(s, base=10): """Utility to converts a string to an integer. If base is 0, the proper base is guessed based on the leading @@ -694,7 +693,7 @@ from rpython.rlib.rstring import ( NumberStringParser, ParseStringOverflowError, strip_spaces) s = literal = strip_spaces(s) - p = NumberStringParser(s, literal, base, u'int') + p = NumberStringParser(s, literal, base, 'int') base = p.base result = 0 while True: diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -4,7 +4,7 @@ from rpython.rlib.rfloat import isinf, isnan from rpython.rlib.rstring import StringBuilder from rpython.rlib.debug import make_sure_not_resized, check_regular_int -from rpython.rlib.objectmodel import we_are_translated, specialize, enforceargs +from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.rlib import jit from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper import extregistry @@ -254,8 +254,7 @@ @staticmethod @jit.elidable - @enforceargs(unicode, None, None, None) - def fromstr(s, base=0, ignore_l_suffix=False, fname=u'long'): + def fromstr(s, base=0, ignore_l_suffix=False, fname='long'): """As string_to_int(), but optionally ignores an optional 'l' or 'L' suffix and returns an rbigint. """ diff --git a/rpython/rlib/rfloat.py b/rpython/rlib/rfloat.py --- a/rpython/rlib/rfloat.py +++ b/rpython/rlib/rfloat.py @@ -41,22 +41,7 @@ if not s: raise ParseStringError(INVALID_MSG) - try: - ascii_s = s.encode('ascii') - except UnicodeEncodeError: - # if s is not ASCII, it certainly is not a float literal (because the - # unicode-decimal to ascii-decimal conversion already happened - # earlier). We just set ascii_s to something which will fail when - # passed to rstring_to_float, to keep the code as similar as possible - # to the one we have on default. - # - # Note that CPython does something different and it encodes the string - # to UTF-8 before trying to parse it. We cannot since .encode('utf-8') - # is not RPython. However, it doesn't change anything since the UTF-8 - # encoded string would make rstring_to_float to fail anyway. 
- ascii_s = "not a float" - - low = ascii_s.lower() + low = s.lower() if low == "-inf" or low == "-infinity": return -INFINITY elif low == "inf" or low == "+inf": @@ -69,7 +54,7 @@ return -NAN try: - return rstring_to_float(ascii_s) + return rstring_to_float(s) except ValueError: raise ParseStringError(INVALID_MSG) diff --git a/rpython/rlib/rstring.py b/rpython/rlib/rstring.py --- a/rpython/rlib/rstring.py +++ b/rpython/rlib/rstring.py @@ -6,12 +6,11 @@ SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString, SomePBC) from rpython.rtyper.llannotation import SomePtr from rpython.rlib import jit -from rpython.rlib.objectmodel import newlist_hint, specialize, enforceargs +from rpython.rlib.objectmodel import newlist_hint, specialize from rpython.rlib.rarithmetic import ovfcheck from rpython.rlib.unicodedata import unicodedb_5_2_0 as unicodedb from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.tool.pairtype import pairtype -from rpython.tool.sourcetools import with_unicode_literals # -------------- public API for string functions ----------------------- @@ -264,8 +263,6 @@ # -------------- numeric parsing support -------------------- - at enforceargs(unicode) - at with_unicode_literals def strip_spaces(s): # XXX this is not locale-dependent p = 0 @@ -278,7 +275,6 @@ return s[p:q] class ParseStringError(Exception): - @enforceargs(None, unicode) def __init__(self, msg): self.msg = msg @@ -296,8 +292,6 @@ raise ParseStringError("invalid literal for %s() with base %d" % (self.fname, self.original_base)) - @enforceargs(None, unicode, unicode, int, unicode) - @with_unicode_literals def __init__(self, s, literal, base, fname): self.fname = fname sign = 1 @@ -337,7 +331,6 @@ def rewind(self): self.i = 0 - @with_unicode_literals def next_digit(self): # -1 => exhausted if self.i < self.n: c = self.s[self.i] diff --git a/rpython/rlib/test/test_rarithmetic.py b/rpython/rlib/test/test_rarithmetic.py --- a/rpython/rlib/test/test_rarithmetic.py +++ b/rpython/rlib/test/test_rarithmetic.py @@ -413,49 +413,49 @@ class TestStringToInt: def test_string_to_int(self): - cases = [(u'0', 0), - (u'1', 1), - (u'9', 9), - (u'10', 10), - (u'09', 9), - (u'0000101', 101), # not octal unless base 0 or 8 - (u'5123', 5123), - (u' 0', 0), - (u'0 ', 0), - (u' \t \n 32313 \f \v \r \n\r ', 32313), - (u'+12', 12), - (u'-5', -5), - (u'- 5', -5), - (u'+ 5', 5), - (u' -123456789 ', -123456789), + cases = [('0', 0), + ('1', 1), + ('9', 9), + ('10', 10), + ('09', 9), + ('0000101', 101), # not octal unless base 0 or 8 + ('5123', 5123), + (' 0', 0), + ('0 ', 0), + (' \t \n 32313 \f \v \r \n\r ', 32313), + ('+12', 12), + ('-5', -5), + ('- 5', -5), + ('+ 5', 5), + (' -123456789 ', -123456789), ] for s, expected in cases: assert string_to_int(s) == expected #assert string_to_bigint(s).tolong() == expected def test_string_to_int_base(self): - cases = [(u'111', 2, 7), - (u'010', 2, 2), - (u'102', 3, 11), - (u'103', 4, 19), - (u'107', 8, 71), - (u'109', 10, 109), - (u'10A', 11, 131), - (u'10a', 11, 131), - (u'10f', 16, 271), - (u'10F', 16, 271), - (u'0x10f', 16, 271), - (u'0x10F', 16, 271), - (u'10z', 36, 1331), - (u'10Z', 36, 1331), - (u'12', 0, 12), - (u'015', 0, 13), - (u'0x10', 0, 16), - (u'0XE', 0, 14), - (u'0', 0, 0), - (u'0b11', 2, 3), - (u'0B10', 2, 2), - (u'0o77', 8, 63), + cases = [('111', 2, 7), + ('010', 2, 2), + ('102', 3, 11), + ('103', 4, 19), + ('107', 8, 71), + ('109', 10, 109), + ('10A', 11, 131), + ('10a', 11, 131), + ('10f', 16, 271), + ('10F', 16, 271), + ('0x10f', 16, 271), + ('0x10F', 16, 271), + ('10z', 
36, 1331), + ('10Z', 36, 1331), + ('12', 0, 12), + ('015', 0, 13), + ('0x10', 0, 16), + ('0XE', 0, 14), + ('0', 0, 0), + ('0b11', 2, 3), + ('0B10', 2, 2), + ('0o77', 8, 63), ] for s, base, expected in cases: assert string_to_int(s, base) == expected @@ -466,21 +466,21 @@ assert string_to_int('-'+s+' ', base) == -expected def test_string_to_int_error(self): - cases = [u'0x123', # must use base 0 or 16 - u' 0X12 ', - u'0b01', - u'0o01', - u'', - u'++12', - u'+-12', - u'-+12', - u'--12', - u'12a6', - u'12A6', - u'f', - u'Z', - u'.', - u'@', + cases = ['0x123', # must use base 0 or 16 + ' 0X12 ', + '0b01', + '0o01', + '', + '++12', + '+-12', + '-+12', + '--12', + '12a6', + '12A6', + 'f', + 'Z', + '.', + '@', ] for s in cases: py.test.raises(ParseStringError, string_to_int, s) @@ -488,39 +488,39 @@ py.test.raises(ParseStringError, string_to_int, s+' ') py.test.raises(ParseStringError, string_to_int, '+'+s) py.test.raises(ParseStringError, string_to_int, '-'+s) - py.test.raises(ParseStringError, string_to_int, u'0x', 16) - py.test.raises(ParseStringError, string_to_int, u'-0x', 16) + py.test.raises(ParseStringError, string_to_int, '0x', 16) + py.test.raises(ParseStringError, string_to_int, '-0x', 16) - exc = py.test.raises(ParseStringError, string_to_int, u'') + exc = py.test.raises(ParseStringError, string_to_int, '') assert exc.value.msg == "invalid literal for int() with base 10: ''" - exc = py.test.raises(ParseStringError, string_to_int, u'', 0) + exc = py.test.raises(ParseStringError, string_to_int, '', 0) assert exc.value.msg == "invalid literal for int() with base 0: ''" def test_string_to_int_overflow(self): import sys py.test.raises(ParseStringOverflowError, string_to_int, - unicode(sys.maxint*17)) + str(sys.maxint*17)) def test_string_to_int_not_overflow(self): import sys for x in [-sys.maxint-1, sys.maxint]: - y = string_to_int(unicode(x)) + y = string_to_int(str(x)) assert y == x def test_string_to_int_base_error(self): - cases = [(u'1', 1), - (u'1', 37), - (u'a', 0), - (u'9', 9), - (u'0x123', 7), - (u'145cdf', 15), - (u'12', 37), - (u'12', 98172), - (u'12', -1), - (u'12', -908), - (u'12.3', 10), - (u'12.3', 13), - (u'12.3', 16), + cases = [('1', 1), + ('1', 37), + ('a', 0), + ('9', 9), + ('0x123', 7), + ('145cdf', 15), + ('12', 37), + ('12', 98172), + ('12', -1), + ('12', -908), + ('12.3', 10), + ('12.3', 13), + ('12.3', 16), ] for s, base in cases: py.test.raises(ParseStringError, string_to_int, s, base) diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -212,24 +212,24 @@ def test_fromstr(self): from rpython.rlib.rstring import ParseStringError - assert rbigint.fromstr(u'123L').tolong() == 123 - assert rbigint.fromstr(u'123L ').tolong() == 123 - py.test.raises(ParseStringError, rbigint.fromstr, u'123L ', + assert rbigint.fromstr('123L').tolong() == 123 + assert rbigint.fromstr('123L ').tolong() == 123 + py.test.raises(ParseStringError, rbigint.fromstr, '123L ', ignore_l_suffix=True) - py.test.raises(ParseStringError, rbigint.fromstr, u'L') - py.test.raises(ParseStringError, rbigint.fromstr, u'L ') - e = py.test.raises(ParseStringError, rbigint.fromstr, u'L ', - fname=u'int') - assert u'int()' in e.value.msg - assert rbigint.fromstr(u'123L', 4).tolong() == 27 - assert rbigint.fromstr(u'123L', 30).tolong() == 27000 + 1800 + 90 + 21 - assert rbigint.fromstr(u'123L', 22).tolong() == 10648 + 968 + 66 + 21 - assert rbigint.fromstr(u'123L', 21).tolong() == 441 + 42 + 3 - 
assert rbigint.fromstr(u'1891234174197319').tolong() == 1891234174197319 + py.test.raises(ParseStringError, rbigint.fromstr, 'L') + py.test.raises(ParseStringError, rbigint.fromstr, 'L ') + e = py.test.raises(ParseStringError, rbigint.fromstr, 'L ', + fname='int') + assert 'int()' in e.value.msg + assert rbigint.fromstr('123L', 4).tolong() == 27 + assert rbigint.fromstr('123L', 30).tolong() == 27000 + 1800 + 90 + 21 + assert rbigint.fromstr('123L', 22).tolong() == 10648 + 968 + 66 + 21 + assert rbigint.fromstr('123L', 21).tolong() == 441 + 42 + 3 + assert rbigint.fromstr('1891234174197319').tolong() == 1891234174197319 def test_from_numberstring_parser(self): from rpython.rlib.rstring import NumberStringParser - parser = NumberStringParser(u"1231231241", u"1231231241", 10, u"long") + parser = NumberStringParser("1231231241", "1231231241", 10, "long") assert rbigint._from_numberstring_parser(parser).tolong() == 1231231241 def test_add(self): diff --git a/rpython/rlib/test/test_rfloat.py b/rpython/rlib/test/test_rfloat.py --- a/rpython/rlib/test/test_rfloat.py +++ b/rpython/rlib/test/test_rfloat.py @@ -217,19 +217,19 @@ def test_string_to_float(): from rpython.rlib.rstring import ParseStringError import random - assert string_to_float(u'0') == 0.0 - assert string_to_float(u'1') == 1.0 - assert string_to_float(u'-1.5') == -1.5 - assert string_to_float(u'1.5E2') == 150.0 - assert string_to_float(u'2.5E-1') == 0.25 - assert string_to_float(u'1e1111111111111') == float('1e1111111111111') - assert string_to_float(u'1e-1111111111111') == float('1e-1111111111111') - assert string_to_float(u'-1e1111111111111') == float('-1e1111111111111') - assert string_to_float(u'-1e-1111111111111') == float('-1e-1111111111111') - assert string_to_float(u'1e111111111111111111111') == float('1e111111111111111111111') - assert string_to_float(u'1e-111111111111111111111') == float('1e-111111111111111111111') - assert string_to_float(u'-1e111111111111111111111') == float('-1e111111111111111111111') - assert string_to_float(u'-1e-111111111111111111111') == float('-1e-111111111111111111111') + assert string_to_float('0') == 0.0 + assert string_to_float('1') == 1.0 + assert string_to_float('-1.5') == -1.5 + assert string_to_float('1.5E2') == 150.0 + assert string_to_float('2.5E-1') == 0.25 + assert string_to_float('1e1111111111111') == float('1e1111111111111') + assert string_to_float('1e-1111111111111') == float('1e-1111111111111') + assert string_to_float('-1e1111111111111') == float('-1e1111111111111') + assert string_to_float('-1e-1111111111111') == float('-1e-1111111111111') + assert string_to_float('1e111111111111111111111') == float('1e111111111111111111111') + assert string_to_float('1e-111111111111111111111') == float('1e-111111111111111111111') + assert string_to_float('-1e111111111111111111111') == float('-1e111111111111111111111') + assert string_to_float('-1e-111111111111111111111') == float('-1e-111111111111111111111') valid_parts = [['', ' ', ' \f\n\r\t\v'], ['', '+', '-'], @@ -251,7 +251,7 @@ for part2 in valid_parts[2]: for part3 in valid_parts[3]: for part4 in valid_parts[4]: - s = unicode(part0+part1+part2+part3+part4) + s = part0+part1+part2+part3+part4 assert (abs(string_to_float(s) - float(s)) <= 1E-13 * abs(float(s))) @@ -260,8 +260,8 @@ for i in range(20): parts = [random.choice(lst) for lst in valid_parts] parts[j] = invalid - s = u''.join(parts) + s = ''.join(parts) print repr(s) if s.strip(): # empty s raises OperationError directly py.test.raises(ParseStringError, string_to_float, s) - 
py.test.raises(ParseStringError, string_to_float, u"") + py.test.raises(ParseStringError, string_to_float, "") From noreply at buildbot.pypy.org Mon Feb 3 06:43:30 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 3 Feb 2014 06:43:30 +0100 (CET) Subject: [pypy-commit] pypy py3k: adapt to string based api Message-ID: <20140203054330.AD0F41C087E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69054:05d1dbaef6bd Date: 2014-02-02 21:39 -0800 http://bitbucket.org/pypy/pypy/changeset/05d1dbaef6bd/ Log: adapt to string based api diff --git a/pypy/module/_csv/interp_reader.py b/pypy/module/_csv/interp_reader.py --- a/pypy/module/_csv/interp_reader.py +++ b/pypy/module/_csv/interp_reader.py @@ -42,15 +42,8 @@ space = self.space field = field_builder.build() if self.numeric_field: - from rpython.rlib.rstring import ParseStringError - from rpython.rlib.rfloat import string_to_float self.numeric_field = False - try: - ff = string_to_float(field) - except ParseStringError as e: - from pypy.objspace.std.inttype import wrap_parsestringerror - raise wrap_parsestringerror(space, e, space.wrap(field)) - w_obj = space.wrap(ff) + w_obj = space.call_function(space.w_float, space.wrap(field)) else: w_obj = space.wrap(field) self.fields_w.append(w_obj) diff --git a/pypy/objspace/std/complextype.py b/pypy/objspace/std/complextype.py --- a/pypy/objspace/std/complextype.py +++ b/pypy/objspace/std/complextype.py @@ -1,4 +1,3 @@ -from rpython.tool.sourcetools import with_unicode_literals from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.error import OperationError, operationerrfmt from pypy.objspace.std.register_all import register_all @@ -19,7 +18,6 @@ register_all(vars(),globals()) - at with_unicode_literals def _split_complex(s): slen = len(s) if slen == 0: diff --git a/pypy/objspace/std/inttype.py b/pypy/objspace/std/inttype.py --- a/pypy/objspace/std/inttype.py +++ b/pypy/objspace/std/inttype.py @@ -94,8 +94,8 @@ if isinstance(e, InvalidBaseError): w_msg = space.wrap(e.msg) else: - w_msg = space.wrap('%s: %s' % (e.msg, - space.str_w(space.repr(w_source)))) + w_msg = space.wrap(u'%s: %s' % (unicode(e.msg), + space.unicode_w(space.repr(w_source)))) return OperationError(space.w_ValueError, w_msg) ## @unwrap_spec(w_x = WrappedDefault(0)) diff --git a/pypy/objspace/std/longtype.py b/pypy/objspace/std/longtype.py --- a/pypy/objspace/std/longtype.py +++ b/pypy/objspace/std/longtype.py @@ -67,8 +67,7 @@ s = unicode_to_decimal_w(space, w_value) else: try: - strval = space.bufferstr_w(w_value) - s = strval.decode('latin-1') + s = space.bufferstr_w(w_value) except OperationError: raise OperationError(space.w_TypeError, space.wrap("int() can't convert non-string " @@ -86,7 +85,7 @@ def string_to_w_long(space, w_longtype, w_source, string, base=10): try: bigint = rbigint.fromstr(string, base, ignore_l_suffix=True, - fname=u'int') + fname='int') except ParseStringError as e: from pypy.objspace.std.inttype import wrap_parsestringerror raise wrap_parsestringerror(space, e, w_source) diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -1144,9 +1144,7 @@ # # In CPython3 the call to PyUnicode_EncodeDecimal has been replaced to a call # to PyUnicode_TransformDecimalToASCII, which is much simpler. Here, we do the -# equivalent. 
-# -# Note that, differently than default, we return an *unicode* RPython string +# equivalent plus the final step of encoding the result to utf-8. def unicode_to_decimal_w(space, w_unistr): if not isinstance(w_unistr, W_UnicodeObject): raise operationerrfmt(space.w_TypeError, "expected unicode, got '%T'", @@ -1164,7 +1162,7 @@ except KeyError: pass result[i] = unichr(uchr) - return u''.join(result) + return unicodehelper.encode_utf8(space, u''.join(result)) _repr_function, _ = make_unicode_escape_function( From noreply at buildbot.pypy.org Mon Feb 3 06:43:31 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 3 Feb 2014 06:43:31 +0100 (CET) Subject: [pypy-commit] pypy py3k: 2to3 Message-ID: <20140203054331.D8E1F1C087E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69055:274c5ad9d1ee Date: 2014-02-02 21:41 -0800 http://bitbucket.org/pypy/pypy/changeset/274c5ad9d1ee/ Log: 2to3 diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -497,7 +497,7 @@ import sys if '__pypy__' not in sys.builtin_module_names: skip('PyPy 2.x/CPython 3.4 only') - for value in b' 1j ', u' 1٢٣٤j ': + for value in b' 1j ', ' 1٢٣٤j ': try: int(value) except ValueError as e: From noreply at buildbot.pypy.org Mon Feb 3 06:43:33 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 3 Feb 2014 06:43:33 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140203054333.152741C087E@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69056:5b5f0da64f64 Date: 2014-02-02 21:42 -0800 http://bitbucket.org/pypy/pypy/changeset/5b5f0da64f64/ Log: merge default diff --git a/rpython/rlib/test/test_rarithmetic.py b/rpython/rlib/test/test_rarithmetic.py --- a/rpython/rlib/test/test_rarithmetic.py +++ b/rpython/rlib/test/test_rarithmetic.py @@ -492,9 +492,9 @@ py.test.raises(ParseStringError, string_to_int, '-0x', 16) exc = py.test.raises(ParseStringError, string_to_int, '') - assert exc.value.msg == "invalid literal for int() with base 10: ''" + assert exc.value.msg == "invalid literal for int() with base 10" exc = py.test.raises(ParseStringError, string_to_int, '', 0) - assert exc.value.msg == "invalid literal for int() with base 0: ''" + assert exc.value.msg == "invalid literal for int() with base 0" def test_string_to_int_overflow(self): import sys diff --git a/rpython/rtyper/test/test_llannotation.py b/rpython/rtyper/test/test_llannotation.py new file mode 100644 --- /dev/null +++ b/rpython/rtyper/test/test_llannotation.py @@ -0,0 +1,89 @@ +import py.test +from rpython.annotator.model import ( + SomeInteger, SomeBool, SomeChar, unionof, SomeImpossibleValue, + UnionError, SomeInstance, SomeSingleFloat) +from rpython.rlib.rarithmetic import r_uint, r_singlefloat +from rpython.rtyper.llannotation import ( + SomePtr, annotation_to_lltype, ll_to_annotation) +from rpython.rtyper.typesystem import lltype +import rpython.rtyper.rtyper # make sure to import the world + +class C(object): + pass + +class DummyClassDef: + def __init__(self, cls=C): + self.cls = cls + self.name = cls.__name__ + +def test_ll_to_annotation(): + s_z = ll_to_annotation(lltype.Signed._defl()) + s_s = SomeInteger() + s_u = SomeInteger(nonneg=True, unsigned=True) + assert s_z.contains(s_s) + assert not s_z.contains(s_u) + s_uz = ll_to_annotation(lltype.Unsigned._defl()) + assert s_uz.contains(s_u) + assert 
ll_to_annotation(lltype.Bool._defl()).contains(SomeBool()) + assert ll_to_annotation(lltype.Char._defl()).contains(SomeChar()) + S = lltype.GcStruct('s') + A = lltype.GcArray() + s_p = ll_to_annotation(lltype.malloc(S)) + assert isinstance(s_p, SomePtr) and s_p.ll_ptrtype == lltype.Ptr(S) + s_p = ll_to_annotation(lltype.malloc(A, 0)) + assert isinstance(s_p, SomePtr) and s_p.ll_ptrtype == lltype.Ptr(A) + +def test_annotation_to_lltype(): + s_i = SomeInteger() + s_pos = SomeInteger(nonneg=True) + s_1 = SomeInteger(nonneg=True) + s_1.const = 1 + s_m1 = SomeInteger(nonneg=False) + s_m1.const = -1 + s_u = SomeInteger(nonneg=True, unsigned=True) + s_u1 = SomeInteger(nonneg=True, unsigned=True) + s_u1.const = r_uint(1) + assert annotation_to_lltype(s_i) == lltype.Signed + assert annotation_to_lltype(s_pos) == lltype.Signed + assert annotation_to_lltype(s_1) == lltype.Signed + assert annotation_to_lltype(s_m1) == lltype.Signed + assert annotation_to_lltype(s_u) == lltype.Unsigned + assert annotation_to_lltype(s_u1) == lltype.Unsigned + assert annotation_to_lltype(SomeBool()) == lltype.Bool + assert annotation_to_lltype(SomeChar()) == lltype.Char + PS = lltype.Ptr(lltype.GcStruct('s')) + s_p = SomePtr(ll_ptrtype=PS) + assert annotation_to_lltype(s_p) == PS + si0 = SomeInstance(DummyClassDef(), True) + with py.test.raises(ValueError): + annotation_to_lltype(si0) + s_singlefloat = SomeSingleFloat() + s_singlefloat.const = r_singlefloat(0.0) + assert annotation_to_lltype(s_singlefloat) == lltype.SingleFloat + +def test_ll_union(): + PS1 = lltype.Ptr(lltype.GcStruct('s')) + PS2 = lltype.Ptr(lltype.GcStruct('s')) + PS3 = lltype.Ptr(lltype.GcStruct('s3')) + PA1 = lltype.Ptr(lltype.GcArray()) + PA2 = lltype.Ptr(lltype.GcArray()) + + assert unionof(SomePtr(PS1), SomePtr(PS1)) == SomePtr(PS1) + assert unionof(SomePtr(PS1), SomePtr(PS2)) == SomePtr(PS2) + assert unionof(SomePtr(PS1), SomePtr(PS2)) == SomePtr(PS1) + + assert unionof(SomePtr(PA1), SomePtr(PA1)) == SomePtr(PA1) + assert unionof(SomePtr(PA1), SomePtr(PA2)) == SomePtr(PA2) + assert unionof(SomePtr(PA1), SomePtr(PA2)) == SomePtr(PA1) + + assert unionof(SomePtr(PS1), SomeImpossibleValue()) == SomePtr(PS1) + assert unionof(SomeImpossibleValue(), SomePtr(PS1)) == SomePtr(PS1) + + with py.test.raises(UnionError): + unionof(SomePtr(PA1), SomePtr(PS1)) + with py.test.raises(UnionError): + unionof(SomePtr(PS1), SomePtr(PS3)) + with py.test.raises(UnionError): + unionof(SomePtr(PS1), SomeInteger()) + with py.test.raises(UnionError): + unionof(SomeInteger(), SomePtr(PS1)) From noreply at buildbot.pypy.org Mon Feb 3 11:37:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 3 Feb 2014 11:37:48 +0100 (CET) Subject: [pypy-commit] pypy default: Fix for 307818c61207 in test_pypy_c Message-ID: <20140203103748.182E81D2530@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69057:0e0d08198110 Date: 2014-02-03 11:37 +0100 http://bitbucket.org/pypy/pypy/changeset/0e0d08198110/ Log: Fix for 307818c61207 in test_pypy_c diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py --- a/pypy/module/pypyjit/test_pypy_c/test_string.py +++ b/pypy/module/pypyjit/test_pypy_c/test_string.py @@ -80,7 +80,7 @@ i23 = strgetitem(p10, i19) p25 = newstr(1) strsetitem(p25, 0, i23) - p93 = call(ConstClass(fromstr), p25, 16, descr=) + p93 = call(ConstClass(fromstr2), p25, 16, descr=) guard_no_exception(descr=...) i94 = call(ConstClass(rbigint.toint), p93, descr=) guard_no_exception(descr=...) 
diff --git a/pypy/objspace/std/longtype.py b/pypy/objspace/std/longtype.py --- a/pypy/objspace/std/longtype.py +++ b/pypy/objspace/std/longtype.py @@ -78,7 +78,7 @@ def string_to_w_long(space, w_longtype, w_source, string, base=10): try: - bigint = rbigint.fromstr(string, base) + bigint = rbigint.fromstr2(string, base) except ParseStringError as e: from pypy.objspace.std.inttype import wrap_parsestringerror raise wrap_parsestringerror(space, e, w_source) diff --git a/rpython/rlib/rbigint.py b/rpython/rlib/rbigint.py --- a/rpython/rlib/rbigint.py +++ b/rpython/rlib/rbigint.py @@ -269,6 +269,13 @@ return rbigint._from_numberstring_parser(parser) @staticmethod + @jit.elidable + def fromstr2(s, base=0): + """A sub-version of fromstr(), already elidable to be JIT-called + with only two arguments.""" + return rbigint.fromstr(s, base) + + @staticmethod def _from_numberstring_parser(parser): return parse_digit_string(parser) diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -226,6 +226,7 @@ assert rbigint.fromstr('123L', 22).tolong() == 10648 + 968 + 66 + 21 assert rbigint.fromstr('123L', 21).tolong() == 441 + 42 + 3 assert rbigint.fromstr('1891234174197319').tolong() == 1891234174197319 + assert rbigint.fromstr2('123L', 4).tolong() == 27 def test_from_numberstring_parser(self): from rpython.rlib.rstring import NumberStringParser From noreply at buildbot.pypy.org Mon Feb 3 11:45:47 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 3 Feb 2014 11:45:47 +0100 (CET) Subject: [pypy-commit] pypy default: support the scroll wheel (and the fany all-direction scrolling) Message-ID: <20140203104547.8E3F21D257B@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r69058:b23c7d2fbe5b Date: 2014-02-03 11:45 +0100 http://bitbucket.org/pypy/pypy/changeset/b23c7d2fbe5b/ Log: support the scroll wheel (and the fany all-direction scrolling) diff --git a/dotviewer/graphdisplay.py b/dotviewer/graphdisplay.py --- a/dotviewer/graphdisplay.py +++ b/dotviewer/graphdisplay.py @@ -613,6 +613,19 @@ def process_MouseButtonUp(self, event): self.dragging = None pygame.event.set_grab(False) + # handle directional scrolling + if event.button == 4: + self.pan((0, -1)) + return + if event.button == 5: + self.pan((0, 1)) + return + if event.button == 6: + self.pan((-1, 0)) + return + if event.button == 7: + self.pan((1, 0)) + return if self.click_time is not None and abs(time.time() - self.click_time) < 1: # click (no significant dragging) self.notifyclick(self.click_origin) From noreply at buildbot.pypy.org Mon Feb 3 11:49:19 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Mon, 3 Feb 2014 11:49:19 +0100 (CET) Subject: [pypy-commit] pypy default: also add this to the help message Message-ID: <20140203104919.4B02D1D257B@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r69059:534a5450015b Date: 2014-02-03 11:48 +0100 http://bitbucket.org/pypy/pypy/changeset/534a5450015b/ Log: also add this to the help message diff --git a/dotviewer/graphdisplay.py b/dotviewer/graphdisplay.py --- a/dotviewer/graphdisplay.py +++ b/dotviewer/graphdisplay.py @@ -136,6 +136,7 @@ Click on objects to move around Drag with the left mouse button to zoom in/out Drag with the right mouse button to scroll + Use scroll wheel do scroll up or down """.replace('\n ', '\n').strip() # poor man's dedent From noreply at buildbot.pypy.org Mon Feb 3 22:32:35 2014 From: noreply at 
buildbot.pypy.org (pjenvey) Date: Mon, 3 Feb 2014 22:32:35 +0100 (CET) Subject: [pypy-commit] pypy default: less duplication Message-ID: <20140203213235.8A3701D24AC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r69060:bb2c16ce58d9 Date: 2014-02-03 13:05 -0800 http://bitbucket.org/pypy/pypy/changeset/bb2c16ce58d9/ Log: less duplication diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -8,6 +8,12 @@ from rpython.tool.sourcetools import func_with_new_name +def raise_attriberr(space, w_obj, name): + raise operationerrfmt(space.w_AttributeError, + "'%T' object has no attribute '%s'", + w_obj, name) + + def check_string(space, w_obj): if not (space.isinstance_w(w_obj, space.w_str) or space.isinstance_w(w_obj, space.w_unicode)): @@ -2793,7 +2799,7 @@ def Module_get_body(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2834,7 +2840,7 @@ def Interactive_get_body(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2879,7 +2885,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def Expression_set_body(space, w_self, w_new_value): @@ -2922,7 +2928,7 @@ def Suite_get_body(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2967,7 +2973,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') + raise_attriberr(space, w_self, 'lineno') return space.wrap(w_self.lineno) def stmt_set_lineno(space, w_self, w_new_value): @@ -2988,7 +2994,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') + raise_attriberr(space, w_self, 'col_offset') return space.wrap(w_self.col_offset) def stmt_set_col_offset(space, w_self, w_new_value): @@ -3018,7 +3024,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') return space.wrap(w_self.name) def FunctionDef_set_name(space, w_self, w_new_value): @@ -3039,7 +3045,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') return space.wrap(w_self.args) def FunctionDef_set_args(space, w_self, w_new_value): @@ -3056,7 +3062,7 @@ def FunctionDef_get_body(space, w_self): if not w_self.initialization_state & 16: - raise 
operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3072,7 +3078,7 @@ def FunctionDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'decorator_list') + raise_attriberr(space, w_self, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: list_w = [] @@ -3121,7 +3127,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') return space.wrap(w_self.name) def ClassDef_set_name(space, w_self, w_new_value): @@ -3138,7 +3144,7 @@ def ClassDef_get_bases(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'bases') + raise_attriberr(space, w_self, 'bases') if w_self.w_bases is None: if w_self.bases is None: list_w = [] @@ -3154,7 +3160,7 @@ def ClassDef_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3170,7 +3176,7 @@ def ClassDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'decorator_list') + raise_attriberr(space, w_self, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: list_w = [] @@ -3220,7 +3226,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Return_set_value(space, w_self, w_new_value): @@ -3263,7 +3269,7 @@ def Delete_get_targets(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'targets') + raise_attriberr(space, w_self, 'targets') if w_self.w_targets is None: if w_self.targets is None: list_w = [] @@ -3304,7 +3310,7 @@ def Assign_get_targets(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'targets') + raise_attriberr(space, w_self, 'targets') if w_self.w_targets is None: if w_self.targets is None: list_w = [] @@ -3324,7 +3330,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Assign_set_value(space, w_self, w_new_value): @@ -3373,7 +3379,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') + raise_attriberr(space, w_self, 'target') return space.wrap(w_self.target) def AugAssign_set_target(space, w_self, w_new_value): @@ -3396,7 +3402,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - 
raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return operator_to_class[w_self.op - 1]() def AugAssign_set_op(space, w_self, w_new_value): @@ -3419,7 +3425,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def AugAssign_set_value(space, w_self, w_new_value): @@ -3468,7 +3474,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'dest') + raise_attriberr(space, w_self, 'dest') return space.wrap(w_self.dest) def Print_set_dest(space, w_self, w_new_value): @@ -3487,7 +3493,7 @@ def Print_get_values(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') + raise_attriberr(space, w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -3507,7 +3513,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'nl') + raise_attriberr(space, w_self, 'nl') return space.wrap(w_self.nl) def Print_set_nl(space, w_self, w_new_value): @@ -3555,7 +3561,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') + raise_attriberr(space, w_self, 'target') return space.wrap(w_self.target) def For_set_target(space, w_self, w_new_value): @@ -3578,7 +3584,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'iter') + raise_attriberr(space, w_self, 'iter') return space.wrap(w_self.iter) def For_set_iter(space, w_self, w_new_value): @@ -3597,7 +3603,7 @@ def For_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3613,7 +3619,7 @@ def For_get_orelse(space, w_self): if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3662,7 +3668,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return space.wrap(w_self.test) def While_set_test(space, w_self, w_new_value): @@ -3681,7 +3687,7 @@ def While_get_body(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3697,7 +3703,7 @@ def While_get_orelse(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", 
w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3745,7 +3751,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return space.wrap(w_self.test) def If_set_test(space, w_self, w_new_value): @@ -3764,7 +3770,7 @@ def If_get_body(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3780,7 +3786,7 @@ def If_get_orelse(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3828,7 +3834,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'context_expr') + raise_attriberr(space, w_self, 'context_expr') return space.wrap(w_self.context_expr) def With_set_context_expr(space, w_self, w_new_value): @@ -3851,7 +3857,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'optional_vars') + raise_attriberr(space, w_self, 'optional_vars') return space.wrap(w_self.optional_vars) def With_set_optional_vars(space, w_self, w_new_value): @@ -3870,7 +3876,7 @@ def With_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3917,7 +3923,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'type') + raise_attriberr(space, w_self, 'type') return space.wrap(w_self.type) def Raise_set_type(space, w_self, w_new_value): @@ -3940,7 +3946,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'inst') + raise_attriberr(space, w_self, 'inst') return space.wrap(w_self.inst) def Raise_set_inst(space, w_self, w_new_value): @@ -3963,7 +3969,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'tback') + raise_attriberr(space, w_self, 'tback') return space.wrap(w_self.tback) def Raise_set_tback(space, w_self, w_new_value): @@ -4008,7 +4014,7 @@ def TryExcept_get_body(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -4024,7 +4030,7 @@ def TryExcept_get_handlers(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'handlers') + raise_attriberr(space, 
w_self, 'handlers') if w_self.w_handlers is None: if w_self.handlers is None: list_w = [] @@ -4040,7 +4046,7 @@ def TryExcept_get_orelse(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -4085,7 +4091,7 @@ def TryFinally_get_body(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -4101,7 +4107,7 @@ def TryFinally_get_finalbody(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'finalbody') + raise_attriberr(space, w_self, 'finalbody') if w_self.w_finalbody is None: if w_self.finalbody is None: list_w = [] @@ -4148,7 +4154,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return space.wrap(w_self.test) def Assert_set_test(space, w_self, w_new_value): @@ -4171,7 +4177,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'msg') + raise_attriberr(space, w_self, 'msg') return space.wrap(w_self.msg) def Assert_set_msg(space, w_self, w_new_value): @@ -4215,7 +4221,7 @@ def Import_get_names(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') + raise_attriberr(space, w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4260,7 +4266,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'module') + raise_attriberr(space, w_self, 'module') return space.wrap(w_self.module) def ImportFrom_set_module(space, w_self, w_new_value): @@ -4280,7 +4286,7 @@ def ImportFrom_get_names(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') + raise_attriberr(space, w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4300,7 +4306,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'level') + raise_attriberr(space, w_self, 'level') return space.wrap(w_self.level) def ImportFrom_set_level(space, w_self, w_new_value): @@ -4348,7 +4354,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def Exec_set_body(space, w_self, w_new_value): @@ -4371,7 +4377,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'globals') + raise_attriberr(space, w_self, 'globals') return space.wrap(w_self.globals) def 
Exec_set_globals(space, w_self, w_new_value): @@ -4394,7 +4400,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'locals') + raise_attriberr(space, w_self, 'locals') return space.wrap(w_self.locals) def Exec_set_locals(space, w_self, w_new_value): @@ -4439,7 +4445,7 @@ def Global_get_names(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') + raise_attriberr(space, w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4484,7 +4490,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Expr_set_value(space, w_self, w_new_value): @@ -4582,7 +4588,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') + raise_attriberr(space, w_self, 'lineno') return space.wrap(w_self.lineno) def expr_set_lineno(space, w_self, w_new_value): @@ -4603,7 +4609,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') + raise_attriberr(space, w_self, 'col_offset') return space.wrap(w_self.col_offset) def expr_set_col_offset(space, w_self, w_new_value): @@ -4633,7 +4639,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return boolop_to_class[w_self.op - 1]() def BoolOp_set_op(space, w_self, w_new_value): @@ -4652,7 +4658,7 @@ def BoolOp_get_values(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') + raise_attriberr(space, w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -4698,7 +4704,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'left') + raise_attriberr(space, w_self, 'left') return space.wrap(w_self.left) def BinOp_set_left(space, w_self, w_new_value): @@ -4721,7 +4727,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return operator_to_class[w_self.op - 1]() def BinOp_set_op(space, w_self, w_new_value): @@ -4744,7 +4750,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'right') + raise_attriberr(space, w_self, 'right') return space.wrap(w_self.right) def BinOp_set_right(space, w_self, w_new_value): @@ -4793,7 +4799,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return unaryop_to_class[w_self.op - 1]() def 
UnaryOp_set_op(space, w_self, w_new_value): @@ -4816,7 +4822,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'operand') + raise_attriberr(space, w_self, 'operand') return space.wrap(w_self.operand) def UnaryOp_set_operand(space, w_self, w_new_value): @@ -4864,7 +4870,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') return space.wrap(w_self.args) def Lambda_set_args(space, w_self, w_new_value): @@ -4885,7 +4891,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def Lambda_set_body(space, w_self, w_new_value): @@ -4933,7 +4939,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return space.wrap(w_self.test) def IfExp_set_test(space, w_self, w_new_value): @@ -4956,7 +4962,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def IfExp_set_body(space, w_self, w_new_value): @@ -4979,7 +4985,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') return space.wrap(w_self.orelse) def IfExp_set_orelse(space, w_self, w_new_value): @@ -5024,7 +5030,7 @@ def Dict_get_keys(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'keys') + raise_attriberr(space, w_self, 'keys') if w_self.w_keys is None: if w_self.keys is None: list_w = [] @@ -5040,7 +5046,7 @@ def Dict_get_values(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') + raise_attriberr(space, w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -5083,7 +5089,7 @@ def Set_get_elts(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') + raise_attriberr(space, w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -5128,7 +5134,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') + raise_attriberr(space, w_self, 'elt') return space.wrap(w_self.elt) def ListComp_set_elt(space, w_self, w_new_value): @@ -5147,7 +5153,7 @@ def ListComp_get_generators(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5193,7 
+5199,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') + raise_attriberr(space, w_self, 'elt') return space.wrap(w_self.elt) def SetComp_set_elt(space, w_self, w_new_value): @@ -5212,7 +5218,7 @@ def SetComp_get_generators(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5258,7 +5264,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'key') + raise_attriberr(space, w_self, 'key') return space.wrap(w_self.key) def DictComp_set_key(space, w_self, w_new_value): @@ -5281,7 +5287,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def DictComp_set_value(space, w_self, w_new_value): @@ -5300,7 +5306,7 @@ def DictComp_get_generators(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5347,7 +5353,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') + raise_attriberr(space, w_self, 'elt') return space.wrap(w_self.elt) def GeneratorExp_set_elt(space, w_self, w_new_value): @@ -5366,7 +5372,7 @@ def GeneratorExp_get_generators(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5412,7 +5418,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Yield_set_value(space, w_self, w_new_value): @@ -5459,7 +5465,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'left') + raise_attriberr(space, w_self, 'left') return space.wrap(w_self.left) def Compare_set_left(space, w_self, w_new_value): @@ -5478,7 +5484,7 @@ def Compare_get_ops(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ops') + raise_attriberr(space, w_self, 'ops') if w_self.w_ops is None: if w_self.ops is None: list_w = [] @@ -5494,7 +5500,7 @@ def Compare_get_comparators(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'comparators') + raise_attriberr(space, w_self, 'comparators') if w_self.w_comparators is None: if w_self.comparators is None: list_w = [] 
@@ -5542,7 +5548,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'func') + raise_attriberr(space, w_self, 'func') return space.wrap(w_self.func) def Call_set_func(space, w_self, w_new_value): @@ -5561,7 +5567,7 @@ def Call_get_args(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') if w_self.w_args is None: if w_self.args is None: list_w = [] @@ -5577,7 +5583,7 @@ def Call_get_keywords(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'keywords') + raise_attriberr(space, w_self, 'keywords') if w_self.w_keywords is None: if w_self.keywords is None: list_w = [] @@ -5597,7 +5603,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'starargs') + raise_attriberr(space, w_self, 'starargs') return space.wrap(w_self.starargs) def Call_set_starargs(space, w_self, w_new_value): @@ -5620,7 +5626,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 64: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'kwargs') + raise_attriberr(space, w_self, 'kwargs') return space.wrap(w_self.kwargs) def Call_set_kwargs(space, w_self, w_new_value): @@ -5673,7 +5679,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Repr_set_value(space, w_self, w_new_value): @@ -5720,7 +5726,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'n') + raise_attriberr(space, w_self, 'n') return w_self.n def Num_set_n(space, w_self, w_new_value): @@ -5765,7 +5771,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 's') + raise_attriberr(space, w_self, 's') return w_self.s def Str_set_s(space, w_self, w_new_value): @@ -5810,7 +5816,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Attribute_set_value(space, w_self, w_new_value): @@ -5833,7 +5839,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'attr') + raise_attriberr(space, w_self, 'attr') return space.wrap(w_self.attr) def Attribute_set_attr(space, w_self, w_new_value): @@ -5854,7 +5860,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Attribute_set_ctx(space, w_self, w_new_value): @@ -5903,7 +5909,7 @@ if w_obj is not None: return w_obj if not 
w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Subscript_set_value(space, w_self, w_new_value): @@ -5926,7 +5932,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'slice') + raise_attriberr(space, w_self, 'slice') return space.wrap(w_self.slice) def Subscript_set_slice(space, w_self, w_new_value): @@ -5949,7 +5955,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Subscript_set_ctx(space, w_self, w_new_value): @@ -5998,7 +6004,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'id') + raise_attriberr(space, w_self, 'id') return space.wrap(w_self.id) def Name_set_id(space, w_self, w_new_value): @@ -6019,7 +6025,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Name_set_ctx(space, w_self, w_new_value): @@ -6063,7 +6069,7 @@ def List_get_elts(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') + raise_attriberr(space, w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -6083,7 +6089,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def List_set_ctx(space, w_self, w_new_value): @@ -6128,7 +6134,7 @@ def Tuple_get_elts(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') + raise_attriberr(space, w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -6148,7 +6154,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Tuple_set_ctx(space, w_self, w_new_value): @@ -6197,7 +6203,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return w_self.value def Const_set_value(space, w_self, w_new_value): @@ -6315,7 +6321,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lower') + raise_attriberr(space, w_self, 'lower') return space.wrap(w_self.lower) def Slice_set_lower(space, w_self, w_new_value): @@ -6338,7 +6344,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: 
- raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'upper') + raise_attriberr(space, w_self, 'upper') return space.wrap(w_self.upper) def Slice_set_upper(space, w_self, w_new_value): @@ -6361,7 +6367,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'step') + raise_attriberr(space, w_self, 'step') return space.wrap(w_self.step) def Slice_set_step(space, w_self, w_new_value): @@ -6406,7 +6412,7 @@ def ExtSlice_get_dims(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'dims') + raise_attriberr(space, w_self, 'dims') if w_self.w_dims is None: if w_self.dims is None: list_w = [] @@ -6451,7 +6457,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Index_set_value(space, w_self, w_new_value): @@ -6722,7 +6728,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') + raise_attriberr(space, w_self, 'target') return space.wrap(w_self.target) def comprehension_set_target(space, w_self, w_new_value): @@ -6745,7 +6751,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'iter') + raise_attriberr(space, w_self, 'iter') return space.wrap(w_self.iter) def comprehension_set_iter(space, w_self, w_new_value): @@ -6764,7 +6770,7 @@ def comprehension_get_ifs(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ifs') + raise_attriberr(space, w_self, 'ifs') if w_self.w_ifs is None: if w_self.ifs is None: list_w = [] @@ -6811,7 +6817,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') + raise_attriberr(space, w_self, 'lineno') return space.wrap(w_self.lineno) def excepthandler_set_lineno(space, w_self, w_new_value): @@ -6832,7 +6838,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') + raise_attriberr(space, w_self, 'col_offset') return space.wrap(w_self.col_offset) def excepthandler_set_col_offset(space, w_self, w_new_value): @@ -6862,7 +6868,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'type') + raise_attriberr(space, w_self, 'type') return space.wrap(w_self.type) def ExceptHandler_set_type(space, w_self, w_new_value): @@ -6885,7 +6891,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') return space.wrap(w_self.name) def ExceptHandler_set_name(space, w_self, w_new_value): @@ -6904,7 +6910,7 @@ def ExceptHandler_get_body(space, w_self): if not 
w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -6947,7 +6953,7 @@ def arguments_get_args(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') if w_self.w_args is None: if w_self.args is None: list_w = [] @@ -6967,7 +6973,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'vararg') + raise_attriberr(space, w_self, 'vararg') return space.wrap(w_self.vararg) def arguments_set_vararg(space, w_self, w_new_value): @@ -6991,7 +6997,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'kwarg') + raise_attriberr(space, w_self, 'kwarg') return space.wrap(w_self.kwarg) def arguments_set_kwarg(space, w_self, w_new_value): @@ -7011,7 +7017,7 @@ def arguments_get_defaults(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'defaults') + raise_attriberr(space, w_self, 'defaults') if w_self.w_defaults is None: if w_self.defaults is None: list_w = [] @@ -7060,7 +7066,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'arg') + raise_attriberr(space, w_self, 'arg') return space.wrap(w_self.arg) def keyword_set_arg(space, w_self, w_new_value): @@ -7081,7 +7087,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def keyword_set_value(space, w_self, w_new_value): @@ -7129,7 +7135,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') return space.wrap(w_self.name) def alias_set_name(space, w_self, w_new_value): @@ -7150,7 +7156,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'asname') + raise_attriberr(space, w_self, 'asname') return space.wrap(w_self.asname) def alias_set_asname(space, w_self, w_new_value): diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -409,8 +409,7 @@ self.emit(" if w_obj is not None:", 1) self.emit(" return w_obj", 1) self.emit("if not w_self.initialization_state & %s:" % (flag,), 1) - self.emit("raise operationerrfmt(space.w_AttributeError, \"'%%T' object has no attribute '%%s'\", w_self, '%s')" % - (field.name,), 2) + self.emit("raise_attriberr(space, w_self, '%s')" % (field.name,), 2) if field.seq: self.emit("if w_self.w_%s is None:" % (field.name,), 1) self.emit("if w_self.%s is None:" % (field.name,), 2) @@ -546,6 +545,12 @@ from 
rpython.tool.sourcetools import func_with_new_name +def raise_attriberr(space, w_obj, name): + raise operationerrfmt(space.w_AttributeError, + \"'%T' object has no attribute '%s'\", + w_obj, name) + + def check_string(space, w_obj): if not (space.isinstance_w(w_obj, space.w_str) or space.isinstance_w(w_obj, space.w_unicode)):

From noreply at buildbot.pypy.org Mon Feb 3 22:32:37 2014
From: noreply at buildbot.pypy.org (pjenvey)
Date: Mon, 3 Feb 2014 22:32:37 +0100 (CET)
Subject: [pypy-commit] pypy default: shorten operationerrfmt -> oefmt to ease its use all over the place
Message-ID: <20140203213237.2A62A1D24AC@cobra.cs.uni-duesseldorf.de>

Author: Philip Jenvey
Branch: 
Changeset: r69061:030972171657
Date: 2014-02-03 13:11 -0800
http://bitbucket.org/pypy/pypy/changeset/030972171657/

Log: shorten operationerrfmt -> oefmt to ease its use all over the place

diff too long, truncating to 2000 out of 4552 lines

diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py
--- a/pypy/interpreter/argument.py
+++ b/pypy/interpreter/argument.py
@@ -1,11 +1,11 @@
 """ Arguments objects. """ - -from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.debug import make_sure_not_resized from rpython.rlib import jit +from pypy.interpreter.error import OperationError, oefmt + class Arguments(object): """
@@ -86,9 +86,9 @@
 args_w = space.fixedview(w_stararg) except OperationError, e: if e.match(space, space.w_TypeError): - raise operationerrfmt( - space.w_TypeError, - "argument after * must be a sequence, not %T", w_stararg) + raise oefmt(space.w_TypeError, + "argument after * must be a sequence, not %T", + w_stararg) raise self.arguments_w = self.arguments_w + args_w
@@ -113,10 +113,9 @@
 w_keys = space.call_method(w_starstararg, "keys") except OperationError, e: if e.match(space, space.w_AttributeError): - raise operationerrfmt( - space.w_TypeError, - "argument after ** must be a mapping, not %T", - w_starstararg) + raise oefmt(space.w_TypeError, + "argument after ** must be a mapping, not %T", + w_starstararg) raise keys_w = space.unpackiterable(w_keys) keywords_w = [None] * len(keys_w)
@@ -281,8 +280,7 @@
 self._match_signature(w_firstarg, scope_w, signature, defaults_w, 0) except ArgErr, e: - raise operationerrfmt(self.space.w_TypeError, - "%s() %s", fnname, e.getmsg()) + raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg()) return signature.scope_length() def _parse(self, w_firstarg, signature, defaults_w, blindargs=0):
@@ -304,8 +302,7 @@
 try: return self._parse(w_firstarg, signature, defaults_w, blindargs) except ArgErr, e: - raise operationerrfmt(self.space.w_TypeError, - "%s() %s", fnname, e.getmsg()) + raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg()) @staticmethod def frompacked(space, w_args=None, w_kwds=None):
@@ -344,10 +341,9 @@
 for key in keywords: for otherkey in existingkeywords: if otherkey == key: - raise operationerrfmt(space.w_TypeError, - "got multiple values " - "for keyword argument " - "'%s'", key) + raise oefmt(space.w_TypeError, + "got multiple values for keyword argument '%s'", + key) def _do_combine_starstarargs_wrapped(space, keys_w, w_starstararg, keywords, keywords_w, existingkeywords):
@@ -367,10 +363,9 @@
 raise else: if existingkeywords and key in existingkeywords: - raise operationerrfmt(space.w_TypeError, - "got multiple values " - "for keyword argument " - "'%s'", key) + raise oefmt(space.w_TypeError, + "got multiple values for keyword argument '%s'", + key) keywords[i] = key keywords_w[i] =
space.getitem(w_starstararg, w_key) i += 1 diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -1,17 +1,17 @@ # Generated by tools/asdl_py.py -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter import typedef -from pypy.interpreter.gateway import interp2app -from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.unroll import unrolling_iterable from rpython.tool.pairtype import extendabletype from rpython.tool.sourcetools import func_with_new_name +from pypy.interpreter import typedef +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import interp2app + def raise_attriberr(space, w_obj, name): - raise operationerrfmt(space.w_AttributeError, - "'%T' object has no attribute '%s'", - w_obj, name) + raise oefmt(space.w_AttributeError, + "'%T' object has no attribute '%s'", w_obj, name) def check_string(space, w_obj): @@ -76,11 +76,13 @@ continue # field is optional w_obj = self.getdictvalue(space, missing) if w_obj is None: - err = "required field \"%s\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "required field \"%s\" missing from %s", + missing, host) else: - err = "incorrect type for field \"%s\" in %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "incorrect type for field \"%s\" in %s", + missing, host) raise AssertionError("should not reach here") diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -536,19 +536,19 @@ HEAD = """# Generated by tools/asdl_py.py -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter import typedef -from pypy.interpreter.gateway import interp2app -from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.unroll import unrolling_iterable from rpython.tool.pairtype import extendabletype from rpython.tool.sourcetools import func_with_new_name +from pypy.interpreter import typedef +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import interp2app + def raise_attriberr(space, w_obj, name): - raise operationerrfmt(space.w_AttributeError, - \"'%T' object has no attribute '%s'\", - w_obj, name) + raise oefmt(space.w_AttributeError, + \"'%T' object has no attribute '%s'\", w_obj, name) def check_string(space, w_obj): @@ -613,11 +613,13 @@ continue # field is optional w_obj = self.getdictvalue(space, missing) if w_obj is None: - err = "required field \\"%s\\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "required field \\"%s\\" missing from %s", + missing, host) else: - err = "incorrect type for field \\"%s\\" in %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "incorrect type for field \\"%s\\" in %s", + missing, host) raise AssertionError("should not reach here") diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -11,8 +11,7 @@ from pypy.interpreter.executioncontext 
import (ExecutionContext, ActionFlag, UserDelAction) -from pypy.interpreter.error import (OperationError, operationerrfmt, - new_exception_class) +from pypy.interpreter.error import OperationError, new_exception_class, oefmt from pypy.interpreter.argument import Arguments from pypy.interpreter.miscutils import ThreadLocals @@ -61,9 +60,9 @@ return False def setdict(self, space, w_dict): - raise operationerrfmt(space.w_TypeError, - "attribute '__dict__' of %T objects " - "is not writable", self) + raise oefmt(space.w_TypeError, + "attribute '__dict__' of %T objects is not writable", + self) # to be used directly only by space.type implementations def getclass(self, space): @@ -123,8 +122,8 @@ classname = '?' else: classname = wrappable_class_name(RequiredClass) - msg = "'%s' object expected, got '%T' instead" - raise operationerrfmt(space.w_TypeError, msg, classname, self) + raise oefmt(space.w_TypeError, + "'%s' object expected, got '%T' instead", classname, self) # used by _weakref implemenation @@ -132,8 +131,8 @@ return None def setweakref(self, space, weakreflifeline): - raise operationerrfmt(space.w_TypeError, - "cannot create weak reference to '%T' object", self) + raise oefmt(space.w_TypeError, + "cannot create weak reference to '%T' object", self) def delweakref(self): pass @@ -215,25 +214,25 @@ self._typed_unwrap_error(space, "integer") def _typed_unwrap_error(self, space, expected): - raise operationerrfmt(space.w_TypeError, "expected %s, got %T object", - expected, self) + raise oefmt(space.w_TypeError, + "expected %s, got %T object", expected, self) def int(self, space): w_impl = space.lookup(self, '__int__') if w_impl is None: - raise operationerrfmt(space.w_TypeError, - "unsupported operand type for int(): '%T'", self) + raise oefmt(space.w_TypeError, + "unsupported operand type for int(): '%T'", self) w_result = space.get_and_call_function(w_impl, self) if (space.isinstance_w(w_result, space.w_int) or space.isinstance_w(w_result, space.w_long)): return w_result - msg = "__int__ returned non-int (type '%T')" - raise operationerrfmt(space.w_TypeError, msg, w_result) + raise oefmt(space.w_TypeError, + "__int__ returned non-int (type '%T')", w_result) def ord(self, space): - msg = "ord() expected string of length 1, but %T found" - raise operationerrfmt(space.w_TypeError, msg, self) + raise oefmt(space.w_TypeError, + "ord() expected string of length 1, but %T found", self) def __spacebind__(self, space): return self @@ -430,10 +429,9 @@ try: w_mod = self.builtin_modules[name] except KeyError: - raise operationerrfmt( - self.w_SystemError, - "getbuiltinmodule() called " - "with non-builtin module %s", name) + raise oefmt(self.w_SystemError, + "getbuiltinmodule() called with non-builtin module %s", + name) else: # Add the module to sys.modules self.setitem(w_modules, w_name, w_mod) @@ -753,9 +751,10 @@ if can_be_None and self.is_none(w_obj): return None if not isinstance(w_obj, RequiredClass): # or obj is None - msg = "'%s' object expected, got '%N' instead" - raise operationerrfmt(self.w_TypeError, msg, - wrappable_class_name(RequiredClass), w_obj.getclass(self)) + raise oefmt(self.w_TypeError, + "'%s' object expected, got '%N' instead", + wrappable_class_name(RequiredClass), + w_obj.getclass(self)) return w_obj interp_w._annspecialcase_ = 'specialize:arg(1)' @@ -832,13 +831,9 @@ items[idx] = w_item idx += 1 if idx < expected_length: - if idx == 1: - plural = "" - else: - plural = "s" - raise operationerrfmt(self.w_ValueError, - "need more than %d value%s to unpack", - idx, 
plural) + raise oefmt(self.w_ValueError, + "need more than %d value%s to unpack", + idx, "" if idx == 1 else "s") return items def unpackiterable_unroll(self, w_iterable, expected_length): @@ -1257,8 +1252,8 @@ except OperationError, err: if objdescr is None or not err.match(self, self.w_TypeError): raise - msg = "%s must be an integer, not %T" - raise operationerrfmt(self.w_TypeError, msg, objdescr, w_obj) + raise oefmt(self.w_TypeError, "%s must be an integer, not %T", + objdescr, w_obj) try: index = self.int_w(w_index) except OperationError, err: @@ -1271,9 +1266,9 @@ else: return sys.maxint else: - raise operationerrfmt( - w_exception, "cannot fit '%T' into an index-sized integer", - w_obj) + raise oefmt(w_exception, + "cannot fit '%T' into an index-sized integer", + w_obj) else: return index @@ -1517,9 +1512,9 @@ ) fd = self.int_w(w_fd) if fd < 0: - raise operationerrfmt(self.w_ValueError, - "file descriptor cannot be a negative integer (%d)", fd - ) + raise oefmt(self.w_ValueError, + "file descriptor cannot be a negative integer (%d)", + fd) return fd def warn(self, w_msg, w_warningcls, stacklevel=2): diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -226,9 +226,9 @@ def _exception_getclass(self, space, w_inst): w_type = space.exception_getclass(w_inst) if not space.exception_is_valid_class_w(w_type): - msg = ("exceptions must be old-style classes or derived " - "from BaseException, not %N") - raise operationerrfmt(space.w_TypeError, msg, w_type) + raise oefmt(space.w_TypeError, + "exceptions must be old-style classes or derived from " + "BaseException, not %N", w_type) return w_type def write_unraisable(self, space, where, w_object=None, @@ -383,15 +383,16 @@ self._w_value = w_value = space.wrap(self._value) return w_value -def get_operationerr_class(valuefmt): + at specialize.memo() +def get_operr_class(valuefmt): try: result = _fmtcache[valuefmt] except KeyError: result = _fmtcache[valuefmt] = get_operrcls2(valuefmt) return result -get_operationerr_class._annspecialcase_ = 'specialize:memo' -def operationerrfmt(w_type, valuefmt, *args): + at specialize.arg(1) +def oefmt(w_type, valuefmt, *args): """Equivalent to OperationError(w_type, space.wrap(valuefmt % args)). More efficient in the (common) case where the value is not actually needed. 
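
The docstring kept in the hunk above is the only real documentation of the rename: oefmt(w_type, valuefmt, *args) is operationerrfmt under a shorter name, taking the same %-style codes (%s, %d, %T, %N, %R) and still building the wrapped value lazily. A minimal sketch of what that means at a call site is below, assuming the PyPy source tree is on the import path; the wrapper function typecheck_sketch and its arguments are illustrative only and do not appear in this changeset.

# Illustrative sketch only, not part of changeset 030972171657.
# oefmt is imported the same way the hunks above do; typecheck_sketch and
# its arguments are hypothetical stand-ins for a real interpreter call site.
from pypy.interpreter.error import oefmt

def typecheck_sketch(space, w_obj, objdescr):
    # Before this commit the equivalent code built the message separately:
    #     msg = "%s must be an integer, not %T"
    #     raise operationerrfmt(space.w_TypeError, msg, objdescr, w_obj)
    # After it, the format string is passed inline to the shorter name:
    raise oefmt(space.w_TypeError,
                "%s must be an integer, not %T", objdescr, w_obj)

The test hunks from pypy/interpreter/test/test_error.py later in this diff exercise the same %T, %N and %R codes under the new name, so callers only change the function they spell out.
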
@@ -405,9 +406,8 @@ """ if not len(args): return OpErrFmtNoArgs(w_type, valuefmt) - OpErrFmt, strings = get_operationerr_class(valuefmt) + OpErrFmt, strings = get_operr_class(valuefmt) return OpErrFmt(w_type, strings, *args) -operationerrfmt._annspecialcase_ = 'specialize:arg(1)' # ____________________________________________________________ diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -7,8 +7,8 @@ """ from rpython.rlib.unroll import unrolling_iterable -from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.eval import Code from pypy.interpreter.argument import Arguments from rpython.rlib import jit @@ -413,9 +413,9 @@ if self.closure: closure_len = len(self.closure) if isinstance(code, PyCode) and closure_len != len(code.co_freevars): - raise operationerrfmt(space.w_ValueError, - "%N() requires a code object with %d free vars, not %d", - self, closure_len, len(code.co_freevars)) + raise oefmt(space.w_ValueError, + "%N() requires a code object with %d free vars, not " + "%d", self, closure_len, len(code.co_freevars)) self.fget_func_doc(space) # see test_issue1293 self.code = code @@ -495,10 +495,9 @@ instdescr = instname + " instance" else: instdescr = "instance" - msg = ("unbound method %N() must be called with %s " - "as first argument (got %s instead)") - raise operationerrfmt(space.w_TypeError, msg, - self, clsdescr, instdescr) + raise oefmt(space.w_TypeError, + "unbound method %N() must be called with %s as first " + "argument (got %s instead)", self, clsdescr, instdescr) return space.call_args(self.w_function, args) def descr_method_get(self, w_obj, w_cls=None): diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -12,7 +12,7 @@ from pypy.interpreter.argument import Arguments from pypy.interpreter.astcompiler import consts from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.executioncontext import ExecutionContext from pypy.interpreter.nestedscope import Cell from pypy.tool import stdlib_opcode @@ -622,8 +622,8 @@ line = self.pycode.co_firstlineno if new_lineno < line: - raise operationerrfmt(space.w_ValueError, - "line %d comes before the current code.", new_lineno) + raise oefmt(space.w_ValueError, + "line %d comes before the current code.", new_lineno) elif new_lineno == line: new_lasti = 0 else: @@ -639,8 +639,8 @@ break if new_lasti == -1: - raise operationerrfmt(space.w_ValueError, - "line %d comes after the current code.", new_lineno) + raise oefmt(space.w_ValueError, + "line %d comes after the current code.", new_lineno) # Don't jump to a line with an except in it. 
code = self.pycode.co_code @@ -687,9 +687,9 @@ assert len(blockstack) == 0 if new_lasti_setup_addr != f_lasti_setup_addr: - raise operationerrfmt(space.w_ValueError, - "can't jump into or out of a 'finally' block %d -> %d", - f_lasti_setup_addr, new_lasti_setup_addr) + raise oefmt(space.w_ValueError, + "can't jump into or out of a 'finally' block %d -> %d", + f_lasti_setup_addr, new_lasti_setup_addr) if new_lasti < self.last_instr: min_addr = new_lasti diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -14,7 +14,7 @@ gateway, function, eval, pyframe, pytraceback, pycode ) from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.nestedscope import Cell from pypy.interpreter.pycode import PyCode, BytecodeCorruption from pypy.tool.stdlib_opcode import bytecode_spec @@ -492,8 +492,9 @@ def _load_fast_failed(self, varindex): varname = self.getlocalvarname(varindex) - message = "local variable '%s' referenced before assignment" - raise operationerrfmt(self.space.w_UnboundLocalError, message, varname) + raise oefmt(self.space.w_UnboundLocalError, + "local variable '%s' referenced before assignment", + varname) _load_fast_failed._dont_inline_ = True def LOAD_CONST(self, constindex, next_instr): @@ -848,9 +849,8 @@ # catch KeyErrors and turn them into NameErrors if not e.match(self.space, self.space.w_KeyError): raise - message = "name '%s' is not defined" - raise operationerrfmt(self.space.w_NameError, message, - self.space.str_w(w_varname)) + raise oefmt(self.space.w_NameError, "name '%s' is not defined", + self.space.str_w(w_varname)) def UNPACK_SEQUENCE(self, itemcount, next_instr): w_iterable = self.popvalue() @@ -899,8 +899,8 @@ _load_global._always_inline_ = True def _load_global_failed(self, varname): - message = "global name '%s' is not defined" - raise operationerrfmt(self.space.w_NameError, message, varname) + raise oefmt(self.space.w_NameError, + "global name '%s' is not defined", varname) _load_global_failed._dont_inline_ = True def LOAD_GLOBAL(self, nameindex, next_instr): @@ -910,9 +910,9 @@ def DELETE_FAST(self, varindex, next_instr): if self.locals_stack_w[varindex] is None: varname = self.getlocalvarname(varindex) - message = "local variable '%s' referenced before assignment" - raise operationerrfmt(self.space.w_UnboundLocalError, message, - varname) + raise oefmt(self.space.w_UnboundLocalError, + "local variable '%s' referenced before assignment", + varname) self.locals_stack_w[varindex] = None def BUILD_TUPLE(self, itemcount, next_instr): @@ -1040,9 +1040,8 @@ except OperationError, e: if not e.match(self.space, self.space.w_AttributeError): raise - raise operationerrfmt(self.space.w_ImportError, - "cannot import name '%s'", - self.space.str_w(w_name)) + raise oefmt(self.space.w_ImportError, + "cannot import name '%s'", self.space.str_w(w_name)) self.pushvalue(w_obj) def YIELD_VALUE(self, oparg, next_instr): @@ -1127,9 +1126,9 @@ w_enter = self.space.lookup(w_manager, "__enter__") w_descr = self.space.lookup(w_manager, "__exit__") if w_enter is None or w_descr is None: - raise operationerrfmt(self.space.w_AttributeError, - "'%T' object is not a context manager" - " (no __enter__/__exit__ method)", w_manager) + raise oefmt(self.space.w_AttributeError, + "'%T' object is not a context manager (no __enter__/" + "__exit__ method)", w_manager) w_exit = 
self.space.get(w_descr, w_manager) self.settopvalue(w_exit) w_result = self.space.get_and_call_function(w_enter, w_manager) diff --git a/pypy/interpreter/test/test_error.py b/pypy/interpreter/test/test_error.py --- a/pypy/interpreter/test/test_error.py +++ b/pypy/interpreter/test/test_error.py @@ -1,7 +1,7 @@ import py, os, errno -from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.error import decompose_valuefmt, get_operrcls2 -from pypy.interpreter.error import wrap_oserror, new_exception_class +from pypy.interpreter.error import ( + OperationError, decompose_valuefmt, get_operrcls2, new_exception_class, + oefmt, wrap_oserror) def test_decompose_valuefmt(): @@ -22,59 +22,59 @@ assert cls2 is cls # caching assert strings2 == ("a ", " b ", " c") -def test_operationerrfmt(space): - operr = operationerrfmt("w_type", "abc %s def %d", "foo", 42) +def test_oefmt(space): + operr = oefmt("w_type", "abc %s def %d", "foo", 42) assert isinstance(operr, OperationError) assert operr.w_type == "w_type" assert operr._w_value is None assert operr._compute_value(space) == "abc foo def 42" - operr2 = operationerrfmt("w_type2", "a %s b %d c", "bar", 43) + operr2 = oefmt("w_type2", "a %s b %d c", "bar", 43) assert operr2.__class__ is operr.__class__ - operr3 = operationerrfmt("w_type2", "a %s b %s c", "bar", "4b") + operr3 = oefmt("w_type2", "a %s b %s c", "bar", "4b") assert operr3.__class__ is not operr.__class__ -def test_operationerrfmt_noargs(space): - operr = operationerrfmt(space.w_AttributeError, "no attribute 'foo'") +def test_oefmt_noargs(space): + operr = oefmt(space.w_AttributeError, "no attribute 'foo'") operr.normalize_exception(space) val = operr.get_w_value(space) assert space.isinstance_w(val, space.w_AttributeError) w_repr = space.repr(val) assert space.str_w(w_repr) == "AttributeError(\"no attribute 'foo'\",)" -def test_operationerrfmt_T(space): - operr = operationerrfmt(space.w_AttributeError, - "'%T' object has no attribute '%s'", - space.wrap('foo'), 'foo') +def test_oefmt_T(space): + operr = oefmt(space.w_AttributeError, + "'%T' object has no attribute '%s'", + space.wrap('foo'), 'foo') assert operr._compute_value(space) == "'str' object has no attribute 'foo'" - operr = operationerrfmt("w_type", - "'%T' object has no attribute '%s'", - space.wrap('foo'), 'foo') + operr = oefmt("w_type", + "'%T' object has no attribute '%s'", + space.wrap('foo'), 'foo') assert operr._compute_value(space) == "'str' object has no attribute 'foo'" -def test_operationerrfmt_N(space): - operr = operationerrfmt(space.w_AttributeError, - "'%N' object has no attribute '%s'", - space.type(space.wrap('foo')), 'foo') +def test_oefmt_N(space): + operr = oefmt(space.w_AttributeError, + "'%N' object has no attribute '%s'", + space.type(space.wrap('foo')), 'foo') assert operr._compute_value(space) == "'str' object has no attribute 'foo'" - operr = operationerrfmt("w_type", - "'%N' object has no attribute '%s'", - space.type(space.wrap('foo')), 'foo') + operr = oefmt("w_type", + "'%N' object has no attribute '%s'", + space.type(space.wrap('foo')), 'foo') assert operr._compute_value(space) == "'str' object has no attribute 'foo'" - operr = operationerrfmt(space.w_AttributeError, - "'%N' object has no attribute '%s'", - space.wrap('foo'), 'foo') + operr = oefmt(space.w_AttributeError, + "'%N' object has no attribute '%s'", + space.wrap('foo'), 'foo') assert operr._compute_value(space) == "'?' 
object has no attribute 'foo'" - operr = operationerrfmt("w_type", - "'%N' object has no attribute '%s'", - space.wrap('foo'), 'foo') + operr = oefmt("w_type", + "'%N' object has no attribute '%s'", + space.wrap('foo'), 'foo') assert operr._compute_value(space) == "'?' object has no attribute 'foo'" -def test_operationerrfmt_R(space): - operr = operationerrfmt(space.w_ValueError, "illegal newline value: %R", - space.wrap('foo')) +def test_oefmt_R(space): + operr = oefmt(space.w_ValueError, + "illegal newline value: %R", space.wrap('foo')) assert operr._compute_value(space) == "illegal newline value: 'foo'" - operr = operationerrfmt(space.w_ValueError, "illegal newline value: %R", - space.wrap("'PyLadies'")) + operr = oefmt(space.w_ValueError, "illegal newline value: %R", + space.wrap("'PyLadies'")) expected = "illegal newline value: \"'PyLadies'\"" assert operr._compute_value(space) == expected diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -2,7 +2,7 @@ from pypy.interpreter.argument import Arguments from pypy.interpreter.baseobjspace import W_Root, DescrMismatch -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import (interp2app, BuiltinCode, unwrap_spec, WrappedDefault) @@ -549,9 +549,9 @@ def typecheck(self, space, w_obj): if not space.isinstance_w(w_obj, self.w_cls): - m = "descriptor '%N' for '%N' objects doesn't apply to '%T' object" - raise operationerrfmt(space.w_TypeError, m, - self, self.w_cls, w_obj) + raise oefmt(space.w_TypeError, + "descriptor '%N' for '%N' objects doesn't apply to " + "'%T' object", self, self.w_cls, w_obj) def descr_member_get(self, space, w_obj, w_cls=None): """member.__get__(obj[, type]) -> value @@ -620,8 +620,9 @@ def descr_get_dict(space, w_obj): w_dict = w_obj.getdict(space) if w_dict is None: - msg = "descriptor '__dict__' doesn't apply to '%T' objects" - raise operationerrfmt(space.w_TypeError, msg, w_obj) + raise oefmt(space.w_TypeError, + "descriptor '__dict__' doesn't apply to '%T' objects", + w_obj) return w_dict def descr_set_dict(space, w_obj, w_dict): diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -1,5 +1,5 @@ import new -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.interpreter.baseobjspace import W_Root @@ -10,8 +10,8 @@ def raise_type_err(space, argument, expected, w_obj): - raise operationerrfmt(space.w_TypeError, "argument %s must be %s, not %T", - argument, expected, w_obj) + raise oefmt(space.w_TypeError, + "argument %s must be %s, not %T", argument, expected, w_obj) def unwrap_attr(space, w_attr): try: @@ -126,10 +126,8 @@ return space.newtuple(self.bases_w) w_value = self.lookup(space, name) if w_value is None: - raise operationerrfmt( - space.w_AttributeError, - "class %s has no attribute '%s'", - self.name, name) + raise oefmt(space.w_AttributeError, + "class %s has no attribute '%s'", self.name, name) w_descr_get = space.lookup(w_value, '__get__') if w_descr_get is None: @@ -158,18 +156,15 @@ def descr_delattr(self, space, w_attr): name = unwrap_attr(space, w_attr) if name in 
("__dict__", "__name__", "__bases__"): - raise operationerrfmt( - space.w_TypeError, - "cannot delete attribute '%s'", name) + raise oefmt(space.w_TypeError, + "cannot delete attribute '%s'", name) try: space.delitem(self.w_dict, w_attr) except OperationError, e: if not e.match(space, space.w_KeyError): raise - raise operationerrfmt( - space.w_AttributeError, - "class %s has no attribute '%s'", - self.name, name) + raise oefmt(space.w_AttributeError, + "class %s has no attribute '%s'", self.name, name) def descr_repr(self, space): mod = self.get_module_string(space) @@ -362,10 +357,9 @@ raise # not found at all if exc: - raise operationerrfmt( - space.w_AttributeError, - "%s instance has no attribute '%s'", - self.w_class.name, name) + raise oefmt(space.w_AttributeError, + "%s instance has no attribute '%s'", + self.w_class.name, name) else: return None @@ -416,10 +410,9 @@ space.call_function(w_meth, w_name) else: if not self.deldictvalue(space, name): - raise operationerrfmt( - space.w_AttributeError, - "%s instance has no attribute '%s'", - self.w_class.name, name) + raise oefmt(space.w_AttributeError, + "%s instance has no attribute '%s'", + self.w_class.name, name) def descr_repr(self, space): w_meth = self.getattr(space, '__repr__', False) diff --git a/pypy/module/__pypy__/interp_dict.py b/pypy/module/__pypy__/interp_dict.py --- a/pypy/module/__pypy__/interp_dict.py +++ b/pypy/module/__pypy__/interp_dict.py @@ -1,6 +1,6 @@ +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec -from pypy.interpreter.error import operationerrfmt, OperationError from pypy.objspace.std.dictmultiobject import W_DictMultiObject @unwrap_spec(type=str) @@ -30,8 +30,7 @@ elif type == 'strdict': return space.newdict(strdict=True) else: - raise operationerrfmt(space.w_TypeError, "unknown type of dict %s", - type) + raise oefmt(space.w_TypeError, "unknown type of dict %s", type) def dictstrategy(space, w_obj): """ dictstrategy(dict) diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -1,6 +1,6 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import RWBuffer -from pypy.interpreter.error import operationerrfmt +from pypy.interpreter.error import oefmt from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray @@ -87,11 +87,9 @@ if size < 0: size = w_cdata._sizeof() else: - raise operationerrfmt(space.w_TypeError, - "expected a pointer or array cdata, got '%s'", - ctype.name) + raise oefmt(space.w_TypeError, + "expected a pointer or array cdata, got '%s'", ctype.name) if size < 0: - raise operationerrfmt(space.w_TypeError, - "don't know the size pointed to by '%s'", - ctype.name) + raise oefmt(space.w_TypeError, + "don't know the size pointed to by '%s'", ctype.name) return space.wrap(MiniBuffer(LLBuffer(w_cdata._cdata, size), w_cdata)) diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -7,7 +7,7 @@ from rpython.rlib.objectmodel import compute_unique_id, keepalive_until_here from rpython.rtyper.lltypesystem import lltype, rffi -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import 
OperationError, oefmt from pypy.module._cffi_backend import cerrno, misc from pypy.module._cffi_backend.cdataobj import W_CData from pypy.module._cffi_backend.ctypefunc import SIZE_OF_FFI_ARG, BIG_ENDIAN, W_CTypeFunc @@ -26,9 +26,8 @@ W_CData.__init__(self, space, raw_closure, ctype) # if not space.is_true(space.callable(w_callable)): - raise operationerrfmt(space.w_TypeError, - "expected a callable object, not %T", - w_callable) + raise oefmt(space.w_TypeError, + "expected a callable object, not %T", w_callable) self.w_callable = w_callable # fresult = self.getfunctype().ctitem diff --git a/pypy/module/_cffi_backend/cdataobj.py b/pypy/module/_cffi_backend/cdataobj.py --- a/pypy/module/_cffi_backend/cdataobj.py +++ b/pypy/module/_cffi_backend/cdataobj.py @@ -1,7 +1,7 @@ import operator from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr @@ -78,9 +78,8 @@ space = self.space if isinstance(self.ctype, ctypearray.W_CTypeArray): return space.wrap(self.get_array_length()) - raise operationerrfmt(space.w_TypeError, - "cdata of type '%s' has no len()", - self.ctype.name) + raise oefmt(space.w_TypeError, + "cdata of type '%s' has no len()", self.ctype.name) def _make_comparison(name): op = getattr(operator, name) @@ -219,9 +218,9 @@ from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw value = space.str_w(w_value) if len(value) != length: - raise operationerrfmt(space.w_ValueError, - "need a string of length %d, got %d", - length, len(value)) + raise oefmt(space.w_ValueError, + "need a string of length %d, got %d", + length, len(value)) copy_string_to_raw(llstr(value), cdata, 0, length) return # @@ -232,9 +231,8 @@ except OperationError, e: if not e.match(space, space.w_StopIteration): raise - raise operationerrfmt(space.w_ValueError, - "need %d values to unpack, got %d", - length, i) + raise oefmt(space.w_ValueError, + "need %d values to unpack, got %d", length, i) ctitem.convert_from_object(cdata, w_item) cdata = rffi.ptradd(cdata, ctitemsize) try: @@ -243,8 +241,8 @@ if not e.match(space, space.w_StopIteration): raise else: - raise operationerrfmt(space.w_ValueError, - "got more than %d values to unpack", length) + raise oefmt(space.w_ValueError, + "got more than %d values to unpack", length) def _add_or_sub(self, w_other, sign): space = self.space @@ -265,9 +263,9 @@ if (ct is not self.ctype or not isinstance(ct, ctypeptr.W_CTypePointer) or (ct.ctitem.size <= 0 and not ct.is_void_ptr)): - raise operationerrfmt(space.w_TypeError, - "cannot subtract cdata '%s' and cdata '%s'", - self.ctype.name, ct.name) + raise oefmt(space.w_TypeError, + "cannot subtract cdata '%s' and cdata '%s'", + self.ctype.name, ct.name) # itemsize = ct.ctitem.size if itemsize <= 0: itemsize = 1 diff --git a/pypy/module/_cffi_backend/ctypearray.py b/pypy/module/_cffi_backend/ctypearray.py --- a/pypy/module/_cffi_backend/ctypearray.py +++ b/pypy/module/_cffi_backend/ctypearray.py @@ -3,7 +3,7 @@ """ from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef @@ -59,9 +59,9 @@ raise OperationError(space.w_IndexError, space.wrap("negative index not supported")) if i >= 
w_cdata.get_array_length(): - raise operationerrfmt(space.w_IndexError, - "index too large for cdata '%s' (expected %d < %d)", - self.name, i, w_cdata.get_array_length()) + raise oefmt(space.w_IndexError, + "index too large for cdata '%s' (expected %d < %d)", + self.name, i, w_cdata.get_array_length()) return self def _check_slice_index(self, w_cdata, start, stop): @@ -70,9 +70,9 @@ raise OperationError(space.w_IndexError, space.wrap("negative index not supported")) if stop > w_cdata.get_array_length(): - raise operationerrfmt(space.w_IndexError, - "index too large (expected %d <= %d)", - stop, w_cdata.get_array_length()) + raise oefmt(space.w_IndexError, + "index too large (expected %d <= %d)", + stop, w_cdata.get_array_length()) return self.ctptr def convert_from_object(self, cdata, w_ob): diff --git a/pypy/module/_cffi_backend/ctypefunc.py b/pypy/module/_cffi_backend/ctypefunc.py --- a/pypy/module/_cffi_backend/ctypefunc.py +++ b/pypy/module/_cffi_backend/ctypefunc.py @@ -3,7 +3,6 @@ """ import sys -from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib import jit, clibffi, jit_libffi from rpython.rlib.jit_libffi import (CIF_DESCRIPTION, CIF_DESCRIPTION_P, @@ -11,6 +10,7 @@ from rpython.rlib.objectmodel import we_are_translated, instantiate from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from pypy.interpreter.error import OperationError, oefmt from pypy.module._cffi_backend import ctypearray, cdataobj, cerrno from pypy.module._cffi_backend.ctypeobj import W_CType from pypy.module._cffi_backend.ctypeptr import W_CTypePtrBase, W_CTypePointer @@ -52,10 +52,9 @@ if isinstance(w_obj, cdataobj.W_CData): ct = w_obj.ctype.get_vararg_type() else: - raise operationerrfmt(space.w_TypeError, - "argument %d passed in the variadic part " - "needs to be a cdata object (got %T)", - i + 1, w_obj) + raise oefmt(space.w_TypeError, + "argument %d passed in the variadic part needs to " + "be a cdata object (got %T)", i + 1, w_obj) fvarargs[i] = ct ctypefunc = instantiate(W_CTypeFunc) ctypefunc.space = space @@ -100,9 +99,9 @@ nargs_declared = len(self.fargs) if len(args_w) != nargs_declared: space = self.space - raise operationerrfmt(space.w_TypeError, - "'%s' expects %d arguments, got %d", - self.name, nargs_declared, len(args_w)) + raise oefmt(space.w_TypeError, + "'%s' expects %d arguments, got %d", + self.name, nargs_declared, len(args_w)) return self._call(funcaddr, args_w) else: # call of a variadic function @@ -113,9 +112,9 @@ nargs_declared = len(self.fargs) if len(args_w) < nargs_declared: space = self.space - raise operationerrfmt(space.w_TypeError, - "'%s' expects at least %d arguments, got %d", - self.name, nargs_declared, len(args_w)) + raise oefmt(space.w_TypeError, + "'%s' expects at least %d arguments, got %d", + self.name, nargs_declared, len(args_w)) completed = self.new_ctypefunc_completing_argtypes(args_w) return completed._call(funcaddr, args_w) @@ -187,16 +186,15 @@ def _missing_ffi_type(self, cifbuilder, is_result_type): space = self.space if self.size < 0: - raise operationerrfmt(space.w_TypeError, - "ctype '%s' has incomplete type", - self.name) + raise oefmt(space.w_TypeError, + "ctype '%s' has incomplete type", self.name) if is_result_type: place = "return value" else: place = "argument" - raise operationerrfmt(space.w_NotImplementedError, - "ctype '%s' (size %d) not supported as %s", - self.name, self.size, place) + raise oefmt(space.w_NotImplementedError, + "ctype '%s' (size %d) not supported as %s", + self.name, self.size, place) 
def _struct_ffi_type(self, cifbuilder, is_result_type): if self.size >= 0: diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -1,5 +1,5 @@ from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr, GetSetProperty @@ -54,34 +54,31 @@ def newp(self, w_init): space = self.space - raise operationerrfmt(space.w_TypeError, - "expected a pointer or array ctype, got '%s'", - self.name) + raise oefmt(space.w_TypeError, + "expected a pointer or array ctype, got '%s'", self.name) def cast(self, w_ob): space = self.space - raise operationerrfmt(space.w_TypeError, - "cannot cast to '%s'", self.name) + raise oefmt(space.w_TypeError, "cannot cast to '%s'", self.name) def cast_to_int(self, cdata): space = self.space - raise operationerrfmt(space.w_TypeError, - "int() not supported on cdata '%s'", self.name) + raise oefmt(space.w_TypeError, "int() not supported on cdata '%s'", + self.name) def float(self, cdata): space = self.space - raise operationerrfmt(space.w_TypeError, - "float() not supported on cdata '%s'", self.name) + raise oefmt(space.w_TypeError, "float() not supported on cdata '%s'", + self.name) def convert_to_object(self, cdata): space = self.space - raise operationerrfmt(space.w_TypeError, - "cannot return a cdata '%s'", self.name) + raise oefmt(space.w_TypeError, "cannot return a cdata '%s'", self.name) def convert_from_object(self, cdata, w_ob): space = self.space - raise operationerrfmt(space.w_TypeError, - "cannot initialize cdata '%s'", self.name) + raise oefmt(space.w_TypeError, "cannot initialize cdata '%s'", + self.name) def convert_argument_from_object(self, cdata, w_ob): self.convert_from_object(cdata, w_ob) @@ -90,20 +87,18 @@ def _convert_error(self, expected, w_got): space = self.space if isinstance(w_got, cdataobj.W_CData): - return operationerrfmt(space.w_TypeError, - "initializer for ctype '%s' must be a %s, " - "not cdata '%s'", self.name, expected, - w_got.ctype.name) + return oefmt(space.w_TypeError, + "initializer for ctype '%s' must be a %s, not cdata " + "'%s'", self.name, expected, w_got.ctype.name) else: - return operationerrfmt(space.w_TypeError, - "initializer for ctype '%s' must be a %s, " - "not %T", self.name, expected, w_got) + return oefmt(space.w_TypeError, + "initializer for ctype '%s' must be a %s, not %T", + self.name, expected, w_got) def _cannot_index(self): space = self.space - raise operationerrfmt(space.w_TypeError, - "cdata of type '%s' cannot be indexed", - self.name) + raise oefmt(space.w_TypeError, "cdata of type '%s' cannot be indexed", + self.name) def _check_subscript_index(self, w_cdata, i): raise self._cannot_index() @@ -113,15 +108,13 @@ def string(self, cdataobj, maxlen): space = self.space - raise operationerrfmt(space.w_TypeError, - "string(): unexpected cdata '%s' argument", - self.name) + raise oefmt(space.w_TypeError, + "string(): unexpected cdata '%s' argument", self.name) def add(self, cdata, i): space = self.space - raise operationerrfmt(space.w_TypeError, - "cannot add a cdata '%s' and a number", - self.name) + raise oefmt(space.w_TypeError, "cannot add a cdata '%s' and a number", + self.name) def insert_name(self, extra, extra_position): name = '%s%s%s' % 
(self.name[:self.name_position], @@ -144,9 +137,8 @@ def _alignof(self): space = self.space - raise operationerrfmt(space.w_ValueError, - "ctype '%s' is of unknown alignment", - self.name) + raise oefmt(space.w_ValueError, "ctype '%s' is of unknown alignment", + self.name) def typeoffsetof(self, fieldname): space = self.space @@ -163,14 +155,12 @@ def call(self, funcaddr, args_w): space = self.space - raise operationerrfmt(space.w_TypeError, - "cdata '%s' is not callable", self.name) + raise oefmt(space.w_TypeError, "cdata '%s' is not callable", self.name) def iter(self, cdata): space = self.space - raise operationerrfmt(space.w_TypeError, - "cdata '%s' does not support iteration", - self.name) + raise oefmt(space.w_TypeError, + "cdata '%s' does not support iteration", self.name) def unpackiterable_int(self, cdata): return None @@ -180,9 +170,8 @@ def getcfield(self, attr): space = self.space - raise operationerrfmt(space.w_AttributeError, - "cdata '%s' has no attribute '%s'", - self.name, attr) + raise oefmt(space.w_AttributeError, + "cdata '%s' has no attribute '%s'", self.name, attr) def copy_and_convert_to_object(self, cdata): return self.convert_to_object(cdata) @@ -202,9 +191,8 @@ return space.wrap(self.kind) # class attribute if attrchar == 'c': # cname return space.wrap(self.name) - raise operationerrfmt(space.w_AttributeError, - "ctype '%s' has no such attribute", - self.name) + raise oefmt(space.w_AttributeError, + "ctype '%s' has no such attribute", self.name) def fget_kind(self, space): return self._fget('k') def fget_cname(self, space): return self._fget('c') diff --git a/pypy/module/_cffi_backend/ctypeprim.py b/pypy/module/_cffi_backend/ctypeprim.py --- a/pypy/module/_cffi_backend/ctypeprim.py +++ b/pypy/module/_cffi_backend/ctypeprim.py @@ -3,13 +3,13 @@ """ import sys -from pypy.interpreter.error import operationerrfmt from rpython.rlib.rarithmetic import r_uint, r_ulonglong, intmask from rpython.rlib.objectmodel import keepalive_until_here from rpython.rlib import jit from rpython.rtyper.lltypesystem import lltype, rffi +from pypy.interpreter.error import oefmt from pypy.module._cffi_backend import cdataobj, misc from pypy.module._cffi_backend.ctypeobj import W_CType @@ -34,18 +34,18 @@ space = self.space s = space.str_w(w_ob) if len(s) != 1: - raise operationerrfmt(space.w_TypeError, - "cannot cast string of length %d to ctype '%s'", - len(s), self.name) + raise oefmt(space.w_TypeError, + "cannot cast string of length %d to ctype '%s'", + len(s), self.name) return ord(s[0]) def cast_unicode(self, w_ob): space = self.space s = space.unicode_w(w_ob) if len(s) != 1: - raise operationerrfmt(space.w_TypeError, - "cannot cast unicode string of length %d to ctype '%s'", - len(s), self.name) + raise oefmt(space.w_TypeError, + "cannot cast unicode string of length %d to ctype '%s'", + len(s), self.name) return ord(s[0]) def cast(self, w_ob): @@ -76,8 +76,8 @@ def _overflow(self, w_ob): space = self.space s = space.str_w(space.str(w_ob)) - raise operationerrfmt(space.w_OverflowError, - "integer %s does not fit '%s'", s, self.name) + raise oefmt(space.w_OverflowError, + "integer %s does not fit '%s'", s, self.name) def string(self, cdataobj, maxlen): if self.size == 1: @@ -330,9 +330,9 @@ space = self.space if isinstance(w_ob, cdataobj.W_CData): if not isinstance(w_ob.ctype, W_CTypePrimitive): - raise operationerrfmt(space.w_TypeError, - "cannot cast ctype '%s' to ctype '%s'", - w_ob.ctype.name, self.name) + raise oefmt(space.w_TypeError, + "cannot cast ctype '%s' to ctype '%s'", + 
w_ob.ctype.name, self.name) w_ob = w_ob.convert_to_object() # if space.isinstance_w(w_ob, space.w_str): diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -9,7 +9,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem.rstr import copy_string_to_raw, copy_unicode_to_raw -from pypy.interpreter.error import OperationError, operationerrfmt, wrap_oserror +from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.module._cffi_backend import cdataobj, misc, ctypeprim, ctypevoid from pypy.module._cffi_backend.ctypeobj import W_CType @@ -62,9 +62,9 @@ space = self.space lst_w = space.listview(w_ob) if self.length >= 0 and len(lst_w) > self.length: - raise operationerrfmt(space.w_IndexError, - "too many initializers for '%s' (got %d)", - self.name, len(lst_w)) + raise oefmt(space.w_IndexError, + "too many initializers for '%s' (got %d)", + self.name, len(lst_w)) ctitem = self.ctitem for i in range(len(lst_w)): ctitem.convert_from_object(cdata, lst_w[i]) @@ -83,10 +83,9 @@ s = space.str_w(w_ob) n = len(s) if self.length >= 0 and n > self.length: - raise operationerrfmt(space.w_IndexError, - "initializer string is too long for '%s'" - " (got %d characters)", - self.name, n) + raise oefmt(space.w_IndexError, + "initializer string is too long for '%s' (got %d " + "characters)", self.name, n) copy_string_to_raw(llstr(s), cdata, 0, n) if n != self.length: cdata[n] = '\x00' @@ -96,10 +95,9 @@ s = space.unicode_w(w_ob) n = len(s) if self.length >= 0 and n > self.length: - raise operationerrfmt(space.w_IndexError, - "initializer unicode string is too long for '%s'" - " (got %d characters)", - self.name, n) + raise oefmt(space.w_IndexError, + "initializer unicode string is too long for '%s' " + "(got %d characters)", self.name, n) unichardata = rffi.cast(rffi.CWCHARP, cdata) copy_unicode_to_raw(llunicode(s), unichardata, 0, n) if n != self.length: @@ -112,9 +110,8 @@ if isinstance(self.ctitem, ctypeprim.W_CTypePrimitive): cdata = cdataobj._cdata if not cdata: - raise operationerrfmt(space.w_RuntimeError, - "cannot use string() on %s", - space.str_w(cdataobj.repr())) + raise oefmt(space.w_RuntimeError, "cannot use string() on %s", + space.str_w(cdataobj.repr())) # from pypy.module._cffi_backend import ctypearray length = maxlen @@ -196,9 +193,9 @@ ctitem = self.ctitem datasize = ctitem.size if datasize < 0: - raise operationerrfmt(space.w_TypeError, - "cannot instantiate ctype '%s' of unknown size", - self.name) + raise oefmt(space.w_TypeError, + "cannot instantiate ctype '%s' of unknown size", + self.name) if isinstance(ctitem, W_CTypeStructOrUnion): # 'newp' on a struct-or-union pointer: in this case, we return # a W_CDataPtrToStruct object which has a strong reference @@ -227,9 +224,8 @@ isinstance(w_cdata, cdataobj.W_CDataPtrToStructOrUnion)): if i != 0: space = self.space - raise operationerrfmt(space.w_IndexError, - "cdata '%s' can only be indexed by 0", - self.name) + raise oefmt(space.w_IndexError, + "cdata '%s' can only be indexed by 0", self.name) return self def _check_slice_index(self, w_cdata, start, stop): @@ -243,9 +239,9 @@ if self.is_void_ptr: itemsize = 1 else: - raise operationerrfmt(space.w_TypeError, - "ctype '%s' points to items of unknown size", - self.name) + raise oefmt(space.w_TypeError, + "ctype '%s' points to items of unknown size", + self.name) p = rffi.ptradd(cdata, i * itemsize) return 
cdataobj.W_CData(space, p, self) diff --git a/pypy/module/_cffi_backend/ctypestruct.py b/pypy/module/_cffi_backend/ctypestruct.py --- a/pypy/module/_cffi_backend/ctypestruct.py +++ b/pypy/module/_cffi_backend/ctypestruct.py @@ -2,8 +2,8 @@ Struct and unions. """ -from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.typedef import TypeDef, interp_attrproperty from rpython.rlib import jit @@ -32,8 +32,8 @@ def check_complete(self, w_errorcls=None): if self.fields_dict is None: space = self.space - raise operationerrfmt(w_errorcls or space.w_TypeError, - "'%s' is opaque or not completed yet", self.name) + raise oefmt(w_errorcls or space.w_TypeError, + "'%s' is opaque or not completed yet", self.name) def _alignof(self): self.check_complete(w_errorcls=self.space.w_ValueError) @@ -106,9 +106,9 @@ space.isinstance_w(w_ob, space.w_tuple)): lst_w = space.listview(w_ob) if len(lst_w) > len(self.fields_list): - raise operationerrfmt(space.w_ValueError, - "too many initializers for '%s' (got %d)", - self.name, len(lst_w)) + raise oefmt(space.w_ValueError, + "too many initializers for '%s' (got %d)", + self.name, len(lst_w)) for i in range(len(lst_w)): optvarsize = self.fields_list[i].write_v(cdata, lst_w[i], optvarsize) @@ -161,10 +161,9 @@ space = self.space n = space.int_w(space.len(w_ob)) if n > 1: - raise operationerrfmt(space.w_ValueError, - "initializer for '%s': %d items given, but " - "only one supported (use a dict if needed)", - self.name, n) + raise oefmt(space.w_ValueError, + "initializer for '%s': %d items given, but only one " + "supported (use a dict if needed)", self.name, n) class W_CField(W_Root): @@ -295,10 +294,9 @@ fmin = r_longlong(0) fmax = r_longlong((r_ulonglong(1) << self.bitsize) - 1) if value < fmin or value > fmax: - raise operationerrfmt(space.w_OverflowError, - "value %d outside the range allowed by the " - "bit field width: %d <= x <= %d", - value, fmin, fmax) + raise oefmt(space.w_OverflowError, + "value %d outside the range allowed by the bit field " + "width: %d <= x <= %d", value, fmin, fmax) rawmask = ((r_ulonglong(1) << self.bitsize) - 1) << self.bitshift rawvalue = r_ulonglong(value) << self.bitshift rawfielddata = misc.read_raw_unsigned_data(cdata, ctype.size) diff --git a/pypy/module/_cffi_backend/func.py b/pypy/module/_cffi_backend/func.py --- a/pypy/module/_cffi_backend/func.py +++ b/pypy/module/_cffi_backend/func.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from pypy.module._cffi_backend import ctypeobj, cdataobj @@ -36,9 +36,8 @@ elif isinstance(w_obj, ctypeobj.W_CType): size = w_obj.size if size < 0: - raise operationerrfmt(space.w_ValueError, - "ctype '%s' is of unknown size", - w_obj.name) + raise oefmt(space.w_ValueError, + "ctype '%s' is of unknown size", w_obj.name) else: raise OperationError(space.w_TypeError, space.wrap("expected a 'cdata' or 'ctype' object")) diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -1,5 +1,5 @@ import weakref -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec from 
pypy.module._cffi_backend import ctypeobj, ctypeptr, cdataobj from rpython.rtyper.lltypesystem import lltype, rffi @@ -19,8 +19,8 @@ def newp_handle(space, w_ctype, w_x): if (not isinstance(w_ctype, ctypeptr.W_CTypePointer) or not w_ctype.is_void_ptr): - raise operationerrfmt(space.w_TypeError, - "needs 'void *', got '%s'", w_ctype.name) + raise oefmt(space.w_TypeError, + "needs 'void *', got '%s'", w_ctype.name) index = get(space).reserve_next_handle_index() _cdata = rffi.cast(rffi.CCHARP, index + 1) new_cdataobj = cdataobj.W_CDataHandle(space, _cdata, w_ctype, w_x) @@ -32,9 +32,9 @@ ctype = w_cdata.ctype if (not isinstance(ctype, ctypeptr.W_CTypePtrOrArray) or not ctype.can_cast_anything): - raise operationerrfmt(space.w_TypeError, - "expected a 'cdata' object with a 'void *' out " - "of new_handle(), got '%s'", ctype.name) + raise oefmt(space.w_TypeError, + "expected a 'cdata' object with a 'void *' out of " + "new_handle(), got '%s'", ctype.name) index = rffi.cast(lltype.Signed, w_cdata._cdata) original_cdataobj = get(space).fetch_handle(index - 1) # diff --git a/pypy/module/_cffi_backend/libraryobj.py b/pypy/module/_cffi_backend/libraryobj.py --- a/pypy/module/_cffi_backend/libraryobj.py +++ b/pypy/module/_cffi_backend/libraryobj.py @@ -1,7 +1,7 @@ from __future__ import with_statement from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import operationerrfmt +from pypy.interpreter.error import oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef from pypy.module._rawffi.interp_rawffi import wrap_dlopenerror @@ -50,16 +50,15 @@ isinstance(w_ctype.ctitem, ctypevoid.W_CTypeVoid)): ok = True if not ok: - raise operationerrfmt(space.w_TypeError, - "function cdata expected, got '%s'", - w_ctype.name) + raise oefmt(space.w_TypeError, + "function cdata expected, got '%s'", w_ctype.name) # try: cdata = dlsym(self.handle, name) except KeyError: - raise operationerrfmt(space.w_KeyError, - "function '%s' not found in library '%s'", - name, self.name) + raise oefmt(space.w_KeyError, + "function '%s' not found in library '%s'", + name, self.name) return W_CData(space, rffi.cast(rffi.CCHARP, cdata), w_ctype) @unwrap_spec(w_ctype=W_CType, name=str) @@ -68,9 +67,9 @@ try: cdata = dlsym(self.handle, name) except KeyError: - raise operationerrfmt(space.w_KeyError, - "variable '%s' not found in library '%s'", - name, self.name) + raise oefmt(space.w_KeyError, + "variable '%s' not found in library '%s'", + name, self.name) return w_ctype.convert_to_object(rffi.cast(rffi.CCHARP, cdata)) @unwrap_spec(w_ctype=W_CType, name=str) @@ -79,9 +78,9 @@ try: cdata = dlsym(self.handle, name) except KeyError: - raise operationerrfmt(space.w_KeyError, - "variable '%s' not found in library '%s'", - name, self.name) + raise oefmt(space.w_KeyError, + "variable '%s' not found in library '%s'", + name, self.name) w_ctype.convert_from_object(rffi.cast(rffi.CCHARP, cdata), w_value) diff --git a/pypy/module/_cffi_backend/newtype.py b/pypy/module/_cffi_backend/newtype.py --- a/pypy/module/_cffi_backend/newtype.py +++ b/pypy/module/_cffi_backend/newtype.py @@ -1,5 +1,5 @@ import sys -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec from rpython.rlib.objectmodel import specialize @@ -91,9 +91,8 @@ space.wrap("first arg must be a pointer ctype")) ctitem = w_ctptr.ctitem if ctitem.size < 0: - raise 
operationerrfmt(space.w_ValueError, - "array item of unknown size: '%s'", - ctitem.name) + raise oefmt(space.w_ValueError, "array item of unknown size: '%s'", + ctitem.name) if space.is_w(w_length, space.w_None): length = -1 arraysize = -1 @@ -175,17 +174,16 @@ if len(field_w) > 3: foffset = space.int_w(field_w[3]) # if fname in fields_dict: - raise operationerrfmt(space.w_KeyError, - "duplicate field name '%s'", fname) + raise oefmt(space.w_KeyError, "duplicate field name '%s'", fname) # if ftype.size < 0: if (isinstance(ftype, ctypearray.W_CTypeArray) and fbitsize < 0 and (i == len(fields_w) - 1 or foffset != -1)): with_var_array = True else: - raise operationerrfmt(space.w_TypeError, - "field '%s.%s' has ctype '%s' of unknown size", - w_ctype.name, fname, ftype.name) + raise oefmt(space.w_TypeError, + "field '%s.%s' has ctype '%s' of unknown size", + w_ctype.name, fname, ftype.name) # if is_union: boffset = 0 # reset each field at offset 0 @@ -250,24 +248,21 @@ # this is the case of a bitfield if foffset >= 0: - raise operationerrfmt(space.w_TypeError, - "field '%s.%s' is a bitfield, " - "but a fixed offset is specified", - w_ctype.name, fname) + raise oefmt(space.w_TypeError, + "field '%s.%s' is a bitfield, but a fixed offset " + "is specified", w_ctype.name, fname) if not (isinstance(ftype, ctypeprim.W_CTypePrimitiveSigned) or isinstance(ftype, ctypeprim.W_CTypePrimitiveUnsigned) or isinstance(ftype,ctypeprim.W_CTypePrimitiveCharOrUniChar)): - raise operationerrfmt(space.w_TypeError, - "field '%s.%s' declared as '%s' " - "cannot be a bit field", - w_ctype.name, fname, ftype.name) + raise oefmt(space.w_TypeError, + "field '%s.%s' declared as '%s' cannot be a bit " + "field", w_ctype.name, fname, ftype.name) if fbitsize > 8 * ftype.size: - raise operationerrfmt(space.w_TypeError, - "bit field '%s.%s' is declared '%s:%d'," - " which exceeds the width of the type", - w_ctype.name, fname, - ftype.name, fbitsize) + raise oefmt(space.w_TypeError, + "bit field '%s.%s' is declared '%s:%d', which " + "exceeds the width of the type", + w_ctype.name, fname, ftype.name, fbitsize) # compute the starting position of the theoretical field # that covers a complete 'ftype', inside of which we will @@ -277,9 +272,9 @@ if fbitsize == 0: if fname != '': - raise operationerrfmt(space.w_TypeError, - "field '%s.%s' is declared with :0", - w_ctype.name, fname) + raise oefmt(space.w_TypeError, + "field '%s.%s' is declared with :0", + w_ctype.name, fname) if (sflags & SF_MSVC_BITFIELDS) == 0: # GCC's notion of "ftype :0;" # pad boffset to a value aligned for "ftype" @@ -308,10 +303,11 @@ # allowed position if ((sflags & SF_PACKED) != 0 and (bits_already_occupied & 7) != 0): - raise operationerrfmt(space.w_NotImplementedError, - "with 'packed', gcc would compile field " - "'%s.%s' to reuse some bits in the previous " - "field", w_ctype.name, fname) + raise oefmt(space.w_NotImplementedError, + "with 'packed', gcc would compile " + "field '%s.%s' to reuse some bits in " + "the previous field", + w_ctype.name, fname) field_offset_bytes += falign assert boffset < field_offset_bytes * 8 boffset = field_offset_bytes * 8 @@ -362,9 +358,9 @@ totalsize = (got + alignment - 1) & ~(alignment - 1) totalsize = totalsize or 1 elif totalsize < got: - raise operationerrfmt(space.w_TypeError, - "%s cannot be of size %d: there are fields at least " - "up to %d", w_ctype.name, totalsize, got) + raise oefmt(space.w_TypeError, + "%s cannot be of size %d: there are fields at least up to " + "%d", w_ctype.name, totalsize, got) if 
totalalignment < 0: totalalignment = alignment @@ -436,11 +432,11 @@ or isinstance(w_fresult, ctypearray.W_CTypeArray)): if (isinstance(w_fresult, ctypestruct.W_CTypeStructOrUnion) and w_fresult.size < 0): - raise operationerrfmt(space.w_TypeError, - "result type '%s' is opaque", w_fresult.name) + raise oefmt(space.w_TypeError, + "result type '%s' is opaque", w_fresult.name) else: - raise operationerrfmt(space.w_TypeError, - "invalid result type: '%s'", w_fresult.name) + raise oefmt(space.w_TypeError, + "invalid result type: '%s'", w_fresult.name) # fct = ctypefunc.W_CTypeFunc(space, fargs, w_fresult, ellipsis) return fct diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -2,7 +2,7 @@ from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rstring import UnicodeBuilder -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault @@ -56,15 +56,15 @@ else: msg = ("encoding error handler must return " "(unicode, int) tuple, not %R") - raise operationerrfmt(space.w_TypeError, msg, w_res) + raise oefmt(space.w_TypeError, msg, w_res) w_replace, w_newpos = space.fixedview(w_res, 2) newpos = space.int_w(w_newpos) if newpos < 0: newpos = len(input) + newpos if newpos < 0 or newpos > len(input): - raise operationerrfmt( - space.w_IndexError, - "position %d from error handler out of bounds", newpos) + raise oefmt(space.w_IndexError, + "position %d from error handler out of bounds", + newpos) replace = space.unicode_w(w_replace) return replace, newpos return call_errorhandler @@ -164,9 +164,7 @@ state.codec_search_cache[normalized_encoding] = w_result state.modified() return w_result - raise operationerrfmt( - space.w_LookupError, - "unknown encoding: %s", encoding) + raise oefmt(space.w_LookupError, "unknown encoding: %s", encoding) # ____________________________________________________________ # Register standard error handlers @@ -216,8 +214,8 @@ text = u'\ufffd' * size return space.newtuple([space.wrap(text), w_end]) else: - raise operationerrfmt(space.w_TypeError, - "don't know how to handle %T in error callback", w_exc) + raise oefmt(space.w_TypeError, + "don't know how to handle %T in error callback", w_exc) def xmlcharrefreplace_errors(space, w_exc): check_exception(space, w_exc) @@ -236,8 +234,8 @@ pos += 1 return space.newtuple([space.wrap(builder.build()), w_end]) else: - raise operationerrfmt(space.w_TypeError, - "don't know how to handle %T in error callback", w_exc) + raise oefmt(space.w_TypeError, + "don't know how to handle %T in error callback", w_exc) def backslashreplace_errors(space, w_exc): check_exception(space, w_exc) @@ -268,8 +266,8 @@ pos += 1 return space.newtuple([space.wrap(builder.build()), w_end]) else: - raise operationerrfmt(space.w_TypeError, - "don't know how to handle %T in error callback", w_exc) + raise oefmt(space.w_TypeError, + "don't know how to handle %T in error callback", w_exc) def register_builtin_error_handlers(space): "NOT_RPYTHON" @@ -292,9 +290,8 @@ try: w_err_handler = state.codec_error_registry[errors] except KeyError: - raise operationerrfmt( - space.w_LookupError, - "unknown error handler name %s", errors) + raise oefmt(space.w_LookupError, + "unknown error handler name %s", errors) return w_err_handler diff --git a/pypy/module/_csv/interp_csv.py 
b/pypy/module/_csv/interp_csv.py --- a/pypy/module/_csv/interp_csv.py +++ b/pypy/module/_csv/interp_csv.py @@ -1,5 +1,5 @@ from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.typedef import TypeDef, interp_attrproperty from pypy.interpreter.typedef import GetSetProperty from pypy.interpreter.gateway import interp2app @@ -49,8 +49,7 @@ return src[0] if len(src) == 0: return '\0' - raise operationerrfmt(space.w_TypeError, - '"%s" must be a 1-character string', name) + raise oefmt(space.w_TypeError, '"%s" must be a 1-character string', name) def _build_dialect(space, w_dialect, w_delimiter, w_doublequote, w_escapechar, w_lineterminator, w_quotechar, w_quoting, diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py --- a/pypy/module/_file/interp_file.py +++ b/pypy/module/_file/interp_file.py @@ -7,7 +7,7 @@ from rpython.rlib.rstring import StringBuilder from pypy.module._file.interp_stream import W_AbstractStream, StreamErrors from pypy.module.posix.interp_posix import dispatch_filename -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, make_weakref_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -79,8 +79,7 @@ if (not mode or mode[0] not in ['r', 'w', 'a', 'U'] or ('U' in mode and ('w' in mode or 'a' in mode))): space = self.space - raise operationerrfmt(space.w_ValueError, - "invalid mode: '%s'", mode) + raise oefmt(space.w_ValueError, "invalid mode: '%s'", mode) def check_closed(self): if self.stream is None: diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py --- a/pypy/module/_io/interp_bufferedio.py +++ b/pypy/module/_io/interp_bufferedio.py @@ -1,8 +1,9 @@ from __future__ import with_statement + +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.typedef import ( TypeDef, GetSetProperty, generic_new_descr, interp_attrproperty_w) from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.buffer import RWBuffer from rpython.rlib.rstring import StringBuilder from rpython.rlib.rarithmetic import r_longlong, intmask @@ -257,8 +258,8 @@ def seek_w(self, space, pos, whence=0): self._check_init(space) if whence not in (0, 1, 2): - raise operationerrfmt(space.w_ValueError, - "whence must be between 0 and 2, not %d", whence) + raise oefmt(space.w_ValueError, + "whence must be between 0 and 2, not %d", whence) self._check_closed(space, "seek of closed file") if whence != 2 and self.readable: # Check if seeking leaves us inside the current buffer, so as to diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -1,7 +1,7 @@ +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.typedef import ( TypeDef, generic_new_descr, GetSetProperty) from pypy.interpreter.gateway import interp2app, unwrap_spec -from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.rStringIO import RStringIO from rpython.rlib.rarithmetic import r_longlong from pypy.module._io.interp_bufferedio import W_BufferedIOBase @@ -105,8 
+105,8 @@ raise OperationError(space.w_OverflowError, space.wrap( "new position too large")) else: - raise operationerrfmt(space.w_ValueError, - "whence must be between 0 and 2, not %d", whence) + raise oefmt(space.w_ValueError, + "whence must be between 0 and 2, not %d", whence) self.seek(pos, whence) return space.wrap(self.tell()) @@ -137,9 +137,9 @@ self._check_closed(space) if space.len_w(w_state) != 3: - raise operationerrfmt(space.w_TypeError, - "%T.__setstate__ argument should be 3-tuple, got %T", - self, w_state) + raise oefmt(space.w_TypeError, + "%T.__setstate__ argument should be 3-tuple, got %T", + self, w_state) w_content, w_pos, w_dict = space.unpackiterable(w_state, 3) self.truncate(0) self.write_w(space, w_content) diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py --- a/pypy/module/_io/interp_io.py +++ b/pypy/module/_io/interp_io.py @@ -1,6 +1,6 @@ import os -from pypy.interpreter.error import operationerrfmt, OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import ( TypeDef, interp_attrproperty, generic_new_descr) @@ -42,7 +42,7 @@ if not (space.isinstance_w(w_file, space.w_basestring) or space.isinstance_w(w_file, space.w_int) or space.isinstance_w(w_file, space.w_long)): - raise operationerrfmt(space.w_TypeError, "invalid file: %R", w_file) + raise oefmt(space.w_TypeError, "invalid file: %R", w_file) reading = writing = appending = updating = text = binary = universal = False @@ -50,9 +50,7 @@ for flag in mode: uniq_mode[flag] = None if len(uniq_mode) != len(mode): - raise operationerrfmt(space.w_ValueError, - "invalid mode: %s", mode - ) + raise oefmt(space.w_ValueError, "invalid mode: %s", mode) for flag in mode: if flag == "r": reading = True @@ -70,9 +68,7 @@ universal = True reading = True else: - raise operationerrfmt(space.w_ValueError, - "invalid mode: %s", mode - ) + raise oefmt(space.w_ValueError, "invalid mode: %s", mode) rawmode = "" if reading: @@ -146,7 +142,7 @@ elif reading: buffer_cls = W_BufferedReader else: - raise operationerrfmt(space.w_ValueError, "unknown mode: '%s'", mode) + raise oefmt(space.w_ValueError, "unknown mode: '%s'", mode) w_buffer = space.call_function( space.gettypefor(buffer_cls), w_raw, space.wrap(buffering) ) diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -1,9 +1,9 @@ from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.typedef import ( TypeDef, GetSetProperty, generic_new_descr, descr_get_dict, descr_set_dict, make_weakref_descr) from pypy.interpreter.gateway import interp2app -from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.rstring import StringBuilder from rpython.rlib import rweakref, rweaklist @@ -180,10 +180,9 @@ if has_peek: w_readahead = space.call_method(self, "peek", space.wrap(1)) if not space.isinstance_w(w_readahead, space.w_str): - raise operationerrfmt( - space.w_IOError, - "peek() should have returned a bytes object, not '%T'", - w_readahead) + raise oefmt(space.w_IOError, + "peek() should have returned a bytes object, " + "not '%T'", w_readahead) length = space.len_w(w_readahead) if length > 0: n = 0 @@ -206,10 +205,9 @@ w_read = space.call_method(self, "read", space.wrap(nreadahead)) if not space.isinstance_w(w_read, space.w_str): - raise operationerrfmt( 
- space.w_IOError, - "peek() should have returned a bytes object, not '%T'", - w_read) + raise oefmt(space.w_IOError, + "peek() should have returned a bytes object, not " + "'%T'", w_read) read = space.str_w(w_read) if not read: break diff --git a/pypy/module/_io/interp_stringio.py b/pypy/module/_io/interp_stringio.py --- a/pypy/module/_io/interp_stringio.py +++ b/pypy/module/_io/interp_stringio.py @@ -1,7 +1,7 @@ +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.typedef import ( TypeDef, generic_new_descr, GetSetProperty) from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.interpreter.error import OperationError, operationerrfmt from pypy.module._io.interp_textio import W_TextIOBase, W_IncrementalNewlineDecoder from pypy.module._io.interp_iobase import convert_size From noreply at buildbot.pypy.org Mon Feb 3 23:51:59 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 3 Feb 2014 23:51:59 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: merge default Message-ID: <20140203225159.1049B1C0352@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r69062:c89e66204e9a Date: 2014-02-03 14:23 -0800 http://bitbucket.org/pypy/pypy/changeset/c89e66204e9a/ Log: merge default diff too long, truncating to 2000 out of 5843 lines diff --git a/dotviewer/graphdisplay.py b/dotviewer/graphdisplay.py --- a/dotviewer/graphdisplay.py +++ b/dotviewer/graphdisplay.py @@ -136,6 +136,7 @@ Click on objects to move around Drag with the left mouse button to zoom in/out Drag with the right mouse button to scroll + Use scroll wheel do scroll up or down """.replace('\n ', '\n').strip() # poor man's dedent @@ -613,6 +614,19 @@ def process_MouseButtonUp(self, event): self.dragging = None pygame.event.set_grab(False) + # handle directional scrolling + if event.button == 4: + self.pan((0, -1)) + return + if event.button == 5: + self.pan((0, 1)) + return + if event.button == 6: + self.pan((-1, 0)) + return + if event.button == 7: + self.pan((1, 0)) + return if self.click_time is not None and abs(time.time() - self.click_time) < 1: # click (no significant dragging) self.notifyclick(self.click_origin) diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -1,11 +1,11 @@ """ Arguments objects. 
""" - -from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.debug import make_sure_not_resized from rpython.rlib import jit +from pypy.interpreter.error import OperationError, oefmt + class Arguments(object): """ @@ -86,9 +86,9 @@ args_w = space.fixedview(w_stararg) except OperationError, e: if e.match(space, space.w_TypeError): - raise operationerrfmt( - space.w_TypeError, - "argument after * must be a sequence, not %T", w_stararg) + raise oefmt(space.w_TypeError, + "argument after * must be a sequence, not %T", + w_stararg) raise self.arguments_w = self.arguments_w + args_w @@ -113,10 +113,9 @@ w_keys = space.call_method(w_starstararg, "keys") except OperationError, e: if e.match(space, space.w_AttributeError): - raise operationerrfmt( - space.w_TypeError, - "argument after ** must be a mapping, not %T", - w_starstararg) + raise oefmt(space.w_TypeError, + "argument after ** must be a mapping, not %T", + w_starstararg) raise keys_w = space.unpackiterable(w_keys) keywords_w = [None] * len(keys_w) @@ -281,8 +280,7 @@ self._match_signature(w_firstarg, scope_w, signature, defaults_w, 0) except ArgErr, e: - raise operationerrfmt(self.space.w_TypeError, - "%s() %s", fnname, e.getmsg()) + raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg()) return signature.scope_length() def _parse(self, w_firstarg, signature, defaults_w, blindargs=0): @@ -304,8 +302,7 @@ try: return self._parse(w_firstarg, signature, defaults_w, blindargs) except ArgErr, e: - raise operationerrfmt(self.space.w_TypeError, - "%s() %s", fnname, e.getmsg()) + raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg()) @staticmethod def frompacked(space, w_args=None, w_kwds=None): @@ -344,10 +341,9 @@ for key in keywords: for otherkey in existingkeywords: if otherkey == key: - raise operationerrfmt(space.w_TypeError, - "got multiple values " - "for keyword argument " - "'%s'", key) + raise oefmt(space.w_TypeError, + "got multiple values for keyword argument '%s'", + key) def _do_combine_starstarargs_wrapped(space, keys_w, w_starstararg, keywords, keywords_w, existingkeywords): @@ -367,10 +363,9 @@ raise else: if existingkeywords and key in existingkeywords: - raise operationerrfmt(space.w_TypeError, - "got multiple values " - "for keyword argument " - "'%s'", key) + raise oefmt(space.w_TypeError, + "got multiple values for keyword argument '%s'", + key) keywords[i] = key keywords_w[i] = space.getitem(w_starstararg, w_key) i += 1 diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -1,12 +1,18 @@ # Generated by tools/asdl_py.py -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter import typedef -from pypy.interpreter.gateway import interp2app -from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.unroll import unrolling_iterable from rpython.tool.pairtype import extendabletype from rpython.tool.sourcetools import func_with_new_name +from pypy.interpreter import typedef +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import interp2app + + +def raise_attriberr(space, w_obj, name): + raise oefmt(space.w_AttributeError, + "'%T' object has no attribute '%s'", w_obj, name) + def check_string(space, w_obj): if not (space.isinstance_w(w_obj, space.w_str) or @@ -70,11 +76,13 @@ continue # field is optional w_obj = 
self.getdictvalue(space, missing) if w_obj is None: - err = "required field \"%s\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "required field \"%s\" missing from %s", + missing, host) else: - err = "incorrect type for field \"%s\" in %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "incorrect type for field \"%s\" in %s", + missing, host) raise AssertionError("should not reach here") @@ -2793,7 +2801,7 @@ def Module_get_body(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2834,7 +2842,7 @@ def Interactive_get_body(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2879,7 +2887,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def Expression_set_body(space, w_self, w_new_value): @@ -2922,7 +2930,7 @@ def Suite_get_body(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2967,7 +2975,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') + raise_attriberr(space, w_self, 'lineno') return space.wrap(w_self.lineno) def stmt_set_lineno(space, w_self, w_new_value): @@ -2988,7 +2996,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') + raise_attriberr(space, w_self, 'col_offset') return space.wrap(w_self.col_offset) def stmt_set_col_offset(space, w_self, w_new_value): @@ -3018,7 +3026,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') return space.wrap(w_self.name) def FunctionDef_set_name(space, w_self, w_new_value): @@ -3039,7 +3047,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') return space.wrap(w_self.args) def FunctionDef_set_args(space, w_self, w_new_value): @@ -3056,7 +3064,7 @@ def FunctionDef_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3072,7 +3080,7 @@ def FunctionDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no 
attribute '%s'", w_self, 'decorator_list') + raise_attriberr(space, w_self, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: list_w = [] @@ -3121,7 +3129,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') return space.wrap(w_self.name) def ClassDef_set_name(space, w_self, w_new_value): @@ -3138,7 +3146,7 @@ def ClassDef_get_bases(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'bases') + raise_attriberr(space, w_self, 'bases') if w_self.w_bases is None: if w_self.bases is None: list_w = [] @@ -3154,7 +3162,7 @@ def ClassDef_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3170,7 +3178,7 @@ def ClassDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'decorator_list') + raise_attriberr(space, w_self, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: list_w = [] @@ -3220,7 +3228,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Return_set_value(space, w_self, w_new_value): @@ -3263,7 +3271,7 @@ def Delete_get_targets(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'targets') + raise_attriberr(space, w_self, 'targets') if w_self.w_targets is None: if w_self.targets is None: list_w = [] @@ -3304,7 +3312,7 @@ def Assign_get_targets(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'targets') + raise_attriberr(space, w_self, 'targets') if w_self.w_targets is None: if w_self.targets is None: list_w = [] @@ -3324,7 +3332,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Assign_set_value(space, w_self, w_new_value): @@ -3373,7 +3381,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') + raise_attriberr(space, w_self, 'target') return space.wrap(w_self.target) def AugAssign_set_target(space, w_self, w_new_value): @@ -3396,7 +3404,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return operator_to_class[w_self.op - 1]() def AugAssign_set_op(space, w_self, w_new_value): @@ -3419,7 +3427,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' 
object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def AugAssign_set_value(space, w_self, w_new_value): @@ -3468,7 +3476,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'dest') + raise_attriberr(space, w_self, 'dest') return space.wrap(w_self.dest) def Print_set_dest(space, w_self, w_new_value): @@ -3487,7 +3495,7 @@ def Print_get_values(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') + raise_attriberr(space, w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -3507,7 +3515,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'nl') + raise_attriberr(space, w_self, 'nl') return space.wrap(w_self.nl) def Print_set_nl(space, w_self, w_new_value): @@ -3555,7 +3563,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') + raise_attriberr(space, w_self, 'target') return space.wrap(w_self.target) def For_set_target(space, w_self, w_new_value): @@ -3578,7 +3586,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'iter') + raise_attriberr(space, w_self, 'iter') return space.wrap(w_self.iter) def For_set_iter(space, w_self, w_new_value): @@ -3597,7 +3605,7 @@ def For_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3613,7 +3621,7 @@ def For_get_orelse(space, w_self): if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3662,7 +3670,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return space.wrap(w_self.test) def While_set_test(space, w_self, w_new_value): @@ -3681,7 +3689,7 @@ def While_get_body(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3697,7 +3705,7 @@ def While_get_orelse(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3745,7 +3753,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return 
space.wrap(w_self.test) def If_set_test(space, w_self, w_new_value): @@ -3764,7 +3772,7 @@ def If_get_body(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3780,7 +3788,7 @@ def If_get_orelse(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3828,7 +3836,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'context_expr') + raise_attriberr(space, w_self, 'context_expr') return space.wrap(w_self.context_expr) def With_set_context_expr(space, w_self, w_new_value): @@ -3851,7 +3859,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'optional_vars') + raise_attriberr(space, w_self, 'optional_vars') return space.wrap(w_self.optional_vars) def With_set_optional_vars(space, w_self, w_new_value): @@ -3870,7 +3878,7 @@ def With_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3917,7 +3925,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'type') + raise_attriberr(space, w_self, 'type') return space.wrap(w_self.type) def Raise_set_type(space, w_self, w_new_value): @@ -3940,7 +3948,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'inst') + raise_attriberr(space, w_self, 'inst') return space.wrap(w_self.inst) def Raise_set_inst(space, w_self, w_new_value): @@ -3963,7 +3971,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'tback') + raise_attriberr(space, w_self, 'tback') return space.wrap(w_self.tback) def Raise_set_tback(space, w_self, w_new_value): @@ -4008,7 +4016,7 @@ def TryExcept_get_body(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -4024,7 +4032,7 @@ def TryExcept_get_handlers(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'handlers') + raise_attriberr(space, w_self, 'handlers') if w_self.w_handlers is None: if w_self.handlers is None: list_w = [] @@ -4040,7 +4048,7 @@ def TryExcept_get_orelse(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') if w_self.w_orelse is None: if 
w_self.orelse is None: list_w = [] @@ -4085,7 +4093,7 @@ def TryFinally_get_body(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -4101,7 +4109,7 @@ def TryFinally_get_finalbody(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'finalbody') + raise_attriberr(space, w_self, 'finalbody') if w_self.w_finalbody is None: if w_self.finalbody is None: list_w = [] @@ -4148,7 +4156,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return space.wrap(w_self.test) def Assert_set_test(space, w_self, w_new_value): @@ -4171,7 +4179,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'msg') + raise_attriberr(space, w_self, 'msg') return space.wrap(w_self.msg) def Assert_set_msg(space, w_self, w_new_value): @@ -4215,7 +4223,7 @@ def Import_get_names(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') + raise_attriberr(space, w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4260,7 +4268,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'module') + raise_attriberr(space, w_self, 'module') return space.wrap(w_self.module) def ImportFrom_set_module(space, w_self, w_new_value): @@ -4280,7 +4288,7 @@ def ImportFrom_get_names(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') + raise_attriberr(space, w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4300,7 +4308,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'level') + raise_attriberr(space, w_self, 'level') return space.wrap(w_self.level) def ImportFrom_set_level(space, w_self, w_new_value): @@ -4348,7 +4356,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def Exec_set_body(space, w_self, w_new_value): @@ -4371,7 +4379,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'globals') + raise_attriberr(space, w_self, 'globals') return space.wrap(w_self.globals) def Exec_set_globals(space, w_self, w_new_value): @@ -4394,7 +4402,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'locals') + raise_attriberr(space, w_self, 'locals') return space.wrap(w_self.locals) def Exec_set_locals(space, w_self, w_new_value): 
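# A small self-contained sketch of the refactoring applied to the generated
# ast.py in this series of hunks: the repeated
#     raise operationerrfmt(space.w_AttributeError,
#                           "'%T' object has no attribute '%s'", w_self, 'body')
# lines collapse into calls to a single module-level helper, raise_attriberr(),
# whose definition appears at the top of the ast.py diff.  The classes below
# are toy stand-ins (no 'space' argument), not PyPy's objspace.

class ToyAttributeError(Exception):
    pass

def toy_raise_attriberr(w_obj, name):
    # Mirrors the helper added by the diff, with the formatting done inline.
    raise ToyAttributeError("'%s' object has no attribute '%s'"
                            % (type(w_obj).__name__, name))

class Module(object):
    def __init__(self):
        self.initialization_state = 0
        self.body = None

    def get_body(self):
        # Generated accessors now delegate the error to the shared helper
        # instead of repeating the full raise at every field.
        if not self.initialization_state & 1:
            toy_raise_attriberr(self, 'body')
        return self.body

if __name__ == '__main__':
    try:
        Module().get_body()
    except ToyAttributeError as e:
        assert "'Module' object has no attribute 'body'" in str(e)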
@@ -4439,7 +4447,7 @@ def Global_get_names(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') + raise_attriberr(space, w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4484,7 +4492,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Expr_set_value(space, w_self, w_new_value): @@ -4582,7 +4590,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') + raise_attriberr(space, w_self, 'lineno') return space.wrap(w_self.lineno) def expr_set_lineno(space, w_self, w_new_value): @@ -4603,7 +4611,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') + raise_attriberr(space, w_self, 'col_offset') return space.wrap(w_self.col_offset) def expr_set_col_offset(space, w_self, w_new_value): @@ -4633,7 +4641,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return boolop_to_class[w_self.op - 1]() def BoolOp_set_op(space, w_self, w_new_value): @@ -4652,7 +4660,7 @@ def BoolOp_get_values(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') + raise_attriberr(space, w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -4698,7 +4706,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'left') + raise_attriberr(space, w_self, 'left') return space.wrap(w_self.left) def BinOp_set_left(space, w_self, w_new_value): @@ -4721,7 +4729,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return operator_to_class[w_self.op - 1]() def BinOp_set_op(space, w_self, w_new_value): @@ -4744,7 +4752,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'right') + raise_attriberr(space, w_self, 'right') return space.wrap(w_self.right) def BinOp_set_right(space, w_self, w_new_value): @@ -4793,7 +4801,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return unaryop_to_class[w_self.op - 1]() def UnaryOp_set_op(space, w_self, w_new_value): @@ -4816,7 +4824,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'operand') + raise_attriberr(space, w_self, 'operand') return space.wrap(w_self.operand) def UnaryOp_set_operand(space, w_self, w_new_value): @@ 
-4864,7 +4872,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') return space.wrap(w_self.args) def Lambda_set_args(space, w_self, w_new_value): @@ -4885,7 +4893,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def Lambda_set_body(space, w_self, w_new_value): @@ -4933,7 +4941,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return space.wrap(w_self.test) def IfExp_set_test(space, w_self, w_new_value): @@ -4956,7 +4964,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def IfExp_set_body(space, w_self, w_new_value): @@ -4979,7 +4987,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') return space.wrap(w_self.orelse) def IfExp_set_orelse(space, w_self, w_new_value): @@ -5024,7 +5032,7 @@ def Dict_get_keys(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'keys') + raise_attriberr(space, w_self, 'keys') if w_self.w_keys is None: if w_self.keys is None: list_w = [] @@ -5040,7 +5048,7 @@ def Dict_get_values(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') + raise_attriberr(space, w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -5083,7 +5091,7 @@ def Set_get_elts(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') + raise_attriberr(space, w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -5128,7 +5136,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') + raise_attriberr(space, w_self, 'elt') return space.wrap(w_self.elt) def ListComp_set_elt(space, w_self, w_new_value): @@ -5147,7 +5155,7 @@ def ListComp_get_generators(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5193,7 +5201,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') + raise_attriberr(space, w_self, 'elt') return space.wrap(w_self.elt) def SetComp_set_elt(space, w_self, w_new_value): @@ -5212,7 +5220,7 @@ def SetComp_get_generators(space, w_self): if not 
w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5258,7 +5266,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'key') + raise_attriberr(space, w_self, 'key') return space.wrap(w_self.key) def DictComp_set_key(space, w_self, w_new_value): @@ -5281,7 +5289,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def DictComp_set_value(space, w_self, w_new_value): @@ -5300,7 +5308,7 @@ def DictComp_get_generators(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5347,7 +5355,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') + raise_attriberr(space, w_self, 'elt') return space.wrap(w_self.elt) def GeneratorExp_set_elt(space, w_self, w_new_value): @@ -5366,7 +5374,7 @@ def GeneratorExp_get_generators(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5412,7 +5420,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Yield_set_value(space, w_self, w_new_value): @@ -5459,7 +5467,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'left') + raise_attriberr(space, w_self, 'left') return space.wrap(w_self.left) def Compare_set_left(space, w_self, w_new_value): @@ -5478,7 +5486,7 @@ def Compare_get_ops(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ops') + raise_attriberr(space, w_self, 'ops') if w_self.w_ops is None: if w_self.ops is None: list_w = [] @@ -5494,7 +5502,7 @@ def Compare_get_comparators(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'comparators') + raise_attriberr(space, w_self, 'comparators') if w_self.w_comparators is None: if w_self.comparators is None: list_w = [] @@ -5542,7 +5550,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'func') + raise_attriberr(space, w_self, 'func') return space.wrap(w_self.func) def Call_set_func(space, w_self, w_new_value): @@ -5561,7 +5569,7 @@ def Call_get_args(space, w_self): if 
not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') if w_self.w_args is None: if w_self.args is None: list_w = [] @@ -5577,7 +5585,7 @@ def Call_get_keywords(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'keywords') + raise_attriberr(space, w_self, 'keywords') if w_self.w_keywords is None: if w_self.keywords is None: list_w = [] @@ -5597,7 +5605,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'starargs') + raise_attriberr(space, w_self, 'starargs') return space.wrap(w_self.starargs) def Call_set_starargs(space, w_self, w_new_value): @@ -5620,7 +5628,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 64: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'kwargs') + raise_attriberr(space, w_self, 'kwargs') return space.wrap(w_self.kwargs) def Call_set_kwargs(space, w_self, w_new_value): @@ -5673,7 +5681,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Repr_set_value(space, w_self, w_new_value): @@ -5720,7 +5728,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'n') + raise_attriberr(space, w_self, 'n') return w_self.n def Num_set_n(space, w_self, w_new_value): @@ -5765,7 +5773,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 's') + raise_attriberr(space, w_self, 's') return w_self.s def Str_set_s(space, w_self, w_new_value): @@ -5810,7 +5818,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Attribute_set_value(space, w_self, w_new_value): @@ -5833,7 +5841,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'attr') + raise_attriberr(space, w_self, 'attr') return space.wrap(w_self.attr) def Attribute_set_attr(space, w_self, w_new_value): @@ -5854,7 +5862,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Attribute_set_ctx(space, w_self, w_new_value): @@ -5903,7 +5911,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Subscript_set_value(space, w_self, w_new_value): @@ -5926,7 +5934,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise 
operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'slice') + raise_attriberr(space, w_self, 'slice') return space.wrap(w_self.slice) def Subscript_set_slice(space, w_self, w_new_value): @@ -5949,7 +5957,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Subscript_set_ctx(space, w_self, w_new_value): @@ -5998,7 +6006,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'id') + raise_attriberr(space, w_self, 'id') return space.wrap(w_self.id) def Name_set_id(space, w_self, w_new_value): @@ -6019,7 +6027,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Name_set_ctx(space, w_self, w_new_value): @@ -6063,7 +6071,7 @@ def List_get_elts(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') + raise_attriberr(space, w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -6083,7 +6091,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def List_set_ctx(space, w_self, w_new_value): @@ -6128,7 +6136,7 @@ def Tuple_get_elts(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') + raise_attriberr(space, w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -6148,7 +6156,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Tuple_set_ctx(space, w_self, w_new_value): @@ -6197,7 +6205,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return w_self.value def Const_set_value(space, w_self, w_new_value): @@ -6315,7 +6323,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lower') + raise_attriberr(space, w_self, 'lower') return space.wrap(w_self.lower) def Slice_set_lower(space, w_self, w_new_value): @@ -6338,7 +6346,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'upper') + raise_attriberr(space, w_self, 'upper') return space.wrap(w_self.upper) def Slice_set_upper(space, w_self, w_new_value): @@ -6361,7 +6369,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise 
operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'step') + raise_attriberr(space, w_self, 'step') return space.wrap(w_self.step) def Slice_set_step(space, w_self, w_new_value): @@ -6406,7 +6414,7 @@ def ExtSlice_get_dims(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'dims') + raise_attriberr(space, w_self, 'dims') if w_self.w_dims is None: if w_self.dims is None: list_w = [] @@ -6451,7 +6459,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Index_set_value(space, w_self, w_new_value): @@ -6722,7 +6730,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') + raise_attriberr(space, w_self, 'target') return space.wrap(w_self.target) def comprehension_set_target(space, w_self, w_new_value): @@ -6745,7 +6753,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'iter') + raise_attriberr(space, w_self, 'iter') return space.wrap(w_self.iter) def comprehension_set_iter(space, w_self, w_new_value): @@ -6764,7 +6772,7 @@ def comprehension_get_ifs(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ifs') + raise_attriberr(space, w_self, 'ifs') if w_self.w_ifs is None: if w_self.ifs is None: list_w = [] @@ -6811,7 +6819,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') + raise_attriberr(space, w_self, 'lineno') return space.wrap(w_self.lineno) def excepthandler_set_lineno(space, w_self, w_new_value): @@ -6832,7 +6840,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') + raise_attriberr(space, w_self, 'col_offset') return space.wrap(w_self.col_offset) def excepthandler_set_col_offset(space, w_self, w_new_value): @@ -6862,7 +6870,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'type') + raise_attriberr(space, w_self, 'type') return space.wrap(w_self.type) def ExceptHandler_set_type(space, w_self, w_new_value): @@ -6885,7 +6893,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') return space.wrap(w_self.name) def ExceptHandler_set_name(space, w_self, w_new_value): @@ -6904,7 +6912,7 @@ def ExceptHandler_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -6947,7 +6955,7 @@ def arguments_get_args(space, w_self): if not w_self.initialization_state & 1: - 
raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') if w_self.w_args is None: if w_self.args is None: list_w = [] @@ -6967,7 +6975,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'vararg') + raise_attriberr(space, w_self, 'vararg') return space.wrap(w_self.vararg) def arguments_set_vararg(space, w_self, w_new_value): @@ -6991,7 +6999,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'kwarg') + raise_attriberr(space, w_self, 'kwarg') return space.wrap(w_self.kwarg) def arguments_set_kwarg(space, w_self, w_new_value): @@ -7011,7 +7019,7 @@ def arguments_get_defaults(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'defaults') + raise_attriberr(space, w_self, 'defaults') if w_self.w_defaults is None: if w_self.defaults is None: list_w = [] @@ -7060,7 +7068,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'arg') + raise_attriberr(space, w_self, 'arg') return space.wrap(w_self.arg) def keyword_set_arg(space, w_self, w_new_value): @@ -7081,7 +7089,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def keyword_set_value(space, w_self, w_new_value): @@ -7129,7 +7137,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') return space.wrap(w_self.name) def alias_set_name(space, w_self, w_new_value): @@ -7150,7 +7158,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'asname') + raise_attriberr(space, w_self, 'asname') return space.wrap(w_self.asname) def alias_set_asname(space, w_self, w_new_value): diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -409,8 +409,7 @@ self.emit(" if w_obj is not None:", 1) self.emit(" return w_obj", 1) self.emit("if not w_self.initialization_state & %s:" % (flag,), 1) - self.emit("raise operationerrfmt(space.w_AttributeError, \"'%%T' object has no attribute '%%s'\", w_self, '%s')" % - (field.name,), 2) + self.emit("raise_attriberr(space, w_self, '%s')" % (field.name,), 2) if field.seq: self.emit("if w_self.w_%s is None:" % (field.name,), 1) self.emit("if w_self.%s is None:" % (field.name,), 2) @@ -537,14 +536,20 @@ HEAD = """# Generated by tools/asdl_py.py -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter import typedef -from pypy.interpreter.gateway import interp2app -from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.unroll import unrolling_iterable from rpython.tool.pairtype import extendabletype from 
rpython.tool.sourcetools import func_with_new_name +from pypy.interpreter import typedef +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import interp2app + + +def raise_attriberr(space, w_obj, name): + raise oefmt(space.w_AttributeError, + \"'%T' object has no attribute '%s'\", w_obj, name) + def check_string(space, w_obj): if not (space.isinstance_w(w_obj, space.w_str) or @@ -608,11 +613,13 @@ continue # field is optional w_obj = self.getdictvalue(space, missing) if w_obj is None: - err = "required field \\"%s\\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "required field \\"%s\\" missing from %s", + missing, host) else: - err = "incorrect type for field \\"%s\\" in %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "incorrect type for field \\"%s\\" in %s", + missing, host) raise AssertionError("should not reach here") diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -11,8 +11,7 @@ from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag, UserDelAction) -from pypy.interpreter.error import (OperationError, operationerrfmt, - new_exception_class) +from pypy.interpreter.error import OperationError, new_exception_class, oefmt from pypy.interpreter.argument import Arguments from pypy.interpreter.miscutils import ThreadLocals @@ -61,9 +60,9 @@ return False def setdict(self, space, w_dict): - raise operationerrfmt(space.w_TypeError, - "attribute '__dict__' of %T objects " - "is not writable", self) + raise oefmt(space.w_TypeError, + "attribute '__dict__' of %T objects is not writable", + self) # to be used directly only by space.type implementations def getclass(self, space): @@ -123,8 +122,8 @@ classname = '?' 
else: classname = wrappable_class_name(RequiredClass) - msg = "'%s' object expected, got '%T' instead" - raise operationerrfmt(space.w_TypeError, msg, classname, self) + raise oefmt(space.w_TypeError, + "'%s' object expected, got '%T' instead", classname, self) # used by _weakref implemenation @@ -132,8 +131,8 @@ return None def setweakref(self, space, weakreflifeline): - raise operationerrfmt(space.w_TypeError, - "cannot create weak reference to '%T' object", self) + raise oefmt(space.w_TypeError, + "cannot create weak reference to '%T' object", self) def delweakref(self): pass @@ -215,25 +214,25 @@ self._typed_unwrap_error(space, "integer") def _typed_unwrap_error(self, space, expected): - raise operationerrfmt(space.w_TypeError, "expected %s, got %T object", - expected, self) + raise oefmt(space.w_TypeError, + "expected %s, got %T object", expected, self) def int(self, space): w_impl = space.lookup(self, '__int__') if w_impl is None: - raise operationerrfmt(space.w_TypeError, - "unsupported operand type for int(): '%T'", self) + raise oefmt(space.w_TypeError, + "unsupported operand type for int(): '%T'", self) w_result = space.get_and_call_function(w_impl, self) if (space.isinstance_w(w_result, space.w_int) or space.isinstance_w(w_result, space.w_long)): return w_result - msg = "__int__ returned non-int (type '%T')" - raise operationerrfmt(space.w_TypeError, msg, w_result) + raise oefmt(space.w_TypeError, + "__int__ returned non-int (type '%T')", w_result) def ord(self, space): - msg = "ord() expected string of length 1, but %T found" - raise operationerrfmt(space.w_TypeError, msg, self) + raise oefmt(space.w_TypeError, + "ord() expected string of length 1, but %T found", self) def __spacebind__(self, space): return self @@ -430,10 +429,9 @@ try: w_mod = self.builtin_modules[name] except KeyError: - raise operationerrfmt( - self.w_SystemError, - "getbuiltinmodule() called " - "with non-builtin module %s", name) + raise oefmt(self.w_SystemError, + "getbuiltinmodule() called with non-builtin module %s", + name) else: # Add the module to sys.modules self.setitem(w_modules, w_name, w_mod) @@ -753,9 +751,10 @@ if can_be_None and self.is_none(w_obj): return None if not isinstance(w_obj, RequiredClass): # or obj is None - msg = "'%s' object expected, got '%N' instead" - raise operationerrfmt(self.w_TypeError, msg, - wrappable_class_name(RequiredClass), w_obj.getclass(self)) + raise oefmt(self.w_TypeError, + "'%s' object expected, got '%N' instead", + wrappable_class_name(RequiredClass), + w_obj.getclass(self)) return w_obj interp_w._annspecialcase_ = 'specialize:arg(1)' @@ -832,13 +831,9 @@ items[idx] = w_item idx += 1 if idx < expected_length: - if idx == 1: - plural = "" - else: - plural = "s" - raise operationerrfmt(self.w_ValueError, - "need more than %d value%s to unpack", - idx, plural) + raise oefmt(self.w_ValueError, + "need more than %d value%s to unpack", + idx, "" if idx == 1 else "s") return items def unpackiterable_unroll(self, w_iterable, expected_length): @@ -1257,8 +1252,8 @@ except OperationError, err: if objdescr is None or not err.match(self, self.w_TypeError): raise - msg = "%s must be an integer, not %T" - raise operationerrfmt(self.w_TypeError, msg, objdescr, w_obj) + raise oefmt(self.w_TypeError, "%s must be an integer, not %T", + objdescr, w_obj) try: index = self.int_w(w_index) except OperationError, err: @@ -1271,9 +1266,9 @@ else: return sys.maxint else: - raise operationerrfmt( - w_exception, "cannot fit '%T' into an index-sized integer", - w_obj) + raise 
oefmt(w_exception, + "cannot fit '%T' into an index-sized integer", + w_obj) else: return index @@ -1517,9 +1512,9 @@ ) fd = self.int_w(w_fd) if fd < 0: - raise operationerrfmt(self.w_ValueError, - "file descriptor cannot be a negative integer (%d)", fd - ) + raise oefmt(self.w_ValueError, + "file descriptor cannot be a negative integer (%d)", + fd) return fd def warn(self, w_msg, w_warningcls, stacklevel=2): diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -226,9 +226,9 @@ def _exception_getclass(self, space, w_inst): w_type = space.exception_getclass(w_inst) if not space.exception_is_valid_class_w(w_type): - msg = ("exceptions must be old-style classes or derived " - "from BaseException, not %N") - raise operationerrfmt(space.w_TypeError, msg, w_type) + raise oefmt(space.w_TypeError, + "exceptions must be old-style classes or derived from " + "BaseException, not %N", w_type) return w_type def write_unraisable(self, space, where, w_object=None, @@ -383,15 +383,16 @@ self._w_value = w_value = space.wrap(self._value) return w_value -def get_operationerr_class(valuefmt): + at specialize.memo() +def get_operr_class(valuefmt): try: result = _fmtcache[valuefmt] except KeyError: result = _fmtcache[valuefmt] = get_operrcls2(valuefmt) return result -get_operationerr_class._annspecialcase_ = 'specialize:memo' -def operationerrfmt(w_type, valuefmt, *args): + at specialize.arg(1) +def oefmt(w_type, valuefmt, *args): """Equivalent to OperationError(w_type, space.wrap(valuefmt % args)). More efficient in the (common) case where the value is not actually needed. @@ -405,9 +406,8 @@ """ if not len(args): return OpErrFmtNoArgs(w_type, valuefmt) - OpErrFmt, strings = get_operationerr_class(valuefmt) + OpErrFmt, strings = get_operr_class(valuefmt) return OpErrFmt(w_type, strings, *args) -operationerrfmt._annspecialcase_ = 'specialize:arg(1)' # ____________________________________________________________ diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -7,8 +7,8 @@ """ from rpython.rlib.unroll import unrolling_iterable -from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.eval import Code from pypy.interpreter.argument import Arguments from rpython.rlib import jit @@ -413,9 +413,9 @@ if self.closure: closure_len = len(self.closure) if isinstance(code, PyCode) and closure_len != len(code.co_freevars): - raise operationerrfmt(space.w_ValueError, - "%N() requires a code object with %d free vars, not %d", - self, closure_len, len(code.co_freevars)) + raise oefmt(space.w_ValueError, + "%N() requires a code object with %d free vars, not " + "%d", self, closure_len, len(code.co_freevars)) self.fget_func_doc(space) # see test_issue1293 self.code = code @@ -495,10 +495,9 @@ instdescr = instname + " instance" else: instdescr = "instance" - msg = ("unbound method %N() must be called with %s " - "as first argument (got %s instead)") - raise operationerrfmt(space.w_TypeError, msg, - self, clsdescr, instdescr) + raise oefmt(space.w_TypeError, + "unbound method %N() must be called with %s as first " + "argument (got %s instead)", self, clsdescr, instdescr) return space.call_args(self.w_function, args) def descr_method_get(self, w_obj, w_cls=None): diff --git a/pypy/interpreter/pyframe.py 
b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -12,7 +12,7 @@ from pypy.interpreter.argument import Arguments from pypy.interpreter.astcompiler import consts from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.executioncontext import ExecutionContext from pypy.interpreter.nestedscope import Cell from pypy.tool import stdlib_opcode @@ -622,8 +622,8 @@ line = self.pycode.co_firstlineno if new_lineno < line: - raise operationerrfmt(space.w_ValueError, - "line %d comes before the current code.", new_lineno) + raise oefmt(space.w_ValueError, + "line %d comes before the current code.", new_lineno) elif new_lineno == line: new_lasti = 0 else: @@ -639,8 +639,8 @@ break if new_lasti == -1: - raise operationerrfmt(space.w_ValueError, - "line %d comes after the current code.", new_lineno) + raise oefmt(space.w_ValueError, + "line %d comes after the current code.", new_lineno) # Don't jump to a line with an except in it. code = self.pycode.co_code @@ -687,9 +687,9 @@ assert len(blockstack) == 0 if new_lasti_setup_addr != f_lasti_setup_addr: - raise operationerrfmt(space.w_ValueError, - "can't jump into or out of a 'finally' block %d -> %d", - f_lasti_setup_addr, new_lasti_setup_addr) + raise oefmt(space.w_ValueError, + "can't jump into or out of a 'finally' block %d -> %d", + f_lasti_setup_addr, new_lasti_setup_addr) if new_lasti < self.last_instr: min_addr = new_lasti diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -14,7 +14,7 @@ gateway, function, eval, pyframe, pytraceback, pycode ) from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.nestedscope import Cell from pypy.interpreter.pycode import PyCode, BytecodeCorruption from pypy.tool.stdlib_opcode import bytecode_spec @@ -492,8 +492,9 @@ def _load_fast_failed(self, varindex): varname = self.getlocalvarname(varindex) - message = "local variable '%s' referenced before assignment" - raise operationerrfmt(self.space.w_UnboundLocalError, message, varname) + raise oefmt(self.space.w_UnboundLocalError, + "local variable '%s' referenced before assignment", + varname) _load_fast_failed._dont_inline_ = True def LOAD_CONST(self, constindex, next_instr): @@ -848,9 +849,8 @@ # catch KeyErrors and turn them into NameErrors if not e.match(self.space, self.space.w_KeyError): raise - message = "name '%s' is not defined" - raise operationerrfmt(self.space.w_NameError, message, - self.space.str_w(w_varname)) + raise oefmt(self.space.w_NameError, "name '%s' is not defined", + self.space.str_w(w_varname)) def UNPACK_SEQUENCE(self, itemcount, next_instr): w_iterable = self.popvalue() @@ -899,8 +899,8 @@ _load_global._always_inline_ = True def _load_global_failed(self, varname): - message = "global name '%s' is not defined" - raise operationerrfmt(self.space.w_NameError, message, varname) + raise oefmt(self.space.w_NameError, + "global name '%s' is not defined", varname) _load_global_failed._dont_inline_ = True def LOAD_GLOBAL(self, nameindex, next_instr): @@ -910,9 +910,9 @@ def DELETE_FAST(self, varindex, next_instr): if self.locals_stack_w[varindex] is None: varname = self.getlocalvarname(varindex) - message = "local variable '%s' 
referenced before assignment" - raise operationerrfmt(self.space.w_UnboundLocalError, message, - varname) + raise oefmt(self.space.w_UnboundLocalError, + "local variable '%s' referenced before assignment", + varname) self.locals_stack_w[varindex] = None def BUILD_TUPLE(self, itemcount, next_instr): @@ -1040,9 +1040,8 @@ except OperationError, e: if not e.match(self.space, self.space.w_AttributeError): raise - raise operationerrfmt(self.space.w_ImportError, - "cannot import name '%s'", - self.space.str_w(w_name)) + raise oefmt(self.space.w_ImportError, + "cannot import name '%s'", self.space.str_w(w_name)) self.pushvalue(w_obj) def YIELD_VALUE(self, oparg, next_instr): @@ -1127,9 +1126,9 @@ w_enter = self.space.lookup(w_manager, "__enter__") w_descr = self.space.lookup(w_manager, "__exit__") if w_enter is None or w_descr is None: - raise operationerrfmt(self.space.w_AttributeError, - "'%T' object is not a context manager" - " (no __enter__/__exit__ method)", w_manager) + raise oefmt(self.space.w_AttributeError, + "'%T' object is not a context manager (no __enter__/" + "__exit__ method)", w_manager) w_exit = self.space.get(w_descr, w_manager) self.settopvalue(w_exit) w_result = self.space.get_and_call_function(w_enter, w_manager) diff --git a/pypy/interpreter/test/test_error.py b/pypy/interpreter/test/test_error.py --- a/pypy/interpreter/test/test_error.py +++ b/pypy/interpreter/test/test_error.py @@ -1,7 +1,7 @@ import py, os, errno -from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.error import decompose_valuefmt, get_operrcls2 -from pypy.interpreter.error import wrap_oserror, new_exception_class +from pypy.interpreter.error import ( + OperationError, decompose_valuefmt, get_operrcls2, new_exception_class, + oefmt, wrap_oserror) def test_decompose_valuefmt(): @@ -22,59 +22,59 @@ assert cls2 is cls # caching assert strings2 == ("a ", " b ", " c") -def test_operationerrfmt(space): - operr = operationerrfmt("w_type", "abc %s def %d", "foo", 42) +def test_oefmt(space): + operr = oefmt("w_type", "abc %s def %d", "foo", 42) assert isinstance(operr, OperationError) assert operr.w_type == "w_type" assert operr._w_value is None assert operr._compute_value(space) == "abc foo def 42" - operr2 = operationerrfmt("w_type2", "a %s b %d c", "bar", 43) + operr2 = oefmt("w_type2", "a %s b %d c", "bar", 43) assert operr2.__class__ is operr.__class__ - operr3 = operationerrfmt("w_type2", "a %s b %s c", "bar", "4b") + operr3 = oefmt("w_type2", "a %s b %s c", "bar", "4b") assert operr3.__class__ is not operr.__class__ -def test_operationerrfmt_noargs(space): - operr = operationerrfmt(space.w_AttributeError, "no attribute 'foo'") +def test_oefmt_noargs(space): + operr = oefmt(space.w_AttributeError, "no attribute 'foo'") operr.normalize_exception(space) val = operr.get_w_value(space) assert space.isinstance_w(val, space.w_AttributeError) w_repr = space.repr(val) assert space.str_w(w_repr) == "AttributeError(\"no attribute 'foo'\",)" -def test_operationerrfmt_T(space): - operr = operationerrfmt(space.w_AttributeError, - "'%T' object has no attribute '%s'", - space.wrap('foo'), 'foo') +def test_oefmt_T(space): + operr = oefmt(space.w_AttributeError, + "'%T' object has no attribute '%s'", + space.wrap('foo'), 'foo') assert operr._compute_value(space) == "'str' object has no attribute 'foo'" - operr = operationerrfmt("w_type", - "'%T' object has no attribute '%s'", - space.wrap('foo'), 'foo') + operr = oefmt("w_type", + "'%T' object has no attribute '%s'", + space.wrap('foo'), 
'foo') assert operr._compute_value(space) == "'str' object has no attribute 'foo'" -def test_operationerrfmt_N(space): - operr = operationerrfmt(space.w_AttributeError, - "'%N' object has no attribute '%s'", - space.type(space.wrap('foo')), 'foo') +def test_oefmt_N(space): + operr = oefmt(space.w_AttributeError, + "'%N' object has no attribute '%s'", + space.type(space.wrap('foo')), 'foo') assert operr._compute_value(space) == "'str' object has no attribute 'foo'" - operr = operationerrfmt("w_type", - "'%N' object has no attribute '%s'", - space.type(space.wrap('foo')), 'foo') + operr = oefmt("w_type", + "'%N' object has no attribute '%s'", + space.type(space.wrap('foo')), 'foo') assert operr._compute_value(space) == "'str' object has no attribute 'foo'" - operr = operationerrfmt(space.w_AttributeError, - "'%N' object has no attribute '%s'", - space.wrap('foo'), 'foo') + operr = oefmt(space.w_AttributeError, + "'%N' object has no attribute '%s'", + space.wrap('foo'), 'foo') assert operr._compute_value(space) == "'?' object has no attribute 'foo'" - operr = operationerrfmt("w_type", - "'%N' object has no attribute '%s'", - space.wrap('foo'), 'foo') + operr = oefmt("w_type", + "'%N' object has no attribute '%s'", + space.wrap('foo'), 'foo') assert operr._compute_value(space) == "'?' object has no attribute 'foo'" -def test_operationerrfmt_R(space): - operr = operationerrfmt(space.w_ValueError, "illegal newline value: %R", - space.wrap('foo')) +def test_oefmt_R(space): + operr = oefmt(space.w_ValueError, + "illegal newline value: %R", space.wrap('foo')) assert operr._compute_value(space) == "illegal newline value: 'foo'" - operr = operationerrfmt(space.w_ValueError, "illegal newline value: %R", - space.wrap("'PyLadies'")) + operr = oefmt(space.w_ValueError, "illegal newline value: %R", + space.wrap("'PyLadies'")) expected = "illegal newline value: \"'PyLadies'\"" assert operr._compute_value(space) == expected diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -2,7 +2,7 @@ from pypy.interpreter.argument import Arguments from pypy.interpreter.baseobjspace import W_Root, DescrMismatch -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import (interp2app, BuiltinCode, unwrap_spec, WrappedDefault) @@ -549,9 +549,9 @@ def typecheck(self, space, w_obj): if not space.isinstance_w(w_obj, self.w_cls): - m = "descriptor '%N' for '%N' objects doesn't apply to '%T' object" - raise operationerrfmt(space.w_TypeError, m, - self, self.w_cls, w_obj) + raise oefmt(space.w_TypeError, + "descriptor '%N' for '%N' objects doesn't apply to " + "'%T' object", self, self.w_cls, w_obj) def descr_member_get(self, space, w_obj, w_cls=None): """member.__get__(obj[, type]) -> value @@ -620,8 +620,9 @@ def descr_get_dict(space, w_obj): w_dict = w_obj.getdict(space) if w_dict is None: - msg = "descriptor '__dict__' doesn't apply to '%T' objects" - raise operationerrfmt(space.w_TypeError, msg, w_obj) + raise oefmt(space.w_TypeError, + "descriptor '__dict__' doesn't apply to '%T' objects", + w_obj) return w_dict def descr_set_dict(space, w_obj, w_dict): diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -1,5 +1,5 @@ import new -from pypy.interpreter.error import OperationError, 
operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.interpreter.baseobjspace import W_Root @@ -10,8 +10,8 @@ def raise_type_err(space, argument, expected, w_obj): - raise operationerrfmt(space.w_TypeError, "argument %s must be %s, not %T", - argument, expected, w_obj) + raise oefmt(space.w_TypeError, + "argument %s must be %s, not %T", argument, expected, w_obj) def unwrap_attr(space, w_attr): try: @@ -126,10 +126,8 @@ return space.newtuple(self.bases_w) w_value = self.lookup(space, name) if w_value is None: - raise operationerrfmt( - space.w_AttributeError, - "class %s has no attribute '%s'", - self.name, name) + raise oefmt(space.w_AttributeError, + "class %s has no attribute '%s'", self.name, name) w_descr_get = space.lookup(w_value, '__get__') if w_descr_get is None: @@ -158,18 +156,15 @@ def descr_delattr(self, space, w_attr): name = unwrap_attr(space, w_attr) if name in ("__dict__", "__name__", "__bases__"): - raise operationerrfmt( - space.w_TypeError, - "cannot delete attribute '%s'", name) + raise oefmt(space.w_TypeError, + "cannot delete attribute '%s'", name) try: space.delitem(self.w_dict, w_attr) except OperationError, e: if not e.match(space, space.w_KeyError): raise - raise operationerrfmt( - space.w_AttributeError, - "class %s has no attribute '%s'", - self.name, name) + raise oefmt(space.w_AttributeError, + "class %s has no attribute '%s'", self.name, name) def descr_repr(self, space): mod = self.get_module_string(space) @@ -362,10 +357,9 @@ raise # not found at all if exc: - raise operationerrfmt( - space.w_AttributeError, - "%s instance has no attribute '%s'", - self.w_class.name, name) + raise oefmt(space.w_AttributeError, + "%s instance has no attribute '%s'", + self.w_class.name, name) else: return None @@ -416,10 +410,9 @@ space.call_function(w_meth, w_name) else: if not self.deldictvalue(space, name): - raise operationerrfmt( - space.w_AttributeError, - "%s instance has no attribute '%s'", - self.w_class.name, name) + raise oefmt(space.w_AttributeError, + "%s instance has no attribute '%s'", + self.w_class.name, name) def descr_repr(self, space): w_meth = self.getattr(space, '__repr__', False) diff --git a/pypy/module/__pypy__/interp_dict.py b/pypy/module/__pypy__/interp_dict.py --- a/pypy/module/__pypy__/interp_dict.py +++ b/pypy/module/__pypy__/interp_dict.py @@ -1,6 +1,6 @@ +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec -from pypy.interpreter.error import operationerrfmt, OperationError from pypy.objspace.std.dictmultiobject import W_DictMultiObject @unwrap_spec(type=str) @@ -30,8 +30,7 @@ elif type == 'strdict': return space.newdict(strdict=True) else: From noreply at buildbot.pypy.org Mon Feb 3 23:52:00 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 3 Feb 2014 23:52:00 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: operationerrfmt -> oefmt Message-ID: <20140203225200.372731C0352@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r69063:3ebdf4080384 Date: 2014-02-03 14:47 -0800 http://bitbucket.org/pypy/pypy/changeset/3ebdf4080384/ Log: operationerrfmt -> oefmt diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -20,7 +20,7 @@ from pypy.interpreter import typedef from pypy.interpreter.baseobjspace 
import W_Root from pypy.interpreter.buffer import Buffer -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import ( WrappedDefault, interp2app, interpindirect2app, unwrap_spec) from pypy.objspace.std import newformat @@ -161,8 +161,8 @@ elif isinstance(w_modulus, W_AbstractIntObject): z = space.int_w(w_modulus) if z == 0: - raise operationerrfmt(space.w_ValueError, - "pow() 3rd argument cannot be 0") + raise oefmt(space.w_ValueError, + "pow() 3rd argument cannot be 0") else: # can't return NotImplemented (space.pow doesn't do full # ternary, i.e. w_modulus.__zpow__(self, w_exponent)), so @@ -307,8 +307,7 @@ try: z = ovfcheck(x // y) except ZeroDivisionError: - raise operationerrfmt(space.w_ZeroDivisionError, - "integer division by zero") + raise oefmt(space.w_ZeroDivisionError, "integer division by zero") return wrapint(space, z) descr_floordiv, descr_rfloordiv = _make_descr_binop(_floordiv) @@ -319,8 +318,7 @@ x = float(space.int_w(self)) y = float(space.int_w(w_other)) if y == 0.0: - raise operationerrfmt(space.w_ZeroDivisionError, - "division by zero") + raise oefmt(space.w_ZeroDivisionError, "division by zero") return space.wrap(x / y) descr_truediv, descr_rtruediv = _make_descr_binop(_truediv, ovf=False) @@ -330,8 +328,7 @@ try: z = ovfcheck(x % y) except ZeroDivisionError: - raise operationerrfmt(space.w_ZeroDivisionError, - "integer modulo by zero") + raise oefmt(space.w_ZeroDivisionError, "integer modulo by zero") return wrapint(space, z) descr_mod, descr_rmod = _make_descr_binop(_mod) @@ -341,8 +338,7 @@ try: z = ovfcheck(x // y) except ZeroDivisionError: - raise operationerrfmt(space.w_ZeroDivisionError, - "integer divmod by zero") + raise oefmt(space.w_ZeroDivisionError, "integer divmod by zero") # no overflow possible m = x % y w = space.wrap @@ -356,7 +352,7 @@ c = ovfcheck(a << b) return wrapint(space, c) if b < 0: - raise operationerrfmt(space.w_ValueError, "negative shift count") + raise oefmt(space.w_ValueError, "negative shift count") # b >= LONG_BIT if a == 0: return self.int(space) @@ -368,8 +364,7 @@ b = space.int_w(w_other) if r_uint(b) >= LONG_BIT: # not (0 <= b < LONG_BIT) if b < 0: - raise operationerrfmt(space.w_ValueError, - "negative shift count") + raise oefmt(space.w_ValueError, "negative shift count") # b >= LONG_BIT if a == 0: return self.int(space) @@ -414,9 +409,8 @@ def uint_w(self, space): intval = self.intval if intval < 0: - raise operationerrfmt(space.w_ValueError, - "cannot convert negative integer to " - "unsigned") + raise oefmt(space.w_ValueError, + "cannot convert negative integer to unsigned") return r_uint(intval) def bigint_w(self, space): @@ -459,9 +453,9 @@ def _pow_impl(space, iv, iw, iz): if iw < 0: if iz != 0: - raise operationerrfmt(space.w_TypeError, - "pow() 2nd argument cannot be negative when " - "3rd argument specified") + raise oefmt(space.w_TypeError, + "pow() 2nd argument cannot be negative when 3rd " + "argument specified") # bounce it, since it always returns float raise ValueError temp = iv @@ -572,9 +566,9 @@ except OperationError as e: if not e.match(space, space.w_TypeError): raise - raise operationerrfmt(space.w_TypeError, - "int() argument must be a string or a number, not '%T'", - w_value) + raise oefmt(space.w_TypeError, + "int() argument must be a string or a number, " + "not '%T'", w_value) else: buf = space.interp_w(Buffer, w_buffer) value, w_longval = _string_to_int_or_long(space, w_value, @@ -590,16 +584,16 @@ 
try: s = space.str_w(w_value) except OperationError as e: - raise operationerrfmt(space.w_TypeError, - "int() can't convert non-string with " - "explicit base") + raise oefmt(space.w_TypeError, + "int() can't convert non-string with explicit " + "base") value, w_longval = _string_to_int_or_long(space, w_value, s, base) if w_longval is not None: if not space.is_w(w_inttype, space.w_int): - raise operationerrfmt(space.w_OverflowError, - "long int too large to convert to int") + raise oefmt(space.w_OverflowError, + "long int too large to convert to int") return w_longval elif space.is_w(w_inttype, space.w_int): # common case diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -10,7 +10,7 @@ from pypy.interpreter import typedef from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import Buffer -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import ( WrappedDefault, interp2app, interpindirect2app, unwrap_spec) from pypy.objspace.std import newformat @@ -78,18 +78,17 @@ try: return space.wrap(bigint.bit_length()) except OverflowError: - raise operationerrfmt(space.w_OverflowError, - "too many digits in integer") + raise oefmt(space.w_OverflowError, "too many digits in integer") def _truediv(self, space, w_other): try: f = self.asbigint().truediv(w_other.asbigint()) except ZeroDivisionError: - raise operationerrfmt(space.w_ZeroDivisionError, - "long division or modulo by zero") + raise oefmt(space.w_ZeroDivisionError, + "long division or modulo by zero") except OverflowError: - raise operationerrfmt(space.w_OverflowError, - "long/long too large for a float") + raise oefmt(space.w_OverflowError, + "long/long too large for a float") return space.newfloat(f) @delegate_other @@ -226,8 +225,8 @@ try: return self.num.tofloat() except OverflowError: - raise operationerrfmt(space.w_OverflowError, - "long int too large to convert to float") + raise oefmt(space.w_OverflowError, + "long int too large to convert to float") def toint(self): return self.num.toint() @@ -249,20 +248,18 @@ try: return self.num.toint() except OverflowError: - raise operationerrfmt(space.w_OverflowError, - "long int too large to convert to int") + raise oefmt(space.w_OverflowError, + "long int too large to convert to int") def uint_w(self, space): try: return self.num.touint() except ValueError: - raise operationerrfmt(space.w_ValueError, - "cannot convert negative integer to " - "unsigned int") + raise oefmt(space.w_ValueError, + "cannot convert negative integer to unsigned int") except OverflowError: - raise operationerrfmt(space.w_OverflowError, - "long int too large to convert to unsigned " - "int") + raise oefmt(space.w_OverflowError, + "long int too large to convert to unsigned int") def bigint_w(self, space): return self.num @@ -318,14 +315,13 @@ return space.w_NotImplemented if w_exponent.asbigint().sign < 0: - raise operationerrfmt(space.w_TypeError, - "pow() 2nd argument cannot be negative when " - "3rd argument specified") + raise oefmt(space.w_TypeError, + "pow() 2nd argument cannot be negative when 3rd " + "argument specified") try: result = self.num.pow(w_exponent.asbigint(), w_modulus.asbigint()) except ValueError: - raise operationerrfmt(space.w_ValueError, - "pow 3rd argument cannot be 0") + raise oefmt(space.w_ValueError, "pow 3rd argument cannot be 0") return 
W_LongObject(result) @unwrap_spec(w_modulus=WrappedDefault(None)) @@ -410,23 +406,21 @@ def _lshift(self, space, w_other): if w_other.asbigint().sign < 0: - raise operationerrfmt(space.w_ValueError, "negative shift count") + raise oefmt(space.w_ValueError, "negative shift count") try: shift = w_other.asbigint().toint() except OverflowError: # b too big - raise operationerrfmt(space.w_OverflowError, - "shift count too large") + raise oefmt(space.w_OverflowError, "shift count too large") return W_LongObject(self.num.lshift(shift)) descr_lshift, descr_rlshift = _make_descr_binop(_lshift) def _rshift(self, space, w_other): if w_other.asbigint().sign < 0: - raise operationerrfmt(space.w_ValueError, "negative shift count") + raise oefmt(space.w_ValueError, "negative shift count") try: shift = w_other.asbigint().toint() except OverflowError: # b too big # XXX maybe just return 0L instead? - raise operationerrfmt(space.w_OverflowError, - "shift count too large") + raise oefmt(space.w_OverflowError, "shift count too large") return newlong(space, self.num.rshift(shift)) descr_rshift, descr_rrshift = _make_descr_binop(_rshift) @@ -434,8 +428,8 @@ try: z = self.num.floordiv(w_other.asbigint()) except ZeroDivisionError: - raise operationerrfmt(space.w_ZeroDivisionError, - "long division or modulo by zero") + raise oefmt(space.w_ZeroDivisionError, + "long division or modulo by zero") return newlong(space, z) descr_floordiv, descr_rfloordiv = _make_descr_binop(_floordiv) @@ -446,8 +440,8 @@ try: z = self.num.mod(w_other.asbigint()) except ZeroDivisionError: - raise operationerrfmt(space.w_ZeroDivisionError, - "long division or modulo by zero") + raise oefmt(space.w_ZeroDivisionError, + "long division or modulo by zero") return newlong(space, z) descr_mod, descr_rmod = _make_descr_binop(_mod) @@ -455,8 +449,8 @@ try: div, mod = self.num.divmod(w_other.asbigint()) except ZeroDivisionError: - raise operationerrfmt(space.w_ZeroDivisionError, - "long division or modulo by zero") + raise oefmt(space.w_ZeroDivisionError, + "long division or modulo by zero") return space.newtuple([newlong(space, div), newlong(space, mod)]) descr_divmod, descr_rdivmod = _make_descr_binop(_divmod) @@ -517,9 +511,9 @@ except OperationError, e: if not e.match(space, space.w_TypeError): raise - raise operationerrfmt(space.w_TypeError, - "long() argument must be a string or a number, not '%T'", - w_value) + raise oefmt(space.w_TypeError, + "long() argument must be a string or a number, " + "not '%T'", w_value) else: buf = space.interp_w(Buffer, w_buffer) return _string_to_w_long(space, w_longtype, w_value, @@ -534,9 +528,9 @@ try: s = space.str_w(w_value) except OperationError: - raise operationerrfmt(space.w_TypeError, - "long() can't convert non-string with " - "explicit base") + raise oefmt(space.w_TypeError, + "long() can't convert non-string with explicit " + "base") return _string_to_w_long(space, w_longtype, w_value, s, base) diff --git a/pypy/objspace/std/smalllongobject.py b/pypy/objspace/std/smalllongobject.py --- a/pypy/objspace/std/smalllongobject.py +++ b/pypy/objspace/std/smalllongobject.py @@ -9,7 +9,7 @@ from rpython.rlib.rbigint import rbigint from rpython.tool.sourcetools import func_renamer, func_with_new_name -from pypy.interpreter.error import operationerrfmt +from pypy.interpreter.error import oefmt from pypy.interpreter.gateway import WrappedDefault, unwrap_spec from pypy.objspace.std.intobject import W_AbstractIntObject from pypy.objspace.std.longobject import W_AbstractLongObject, W_LongObject @@ -49,20 +49,19 
@@ b = intmask(a) if b == a: return b - raise operationerrfmt(space.w_OverflowError, - "long int too large to convert to int") + raise oefmt(space.w_OverflowError, + "long int too large to convert to int") def uint_w(self, space): a = self.longlong if a < 0: - raise operationerrfmt(space.w_ValueError, - "cannot convert negative integer to " - "unsigned int") + raise oefmt(space.w_ValueError, + "cannot convert negative integer to unsigned int") b = r_uint(a) if r_longlong(b) == a: return b - raise operationerrfmt(space.w_OverflowError, - "long int too large to convert to unsigned int") + raise oefmt(space.w_OverflowError, + "long int too large to convert to unsigned int") def bigint_w(self, space): return self.asbigint() @@ -133,8 +132,7 @@ z = w_modulus.longlong if z == 0: - raise operationerrfmt(space.w_ValueError, - "pow() 3rd argument cannot be 0") + raise oefmt(space.w_ValueError, "pow() 3rd argument cannot be 0") try: return _pow_impl(space, self.longlong, w_exponent, z) except ValueError: @@ -260,8 +258,7 @@ raise OverflowError z = x // y except ZeroDivisionError: - raise operationerrfmt(space.w_ZeroDivisionError, - "integer division by zero") + raise oefmt(space.w_ZeroDivisionError, "integer division by zero") return W_SmallLongObject(z) descr_floordiv, descr_rfloordiv = _make_descr_binop(_floordiv) @@ -276,8 +273,7 @@ raise OverflowError z = x % y except ZeroDivisionError: - raise operationerrfmt(space.w_ZeroDivisionError, - "integer modulo by zero") + raise oefmt(space.w_ZeroDivisionError, "integer modulo by zero") return W_SmallLongObject(z) descr_mod, descr_rmod = _make_descr_binop(_mod) @@ -289,8 +285,7 @@ raise OverflowError z = x // y except ZeroDivisionError: - raise operationerrfmt(space.w_ZeroDivisionError, - "integer divmod by zero") + raise oefmt(space.w_ZeroDivisionError, "integer divmod by zero") # no overflow possible m = x % y return space.newtuple([W_SmallLongObject(z), W_SmallLongObject(m)]) @@ -306,7 +301,7 @@ raise OverflowError return W_SmallLongObject(c) if b < 0: - raise operationerrfmt(space.w_ValueError, "negative shift count") + raise oefmt(space.w_ValueError, "negative shift count") # b >= LONGLONG_BIT if a == 0: return self @@ -319,8 +314,7 @@ b = space.int_w(w_other) if r_uint(b) >= LONGLONG_BIT: # not (0 <= b < LONGLONG_BIT) if b < 0: - raise operationerrfmt(space.w_ValueError, - "negative shift count") + raise oefmt(space.w_ValueError, "negative shift count") # b >= LONGLONG_BIT if a == 0: return self @@ -398,9 +392,9 @@ iw = space.int_w(w_int2) if iw < 0: if iz != 0: - raise operationerrfmt(space.w_TypeError, - "pow() 2nd argument cannot be negative when " - "3rd argument specified") + raise oefmt(space.w_TypeError, + "pow() 2nd argument cannot be negative when 3rd " + "argument specified") raise ValueError temp = iv ix = r_longlong(1) From noreply at buildbot.pypy.org Mon Feb 3 23:52:01 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 3 Feb 2014 23:52:01 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140203225201.E7CEE1C0352@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69064:bf137acef691 Date: 2014-02-03 14:13 -0800 http://bitbucket.org/pypy/pypy/changeset/bf137acef691/ Log: merge default diff too long, truncating to 2000 out of 5590 lines diff --git a/dotviewer/graphdisplay.py b/dotviewer/graphdisplay.py --- a/dotviewer/graphdisplay.py +++ b/dotviewer/graphdisplay.py @@ -136,6 +136,7 @@ Click on objects to move around Drag with the left mouse button to zoom in/out Drag with the 
right mouse button to scroll + Use scroll wheel do scroll up or down """.replace('\n ', '\n').strip() # poor man's dedent @@ -613,6 +614,19 @@ def process_MouseButtonUp(self, event): self.dragging = None pygame.event.set_grab(False) + # handle directional scrolling + if event.button == 4: + self.pan((0, -1)) + return + if event.button == 5: + self.pan((0, 1)) + return + if event.button == 6: + self.pan((-1, 0)) + return + if event.button == 7: + self.pan((1, 0)) + return if self.click_time is not None and abs(time.time() - self.click_time) < 1: # click (no significant dragging) self.notifyclick(self.click_origin) diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -1,12 +1,12 @@ """ Arguments objects. """ - -from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.debug import make_sure_not_resized from rpython.rlib import jit from rpython.rlib.objectmodel import enforceargs +from pypy.interpreter.error import OperationError, oefmt + class Arguments(object): """ @@ -88,9 +88,9 @@ args_w = space.fixedview(w_stararg) except OperationError, e: if e.match(space, space.w_TypeError): - raise operationerrfmt( - space.w_TypeError, - "argument after * must be a sequence, not %T", w_stararg) + raise oefmt(space.w_TypeError, + "argument after * must be a sequence, not %T", + w_stararg) raise self.arguments_w = self.arguments_w + args_w @@ -115,10 +115,9 @@ w_keys = space.call_method(w_starstararg, "keys") except OperationError, e: if e.match(space, space.w_AttributeError): - raise operationerrfmt( - space.w_TypeError, - "argument after ** must be a mapping, not %T", - w_starstararg) + raise oefmt(space.w_TypeError, + "argument after ** must be a mapping, not %T", + w_starstararg) raise keys_w = space.unpackiterable(w_keys) keywords_w = [None] * len(keys_w) @@ -309,8 +308,7 @@ scope_w, signature, defaults_w, w_kw_defs, 0) except ArgErr, e: - raise operationerrfmt(self.space.w_TypeError, - "%s() %8", fnname, e.getmsg()) + raise oefmt(self.space.w_TypeError, "%s() %8", fnname, e.getmsg()) return signature.scope_length() def _parse(self, w_firstarg, signature, defaults_w, w_kw_defs, blindargs=0): @@ -334,8 +332,7 @@ return self._parse(w_firstarg, signature, defaults_w, w_kw_defs, blindargs) except ArgErr, e: - raise operationerrfmt(self.space.w_TypeError, - "%s() %8", fnname, e.getmsg()) + raise oefmt(self.space.w_TypeError, "%s() %8", fnname, e.getmsg()) @staticmethod def frompacked(space, w_args=None, w_kwds=None): @@ -374,10 +371,9 @@ for key in keywords: for otherkey in existingkeywords: if otherkey == key: - raise operationerrfmt(space.w_TypeError, - "got multiple values " - "for keyword argument " - "'%s'", key) + raise oefmt(space.w_TypeError, + "got multiple values for keyword argument '%s'", + key) def _do_combine_starstarargs_wrapped(space, keys_w, w_starstararg, keywords, keywords_w, existingkeywords): @@ -397,10 +393,9 @@ raise else: if existingkeywords and key in existingkeywords: - raise operationerrfmt(space.w_TypeError, - "got multiple values " - "for keyword argument " - "'%s'", key) + raise oefmt(space.w_TypeError, + "got multiple values for keyword argument '%s'", + key) keywords[i] = key keywords_w[i] = space.getitem(w_starstararg, w_key) i += 1 diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -1,12 +1,18 @@ # Generated by tools/asdl_py.py 
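# Illustrative aside, not part of the archived diff: the ast.py hunk that
# follows replaces every generated getter's inline
#     raise oefmt(space.w_AttributeError, "'%T' object has no attribute '%s'", ...)
# with one shared raise_attriberr() helper.  The snippet below is a
# self-contained toy of that pattern; ToySpace and ToyOperationError are
# invented stand-ins, and the %T code (type name of a wrapped object) is
# approximated with a plain %s.

class ToyOperationError(Exception):
    """Stand-in for pypy.interpreter.error.OperationError."""

class ToySpace(object):
    w_AttributeError = AttributeError

def oefmt(w_type, fmt, *args):
    # stand-in for pypy.interpreter.error.oefmt: it returns the exception
    # instance, so callers write `raise oefmt(...)` as in the diff
    return ToyOperationError(w_type, fmt % args)

def raise_attriberr(space, w_obj, name):
    # same shape as the helper the hunk adds to the generated ast.py
    raise oefmt(space.w_AttributeError,
                "'%s' object has no attribute '%s'",
                type(w_obj).__name__, name)

class ToyModuleNode(object):
    initialization_state = 0
    def get_body(self, space):
        if not self.initialization_state & 1:   # field was never set
            raise_attriberr(space, self, 'body')

try:
    ToyModuleNode().get_body(ToySpace())
except ToyOperationError as exc:
    assert "has no attribute 'body'" in exc.args[1]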
-from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter import typedef -from pypy.interpreter.gateway import interp2app -from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.unroll import unrolling_iterable from rpython.tool.pairtype import extendabletype from rpython.tool.sourcetools import func_with_new_name +from pypy.interpreter import typedef +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import interp2app + + +def raise_attriberr(space, w_obj, name): + raise oefmt(space.w_AttributeError, + "'%T' object has no attribute '%s'", w_obj, name) + def check_string(space, w_obj): if not (space.isinstance_w(w_obj, space.w_str) or @@ -70,11 +76,13 @@ continue # field is optional w_obj = self.getdictvalue(space, missing) if w_obj is None: - err = "required field \"%s\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "required field \"%s\" missing from %s", + missing, host) else: - err = "incorrect type for field \"%s\" in %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "incorrect type for field \"%s\" in %s", + missing, host) raise AssertionError("should not reach here") @@ -2872,7 +2880,7 @@ def Module_get_body(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2913,7 +2921,7 @@ def Interactive_get_body(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2958,7 +2966,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def Expression_set_body(space, w_self, w_new_value): @@ -3001,7 +3009,7 @@ def Suite_get_body(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3046,7 +3054,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') + raise_attriberr(space, w_self, 'lineno') return space.wrap(w_self.lineno) def stmt_set_lineno(space, w_self, w_new_value): @@ -3067,7 +3075,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') + raise_attriberr(space, w_self, 'col_offset') return space.wrap(w_self.col_offset) def stmt_set_col_offset(space, w_self, w_new_value): @@ -3097,7 +3105,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') if w_self.name is None: return space.w_None return 
space.wrap(w_self.name.decode('utf-8')) @@ -3120,7 +3128,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') return space.wrap(w_self.args) def FunctionDef_set_args(space, w_self, w_new_value): @@ -3137,7 +3145,7 @@ def FunctionDef_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3153,7 +3161,7 @@ def FunctionDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'decorator_list') + raise_attriberr(space, w_self, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: list_w = [] @@ -3173,7 +3181,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 64: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'returns') + raise_attriberr(space, w_self, 'returns') return space.wrap(w_self.returns) def FunctionDef_set_returns(space, w_self, w_new_value): @@ -3226,7 +3234,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') if w_self.name is None: return space.w_None return space.wrap(w_self.name.decode('utf-8')) @@ -3245,7 +3253,7 @@ def ClassDef_get_bases(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'bases') + raise_attriberr(space, w_self, 'bases') if w_self.w_bases is None: if w_self.bases is None: list_w = [] @@ -3261,7 +3269,7 @@ def ClassDef_get_keywords(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'keywords') + raise_attriberr(space, w_self, 'keywords') if w_self.w_keywords is None: if w_self.keywords is None: list_w = [] @@ -3281,7 +3289,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'starargs') + raise_attriberr(space, w_self, 'starargs') return space.wrap(w_self.starargs) def ClassDef_set_starargs(space, w_self, w_new_value): @@ -3304,7 +3312,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 64: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'kwargs') + raise_attriberr(space, w_self, 'kwargs') return space.wrap(w_self.kwargs) def ClassDef_set_kwargs(space, w_self, w_new_value): @@ -3323,7 +3331,7 @@ def ClassDef_get_body(space, w_self): if not w_self.initialization_state & 128: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3339,7 +3347,7 @@ def ClassDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 256: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'decorator_list') + raise_attriberr(space, 
w_self, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: list_w = [] @@ -3393,7 +3401,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Return_set_value(space, w_self, w_new_value): @@ -3436,7 +3444,7 @@ def Delete_get_targets(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'targets') + raise_attriberr(space, w_self, 'targets') if w_self.w_targets is None: if w_self.targets is None: list_w = [] @@ -3477,7 +3485,7 @@ def Assign_get_targets(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'targets') + raise_attriberr(space, w_self, 'targets') if w_self.w_targets is None: if w_self.targets is None: list_w = [] @@ -3497,7 +3505,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Assign_set_value(space, w_self, w_new_value): @@ -3546,7 +3554,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') + raise_attriberr(space, w_self, 'target') return space.wrap(w_self.target) def AugAssign_set_target(space, w_self, w_new_value): @@ -3569,7 +3577,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return operator_to_class[w_self.op - 1]() def AugAssign_set_op(space, w_self, w_new_value): @@ -3592,7 +3600,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def AugAssign_set_value(space, w_self, w_new_value): @@ -3641,7 +3649,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') + raise_attriberr(space, w_self, 'target') return space.wrap(w_self.target) def For_set_target(space, w_self, w_new_value): @@ -3664,7 +3672,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'iter') + raise_attriberr(space, w_self, 'iter') return space.wrap(w_self.iter) def For_set_iter(space, w_self, w_new_value): @@ -3683,7 +3691,7 @@ def For_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3699,7 +3707,7 @@ def For_get_orelse(space, w_self): if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 
'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3748,7 +3756,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return space.wrap(w_self.test) def While_set_test(space, w_self, w_new_value): @@ -3767,7 +3775,7 @@ def While_get_body(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3783,7 +3791,7 @@ def While_get_orelse(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3831,7 +3839,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return space.wrap(w_self.test) def If_set_test(space, w_self, w_new_value): @@ -3850,7 +3858,7 @@ def If_get_body(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3866,7 +3874,7 @@ def If_get_orelse(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3914,7 +3922,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'context_expr') + raise_attriberr(space, w_self, 'context_expr') return space.wrap(w_self.context_expr) def With_set_context_expr(space, w_self, w_new_value): @@ -3937,7 +3945,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'optional_vars') + raise_attriberr(space, w_self, 'optional_vars') return space.wrap(w_self.optional_vars) def With_set_optional_vars(space, w_self, w_new_value): @@ -3956,7 +3964,7 @@ def With_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -4003,7 +4011,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'exc') + raise_attriberr(space, w_self, 'exc') return space.wrap(w_self.exc) def Raise_set_exc(space, w_self, w_new_value): @@ -4026,7 +4034,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'cause') + raise_attriberr(space, w_self, 'cause') return space.wrap(w_self.cause) def Raise_set_cause(space, w_self, 
w_new_value): @@ -4070,7 +4078,7 @@ def TryExcept_get_body(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -4086,7 +4094,7 @@ def TryExcept_get_handlers(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'handlers') + raise_attriberr(space, w_self, 'handlers') if w_self.w_handlers is None: if w_self.handlers is None: list_w = [] @@ -4102,7 +4110,7 @@ def TryExcept_get_orelse(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -4147,7 +4155,7 @@ def TryFinally_get_body(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -4163,7 +4171,7 @@ def TryFinally_get_finalbody(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'finalbody') + raise_attriberr(space, w_self, 'finalbody') if w_self.w_finalbody is None: if w_self.finalbody is None: list_w = [] @@ -4210,7 +4218,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return space.wrap(w_self.test) def Assert_set_test(space, w_self, w_new_value): @@ -4233,7 +4241,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'msg') + raise_attriberr(space, w_self, 'msg') return space.wrap(w_self.msg) def Assert_set_msg(space, w_self, w_new_value): @@ -4277,7 +4285,7 @@ def Import_get_names(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') + raise_attriberr(space, w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4322,7 +4330,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'module') + raise_attriberr(space, w_self, 'module') if w_self.module is None: return space.w_None return space.wrap(w_self.module.decode('utf-8')) @@ -4344,7 +4352,7 @@ def ImportFrom_get_names(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') + raise_attriberr(space, w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4364,7 +4372,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'level') + raise_attriberr(space, w_self, 'level') return space.wrap(w_self.level) def ImportFrom_set_level(space, w_self, w_new_value): @@ -4408,7 +4416,7 @@ def 
Global_get_names(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') + raise_attriberr(space, w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4449,7 +4457,7 @@ def Nonlocal_get_names(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') + raise_attriberr(space, w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4494,7 +4502,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Expr_set_value(space, w_self, w_new_value): @@ -4592,7 +4600,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') + raise_attriberr(space, w_self, 'lineno') return space.wrap(w_self.lineno) def expr_set_lineno(space, w_self, w_new_value): @@ -4613,7 +4621,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') + raise_attriberr(space, w_self, 'col_offset') return space.wrap(w_self.col_offset) def expr_set_col_offset(space, w_self, w_new_value): @@ -4643,7 +4651,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return boolop_to_class[w_self.op - 1]() def BoolOp_set_op(space, w_self, w_new_value): @@ -4662,7 +4670,7 @@ def BoolOp_get_values(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') + raise_attriberr(space, w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -4708,7 +4716,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'left') + raise_attriberr(space, w_self, 'left') return space.wrap(w_self.left) def BinOp_set_left(space, w_self, w_new_value): @@ -4731,7 +4739,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return operator_to_class[w_self.op - 1]() def BinOp_set_op(space, w_self, w_new_value): @@ -4754,7 +4762,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'right') + raise_attriberr(space, w_self, 'right') return space.wrap(w_self.right) def BinOp_set_right(space, w_self, w_new_value): @@ -4803,7 +4811,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return unaryop_to_class[w_self.op - 1]() def UnaryOp_set_op(space, w_self, w_new_value): @@ -4826,7 +4834,7 @@ if w_obj is not None: return 
w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'operand') + raise_attriberr(space, w_self, 'operand') return space.wrap(w_self.operand) def UnaryOp_set_operand(space, w_self, w_new_value): @@ -4874,7 +4882,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') return space.wrap(w_self.args) def Lambda_set_args(space, w_self, w_new_value): @@ -4895,7 +4903,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def Lambda_set_body(space, w_self, w_new_value): @@ -4943,7 +4951,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return space.wrap(w_self.test) def IfExp_set_test(space, w_self, w_new_value): @@ -4966,7 +4974,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def IfExp_set_body(space, w_self, w_new_value): @@ -4989,7 +4997,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') return space.wrap(w_self.orelse) def IfExp_set_orelse(space, w_self, w_new_value): @@ -5034,7 +5042,7 @@ def Dict_get_keys(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'keys') + raise_attriberr(space, w_self, 'keys') if w_self.w_keys is None: if w_self.keys is None: list_w = [] @@ -5050,7 +5058,7 @@ def Dict_get_values(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') + raise_attriberr(space, w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -5093,7 +5101,7 @@ def Set_get_elts(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') + raise_attriberr(space, w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -5138,7 +5146,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') + raise_attriberr(space, w_self, 'elt') return space.wrap(w_self.elt) def ListComp_set_elt(space, w_self, w_new_value): @@ -5157,7 +5165,7 @@ def ListComp_get_generators(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5203,7 +5211,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise 
operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') + raise_attriberr(space, w_self, 'elt') return space.wrap(w_self.elt) def SetComp_set_elt(space, w_self, w_new_value): @@ -5222,7 +5230,7 @@ def SetComp_get_generators(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5268,7 +5276,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'key') + raise_attriberr(space, w_self, 'key') return space.wrap(w_self.key) def DictComp_set_key(space, w_self, w_new_value): @@ -5291,7 +5299,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def DictComp_set_value(space, w_self, w_new_value): @@ -5310,7 +5318,7 @@ def DictComp_get_generators(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5357,7 +5365,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') + raise_attriberr(space, w_self, 'elt') return space.wrap(w_self.elt) def GeneratorExp_set_elt(space, w_self, w_new_value): @@ -5376,7 +5384,7 @@ def GeneratorExp_get_generators(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5422,7 +5430,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Yield_set_value(space, w_self, w_new_value): @@ -5469,7 +5477,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'left') + raise_attriberr(space, w_self, 'left') return space.wrap(w_self.left) def Compare_set_left(space, w_self, w_new_value): @@ -5488,7 +5496,7 @@ def Compare_get_ops(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ops') + raise_attriberr(space, w_self, 'ops') if w_self.w_ops is None: if w_self.ops is None: list_w = [] @@ -5504,7 +5512,7 @@ def Compare_get_comparators(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'comparators') + raise_attriberr(space, w_self, 'comparators') if w_self.w_comparators is None: if w_self.comparators is None: list_w = [] @@ -5552,7 +5560,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 
4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'func') + raise_attriberr(space, w_self, 'func') return space.wrap(w_self.func) def Call_set_func(space, w_self, w_new_value): @@ -5571,7 +5579,7 @@ def Call_get_args(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') if w_self.w_args is None: if w_self.args is None: list_w = [] @@ -5587,7 +5595,7 @@ def Call_get_keywords(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'keywords') + raise_attriberr(space, w_self, 'keywords') if w_self.w_keywords is None: if w_self.keywords is None: list_w = [] @@ -5607,7 +5615,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'starargs') + raise_attriberr(space, w_self, 'starargs') return space.wrap(w_self.starargs) def Call_set_starargs(space, w_self, w_new_value): @@ -5630,7 +5638,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 64: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'kwargs') + raise_attriberr(space, w_self, 'kwargs') return space.wrap(w_self.kwargs) def Call_set_kwargs(space, w_self, w_new_value): @@ -5683,7 +5691,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'n') + raise_attriberr(space, w_self, 'n') return w_self.n def Num_set_n(space, w_self, w_new_value): @@ -5728,7 +5736,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 's') + raise_attriberr(space, w_self, 's') return w_self.s def Str_set_s(space, w_self, w_new_value): @@ -5773,7 +5781,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 's') + raise_attriberr(space, w_self, 's') return w_self.s def Bytes_set_s(space, w_self, w_new_value): @@ -5835,7 +5843,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Attribute_set_value(space, w_self, w_new_value): @@ -5858,7 +5866,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'attr') + raise_attriberr(space, w_self, 'attr') if w_self.attr is None: return space.w_None return space.wrap(w_self.attr.decode('utf-8')) @@ -5881,7 +5889,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Attribute_set_ctx(space, w_self, w_new_value): @@ -5930,7 +5938,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute 
'%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Subscript_set_value(space, w_self, w_new_value): @@ -5953,7 +5961,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'slice') + raise_attriberr(space, w_self, 'slice') return space.wrap(w_self.slice) def Subscript_set_slice(space, w_self, w_new_value): @@ -5976,7 +5984,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Subscript_set_ctx(space, w_self, w_new_value): @@ -6025,7 +6033,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Starred_set_value(space, w_self, w_new_value): @@ -6048,7 +6056,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Starred_set_ctx(space, w_self, w_new_value): @@ -6096,7 +6104,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'id') + raise_attriberr(space, w_self, 'id') if w_self.id is None: return space.w_None return space.wrap(w_self.id.decode('utf-8')) @@ -6119,7 +6127,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Name_set_ctx(space, w_self, w_new_value): @@ -6163,7 +6171,7 @@ def List_get_elts(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') + raise_attriberr(space, w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -6183,7 +6191,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def List_set_ctx(space, w_self, w_new_value): @@ -6228,7 +6236,7 @@ def Tuple_get_elts(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') + raise_attriberr(space, w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -6248,7 +6256,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Tuple_set_ctx(space, w_self, w_new_value): @@ -6297,7 +6305,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no 
attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return w_self.value def Const_set_value(space, w_self, w_new_value): @@ -6398,7 +6406,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lower') + raise_attriberr(space, w_self, 'lower') return space.wrap(w_self.lower) def Slice_set_lower(space, w_self, w_new_value): @@ -6421,7 +6429,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'upper') + raise_attriberr(space, w_self, 'upper') return space.wrap(w_self.upper) def Slice_set_upper(space, w_self, w_new_value): @@ -6444,7 +6452,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'step') + raise_attriberr(space, w_self, 'step') return space.wrap(w_self.step) def Slice_set_step(space, w_self, w_new_value): @@ -6489,7 +6497,7 @@ def ExtSlice_get_dims(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'dims') + raise_attriberr(space, w_self, 'dims') if w_self.w_dims is None: if w_self.dims is None: list_w = [] @@ -6534,7 +6542,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Index_set_value(space, w_self, w_new_value): @@ -6805,7 +6813,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') + raise_attriberr(space, w_self, 'target') return space.wrap(w_self.target) def comprehension_set_target(space, w_self, w_new_value): @@ -6828,7 +6836,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'iter') + raise_attriberr(space, w_self, 'iter') return space.wrap(w_self.iter) def comprehension_set_iter(space, w_self, w_new_value): @@ -6847,7 +6855,7 @@ def comprehension_get_ifs(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ifs') + raise_attriberr(space, w_self, 'ifs') if w_self.w_ifs is None: if w_self.ifs is None: list_w = [] @@ -6894,7 +6902,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') + raise_attriberr(space, w_self, 'lineno') return space.wrap(w_self.lineno) def excepthandler_set_lineno(space, w_self, w_new_value): @@ -6915,7 +6923,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') + raise_attriberr(space, w_self, 'col_offset') return space.wrap(w_self.col_offset) def excepthandler_set_col_offset(space, w_self, w_new_value): @@ -6945,7 +6953,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no 
attribute '%s'", w_self, 'type') + raise_attriberr(space, w_self, 'type') return space.wrap(w_self.type) def ExceptHandler_set_type(space, w_self, w_new_value): @@ -6968,7 +6976,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') if w_self.name is None: return space.w_None return space.wrap(w_self.name.decode('utf-8')) @@ -6990,7 +6998,7 @@ def ExceptHandler_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -7033,7 +7041,7 @@ def arguments_get_args(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') if w_self.w_args is None: if w_self.args is None: list_w = [] @@ -7053,7 +7061,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'vararg') + raise_attriberr(space, w_self, 'vararg') if w_self.vararg is None: return space.w_None return space.wrap(w_self.vararg.decode('utf-8')) @@ -7079,7 +7087,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'varargannotation') + raise_attriberr(space, w_self, 'varargannotation') return space.wrap(w_self.varargannotation) def arguments_set_varargannotation(space, w_self, w_new_value): @@ -7098,7 +7106,7 @@ def arguments_get_kwonlyargs(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'kwonlyargs') + raise_attriberr(space, w_self, 'kwonlyargs') if w_self.w_kwonlyargs is None: if w_self.kwonlyargs is None: list_w = [] @@ -7118,7 +7126,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'kwarg') + raise_attriberr(space, w_self, 'kwarg') if w_self.kwarg is None: return space.w_None return space.wrap(w_self.kwarg.decode('utf-8')) @@ -7144,7 +7152,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'kwargannotation') + raise_attriberr(space, w_self, 'kwargannotation') return space.wrap(w_self.kwargannotation) def arguments_set_kwargannotation(space, w_self, w_new_value): @@ -7163,7 +7171,7 @@ def arguments_get_defaults(space, w_self): if not w_self.initialization_state & 64: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'defaults') + raise_attriberr(space, w_self, 'defaults') if w_self.w_defaults is None: if w_self.defaults is None: list_w = [] @@ -7179,7 +7187,7 @@ def arguments_get_kw_defaults(space, w_self): if not w_self.initialization_state & 128: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'kw_defaults') + raise_attriberr(space, w_self, 'kw_defaults') if w_self.w_kw_defaults is None: if w_self.kw_defaults is None: list_w = [] @@ -7234,7 +7242,7 @@ if w_obj is 
not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'arg') + raise_attriberr(space, w_self, 'arg') if w_self.arg is None: return space.w_None return space.wrap(w_self.arg.decode('utf-8')) @@ -7257,7 +7265,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'annotation') + raise_attriberr(space, w_self, 'annotation') return space.wrap(w_self.annotation) def arg_set_annotation(space, w_self, w_new_value): @@ -7305,7 +7313,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'arg') + raise_attriberr(space, w_self, 'arg') if w_self.arg is None: return space.w_None return space.wrap(w_self.arg.decode('utf-8')) @@ -7328,7 +7336,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def keyword_set_value(space, w_self, w_new_value): @@ -7376,7 +7384,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') if w_self.name is None: return space.w_None return space.wrap(w_self.name.decode('utf-8')) @@ -7399,7 +7407,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'asname') + raise_attriberr(space, w_self, 'asname') if w_self.asname is None: return space.w_None return space.wrap(w_self.asname.decode('utf-8')) diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -416,8 +416,7 @@ self.emit(" if w_obj is not None:", 1) self.emit(" return w_obj", 1) self.emit("if not w_self.initialization_state & %s:" % (flag,), 1) - self.emit("raise operationerrfmt(space.w_AttributeError, \"'%%T' object has no attribute '%%s'\", w_self, '%s')" % - (field.name,), 2) + self.emit("raise_attriberr(space, w_self, '%s')" % (field.name,), 2) if field.seq: self.emit("if w_self.w_%s is None:" % (field.name,), 1) self.emit("if w_self.%s is None:" % (field.name,), 2) @@ -548,14 +547,20 @@ HEAD = """# Generated by tools/asdl_py.py -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter import typedef -from pypy.interpreter.gateway import interp2app -from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.unroll import unrolling_iterable from rpython.tool.pairtype import extendabletype from rpython.tool.sourcetools import func_with_new_name +from pypy.interpreter import typedef +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import interp2app + + +def raise_attriberr(space, w_obj, name): + raise oefmt(space.w_AttributeError, + \"'%T' object has no attribute '%s'\", w_obj, name) + def check_string(space, w_obj): if not (space.isinstance_w(w_obj, space.w_str) or @@ -619,11 +624,13 @@ continue # field is optional w_obj = 
self.getdictvalue(space, missing) if w_obj is None: - err = "required field \\"%s\\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "required field \\"%s\\" missing from %s", + missing, host) else: - err = "incorrect type for field \\"%s\\" in %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "incorrect type for field \\"%s\\" in %s", + missing, host) raise AssertionError("should not reach here") diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -11,8 +11,7 @@ from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag, UserDelAction) -from pypy.interpreter.error import (OperationError, operationerrfmt, - new_exception_class) +from pypy.interpreter.error import OperationError, new_exception_class, oefmt from pypy.interpreter.argument import Arguments from pypy.interpreter.miscutils import ThreadLocals @@ -61,9 +60,9 @@ return False def setdict(self, space, w_dict): - raise operationerrfmt(space.w_TypeError, - "attribute '__dict__' of %T objects " - "is not writable", self) + raise oefmt(space.w_TypeError, + "attribute '__dict__' of %T objects is not writable", + self) # to be used directly only by space.type implementations def getclass(self, space): @@ -122,8 +121,8 @@ classname = '?' else: classname = wrappable_class_name(RequiredClass) - msg = "'%s' object expected, got '%T' instead" - raise operationerrfmt(space.w_TypeError, msg, classname, self) + raise oefmt(space.w_TypeError, + "'%s' object expected, got '%T' instead", classname, self) # used by _weakref implemenation @@ -131,8 +130,8 @@ return None def setweakref(self, space, weakreflifeline): - raise operationerrfmt(space.w_TypeError, - "cannot create weak reference to '%T' object", self) + raise oefmt(space.w_TypeError, + "cannot create weak reference to '%T' object", self) def delweakref(self): pass @@ -217,24 +216,24 @@ self._typed_unwrap_error(space, "integer") def _typed_unwrap_error(self, space, expected): - raise operationerrfmt(space.w_TypeError, "expected %s, got %T object", - expected, self) + raise oefmt(space.w_TypeError, + "expected %s, got %T object", expected, self) def int(self, space): w_impl = space.lookup(self, '__int__') if w_impl is None: - raise operationerrfmt(space.w_TypeError, - "unsupported operand type for int(): '%T'", self) + raise oefmt(space.w_TypeError, + "unsupported operand type for int(): '%T'", self) w_result = space.get_and_call_function(w_impl, self) if space.isinstance_w(w_result, space.w_int): return w_result - msg = "__int__ returned non-int (type '%T')" - raise operationerrfmt(space.w_TypeError, msg, w_result) + raise oefmt(space.w_TypeError, + "__int__ returned non-int (type '%T')", w_result) def ord(self, space): - msg = "ord() expected string of length 1, but %T found" - raise operationerrfmt(space.w_TypeError, msg, self) + raise oefmt(space.w_TypeError, + "ord() expected string of length 1, but %T found", self) def __spacebind__(self, space): return self @@ -431,10 +430,9 @@ try: w_mod = self.builtin_modules[name] except KeyError: - raise operationerrfmt( - self.w_SystemError, - "getbuiltinmodule() called " - "with non-builtin module %s", name) + raise oefmt(self.w_SystemError, + "getbuiltinmodule() called with non-builtin module %s", + name) else: # Add the module to sys.modules self.setitem(w_modules, w_name, w_mod) @@ -765,9 +763,10 @@ if 
can_be_None and self.is_none(w_obj): return None if not isinstance(w_obj, RequiredClass): # or obj is None - msg = "'%s' object expected, got '%N' instead" - raise operationerrfmt(self.w_TypeError, msg, - wrappable_class_name(RequiredClass), w_obj.getclass(self)) + raise oefmt(self.w_TypeError, + "'%s' object expected, got '%N' instead", + wrappable_class_name(RequiredClass), + w_obj.getclass(self)) return w_obj interp_w._annspecialcase_ = 'specialize:arg(1)' @@ -845,9 +844,9 @@ items[idx] = w_item idx += 1 if idx < expected_length: - raise operationerrfmt(self.w_ValueError, - "need more than %d value%s to unpack", - idx, idx != 1 and "s" or "") + raise oefmt(self.w_ValueError, + "need more than %d value%s to unpack", + idx, "" if idx == 1 else "s") return items def unpackiterable_unroll(self, w_iterable, expected_length): @@ -1232,8 +1231,8 @@ except OperationError, err: if objdescr is None or not err.match(self, self.w_TypeError): raise - msg = "%s must be an integer, not %T" - raise operationerrfmt(self.w_TypeError, msg, objdescr, w_obj) + raise oefmt(self.w_TypeError, "%s must be an integer, not %T", + objdescr, w_obj) try: index = self.int_w(w_index) except OperationError, err: @@ -1246,9 +1245,9 @@ else: return sys.maxint else: - raise operationerrfmt( - w_exception, "cannot fit '%T' into an index-sized integer", - w_obj) + raise oefmt(w_exception, + "cannot fit '%T' into an index-sized integer", + w_obj) else: return index @@ -1569,9 +1568,9 @@ ) fd = self.int_w(w_fd) if fd < 0: - raise operationerrfmt(self.w_ValueError, - "file descriptor cannot be a negative integer (%d)", fd - ) + raise oefmt(self.w_ValueError, + "file descriptor cannot be a negative integer (%d)", + fd) return fd def warn(self, w_msg, w_warningcls, stacklevel=2): diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -272,8 +272,9 @@ def _exception_getclass(self, space, w_inst, what="exceptions"): w_type = space.exception_getclass(w_inst) if not space.exception_is_valid_class_w(w_type): - msg = "%s must derive from BaseException, not %N" - raise operationerrfmt(space.w_TypeError, msg, what, w_type) + raise oefmt(space.w_TypeError, + "%s must derive from BaseException, not %N", + what, w_type) return w_type def write_unraisable(self, space, where, w_object=None, @@ -437,15 +438,16 @@ self._w_value = w_value = space.wrap(self._value) return w_value -def get_operationerr_class(valuefmt): + at specialize.memo() +def get_operr_class(valuefmt): try: result = _fmtcache[valuefmt] except KeyError: result = _fmtcache[valuefmt] = get_operrcls2(valuefmt) return result -get_operationerr_class._annspecialcase_ = 'specialize:memo' -def operationerrfmt(w_type, valuefmt, *args): + at specialize.arg(1) +def oefmt(w_type, valuefmt, *args): """Equivalent to OperationError(w_type, space.wrap(valuefmt % args)). More efficient in the (common) case where the value is not actually needed. 
Note that in the py3k branch the exception message will @@ -461,9 +463,8 @@ """ if not len(args): return OpErrFmtNoArgs(w_type, valuefmt) - OpErrFmt, strings = get_operationerr_class(valuefmt) + OpErrFmt, strings = get_operr_class(valuefmt) return OpErrFmt(w_type, strings, *args) -operationerrfmt._annspecialcase_ = 'specialize:arg(1)' # ____________________________________________________________ diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -7,8 +7,8 @@ """ from rpython.rlib.unroll import unrolling_iterable -from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.eval import Code from pypy.interpreter.argument import Arguments from rpython.rlib import jit @@ -433,9 +433,9 @@ if self.closure: closure_len = len(self.closure) if isinstance(code, PyCode) and closure_len != len(code.co_freevars): - raise operationerrfmt(space.w_ValueError, - "%N() requires a code object with %d free vars, not %d", - self, closure_len, len(code.co_freevars)) + raise oefmt(space.w_ValueError, + "%N() requires a code object with %d free vars, not " + "%d", self, closure_len, len(code.co_freevars)) self.fget_func_doc(space) # see test_issue1293 self.code = code diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -12,7 +12,7 @@ from pypy.interpreter.argument import Arguments from pypy.interpreter.astcompiler import consts from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.executioncontext import ExecutionContext from pypy.interpreter.nestedscope import Cell from pypy.tool import stdlib_opcode @@ -625,8 +625,8 @@ line = self.pycode.co_firstlineno if new_lineno < line: - raise operationerrfmt(space.w_ValueError, - "line %d comes before the current code.", new_lineno) + raise oefmt(space.w_ValueError, + "line %d comes before the current code.", new_lineno) elif new_lineno == line: new_lasti = 0 else: @@ -642,8 +642,8 @@ break if new_lasti == -1: - raise operationerrfmt(space.w_ValueError, - "line %d comes after the current code.", new_lineno) + raise oefmt(space.w_ValueError, + "line %d comes after the current code.", new_lineno) # Don't jump to a line with an except in it. 
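# Illustrative aside, not part of the archived diff: the pypy/interpreter/error.py
# hunk above renames operationerrfmt() to oefmt() and replaces the
# _annspecialcase_ strings with the @specialize.memo() / @specialize.arg(1)
# decorators, keeping two properties visible in the surrounding code: one
# exception class is cached per format string (get_operr_class / _fmtcache),
# and the message is only rendered on demand.  A self-contained toy of those
# two properties (all names below are invented; plain %-formatting stands in
# for the %T/%N/%R codes handled by the real helper):

_toy_fmtcache = {}

class ToyOpErr(Exception):
    valuefmt = None                      # filled in on the generated subclass

    def __init__(self, w_type, *args):
        self.w_type = w_type
        self._args = args
        self._value = None               # message not rendered yet

    def get_value(self):
        if self._value is None:          # rendered lazily, like _compute_value()
            self._value = self.valuefmt % self._args
        return self._value

def toy_oefmt(w_type, valuefmt, *args):
    cls = _toy_fmtcache.get(valuefmt)
    if cls is None:                      # one subclass per distinct format string
        cls = type('ToyOpErrFmt', (ToyOpErr,), {'valuefmt': valuefmt})
        _toy_fmtcache[valuefmt] = cls
    return cls(w_type, *args)

e1 = toy_oefmt(TypeError, "abc %s def %d", "foo", 42)
e2 = toy_oefmt(TypeError, "abc %s def %d", "bar", 43)
assert type(e1) is type(e2)              # same cached class for the same format
assert e1.get_value() == "abc foo def 42"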
code = self.pycode.co_code @@ -690,9 +690,9 @@ assert len(blockstack) == 0 if new_lasti_setup_addr != f_lasti_setup_addr: - raise operationerrfmt(space.w_ValueError, - "can't jump into or out of a 'finally' block %d -> %d", - f_lasti_setup_addr, new_lasti_setup_addr) + raise oefmt(space.w_ValueError, + "can't jump into or out of a 'finally' block %d -> %d", + f_lasti_setup_addr, new_lasti_setup_addr) if new_lasti < self.last_instr: min_addr = new_lasti diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -14,7 +14,7 @@ gateway, function, eval, pyframe, pytraceback, pycode ) from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.nestedscope import Cell from pypy.interpreter.pycode import PyCode, BytecodeCorruption from pypy.tool.stdlib_opcode import bytecode_spec @@ -457,8 +457,9 @@ def _load_fast_failed(self, varindex): varname = self.getlocalvarname(varindex) - message = "local variable '%s' referenced before assignment" - raise operationerrfmt(self.space.w_UnboundLocalError, message, varname) + raise oefmt(self.space.w_UnboundLocalError, + "local variable '%s' referenced before assignment", + varname) _load_fast_failed._dont_inline_ = True def LOAD_CONST(self, constindex, next_instr): @@ -764,9 +765,8 @@ # catch KeyErrors and turn them into NameErrors if not e.match(self.space, self.space.w_KeyError): raise - message = "name '%s' is not defined" - raise operationerrfmt(self.space.w_NameError, message, - self.space.str_w(w_varname)) + raise oefmt(self.space.w_NameError, "name '%s' is not defined", + self.space.str_w(w_varname)) def UNPACK_SEQUENCE(self, itemcount, next_instr): w_iterable = self.popvalue() @@ -849,8 +849,8 @@ _load_global._always_inline_ = True def _load_global_failed(self, w_varname): - message = "global name %R is not defined" - raise operationerrfmt(self.space.w_NameError, message, w_varname) + raise oefmt(self.space.w_NameError, + "global name %R is not defined", w_varname) _load_global_failed._dont_inline_ = True def LOAD_GLOBAL(self, nameindex, next_instr): @@ -864,9 +864,9 @@ def DELETE_FAST(self, varindex, next_instr): if self.locals_stack_w[varindex] is None: varname = self.getlocalvarname(varindex) - message = "local variable '%s' referenced before assignment" - raise operationerrfmt(self.space.w_UnboundLocalError, message, - varname) + raise oefmt(self.space.w_UnboundLocalError, + "local variable '%s' referenced before assignment", + varname) self.locals_stack_w[varindex] = None def BUILD_TUPLE(self, itemcount, next_instr): @@ -992,9 +992,8 @@ except OperationError, e: if not e.match(self.space, self.space.w_AttributeError): raise - raise operationerrfmt(self.space.w_ImportError, - "cannot import name '%s'", - self.space.str_w(w_name)) + raise oefmt(self.space.w_ImportError, + "cannot import name '%s'", self.space.str_w(w_name)) self.pushvalue(w_obj) def YIELD_VALUE(self, oparg, next_instr): @@ -1082,9 +1081,9 @@ w_enter = self.space.lookup(w_manager, "__enter__") w_descr = self.space.lookup(w_manager, "__exit__") if w_enter is None or w_descr is None: - raise operationerrfmt(self.space.w_AttributeError, - "'%T' object is not a context manager" - " (no __enter__/__exit__ method)", w_manager) + raise oefmt(self.space.w_AttributeError, + "'%T' object is not a context manager (no __enter__/" + "__exit__ method)", w_manager) w_exit = 
self.space.get(w_descr, w_manager) self.settopvalue(w_exit) w_result = self.space.get_and_call_function(w_enter, w_manager) diff --git a/pypy/interpreter/test/test_error.py b/pypy/interpreter/test/test_error.py --- a/pypy/interpreter/test/test_error.py +++ b/pypy/interpreter/test/test_error.py @@ -1,9 +1,9 @@ # -*- encoding: utf-8 -*- import py, os, errno -from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.error import decompose_valuefmt, get_operrcls2 -from pypy.interpreter.error import wrap_oserror, new_exception_class +from pypy.interpreter.error import ( + OperationError, decompose_valuefmt, get_operrcls2, new_exception_class, + oefmt, wrap_oserror) def test_decompose_valuefmt(): @@ -24,61 +24,61 @@ assert cls2 is cls # caching assert strings2 == ("a ", " b ", " c") -def test_operationerrfmt(space): - operr = operationerrfmt("w_type", "abc %s def %d", "foo", 42) +def test_oefmt(space): + operr = oefmt("w_type", "abc %s def %d", "foo", 42) assert isinstance(operr, OperationError) assert operr.w_type == "w_type" assert operr._w_value is None val = operr._compute_value(space) assert val == u"abc foo def 42" assert isinstance(val, unicode) - operr2 = operationerrfmt("w_type2", "a %s b %d c", "bar", 43) + operr2 = oefmt("w_type2", "a %s b %d c", "bar", 43) assert operr2.__class__ is operr.__class__ - operr3 = operationerrfmt("w_type2", "a %s b %s c", "bar", "4b") + operr3 = oefmt("w_type2", "a %s b %s c", "bar", "4b") assert operr3.__class__ is not operr.__class__ -def test_operationerrfmt_noargs(space): - operr = operationerrfmt(space.w_AttributeError, "no attribute 'foo'") +def test_oefmt_noargs(space): + operr = oefmt(space.w_AttributeError, "no attribute 'foo'") operr.normalize_exception(space) val = operr.get_w_value(space) assert space.isinstance_w(val, space.w_AttributeError) w_repr = space.repr(val) assert space.str_w(w_repr) == "AttributeError(\"no attribute 'foo'\",)" -def test_operationerrfmt_T(space): - operr = operationerrfmt(space.w_AttributeError, - "'%T' object has no attribute '%s'", - space.wrap('foo'), 'foo') +def test_oefmt_T(space): + operr = oefmt(space.w_AttributeError, + "'%T' object has no attribute '%s'", + space.wrap('foo'), 'foo') assert operr._compute_value(space) == "'str' object has no attribute 'foo'" - operr = operationerrfmt("w_type", - "'%T' object has no attribute '%s'", - space.wrap('foo'), 'foo') + operr = oefmt("w_type", + "'%T' object has no attribute '%s'", + space.wrap('foo'), 'foo') assert operr._compute_value(space) == "'str' object has no attribute 'foo'" -def test_operationerrfmt_N(space): - operr = operationerrfmt(space.w_AttributeError, - "'%N' object has no attribute '%s'", - space.type(space.wrap('foo')), 'foo') +def test_oefmt_N(space): + operr = oefmt(space.w_AttributeError, + "'%N' object has no attribute '%s'", + space.type(space.wrap('foo')), 'foo') assert operr._compute_value(space) == "'str' object has no attribute 'foo'" - operr = operationerrfmt("w_type", - "'%N' object has no attribute '%s'", - space.type(space.wrap('foo')), 'foo') + operr = oefmt("w_type", + "'%N' object has no attribute '%s'", + space.type(space.wrap('foo')), 'foo') assert operr._compute_value(space) == "'str' object has no attribute 'foo'" - operr = operationerrfmt(space.w_AttributeError, - "'%N' object has no attribute '%s'", - space.wrap('foo'), 'foo') + operr = oefmt(space.w_AttributeError, + "'%N' object has no attribute '%s'", + space.wrap('foo'), 'foo') assert operr._compute_value(space) == "'?' 
object has no attribute 'foo'" - operr = operationerrfmt("w_type", - "'%N' object has no attribute '%s'", - space.wrap('foo'), 'foo') + operr = oefmt("w_type", + "'%N' object has no attribute '%s'", + space.wrap('foo'), 'foo') assert operr._compute_value(space) == "'?' object has no attribute 'foo'" -def test_operationerrfmt_R(space): - operr = operationerrfmt(space.w_ValueError, "illegal newline value: %R", - space.wrap('foo')) +def test_oefmt_R(space): + operr = oefmt(space.w_ValueError, + "illegal newline value: %R", space.wrap('foo')) assert operr._compute_value(space) == "illegal newline value: 'foo'" - operr = operationerrfmt(space.w_ValueError, "illegal newline value: %R", - space.wrap("'PyLadies'")) + operr = oefmt(space.w_ValueError, "illegal newline value: %R", + space.wrap("'PyLadies'")) expected = "illegal newline value: \"'PyLadies'\"" assert operr._compute_value(space) == expected diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -2,7 +2,7 @@ from pypy.interpreter.argument import Arguments from pypy.interpreter.baseobjspace import W_Root, DescrMismatch -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import (interp2app, BuiltinCode, unwrap_spec, WrappedDefault) @@ -561,9 +561,9 @@ def typecheck(self, space, w_obj): if not space.isinstance_w(w_obj, self.w_cls): - m = "descriptor '%N' for '%N' objects doesn't apply to '%T' object" - raise operationerrfmt(space.w_TypeError, m, - self, self.w_cls, w_obj) + raise oefmt(space.w_TypeError, + "descriptor '%N' for '%N' objects doesn't apply to " + "'%T' object", self, self.w_cls, w_obj) def descr_member_get(self, space, w_obj, w_cls=None): """member.__get__(obj[, type]) -> value @@ -632,8 +632,9 @@ def descr_get_dict(space, w_obj): w_dict = w_obj.getdict(space) if w_dict is None: - msg = "descriptor '__dict__' doesn't apply to '%T' objects" - raise operationerrfmt(space.w_TypeError, msg, w_obj) + raise oefmt(space.w_TypeError, + "descriptor '__dict__' doesn't apply to '%T' objects", + w_obj) return w_dict def descr_set_dict(space, w_obj, w_dict): diff --git a/pypy/module/__pypy__/interp_dict.py b/pypy/module/__pypy__/interp_dict.py --- a/pypy/module/__pypy__/interp_dict.py +++ b/pypy/module/__pypy__/interp_dict.py @@ -1,6 +1,6 @@ +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec -from pypy.interpreter.error import operationerrfmt, OperationError from pypy.objspace.std.dictmultiobject import W_DictMultiObject @unwrap_spec(type=str) @@ -30,8 +30,7 @@ elif type == 'strdict': return space.newdict(strdict=True) else: - raise operationerrfmt(space.w_TypeError, "unknown type of dict %s", - type) + raise oefmt(space.w_TypeError, "unknown type of dict %s", type) def dictstrategy(space, w_obj): """ dictstrategy(dict) diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py --- a/pypy/module/_cffi_backend/cbuffer.py +++ b/pypy/module/_cffi_backend/cbuffer.py @@ -1,6 +1,6 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import RWBuffer -from pypy.interpreter.error import operationerrfmt +from pypy.interpreter.error import oefmt from pypy.interpreter.gateway import unwrap_spec, interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.module._cffi_backend import cdataobj, ctypeptr, ctypearray @@ -83,11 
+83,9 @@ if size < 0: size = w_cdata._sizeof() else: - raise operationerrfmt(space.w_TypeError, - "expected a pointer or array cdata, got '%s'", - ctype.name) + raise oefmt(space.w_TypeError, + "expected a pointer or array cdata, got '%s'", ctype.name) if size < 0: - raise operationerrfmt(space.w_TypeError, - "don't know the size pointed to by '%s'", - ctype.name) + raise oefmt(space.w_TypeError, + "don't know the size pointed to by '%s'", ctype.name) return space.wrap(MiniBuffer(LLBuffer(w_cdata._cdata, size), w_cdata)) diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py --- a/pypy/module/_cffi_backend/ccallback.py +++ b/pypy/module/_cffi_backend/ccallback.py @@ -7,7 +7,7 @@ from rpython.rlib.objectmodel import compute_unique_id, keepalive_until_here from rpython.rtyper.lltypesystem import lltype, rffi -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.module._cffi_backend import cerrno, misc from pypy.module._cffi_backend.cdataobj import W_CData from pypy.module._cffi_backend.ctypefunc import SIZE_OF_FFI_ARG, BIG_ENDIAN, W_CTypeFunc @@ -26,9 +26,8 @@ From noreply at buildbot.pypy.org Mon Feb 3 23:52:03 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 3 Feb 2014 23:52:03 +0100 (CET) Subject: [pypy-commit] pypy py3k: operationerrfmt -> oefmt Message-ID: <20140203225203.48AB71C0352@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69065:f42066bae86d Date: 2014-02-03 14:49 -0800 http://bitbucket.org/pypy/pypy/changeset/f42066bae86d/ Log: operationerrfmt -> oefmt diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -838,9 +838,9 @@ raise break # done if idx == expected_length: - raise operationerrfmt(self.w_ValueError, - "too many values to unpack (expected %d)", - expected_length) + raise oefmt(self.w_ValueError, + "too many values to unpack (expected %d)", + expected_length) items[idx] = w_item idx += 1 if idx < expected_length: diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -787,9 +787,9 @@ plural = '' else: plural = 's' - raise operationerrfmt(self.space.w_ValueError, - "need more than %d value%s to unpack", - itemcount, plural) + raise oefmt(self.space.w_ValueError, + "need more than %d value%s to unpack", + itemcount, plural) right = itemcount - right assert right >= 0 # push values in reverse order @@ -835,8 +835,8 @@ # fall-back w_value = self._load_global(w_varname) if w_value is None: - message = "name %R is not defined" - raise operationerrfmt(self.space.w_NameError, message, w_varname) + raise oefmt(self.space.w_NameError, + "name %R is not defined", w_varname) self.pushvalue(w_value) def _load_global(self, w_varname): @@ -1560,9 +1560,8 @@ except OperationError as e: if not e.match(space, space.w_TypeError): raise - raise operationerrfmt(space.w_TypeError, - "%s() arg 1 must be a %s object", - funcname, what) + raise oefmt(space.w_TypeError, + "%s() arg 1 must be a %s object", funcname, what) return source, flags @@ -1570,14 +1569,13 @@ """Ensure globals/locals exist and are of the correct type""" if (not space.is_none(w_globals) and not space.isinstance_w(w_globals, space.w_dict)): - raise operationerrfmt(space.w_TypeError, - '%s() arg 2 must be a dict, not %T', - funcname, w_globals) + raise 
oefmt(space.w_TypeError, + '%s() arg 2 must be a dict, not %T', funcname, w_globals) if (not space.is_none(w_locals) and space.lookup(w_locals, '__getitem__') is None): - raise operationerrfmt(space.w_TypeError, - '%s() arg 3 must be a mapping or None, not %T', - funcname, w_locals) + raise oefmt(space.w_TypeError, + '%s() arg 3 must be a mapping or None, not %T', + funcname, w_locals) if space.is_none(w_globals): if caller is None: diff --git a/pypy/interpreter/test/test_error.py b/pypy/interpreter/test/test_error.py --- a/pypy/interpreter/test/test_error.py +++ b/pypy/interpreter/test/test_error.py @@ -82,14 +82,14 @@ expected = "illegal newline value: \"'PyLadies'\"" assert operr._compute_value(space) == expected -def test_operationerrfmt_unicode(space): - operr = operationerrfmt("w_type", "abc %s", u"àèìòù") +def test_oefmt_unicode(space): + operr = oefmt("w_type", "abc %s", u"àèìòù") val = operr._compute_value(space) assert val == u"abc àèìòù" -def test_operationerrfmt_utf8(space): +def test_oefmt_utf8(space): arg = u"àèìòù".encode('utf-8') - operr = operationerrfmt("w_type", "abc %8", arg) + operr = oefmt("w_type", "abc %8", arg) val = operr._compute_value(space) assert val == u"abc àèìòù" diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py --- a/pypy/module/__builtin__/functional.py +++ b/pypy/module/__builtin__/functional.py @@ -4,7 +4,7 @@ """ from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.typedef import TypeDef from rpython.rlib import jit @@ -442,8 +442,7 @@ return space.sequence_index(self, w_item) if not self._contains_long(space, w_item): - raise operationerrfmt(space.w_ValueError, "%R is not in range", - w_item) + raise oefmt(space.w_ValueError, "%R is not in range", w_item) w_index = space.sub(w_item, self.w_start) return space.floordiv(w_index, self.w_step) diff --git a/pypy/module/__builtin__/operation.py b/pypy/module/__builtin__/operation.py --- a/pypy/module/__builtin__/operation.py +++ b/pypy/module/__builtin__/operation.py @@ -3,7 +3,7 @@ """ from pypy.interpreter import gateway -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from rpython.rlib.runicode import UNICHR import __builtin__ @@ -105,9 +105,8 @@ same type as the number. 
ndigits may be negative.""" round = space.lookup(w_number, '__round__') if round is None: - raise operationerrfmt(space.w_TypeError, - "type %T doesn't define __round__ method", - w_number) + raise oefmt(space.w_TypeError, + "type %T doesn't define __round__ method", w_number) if w_ndigits is None: return space.get_and_call_function(round, w_number) else: diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -329,9 +329,8 @@ raise OperationError(space.type(w_exc), w_exc) return space.newtuple([space.wrap(unichr(ch)), space.wrap(start + 3)]) else: - raise operationerrfmt(space.w_TypeError, - "don't know how to handle %T in error callback", - w_exc) + raise oefmt(space.w_TypeError, + "don't know how to handle %T in error callback", w_exc) def surrogateescape_errors(space, w_exc): check_exception(space, w_exc) @@ -369,9 +368,8 @@ return space.newtuple([space.wrap(replace), space.wrap(start + consumed)]) else: - raise operationerrfmt(space.w_TypeError, - "don't know how to handle %T in error callback", - w_exc) + raise oefmt(space.w_TypeError, + "don't know how to handle %T in error callback", w_exc) def register_builtin_error_handlers(space): "NOT_RPYTHON" diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -172,8 +172,7 @@ return space.w_False def getstate_w(self, space): - raise operationerrfmt(space.w_TypeError, - "cannot serialize '%T' object", self) + raise oefmt(space.w_TypeError, "cannot serialize '%T' object", self) # ______________________________________________________________ diff --git a/pypy/module/_pypyjson/interp_decoder.py b/pypy/module/_pypyjson/interp_decoder.py --- a/pypy/module/_pypyjson/interp_decoder.py +++ b/pypy/module/_pypyjson/interp_decoder.py @@ -388,8 +388,7 @@ def loads(space, w_s): if space.isinstance_w(w_s, space.w_bytes): - raise operationerrfmt(space.w_TypeError, - "Expected string, got %T", w_s) + raise oefmt(space.w_TypeError, "Expected string, got %T", w_s) s = space.str_w(w_s) decoder = JSONDecoder(space, s) try: diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -1,7 +1,6 @@ from __future__ import with_statement from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.interpreter.error import ( - OperationError, operationerrfmt, wrap_oserror) +from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -102,8 +101,7 @@ elif protocol == PY_SSL_VERSION_SSL23: method = libssl_SSLv23_method() else: - raise operationerrfmt(space.w_ValueError, - "invalid protocol version") + raise oefmt(space.w_ValueError, "invalid protocol version") self.ctx = libssl_SSL_CTX_new(method) # Defaults diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -237,6 +237,6 @@ does not conform to PEP 3147 format, ValueError will be raised.""" sourcename = importing.make_source_pathname(pathname) if sourcename is None: - raise operationerrfmt(space.w_ValueError, - "Not a PEP 3147 pyc path: %s", pathname) + raise oefmt(space.w_ValueError, + "Not a PEP 3147 pyc path: %s", 
pathname) return space.fsdecode(space.wrapbytes(sourcename)) diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -251,9 +251,8 @@ w_restype = space.type(w_res) # Note there is no check for bool here because the only possible # instances of bool are w_False and w_True, which are checked above. - raise operationerrfmt(space.w_TypeError, - "__bool__ should return bool, returned %T", - w_obj) + raise oefmt(space.w_TypeError, + "__bool__ should return bool, returned %T", w_obj) def nonzero(space, w_obj): if space.is_true(w_obj): @@ -477,9 +476,8 @@ def buffer(space, w_obj): w_impl = space.lookup(w_obj, '__buffer__') if w_impl is None: - raise operationerrfmt(space.w_TypeError, - "'%T' does not support the buffer interface", - w_obj) + raise oefmt(space.w_TypeError, + "'%T' does not support the buffer interface", w_obj) return space.get_and_call_function(w_impl, w_obj) @@ -598,8 +596,8 @@ return space.not_(space.eq(w_obj1, w_obj2)) # # if we arrived here, they are unorderable - raise operationerrfmt(space.w_TypeError, "unorderable types: %T %s %T", - w_obj1, symbol, w_obj2) + raise oefmt(space.w_TypeError, + "unorderable types: %T %s %T", w_obj1, symbol, w_obj2) return func_with_new_name(comparison_impl, 'comparison_%s_impl'%left.strip('_')) diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -502,8 +502,7 @@ Example: bytes.fromhex('B9 01EF') -> b'\xb9\x01\xef'. """ if not space.is_w(space.type(w_hexstring), space.w_unicode): - raise operationerrfmt(space.w_TypeError, "must be str, not %T", - w_hexstring) + raise oefmt(space.w_TypeError, "must be str, not %T", w_hexstring) from pypy.objspace.std.bytearrayobject import _hexstring_to_array hexstring = space.unicode_w(w_hexstring) bytes = ''.join(_hexstring_to_array(space, hexstring)) @@ -614,8 +613,8 @@ else: raise if not 0 <= char < 256: - raise operationerrfmt(space.w_ValueError, - "character must be in range(256)") + raise oefmt(space.w_ValueError, + "character must be in range(256)") return space.newbool(self._value.find(chr(char)) >= 0) return self._StringMethods_descr_contains(space, w_sub) @@ -726,8 +725,8 @@ if w_bytes_method is not None: w_bytes = space.get_and_call_function(w_bytes_method, w_source) if not space.isinstance_w(w_bytes, space.w_bytes): - msg = "__bytes__ returned non-bytes (type '%T')" - raise operationerrfmt(space.w_TypeError, msg, w_bytes) + raise oefmt(space.w_TypeError, + "__bytes__ returned non-bytes (type '%T')", w_bytes) return [c for c in space.bytes_w(w_bytes)] # String-like argument diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -404,13 +404,12 @@ def _wrap_expected_length(self, expected, got): if got > expected: - raise operationerrfmt(self.w_ValueError, - "too many values to unpack (expected %d)", - expected) + raise oefmt(self.w_ValueError, + "too many values to unpack (expected %d)", expected) else: - raise operationerrfmt(self.w_ValueError, - "need more than %d value%s to unpack", - got, got != 1 and "s" or "") + raise oefmt(self.w_ValueError, + "need more than %d value%s to unpack", + got, "" if got == 1 else "s") def unpackiterable(self, w_obj, expected_length=-1): if isinstance(w_obj, W_AbstractTupleObject) and self._uses_tuple_iter(w_obj): diff --git a/pypy/objspace/std/stringmethods.py 
b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -44,8 +44,8 @@ list_to = makebytesdata_w(space, w_to) if len(list_from) != len(list_to): - raise operationerrfmt(space.w_ValueError, - "maketrans arguments must have same length") + raise oefmt(space.w_ValueError, + "maketrans arguments must have same length") for i in range(len(list_from)): pos_from = ord(list_from[i]) @@ -510,9 +510,9 @@ if not e.match(space, space.w_TypeError): raise wanted = self._generic_name() - raise operationerrfmt(space.w_TypeError, - "startswith first arg must be %s or a tuple " - "of %s, not %T", wanted, wanted, w_prefix) + raise oefmt(space.w_TypeError, + "startswith first arg must be %s or a tuple of %s, " + "not %T", wanted, wanted, w_prefix) return space.newbool(res) def _startswith(self, space, value, w_prefix, start, end): @@ -532,9 +532,9 @@ if not e.match(space, space.w_TypeError): raise wanted = self._generic_name() - raise operationerrfmt(space.w_TypeError, - "endswith first arg must be %s or a tuple " - "of %s, not %T", wanted, wanted, w_suffix) + raise oefmt(space.w_TypeError, + "endswith first arg must be %s or a tuple of %s, not " + "%T", wanted, wanted, w_suffix) return space.newbool(res) def _endswith(self, space, value, w_prefix, start, end): diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -99,9 +99,8 @@ def _op_val(self, space, w_other): if isinstance(w_other, W_UnicodeObject): return w_other._value - raise operationerrfmt(space.w_TypeError, - "Can't convert '%T' object to str implicitly", - w_other) + raise oefmt(space.w_TypeError, + "Can't convert '%T' object to str implicitly", w_other) def _chr(self, char): assert len(char) == 1 From noreply at buildbot.pypy.org Tue Feb 4 00:05:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 4 Feb 2014 00:05:50 +0100 (CET) Subject: [pypy-commit] pypy default: A hack, which very indirectly might fix the failing test_pypy_c (test_decode_ascii). The Message-ID: <20140203230550.D93001C0352@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69066:ff363fceb3c5 Date: 2014-02-04 00:03 +0100 http://bitbucket.org/pypy/pypy/changeset/ff363fceb3c5/ Log: A hack, which very indirectly might fix the failing test_pypy_c (test_decode_ascii). The issue is that str_decode_ascii() calls space.newtuple() indirectly, which before this change would call space.int_w(). Previously, it was thought that it would call any int_w() method, including the one from micronumpy, whereas in truth it can only call W_IntObject.int_w(). The one from micronumpy contains nowadays calls to random other space functions. 
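
A minimal stand-alone sketch of the pattern described above, using hypothetical Space / W_IntObject / W_IntSubclass classes rather than PyPy's real ones: the tuple specialisation only unwraps when type(w_obj) is exactly the wrapped-int class, and then calls that object's own int_w() method, so no subclass override (such as micronumpy's, which calls back into other space functions) can run on the fast path. This is an illustrative approximation of the changesets that follow, not their actual code.

    class Space(object):
        """Toy stand-in for PyPy's object space (hypothetical)."""
        def wrap(self, x):
            return W_IntObject(x) if type(x) is int else x

    class W_IntObject(object):
        def __init__(self, intval):
            self.intval = intval
        def int_w(self, space):
            return self.intval

    class W_IntSubclass(W_IntObject):
        """Stands in for a wrapped-int subclass whose int_w() does extra work."""
        def int_w(self, space):
            print("subclass int_w called")  # the kind of side trip the fast path must avoid
            return self.intval

    def make_specialised_pair(space, w_a, w_b):
        # Only the exact W_IntObject class takes the unwrapped fast path, so the
        # direct int_w() calls below can never reach a subclass override.
        if type(w_a) is W_IntObject and type(w_b) is W_IntObject:
            return ('ii', w_a.int_w(space), w_b.int_w(space))
        return ('oo', w_a, w_b)

    space = Space()
    assert make_specialised_pair(space, space.wrap(1), space.wrap(2))[0] == 'ii'
    assert make_specialised_pair(space, W_IntSubclass(1), space.wrap(2))[0] == 'oo'

The second assert mirrors, roughly, the situation the follow-up "Test and fix" changeset adds a test for: subclass instances fall back to the generic pair instead of being silently unwrapped.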
diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -27,11 +27,11 @@ w_obj = values_w[i] val_type = typetuple[i] if val_type == int: - unwrapped = space.int_w(w_obj) + unwrapped = w_obj.int_w(space) elif val_type == float: - unwrapped = space.float_w(w_obj) + unwrapped = w_obj.float_w(space) elif val_type == str: - unwrapped = space.str_w(w_obj) + unwrapped = w_obj.str_w(space) elif val_type == object: unwrapped = w_obj else: @@ -127,16 +127,15 @@ Cls_ff = make_specialised_class((float, float)) def makespecialisedtuple(space, list_w): + from pypy.objspace.std.intobject import W_IntObject + from pypy.objspace.std.floatobject import W_FloatObject if len(list_w) == 2: w_arg1, w_arg2 = list_w - w_type1 = space.type(w_arg1) - if w_type1 is space.w_int: - w_type2 = space.type(w_arg2) - if w_type2 is space.w_int: + if isinstance(w_arg1, W_IntObject): + if isinstance(w_arg2, W_IntObject): return Cls_ii(space, w_arg1, w_arg2) - elif w_type1 is space.w_float: - w_type2 = space.type(w_arg2) - if w_type2 is space.w_float: + elif isinstance(w_arg1, W_FloatObject): + if isinstance(w_arg2, W_FloatObject): return Cls_ff(space, w_arg1, w_arg2) return Cls_oo(space, w_arg1, w_arg2) else: From noreply at buildbot.pypy.org Tue Feb 4 00:12:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 4 Feb 2014 00:12:16 +0100 (CET) Subject: [pypy-commit] pypy default: Test and fix Message-ID: <20140203231216.CC10D1C0352@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69067:36ce13e8847d Date: 2014-02-04 00:11 +0100 http://bitbucket.org/pypy/pypy/changeset/36ce13e8847d/ Log: Test and fix diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -131,11 +131,11 @@ from pypy.objspace.std.floatobject import W_FloatObject if len(list_w) == 2: w_arg1, w_arg2 = list_w - if isinstance(w_arg1, W_IntObject): - if isinstance(w_arg2, W_IntObject): + if type(w_arg1) is W_IntObject: + if type(w_arg2) is W_IntObject: return Cls_ii(space, w_arg1, w_arg2) - elif isinstance(w_arg1, W_FloatObject): - if isinstance(w_arg2, W_FloatObject): + elif type(w_arg1) is W_FloatObject: + if type(w_arg2) is W_FloatObject: return Cls_ff(space, w_arg1, w_arg2) return Cls_oo(space, w_arg1, w_arg2) else: diff --git a/pypy/objspace/std/test/test_specialisedtupleobject.py b/pypy/objspace/std/test/test_specialisedtupleobject.py --- a/pypy/objspace/std/test/test_specialisedtupleobject.py +++ b/pypy/objspace/std/test/test_specialisedtupleobject.py @@ -214,6 +214,14 @@ assert a == (1, 2.2,) + b assert not a != (1, 2.2) + b + def test_subclasses(self): + class I(int): pass + class F(float): pass + t = (I(42), I(43)) + assert type(t[0]) is I + t = (F(42), F(43)) + assert type(t[0]) is F + class AppTestAll(test_tupleobject.AppTestW_TupleObject): spaceconfig = {"objspace.std.withspecialisedtuple": True} From noreply at buildbot.pypy.org Tue Feb 4 02:04:01 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 4 Feb 2014 02:04:01 +0100 (CET) Subject: [pypy-commit] pypy default: better error messages for newlist_hint() and resizelist_hint() Message-ID: <20140204010401.9F9251C0352@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r69068:372d7cd3d6b7 Date: 2014-02-04 01:03 +0000 
http://bitbucket.org/pypy/pypy/changeset/372d7cd3d6b7/ Log: better error messages for newlist_hint() and resizelist_hint() diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -338,9 +338,10 @@ _about_ = newlist_hint def compute_result_annotation(self, s_sizehint): - from rpython.annotator.model import SomeInteger + from rpython.annotator.model import SomeInteger, AnnotatorError - assert isinstance(s_sizehint, SomeInteger) + if not isinstance(s_sizehint, SomeInteger): + raise AnnotatorError("newlist_hint() argument must be an int") s_l = self.bookkeeper.newlist() s_l.listdef.listitem.resize() return s_l @@ -365,8 +366,10 @@ def compute_result_annotation(self, s_l, s_sizehint): from rpython.annotator import model as annmodel - assert isinstance(s_l, annmodel.SomeList) - assert isinstance(s_sizehint, annmodel.SomeInteger) + if not isinstance(s_l, annmodel.SomeList): + raise annmodel.AnnotatorError("First argument must be a list") + if not isinstance(s_sizehint, annmodel.SomeInteger): + raise annmodel.AnnotatorError("Second argument must be an integer") s_l.listdef.listitem.resize() def specialize_call(self, hop): From noreply at buildbot.pypy.org Tue Feb 4 02:05:18 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 4 Feb 2014 02:05:18 +0100 (CET) Subject: [pypy-commit] pypy NonConstant: kill unused option 'need_const' Message-ID: <20140204010518.C02DD1C0352@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: NonConstant Changeset: r69069:0d5d13d5e57a Date: 2014-02-01 17:53 +0000 http://bitbucket.org/pypy/pypy/changeset/0d5d13d5e57a/ Log: kill unused option 'need_const' diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -205,7 +205,7 @@ def immutableconstant(self, const): return self.immutablevalue(const.value) - def immutablevalue(self, x, need_const=True): + def immutablevalue(self, x): """The most precise SomeValue instance that contains the immutable value x.""" # convert unbound methods to the underlying function @@ -241,73 +241,51 @@ elif tp is bytearray: result = SomeByteArray() elif tp is tuple: - result = SomeTuple(items = [self.immutablevalue(e, need_const) for e in x]) + result = SomeTuple(items = [self.immutablevalue(e) for e in x]) elif tp is float: result = SomeFloat() elif tp is list: - if need_const: - key = Constant(x) - try: - return self.immutable_cache[key] - except KeyError: - result = SomeList(ListDef(self, s_ImpossibleValue)) - self.immutable_cache[key] = result - for e in x: - result.listdef.generalize(self.immutablevalue(e)) - result.const_box = key - return result - else: - listdef = ListDef(self, s_ImpossibleValue) + key = Constant(x) + try: + return self.immutable_cache[key] + except KeyError: + result = SomeList(ListDef(self, s_ImpossibleValue)) + self.immutable_cache[key] = result for e in x: - listdef.generalize(self.immutablevalue(e, False)) - result = SomeList(listdef) + result.listdef.generalize(self.immutablevalue(e)) + result.const_box = key + return result elif tp is dict or tp is r_dict or tp is SomeOrderedDict.knowntype: if tp is SomeOrderedDict.knowntype: cls = SomeOrderedDict else: cls = SomeDict - if need_const: - key = Constant(x) - try: - return self.immutable_cache[key] - except KeyError: - result = cls(DictDef(self, - s_ImpossibleValue, - s_ImpossibleValue, - is_r_dict = tp is r_dict)) - self.immutable_cache[key] = result - if tp is r_dict: - 
s_eqfn = self.immutablevalue(x.key_eq) - s_hashfn = self.immutablevalue(x.key_hash) - result.dictdef.dictkey.update_rdict_annotations(s_eqfn, - s_hashfn) - seen_elements = 0 - while seen_elements != len(x): - items = x.items() - for ek, ev in items: - result.dictdef.generalize_key(self.immutablevalue(ek)) - result.dictdef.generalize_value(self.immutablevalue(ev)) - result.dictdef.seen_prebuilt_key(ek) - seen_elements = len(items) - # if the dictionary grew during the iteration, - # start over again - result.const_box = key - return result - else: - dictdef = DictDef(self, - s_ImpossibleValue, - s_ImpossibleValue, - is_r_dict = tp is r_dict) + key = Constant(x) + try: + return self.immutable_cache[key] + except KeyError: + result = cls(DictDef(self, + s_ImpossibleValue, + s_ImpossibleValue, + is_r_dict = tp is r_dict)) + self.immutable_cache[key] = result if tp is r_dict: s_eqfn = self.immutablevalue(x.key_eq) s_hashfn = self.immutablevalue(x.key_hash) - dictdef.dictkey.update_rdict_annotations(s_eqfn, - s_hashfn) - for ek, ev in x.iteritems(): - dictdef.generalize_key(self.immutablevalue(ek, False)) - dictdef.generalize_value(self.immutablevalue(ev, False)) - dictdef.seen_prebuilt_key(ek) - result = cls(dictdef) + result.dictdef.dictkey.update_rdict_annotations(s_eqfn, + s_hashfn) + seen_elements = 0 + while seen_elements != len(x): + items = x.items() + for ek, ev in items: + result.dictdef.generalize_key(self.immutablevalue(ek)) + result.dictdef.generalize_value(self.immutablevalue(ev)) + result.dictdef.seen_prebuilt_key(ek) + seen_elements = len(items) + # if the dictionary grew during the iteration, + # start over again + result.const_box = key + return result elif tp is weakref.ReferenceType: x1 = x() if x1 is None: @@ -332,11 +310,11 @@ if hasattr(x, 'im_self') and hasattr(x, 'im_func'): # on top of PyPy, for cases like 'l.append' where 'l' is a # global constant list, the find_method() returns non-None - s_self = self.immutablevalue(x.im_self, need_const) + s_self = self.immutablevalue(x.im_self) result = s_self.find_method(x.im_func.__name__) elif hasattr(x, '__self__') and x.__self__ is not None: # for cases like 'l.append' where 'l' is a global constant list - s_self = self.immutablevalue(x.__self__, need_const) + s_self = self.immutablevalue(x.__self__) result = s_self.find_method(x.__name__) assert result is not None else: @@ -360,8 +338,7 @@ return s_None else: raise Exception("Don't know how to represent %r" % (x,)) - if need_const: - result.const = x + result.const = x return result def getdesc(self, pyobj): From noreply at buildbot.pypy.org Tue Feb 4 02:05:24 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Tue, 4 Feb 2014 02:05:24 +0100 (CET) Subject: [pypy-commit] pypy NonConstant: hg merge default Message-ID: <20140204010524.04BFE1C0352@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: NonConstant Changeset: r69070:917b04a0e6af Date: 2014-02-04 01:04 +0000 http://bitbucket.org/pypy/pypy/changeset/917b04a0e6af/ Log: hg merge default diff too long, truncating to 2000 out of 6318 lines diff --git a/dotviewer/graphdisplay.py b/dotviewer/graphdisplay.py --- a/dotviewer/graphdisplay.py +++ b/dotviewer/graphdisplay.py @@ -136,6 +136,7 @@ Click on objects to move around Drag with the left mouse button to zoom in/out Drag with the right mouse button to scroll + Use scroll wheel do scroll up or down """.replace('\n ', '\n').strip() # poor man's dedent @@ -613,6 +614,19 @@ def process_MouseButtonUp(self, event): self.dragging = None pygame.event.set_grab(False) + # 
handle directional scrolling + if event.button == 4: + self.pan((0, -1)) + return + if event.button == 5: + self.pan((0, 1)) + return + if event.button == 6: + self.pan((-1, 0)) + return + if event.button == 7: + self.pan((1, 0)) + return if self.click_time is not None and abs(time.time() - self.click_time) < 1: # click (no significant dragging) self.notifyclick(self.click_origin) diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -1,11 +1,11 @@ """ Arguments objects. """ - -from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.debug import make_sure_not_resized from rpython.rlib import jit +from pypy.interpreter.error import OperationError, oefmt + class Arguments(object): """ @@ -86,9 +86,9 @@ args_w = space.fixedview(w_stararg) except OperationError, e: if e.match(space, space.w_TypeError): - raise operationerrfmt( - space.w_TypeError, - "argument after * must be a sequence, not %T", w_stararg) + raise oefmt(space.w_TypeError, + "argument after * must be a sequence, not %T", + w_stararg) raise self.arguments_w = self.arguments_w + args_w @@ -113,10 +113,9 @@ w_keys = space.call_method(w_starstararg, "keys") except OperationError, e: if e.match(space, space.w_AttributeError): - raise operationerrfmt( - space.w_TypeError, - "argument after ** must be a mapping, not %T", - w_starstararg) + raise oefmt(space.w_TypeError, + "argument after ** must be a mapping, not %T", + w_starstararg) raise keys_w = space.unpackiterable(w_keys) keywords_w = [None] * len(keys_w) @@ -281,8 +280,7 @@ self._match_signature(w_firstarg, scope_w, signature, defaults_w, 0) except ArgErr, e: - raise operationerrfmt(self.space.w_TypeError, - "%s() %s", fnname, e.getmsg()) + raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg()) return signature.scope_length() def _parse(self, w_firstarg, signature, defaults_w, blindargs=0): @@ -304,8 +302,7 @@ try: return self._parse(w_firstarg, signature, defaults_w, blindargs) except ArgErr, e: - raise operationerrfmt(self.space.w_TypeError, - "%s() %s", fnname, e.getmsg()) + raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg()) @staticmethod def frompacked(space, w_args=None, w_kwds=None): @@ -344,10 +341,9 @@ for key in keywords: for otherkey in existingkeywords: if otherkey == key: - raise operationerrfmt(space.w_TypeError, - "got multiple values " - "for keyword argument " - "'%s'", key) + raise oefmt(space.w_TypeError, + "got multiple values for keyword argument '%s'", + key) def _do_combine_starstarargs_wrapped(space, keys_w, w_starstararg, keywords, keywords_w, existingkeywords): @@ -367,10 +363,9 @@ raise else: if existingkeywords and key in existingkeywords: - raise operationerrfmt(space.w_TypeError, - "got multiple values " - "for keyword argument " - "'%s'", key) + raise oefmt(space.w_TypeError, + "got multiple values for keyword argument '%s'", + key) keywords[i] = key keywords_w[i] = space.getitem(w_starstararg, w_key) i += 1 diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -1,12 +1,18 @@ # Generated by tools/asdl_py.py -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter import typedef -from pypy.interpreter.gateway import interp2app -from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.unroll import unrolling_iterable from rpython.tool.pairtype 
import extendabletype from rpython.tool.sourcetools import func_with_new_name +from pypy.interpreter import typedef +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import interp2app + + +def raise_attriberr(space, w_obj, name): + raise oefmt(space.w_AttributeError, + "'%T' object has no attribute '%s'", w_obj, name) + def check_string(space, w_obj): if not (space.isinstance_w(w_obj, space.w_str) or @@ -70,11 +76,13 @@ continue # field is optional w_obj = self.getdictvalue(space, missing) if w_obj is None: - err = "required field \"%s\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "required field \"%s\" missing from %s", + missing, host) else: - err = "incorrect type for field \"%s\" in %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "incorrect type for field \"%s\" in %s", + missing, host) raise AssertionError("should not reach here") @@ -2793,7 +2801,7 @@ def Module_get_body(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2834,7 +2842,7 @@ def Interactive_get_body(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2879,7 +2887,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def Expression_set_body(space, w_self, w_new_value): @@ -2922,7 +2930,7 @@ def Suite_get_body(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2967,7 +2975,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') + raise_attriberr(space, w_self, 'lineno') return space.wrap(w_self.lineno) def stmt_set_lineno(space, w_self, w_new_value): @@ -2988,7 +2996,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') + raise_attriberr(space, w_self, 'col_offset') return space.wrap(w_self.col_offset) def stmt_set_col_offset(space, w_self, w_new_value): @@ -3018,7 +3026,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') return space.wrap(w_self.name) def FunctionDef_set_name(space, w_self, w_new_value): @@ -3039,7 +3047,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') return space.wrap(w_self.args) 
def FunctionDef_set_args(space, w_self, w_new_value): @@ -3056,7 +3064,7 @@ def FunctionDef_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3072,7 +3080,7 @@ def FunctionDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'decorator_list') + raise_attriberr(space, w_self, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: list_w = [] @@ -3121,7 +3129,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') return space.wrap(w_self.name) def ClassDef_set_name(space, w_self, w_new_value): @@ -3138,7 +3146,7 @@ def ClassDef_get_bases(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'bases') + raise_attriberr(space, w_self, 'bases') if w_self.w_bases is None: if w_self.bases is None: list_w = [] @@ -3154,7 +3162,7 @@ def ClassDef_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3170,7 +3178,7 @@ def ClassDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'decorator_list') + raise_attriberr(space, w_self, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: list_w = [] @@ -3220,7 +3228,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Return_set_value(space, w_self, w_new_value): @@ -3263,7 +3271,7 @@ def Delete_get_targets(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'targets') + raise_attriberr(space, w_self, 'targets') if w_self.w_targets is None: if w_self.targets is None: list_w = [] @@ -3304,7 +3312,7 @@ def Assign_get_targets(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'targets') + raise_attriberr(space, w_self, 'targets') if w_self.w_targets is None: if w_self.targets is None: list_w = [] @@ -3324,7 +3332,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Assign_set_value(space, w_self, w_new_value): @@ -3373,7 +3381,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') + raise_attriberr(space, w_self, 'target') return 
space.wrap(w_self.target) def AugAssign_set_target(space, w_self, w_new_value): @@ -3396,7 +3404,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return operator_to_class[w_self.op - 1]() def AugAssign_set_op(space, w_self, w_new_value): @@ -3419,7 +3427,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def AugAssign_set_value(space, w_self, w_new_value): @@ -3468,7 +3476,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'dest') + raise_attriberr(space, w_self, 'dest') return space.wrap(w_self.dest) def Print_set_dest(space, w_self, w_new_value): @@ -3487,7 +3495,7 @@ def Print_get_values(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') + raise_attriberr(space, w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -3507,7 +3515,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'nl') + raise_attriberr(space, w_self, 'nl') return space.wrap(w_self.nl) def Print_set_nl(space, w_self, w_new_value): @@ -3555,7 +3563,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') + raise_attriberr(space, w_self, 'target') return space.wrap(w_self.target) def For_set_target(space, w_self, w_new_value): @@ -3578,7 +3586,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'iter') + raise_attriberr(space, w_self, 'iter') return space.wrap(w_self.iter) def For_set_iter(space, w_self, w_new_value): @@ -3597,7 +3605,7 @@ def For_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3613,7 +3621,7 @@ def For_get_orelse(space, w_self): if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3662,7 +3670,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return space.wrap(w_self.test) def While_set_test(space, w_self, w_new_value): @@ -3681,7 +3689,7 @@ def While_get_body(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3697,7 
+3705,7 @@ def While_get_orelse(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3745,7 +3753,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return space.wrap(w_self.test) def If_set_test(space, w_self, w_new_value): @@ -3764,7 +3772,7 @@ def If_get_body(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3780,7 +3788,7 @@ def If_get_orelse(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3828,7 +3836,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'context_expr') + raise_attriberr(space, w_self, 'context_expr') return space.wrap(w_self.context_expr) def With_set_context_expr(space, w_self, w_new_value): @@ -3851,7 +3859,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'optional_vars') + raise_attriberr(space, w_self, 'optional_vars') return space.wrap(w_self.optional_vars) def With_set_optional_vars(space, w_self, w_new_value): @@ -3870,7 +3878,7 @@ def With_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3917,7 +3925,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'type') + raise_attriberr(space, w_self, 'type') return space.wrap(w_self.type) def Raise_set_type(space, w_self, w_new_value): @@ -3940,7 +3948,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'inst') + raise_attriberr(space, w_self, 'inst') return space.wrap(w_self.inst) def Raise_set_inst(space, w_self, w_new_value): @@ -3963,7 +3971,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'tback') + raise_attriberr(space, w_self, 'tback') return space.wrap(w_self.tback) def Raise_set_tback(space, w_self, w_new_value): @@ -4008,7 +4016,7 @@ def TryExcept_get_body(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -4024,7 +4032,7 @@ def TryExcept_get_handlers(space, w_self): 
if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'handlers') + raise_attriberr(space, w_self, 'handlers') if w_self.w_handlers is None: if w_self.handlers is None: list_w = [] @@ -4040,7 +4048,7 @@ def TryExcept_get_orelse(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -4085,7 +4093,7 @@ def TryFinally_get_body(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -4101,7 +4109,7 @@ def TryFinally_get_finalbody(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'finalbody') + raise_attriberr(space, w_self, 'finalbody') if w_self.w_finalbody is None: if w_self.finalbody is None: list_w = [] @@ -4148,7 +4156,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return space.wrap(w_self.test) def Assert_set_test(space, w_self, w_new_value): @@ -4171,7 +4179,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'msg') + raise_attriberr(space, w_self, 'msg') return space.wrap(w_self.msg) def Assert_set_msg(space, w_self, w_new_value): @@ -4215,7 +4223,7 @@ def Import_get_names(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') + raise_attriberr(space, w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4260,7 +4268,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'module') + raise_attriberr(space, w_self, 'module') return space.wrap(w_self.module) def ImportFrom_set_module(space, w_self, w_new_value): @@ -4280,7 +4288,7 @@ def ImportFrom_get_names(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') + raise_attriberr(space, w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4300,7 +4308,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'level') + raise_attriberr(space, w_self, 'level') return space.wrap(w_self.level) def ImportFrom_set_level(space, w_self, w_new_value): @@ -4348,7 +4356,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def Exec_set_body(space, w_self, w_new_value): @@ -4371,7 +4379,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise 
operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'globals') + raise_attriberr(space, w_self, 'globals') return space.wrap(w_self.globals) def Exec_set_globals(space, w_self, w_new_value): @@ -4394,7 +4402,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'locals') + raise_attriberr(space, w_self, 'locals') return space.wrap(w_self.locals) def Exec_set_locals(space, w_self, w_new_value): @@ -4439,7 +4447,7 @@ def Global_get_names(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') + raise_attriberr(space, w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4484,7 +4492,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Expr_set_value(space, w_self, w_new_value): @@ -4582,7 +4590,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') + raise_attriberr(space, w_self, 'lineno') return space.wrap(w_self.lineno) def expr_set_lineno(space, w_self, w_new_value): @@ -4603,7 +4611,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') + raise_attriberr(space, w_self, 'col_offset') return space.wrap(w_self.col_offset) def expr_set_col_offset(space, w_self, w_new_value): @@ -4633,7 +4641,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return boolop_to_class[w_self.op - 1]() def BoolOp_set_op(space, w_self, w_new_value): @@ -4652,7 +4660,7 @@ def BoolOp_get_values(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') + raise_attriberr(space, w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -4698,7 +4706,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'left') + raise_attriberr(space, w_self, 'left') return space.wrap(w_self.left) def BinOp_set_left(space, w_self, w_new_value): @@ -4721,7 +4729,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return operator_to_class[w_self.op - 1]() def BinOp_set_op(space, w_self, w_new_value): @@ -4744,7 +4752,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'right') + raise_attriberr(space, w_self, 'right') return space.wrap(w_self.right) def BinOp_set_right(space, w_self, w_new_value): @@ -4793,7 +4801,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise 
operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return unaryop_to_class[w_self.op - 1]() def UnaryOp_set_op(space, w_self, w_new_value): @@ -4816,7 +4824,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'operand') + raise_attriberr(space, w_self, 'operand') return space.wrap(w_self.operand) def UnaryOp_set_operand(space, w_self, w_new_value): @@ -4864,7 +4872,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') return space.wrap(w_self.args) def Lambda_set_args(space, w_self, w_new_value): @@ -4885,7 +4893,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def Lambda_set_body(space, w_self, w_new_value): @@ -4933,7 +4941,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return space.wrap(w_self.test) def IfExp_set_test(space, w_self, w_new_value): @@ -4956,7 +4964,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def IfExp_set_body(space, w_self, w_new_value): @@ -4979,7 +4987,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') return space.wrap(w_self.orelse) def IfExp_set_orelse(space, w_self, w_new_value): @@ -5024,7 +5032,7 @@ def Dict_get_keys(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'keys') + raise_attriberr(space, w_self, 'keys') if w_self.w_keys is None: if w_self.keys is None: list_w = [] @@ -5040,7 +5048,7 @@ def Dict_get_values(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') + raise_attriberr(space, w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -5083,7 +5091,7 @@ def Set_get_elts(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') + raise_attriberr(space, w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -5128,7 +5136,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') + raise_attriberr(space, w_self, 'elt') return space.wrap(w_self.elt) def ListComp_set_elt(space, w_self, w_new_value): @@ -5147,7 +5155,7 @@ def ListComp_get_generators(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no 
attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5193,7 +5201,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') + raise_attriberr(space, w_self, 'elt') return space.wrap(w_self.elt) def SetComp_set_elt(space, w_self, w_new_value): @@ -5212,7 +5220,7 @@ def SetComp_get_generators(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5258,7 +5266,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'key') + raise_attriberr(space, w_self, 'key') return space.wrap(w_self.key) def DictComp_set_key(space, w_self, w_new_value): @@ -5281,7 +5289,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def DictComp_set_value(space, w_self, w_new_value): @@ -5300,7 +5308,7 @@ def DictComp_get_generators(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5347,7 +5355,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') + raise_attriberr(space, w_self, 'elt') return space.wrap(w_self.elt) def GeneratorExp_set_elt(space, w_self, w_new_value): @@ -5366,7 +5374,7 @@ def GeneratorExp_get_generators(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5412,7 +5420,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Yield_set_value(space, w_self, w_new_value): @@ -5459,7 +5467,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'left') + raise_attriberr(space, w_self, 'left') return space.wrap(w_self.left) def Compare_set_left(space, w_self, w_new_value): @@ -5478,7 +5486,7 @@ def Compare_get_ops(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ops') + raise_attriberr(space, w_self, 'ops') if w_self.w_ops is None: if w_self.ops is None: list_w = [] @@ -5494,7 +5502,7 @@ def Compare_get_comparators(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object 
has no attribute '%s'", w_self, 'comparators') + raise_attriberr(space, w_self, 'comparators') if w_self.w_comparators is None: if w_self.comparators is None: list_w = [] @@ -5542,7 +5550,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'func') + raise_attriberr(space, w_self, 'func') return space.wrap(w_self.func) def Call_set_func(space, w_self, w_new_value): @@ -5561,7 +5569,7 @@ def Call_get_args(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') if w_self.w_args is None: if w_self.args is None: list_w = [] @@ -5577,7 +5585,7 @@ def Call_get_keywords(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'keywords') + raise_attriberr(space, w_self, 'keywords') if w_self.w_keywords is None: if w_self.keywords is None: list_w = [] @@ -5597,7 +5605,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'starargs') + raise_attriberr(space, w_self, 'starargs') return space.wrap(w_self.starargs) def Call_set_starargs(space, w_self, w_new_value): @@ -5620,7 +5628,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 64: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'kwargs') + raise_attriberr(space, w_self, 'kwargs') return space.wrap(w_self.kwargs) def Call_set_kwargs(space, w_self, w_new_value): @@ -5673,7 +5681,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Repr_set_value(space, w_self, w_new_value): @@ -5720,7 +5728,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'n') + raise_attriberr(space, w_self, 'n') return w_self.n def Num_set_n(space, w_self, w_new_value): @@ -5765,7 +5773,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 's') + raise_attriberr(space, w_self, 's') return w_self.s def Str_set_s(space, w_self, w_new_value): @@ -5810,7 +5818,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Attribute_set_value(space, w_self, w_new_value): @@ -5833,7 +5841,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'attr') + raise_attriberr(space, w_self, 'attr') return space.wrap(w_self.attr) def Attribute_set_attr(space, w_self, w_new_value): @@ -5854,7 +5862,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 
'ctx') return expr_context_to_class[w_self.ctx - 1]() def Attribute_set_ctx(space, w_self, w_new_value): @@ -5903,7 +5911,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Subscript_set_value(space, w_self, w_new_value): @@ -5926,7 +5934,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'slice') + raise_attriberr(space, w_self, 'slice') return space.wrap(w_self.slice) def Subscript_set_slice(space, w_self, w_new_value): @@ -5949,7 +5957,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Subscript_set_ctx(space, w_self, w_new_value): @@ -5998,7 +6006,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'id') + raise_attriberr(space, w_self, 'id') return space.wrap(w_self.id) def Name_set_id(space, w_self, w_new_value): @@ -6019,7 +6027,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Name_set_ctx(space, w_self, w_new_value): @@ -6063,7 +6071,7 @@ def List_get_elts(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') + raise_attriberr(space, w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -6083,7 +6091,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def List_set_ctx(space, w_self, w_new_value): @@ -6128,7 +6136,7 @@ def Tuple_get_elts(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') + raise_attriberr(space, w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -6148,7 +6156,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Tuple_set_ctx(space, w_self, w_new_value): @@ -6197,7 +6205,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return w_self.value def Const_set_value(space, w_self, w_new_value): @@ -6315,7 +6323,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lower') + raise_attriberr(space, w_self, 'lower') return 
space.wrap(w_self.lower) def Slice_set_lower(space, w_self, w_new_value): @@ -6338,7 +6346,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'upper') + raise_attriberr(space, w_self, 'upper') return space.wrap(w_self.upper) def Slice_set_upper(space, w_self, w_new_value): @@ -6361,7 +6369,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'step') + raise_attriberr(space, w_self, 'step') return space.wrap(w_self.step) def Slice_set_step(space, w_self, w_new_value): @@ -6406,7 +6414,7 @@ def ExtSlice_get_dims(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'dims') + raise_attriberr(space, w_self, 'dims') if w_self.w_dims is None: if w_self.dims is None: list_w = [] @@ -6451,7 +6459,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Index_set_value(space, w_self, w_new_value): @@ -6722,7 +6730,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') + raise_attriberr(space, w_self, 'target') return space.wrap(w_self.target) def comprehension_set_target(space, w_self, w_new_value): @@ -6745,7 +6753,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'iter') + raise_attriberr(space, w_self, 'iter') return space.wrap(w_self.iter) def comprehension_set_iter(space, w_self, w_new_value): @@ -6764,7 +6772,7 @@ def comprehension_get_ifs(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ifs') + raise_attriberr(space, w_self, 'ifs') if w_self.w_ifs is None: if w_self.ifs is None: list_w = [] @@ -6811,7 +6819,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') + raise_attriberr(space, w_self, 'lineno') return space.wrap(w_self.lineno) def excepthandler_set_lineno(space, w_self, w_new_value): @@ -6832,7 +6840,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') + raise_attriberr(space, w_self, 'col_offset') return space.wrap(w_self.col_offset) def excepthandler_set_col_offset(space, w_self, w_new_value): @@ -6862,7 +6870,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'type') + raise_attriberr(space, w_self, 'type') return space.wrap(w_self.type) def ExceptHandler_set_type(space, w_self, w_new_value): @@ -6885,7 +6893,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') 
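Every accessor touched by these hunks follows the same shape, so the rewrite is mechanical: the long inline operationerrfmt(...) call becomes a single raise_attriberr(...) call per field, and the message template lives in one place. A condensed sketch of the pattern, simplified from the hunks above (the real generated getters also consult the instance __dict__ first and special-case sequence fields):

    from pypy.interpreter.error import oefmt

    def raise_attriberr(space, w_obj, name):
        # '%T' is expanded by oefmt to the type name of w_obj
        raise oefmt(space.w_AttributeError,
                    "'%T' object has no attribute '%s'", w_obj, name)

    def ExceptHandler_get_name(space, w_self):
        # bit 8 of initialization_state is the flag for the 'name' field
        if not w_self.initialization_state & 8:
            raise_attriberr(space, w_self, 'name')
        return space.wrap(w_self.name)
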
return space.wrap(w_self.name) def ExceptHandler_set_name(space, w_self, w_new_value): @@ -6904,7 +6912,7 @@ def ExceptHandler_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -6947,7 +6955,7 @@ def arguments_get_args(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') if w_self.w_args is None: if w_self.args is None: list_w = [] @@ -6967,7 +6975,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'vararg') + raise_attriberr(space, w_self, 'vararg') return space.wrap(w_self.vararg) def arguments_set_vararg(space, w_self, w_new_value): @@ -6991,7 +6999,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'kwarg') + raise_attriberr(space, w_self, 'kwarg') return space.wrap(w_self.kwarg) def arguments_set_kwarg(space, w_self, w_new_value): @@ -7011,7 +7019,7 @@ def arguments_get_defaults(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'defaults') + raise_attriberr(space, w_self, 'defaults') if w_self.w_defaults is None: if w_self.defaults is None: list_w = [] @@ -7060,7 +7068,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'arg') + raise_attriberr(space, w_self, 'arg') return space.wrap(w_self.arg) def keyword_set_arg(space, w_self, w_new_value): @@ -7081,7 +7089,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def keyword_set_value(space, w_self, w_new_value): @@ -7129,7 +7137,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') return space.wrap(w_self.name) def alias_set_name(space, w_self, w_new_value): @@ -7150,7 +7158,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'asname') + raise_attriberr(space, w_self, 'asname') return space.wrap(w_self.asname) def alias_set_asname(space, w_self, w_new_value): diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -409,8 +409,7 @@ self.emit(" if w_obj is not None:", 1) self.emit(" return w_obj", 1) self.emit("if not w_self.initialization_state & %s:" % (flag,), 1) - self.emit("raise operationerrfmt(space.w_AttributeError, \"'%%T' object has no attribute '%%s'\", w_self, '%s')" % - (field.name,), 2) + self.emit("raise_attriberr(space, w_self, '%s')" % (field.name,), 2) if 
field.seq: self.emit("if w_self.w_%s is None:" % (field.name,), 1) self.emit("if w_self.%s is None:" % (field.name,), 2) @@ -537,14 +536,20 @@ HEAD = """# Generated by tools/asdl_py.py -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter import typedef -from pypy.interpreter.gateway import interp2app -from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.unroll import unrolling_iterable from rpython.tool.pairtype import extendabletype from rpython.tool.sourcetools import func_with_new_name +from pypy.interpreter import typedef +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import interp2app + + +def raise_attriberr(space, w_obj, name): + raise oefmt(space.w_AttributeError, + \"'%T' object has no attribute '%s'\", w_obj, name) + def check_string(space, w_obj): if not (space.isinstance_w(w_obj, space.w_str) or @@ -608,11 +613,13 @@ continue # field is optional w_obj = self.getdictvalue(space, missing) if w_obj is None: - err = "required field \\"%s\\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "required field \\"%s\\" missing from %s", + missing, host) else: - err = "incorrect type for field \\"%s\\" in %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "incorrect type for field \\"%s\\" in %s", + missing, host) raise AssertionError("should not reach here") diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -11,8 +11,7 @@ from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag, UserDelAction) -from pypy.interpreter.error import (OperationError, operationerrfmt, - new_exception_class) +from pypy.interpreter.error import OperationError, new_exception_class, oefmt from pypy.interpreter.argument import Arguments from pypy.interpreter.miscutils import ThreadLocals @@ -61,9 +60,9 @@ return False def setdict(self, space, w_dict): - raise operationerrfmt(space.w_TypeError, - "attribute '__dict__' of %T objects " - "is not writable", self) + raise oefmt(space.w_TypeError, + "attribute '__dict__' of %T objects is not writable", + self) # to be used directly only by space.type implementations def getclass(self, space): @@ -123,8 +122,8 @@ classname = '?' 
else: classname = wrappable_class_name(RequiredClass) - msg = "'%s' object expected, got '%T' instead" - raise operationerrfmt(space.w_TypeError, msg, classname, self) + raise oefmt(space.w_TypeError, + "'%s' object expected, got '%T' instead", classname, self) # used by _weakref implemenation @@ -132,8 +131,8 @@ return None def setweakref(self, space, weakreflifeline): - raise operationerrfmt(space.w_TypeError, - "cannot create weak reference to '%T' object", self) + raise oefmt(space.w_TypeError, + "cannot create weak reference to '%T' object", self) def delweakref(self): pass @@ -215,25 +214,25 @@ self._typed_unwrap_error(space, "integer") def _typed_unwrap_error(self, space, expected): - raise operationerrfmt(space.w_TypeError, "expected %s, got %T object", - expected, self) + raise oefmt(space.w_TypeError, + "expected %s, got %T object", expected, self) def int(self, space): w_impl = space.lookup(self, '__int__') if w_impl is None: - raise operationerrfmt(space.w_TypeError, - "unsupported operand type for int(): '%T'", self) + raise oefmt(space.w_TypeError, + "unsupported operand type for int(): '%T'", self) w_result = space.get_and_call_function(w_impl, self) if (space.isinstance_w(w_result, space.w_int) or space.isinstance_w(w_result, space.w_long)): return w_result - msg = "__int__ returned non-int (type '%T')" - raise operationerrfmt(space.w_TypeError, msg, w_result) + raise oefmt(space.w_TypeError, + "__int__ returned non-int (type '%T')", w_result) def ord(self, space): - msg = "ord() expected string of length 1, but %T found" - raise operationerrfmt(space.w_TypeError, msg, self) + raise oefmt(space.w_TypeError, + "ord() expected string of length 1, but %T found", self) def __spacebind__(self, space): return self @@ -430,10 +429,9 @@ try: w_mod = self.builtin_modules[name] except KeyError: - raise operationerrfmt( - self.w_SystemError, - "getbuiltinmodule() called " - "with non-builtin module %s", name) + raise oefmt(self.w_SystemError, + "getbuiltinmodule() called with non-builtin module %s", + name) else: # Add the module to sys.modules self.setitem(w_modules, w_name, w_mod) @@ -753,9 +751,10 @@ if can_be_None and self.is_none(w_obj): return None if not isinstance(w_obj, RequiredClass): # or obj is None - msg = "'%s' object expected, got '%N' instead" - raise operationerrfmt(self.w_TypeError, msg, - wrappable_class_name(RequiredClass), w_obj.getclass(self)) + raise oefmt(self.w_TypeError, + "'%s' object expected, got '%N' instead", + wrappable_class_name(RequiredClass), + w_obj.getclass(self)) return w_obj interp_w._annspecialcase_ = 'specialize:arg(1)' @@ -832,13 +831,9 @@ items[idx] = w_item idx += 1 if idx < expected_length: - if idx == 1: - plural = "" - else: - plural = "s" - raise operationerrfmt(self.w_ValueError, - "need more than %d value%s to unpack", - idx, plural) + raise oefmt(self.w_ValueError, + "need more than %d value%s to unpack", + idx, "" if idx == 1 else "s") return items def unpackiterable_unroll(self, w_iterable, expected_length): @@ -1257,8 +1252,8 @@ except OperationError, err: if objdescr is None or not err.match(self, self.w_TypeError): raise - msg = "%s must be an integer, not %T" - raise operationerrfmt(self.w_TypeError, msg, objdescr, w_obj) + raise oefmt(self.w_TypeError, "%s must be an integer, not %T", + objdescr, w_obj) try: index = self.int_w(w_index) except OperationError, err: @@ -1271,9 +1266,9 @@ else: return sys.maxint else: - raise operationerrfmt( - w_exception, "cannot fit '%T' into an index-sized integer", - w_obj) + raise 
oefmt(w_exception, + "cannot fit '%T' into an index-sized integer", + w_obj) else: return index @@ -1517,9 +1512,9 @@ ) fd = self.int_w(w_fd) if fd < 0: - raise operationerrfmt(self.w_ValueError, - "file descriptor cannot be a negative integer (%d)", fd - ) + raise oefmt(self.w_ValueError, + "file descriptor cannot be a negative integer (%d)", + fd) return fd def warn(self, w_msg, w_warningcls, stacklevel=2): diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -226,9 +226,9 @@ def _exception_getclass(self, space, w_inst): w_type = space.exception_getclass(w_inst) if not space.exception_is_valid_class_w(w_type): - msg = ("exceptions must be old-style classes or derived " - "from BaseException, not %N") - raise operationerrfmt(space.w_TypeError, msg, w_type) + raise oefmt(space.w_TypeError, + "exceptions must be old-style classes or derived from " + "BaseException, not %N", w_type) return w_type def write_unraisable(self, space, where, w_object=None, @@ -383,15 +383,16 @@ self._w_value = w_value = space.wrap(self._value) return w_value -def get_operationerr_class(valuefmt): + at specialize.memo() +def get_operr_class(valuefmt): try: result = _fmtcache[valuefmt] except KeyError: result = _fmtcache[valuefmt] = get_operrcls2(valuefmt) return result -get_operationerr_class._annspecialcase_ = 'specialize:memo' -def operationerrfmt(w_type, valuefmt, *args): + at specialize.arg(1) +def oefmt(w_type, valuefmt, *args): """Equivalent to OperationError(w_type, space.wrap(valuefmt % args)). More efficient in the (common) case where the value is not actually needed. @@ -405,9 +406,8 @@ """ if not len(args): return OpErrFmtNoArgs(w_type, valuefmt) - OpErrFmt, strings = get_operationerr_class(valuefmt) + OpErrFmt, strings = get_operr_class(valuefmt) return OpErrFmt(w_type, strings, *args) -operationerrfmt._annspecialcase_ = 'specialize:arg(1)' # ____________________________________________________________ diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -7,8 +7,8 @@ """ from rpython.rlib.unroll import unrolling_iterable -from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.eval import Code from pypy.interpreter.argument import Arguments from rpython.rlib import jit @@ -413,9 +413,9 @@ if self.closure: closure_len = len(self.closure) if isinstance(code, PyCode) and closure_len != len(code.co_freevars): - raise operationerrfmt(space.w_ValueError, - "%N() requires a code object with %d free vars, not %d", - self, closure_len, len(code.co_freevars)) + raise oefmt(space.w_ValueError, + "%N() requires a code object with %d free vars, not " + "%d", self, closure_len, len(code.co_freevars)) self.fget_func_doc(space) # see test_issue1293 self.code = code @@ -495,10 +495,9 @@ instdescr = instname + " instance" else: instdescr = "instance" - msg = ("unbound method %N() must be called with %s " - "as first argument (got %s instead)") - raise operationerrfmt(space.w_TypeError, msg, - self, clsdescr, instdescr) + raise oefmt(space.w_TypeError, + "unbound method %N() must be called with %s as first " + "argument (got %s instead)", self, clsdescr, instdescr) return space.call_args(self.w_function, args) def descr_method_get(self, w_obj, w_cls=None): diff --git a/pypy/interpreter/pyframe.py 
b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -12,7 +12,7 @@ from pypy.interpreter.argument import Arguments from pypy.interpreter.astcompiler import consts from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.executioncontext import ExecutionContext from pypy.interpreter.nestedscope import Cell from pypy.tool import stdlib_opcode @@ -622,8 +622,8 @@ line = self.pycode.co_firstlineno if new_lineno < line: - raise operationerrfmt(space.w_ValueError, - "line %d comes before the current code.", new_lineno) + raise oefmt(space.w_ValueError, + "line %d comes before the current code.", new_lineno) elif new_lineno == line: new_lasti = 0 else: @@ -639,8 +639,8 @@ break if new_lasti == -1: - raise operationerrfmt(space.w_ValueError, - "line %d comes after the current code.", new_lineno) + raise oefmt(space.w_ValueError, + "line %d comes after the current code.", new_lineno) # Don't jump to a line with an except in it. code = self.pycode.co_code @@ -687,9 +687,9 @@ assert len(blockstack) == 0 if new_lasti_setup_addr != f_lasti_setup_addr: - raise operationerrfmt(space.w_ValueError, - "can't jump into or out of a 'finally' block %d -> %d", - f_lasti_setup_addr, new_lasti_setup_addr) + raise oefmt(space.w_ValueError, + "can't jump into or out of a 'finally' block %d -> %d", + f_lasti_setup_addr, new_lasti_setup_addr) if new_lasti < self.last_instr: min_addr = new_lasti diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -14,7 +14,7 @@ gateway, function, eval, pyframe, pytraceback, pycode ) from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.nestedscope import Cell from pypy.interpreter.pycode import PyCode, BytecodeCorruption from pypy.tool.stdlib_opcode import bytecode_spec @@ -492,8 +492,9 @@ def _load_fast_failed(self, varindex): varname = self.getlocalvarname(varindex) - message = "local variable '%s' referenced before assignment" - raise operationerrfmt(self.space.w_UnboundLocalError, message, varname) + raise oefmt(self.space.w_UnboundLocalError, + "local variable '%s' referenced before assignment", + varname) _load_fast_failed._dont_inline_ = True def LOAD_CONST(self, constindex, next_instr): @@ -848,9 +849,8 @@ # catch KeyErrors and turn them into NameErrors if not e.match(self.space, self.space.w_KeyError): raise - message = "name '%s' is not defined" - raise operationerrfmt(self.space.w_NameError, message, - self.space.str_w(w_varname)) + raise oefmt(self.space.w_NameError, "name '%s' is not defined", + self.space.str_w(w_varname)) def UNPACK_SEQUENCE(self, itemcount, next_instr): w_iterable = self.popvalue() @@ -899,8 +899,8 @@ _load_global._always_inline_ = True def _load_global_failed(self, varname): - message = "global name '%s' is not defined" - raise operationerrfmt(self.space.w_NameError, message, varname) + raise oefmt(self.space.w_NameError, + "global name '%s' is not defined", varname) _load_global_failed._dont_inline_ = True def LOAD_GLOBAL(self, nameindex, next_instr): @@ -910,9 +910,9 @@ def DELETE_FAST(self, varindex, next_instr): if self.locals_stack_w[varindex] is None: varname = self.getlocalvarname(varindex) - message = "local variable '%s' 
referenced before assignment" - raise operationerrfmt(self.space.w_UnboundLocalError, message, - varname) + raise oefmt(self.space.w_UnboundLocalError, + "local variable '%s' referenced before assignment", + varname) self.locals_stack_w[varindex] = None def BUILD_TUPLE(self, itemcount, next_instr): @@ -1040,9 +1040,8 @@ except OperationError, e: if not e.match(self.space, self.space.w_AttributeError): raise - raise operationerrfmt(self.space.w_ImportError, - "cannot import name '%s'", - self.space.str_w(w_name)) + raise oefmt(self.space.w_ImportError, + "cannot import name '%s'", self.space.str_w(w_name)) self.pushvalue(w_obj) def YIELD_VALUE(self, oparg, next_instr): @@ -1127,9 +1126,9 @@ w_enter = self.space.lookup(w_manager, "__enter__") w_descr = self.space.lookup(w_manager, "__exit__") if w_enter is None or w_descr is None: - raise operationerrfmt(self.space.w_AttributeError, - "'%T' object is not a context manager" - " (no __enter__/__exit__ method)", w_manager) + raise oefmt(self.space.w_AttributeError, + "'%T' object is not a context manager (no __enter__/" + "__exit__ method)", w_manager) w_exit = self.space.get(w_descr, w_manager) self.settopvalue(w_exit) w_result = self.space.get_and_call_function(w_enter, w_manager) diff --git a/pypy/interpreter/test/test_error.py b/pypy/interpreter/test/test_error.py --- a/pypy/interpreter/test/test_error.py +++ b/pypy/interpreter/test/test_error.py @@ -1,7 +1,7 @@ import py, os, errno -from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.error import decompose_valuefmt, get_operrcls2 -from pypy.interpreter.error import wrap_oserror, new_exception_class +from pypy.interpreter.error import ( + OperationError, decompose_valuefmt, get_operrcls2, new_exception_class, + oefmt, wrap_oserror) def test_decompose_valuefmt(): @@ -22,59 +22,59 @@ assert cls2 is cls # caching assert strings2 == ("a ", " b ", " c") -def test_operationerrfmt(space): - operr = operationerrfmt("w_type", "abc %s def %d", "foo", 42) +def test_oefmt(space): + operr = oefmt("w_type", "abc %s def %d", "foo", 42) assert isinstance(operr, OperationError) assert operr.w_type == "w_type" assert operr._w_value is None assert operr._compute_value(space) == "abc foo def 42" - operr2 = operationerrfmt("w_type2", "a %s b %d c", "bar", 43) + operr2 = oefmt("w_type2", "a %s b %d c", "bar", 43) assert operr2.__class__ is operr.__class__ - operr3 = operationerrfmt("w_type2", "a %s b %s c", "bar", "4b") + operr3 = oefmt("w_type2", "a %s b %s c", "bar", "4b") assert operr3.__class__ is not operr.__class__ -def test_operationerrfmt_noargs(space): - operr = operationerrfmt(space.w_AttributeError, "no attribute 'foo'") +def test_oefmt_noargs(space): + operr = oefmt(space.w_AttributeError, "no attribute 'foo'") operr.normalize_exception(space) val = operr.get_w_value(space) assert space.isinstance_w(val, space.w_AttributeError) w_repr = space.repr(val) assert space.str_w(w_repr) == "AttributeError(\"no attribute 'foo'\",)" -def test_operationerrfmt_T(space): - operr = operationerrfmt(space.w_AttributeError, - "'%T' object has no attribute '%s'", - space.wrap('foo'), 'foo') +def test_oefmt_T(space): + operr = oefmt(space.w_AttributeError, + "'%T' object has no attribute '%s'", + space.wrap('foo'), 'foo') assert operr._compute_value(space) == "'str' object has no attribute 'foo'" - operr = operationerrfmt("w_type", - "'%T' object has no attribute '%s'", - space.wrap('foo'), 'foo') + operr = oefmt("w_type", + "'%T' object has no attribute '%s'", + space.wrap('foo'), 
'foo') assert operr._compute_value(space) == "'str' object has no attribute 'foo'" -def test_operationerrfmt_N(space): - operr = operationerrfmt(space.w_AttributeError, - "'%N' object has no attribute '%s'", - space.type(space.wrap('foo')), 'foo') +def test_oefmt_N(space): + operr = oefmt(space.w_AttributeError, + "'%N' object has no attribute '%s'", + space.type(space.wrap('foo')), 'foo') assert operr._compute_value(space) == "'str' object has no attribute 'foo'" - operr = operationerrfmt("w_type", - "'%N' object has no attribute '%s'", - space.type(space.wrap('foo')), 'foo') + operr = oefmt("w_type", + "'%N' object has no attribute '%s'", + space.type(space.wrap('foo')), 'foo') assert operr._compute_value(space) == "'str' object has no attribute 'foo'" - operr = operationerrfmt(space.w_AttributeError, - "'%N' object has no attribute '%s'", - space.wrap('foo'), 'foo') + operr = oefmt(space.w_AttributeError, + "'%N' object has no attribute '%s'", + space.wrap('foo'), 'foo') assert operr._compute_value(space) == "'?' object has no attribute 'foo'" - operr = operationerrfmt("w_type", - "'%N' object has no attribute '%s'", - space.wrap('foo'), 'foo') + operr = oefmt("w_type", + "'%N' object has no attribute '%s'", + space.wrap('foo'), 'foo') assert operr._compute_value(space) == "'?' object has no attribute 'foo'" -def test_operationerrfmt_R(space): - operr = operationerrfmt(space.w_ValueError, "illegal newline value: %R", - space.wrap('foo')) +def test_oefmt_R(space): + operr = oefmt(space.w_ValueError, + "illegal newline value: %R", space.wrap('foo')) assert operr._compute_value(space) == "illegal newline value: 'foo'" - operr = operationerrfmt(space.w_ValueError, "illegal newline value: %R", - space.wrap("'PyLadies'")) + operr = oefmt(space.w_ValueError, "illegal newline value: %R", + space.wrap("'PyLadies'")) expected = "illegal newline value: \"'PyLadies'\"" assert operr._compute_value(space) == expected diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -2,7 +2,7 @@ from pypy.interpreter.argument import Arguments from pypy.interpreter.baseobjspace import W_Root, DescrMismatch -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import (interp2app, BuiltinCode, unwrap_spec, WrappedDefault) @@ -549,9 +549,9 @@ def typecheck(self, space, w_obj): if not space.isinstance_w(w_obj, self.w_cls): - m = "descriptor '%N' for '%N' objects doesn't apply to '%T' object" - raise operationerrfmt(space.w_TypeError, m, - self, self.w_cls, w_obj) + raise oefmt(space.w_TypeError, + "descriptor '%N' for '%N' objects doesn't apply to " + "'%T' object", self, self.w_cls, w_obj) def descr_member_get(self, space, w_obj, w_cls=None): """member.__get__(obj[, type]) -> value @@ -620,8 +620,9 @@ def descr_get_dict(space, w_obj): w_dict = w_obj.getdict(space) if w_dict is None: - msg = "descriptor '__dict__' doesn't apply to '%T' objects" - raise operationerrfmt(space.w_TypeError, msg, w_obj) + raise oefmt(space.w_TypeError, + "descriptor '__dict__' doesn't apply to '%T' objects", + w_obj) return w_dict def descr_set_dict(space, w_obj, w_dict): diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -1,5 +1,5 @@ import new -from pypy.interpreter.error import OperationError, 
operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.interpreter.baseobjspace import W_Root @@ -10,8 +10,8 @@ def raise_type_err(space, argument, expected, w_obj): - raise operationerrfmt(space.w_TypeError, "argument %s must be %s, not %T", - argument, expected, w_obj) + raise oefmt(space.w_TypeError, + "argument %s must be %s, not %T", argument, expected, w_obj) def unwrap_attr(space, w_attr): try: @@ -126,10 +126,8 @@ return space.newtuple(self.bases_w) w_value = self.lookup(space, name) if w_value is None: - raise operationerrfmt( - space.w_AttributeError, - "class %s has no attribute '%s'", - self.name, name) + raise oefmt(space.w_AttributeError, + "class %s has no attribute '%s'", self.name, name) w_descr_get = space.lookup(w_value, '__get__') if w_descr_get is None: @@ -158,18 +156,15 @@ def descr_delattr(self, space, w_attr): name = unwrap_attr(space, w_attr) if name in ("__dict__", "__name__", "__bases__"): - raise operationerrfmt( - space.w_TypeError, - "cannot delete attribute '%s'", name) + raise oefmt(space.w_TypeError, + "cannot delete attribute '%s'", name) try: space.delitem(self.w_dict, w_attr) except OperationError, e: if not e.match(space, space.w_KeyError): raise - raise operationerrfmt( - space.w_AttributeError, - "class %s has no attribute '%s'", - self.name, name) + raise oefmt(space.w_AttributeError, + "class %s has no attribute '%s'", self.name, name) def descr_repr(self, space): mod = self.get_module_string(space) @@ -362,10 +357,9 @@ raise # not found at all if exc: - raise operationerrfmt( - space.w_AttributeError, - "%s instance has no attribute '%s'", - self.w_class.name, name) + raise oefmt(space.w_AttributeError, + "%s instance has no attribute '%s'", + self.w_class.name, name) else: return None @@ -416,10 +410,9 @@ space.call_function(w_meth, w_name) else: if not self.deldictvalue(space, name): - raise operationerrfmt( - space.w_AttributeError, - "%s instance has no attribute '%s'", - self.w_class.name, name) + raise oefmt(space.w_AttributeError, + "%s instance has no attribute '%s'", + self.w_class.name, name) def descr_repr(self, space): w_meth = self.getattr(space, '__repr__', False) diff --git a/pypy/module/__pypy__/interp_dict.py b/pypy/module/__pypy__/interp_dict.py --- a/pypy/module/__pypy__/interp_dict.py +++ b/pypy/module/__pypy__/interp_dict.py @@ -1,6 +1,6 @@ +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec -from pypy.interpreter.error import operationerrfmt, OperationError from pypy.objspace.std.dictmultiobject import W_DictMultiObject @unwrap_spec(type=str) @@ -30,8 +30,7 @@ elif type == 'strdict': return space.newdict(strdict=True) else: From noreply at buildbot.pypy.org Tue Feb 4 06:02:18 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 4 Feb 2014 06:02:18 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: merge default into branch Message-ID: <20140204050218.3917E1C0470@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r69071:80df77abc391 Date: 2014-02-01 19:57 +0200 http://bitbucket.org/pypy/pypy/changeset/80df77abc391/ Log: merge default into branch diff too long, truncating to 2000 out of 3978 lines diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -426,25 +426,12 @@ Could we use LLVM? ------------------ -In theory yes. 
But we tried to use it 5 or 6 times already, as a -translation backend or as a JIT backend --- and failed each time. +There is a (static) translation backend using LLVM in the branch +``llvm-translation-backend``. It can translate PyPy with or without the JIT on +Linux. -In more details: using LLVM as a (static) translation backend is -pointless nowadays because you can generate C code and compile it with -clang. (Note that compiling PyPy with clang gives a result that is not -faster than compiling it with gcc.) We might in theory get extra -benefits from LLVM's GC integration, but this requires more work on the -LLVM side before it would be remotely useful. Anyway, it could be -interfaced via a custom primitive in the C code. - -On the other hand, using LLVM as our JIT backend looks interesting as -well --- but again we made an attempt, and it failed: LLVM has no way to -patch the generated machine code. - -So the position of the core PyPy developers is that if anyone wants to -make an N+1'th attempt with LLVM, they are welcome, and will be happy to -provide help in the IRC channel, but they are left with the burden of proof -that (a) it works and (b) it gives important benefits. +Using LLVM as our JIT backend looks interesting as well -- we made an attempt, +but it failed: LLVM has no way to patch the generated machine code. ---------------------- How do I compile PyPy? diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -6,7 +6,7 @@ from errno import EINTR from rpython.rlib import jit -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, specialize from pypy.interpreter import debug @@ -40,12 +40,11 @@ self.debug_excs = [] def clear(self, space): - # for sys.exc_clear() - self.w_type = space.w_None - self._w_value = space.w_None - self._application_traceback = None - if not we_are_translated(): - del self.debug_excs[:] + # XXX remove this method. The point is that we cannot always + # hack at 'self' to clear w_type and _w_value, because in some + # corner cases the OperationError will be used again: see + # test_interpreter.py:test_with_statement_and_sys_clear. + pass def match(self, space, w_check_class): "Check if this application-level exception matches 'w_check_class'." 
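The oefmt() helper introduced in the changeset above builds its message lazily and is specialized on the format string, so it works as a drop-in replacement for operationerrfmt(). Judging from the test_error.py changes in the previous changeset, %s and %d format unwrapped values, while %T, %N and %R take wrapped objects and expand to the object's type name, its name (for type objects; '?' otherwise) and its repr. A minimal, hypothetical call site (check_fd and the message text are made up for illustration):

    from pypy.interpreter.error import oefmt

    def check_fd(space, w_obj, fd):
        if fd < 0:
            # %d formats the unwrapped int, %T the type of the wrapped object
            raise oefmt(space.w_ValueError,
                        "got negative fd %d from a '%T' object", fd, w_obj)
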
@@ -300,6 +299,10 @@ """ self._application_traceback = traceback + at specialize.memo() +def get_cleared_operation_error(space): + return OperationError(space.w_None, space.w_None) + # ____________________________________________________________ # optimization only: avoid the slowest operation -- the string # formatting with '%' -- in the common case were we don't @@ -371,8 +374,8 @@ class OpErrFmtNoArgs(OperationError): def __init__(self, w_type, value): + self._value = value self.setup(w_type) - self._value = value def get_w_value(self, space): w_value = self._w_value diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -1,5 +1,5 @@ import sys -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, get_cleared_operation_error from rpython.rlib.unroll import unrolling_iterable from rpython.rlib import jit @@ -217,6 +217,17 @@ if frame: # else, the exception goes nowhere and is lost frame.last_exception = operror + def clear_sys_exc_info(self): + # Find the frame out of which sys_exc_info() would return its result, + # and hack this frame's last_exception to become the cleared + # OperationError (which is different from None!). + frame = self.gettopframe_nohidden() + while frame: + if frame.last_exception is not None: + frame.last_exception = get_cleared_operation_error(self.space) + break + frame = self.getnextframe_nohidden(frame) + @jit.dont_look_inside def settrace(self, w_func): """Set the global trace function.""" diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -744,6 +744,9 @@ else: raise OperationError(space.w_TypeError, space.wrap("raise: no active exception to re-raise")) + if operror.w_type is space.w_None: + raise OperationError(space.w_TypeError, + space.wrap("raise: the exception to re-raise was cleared")) # re-raise, no new traceback obj will be attached self.last_exception = operror raise RaiseWithExplicitTraceback(operror) diff --git a/pypy/interpreter/test/test_interpreter.py b/pypy/interpreter/test/test_interpreter.py --- a/pypy/interpreter/test/test_interpreter.py +++ b/pypy/interpreter/test/test_interpreter.py @@ -311,3 +311,73 @@ assert str(e) == "maximum recursion depth exceeded" else: assert 0, "should have raised!" 
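Taken together, the error.py, executioncontext.py and pyopcode.py hunks above implement sys.exc_clear() without mutating the original OperationError: a memoized sentinel error whose w_type is w_None is installed as the relevant frame's last_exception, and a bare 'raise' then refuses to re-raise that sentinel. The test_interpreter.py additions just below exercise exactly this behaviour. A condensed sketch of how the pieces connect (simplified; the real code is a method on ExecutionContext and omits nothing shown in the hunks above):

    from rpython.rlib.objectmodel import specialize
    from pypy.interpreter.error import OperationError

    @specialize.memo()
    def get_cleared_operation_error(space):
        # one shared "cleared" marker per space
        return OperationError(space.w_None, space.w_None)

    def clear_sys_exc_info(ec):
        # replace the exception that sys.exc_info() would currently report
        frame = ec.gettopframe_nohidden()
        while frame:
            if frame.last_exception is not None:
                frame.last_exception = get_cleared_operation_error(ec.space)
                break
            frame = ec.getnextframe_nohidden(frame)
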
+ + def test_with_statement_and_sys_clear(self): + import sys + class CM(object): + def __enter__(self): + return self + def __exit__(self, exc_type, exc_value, tb): + sys.exc_clear() + try: + with CM(): + 1 / 0 + raise AssertionError("should not be reached") + except ZeroDivisionError: + pass + + def test_sys_clear_while_handling_exception(self): + import sys + def f(): + try: + some_missing_name + except NameError: + g() + assert sys.exc_info()[0] is NameError + def g(): + assert sys.exc_info()[0] is NameError + try: + 1 / 0 + except ZeroDivisionError: + assert sys.exc_info()[0] is ZeroDivisionError + sys.exc_clear() + assert sys.exc_info()[0] is None + h() + assert sys.exc_info()[0] is None + def h(): + assert sys.exc_info()[0] is None + f() + + def test_sys_clear_while_handling_exception_nested(self): + import sys + def f(): + try: + some_missing_name + except NameError: + g() + assert sys.exc_info()[0] is NameError + def g(): + assert sys.exc_info()[0] is NameError + try: + 1 / 0 + except ZeroDivisionError: + assert sys.exc_info()[0] is ZeroDivisionError + h1() + assert sys.exc_info()[0] is None + h() + assert sys.exc_info()[0] is None + def h(): + assert sys.exc_info()[0] is None + def h1(): + sys.exc_clear() + f() + + def test_sys_clear_reraise(self): + import sys + def f(): + try: + 1 / 0 + except ZeroDivisionError: + sys.exc_clear() + raise + raises(TypeError, f) diff --git a/pypy/module/_cffi_backend/handle.py b/pypy/module/_cffi_backend/handle.py --- a/pypy/module/_cffi_backend/handle.py +++ b/pypy/module/_cffi_backend/handle.py @@ -2,58 +2,13 @@ from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.gateway import unwrap_spec from pypy.module._cffi_backend import ctypeobj, ctypeptr, cdataobj -from pypy.module._weakref.interp__weakref import dead_ref from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rlib import rweaklist -def reduced_value(s): - while True: - divide = s & 1 - s >>= 1 - if not divide: - return s - -# ____________________________________________________________ - - -class CffiHandles: +class CffiHandles(rweaklist.RWeakListMixin): def __init__(self, space): - self.handles = [] - self.look_distance = 0 - - def reserve_next_handle_index(self): - # The reservation ordering done here is tweaked for pypy's - # memory allocator. We look from index 'look_distance'. - # Look_distance increases from 0. But we also look at - # "look_distance/2" or "/4" or "/8", etc. If we find that one - # of these secondary locations is free, we assume it's because - # there was recently a minor collection; so we reset - # look_distance to 0 and start again from the lowest locations. - length = len(self.handles) - for d in range(self.look_distance, length): - if self.handles[d]() is None: - self.look_distance = d + 1 - return d - s = reduced_value(d) - if self.handles[s]() is None: - break - # restart from the beginning - for d in range(0, length): - if self.handles[d]() is None: - self.look_distance = d + 1 - return d - # full! 
extend, but don't use '+=' here - self.handles = self.handles + [dead_ref] * (length // 3 + 5) - self.look_distance = length + 1 - return length - - def store_handle(self, index, content): - self.handles[index] = weakref.ref(content) - - def fetch_handle(self, index): - if 0 <= index < len(self.handles): - return self.handles[index]() - return None + self.initialize() def get(space): return space.fromcache(CffiHandles) diff --git a/pypy/module/_cffi_backend/test/test_handle.py b/pypy/module/_cffi_backend/test/test_handle.py --- a/pypy/module/_cffi_backend/test/test_handle.py +++ b/pypy/module/_cffi_backend/test/test_handle.py @@ -1,20 +1,5 @@ import random -from pypy.module._cffi_backend.handle import CffiHandles, reduced_value - - -def test_reduced_value(): - assert reduced_value(0) == 0 - assert reduced_value(1) == 0 - assert reduced_value(2) == 1 - assert reduced_value(3) == 0 - assert reduced_value(4) == 2 - assert reduced_value(5) == 1 - assert reduced_value(6) == 3 - assert reduced_value(7) == 0 - assert reduced_value(8) == 4 - assert reduced_value(9) == 2 - assert reduced_value(10) == 5 - assert reduced_value(11) == 1 +from pypy.module._cffi_backend.handle import CffiHandles class PseudoWeakRef(object): diff --git a/pypy/module/_io/interp_bytesio.py b/pypy/module/_io/interp_bytesio.py --- a/pypy/module/_io/interp_bytesio.py +++ b/pypy/module/_io/interp_bytesio.py @@ -11,7 +11,7 @@ class W_BytesIO(RStringIO, W_BufferedIOBase): def __init__(self, space): - W_BufferedIOBase.__init__(self, space) + W_BufferedIOBase.__init__(self, space, add_to_autoflusher=False) self.init() def descr_init(self, space, w_initial_bytes=None): diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py --- a/pypy/module/_io/interp_iobase.py +++ b/pypy/module/_io/interp_iobase.py @@ -5,7 +5,7 @@ from pypy.interpreter.gateway import interp2app from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.rstring import StringBuilder -from rpython.rlib import rweakref +from rpython.rlib import rweakref, rweaklist DEFAULT_BUFFER_SIZE = 8192 @@ -44,15 +44,15 @@ class W_IOBase(W_Root): - def __init__(self, space): + def __init__(self, space, add_to_autoflusher=True): # XXX: IOBase thinks it has to maintain its own internal state in # `__IOBase_closed` and call flush() by itself, but it is redundant # with whatever behaviour a non-trivial derived class will implement. self.space = space self.w_dict = space.newdict() self.__IOBase_closed = False - self.streamholder = None # needed by AutoFlusher - get_autoflusher(space).add(self) + if add_to_autoflusher: + get_autoflusher(space).add(self) def getdict(self, space): return self.w_dict @@ -114,7 +114,6 @@ space.call_method(self, "flush") finally: self.__IOBase_closed = True - get_autoflusher(space).remove(self) def flush_w(self, space): if self._CLOSED(): @@ -338,55 +337,35 @@ # functions to make sure that all streams are flushed on exit # ------------------------------------------------------------ -class StreamHolder(object): - def __init__(self, w_iobase): - self.w_iobase_ref = rweakref.ref(w_iobase) - w_iobase.autoflusher = self - def autoflush(self, space): - w_iobase = self.w_iobase_ref() - if w_iobase is not None: - try: - space.call_method(w_iobase, 'flush') - except OperationError: - # Silencing all errors is bad, but getting randomly - # interrupted here is equally as bad, and potentially - # more frequent (because of shutdown issues). 
- pass - - -class AutoFlusher(object): +class AutoFlusher(rweaklist.RWeakListMixin): def __init__(self, space): - self.streams = {} + self.initialize() def add(self, w_iobase): - assert w_iobase.streamholder is None if rweakref.has_weakref_support(): - holder = StreamHolder(w_iobase) - w_iobase.streamholder = holder - self.streams[holder] = None + self.add_handle(w_iobase) #else: # no support for weakrefs, so ignore and we # will not get autoflushing - def remove(self, w_iobase): - holder = w_iobase.streamholder - if holder is not None: - try: - del self.streams[holder] - except KeyError: - # this can happen in daemon threads - pass - def flush_all(self, space): - while self.streams: - for streamholder in self.streams.keys(): + while True: + handles = self.get_all_handles() + if len(handles) == 0: + break + self.initialize() # reset the state here + for wr in handles: + w_iobase = wr() + if w_iobase is None: + continue try: - del self.streams[streamholder] - except KeyError: - pass # key was removed in the meantime - else: - streamholder.autoflush(space) + space.call_method(w_iobase, 'flush') + except OperationError: + # Silencing all errors is bad, but getting randomly + # interrupted here is equally as bad, and potentially + # more frequent (because of shutdown issues). + pass def get_autoflusher(space): return space.fromcache(AutoFlusher) diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py --- a/pypy/module/_weakref/interp__weakref.py +++ b/pypy/module/_weakref/interp__weakref.py @@ -6,6 +6,7 @@ from rpython.rlib import jit from rpython.rlib.rshrinklist import AbstractShrinkList from rpython.rlib.objectmodel import specialize +from rpython.rlib.rweakref import dead_ref import weakref @@ -144,14 +145,6 @@ # ____________________________________________________________ -class Dummy: - pass -dead_ref = weakref.ref(Dummy()) -for i in range(5): - if dead_ref() is not None: - import gc; gc.collect() -assert dead_ref() is None - class W_WeakrefBase(W_Root): def __init__(w_self, space, w_obj, w_callable): diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py --- a/pypy/module/exceptions/interp_exceptions.py +++ b/pypy/module/exceptions/interp_exceptions.py @@ -446,6 +446,9 @@ if hasattr(rwin32, 'build_winerror_to_errno'): _winerror_to_errno, _default_errno = rwin32.build_winerror_to_errno() + # Python 2 doesn't map ERROR_DIRECTORY (267) to ENOTDIR but + # Python 3 (CPython issue #12802) and build_winerror_to_errno do + del _winerror_to_errno[267] else: _winerror_to_errno, _default_errno = {}, 22 # EINVAL diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -367,6 +367,9 @@ return SliceArray(0, strides, backstrides, new_shape, self, orig_array) + def set_dtype(self, space, dtype): + self.dtype = dtype + def argsort(self, space, w_axis): from pypy.module.micronumpy.arrayimpl.sort import argsort_array return argsort_array(self, space, w_axis) diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -173,6 +173,10 @@ raise OperationError(space.w_ValueError, space.wrap( "total size of the array must be unchanged")) + def set_dtype(self, space, dtype): + self.value = self.value.convert_to(space, dtype) + self.dtype 
= dtype + def reshape(self, space, orig_array, new_shape): return self.set_shape(space, orig_array, new_shape) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -256,6 +256,10 @@ value = space.is_true(self) return get_dtype_cache(space).w_booldtype.box(value) + def descr_zero(self, space): + from pypy.module.micronumpy.interp_dtype import get_dtype_cache + return get_dtype_cache(space).w_longdtype.box(0) + def descr_ravel(self, space): from pypy.module.micronumpy.base import convert_to_array w_values = space.newtuple([self]) @@ -327,6 +331,9 @@ def descr_buffer(self, space): return self.descr_ravel(space).descr_get_data(space) + def descr_byteswap(self, space): + return self.get_dtype(space).itemtype.byteswap(self) + w_flags = None def descr_get_flags(self, space): if self.w_flags is None: @@ -583,6 +590,12 @@ __hash__ = interp2app(W_GenericBox.descr_hash), tolist = interp2app(W_GenericBox.item), + min = interp2app(W_GenericBox.descr_self), + max = interp2app(W_GenericBox.descr_self), + argmin = interp2app(W_GenericBox.descr_zero), + argmax = interp2app(W_GenericBox.descr_zero), + sum = interp2app(W_GenericBox.descr_self), + prod = interp2app(W_GenericBox.descr_self), any = interp2app(W_GenericBox.descr_any), all = interp2app(W_GenericBox.descr_all), ravel = interp2app(W_GenericBox.descr_ravel), @@ -592,6 +605,7 @@ view = interp2app(W_GenericBox.descr_view), squeeze = interp2app(W_GenericBox.descr_self), copy = interp2app(W_GenericBox.descr_copy), + byteswap = interp2app(W_GenericBox.descr_byteswap), dtype = GetSetProperty(W_GenericBox.descr_get_dtype), size = GetSetProperty(W_GenericBox.descr_get_size), diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -207,7 +207,7 @@ space.wrap(offset)])) return w_d - def set_fields(self, space, w_fields): + def descr_set_fields(self, space, w_fields): if w_fields == space.w_None: self.fields = None else: @@ -233,19 +233,26 @@ return space.w_None return space.newtuple([space.wrap(name) for name in self.fieldnames]) - def set_names(self, space, w_names): - self.fieldnames = [] - if w_names == space.w_None: - return - else: + def descr_set_names(self, space, w_names): + fieldnames = [] + if w_names != space.w_None: iter = space.iter(w_names) while True: try: - self.fieldnames.append(space.str_w(space.next(iter))) + name = space.str_w(space.next(iter)) except OperationError, e: if not e.match(space, space.w_StopIteration): raise break + if name in fieldnames: + raise OperationError(space.w_ValueError, space.wrap( + "Duplicate field names given.")) + fieldnames.append(name) + self.fieldnames = fieldnames + + def descr_del_names(self, space): + raise OperationError(space.w_AttributeError, space.wrap( + "Cannot delete dtype names attribute")) def descr_get_hasobject(self, space): return space.w_False @@ -321,10 +328,10 @@ self.byteorder = endian fieldnames = space.getitem(w_data, space.wrap(3)) - self.set_names(space, fieldnames) + self.descr_set_names(space, fieldnames) fields = space.getitem(w_data, space.wrap(4)) - self.set_fields(space, fields) + self.descr_set_fields(space, fields) @unwrap_spec(new_order=str) def descr_newbyteorder(self, space, new_order=NPY_SWAP): @@ -468,7 +475,9 @@ shape = GetSetProperty(W_Dtype.descr_get_shape), isnative = GetSetProperty(W_Dtype.descr_get_isnative), 
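The descr_set_names()/descr_del_names() pair added above is hooked up just below by passing a setter and a deleter to GetSetProperty next to the getter. At app level the effect is roughly this plain-CPython property (class and attribute names here are purely illustrative):

    class DtypeSketch(object):
        def __init__(self, names):
            self._names = list(names)

        def _get_names(self):
            return tuple(self._names)

        def _set_names(self, value):
            names = [str(n) for n in value]
            if len(set(names)) != len(names):
                raise ValueError("Duplicate field names given.")
            self._names = names

        def _del_names(self):
            raise AttributeError("Cannot delete dtype names attribute")

        # getter, setter, deleter: the same three roles now passed to
        # GetSetProperty for 'names'
        names = property(_get_names, _set_names, _del_names)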
fields = GetSetProperty(W_Dtype.descr_get_fields), - names = GetSetProperty(W_Dtype.descr_get_names), + names = GetSetProperty(W_Dtype.descr_get_names, + W_Dtype.descr_set_names, + W_Dtype.descr_del_names), hasobject = GetSetProperty(W_Dtype.descr_get_hasobject), descr = GetSetProperty(W_Dtype.descr_get_descr), ) @@ -794,29 +803,19 @@ for dtype in reversed(self.builtin_dtypes): self.dtypes_by_num[dtype.num] = dtype self.dtypes_by_name[dtype.name] = dtype - can_name = dtype.kind + str(dtype.get_size()) - self.dtypes_by_name[can_name] = dtype - self.dtypes_by_name[NPY_NATBYTE + can_name] = dtype - self.dtypes_by_name[NPY_NATIVE + can_name] = dtype - new_name = NPY_OPPBYTE + can_name - itemtype = type(dtype.itemtype)(False) - self.dtypes_by_name[new_name] = W_Dtype( - itemtype, dtype.num, dtype.kind, new_name, dtype.char, - dtype.w_box_type, byteorder=NPY_OPPBYTE, - float_type=dtype.float_type) - if dtype.kind != dtype.char: - can_name = dtype.char + for can_name in [dtype.kind + str(dtype.get_size()), + dtype.char]: + self.dtypes_by_name[can_name] = dtype self.dtypes_by_name[NPY_NATBYTE + can_name] = dtype self.dtypes_by_name[NPY_NATIVE + can_name] = dtype new_name = NPY_OPPBYTE + can_name + itemtype = type(dtype.itemtype)(False) self.dtypes_by_name[new_name] = W_Dtype( itemtype, dtype.num, dtype.kind, new_name, dtype.char, dtype.w_box_type, byteorder=NPY_OPPBYTE, float_type=dtype.float_type) - for alias in dtype.aliases: self.dtypes_by_name[alias] = dtype - self.dtypes_by_name[dtype.char] = dtype typeinfo_full = { 'LONGLONG': self.w_int64dtype, diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -84,6 +84,19 @@ def descr_get_dtype(self, space): return self.implementation.dtype + def descr_set_dtype(self, space, w_dtype): + dtype = space.interp_w(interp_dtype.W_Dtype, + space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + if (dtype.get_size() != self.get_dtype().get_size() or + dtype.is_flexible_type() or self.get_dtype().is_flexible_type()): + raise OperationError(space.w_ValueError, space.wrap( + "new type not compatible with array.")) + self.implementation.set_dtype(space, dtype) + + def descr_del_dtype(self, space): + raise OperationError(space.w_AttributeError, space.wrap( + "Cannot delete array dtype")) + def descr_get_ndim(self, space): return space.wrap(len(self.get_shape())) @@ -489,6 +502,15 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "non-int arg not supported")) + def descr_itemset(self, space, args_w): + if len(args_w) == 0: + raise OperationError(space.w_ValueError, space.wrap( + "itemset must have at least one argument")) + if len(args_w) != len(self.get_shape()) + 1: + raise OperationError(space.w_ValueError, space.wrap( + "incorrect number of indices for array")) + self.descr_setitem(space, space.newtuple(args_w[:-1]), args_w[-1]) + def descr___array__(self, space, w_dtype=None): if not space.is_none(w_dtype): raise OperationError(space.w_NotImplementedError, space.wrap( @@ -629,10 +651,6 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "getfield not implemented yet")) - def descr_itemset(self, space, w_arg): - raise OperationError(space.w_NotImplementedError, space.wrap( - "itemset not implemented yet")) - @unwrap_spec(new_order=str) def descr_newbyteorder(self, space, new_order=NPY_SWAP): return self.descr_view(space, @@ -903,8 +921,8 @@ w_res = self.descr_mul(space, other) 
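The descr_set_dtype() and descr_itemset() methods added above mirror NumPy's behaviour for the simple cases: assigning to .dtype reinterprets the existing buffer (only allowed here for types of equal itemsize), and itemset() takes the indices followed by the value. With a contemporary (1.x) NumPy the intended app-level semantics look roughly like this (illustrative only):

    import numpy as np

    a = np.arange(4, dtype=np.int32)
    a.dtype = np.float32     # same itemsize: reinterpret the buffer, no copy
    assert a.dtype == np.float32 and a.size == 4

    b = np.arange(6)
    b.itemset(4, 99)         # the last argument is the value, the rest index the array
    assert b[4] == 99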
assert isinstance(w_res, W_NDimArray) return w_res.descr_sum(space, space.wrap(-1), out) - dtype = interp_ufuncs.find_binop_result_dtype(space, - self.get_dtype(), other.get_dtype()) + dtype = interp_ufuncs.find_binop_result_dtype(space, self.get_dtype(), + other.get_dtype()) if self.get_size() < 1 and other.get_size() < 1: # numpy compatability return W_NDimArray.new_scalar(space, dtype, space.wrap(0)) @@ -912,25 +930,27 @@ out_shape, other_critical_dim = _match_dot_shapes(space, self, other) if out: matches = True - if len(out.get_shape()) != len(out_shape): + if dtype != out.get_dtype(): + matches = False + elif not out.implementation.order == "C": + matches = False + elif len(out.get_shape()) != len(out_shape): matches = False else: for i in range(len(out_shape)): if out.get_shape()[i] != out_shape[i]: matches = False break - if dtype != out.get_dtype(): - matches = False - if not out.implementation.order == "C": - matches = False if not matches: raise OperationError(space.w_ValueError, space.wrap( - 'output array is not acceptable (must have the right type, nr dimensions, and be a C-Array)')) + 'output array is not acceptable (must have the right type, ' + 'nr dimensions, and be a C-Array)')) w_res = out + w_res.fill(space, self.get_dtype().coerce(space, None)) else: w_res = W_NDimArray.from_shape(space, out_shape, dtype, w_instance=self) # This is the place to add fpypy and blas - return loop.multidim_dot(space, self, other, w_res, dtype, + return loop.multidim_dot(space, self, other, w_res, dtype, other_critical_dim) def descr_mean(self, space, __args__): @@ -946,7 +966,8 @@ def _reduce_ufunc_impl(ufunc_name, promote_to_largest=False, cumulative=False): - def impl(self, space, w_axis=None, w_dtype=None, w_out=None): + @unwrap_spec(keepdims=bool) + def impl(self, space, w_axis=None, w_dtype=None, w_out=None, keepdims=False): if space.is_none(w_out): out = None elif not isinstance(w_out, W_NDimArray): @@ -956,7 +977,7 @@ out = w_out return getattr(interp_ufuncs.get(space), ufunc_name).reduce( space, self, promote_to_largest, w_axis, - False, out, w_dtype, cumulative=cumulative) + keepdims, out, w_dtype, cumulative=cumulative) return func_with_new_name(impl, "reduce_%s_impl_%d_%d" % (ufunc_name, promote_to_largest, cumulative)) @@ -1288,7 +1309,9 @@ __gt__ = interp2app(W_NDimArray.descr_gt), __ge__ = interp2app(W_NDimArray.descr_ge), - dtype = GetSetProperty(W_NDimArray.descr_get_dtype), + dtype = GetSetProperty(W_NDimArray.descr_get_dtype, + W_NDimArray.descr_set_dtype, + W_NDimArray.descr_del_dtype), shape = GetSetProperty(W_NDimArray.descr_get_shape, W_NDimArray.descr_set_shape), strides = GetSetProperty(W_NDimArray.descr_get_strides), @@ -1336,6 +1359,7 @@ flat = GetSetProperty(W_NDimArray.descr_get_flatiter, W_NDimArray.descr_set_flatiter), item = interp2app(W_NDimArray.descr_item), + itemset = interp2app(W_NDimArray.descr_itemset), real = GetSetProperty(W_NDimArray.descr_get_real, W_NDimArray.descr_set_real), imag = GetSetProperty(W_NDimArray.descr_get_imag, diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -252,8 +252,20 @@ if out: out.set_scalar_value(res) return out + if keepdims: + shape = [1] * len(obj_shape) + out = W_NDimArray.from_shape(space, [1] * len(obj_shape), dtype, w_instance=obj) + out.implementation.setitem(0, res) + return out return res + def descr_outer(self, space, __args__): + return self._outer(space, __args__) + + def 
_outer(self, space, __args__): + raise OperationError(space.w_ValueError, + space.wrap("outer product only supported for binary functions")) + class W_Ufunc1(W_Ufunc): _immutable_fields_ = ["func", "bool_result"] argcount = 1 @@ -432,6 +444,7 @@ nin = interp_attrproperty("argcount", cls=W_Ufunc), reduce = interp2app(W_Ufunc.descr_reduce), + outer = interp2app(W_Ufunc.descr_outer), ) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -146,8 +146,7 @@ while not obj_iter.done(): reduce_driver.jit_merge_point(shapelen=shapelen, func=func, done_func=done_func, - calc_dtype=calc_dtype, - ) + calc_dtype=calc_dtype) rval = obj_iter.getitem().convert_to(space, calc_dtype) if done_func is not None and done_func(calc_dtype, rval): return rval @@ -172,8 +171,7 @@ shapelen = len(obj.get_shape()) while not obj_iter.done(): reduce_cum_driver.jit_merge_point(shapelen=shapelen, func=func, - dtype=calc_dtype, - ) + dtype=calc_dtype) rval = obj_iter.getitem().convert_to(space, calc_dtype) cur_value = func(calc_dtype, cur_value, rval) out_iter.setitem(cur_value) @@ -271,8 +269,7 @@ iter.next() shapelen = len(arr.get_shape()) while not iter.done(): - arg_driver.jit_merge_point(shapelen=shapelen, dtype=dtype, - ) + arg_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) w_val = iter.getitem() new_best = getattr(dtype.itemtype, op_name)(cur_best, w_val) if dtype.itemtype.ne(new_best, cur_best): @@ -311,6 +308,7 @@ if i != right_critical_dim] right_skip = range(len(left_shape) - 1) result_skip = [len(result.get_shape()) - (len(right_shape) > 1)] + assert result.get_dtype() == dtype outi = result.create_dot_iter(broadcast_shape, result_skip) lefti = left.create_dot_iter(broadcast_shape, left_skip) righti = right.create_dot_iter(broadcast_shape, right_skip) @@ -318,10 +316,10 @@ dot_driver.jit_merge_point(dtype=dtype) lval = lefti.getitem().convert_to(space, dtype) rval = righti.getitem().convert_to(space, dtype) - outval = outi.getitem().convert_to(space, dtype) + outval = outi.getitem() v = dtype.itemtype.mul(lval, rval) - value = dtype.itemtype.add(v, outval).convert_to(space, dtype) - outi.setitem(value) + v = dtype.itemtype.add(v, outval) + outi.setitem(v) outi.next() righti.next() lefti.next() @@ -652,8 +650,8 @@ out_iter = out.create_iter(shape) while not arr_iter.done(): round_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) - w_v = dtype.itemtype.round(arr_iter.getitem().convert_to(space, dtype), - decimals) + w_v = arr_iter.getitem().convert_to(space, dtype) + w_v = dtype.itemtype.round(w_v, decimals) out_iter.setitem(w_v) arr_iter.next() out_iter.next() diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -72,6 +72,8 @@ is_rec_type = dtype is not None and dtype.is_record_type() if is_rec_type and is_single_elem(space, w_iterable, is_rec_type): return [], [w_iterable] + if isinstance(w_iterable, W_NDimArray) and w_iterable.is_scalar(): + return [], [w_iterable] shape = [space.len_w(w_iterable)] batch = space.listview(w_iterable) while True: diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -56,6 +56,10 @@ b = arange(12).reshape(4, 3) c = a.dot(b) assert (c == [[ 42, 48, 54], [114, 136, 158], [186, 224, 262]]).all() + c = 
a.dot(b.astype(float)) + assert (c == [[ 42, 48, 54], [114, 136, 158], [186, 224, 262]]).all() + c = a.astype(float).dot(b) + assert (c == [[ 42, 48, 54], [114, 136, 158], [186, 224, 262]]).all() a = arange(24).reshape(2, 3, 4) raises(ValueError, "a.dot(a)") @@ -91,9 +95,11 @@ out = arange(9).reshape(3, 3) c = dot(a, b, out=out) assert (c == out).all() - out = arange(9,dtype=float).reshape(3, 3) + assert (c == [[42, 48, 54], [114, 136, 158], [186, 224, 262]]).all() + out = arange(9, dtype=float).reshape(3, 3) exc = raises(ValueError, dot, a, b, out) - assert exc.value[0].find('not acceptable') > 0 + assert exc.value[0] == ('output array is not acceptable (must have the ' + 'right type, nr dimensions, and be a C-Array)') def test_choose_basic(self): from numpypy import array diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -204,6 +204,9 @@ assert array([256], 'B')[0] == 0 assert array([32768], 'h')[0] == -32768 assert array([65536], 'H')[0] == 0 + a = array([65520], dtype='float64') + b = array(a, dtype='float16') + assert b == float('inf') if dtype('l').itemsize == 4: # 32-bit raises(OverflowError, "array([2**32/2], 'i')") raises(OverflowError, "array([2**32], 'I')") @@ -785,6 +788,14 @@ assert dtype('>i8').str == '>i8' assert dtype('int8').str == '|i1' assert dtype('float').str == byteorder + 'f8' + assert dtype('f').str == byteorder + 'f4' + assert dtype('=f').str == byteorder + 'f4' + assert dtype('>f').str == '>f4' + assert dtype('d').str == '>f8' + assert dtype(' wrapped int or long - - else: - # strict version - - def accept_int_arg(self): - w_obj = self.accept_obj_arg() - return self.space.int_w(w_obj) - - def accept_uint_arg(self): - w_obj = self.accept_obj_arg() - return self.space.uint_w(w_obj) - - def accept_longlong_arg(self): - w_obj = self.accept_obj_arg() - return self.space.r_longlong_w(w_obj) - - def accept_ulonglong_arg(self): - w_obj = self.accept_obj_arg() - return self.space.r_ulonglong_w(w_obj) + def _maybe_float(self, w_obj): + space = self.space + if space.isinstance_w(w_obj, space.w_float): + msg = "struct: integer argument expected, got float" + else: + msg = "integer argument expected, got non-integer" + space.warn(space.wrap(msg), space.w_DeprecationWarning) + return space.int(w_obj) # wrapped float -> wrapped int or long def accept_bool_arg(self): w_obj = self.accept_obj_arg() diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -155,9 +155,7 @@ to exc_info() will return (None,None,None) until another exception is raised and caught in the current thread or the execution stack returns to a frame where another exception is being handled.""" - operror = space.getexecutioncontext().sys_exc_info() - if operror is not None: - operror.clear(space) + space.getexecutioncontext().clear_sys_exc_info() def settrace(space, w_func): """Set the global debug tracing function. 
It will be called on each diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -57,7 +57,7 @@ return True def delete(self, obj, selector): - return None + pass def find_map_attr(self, selector): if jit.we_are_jitted(): @@ -291,6 +291,7 @@ def delete(self, obj, selector): if selector == self.selector: # ok, attribute is deleted + self.ever_mutated = True return self.back.copy(obj) new_obj = self.back.delete(obj, selector) if new_obj is not None: diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -144,7 +144,15 @@ assert obj2.map.back.ever_mutated == True assert obj2.map is obj.map - +def test_attr_immutability_delete(monkeypatch): + cls = Class() + obj = cls.instantiate() + obj.setdictvalue(space, "a", 10) + map1 = obj.map + obj.deldictvalue(space, "a") + obj.setdictvalue(space, "a", 20) + assert obj.map.ever_mutated == True + assert obj.map is map1 def test_delete(): for i, dattr in enumerate(["a", "b", "c"]): diff --git a/rpython/annotator/binaryop.py b/rpython/annotator/binaryop.py --- a/rpython/annotator/binaryop.py +++ b/rpython/annotator/binaryop.py @@ -10,7 +10,7 @@ SomeDict, SomeOrderedDict, SomeUnicodeCodePoint, SomeUnicodeString, SomeTuple, SomeImpossibleValue, s_ImpossibleValue, SomeInstance, SomeBuiltin, SomeIterator, SomePBC, SomeFloat, s_None, SomeByteArray, - SomeWeakRef, SomeAddress, SomeTypedAddressAccess, SomeSingleFloat, + SomeWeakRef, SomeSingleFloat, SomeLongFloat, SomeType, SomeConstantType, unionof, UnionError, read_can_only_throw, add_knowntypedata, merge_knowntypedata,) @@ -826,50 +826,6 @@ raise AnnotatorError('add on %r' % pbc) return s_ImpossibleValue -# ____________________________________________________________ -# annotation of low-level types -from rpython.annotator.model import SomePtr -from rpython.annotator.model import ll_to_annotation, annotation_to_lltype - -class __extend__(pairtype(SomePtr, SomePtr)): - def union((p1, p2)): - assert p1.ll_ptrtype == p2.ll_ptrtype,("mixing of incompatible pointer types: %r, %r" % - (p1.ll_ptrtype, p2.ll_ptrtype)) - return SomePtr(p1.ll_ptrtype) - -class __extend__(pairtype(SomePtr, SomeInteger)): - - def getitem((p, int1)): - example = p.ll_ptrtype._example() - try: - v = example[0] - except IndexError: - return None # impossible value, e.g. FixedSizeArray(0) - return ll_to_annotation(v) - getitem.can_only_throw = [] - - def setitem((p, int1), s_value): # just doing checking - example = p.ll_ptrtype._example() - if example[0] is not None: # ignore Void s_value - v_lltype = annotation_to_lltype(s_value) - example[0] = v_lltype._defl() - setitem.can_only_throw = [] - -class __extend__(pairtype(SomePtr, SomeObject)): - def union((p, obj)): - assert False, ("mixing pointer type %r with something else %r" % (p.ll_ptrtype, obj)) - - def getitem((p, obj)): - assert False,"ptr %r getitem index not an int: %r" % (p.ll_ptrtype, obj) - - def setitem((p, obj), s_value): - assert False,"ptr %r setitem index not an int: %r" % (p.ll_ptrtype, obj) - -class __extend__(pairtype(SomeObject, SomePtr)): - def union((obj, p2)): - return pair(p2, obj).union() - - #_________________________________________ # weakrefs @@ -884,62 +840,3 @@ if basedef is None: # no common base class! complain... 
return SomeObject() return SomeWeakRef(basedef) - -#_________________________________________ -# memory addresses - -class __extend__(pairtype(SomeAddress, SomeAddress)): - def union((s_addr1, s_addr2)): - return SomeAddress() - - def sub((s_addr1, s_addr2)): - if s_addr1.is_null_address() and s_addr2.is_null_address(): - return getbookkeeper().immutablevalue(0) - return SomeInteger() - - def is_((s_addr1, s_addr2)): - assert False, "comparisons with is not supported by addresses" - -class __extend__(pairtype(SomeTypedAddressAccess, SomeTypedAddressAccess)): - def union((s_taa1, s_taa2)): - assert s_taa1.type == s_taa2.type - return s_taa1 - -class __extend__(pairtype(SomeTypedAddressAccess, SomeInteger)): - def getitem((s_taa, s_int)): - from rpython.annotator.model import lltype_to_annotation - return lltype_to_annotation(s_taa.type) - getitem.can_only_throw = [] - - def setitem((s_taa, s_int), s_value): - from rpython.annotator.model import annotation_to_lltype - assert annotation_to_lltype(s_value) is s_taa.type - setitem.can_only_throw = [] - - -class __extend__(pairtype(SomeAddress, SomeInteger)): - def add((s_addr, s_int)): - return SomeAddress() - - def sub((s_addr, s_int)): - return SomeAddress() - -class __extend__(pairtype(SomeAddress, SomeImpossibleValue)): - # need to override this specifically to hide the 'raise UnionError' - # of pairtype(SomeAddress, SomeObject). - def union((s_addr, s_imp)): - return s_addr - -class __extend__(pairtype(SomeImpossibleValue, SomeAddress)): - # need to override this specifically to hide the 'raise UnionError' - # of pairtype(SomeObject, SomeAddress). - def union((s_imp, s_addr)): - return s_addr - -class __extend__(pairtype(SomeAddress, SomeObject)): - def union((s_addr, s_obj)): - raise UnionError(s_addr, s_obj) - -class __extend__(pairtype(SomeObject, SomeAddress)): - def union((s_obj, s_addr)): - raise UnionError(s_obj, s_addr) diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -8,11 +8,13 @@ from rpython.flowspace.model import Constant from rpython.annotator.model import (SomeOrderedDict, - SomeString, SomeChar, SomeFloat, SomePtr, unionof, SomeInstance, SomeDict, - SomeBuiltin, SomePBC, SomeInteger, TLS, SomeAddress, SomeUnicodeCodePoint, - s_None, s_ImpossibleValue, SomeLLADTMeth, SomeBool, SomeTuple, + SomeString, SomeChar, SomeFloat, unionof, SomeInstance, SomeDict, + SomeBuiltin, SomePBC, SomeInteger, TLS, SomeUnicodeCodePoint, + s_None, s_ImpossibleValue, SomeBool, SomeTuple, SomeImpossibleValue, SomeUnicodeString, SomeList, HarmlesslyBlocked, - SomeWeakRef, lltype_to_annotation, SomeType, SomeByteArray, SomeConstantType) + SomeWeakRef, SomeByteArray, SomeConstantType) +from rpython.rtyper.llannotation import ( + SomeAddress, SomePtr, SomeLLADTMeth, lltype_to_annotation) from rpython.annotator.classdef import InstanceSource, ClassDef from rpython.annotator.listdef import ListDef, ListItem from rpython.annotator.dictdef import DictDef diff --git a/rpython/annotator/builtin.py b/rpython/annotator/builtin.py --- a/rpython/annotator/builtin.py +++ b/rpython/annotator/builtin.py @@ -4,12 +4,12 @@ import sys from rpython.annotator.model import ( - SomeInteger, SomeObject, SomeChar, SomeBool, SomeString, SomeTuple, s_Bool, - SomeUnicodeCodePoint, SomeAddress, SomeFloat, unionof, SomeUnicodeString, + SomeInteger, SomeObject, SomeChar, SomeBool, SomeString, SomeTuple, + SomeUnicodeCodePoint, SomeFloat, unionof, SomeUnicodeString, 
SomePBC, SomeInstance, SomeDict, SomeList, SomeWeakRef, SomeIterator, - SomeOrderedDict, - SomeByteArray, annotation_to_lltype, lltype_to_annotation, - ll_to_annotation, add_knowntypedata, s_ImpossibleValue,) + SomeOrderedDict, SomeByteArray, add_knowntypedata, s_ImpossibleValue,) +from rpython.rtyper.llannotation import ( + SomeAddress, annotation_to_lltype, lltype_to_annotation, ll_to_annotation) from rpython.annotator.bookkeeper import getbookkeeper from rpython.annotator import description from rpython.flowspace.model import Constant @@ -356,7 +356,7 @@ @analyzer_for(rpython.rtyper.lltypesystem.llmemory.cast_ptr_to_adr) def llmemory_cast_ptr_to_adr(s): - from rpython.annotator.model import SomeInteriorPtr + from rpython.rtyper.llannotation import SomeInteriorPtr assert not isinstance(s, SomeInteriorPtr) return SomeAddress() @@ -389,7 +389,7 @@ # annotation of low-level types -from rpython.annotator.model import SomePtr +from rpython.rtyper.llannotation import SomePtr from rpython.rtyper.lltypesystem import lltype @analyzer_for(lltype.malloc) diff --git a/rpython/annotator/description.py b/rpython/annotator/description.py --- a/rpython/annotator/description.py +++ b/rpython/annotator/description.py @@ -624,7 +624,7 @@ except ValueError: pass else: - from rpython.annotator.model import SomePtr + from rpython.rtyper.llannotation import SomePtr assert not isinstance(s_arg, SomePtr) else: # call the constructor diff --git a/rpython/annotator/model.py b/rpython/annotator/model.py --- a/rpython/annotator/model.py +++ b/rpython/annotator/model.py @@ -569,139 +569,6 @@ self.classdef = classdef # ____________________________________________________________ -# memory addresses - -from rpython.rtyper.lltypesystem import llmemory - - -class SomeAddress(SomeObject): - immutable = True - - def can_be_none(self): - return False - - def is_null_address(self): - return self.is_immutable_constant() and not self.const - - -# The following class is used to annotate the intermediate value that -# appears in expressions of the form: -# addr.signed[offset] and addr.signed[offset] = value - -class SomeTypedAddressAccess(SomeObject): - def __init__(self, type): - self.type = type - - def can_be_none(self): - return False - -#____________________________________________________________ -# annotation of low-level types - -from rpython.rtyper.lltypesystem import lltype - - -class SomePtr(SomeObject): - knowntype = lltype._ptr - immutable = True - - def __init__(self, ll_ptrtype): - assert isinstance(ll_ptrtype, lltype.Ptr) - self.ll_ptrtype = ll_ptrtype - - def can_be_none(self): - return False - - -class SomeInteriorPtr(SomePtr): - def __init__(self, ll_ptrtype): - assert isinstance(ll_ptrtype, lltype.InteriorPtr) - self.ll_ptrtype = ll_ptrtype - - -class SomeLLADTMeth(SomeObject): - immutable = True - - def __init__(self, ll_ptrtype, func): - self.ll_ptrtype = ll_ptrtype - self.func = func - - def can_be_none(self): - return False - - - -annotation_to_ll_map = [ - (SomeSingleFloat(), lltype.SingleFloat), - (s_None, lltype.Void), # also matches SomeImpossibleValue() - (s_Bool, lltype.Bool), - (SomeFloat(), lltype.Float), - (SomeLongFloat(), lltype.LongFloat), - (SomeChar(), lltype.Char), - (SomeUnicodeCodePoint(), lltype.UniChar), - (SomeAddress(), llmemory.Address), -] - - -def annotation_to_lltype(s_val, info=None): - if isinstance(s_val, SomeInteriorPtr): - p = s_val.ll_ptrtype - if 0 in p.offsets: - assert list(p.offsets).count(0) == 1 - return 
lltype.Ptr(lltype.Ptr(p.PARENTTYPE)._interior_ptr_type_with_index(p.TO)) - else: - return lltype.Ptr(p.PARENTTYPE) - if isinstance(s_val, SomePtr): - return s_val.ll_ptrtype - if type(s_val) is SomeInteger: - return lltype.build_number(None, s_val.knowntype) - - for witness, T in annotation_to_ll_map: - if witness.contains(s_val): - return T - if info is None: - info = '' - else: - info = '%s: ' % info - raise ValueError("%sshould return a low-level type,\ngot instead %r" % ( - info, s_val)) - -ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map]) - - -def lltype_to_annotation(T): - try: - s = ll_to_annotation_map.get(T) - except TypeError: - s = None # unhashable T, e.g. a Ptr(GcForwardReference()) - if s is None: - if isinstance(T, lltype.Typedef): - return lltype_to_annotation(T.OF) - if isinstance(T, lltype.Number): - return SomeInteger(knowntype=T._type) - elif isinstance(T, lltype.InteriorPtr): - return SomeInteriorPtr(T) - else: - return SomePtr(T) - else: - return s - - -def ll_to_annotation(v): - if v is None: - # i think we can only get here in the case of void-returning - # functions - return s_None - if isinstance(v, lltype._interior_ptr): - ob = v._parent - if ob is None: - raise RuntimeError - T = lltype.InteriorPtr(lltype.typeOf(ob), v._T, v._offsets) - return SomeInteriorPtr(T) - return lltype_to_annotation(lltype.typeOf(v)) - - -# ____________________________________________________________ class AnnotatorError(Exception): diff --git a/rpython/annotator/signature.py b/rpython/annotator/signature.py --- a/rpython/annotator/signature.py +++ b/rpython/annotator/signature.py @@ -2,10 +2,11 @@ from __future__ import absolute_import import types -from rpython.annotator.model import SomeBool, SomeInteger, SomeString,\ - SomeFloat, SomeList, SomeDict, s_None, \ - SomeObject, SomeInstance, SomeTuple, lltype_to_annotation,\ - unionof, SomeUnicodeString, SomeType, AnnotatorError +from rpython.annotator.model import ( + SomeBool, SomeInteger, SomeString, SomeFloat, SomeList, SomeDict, s_None, + SomeObject, SomeInstance, SomeTuple, unionof, SomeUnicodeString, SomeType, + AnnotatorError) +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.annotator.listdef import ListDef from rpython.annotator.dictdef import DictDef diff --git a/rpython/annotator/test/test_model.py b/rpython/annotator/test/test_model.py --- a/rpython/annotator/test/test_model.py +++ b/rpython/annotator/test/test_model.py @@ -99,73 +99,6 @@ assert not s1.contains(s2) assert s1 != s2 -def test_ll_to_annotation(): - s_z = ll_to_annotation(lltype.Signed._defl()) - s_s = SomeInteger() - s_u = SomeInteger(nonneg=True, unsigned=True) - assert s_z.contains(s_s) - assert not s_z.contains(s_u) - s_uz = ll_to_annotation(lltype.Unsigned._defl()) - assert s_uz.contains(s_u) - assert ll_to_annotation(lltype.Bool._defl()).contains(SomeBool()) - assert ll_to_annotation(lltype.Char._defl()).contains(SomeChar()) - S = lltype.GcStruct('s') - A = lltype.GcArray() - s_p = ll_to_annotation(lltype.malloc(S)) - assert isinstance(s_p, SomePtr) and s_p.ll_ptrtype == lltype.Ptr(S) - s_p = ll_to_annotation(lltype.malloc(A, 0)) - assert isinstance(s_p, SomePtr) and s_p.ll_ptrtype == lltype.Ptr(A) - -def test_annotation_to_lltype(): - from rpython.rlib.rarithmetic import r_uint, r_singlefloat - s_i = SomeInteger() - s_pos = SomeInteger(nonneg=True) - s_1 = SomeInteger(nonneg=True); s_1.const = 1 - s_m1 = SomeInteger(nonneg=False); s_m1.const = -1 - s_u = SomeInteger(nonneg=True, unsigned=True); - s_u1 = 
SomeInteger(nonneg=True, unsigned=True); - s_u1.const = r_uint(1) - assert annotation_to_lltype(s_i) == lltype.Signed - assert annotation_to_lltype(s_pos) == lltype.Signed - assert annotation_to_lltype(s_1) == lltype.Signed - assert annotation_to_lltype(s_m1) == lltype.Signed - assert annotation_to_lltype(s_u) == lltype.Unsigned - assert annotation_to_lltype(s_u1) == lltype.Unsigned - assert annotation_to_lltype(SomeBool()) == lltype.Bool - assert annotation_to_lltype(SomeChar()) == lltype.Char - PS = lltype.Ptr(lltype.GcStruct('s')) - s_p = SomePtr(ll_ptrtype=PS) - assert annotation_to_lltype(s_p) == PS - py.test.raises(ValueError, "annotation_to_lltype(si0)") - s_singlefloat = SomeSingleFloat() - s_singlefloat.const = r_singlefloat(0.0) - assert annotation_to_lltype(s_singlefloat) == lltype.SingleFloat - -def test_ll_union(): - PS1 = lltype.Ptr(lltype.GcStruct('s')) - PS2 = lltype.Ptr(lltype.GcStruct('s')) - PS3 = lltype.Ptr(lltype.GcStruct('s3')) - PA1 = lltype.Ptr(lltype.GcArray()) - PA2 = lltype.Ptr(lltype.GcArray()) - - assert unionof(SomePtr(PS1),SomePtr(PS1)) == SomePtr(PS1) - assert unionof(SomePtr(PS1),SomePtr(PS2)) == SomePtr(PS2) - assert unionof(SomePtr(PS1),SomePtr(PS2)) == SomePtr(PS1) - - assert unionof(SomePtr(PA1),SomePtr(PA1)) == SomePtr(PA1) - assert unionof(SomePtr(PA1),SomePtr(PA2)) == SomePtr(PA2) - assert unionof(SomePtr(PA1),SomePtr(PA2)) == SomePtr(PA1) - - assert unionof(SomePtr(PS1),SomeImpossibleValue()) == SomePtr(PS1) - assert unionof(SomeImpossibleValue(), SomePtr(PS1)) == SomePtr(PS1) - - py.test.raises(AssertionError, "unionof(SomePtr(PA1), SomePtr(PS1))") - py.test.raises(AssertionError, "unionof(SomePtr(PS1), SomePtr(PS3))") - py.test.raises(AssertionError, "unionof(SomePtr(PS1), SomeInteger())") - py.test.raises(AssertionError, "unionof(SomePtr(PS1), SomeObject())") - py.test.raises(AssertionError, "unionof(SomeInteger(), SomePtr(PS1))") - py.test.raises(AssertionError, "unionof(SomeObject(), SomePtr(PS1))") - def test_nan(): f1 = SomeFloat() f1.const = float("nan") diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -4,12 +4,11 @@ from __future__ import absolute_import -from types import MethodType from rpython.flowspace.operation import op from rpython.annotator.model import (SomeObject, SomeInteger, SomeBool, SomeString, SomeChar, SomeList, SomeDict, SomeTuple, SomeImpossibleValue, SomeUnicodeCodePoint, SomeInstance, SomeBuiltin, SomeFloat, SomeIterator, - SomePBC, SomeTypedAddressAccess, SomeAddress, SomeType, s_ImpossibleValue, + SomePBC, SomeType, s_ImpossibleValue, s_Bool, s_None, unionof, add_knowntypedata, HarmlesslyBlocked, SomeWeakRef, SomeUnicodeString, SomeByteArray) from rpython.annotator.bookkeeper import getbookkeeper @@ -757,62 +756,6 @@ # This should probably never happen raise AnnotatorError("Cannot call len on a pbc") -# annotation of low-level types -from rpython.annotator.model import SomePtr, SomeLLADTMeth -from rpython.annotator.model import ll_to_annotation, lltype_to_annotation, annotation_to_lltype - -class __extend__(SomePtr): - - def getattr(self, s_attr): - assert s_attr.is_constant(), "getattr on ptr %r with non-constant field-name" % self.ll_ptrtype - example = self.ll_ptrtype._example() - try: - v = example._lookup_adtmeth(s_attr.const) - except AttributeError: - v = getattr(example, s_attr.const) - return ll_to_annotation(v) - else: - if isinstance(v, MethodType): - from rpython.rtyper.lltypesystem import lltype - ll_ptrtype = 
lltype.typeOf(v.im_self) - assert isinstance(ll_ptrtype, (lltype.Ptr, lltype.InteriorPtr)) - return SomeLLADTMeth(ll_ptrtype, v.im_func) - return getbookkeeper().immutablevalue(v) - getattr.can_only_throw = [] - - def len(self): - length = self.ll_ptrtype._example()._fixedlength() - if length is None: - return SomeObject.len(self) - else: - return immutablevalue(length) - - def setattr(self, s_attr, s_value): # just doing checking - assert s_attr.is_constant(), "setattr on ptr %r with non-constant field-name" % self.ll_ptrtype - example = self.ll_ptrtype._example() - if getattr(example, s_attr.const) is not None: # ignore Void s_value - v_lltype = annotation_to_lltype(s_value) - setattr(example, s_attr.const, v_lltype._defl()) - - def call(self, args): - args_s, kwds_s = args.unpack() - if kwds_s: - raise Exception("keyword arguments to call to a low-level fn ptr") - info = 'argument to ll function pointer call' - llargs = [annotation_to_lltype(s_arg,info)._defl() for s_arg in args_s] - v = self.ll_ptrtype._example()(*llargs) - return ll_to_annotation(v) - - def bool(self): - return s_Bool - -class __extend__(SomeLLADTMeth): - - def call(self, args): - bookkeeper = getbookkeeper() - s_func = bookkeeper.immutablevalue(self.func) - return s_func.call(args.prepend(lltype_to_annotation(self.ll_ptrtype))) - #_________________________________________ # weakrefs @@ -822,20 +765,3 @@ return s_None # known to be a dead weakref else: return SomeInstance(self.classdef, can_be_None=True) - -#_________________________________________ -# memory addresses - -from rpython.rtyper.lltypesystem import llmemory - -class __extend__(SomeAddress): - def getattr(self, s_attr): - assert s_attr.is_constant() - assert isinstance(s_attr, SomeString) - assert s_attr.const in llmemory.supported_access_types - return SomeTypedAddressAccess( - llmemory.supported_access_types[s_attr.const]) - getattr.can_only_throw = [] - - def bool(self): - return s_Bool diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -2,6 +2,7 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.llinterp import LLInterpreter from rpython.rtyper.annlowlevel import llhelper, MixLevelHelperAnnotator +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.jit.metainterp import history from rpython.jit.codewriter import heaptracker, longlong @@ -14,7 +15,6 @@ FieldDescr, ArrayDescr, CallDescr, InteriorFieldDescr, FLAG_POINTER, FLAG_FLOAT) from rpython.jit.backend.llsupport.asmmemmgr import AsmMemoryManager -from rpython.annotator import model as annmodel from rpython.rlib.unroll import unrolling_iterable @@ -111,8 +111,8 @@ fptr = llhelper(FUNC_TP, realloc_frame) else: FUNC = FUNC_TP.TO - args_s = [annmodel.lltype_to_annotation(ARG) for ARG in FUNC.ARGS] - s_result = annmodel.lltype_to_annotation(FUNC.RESULT) + args_s = [lltype_to_annotation(ARG) for ARG in FUNC.ARGS] + s_result = lltype_to_annotation(FUNC.RESULT) mixlevelann = MixLevelHelperAnnotator(self.rtyper) graph = mixlevelann.getgraph(realloc_frame, args_s, s_result) fptr = mixlevelann.graph2delayed(graph, FUNC) @@ -123,8 +123,8 @@ fptr = llhelper(FUNC_TP, realloc_frame_crash) else: FUNC = FUNC_TP.TO - args_s = [annmodel.lltype_to_annotation(ARG) for ARG in FUNC.ARGS] - s_result = annmodel.lltype_to_annotation(FUNC.RESULT) + args_s = 
[lltype_to_annotation(ARG) for ARG in FUNC.ARGS] + s_result = lltype_to_annotation(FUNC.RESULT) mixlevelann = MixLevelHelperAnnotator(self.rtyper) graph = mixlevelann.getgraph(realloc_frame_crash, args_s, s_result) fptr = mixlevelann.graph2delayed(graph, FUNC) diff --git a/rpython/jit/codewriter/support.py b/rpython/jit/codewriter/support.py --- a/rpython/jit/codewriter/support.py +++ b/rpython/jit/codewriter/support.py @@ -1,6 +1,7 @@ import sys from rpython.annotator import model as annmodel +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.annotator.policy import AnnotatorPolicy from rpython.flowspace.model import Variable, Constant from rpython.jit.metainterp.typesystem import deref @@ -32,7 +33,7 @@ if T == lltype.Ptr(ll_rstr.STR): t = str else: - t = annmodel.lltype_to_annotation(T) + t = lltype_to_annotation(T) return a.typeannotation(t) def annotate(func, values, inline=None, backendoptimize=True, @@ -814,12 +815,12 @@ return rtyper._builtin_func_for_spec_cache[key] except (KeyError, AttributeError): pass - args_s = [annmodel.lltype_to_annotation(v) for v in ll_args] + args_s = [lltype_to_annotation(v) for v in ll_args] if '.' not in oopspec_name: # 'newxxx' operations LIST_OR_DICT = ll_res else: LIST_OR_DICT = ll_args[0] - s_result = annmodel.lltype_to_annotation(ll_res) + s_result = lltype_to_annotation(ll_res) impl = setup_extra_builtin(rtyper, oopspec_name, len(args_s), extra) if getattr(impl, 'need_result_type', False): bk = rtyper.annotator.bookkeeper diff --git a/rpython/jit/metainterp/test/test_virtualizable.py b/rpython/jit/metainterp/test/test_virtualizable.py --- a/rpython/jit/metainterp/test/test_virtualizable.py +++ b/rpython/jit/metainterp/test/test_virtualizable.py @@ -9,6 +9,7 @@ from rpython.rlib.jit import JitDriver, hint, dont_look_inside, promote, virtual_ref from rpython.rlib.rarithmetic import intmask from rpython.rtyper.annlowlevel import hlstr +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.lltypesystem import lltype, lloperation, rclass, llmemory from rpython.rtyper.rclass import IR_IMMUTABLE, IR_IMMUTABLE_ARRAY, FieldListAccessor @@ -23,7 +24,6 @@ _about_ = promote_virtualizable def compute_result_annotation(self, *args): - from rpython.annotator.model import lltype_to_annotation return lltype_to_annotation(lltype.Void) def specialize_call(self, hop): diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -4,6 +4,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper.annlowlevel import (llhelper, MixLevelHelperAnnotator, cast_base_ptr_to_instance, hlstr) +from rpython.rtyper.llannotation import lltype_to_annotation from rpython.annotator import model as annmodel from rpython.rtyper.llinterp import LLException from rpython.rtyper.test.test_llinterp import get_interpreter, clear_tcache @@ -662,8 +663,8 @@ if not self.cpu.translate_support_code: return llhelper(FUNCPTR, func) FUNC = FUNCPTR.TO - args_s = [annmodel.lltype_to_annotation(ARG) for ARG in FUNC.ARGS] - s_result = annmodel.lltype_to_annotation(FUNC.RESULT) + args_s = [lltype_to_annotation(ARG) for ARG in FUNC.ARGS] + s_result = lltype_to_annotation(FUNC.RESULT) graph = self.annhelper.getgraph(func, args_s, s_result) return self.annhelper.graph2delayed(graph, FUNC) diff --git a/rpython/memory/gctransform/asmgcroot.py 
b/rpython/memory/gctransform/asmgcroot.py --- a/rpython/memory/gctransform/asmgcroot.py +++ b/rpython/memory/gctransform/asmgcroot.py @@ -7,6 +7,7 @@ from rpython.rtyper.lltypesystem.lloperation import llop from rpython.memory.gctransform.framework import ( BaseFrameworkGCTransformer, BaseRootWalker) +from rpython.rtyper.llannotation import SomeAddress from rpython.rtyper.rbuiltin import gen_cast from rpython.translator.unsimplify import copyvar, varoftype from rpython.translator.tool.cbuild import ExternalCompilationInfo @@ -215,7 +216,7 @@ # update the global stack counter rffi.stackcounter.stacks_counter += 1 # - s_addr = annmodel.SomeAddress() + s_addr = SomeAddress() s_None = annmodel.s_None self.gc_detach_callback_pieces_ptr = getfn(gc_detach_callback_pieces, From noreply at buildbot.pypy.org Tue Feb 4 06:02:19 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 4 Feb 2014 06:02:19 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: add more include guards, permeate headers_to_precompile to everywhere needed, create windows make targets without precompiled headers Message-ID: <20140204050219.769611C0470@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r69072:9598a8f45a76 Date: 2014-02-02 22:59 +0200 http://bitbucket.org/pypy/pypy/changeset/9598a8f45a76/ Log: add more include guards, permeate headers_to_precompile to everywhere needed, create windows make targets without precompiled headers diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -910,6 +910,8 @@ # implement function callbacks and generate function decls functions = [] pypy_decls = [] + pypy_decls.append("#ifndef _PYPY_PYPY_DECL_H\n") + pypy_decls.append("#define _PYPY_PYPY_DECL_H\n") pypy_decls.append("#ifndef PYPY_STANDALONE\n") pypy_decls.append("#ifdef __cplusplus") pypy_decls.append("extern \"C\" {") @@ -953,6 +955,7 @@ pypy_decls.append("}") pypy_decls.append("#endif") pypy_decls.append("#endif /*PYPY_STANDALONE*/\n") + pypy_decls.append("#endif /*_PYPY_PYPY_DECL_H*/\n") pypy_decl_h = udir.join('pypy_decl.h') pypy_decl_h.write('\n'.join(pypy_decls)) diff --git a/rpython/translator/c/dlltool.py b/rpython/translator/c/dlltool.py --- a/rpython/translator/c/dlltool.py +++ b/rpython/translator/c/dlltool.py @@ -21,7 +21,8 @@ entrypoints.append(getfunctionptr(graph)) return entrypoints - def gen_makefile(self, targetdir, exe_name=None): + def gen_makefile(self, targetdir, exe_name=None, + headers_to_precompile=[]): pass # XXX finish def compile(self): diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -260,12 +260,13 @@ defines['PYPY_MAIN_FUNCTION'] = "pypy_main_startup" self.eci = self.eci.merge(ExternalCompilationInfo( export_symbols=["pypy_main_startup", "pypy_debug_file"])) - self.eci, cfile, extra = gen_source(db, modulename, targetdir, - self.eci, defines=defines, - split=self.split) + self.eci, cfile, extra, headers_to_precompile = \ + gen_source(db, modulename, targetdir, + self.eci, defines=defines, split=self.split) self.c_source_filename = py.path.local(cfile) self.extrafiles = self.eventually_copy(extra) - self.gen_makefile(targetdir, exe_name=exe_name) + self.gen_makefile(targetdir, exe_name=exe_name, + headers_to_precompile=headers_to_precompile) return cfile def eventually_copy(self, cfiles): @@ -375,13 +376,14 @@ self._compiled = True return self.executable_name - def 
gen_makefile(self, targetdir, exe_name=None): + def gen_makefile(self, targetdir, exe_name=None, headers_to_precompile=[]): cfiles = [self.c_source_filename] + self.extrafiles if exe_name is not None: exe_name = targetdir.join(exe_name) mk = self.translator.platform.gen_makefile( cfiles, self.eci, path=targetdir, exe_name=exe_name, + headers_to_precompile=headers_to_precompile, shared=self.config.translation.shared) if self.has_profopt(): @@ -511,6 +513,7 @@ def __init__(self, database): self.database = database self.extrafiles = [] + self.headers_to_precompile = [] self.path = None self.namespace = NameManager() @@ -539,6 +542,8 @@ filepath = self.path.join(name) if name.endswith('.c'): self.extrafiles.append(filepath) + if name.endswith('.h'): + self.headers_to_precompile.append(filepath) return filepath.open('w') def getextrafiles(self): @@ -732,12 +737,14 @@ print >> f, "#endif" def gen_preimpl(f, database): + f.write('#ifndef _PY_PREIMPLE_H\n#define _PY_PREIMPL_H\n') if database.translator is None or database.translator.rtyper is None: return preimplementationlines = pre_include_code_lines( database, database.translator.rtyper) for line in preimplementationlines: print >> f, line + f.write('#endif /* _PY_PREIMPL_H */\n') def gen_startupcode(f, database): # generate the start-up code and put it into a function @@ -799,6 +806,7 @@ f = filename.open('w') incfilename = targetdir.join('common_header.h') fi = incfilename.open('w') + fi.write('#ifndef _PY_COMMON_HEADER_H\n#define _PY_COMMON_HEADER_H\n') # # Header @@ -811,6 +819,7 @@ eci.write_c_header(fi) print >> fi, '#include "src/g_prerequisite.h"' + fi.write('#endif /* _PY_COMMON_HEADER_H*/\n') fi.close() @@ -822,6 +831,8 @@ sg.set_strategy(targetdir, split) database.prepare_inline_helpers() sg.gen_readable_parts_of_source(f) + headers_to_precompile = sg.headers_to_precompile[:] + headers_to_precompile.insert(0, incfilename) gen_startupcode(f, database) f.close() @@ -835,4 +846,4 @@ eci = add_extra_files(eci) eci = eci.convert_sources_to_files() files, eci = eci.get_module_files() - return eci, filename, sg.getextrafiles() + list(files) + return eci, filename, sg.getextrafiles() + list(files), headers_to_precompile diff --git a/rpython/translator/platform/__init__.py b/rpython/translator/platform/__init__.py --- a/rpython/translator/platform/__init__.py +++ b/rpython/translator/platform/__init__.py @@ -100,7 +100,7 @@ return ExecutionResult(returncode, stdout, stderr) def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False, cfile_precompilation=None): + shared=False, headers_to_precompile=[]): raise NotImplementedError("Pure abstract baseclass") def __repr__(self): diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py --- a/rpython/translator/platform/posix.py +++ b/rpython/translator/platform/posix.py @@ -83,7 +83,7 @@ return [entry[2:] for entry in out.split()] def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False, cfile_precompilation=None): + shared=False, headers_to_precompile=[]): cfiles = self._all_cfiles(cfiles, eci) if path is None: diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -249,7 +249,7 @@ def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False, cfile_precompilation=None): + shared=False, headers_to_precompile=[]): cfiles = self._all_cfiles(cfiles, eci) if path is None: @@ -319,32 
+319,35 @@ definitions.append(('_WIN64', '1')) rules = [ + ('all', '$(DEFAULT_TARGET)', []), ('.asm.obj', '', '$(MASM) /nologo /Fo$@ /c $< $(INCLUDEDIRS)'), ] - if cfile_precompilation: + if len(headers_to_precompile)>0: stdafx_h = path.join('stdafx.h') txt = '#ifndef PYPY_STDAFX_H\n' txt += '#define PYPY_STDAFX_H\n' - txt += '\n'.join(['#include "' + m.pathrel(c) + '"' for c in cfile_precompilation]) + txt += '\n'.join(['#include "' + m.pathrel(c) + '"' for c in headers_to_precompile]) txt += '\n#endif\n' stdafx_h.write(txt) stdafx_c = path.join('stdafx.c') stdafx_c.write('#include "stdafx.h"\n') definitions.append(('CREATE_PCH', '/Ycstdafx.h /Fpstdafx.pch /FIstdafx.h')) definitions.append(('USE_PCH', '/Yustdafx.h /Fpstdafx.pch /FIstdafx.h')) - rules.append(('all', 'stdafx.pch $(DEFAULT_TARGET)', [])) - rules.append(('stdafx.pch', '', + rules.append(('$(OBJECTS)', 'stdafx.pch', [])) + rules.append(('stdafx.pch', 'stdafx.h', '$(CC) stdafx.c /c /nologo $(CFLAGS) $(CFLAGSEXTRA) $(CREATE_PCH) $(INCLUDEDIRS)')) rules.append(('.c.obj', '', '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) $(USE_PCH) /Fo$@ /c $< $(INCLUDEDIRS)')) + #Do not use precompiled headers for some files + rules.append((r'{..\module_cache}.c{..\module_cache}.obj', '', + '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) /Fo$@ /c $< $(INCLUDEDIRS)')) + rules.append(('allocator.obj', 'allocator.c', + '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) /Fo$@ /c $< $(INCLUDEDIRS)')) - target_deps = 'stdafx.obj $(OBJECTS)' else: - rules.append(('all', '$(DEFAULT_TARGET)', [])) rules.append(('.c.obj', '', '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) /Fo$@ /c $< $(INCLUDEDIRS)')) - target_deps = '$(OBJECTS)' for args in definitions: @@ -366,12 +369,12 @@ rel_ofiles[-1]) objects = ' @obj_names.rsp' if self.version < 80: - m.rule('$(TARGET)', target_deps, + m.rule('$(TARGET)', '$(OBJECTS)', create_obj_response_file + [\ '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA)' + objects + ' /out:$@ $(LIBDIRS) $(LIBS)', ]) else: - m.rule('$(TARGET)', target_deps, + m.rule('$(TARGET)', '$(OBJECTS)', create_obj_response_file + [\ '$(CC_LINK) /nologo $(LDFLAGS) $(LDFLAGSEXTRA)' + objects + ' $(LINKFILES) /out:$@ $(LIBDIRS) $(LIBS) /MANIFEST /MANIFESTFILE:$*.manifest', 'mt.exe -nologo -manifest $*.manifest -outputresource:$@;1', From noreply at buildbot.pypy.org Tue Feb 4 06:02:20 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 4 Feb 2014 06:02:20 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: simplify expression Message-ID: <20140204050220.B08001C0470@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r69073:dba9e64cc34c Date: 2014-02-03 00:01 +0200 http://bitbucket.org/pypy/pypy/changeset/dba9e64cc34c/ Log: simplify expression diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -388,7 +388,7 @@ if self.has_profopt(): profopt = self.config.translation.profopt - mk.definition('ABS_TARGET', '$(shell python -c "import sys,os; print os.path.abspath(sys.argv[1])" $(TARGET))') + mk.definition('ABS_TARGET', str(targetdir.join('$(TARGET)'))) mk.definition('DEFAULT_TARGET', 'profopt') mk.definition('PROFOPT', profopt) From noreply at buildbot.pypy.org Tue Feb 4 06:02:21 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 4 Feb 2014 06:02:21 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: fix renamed kwarg Message-ID: <20140204050221.D70281C0470@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: 
precompiled-headers Changeset: r69074:fe4a03461072 Date: 2014-02-03 00:02 +0200 http://bitbucket.org/pypy/pypy/changeset/fe4a03461072/ Log: fix renamed kwarg diff --git a/rpython/translator/platform/test/test_makefile.py b/rpython/translator/platform/test/test_makefile.py --- a/rpython/translator/platform/test/test_makefile.py +++ b/rpython/translator/platform/test/test_makefile.py @@ -124,7 +124,7 @@ self.platform.execute_makefile(mk, extra_opts=['clean']) # Write a super-duper makefile with precompiled headers mk = self.platform.gen_makefile(cfiles, eci, path=tmpdir, - cfile_precompilation=cfiles_precompiled_headers,) + headers_to_precompile=cfiles_precompiled_headers,) mk.rule(*clean) mk.write() t0 = time.clock() From noreply at buildbot.pypy.org Tue Feb 4 06:02:23 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 4 Feb 2014 06:02:23 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: do not use precompiled headers for separate_module_files Message-ID: <20140204050223.1B82B1C0470@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r69075:a34b7d819333 Date: 2014-02-03 01:54 +0200 http://bitbucket.org/pypy/pypy/changeset/a34b7d819333/ Log: do not use precompiled headers for separate_module_files diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -845,5 +845,4 @@ eci = add_extra_files(eci) eci = eci.convert_sources_to_files() - files, eci = eci.get_module_files() - return eci, filename, sg.getextrafiles() + list(files), headers_to_precompile + return eci, filename, sg.getextrafiles(), headers_to_precompile From noreply at buildbot.pypy.org Tue Feb 4 06:02:24 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 4 Feb 2014 06:02:24 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: create rules for files that do not use precompiled headers Message-ID: <20140204050224.3F99C1C0470@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r69076:227c4011452f Date: 2014-02-03 01:54 +0200 http://bitbucket.org/pypy/pypy/changeset/227c4011452f/ Log: create rules for files that do not use precompiled headers diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -336,18 +336,31 @@ definitions.append(('USE_PCH', '/Yustdafx.h /Fpstdafx.pch /FIstdafx.h')) rules.append(('$(OBJECTS)', 'stdafx.pch', [])) rules.append(('stdafx.pch', 'stdafx.h', - '$(CC) stdafx.c /c /nologo $(CFLAGS) $(CFLAGSEXTRA) $(CREATE_PCH) $(INCLUDEDIRS)')) + '$(CC) stdafx.c /c /nologo $(CFLAGS) $(CFLAGSEXTRA) ' + '$(CREATE_PCH) $(INCLUDEDIRS)')) rules.append(('.c.obj', '', - '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) $(USE_PCH) /Fo$@ /c $< $(INCLUDEDIRS)')) + '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) $(USE_PCH) ' + '/Fo$@ /c $< $(INCLUDEDIRS)')) #Do not use precompiled headers for some files - rules.append((r'{..\module_cache}.c{..\module_cache}.obj', '', - '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) /Fo$@ /c $< $(INCLUDEDIRS)')) - rules.append(('allocator.obj', 'allocator.c', - '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) /Fo$@ /c $< $(INCLUDEDIRS)')) + #rules.append((r'{..\module_cache}.c{..\module_cache}.obj', '', + # '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) /Fo$@ /c $< $(INCLUDEDIRS)')) + # nmake cannot handle wildcard target specifications, so we must + # create a rule for compiling each file from eci since they cannot 
use + # precompiled headers :( + no_precompile = [] + for f in list(eci.separate_module_files): + f = m.pathrel(f) + if f not in no_precompile and f.endswith('.c'): + no_precompile.append(f) + target = f[:-1] + 'obj' + rules.append((target, f, + '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) ' + '/Fo%s /c %s $(INCLUDEDIRS)' %(target, f))) else: rules.append(('.c.obj', '', - '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) /Fo$@ /c $< $(INCLUDEDIRS)')) + '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) ' + '/Fo$@ /c $< $(INCLUDEDIRS)')) for args in definitions: From noreply at buildbot.pypy.org Tue Feb 4 06:02:25 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 4 Feb 2014 06:02:25 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: add separate_module_files to cfiles by hand Message-ID: <20140204050225.6522C1C0470@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r69077:3388da518026 Date: 2014-02-03 23:24 +0200 http://bitbucket.org/pypy/pypy/changeset/3388da518026/ Log: add separate_module_files to cfiles by hand diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -377,7 +377,8 @@ return self.executable_name def gen_makefile(self, targetdir, exe_name=None, headers_to_precompile=[]): - cfiles = [self.c_source_filename] + self.extrafiles + cfiles = [self.c_source_filename] + self.extrafiles + list(self.eci.separate_module_files) + xxx if exe_name is not None: exe_name = targetdir.join(exe_name) mk = self.translator.platform.gen_makefile( diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -349,7 +349,7 @@ # precompiled headers :( no_precompile = [] for f in list(eci.separate_module_files): - f = m.pathrel(f) + f = m.pathrel(py.path.local(f)) if f not in no_precompile and f.endswith('.c'): no_precompile.append(f) target = f[:-1] + 'obj' From noreply at buildbot.pypy.org Tue Feb 4 06:02:26 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 4 Feb 2014 06:02:26 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: explicitly list files not able to use precompiled headers (module files) Message-ID: <20140204050226.8F4651C0470@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r69078:291b3f0d5243 Date: 2014-02-03 23:56 +0200 http://bitbucket.org/pypy/pypy/changeset/291b3f0d5243/ Log: explicitly list files not able to use precompiled headers (module files) diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -377,14 +377,16 @@ return self.executable_name def gen_makefile(self, targetdir, exe_name=None, headers_to_precompile=[]): - cfiles = [self.c_source_filename] + self.extrafiles + list(self.eci.separate_module_files) - xxx + module_files = self.eventually_copy(self.eci.separate_module_files) + self.eci.separate_module_files = [] + cfiles = [self.c_source_filename] + self.extrafiles + list(module_files) if exe_name is not None: exe_name = targetdir.join(exe_name) mk = self.translator.platform.gen_makefile( cfiles, self.eci, path=targetdir, exe_name=exe_name, headers_to_precompile=headers_to_precompile, + no_precompile_cfiles = module_files, shared=self.config.translation.shared) if self.has_profopt(): diff --git a/rpython/translator/platform/__init__.py b/rpython/translator/platform/__init__.py --- 
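To make the effect of the precompiled-headers changesets above easier to follow, here is a rough Python sketch of the rule set that gen_makefile ends up emitting on Windows once headers_to_precompile is non-empty. It is an illustration, not the branch's actual code, and the file names are invented: stdafx.pch is built once from stdafx.h/stdafx.c with the /Yc flags hidden in $(CREATE_PCH), ordinary sources pick it up through the .c.obj suffix rule via $(USE_PCH), and, because nmake suffix rules cannot carve out per-file exceptions, every source in no_precompile_cfiles gets its own explicit rule without $(USE_PCH).

    # Sketch only: approximate shape of the generated nmake rules.
    headers_to_precompile = ['common_header.h']                # invented
    no_precompile_cfiles = ['..\\module_cache\\module_0.c']    # invented

    rules = [('all', '$(DEFAULT_TARGET)', [])]
    if headers_to_precompile:
        # every object now depends on the precompiled header
        rules.append(('$(OBJECTS)', 'stdafx.pch', []))
        rules.append(('stdafx.pch', 'stdafx.h',
                      '$(CC) stdafx.c /c /nologo $(CFLAGS) $(CFLAGSEXTRA) '
                      '$(CREATE_PCH) $(INCLUDEDIRS)'))
        # normal sources consume the header through /Yu (in USE_PCH)
        rules.append(('.c.obj', '',
                      '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) $(USE_PCH) '
                      '/Fo$@ /c $< $(INCLUDEDIRS)'))
        # explicit per-file rules for sources that must skip the header
        for f in no_precompile_cfiles:
            target = f[:-1] + 'obj'
            rules.append((target, f,
                          '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) '
                          '/Fo%s /c %s $(INCLUDEDIRS)' % (target, f)))
    else:
        rules.append(('.c.obj', '',
                      '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) '
                      '/Fo$@ /c $< $(INCLUDEDIRS)'))

Compared with the earlier revision, which made 'all' depend on stdafx.pch, hanging the dependency off $(OBJECTS) presumably keeps the build order correct even when a single object file is requested directly.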
a/rpython/translator/platform/__init__.py +++ b/rpython/translator/platform/__init__.py @@ -100,7 +100,8 @@ return ExecutionResult(returncode, stdout, stderr) def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False, headers_to_precompile=[]): + shared=False, headers_to_precompile=[], + no_precompile_cfiles = []): raise NotImplementedError("Pure abstract baseclass") def __repr__(self): diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py --- a/rpython/translator/platform/posix.py +++ b/rpython/translator/platform/posix.py @@ -83,7 +83,8 @@ return [entry[2:] for entry in out.split()] def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False, headers_to_precompile=[]): + shared=False, headers_to_precompile=[], + no_precompile_cfiles = []): cfiles = self._all_cfiles(cfiles, eci) if path is None: diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -249,7 +249,8 @@ def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False, headers_to_precompile=[]): + shared=False, headers_to_precompile=[], + no_precompile_cfiles = []): cfiles = self._all_cfiles(cfiles, eci) if path is None: @@ -348,7 +349,7 @@ # create a rule for compiling each file from eci since they cannot use # precompiled headers :( no_precompile = [] - for f in list(eci.separate_module_files): + for f in list(no_precompile_cfiles): f = m.pathrel(py.path.local(f)) if f not in no_precompile and f.endswith('.c'): no_precompile.append(f) From noreply at buildbot.pypy.org Tue Feb 4 06:02:30 2014 From: noreply at buildbot.pypy.org (mattip) Date: Tue, 4 Feb 2014 06:02:30 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: merge default into branch Message-ID: <20140204050230.0FAD81C0470@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r69079:dd07756a34a4 Date: 2014-02-04 07:01 +0200 http://bitbucket.org/pypy/pypy/changeset/dd07756a34a4/ Log: merge default into branch diff too long, truncating to 2000 out of 6294 lines diff --git a/dotviewer/graphdisplay.py b/dotviewer/graphdisplay.py --- a/dotviewer/graphdisplay.py +++ b/dotviewer/graphdisplay.py @@ -136,6 +136,7 @@ Click on objects to move around Drag with the left mouse button to zoom in/out Drag with the right mouse button to scroll + Use scroll wheel do scroll up or down """.replace('\n ', '\n').strip() # poor man's dedent @@ -613,6 +614,19 @@ def process_MouseButtonUp(self, event): self.dragging = None pygame.event.set_grab(False) + # handle directional scrolling + if event.button == 4: + self.pan((0, -1)) + return + if event.button == 5: + self.pan((0, 1)) + return + if event.button == 6: + self.pan((-1, 0)) + return + if event.button == 7: + self.pan((1, 0)) + return if self.click_time is not None and abs(time.time() - self.click_time) < 1: # click (no significant dragging) self.notifyclick(self.click_origin) diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -1,11 +1,11 @@ """ Arguments objects. 
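The signature changes above give Platform.gen_makefile() two extra keyword arguments. Below is a hedged, self-contained usage sketch against the branch as these commits leave it; the scratch directory, the C source and the header are invented for the example, and the posix backend appears to simply accept and ignore the new arguments.

    # Usage sketch only -- modelled on the updated test_makefile.py call.
    import py
    from rpython.translator.tool.cbuild import ExternalCompilationInfo
    from rpython.translator.platform import platform

    tmpdir = py.path.local.mkdtemp()
    header = tmpdir.join('common.h')
    header.write('#define ANSWER 42\n')
    cfile = tmpdir.join('main.c')
    cfile.write('#include "common.h"\n'
                'int main(void) { return ANSWER - 42; }\n')
    eci = ExternalCompilationInfo(include_dirs=[str(tmpdir)])

    mk = platform.gen_makefile([cfile], eci, path=tmpdir,
                               exe_name=tmpdir.join('demo'),
                               headers_to_precompile=[header],
                               no_precompile_cfiles=[])
    mk.write()
    platform.execute_makefile(mk)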
""" - -from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.debug import make_sure_not_resized from rpython.rlib import jit +from pypy.interpreter.error import OperationError, oefmt + class Arguments(object): """ @@ -86,9 +86,9 @@ args_w = space.fixedview(w_stararg) except OperationError, e: if e.match(space, space.w_TypeError): - raise operationerrfmt( - space.w_TypeError, - "argument after * must be a sequence, not %T", w_stararg) + raise oefmt(space.w_TypeError, + "argument after * must be a sequence, not %T", + w_stararg) raise self.arguments_w = self.arguments_w + args_w @@ -113,10 +113,9 @@ w_keys = space.call_method(w_starstararg, "keys") except OperationError, e: if e.match(space, space.w_AttributeError): - raise operationerrfmt( - space.w_TypeError, - "argument after ** must be a mapping, not %T", - w_starstararg) + raise oefmt(space.w_TypeError, + "argument after ** must be a mapping, not %T", + w_starstararg) raise keys_w = space.unpackiterable(w_keys) keywords_w = [None] * len(keys_w) @@ -281,8 +280,7 @@ self._match_signature(w_firstarg, scope_w, signature, defaults_w, 0) except ArgErr, e: - raise operationerrfmt(self.space.w_TypeError, - "%s() %s", fnname, e.getmsg()) + raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg()) return signature.scope_length() def _parse(self, w_firstarg, signature, defaults_w, blindargs=0): @@ -304,8 +302,7 @@ try: return self._parse(w_firstarg, signature, defaults_w, blindargs) except ArgErr, e: - raise operationerrfmt(self.space.w_TypeError, - "%s() %s", fnname, e.getmsg()) + raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg()) @staticmethod def frompacked(space, w_args=None, w_kwds=None): @@ -344,10 +341,9 @@ for key in keywords: for otherkey in existingkeywords: if otherkey == key: - raise operationerrfmt(space.w_TypeError, - "got multiple values " - "for keyword argument " - "'%s'", key) + raise oefmt(space.w_TypeError, + "got multiple values for keyword argument '%s'", + key) def _do_combine_starstarargs_wrapped(space, keys_w, w_starstararg, keywords, keywords_w, existingkeywords): @@ -367,10 +363,9 @@ raise else: if existingkeywords and key in existingkeywords: - raise operationerrfmt(space.w_TypeError, - "got multiple values " - "for keyword argument " - "'%s'", key) + raise oefmt(space.w_TypeError, + "got multiple values for keyword argument '%s'", + key) keywords[i] = key keywords_w[i] = space.getitem(w_starstararg, w_key) i += 1 diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -1,12 +1,18 @@ # Generated by tools/asdl_py.py -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter import typedef -from pypy.interpreter.gateway import interp2app -from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.unroll import unrolling_iterable from rpython.tool.pairtype import extendabletype from rpython.tool.sourcetools import func_with_new_name +from pypy.interpreter import typedef +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import interp2app + + +def raise_attriberr(space, w_obj, name): + raise oefmt(space.w_AttributeError, + "'%T' object has no attribute '%s'", w_obj, name) + def check_string(space, w_obj): if not (space.isinstance_w(w_obj, space.w_str) or @@ -70,11 +76,13 @@ continue # field is optional w_obj = 
self.getdictvalue(space, missing) if w_obj is None: - err = "required field \"%s\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "required field \"%s\" missing from %s", + missing, host) else: - err = "incorrect type for field \"%s\" in %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "incorrect type for field \"%s\" in %s", + missing, host) raise AssertionError("should not reach here") @@ -2793,7 +2801,7 @@ def Module_get_body(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2834,7 +2842,7 @@ def Interactive_get_body(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2879,7 +2887,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def Expression_set_body(space, w_self, w_new_value): @@ -2922,7 +2930,7 @@ def Suite_get_body(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2967,7 +2975,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') + raise_attriberr(space, w_self, 'lineno') return space.wrap(w_self.lineno) def stmt_set_lineno(space, w_self, w_new_value): @@ -2988,7 +2996,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') + raise_attriberr(space, w_self, 'col_offset') return space.wrap(w_self.col_offset) def stmt_set_col_offset(space, w_self, w_new_value): @@ -3018,7 +3026,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') return space.wrap(w_self.name) def FunctionDef_set_name(space, w_self, w_new_value): @@ -3039,7 +3047,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') return space.wrap(w_self.args) def FunctionDef_set_args(space, w_self, w_new_value): @@ -3056,7 +3064,7 @@ def FunctionDef_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3072,7 +3080,7 @@ def FunctionDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no 
attribute '%s'", w_self, 'decorator_list') + raise_attriberr(space, w_self, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: list_w = [] @@ -3121,7 +3129,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') return space.wrap(w_self.name) def ClassDef_set_name(space, w_self, w_new_value): @@ -3138,7 +3146,7 @@ def ClassDef_get_bases(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'bases') + raise_attriberr(space, w_self, 'bases') if w_self.w_bases is None: if w_self.bases is None: list_w = [] @@ -3154,7 +3162,7 @@ def ClassDef_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3170,7 +3178,7 @@ def ClassDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'decorator_list') + raise_attriberr(space, w_self, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: list_w = [] @@ -3220,7 +3228,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Return_set_value(space, w_self, w_new_value): @@ -3263,7 +3271,7 @@ def Delete_get_targets(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'targets') + raise_attriberr(space, w_self, 'targets') if w_self.w_targets is None: if w_self.targets is None: list_w = [] @@ -3304,7 +3312,7 @@ def Assign_get_targets(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'targets') + raise_attriberr(space, w_self, 'targets') if w_self.w_targets is None: if w_self.targets is None: list_w = [] @@ -3324,7 +3332,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Assign_set_value(space, w_self, w_new_value): @@ -3373,7 +3381,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') + raise_attriberr(space, w_self, 'target') return space.wrap(w_self.target) def AugAssign_set_target(space, w_self, w_new_value): @@ -3396,7 +3404,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return operator_to_class[w_self.op - 1]() def AugAssign_set_op(space, w_self, w_new_value): @@ -3419,7 +3427,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' 
object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def AugAssign_set_value(space, w_self, w_new_value): @@ -3468,7 +3476,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'dest') + raise_attriberr(space, w_self, 'dest') return space.wrap(w_self.dest) def Print_set_dest(space, w_self, w_new_value): @@ -3487,7 +3495,7 @@ def Print_get_values(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') + raise_attriberr(space, w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -3507,7 +3515,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'nl') + raise_attriberr(space, w_self, 'nl') return space.wrap(w_self.nl) def Print_set_nl(space, w_self, w_new_value): @@ -3555,7 +3563,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') + raise_attriberr(space, w_self, 'target') return space.wrap(w_self.target) def For_set_target(space, w_self, w_new_value): @@ -3578,7 +3586,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'iter') + raise_attriberr(space, w_self, 'iter') return space.wrap(w_self.iter) def For_set_iter(space, w_self, w_new_value): @@ -3597,7 +3605,7 @@ def For_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3613,7 +3621,7 @@ def For_get_orelse(space, w_self): if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3662,7 +3670,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return space.wrap(w_self.test) def While_set_test(space, w_self, w_new_value): @@ -3681,7 +3689,7 @@ def While_get_body(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3697,7 +3705,7 @@ def While_get_orelse(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3745,7 +3753,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return 
space.wrap(w_self.test) def If_set_test(space, w_self, w_new_value): @@ -3764,7 +3772,7 @@ def If_get_body(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3780,7 +3788,7 @@ def If_get_orelse(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3828,7 +3836,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'context_expr') + raise_attriberr(space, w_self, 'context_expr') return space.wrap(w_self.context_expr) def With_set_context_expr(space, w_self, w_new_value): @@ -3851,7 +3859,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'optional_vars') + raise_attriberr(space, w_self, 'optional_vars') return space.wrap(w_self.optional_vars) def With_set_optional_vars(space, w_self, w_new_value): @@ -3870,7 +3878,7 @@ def With_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3917,7 +3925,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'type') + raise_attriberr(space, w_self, 'type') return space.wrap(w_self.type) def Raise_set_type(space, w_self, w_new_value): @@ -3940,7 +3948,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'inst') + raise_attriberr(space, w_self, 'inst') return space.wrap(w_self.inst) def Raise_set_inst(space, w_self, w_new_value): @@ -3963,7 +3971,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'tback') + raise_attriberr(space, w_self, 'tback') return space.wrap(w_self.tback) def Raise_set_tback(space, w_self, w_new_value): @@ -4008,7 +4016,7 @@ def TryExcept_get_body(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -4024,7 +4032,7 @@ def TryExcept_get_handlers(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'handlers') + raise_attriberr(space, w_self, 'handlers') if w_self.w_handlers is None: if w_self.handlers is None: list_w = [] @@ -4040,7 +4048,7 @@ def TryExcept_get_orelse(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') if w_self.w_orelse is None: if 
w_self.orelse is None: list_w = [] @@ -4085,7 +4093,7 @@ def TryFinally_get_body(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -4101,7 +4109,7 @@ def TryFinally_get_finalbody(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'finalbody') + raise_attriberr(space, w_self, 'finalbody') if w_self.w_finalbody is None: if w_self.finalbody is None: list_w = [] @@ -4148,7 +4156,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return space.wrap(w_self.test) def Assert_set_test(space, w_self, w_new_value): @@ -4171,7 +4179,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'msg') + raise_attriberr(space, w_self, 'msg') return space.wrap(w_self.msg) def Assert_set_msg(space, w_self, w_new_value): @@ -4215,7 +4223,7 @@ def Import_get_names(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') + raise_attriberr(space, w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4260,7 +4268,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'module') + raise_attriberr(space, w_self, 'module') return space.wrap(w_self.module) def ImportFrom_set_module(space, w_self, w_new_value): @@ -4280,7 +4288,7 @@ def ImportFrom_get_names(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') + raise_attriberr(space, w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4300,7 +4308,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'level') + raise_attriberr(space, w_self, 'level') return space.wrap(w_self.level) def ImportFrom_set_level(space, w_self, w_new_value): @@ -4348,7 +4356,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def Exec_set_body(space, w_self, w_new_value): @@ -4371,7 +4379,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'globals') + raise_attriberr(space, w_self, 'globals') return space.wrap(w_self.globals) def Exec_set_globals(space, w_self, w_new_value): @@ -4394,7 +4402,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'locals') + raise_attriberr(space, w_self, 'locals') return space.wrap(w_self.locals) def Exec_set_locals(space, w_self, w_new_value): 
@@ -4439,7 +4447,7 @@ def Global_get_names(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') + raise_attriberr(space, w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4484,7 +4492,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Expr_set_value(space, w_self, w_new_value): @@ -4582,7 +4590,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') + raise_attriberr(space, w_self, 'lineno') return space.wrap(w_self.lineno) def expr_set_lineno(space, w_self, w_new_value): @@ -4603,7 +4611,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') + raise_attriberr(space, w_self, 'col_offset') return space.wrap(w_self.col_offset) def expr_set_col_offset(space, w_self, w_new_value): @@ -4633,7 +4641,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return boolop_to_class[w_self.op - 1]() def BoolOp_set_op(space, w_self, w_new_value): @@ -4652,7 +4660,7 @@ def BoolOp_get_values(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') + raise_attriberr(space, w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -4698,7 +4706,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'left') + raise_attriberr(space, w_self, 'left') return space.wrap(w_self.left) def BinOp_set_left(space, w_self, w_new_value): @@ -4721,7 +4729,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return operator_to_class[w_self.op - 1]() def BinOp_set_op(space, w_self, w_new_value): @@ -4744,7 +4752,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'right') + raise_attriberr(space, w_self, 'right') return space.wrap(w_self.right) def BinOp_set_right(space, w_self, w_new_value): @@ -4793,7 +4801,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return unaryop_to_class[w_self.op - 1]() def UnaryOp_set_op(space, w_self, w_new_value): @@ -4816,7 +4824,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'operand') + raise_attriberr(space, w_self, 'operand') return space.wrap(w_self.operand) def UnaryOp_set_operand(space, w_self, w_new_value): @@ 
-4864,7 +4872,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') return space.wrap(w_self.args) def Lambda_set_args(space, w_self, w_new_value): @@ -4885,7 +4893,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def Lambda_set_body(space, w_self, w_new_value): @@ -4933,7 +4941,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return space.wrap(w_self.test) def IfExp_set_test(space, w_self, w_new_value): @@ -4956,7 +4964,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def IfExp_set_body(space, w_self, w_new_value): @@ -4979,7 +4987,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') return space.wrap(w_self.orelse) def IfExp_set_orelse(space, w_self, w_new_value): @@ -5024,7 +5032,7 @@ def Dict_get_keys(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'keys') + raise_attriberr(space, w_self, 'keys') if w_self.w_keys is None: if w_self.keys is None: list_w = [] @@ -5040,7 +5048,7 @@ def Dict_get_values(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') + raise_attriberr(space, w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -5083,7 +5091,7 @@ def Set_get_elts(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') + raise_attriberr(space, w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -5128,7 +5136,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') + raise_attriberr(space, w_self, 'elt') return space.wrap(w_self.elt) def ListComp_set_elt(space, w_self, w_new_value): @@ -5147,7 +5155,7 @@ def ListComp_get_generators(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5193,7 +5201,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') + raise_attriberr(space, w_self, 'elt') return space.wrap(w_self.elt) def SetComp_set_elt(space, w_self, w_new_value): @@ -5212,7 +5220,7 @@ def SetComp_get_generators(space, w_self): if not 
w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5258,7 +5266,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'key') + raise_attriberr(space, w_self, 'key') return space.wrap(w_self.key) def DictComp_set_key(space, w_self, w_new_value): @@ -5281,7 +5289,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def DictComp_set_value(space, w_self, w_new_value): @@ -5300,7 +5308,7 @@ def DictComp_get_generators(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5347,7 +5355,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') + raise_attriberr(space, w_self, 'elt') return space.wrap(w_self.elt) def GeneratorExp_set_elt(space, w_self, w_new_value): @@ -5366,7 +5374,7 @@ def GeneratorExp_get_generators(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5412,7 +5420,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Yield_set_value(space, w_self, w_new_value): @@ -5459,7 +5467,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'left') + raise_attriberr(space, w_self, 'left') return space.wrap(w_self.left) def Compare_set_left(space, w_self, w_new_value): @@ -5478,7 +5486,7 @@ def Compare_get_ops(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ops') + raise_attriberr(space, w_self, 'ops') if w_self.w_ops is None: if w_self.ops is None: list_w = [] @@ -5494,7 +5502,7 @@ def Compare_get_comparators(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'comparators') + raise_attriberr(space, w_self, 'comparators') if w_self.w_comparators is None: if w_self.comparators is None: list_w = [] @@ -5542,7 +5550,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'func') + raise_attriberr(space, w_self, 'func') return space.wrap(w_self.func) def Call_set_func(space, w_self, w_new_value): @@ -5561,7 +5569,7 @@ def Call_get_args(space, w_self): if 
not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') if w_self.w_args is None: if w_self.args is None: list_w = [] @@ -5577,7 +5585,7 @@ def Call_get_keywords(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'keywords') + raise_attriberr(space, w_self, 'keywords') if w_self.w_keywords is None: if w_self.keywords is None: list_w = [] @@ -5597,7 +5605,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'starargs') + raise_attriberr(space, w_self, 'starargs') return space.wrap(w_self.starargs) def Call_set_starargs(space, w_self, w_new_value): @@ -5620,7 +5628,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 64: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'kwargs') + raise_attriberr(space, w_self, 'kwargs') return space.wrap(w_self.kwargs) def Call_set_kwargs(space, w_self, w_new_value): @@ -5673,7 +5681,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Repr_set_value(space, w_self, w_new_value): @@ -5720,7 +5728,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'n') + raise_attriberr(space, w_self, 'n') return w_self.n def Num_set_n(space, w_self, w_new_value): @@ -5765,7 +5773,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 's') + raise_attriberr(space, w_self, 's') return w_self.s def Str_set_s(space, w_self, w_new_value): @@ -5810,7 +5818,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Attribute_set_value(space, w_self, w_new_value): @@ -5833,7 +5841,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'attr') + raise_attriberr(space, w_self, 'attr') return space.wrap(w_self.attr) def Attribute_set_attr(space, w_self, w_new_value): @@ -5854,7 +5862,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Attribute_set_ctx(space, w_self, w_new_value): @@ -5903,7 +5911,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Subscript_set_value(space, w_self, w_new_value): @@ -5926,7 +5934,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise 
operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'slice') + raise_attriberr(space, w_self, 'slice') return space.wrap(w_self.slice) def Subscript_set_slice(space, w_self, w_new_value): @@ -5949,7 +5957,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Subscript_set_ctx(space, w_self, w_new_value): @@ -5998,7 +6006,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'id') + raise_attriberr(space, w_self, 'id') return space.wrap(w_self.id) def Name_set_id(space, w_self, w_new_value): @@ -6019,7 +6027,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Name_set_ctx(space, w_self, w_new_value): @@ -6063,7 +6071,7 @@ def List_get_elts(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') + raise_attriberr(space, w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -6083,7 +6091,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def List_set_ctx(space, w_self, w_new_value): @@ -6128,7 +6136,7 @@ def Tuple_get_elts(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') + raise_attriberr(space, w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -6148,7 +6156,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Tuple_set_ctx(space, w_self, w_new_value): @@ -6197,7 +6205,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return w_self.value def Const_set_value(space, w_self, w_new_value): @@ -6315,7 +6323,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lower') + raise_attriberr(space, w_self, 'lower') return space.wrap(w_self.lower) def Slice_set_lower(space, w_self, w_new_value): @@ -6338,7 +6346,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'upper') + raise_attriberr(space, w_self, 'upper') return space.wrap(w_self.upper) def Slice_set_upper(space, w_self, w_new_value): @@ -6361,7 +6369,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise 
operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'step') + raise_attriberr(space, w_self, 'step') return space.wrap(w_self.step) def Slice_set_step(space, w_self, w_new_value): @@ -6406,7 +6414,7 @@ def ExtSlice_get_dims(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'dims') + raise_attriberr(space, w_self, 'dims') if w_self.w_dims is None: if w_self.dims is None: list_w = [] @@ -6451,7 +6459,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Index_set_value(space, w_self, w_new_value): @@ -6722,7 +6730,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') + raise_attriberr(space, w_self, 'target') return space.wrap(w_self.target) def comprehension_set_target(space, w_self, w_new_value): @@ -6745,7 +6753,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'iter') + raise_attriberr(space, w_self, 'iter') return space.wrap(w_self.iter) def comprehension_set_iter(space, w_self, w_new_value): @@ -6764,7 +6772,7 @@ def comprehension_get_ifs(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ifs') + raise_attriberr(space, w_self, 'ifs') if w_self.w_ifs is None: if w_self.ifs is None: list_w = [] @@ -6811,7 +6819,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') + raise_attriberr(space, w_self, 'lineno') return space.wrap(w_self.lineno) def excepthandler_set_lineno(space, w_self, w_new_value): @@ -6832,7 +6840,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') + raise_attriberr(space, w_self, 'col_offset') return space.wrap(w_self.col_offset) def excepthandler_set_col_offset(space, w_self, w_new_value): @@ -6862,7 +6870,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'type') + raise_attriberr(space, w_self, 'type') return space.wrap(w_self.type) def ExceptHandler_set_type(space, w_self, w_new_value): @@ -6885,7 +6893,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') return space.wrap(w_self.name) def ExceptHandler_set_name(space, w_self, w_new_value): @@ -6904,7 +6912,7 @@ def ExceptHandler_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -6947,7 +6955,7 @@ def arguments_get_args(space, w_self): if not w_self.initialization_state & 1: - 
raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') if w_self.w_args is None: if w_self.args is None: list_w = [] @@ -6967,7 +6975,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'vararg') + raise_attriberr(space, w_self, 'vararg') return space.wrap(w_self.vararg) def arguments_set_vararg(space, w_self, w_new_value): @@ -6991,7 +6999,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'kwarg') + raise_attriberr(space, w_self, 'kwarg') return space.wrap(w_self.kwarg) def arguments_set_kwarg(space, w_self, w_new_value): @@ -7011,7 +7019,7 @@ def arguments_get_defaults(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'defaults') + raise_attriberr(space, w_self, 'defaults') if w_self.w_defaults is None: if w_self.defaults is None: list_w = [] @@ -7060,7 +7068,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'arg') + raise_attriberr(space, w_self, 'arg') return space.wrap(w_self.arg) def keyword_set_arg(space, w_self, w_new_value): @@ -7081,7 +7089,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def keyword_set_value(space, w_self, w_new_value): @@ -7129,7 +7137,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') return space.wrap(w_self.name) def alias_set_name(space, w_self, w_new_value): @@ -7150,7 +7158,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'asname') + raise_attriberr(space, w_self, 'asname') return space.wrap(w_self.asname) def alias_set_asname(space, w_self, w_new_value): diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py --- a/pypy/interpreter/astcompiler/tools/asdl_py.py +++ b/pypy/interpreter/astcompiler/tools/asdl_py.py @@ -409,8 +409,7 @@ self.emit(" if w_obj is not None:", 1) self.emit(" return w_obj", 1) self.emit("if not w_self.initialization_state & %s:" % (flag,), 1) - self.emit("raise operationerrfmt(space.w_AttributeError, \"'%%T' object has no attribute '%%s'\", w_self, '%s')" % - (field.name,), 2) + self.emit("raise_attriberr(space, w_self, '%s')" % (field.name,), 2) if field.seq: self.emit("if w_self.w_%s is None:" % (field.name,), 1) self.emit("if w_self.%s is None:" % (field.name,), 2) @@ -537,14 +536,20 @@ HEAD = """# Generated by tools/asdl_py.py -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter import typedef -from pypy.interpreter.gateway import interp2app -from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.unroll import unrolling_iterable from rpython.tool.pairtype import extendabletype from 
rpython.tool.sourcetools import func_with_new_name +from pypy.interpreter import typedef +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import interp2app + + +def raise_attriberr(space, w_obj, name): + raise oefmt(space.w_AttributeError, + \"'%T' object has no attribute '%s'\", w_obj, name) + def check_string(space, w_obj): if not (space.isinstance_w(w_obj, space.w_str) or @@ -608,11 +613,13 @@ continue # field is optional w_obj = self.getdictvalue(space, missing) if w_obj is None: - err = "required field \\"%s\\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "required field \\"%s\\" missing from %s", + missing, host) else: - err = "incorrect type for field \\"%s\\" in %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "incorrect type for field \\"%s\\" in %s", + missing, host) raise AssertionError("should not reach here") diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -11,8 +11,7 @@ from pypy.interpreter.executioncontext import (ExecutionContext, ActionFlag, UserDelAction) -from pypy.interpreter.error import (OperationError, operationerrfmt, - new_exception_class) +from pypy.interpreter.error import OperationError, new_exception_class, oefmt from pypy.interpreter.argument import Arguments from pypy.interpreter.miscutils import ThreadLocals @@ -61,9 +60,9 @@ return False def setdict(self, space, w_dict): - raise operationerrfmt(space.w_TypeError, - "attribute '__dict__' of %T objects " - "is not writable", self) + raise oefmt(space.w_TypeError, + "attribute '__dict__' of %T objects is not writable", + self) # to be used directly only by space.type implementations def getclass(self, space): @@ -123,8 +122,8 @@ classname = '?' 
else: classname = wrappable_class_name(RequiredClass) - msg = "'%s' object expected, got '%T' instead" - raise operationerrfmt(space.w_TypeError, msg, classname, self) + raise oefmt(space.w_TypeError, + "'%s' object expected, got '%T' instead", classname, self) # used by _weakref implemenation @@ -132,8 +131,8 @@ return None def setweakref(self, space, weakreflifeline): - raise operationerrfmt(space.w_TypeError, - "cannot create weak reference to '%T' object", self) + raise oefmt(space.w_TypeError, + "cannot create weak reference to '%T' object", self) def delweakref(self): pass @@ -215,25 +214,25 @@ self._typed_unwrap_error(space, "integer") def _typed_unwrap_error(self, space, expected): - raise operationerrfmt(space.w_TypeError, "expected %s, got %T object", - expected, self) + raise oefmt(space.w_TypeError, + "expected %s, got %T object", expected, self) def int(self, space): w_impl = space.lookup(self, '__int__') if w_impl is None: - raise operationerrfmt(space.w_TypeError, - "unsupported operand type for int(): '%T'", self) + raise oefmt(space.w_TypeError, + "unsupported operand type for int(): '%T'", self) w_result = space.get_and_call_function(w_impl, self) if (space.isinstance_w(w_result, space.w_int) or space.isinstance_w(w_result, space.w_long)): return w_result - msg = "__int__ returned non-int (type '%T')" - raise operationerrfmt(space.w_TypeError, msg, w_result) + raise oefmt(space.w_TypeError, + "__int__ returned non-int (type '%T')", w_result) def ord(self, space): - msg = "ord() expected string of length 1, but %T found" - raise operationerrfmt(space.w_TypeError, msg, self) + raise oefmt(space.w_TypeError, + "ord() expected string of length 1, but %T found", self) def __spacebind__(self, space): return self @@ -430,10 +429,9 @@ try: w_mod = self.builtin_modules[name] except KeyError: - raise operationerrfmt( - self.w_SystemError, - "getbuiltinmodule() called " - "with non-builtin module %s", name) + raise oefmt(self.w_SystemError, + "getbuiltinmodule() called with non-builtin module %s", + name) else: # Add the module to sys.modules self.setitem(w_modules, w_name, w_mod) @@ -753,9 +751,10 @@ if can_be_None and self.is_none(w_obj): return None if not isinstance(w_obj, RequiredClass): # or obj is None - msg = "'%s' object expected, got '%N' instead" - raise operationerrfmt(self.w_TypeError, msg, - wrappable_class_name(RequiredClass), w_obj.getclass(self)) + raise oefmt(self.w_TypeError, + "'%s' object expected, got '%N' instead", + wrappable_class_name(RequiredClass), + w_obj.getclass(self)) return w_obj interp_w._annspecialcase_ = 'specialize:arg(1)' @@ -832,13 +831,9 @@ items[idx] = w_item idx += 1 if idx < expected_length: - if idx == 1: - plural = "" - else: - plural = "s" - raise operationerrfmt(self.w_ValueError, - "need more than %d value%s to unpack", - idx, plural) + raise oefmt(self.w_ValueError, + "need more than %d value%s to unpack", + idx, "" if idx == 1 else "s") return items def unpackiterable_unroll(self, w_iterable, expected_length): @@ -1257,8 +1252,8 @@ except OperationError, err: if objdescr is None or not err.match(self, self.w_TypeError): raise - msg = "%s must be an integer, not %T" - raise operationerrfmt(self.w_TypeError, msg, objdescr, w_obj) + raise oefmt(self.w_TypeError, "%s must be an integer, not %T", + objdescr, w_obj) try: index = self.int_w(w_index) except OperationError, err: @@ -1271,9 +1266,9 @@ else: return sys.maxint else: - raise operationerrfmt( - w_exception, "cannot fit '%T' into an index-sized integer", - w_obj) + raise 
oefmt(w_exception, + "cannot fit '%T' into an index-sized integer", + w_obj) else: return index @@ -1517,9 +1512,9 @@ ) fd = self.int_w(w_fd) if fd < 0: - raise operationerrfmt(self.w_ValueError, - "file descriptor cannot be a negative integer (%d)", fd - ) + raise oefmt(self.w_ValueError, + "file descriptor cannot be a negative integer (%d)", + fd) return fd def warn(self, w_msg, w_warningcls, stacklevel=2): diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -226,9 +226,9 @@ def _exception_getclass(self, space, w_inst): w_type = space.exception_getclass(w_inst) if not space.exception_is_valid_class_w(w_type): - msg = ("exceptions must be old-style classes or derived " - "from BaseException, not %N") - raise operationerrfmt(space.w_TypeError, msg, w_type) + raise oefmt(space.w_TypeError, + "exceptions must be old-style classes or derived from " + "BaseException, not %N", w_type) return w_type def write_unraisable(self, space, where, w_object=None, @@ -383,15 +383,16 @@ self._w_value = w_value = space.wrap(self._value) return w_value -def get_operationerr_class(valuefmt): + at specialize.memo() +def get_operr_class(valuefmt): try: result = _fmtcache[valuefmt] except KeyError: result = _fmtcache[valuefmt] = get_operrcls2(valuefmt) return result -get_operationerr_class._annspecialcase_ = 'specialize:memo' -def operationerrfmt(w_type, valuefmt, *args): + at specialize.arg(1) +def oefmt(w_type, valuefmt, *args): """Equivalent to OperationError(w_type, space.wrap(valuefmt % args)). More efficient in the (common) case where the value is not actually needed. @@ -405,9 +406,8 @@ """ if not len(args): return OpErrFmtNoArgs(w_type, valuefmt) - OpErrFmt, strings = get_operationerr_class(valuefmt) + OpErrFmt, strings = get_operr_class(valuefmt) return OpErrFmt(w_type, strings, *args) -operationerrfmt._annspecialcase_ = 'specialize:arg(1)' # ____________________________________________________________ diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -7,8 +7,8 @@ """ from rpython.rlib.unroll import unrolling_iterable -from pypy.interpreter.error import OperationError, operationerrfmt from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.eval import Code from pypy.interpreter.argument import Arguments from rpython.rlib import jit @@ -413,9 +413,9 @@ if self.closure: closure_len = len(self.closure) if isinstance(code, PyCode) and closure_len != len(code.co_freevars): - raise operationerrfmt(space.w_ValueError, - "%N() requires a code object with %d free vars, not %d", - self, closure_len, len(code.co_freevars)) + raise oefmt(space.w_ValueError, + "%N() requires a code object with %d free vars, not " + "%d", self, closure_len, len(code.co_freevars)) self.fget_func_doc(space) # see test_issue1293 self.code = code @@ -495,10 +495,9 @@ instdescr = instname + " instance" else: instdescr = "instance" - msg = ("unbound method %N() must be called with %s " - "as first argument (got %s instead)") - raise operationerrfmt(space.w_TypeError, msg, - self, clsdescr, instdescr) + raise oefmt(space.w_TypeError, + "unbound method %N() must be called with %s as first " + "argument (got %s instead)", self, clsdescr, instdescr) return space.call_args(self.w_function, args) def descr_method_get(self, w_obj, w_cls=None): diff --git a/pypy/interpreter/pyframe.py 
b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -12,7 +12,7 @@ from pypy.interpreter.argument import Arguments from pypy.interpreter.astcompiler import consts from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.executioncontext import ExecutionContext from pypy.interpreter.nestedscope import Cell from pypy.tool import stdlib_opcode @@ -622,8 +622,8 @@ line = self.pycode.co_firstlineno if new_lineno < line: - raise operationerrfmt(space.w_ValueError, - "line %d comes before the current code.", new_lineno) + raise oefmt(space.w_ValueError, + "line %d comes before the current code.", new_lineno) elif new_lineno == line: new_lasti = 0 else: @@ -639,8 +639,8 @@ break if new_lasti == -1: - raise operationerrfmt(space.w_ValueError, - "line %d comes after the current code.", new_lineno) + raise oefmt(space.w_ValueError, + "line %d comes after the current code.", new_lineno) # Don't jump to a line with an except in it. code = self.pycode.co_code @@ -687,9 +687,9 @@ assert len(blockstack) == 0 if new_lasti_setup_addr != f_lasti_setup_addr: - raise operationerrfmt(space.w_ValueError, - "can't jump into or out of a 'finally' block %d -> %d", - f_lasti_setup_addr, new_lasti_setup_addr) + raise oefmt(space.w_ValueError, + "can't jump into or out of a 'finally' block %d -> %d", + f_lasti_setup_addr, new_lasti_setup_addr) if new_lasti < self.last_instr: min_addr = new_lasti diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py --- a/pypy/interpreter/pyopcode.py +++ b/pypy/interpreter/pyopcode.py @@ -14,7 +14,7 @@ gateway, function, eval, pyframe, pytraceback, pycode ) from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.nestedscope import Cell from pypy.interpreter.pycode import PyCode, BytecodeCorruption from pypy.tool.stdlib_opcode import bytecode_spec @@ -492,8 +492,9 @@ def _load_fast_failed(self, varindex): varname = self.getlocalvarname(varindex) - message = "local variable '%s' referenced before assignment" - raise operationerrfmt(self.space.w_UnboundLocalError, message, varname) + raise oefmt(self.space.w_UnboundLocalError, + "local variable '%s' referenced before assignment", + varname) _load_fast_failed._dont_inline_ = True def LOAD_CONST(self, constindex, next_instr): @@ -848,9 +849,8 @@ # catch KeyErrors and turn them into NameErrors if not e.match(self.space, self.space.w_KeyError): raise - message = "name '%s' is not defined" - raise operationerrfmt(self.space.w_NameError, message, - self.space.str_w(w_varname)) + raise oefmt(self.space.w_NameError, "name '%s' is not defined", + self.space.str_w(w_varname)) def UNPACK_SEQUENCE(self, itemcount, next_instr): w_iterable = self.popvalue() @@ -899,8 +899,8 @@ _load_global._always_inline_ = True def _load_global_failed(self, varname): - message = "global name '%s' is not defined" - raise operationerrfmt(self.space.w_NameError, message, varname) + raise oefmt(self.space.w_NameError, + "global name '%s' is not defined", varname) _load_global_failed._dont_inline_ = True def LOAD_GLOBAL(self, nameindex, next_instr): @@ -910,9 +910,9 @@ def DELETE_FAST(self, varindex, next_instr): if self.locals_stack_w[varindex] is None: varname = self.getlocalvarname(varindex) - message = "local variable '%s' 
referenced before assignment" - raise operationerrfmt(self.space.w_UnboundLocalError, message, - varname) + raise oefmt(self.space.w_UnboundLocalError, + "local variable '%s' referenced before assignment", + varname) self.locals_stack_w[varindex] = None def BUILD_TUPLE(self, itemcount, next_instr): @@ -1040,9 +1040,8 @@ except OperationError, e: if not e.match(self.space, self.space.w_AttributeError): raise - raise operationerrfmt(self.space.w_ImportError, - "cannot import name '%s'", - self.space.str_w(w_name)) + raise oefmt(self.space.w_ImportError, + "cannot import name '%s'", self.space.str_w(w_name)) self.pushvalue(w_obj) def YIELD_VALUE(self, oparg, next_instr): @@ -1127,9 +1126,9 @@ w_enter = self.space.lookup(w_manager, "__enter__") w_descr = self.space.lookup(w_manager, "__exit__") if w_enter is None or w_descr is None: - raise operationerrfmt(self.space.w_AttributeError, - "'%T' object is not a context manager" - " (no __enter__/__exit__ method)", w_manager) + raise oefmt(self.space.w_AttributeError, + "'%T' object is not a context manager (no __enter__/" + "__exit__ method)", w_manager) w_exit = self.space.get(w_descr, w_manager) self.settopvalue(w_exit) w_result = self.space.get_and_call_function(w_enter, w_manager) diff --git a/pypy/interpreter/test/test_error.py b/pypy/interpreter/test/test_error.py --- a/pypy/interpreter/test/test_error.py +++ b/pypy/interpreter/test/test_error.py @@ -1,7 +1,7 @@ import py, os, errno -from pypy.interpreter.error import OperationError, operationerrfmt -from pypy.interpreter.error import decompose_valuefmt, get_operrcls2 -from pypy.interpreter.error import wrap_oserror, new_exception_class +from pypy.interpreter.error import ( + OperationError, decompose_valuefmt, get_operrcls2, new_exception_class, + oefmt, wrap_oserror) def test_decompose_valuefmt(): @@ -22,59 +22,59 @@ assert cls2 is cls # caching assert strings2 == ("a ", " b ", " c") -def test_operationerrfmt(space): - operr = operationerrfmt("w_type", "abc %s def %d", "foo", 42) +def test_oefmt(space): + operr = oefmt("w_type", "abc %s def %d", "foo", 42) assert isinstance(operr, OperationError) assert operr.w_type == "w_type" assert operr._w_value is None assert operr._compute_value(space) == "abc foo def 42" - operr2 = operationerrfmt("w_type2", "a %s b %d c", "bar", 43) + operr2 = oefmt("w_type2", "a %s b %d c", "bar", 43) assert operr2.__class__ is operr.__class__ - operr3 = operationerrfmt("w_type2", "a %s b %s c", "bar", "4b") + operr3 = oefmt("w_type2", "a %s b %s c", "bar", "4b") assert operr3.__class__ is not operr.__class__ -def test_operationerrfmt_noargs(space): - operr = operationerrfmt(space.w_AttributeError, "no attribute 'foo'") +def test_oefmt_noargs(space): + operr = oefmt(space.w_AttributeError, "no attribute 'foo'") operr.normalize_exception(space) val = operr.get_w_value(space) assert space.isinstance_w(val, space.w_AttributeError) w_repr = space.repr(val) assert space.str_w(w_repr) == "AttributeError(\"no attribute 'foo'\",)" -def test_operationerrfmt_T(space): - operr = operationerrfmt(space.w_AttributeError, - "'%T' object has no attribute '%s'", - space.wrap('foo'), 'foo') +def test_oefmt_T(space): + operr = oefmt(space.w_AttributeError, + "'%T' object has no attribute '%s'", + space.wrap('foo'), 'foo') assert operr._compute_value(space) == "'str' object has no attribute 'foo'" - operr = operationerrfmt("w_type", - "'%T' object has no attribute '%s'", - space.wrap('foo'), 'foo') + operr = oefmt("w_type", + "'%T' object has no attribute '%s'", + space.wrap('foo'), 
'foo') assert operr._compute_value(space) == "'str' object has no attribute 'foo'" -def test_operationerrfmt_N(space): - operr = operationerrfmt(space.w_AttributeError, - "'%N' object has no attribute '%s'", - space.type(space.wrap('foo')), 'foo') +def test_oefmt_N(space): + operr = oefmt(space.w_AttributeError, + "'%N' object has no attribute '%s'", + space.type(space.wrap('foo')), 'foo') assert operr._compute_value(space) == "'str' object has no attribute 'foo'" - operr = operationerrfmt("w_type", - "'%N' object has no attribute '%s'", - space.type(space.wrap('foo')), 'foo') + operr = oefmt("w_type", + "'%N' object has no attribute '%s'", + space.type(space.wrap('foo')), 'foo') assert operr._compute_value(space) == "'str' object has no attribute 'foo'" - operr = operationerrfmt(space.w_AttributeError, - "'%N' object has no attribute '%s'", - space.wrap('foo'), 'foo') + operr = oefmt(space.w_AttributeError, + "'%N' object has no attribute '%s'", + space.wrap('foo'), 'foo') assert operr._compute_value(space) == "'?' object has no attribute 'foo'" - operr = operationerrfmt("w_type", - "'%N' object has no attribute '%s'", - space.wrap('foo'), 'foo') + operr = oefmt("w_type", + "'%N' object has no attribute '%s'", + space.wrap('foo'), 'foo') assert operr._compute_value(space) == "'?' object has no attribute 'foo'" -def test_operationerrfmt_R(space): - operr = operationerrfmt(space.w_ValueError, "illegal newline value: %R", - space.wrap('foo')) +def test_oefmt_R(space): + operr = oefmt(space.w_ValueError, + "illegal newline value: %R", space.wrap('foo')) assert operr._compute_value(space) == "illegal newline value: 'foo'" - operr = operationerrfmt(space.w_ValueError, "illegal newline value: %R", - space.wrap("'PyLadies'")) + operr = oefmt(space.w_ValueError, "illegal newline value: %R", + space.wrap("'PyLadies'")) expected = "illegal newline value: \"'PyLadies'\"" assert operr._compute_value(space) == expected diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -2,7 +2,7 @@ from pypy.interpreter.argument import Arguments from pypy.interpreter.baseobjspace import W_Root, DescrMismatch -from pypy.interpreter.error import OperationError, operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import (interp2app, BuiltinCode, unwrap_spec, WrappedDefault) @@ -549,9 +549,9 @@ def typecheck(self, space, w_obj): if not space.isinstance_w(w_obj, self.w_cls): - m = "descriptor '%N' for '%N' objects doesn't apply to '%T' object" - raise operationerrfmt(space.w_TypeError, m, - self, self.w_cls, w_obj) + raise oefmt(space.w_TypeError, + "descriptor '%N' for '%N' objects doesn't apply to " + "'%T' object", self, self.w_cls, w_obj) def descr_member_get(self, space, w_obj, w_cls=None): """member.__get__(obj[, type]) -> value @@ -620,8 +620,9 @@ def descr_get_dict(space, w_obj): w_dict = w_obj.getdict(space) if w_dict is None: - msg = "descriptor '__dict__' doesn't apply to '%T' objects" - raise operationerrfmt(space.w_TypeError, msg, w_obj) + raise oefmt(space.w_TypeError, + "descriptor '__dict__' doesn't apply to '%T' objects", + w_obj) return w_dict def descr_set_dict(space, w_obj, w_dict): diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py --- a/pypy/module/__builtin__/interp_classobj.py +++ b/pypy/module/__builtin__/interp_classobj.py @@ -1,5 +1,5 @@ import new -from pypy.interpreter.error import OperationError, 
operationerrfmt +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, make_weakref_descr from pypy.interpreter.baseobjspace import W_Root @@ -10,8 +10,8 @@ def raise_type_err(space, argument, expected, w_obj): - raise operationerrfmt(space.w_TypeError, "argument %s must be %s, not %T", - argument, expected, w_obj) + raise oefmt(space.w_TypeError, + "argument %s must be %s, not %T", argument, expected, w_obj) def unwrap_attr(space, w_attr): try: @@ -126,10 +126,8 @@ return space.newtuple(self.bases_w) w_value = self.lookup(space, name) if w_value is None: - raise operationerrfmt( - space.w_AttributeError, - "class %s has no attribute '%s'", - self.name, name) + raise oefmt(space.w_AttributeError, + "class %s has no attribute '%s'", self.name, name) w_descr_get = space.lookup(w_value, '__get__') if w_descr_get is None: @@ -158,18 +156,15 @@ def descr_delattr(self, space, w_attr): name = unwrap_attr(space, w_attr) if name in ("__dict__", "__name__", "__bases__"): - raise operationerrfmt( - space.w_TypeError, - "cannot delete attribute '%s'", name) + raise oefmt(space.w_TypeError, + "cannot delete attribute '%s'", name) try: space.delitem(self.w_dict, w_attr) except OperationError, e: if not e.match(space, space.w_KeyError): raise - raise operationerrfmt( - space.w_AttributeError, - "class %s has no attribute '%s'", - self.name, name) + raise oefmt(space.w_AttributeError, + "class %s has no attribute '%s'", self.name, name) def descr_repr(self, space): mod = self.get_module_string(space) @@ -362,10 +357,9 @@ raise # not found at all if exc: - raise operationerrfmt( - space.w_AttributeError, - "%s instance has no attribute '%s'", - self.w_class.name, name) + raise oefmt(space.w_AttributeError, + "%s instance has no attribute '%s'", + self.w_class.name, name) else: return None @@ -416,10 +410,9 @@ space.call_function(w_meth, w_name) else: if not self.deldictvalue(space, name): - raise operationerrfmt( - space.w_AttributeError, - "%s instance has no attribute '%s'", - self.w_class.name, name) + raise oefmt(space.w_AttributeError, + "%s instance has no attribute '%s'", + self.w_class.name, name) def descr_repr(self, space): w_meth = self.getattr(space, '__repr__', False) diff --git a/pypy/module/__pypy__/interp_dict.py b/pypy/module/__pypy__/interp_dict.py --- a/pypy/module/__pypy__/interp_dict.py +++ b/pypy/module/__pypy__/interp_dict.py @@ -1,6 +1,6 @@ +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec -from pypy.interpreter.error import operationerrfmt, OperationError from pypy.objspace.std.dictmultiobject import W_DictMultiObject @unwrap_spec(type=str) @@ -30,8 +30,7 @@ elif type == 'strdict': return space.newdict(strdict=True) else: From noreply at buildbot.pypy.org Wed Feb 5 00:10:51 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 5 Feb 2014 00:10:51 +0100 (CET) Subject: [pypy-commit] pypy default: we no longer use with_unicode_literals Message-ID: <20140204231051.285991C3235@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r69080:51c4bcbe3e4d Date: 2014-02-04 12:01 -0800 http://bitbucket.org/pypy/pypy/changeset/51c4bcbe3e4d/ Log: we no longer use with_unicode_literals diff --git a/rpython/tool/sourcetools.py b/rpython/tool/sourcetools.py --- a/rpython/tool/sourcetools.py +++ b/rpython/tool/sourcetools.py @@ -6,7 +6,6 @@ # XXX We should try to generalize and single out one approach to dynamic # XXX code 
compilation. -import types import sys, os, inspect, new import py @@ -296,40 +295,3 @@ result.func_defaults = f.func_defaults result.func_dict.update(f.func_dict) return result - - -def _convert_const_maybe(x, encoding): - if isinstance(x, str): - return x.decode(encoding) - elif isinstance(x, tuple): - items = [_convert_const_maybe(item, encoding) for item in x] - return tuple(items) - return x - -def with_unicode_literals(fn=None, **kwds): - """Decorator that replace all string literals with unicode literals. - Similar to 'from __future__ import string literals' at function level. - Useful to limit changes in the py3k branch. - """ - encoding = kwds.pop('encoding', 'ascii') - if kwds: - raise TypeError("Unexpected keyword argument(s): %s" % ', '.join(kwds.keys())) - def decorator(fn): - co = fn.func_code - new_consts = [] - for const in co.co_consts: - new_consts.append(_convert_const_maybe(const, encoding)) - new_consts = tuple(new_consts) - new_code = types.CodeType(co.co_argcount, co.co_nlocals, co.co_stacksize, - co.co_flags, co.co_code, new_consts, co.co_names, - co.co_varnames, co.co_filename, co.co_name, - co.co_firstlineno, co.co_lnotab) - fn.func_code = new_code - return fn - # - # support the usage of @with_unicode_literals instead of @with_unicode_literals() - if fn is not None: - assert type(fn) is types.FunctionType - return decorator(fn) - else: - return decorator diff --git a/rpython/tool/test/test_sourcetools.py b/rpython/tool/test/test_sourcetools.py --- a/rpython/tool/test/test_sourcetools.py +++ b/rpython/tool/test/test_sourcetools.py @@ -1,7 +1,5 @@ -# -*- encoding: utf-8 -*- -import py from rpython.tool.sourcetools import ( - func_with_new_name, func_renamer, rpython_wrapper, with_unicode_literals) + func_renamer, func_with_new_name, rpython_wrapper) def test_rename(): def f(x, y=5): @@ -57,30 +55,3 @@ ('decorated', 40, 2), ('bar', 40, 2), ] - - -def test_with_unicode_literals(): - @with_unicode_literals() - def foo(): - return 'hello' - assert type(foo()) is unicode - # - @with_unicode_literals - def foo(): - return 'hello' - assert type(foo()) is unicode - # - def foo(): - return 'hello àèì' - py.test.raises(UnicodeDecodeError, "with_unicode_literals(foo)") - # - @with_unicode_literals(encoding='utf-8') - def foo(): - return 'hello àèì' - assert foo() == u'hello àèì' - # - @with_unicode_literals - def foo(): - return ('a', 'b') - assert type(foo()[0]) is unicode - From noreply at buildbot.pypy.org Wed Feb 5 00:10:52 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Wed, 5 Feb 2014 00:10:52 +0100 (CET) Subject: [pypy-commit] pypy py3k: this is probably the intended workaround for the tests. fixes unicode handling Message-ID: <20140204231052.945EB1C3235@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69081:835b774e50f6 Date: 2014-02-04 15:09 -0800 http://bitbucket.org/pypy/pypy/changeset/835b774e50f6/ Log: this is probably the intended workaround for the tests. fixes unicode handling issue1574 diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py --- a/lib_pypy/pyrepl/reader.py +++ b/lib_pypy/pyrepl/reader.py @@ -58,7 +58,7 @@ else: return c -if 'a'[0] == 'a': +if 'a'[0] == b'a': # When running tests with python2, bytes characters are bytes. 
def _my_unctrl(c, uc=_my_unctrl): return uc(ord(c)) From noreply at buildbot.pypy.org Wed Feb 5 09:17:48 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 5 Feb 2014 09:17:48 +0100 (CET) Subject: [pypy-commit] stmgc c7: performance: not always do a safe-point in stm_allocate() (still missing a way to request it though) Message-ID: <20140205081748.046581C01AE@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r701:4313bc715c7e Date: 2014-02-05 09:18 +0100 http://bitbucket.org/pypy/stmgc/changeset/4313bc715c7e/ Log: performance: not always do a safe-point in stm_allocate() (still missing a way to request it though) diff --git a/c7/nursery.c b/c7/nursery.c --- a/c7/nursery.c +++ b/c7/nursery.c @@ -177,6 +177,13 @@ localchar_t *collect_and_reserve(size_t size) { + /* reset nursery_current (left invalid by the caller) */ + _STM_TL->nursery_current -= size; + + /* XXX: check for requested safe-point (by setting nursery_current + too high or similar) */ + + _stm_start_safe_point(0); /* don't release the COLLECT lock, that needs to be done afterwards if we want a major collection */ @@ -196,16 +203,12 @@ { object_t *result; - _stm_start_safe_point(LOCK_COLLECT); - /* all collections may happen here */ - _stm_stop_safe_point(LOCK_COLLECT); - assert(_STM_TL->active); assert(size % 8 == 0); assert(16 <= size); /* XXX move out of fastpath */ - if (size >= NURSERY_SECTION) { + if (UNLIKELY(size >= NURSERY_SECTION)) { /* allocate large objects outside the nursery immediately, otherwise they may trigger too many minor collections and degrade performance */ @@ -231,7 +234,6 @@ assert((uintptr_t)new_current < (1L << 32)); if ((uintptr_t)new_current > FIRST_AFTER_NURSERY_PAGE * 4096) { - _STM_TL->nursery_current = current; /* reset for nursery-clearing in minor_collect!! */ current = collect_and_reserve(size); } From noreply at buildbot.pypy.org Wed Feb 5 09:59:42 2014 From: noreply at buildbot.pypy.org (krono) Date: Wed, 5 Feb 2014 09:59:42 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: add shebang to targets Message-ID: <20140205085942.42DBC1C321C@cobra.cs.uni-duesseldorf.de> Author: Tobias Pape Branch: Changeset: r594:63f6f3f9b38f Date: 2014-02-05 09:59 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/63f6f3f9b38f/ Log: add shebang to targets diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py old mode 100644 new mode 100755 --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -1,3 +1,4 @@ +#! /usr/bin/env python import sys, time import os diff --git a/targettinybenchsmalltalk.py b/targettinybenchsmalltalk.py old mode 100644 new mode 100755 --- a/targettinybenchsmalltalk.py +++ b/targettinybenchsmalltalk.py @@ -1,3 +1,4 @@ +#! 
/usr/bin/env python import os, sys from spyvm import model, interpreter, primitives, shadow, constants from spyvm.tool.analyseimage import create_squeakimage, create_testimage From noreply at buildbot.pypy.org Wed Feb 5 13:47:30 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 5 Feb 2014 13:47:30 +0100 (CET) Subject: [pypy-commit] stmgc c7: add parallel version of n-queens Message-ID: <20140205124730.6B4F51C0153@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r703:852f9a4772d4 Date: 2014-02-05 13:02 +0100 http://bitbucket.org/pypy/stmgc/changeset/852f9a4772d4/ Log: add parallel version of n-queens diff --git a/duhton/demo/nqueens.duh b/duhton/demo/nqueens.duh --- a/duhton/demo/nqueens.duh +++ b/duhton/demo/nqueens.duh @@ -3,13 +3,33 @@ -(setq count (container 0)) (defun abs (i) (if (<= 0 i) i (- 0 i))) +(defun clean_list (n) + (setq i n) + (setq res (list)) + (while (> i 0) + (append res 0) + (setq i (- i 1)) + ) + res + ) + +(defun copy_list (xs) + (setq res (list)) + (setq idx 0) + (while (< idx (len xs)) + (append res (get xs idx)) + (setq idx (+ idx 1)) + ) + res + ) + + (defun attacks (hist col i j) (|| (== (get hist j) i) (== (abs (- (get hist j) i)) @@ -38,11 +58,11 @@ ) ) -(defun solve (n col hist) +(defun solve (n col hist count) (if (== col n) (progn (set count (+ (get count) 1)) - (print_solution hist n) + ;; (print_solution hist n) ) ;; else @@ -57,7 +77,7 @@ (if (>= j col) (progn (set hist col i) - (solve n (+ col 1) hist) + (solve n (+ col 1) hist count) )) (setq i (+ i 1)) @@ -66,20 +86,63 @@ ) +(defun solve_parallel (n col hist count) + (if (== col n) + (progn + (set count (+ (get count) 1)) + ;; (print_solution hist n) + ) -(defun clean_list (n) - (setq i n) - (setq res (list)) - (while (> i 0) - (append res 0) - (setq i (- i 1)) + ;; else + (setq i 0) + (setq transaction-limit 1) + (if (== col transaction-limit) + (setq counts (list))) + + (while (< i n) + (setq j 0) + (while (&& (< j col) + (not (attacks hist col i j))) + (setq j (+ j 1)) + ) + + (if (>= j col) + (progn + (set hist col i) + (if (== col transaction-limit) + (progn + (setq new_cont (container 0)) + (append counts new_cont) + (transaction solve n (+ col 1) (copy_list hist) new_cont) + ) + (solve_parallel n (+ col 1) hist count) + ) + ) + ) + ;; iterator + (setq i (+ i 1)) + ) + + (if (== col transaction-limit) + (progn + (run-transactions) + (setq i 0) + (while (< i (len counts)) + (set count (+ (get count) (get (get counts i)))) + (setq i (+ i 1)) + ) + ) + ) ) - res ) -(setq n 8) -(solve n 0 (clean_list n)) + + +(setq count (container 0)) + +(setq n 11) +(solve_parallel n 0 (clean_list n) count) (print (quote solutions:) (get count)) From noreply at buildbot.pypy.org Wed Feb 5 13:47:29 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 5 Feb 2014 13:47:29 +0100 (CET) Subject: [pypy-commit] stmgc c7: simple n-queens demo for duhton Message-ID: <20140205124729.58CDC1C0153@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r702:ff1022e19989 Date: 2014-02-05 10:57 +0100 http://bitbucket.org/pypy/stmgc/changeset/ff1022e19989/ Log: simple n-queens demo for duhton diff --git a/duhton/demo/nqueens.duh b/duhton/demo/nqueens.duh new file mode 100644 --- /dev/null +++ b/duhton/demo/nqueens.duh @@ -0,0 +1,85 @@ + + + + + +(setq count (container 0)) + +(defun abs (i) + (if (<= 0 i) + i + (- 0 i))) + +(defun attacks (hist col i j) + (|| (== (get hist j) i) + (== (abs (- (get hist j) i)) + (- col j))) + ) + +(defun print_solution (hist n) + (print 
(quote solution) n) + (setq i 0) + (while (< i n) + (setq line (list)) + (setq j 0) + (while (< j n) + (if (== j (get hist i)) + (append line (quote Q)) + (if (== 0 (% (+ i j) 2)) + (append line (quote .)) + (append line (quote ,)) + ) + ) + (setq j (+ j 1)) + ) + + (print line) + (setq i (+ i 1)) + ) + ) + +(defun solve (n col hist) + (if (== col n) + (progn + (set count (+ (get count) 1)) + (print_solution hist n) + ) + + ;; else + (setq i 0) + (while (< i n) + (setq j 0) + (while (&& (< j col) + (not (attacks hist col i j))) + (setq j (+ j 1)) + ) + + (if (>= j col) + (progn + (set hist col i) + (solve n (+ col 1) hist) + )) + + (setq i (+ i 1)) + ) + ) + ) + + + +(defun clean_list (n) + (setq i n) + (setq res (list)) + (while (> i 0) + (append res 0) + (setq i (- i 1)) + ) + res + ) + + + +(setq n 8) +(solve n 0 (clean_list n)) +(print (quote solutions:) (get count)) + diff --git a/duhton/demo/sort.duh b/duhton/demo/sort.duh --- a/duhton/demo/sort.duh +++ b/duhton/demo/sort.duh @@ -173,7 +173,7 @@ (setq current (time)) (print (quote before-random)) -(setq cs (random_list 200000)) +(setq cs (random_list 300000)) (print (quote time-random:) (- (time) current)) ;; (print_list cs) From noreply at buildbot.pypy.org Wed Feb 5 13:47:31 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 5 Feb 2014 13:47:31 +0100 (CET) Subject: [pypy-commit] stmgc c7: fix missing read-barrier in duhton Message-ID: <20140205124731.732841C0153@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r704:679c5904557a Date: 2014-02-05 13:48 +0100 http://bitbucket.org/pypy/stmgc/changeset/679c5904557a/ Log: fix missing read-barrier in duhton diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -124,6 +124,7 @@ uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - READMARKER_START; uint8_t lock_num = _STM_TL->thread_num + 1; uint8_t prev_owner; + uint8_t retries = 0; retry: do { prev_owner = __sync_val_compare_and_swap(&write_locks[lock_idx], @@ -142,14 +143,18 @@ _stm_stop_safe_point(0); goto retry; } - /* XXXXXX */ - //_stm_start_semi_safe_point(); - //usleep(1); - //_stm_stop_semi_safe_point(); - // try again.... 
XXX + + + if (retries < 1) { + _stm_start_safe_point(0); + usleep(1); + _stm_stop_safe_point(0); + retries++; + goto retry; + } + stm_abort_transaction(); /* XXX: only abort if we are younger */ - spin_loop(); } while (1); /* remove the write-barrier ONLY if we have the write-lock */ diff --git a/duhton/demo/synth.duh b/duhton/demo/synth.duh new file mode 100644 --- /dev/null +++ b/duhton/demo/synth.duh @@ -0,0 +1,88 @@ + + + +(defun clean_list (n) + (setq i n) + (setq res (list)) + (while (> i 0) + (append res 0) + (setq i (- i 1)) + ) + res + ) + + +(setq _rand (container (list 133542157 362436069 521288629 88675123))) +(defun xor128 () + (setq lst (get _rand)) + (setq x (get lst 0)) + (setq y (get lst 1)) + (setq z (get lst 2)) + (setq w (get lst 3)) + + (setq t (^ x (<< x 11))) + (setq x y) + (setq y z) + (setq z w) + + (setq w (^ w (^ (>> w 19) (^ t (>> t 8))))) + (set lst 0 x) + (set lst 1 y) + (set lst 2 z) + (set lst 3 w) + w + ) + + +(defun random_list (n max) + (setq i n) + (setq res (list)) + (while (> i 0) + (append res (% (xor128) max)) + (setq i (- i 1)) + ) + res + ) + + + + +(defun worker (shared private) + (setq i 1) + (while (< i 10000) + ;; every 200th modification is on 'shared' + (if (== (% i 200) 0) + (set shared (+ (get shared) 1)) + (set private (+ (get private) 1)) + ) + + (setq i (+ i 1)) + ) + ) + + + +(setq N 800) +(setq RAND_MAX 10) +(setq CONFL_IF_BELOW 1) + + +(setq shared (container 0)) + +(setq rand-list (random_list N RAND_MAX)) +(setq i 0) +(while (< i N) + (setq private (container 0)) + (if (< (get rand-list i) CONFL_IF_BELOW) + ;; conflicting transaction + (transaction worker shared private) + ;; else non-conflicting + (transaction worker private private) + ) + + (setq i (+ i 1)) + ) + +(run-transactions) +(print (quote shared) (get shared)) + diff --git a/duhton/transaction.c b/duhton/transaction.c --- a/duhton/transaction.c +++ b/duhton/transaction.c @@ -93,7 +93,7 @@ stm_start_inevitable_transaction(); root = du_pending_transactions; - /* _du_read1(root); IMMUTABLE */ + _du_read1(root); /* not immutable... 
*/ if (root->cdr != Du_None) { DuObject *cell = root->cdr; @@ -135,8 +135,9 @@ stm_thread_local_obj = NULL; while (__builtin_setjmp(here) == 1) { } - stm_start_transaction(&here); - + //stm_start_transaction(&here); + stm_start_inevitable_transaction(); + /* _du_read1(pending); IMMUTABLE */ DuObject *result = _DuCons_CAR(pending); DuObject *next = _DuCons_NEXT(pending); From noreply at buildbot.pypy.org Wed Feb 5 14:51:35 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 5 Feb 2014 14:51:35 +0100 (CET) Subject: [pypy-commit] stmgc c7: implement requesting of safe-points and older-transaction-succeeds in write-write conflicts Message-ID: <20140205135135.B29B91C019D@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r705:d7f5d26b082d Date: 2014-02-05 14:52 +0100 http://bitbucket.org/pypy/stmgc/changeset/d7f5d26b082d/ Log: implement requesting of safe-points and older-transaction-succeeds in write-write conflicts diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -21,6 +21,7 @@ static int num_threads_started; uint8_t write_locks[READMARKER_END - READMARKER_START]; volatile uint8_t inevitable_lock __attribute__((aligned(64))); /* cache-line alignment */ +long global_age = 0; struct _thread_local1_s* _stm_dbg_get_tl(int thread) { @@ -134,18 +135,16 @@ if ((!prev_owner) || (prev_owner == lock_num)) break; - if (_STM_TL->active == 2) { + struct _thread_local1_s* other_tl = _stm_dbg_get_tl(prev_owner - 1); + if ((_STM_TL->age < other_tl->age) || (_STM_TL->active == 2)) { /* we must succeed! */ - _stm_dbg_get_tl(prev_owner - 1)->need_abort = 1; + other_tl->need_abort = 1; _stm_start_safe_point(0); /* XXX: not good, maybe should be signalled by other thread */ usleep(1); _stm_stop_safe_point(0); goto retry; - } - - - if (retries < 1) { + } else if (retries < 1) { _stm_start_safe_point(0); usleep(1); _stm_stop_safe_point(0); @@ -176,8 +175,8 @@ _stm_restore_local_state(thread_num); _STM_TL->nursery_current = (localchar_t*)(FIRST_NURSERY_PAGE * 4096); - memset((void*)real_address((object_t*)_STM_TL->nursery_current), 0x0, - (FIRST_AFTER_NURSERY_PAGE - FIRST_NURSERY_PAGE) * 4096); /* clear nursery */ + memset((void*)real_address((object_t*)CLEAR_SYNC_REQUEST(_STM_TL->nursery_current)), + 0x0, (FIRST_AFTER_NURSERY_PAGE - FIRST_NURSERY_PAGE) * 4096); /* clear nursery */ _STM_TL->shadow_stack = NULL; _STM_TL->shadow_stack_base = NULL; @@ -386,6 +385,8 @@ _STM_TL->jmpbufptr = jmpbufptr; _STM_TL->active = 1; _STM_TL->need_abort = 0; + /* global_age is approximate -> no synchronization required */ + _STM_TL->age = global_age++; fprintf(stderr, "%c", 'S'+_STM_TL->thread_num*32); } diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -94,6 +94,10 @@ jmpbufptr_t *jmpbufptr; uint8_t transaction_read_version; + /* unsynchronized/inaccurate start age of transaction + XXX: may be replaced with size_of(read/write-set) */ + long age; + /* static threads, not pthreads */ int thread_num; char *thread_base; @@ -105,7 +109,10 @@ object_t **shadow_stack; object_t **shadow_stack_base; - localchar_t *nursery_current; + union { + localchar_t *nursery_current; + uint32_t nursery_current_halfwords[2]; + }; struct stm_list_s *modified_objects; diff --git a/c7/nursery.c b/c7/nursery.c --- a/c7/nursery.c +++ b/c7/nursery.c @@ -166,7 +166,7 @@ /* clear nursery */ localchar_t *nursery_base = (localchar_t*)(FIRST_NURSERY_PAGE * 4096); memset((void*)real_address((object_t*)nursery_base), 0x0, - _STM_TL->nursery_current - nursery_base); + 
CLEAR_SYNC_REQUEST(_STM_TL->nursery_current) - nursery_base); _STM_TL->nursery_current = nursery_base; } @@ -177,23 +177,33 @@ localchar_t *collect_and_reserve(size_t size) { + localchar_t *new_current = _STM_TL->nursery_current; + + while (((uintptr_t)new_current > FIRST_AFTER_NURSERY_PAGE * 4096) + && _STM_TL->nursery_current_halfwords[1]) { + + _STM_TL->nursery_current_halfwords[1] = 0; + _stm_start_safe_point(0); + /* no collect, it would mess with nursery_current */ + _stm_stop_safe_point(0); + + new_current = _STM_TL->nursery_current; + } + + if (!((uintptr_t)new_current > FIRST_AFTER_NURSERY_PAGE * 4096)) { + /* after safe-point, new_current is actually fine again */ + return new_current - size; + } + /* reset nursery_current (left invalid by the caller) */ _STM_TL->nursery_current -= size; - /* XXX: check for requested safe-point (by setting nursery_current - too high or similar) */ - - - _stm_start_safe_point(0); /* don't release the COLLECT lock, - that needs to be done afterwards if - we want a major collection */ minor_collect(); - _stm_stop_safe_point(0); /* XXX: if we_want_major_collect: acquire EXCLUSIVE & COLLECT lock and do it */ - localchar_t *current = _STM_TL->nursery_current; + localchar_t *current = CLEAR_SYNC_REQUEST(_STM_TL->nursery_current); _STM_TL->nursery_current = current + size; return current; } @@ -231,7 +241,6 @@ localchar_t *current = _STM_TL->nursery_current; localchar_t *new_current = current + size; _STM_TL->nursery_current = new_current; - assert((uintptr_t)new_current < (1L << 32)); if ((uintptr_t)new_current > FIRST_AFTER_NURSERY_PAGE * 4096) { current = collect_and_reserve(size); @@ -312,7 +321,7 @@ /* clear the nursery */ localchar_t *nursery_base = (localchar_t*)(FIRST_NURSERY_PAGE * 4096); memset((void*)real_address((object_t*)nursery_base), 0x0, - _STM_TL->nursery_current - nursery_base); + CLEAR_SYNC_REQUEST(_STM_TL->nursery_current) - nursery_base); _STM_TL->nursery_current = nursery_base; diff --git a/c7/stmsync.c b/c7/stmsync.c --- a/c7/stmsync.c +++ b/c7/stmsync.c @@ -108,7 +108,7 @@ assert(!_STM_TL->active); /* assert(!_STM_TL->need_abort); may happen, but will be cleared by start_transaction() */ - assert(_STM_TL->nursery_current == (localchar_t*)(FIRST_NURSERY_PAGE * 4096)); + assert(CLEAR_SYNC_REQUEST(_STM_TL->nursery_current) == (localchar_t*)(FIRST_NURSERY_PAGE * 4096)); } void _stm_acquire_tl_segment() @@ -270,10 +270,12 @@ _stm_grab_thread_segment(); } - if (flags & LOCK_EXCLUSIVE) + if (flags & LOCK_EXCLUSIVE) { + stm_request_safe_point(1 - _STM_TL->thread_num); stm_start_exclusive_lock(); - else + } else { stm_start_shared_lock(); + } if (flags & LOCK_COLLECT) { /* if we released the collection lock */ /* acquire read-collection. 
always succeeds because @@ -296,3 +298,9 @@ +void stm_request_safe_point(int thread_num) +{ + struct _thread_local1_s* other_tl = _stm_dbg_get_tl(thread_num); + other_tl->nursery_current_halfwords[1] = 1; +} + diff --git a/c7/stmsync.h b/c7/stmsync.h --- a/c7/stmsync.h +++ b/c7/stmsync.h @@ -17,3 +17,8 @@ THREAD_YIELD = (1 << 2), }; + +void stm_request_safe_point(int thread_num); + +#define CLEAR_SYNC_REQUEST(nursery_current) ((localchar_t*)(((uintptr_t)(nursery_current)) & 0xffffffff)) + diff --git a/duhton/demo/synth.duh b/duhton/demo/synth.duh --- a/duhton/demo/synth.duh +++ b/duhton/demo/synth.duh @@ -62,8 +62,10 @@ -(setq N 800) -(setq RAND_MAX 10) +(setq N 1000) +;; CONFL_IF_BELOW / RAND_MAX == ratio of conflicting transactions +;; to non conflicting ones +(setq RAND_MAX 8) (setq CONFL_IF_BELOW 1) From noreply at buildbot.pypy.org Wed Feb 5 22:39:13 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 5 Feb 2014 22:39:13 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: copy seperate_module_files even when no makefile is used Message-ID: <20140205213913.D9C981D234D@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r69082:8c1b2356d7bd Date: 2014-02-05 22:01 +0200 http://bitbucket.org/pypy/pypy/changeset/8c1b2356d7bd/ Log: copy seperate_module_files even when no makefile is used diff --git a/rpython/translator/c/dlltool.py b/rpython/translator/c/dlltool.py --- a/rpython/translator/c/dlltool.py +++ b/rpython/translator/c/dlltool.py @@ -31,6 +31,8 @@ extsymeci = ExternalCompilationInfo(export_symbols=export_symbols) self.eci = self.eci.merge(extsymeci) files = [self.c_source_filename] + self.extrafiles + files += self.eventually_copy(self.eci.separate_module_files) + self.eci.separate_module_files = () oname = self.name self.so_name = self.translator.platform.compile(files, self.eci, standalone=False, From noreply at buildbot.pypy.org Wed Feb 5 22:39:15 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 5 Feb 2014 22:39:15 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: percolate keword changes to darwin Message-ID: <20140205213915.0422F1D234D@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r69083:8ceff85c1482 Date: 2014-02-05 22:26 +0200 http://bitbucket.org/pypy/pypy/changeset/8ceff85c1482/ Log: percolate keword changes to darwin diff --git a/rpython/translator/platform/darwin.py b/rpython/translator/platform/darwin.py --- a/rpython/translator/platform/darwin.py +++ b/rpython/translator/platform/darwin.py @@ -50,14 +50,17 @@ return ["-Wl,-exported_symbols_list,%s" % (response_file,)] def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False, cfile_precompilation=None): + shared=False, headers_to_precompile=[], + no_precompile_cfiles = []): # ensure frameworks are passed in the Makefile fs = self._frameworks(eci.frameworks) if len(fs) > 0: # concat (-framework, FrameworkName) pairs self.extra_libs += tuple(map(" ".join, zip(fs[::2], fs[1::2]))) mk = super(Darwin, self).gen_makefile(cfiles, eci, exe_name, path, - shared, cfile_precompilation) + shared=shared, + headers_to_precompile=headers_to_precompile, + no_precompile_cfiles = no_precompile_cfiles) return mk From noreply at buildbot.pypy.org Wed Feb 5 22:39:16 2014 From: noreply at buildbot.pypy.org (mattip) Date: Wed, 5 Feb 2014 22:39:16 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: implement for posix, fix test for posix and pep8 cleanup Message-ID: 
<20140205213916.330DC1D234D@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r69084:7fb7c9228605 Date: 2014-02-05 23:38 +0200 http://bitbucket.org/pypy/pypy/changeset/7fb7c9228605/ Log: implement for posix, fix test for posix and pep8 cleanup diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py --- a/rpython/translator/platform/posix.py +++ b/rpython/translator/platform/posix.py @@ -158,16 +158,29 @@ ('CC_LINK', eci.use_cpp_linker and 'g++' or '$(CC)'), ('LINKFILES', eci.link_files), ] - for args in definitions: - m.definition(*args) rules = [ ('all', '$(DEFAULT_TARGET)', []), ('$(TARGET)', '$(OBJECTS)', '$(CC_LINK) $(LDFLAGSEXTRA) -o $@ $(OBJECTS) $(LIBDIRS) $(LIBS) $(LINKFILES) $(LDFLAGS)'), - ('%.o', '%.c', '$(CC) $(CFLAGS) $(CFLAGSEXTRA) -o $@ -c $< $(INCLUDEDIRS)'), ('%.o', '%.cxx', '$(CXX) $(CFLAGS) $(CFLAGSEXTRA) -o $@ -c $< $(INCLUDEDIRS)'), ] + if len(headers_to_precompile)>0: + stdafx_h = path.join('stdafx.h') + txt = '#ifndef PYPY_STDAFX_H\n' + txt += '#define PYPY_STDAFX_H\n' + txt += '\n'.join(['#include "' + m.pathrel(c) + '"' for c in headers_to_precompile]) + txt += '\n#endif\n' + stdafx_h.write(txt) + rules.append(('$(OBJECTS)', 'stdafx.h.gch', [])) + rules.append(('%.h.gch', '%.h', + '$(CC) $(CFLAGS) $(CFLAGSEXTRA) -o $@ -c $< $(INCLUDEDIRS)')) + rules.append(('%.o', '%.c', '$(CC) $(CFLAGS) $(CFLAGSEXTRA) -include stdafx.h -o $@ -c $< $(INCLUDEDIRS)')) + else: + rules.append(('%.o', '%.c', '$(CC) $(CFLAGS) $(CFLAGSEXTRA) -o $@ -c $< $(INCLUDEDIRS)')) + + for args in definitions: + m.definition(*args) for rule in rules: m.rule(*rule) diff --git a/rpython/translator/platform/test/test_makefile.py b/rpython/translator/platform/test/test_makefile.py --- a/rpython/translator/platform/test/test_makefile.py +++ b/rpython/translator/platform/test/test_makefile.py @@ -6,6 +6,13 @@ from StringIO import StringIO import re, sys +import time +if sys.platform == 'win32': + get_time = time.clock +else: + get_time = time.time + + def test_simple_makefile(): m = Makefile() m.definition('CC', 'xxx') @@ -31,7 +38,7 @@ m.write(s) val = s.getvalue() assert not re.search('CC += +xxx', val, re.M) - assert re.search('CC += +yyy', val, re.M) + assert re.search('CC += +yyy', val, re.M) class TestMakefile(object): platform = host @@ -41,13 +48,13 @@ assert res.out == expected if self.strict_on_stderr: assert res.err == '' - assert res.returncode == 0 - + assert res.returncode == 0 + def test_900_files(self): txt = '#include \n' for i in range(900): txt += 'int func%03d();\n' % i - txt += 'int main() {\n int j=0;' + txt += 'int main() {\n int j=0;' for i in range(900): txt += ' j += func%03d();\n' % i txt += ' printf("%d\\n", j);\n' @@ -71,7 +78,6 @@ self.check_res(res, '%d\n' %sum(range(900))) def test_precompiled_headers(self): - import time tmpdir = udir.join('precompiled_headers').ensure(dir=1) # Create an eci that should not use precompiled headers eci = ExternalCompilationInfo(include_dirs=[tmpdir]) @@ -79,7 +85,7 @@ eci.separate_module_files = [main_c] ncfiles = 10 nprecompiled_headers = 20 - txt = '' + txt = '#include \n' for i in range(ncfiles): txt += "int func%03d();\n" % i txt += "\nint main(int argc, char * argv[])\n" @@ -97,8 +103,8 @@ for j in range(3000): txt += "int pcfunc%03d_%03d();\n" %(i, j) txt += '#endif' - pch_name.write(txt) - cfiles_precompiled_headers.append(pch_name) + pch_name.write(txt) + cfiles_precompiled_headers.append(pch_name) # Create some cfiles with headers we want precompiled cfiles = [] 
for i in range(ncfiles): @@ -108,18 +114,18 @@ txt += '#include "%s"\n' % pch_name txt += "int func%03d(){ return %d;};\n" % (i, i) c_name.write(txt) - cfiles.append(c_name) + cfiles.append(c_name) if sys.platform == 'win32': clean = ('clean', '', 'for %f in ( $(OBJECTS) $(TARGET) ) do @if exist %f del /f %f') - else: + else: clean = ('clean', '', 'rm -f $(OBJECTS) $(TARGET) ') #write a non-precompiled header makefile mk = self.platform.gen_makefile(cfiles, eci, path=tmpdir) mk.rule(*clean) mk.write() - t0 = time.clock() + t0 = get_time() self.platform.execute_makefile(mk) - t1 = time.clock() + t1 = get_time() t_normal = t1 - t0 self.platform.execute_makefile(mk, extra_opts=['clean']) # Write a super-duper makefile with precompiled headers @@ -127,13 +133,13 @@ headers_to_precompile=cfiles_precompiled_headers,) mk.rule(*clean) mk.write() - t0 = time.clock() + t0 = get_time() self.platform.execute_makefile(mk) - t1 = time.clock() + t1 = get_time() t_precompiled = t1 - t0 res = self.platform.execute(mk.exe_name) self.check_res(res, '%d\n' %sum(range(ncfiles))) print "precompiled haeder 'make' time %.2f, non-precompiled header time %.2f" %(t_precompiled, t_normal) - assert t_precompiled < t_normal * 0.5 + assert t_precompiled < t_normal * 0.8 - + diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -31,7 +31,7 @@ raise Exception("Win64 is not supported. You must either build for Win32" " or contribute the missing support in PyPy.") return _get_compiler_type(cc, True) - + def _get_msvc_env(vsver, x64flag): try: toolsdir = os.environ['VS%sCOMNTOOLS' % vsver] @@ -94,7 +94,7 @@ name = "msvc" so_ext = 'dll' exe_ext = 'exe' - + relevant_environ = ('PATH', 'INCLUDE', 'LIB') cc = 'cl.exe' @@ -105,7 +105,7 @@ standalone_only = () shared_only = () environ = None - + def __init__(self, cc=None, x64=False): self.x64 = x64 msvc_compiler_environ = find_msvc_env(x64) @@ -136,7 +136,7 @@ else: masm32 = 'ml.exe' masm64 = 'ml64.exe' - + if x64: self.masm = masm64 else: @@ -336,10 +336,10 @@ definitions.append(('CREATE_PCH', '/Ycstdafx.h /Fpstdafx.pch /FIstdafx.h')) definitions.append(('USE_PCH', '/Yustdafx.h /Fpstdafx.pch /FIstdafx.h')) rules.append(('$(OBJECTS)', 'stdafx.pch', [])) - rules.append(('stdafx.pch', 'stdafx.h', + rules.append(('stdafx.pch', 'stdafx.h', '$(CC) stdafx.c /c /nologo $(CFLAGS) $(CFLAGSEXTRA) ' '$(CREATE_PCH) $(INCLUDEDIRS)')) - rules.append(('.c.obj', '', + rules.append(('.c.obj', '', '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) $(USE_PCH) ' '/Fo$@ /c $< $(INCLUDEDIRS)')) #Do not use precompiled headers for some files @@ -359,7 +359,7 @@ '/Fo%s /c %s $(INCLUDEDIRS)' %(target, f))) else: - rules.append(('.c.obj', '', + rules.append(('.c.obj', '', '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) ' '/Fo$@ /c $< $(INCLUDEDIRS)')) @@ -369,7 +369,7 @@ for rule in rules: m.rule(*rule) - + objects = ' $(OBJECTS)' create_obj_response_file = [] if len(' '.join(rel_ofiles)) > 4000: @@ -378,7 +378,7 @@ for i in range(len(rel_ofiles) - 1): create_obj_response_file.append('echo %s >> obj_names.rsp' % \ rel_ofiles[i]) - # use cmd /c for the last one so that the file is flushed + # use cmd /c for the last one so that the file is flushed create_obj_response_file.append('cmd /c echo %s >> obj_names.rsp' % \ rel_ofiles[-1]) objects = ' @obj_names.rsp' From noreply at buildbot.pypy.org Thu Feb 6 02:41:20 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Thu, 6 Feb 2014 02:41:20 +0100 
(CET) Subject: [pypy-commit] pypy default: get source information for UnionErrors arising from non-RPython PBCs (such as ['a', 1]) Message-ID: <20140206014120.DD9FC1C0153@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r69085:437833a4447c Date: 2014-02-06 01:16 +0000 http://bitbucket.org/pypy/pypy/changeset/437833a4447c/ Log: get source information for UnionErrors arising from non-RPython PBCs (such as ['a', 1]) diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -582,18 +582,18 @@ def consider_op(self, block, opindex): op = block.operations[opindex] - argcells = [self.binding(a) for a in op.args] + try: + argcells = [self.binding(a) for a in op.args] - # let's be careful about avoiding propagated SomeImpossibleValues - # to enter an op; the latter can result in violations of the - # more general results invariant: e.g. if SomeImpossibleValue enters is_ - # is_(SomeImpossibleValue, None) -> SomeBool - # is_(SomeInstance(not None), None) -> SomeBool(const=False) ... - # boom -- in the assert of setbinding() - for arg in argcells: - if isinstance(arg, annmodel.SomeImpossibleValue): - raise BlockedInference(self, op, opindex) - try: + # let's be careful about avoiding propagated SomeImpossibleValues + # to enter an op; the latter can result in violations of the + # more general results invariant: e.g. if SomeImpossibleValue enters is_ + # is_(SomeImpossibleValue, None) -> SomeBool + # is_(SomeInstance(not None), None) -> SomeBool(const=False) ... + # boom -- in the assert of setbinding() + for arg in argcells: + if isinstance(arg, annmodel.SomeImpossibleValue): + raise BlockedInference(self, op, opindex) resultcell = op.consider(self, *argcells) except annmodel.AnnotatorError as e: # note that UnionError is a subclass graph = self.bookkeeper.position_key[0] diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4139,6 +4139,16 @@ a.build_types(f, [str]) assert ("Cannot prove that the object is callable" in exc.value.msg) + def test_UnionError_on_PBC(self): + l = ['a', 1] + def f(x): + l.append(x) + a = self.RPythonAnnotator() + with py.test.raises(annmodel.UnionError) as excinfo: + a.build_types(f, [int]) + assert 'Happened at file' in excinfo.value.source + assert 'Known variable annotations:' in excinfo.value.source + def test_str_format_error(self): def f(s, x): return s.format(x) From noreply at buildbot.pypy.org Thu Feb 6 05:11:19 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 6 Feb 2014 05:11:19 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: precompiled headers define _GNU_SOURCE Message-ID: <20140206041119.E0E5D1C0153@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r69086:4853ab8f1bf9 Date: 2014-02-06 06:03 +0200 http://bitbucket.org/pypy/pypy/changeset/4853ab8f1bf9/ Log: precompiled headers define _GNU_SOURCE diff --git a/rpython/translator/c/src/profiling.c b/rpython/translator/c/src/profiling.c --- a/rpython/translator/c/src/profiling.c +++ b/rpython/translator/c/src/profiling.c @@ -3,10 +3,7 @@ /* Linux GCC implementation */ -#ifndef _GNU_SOURCE -#define _GNU_SOURCE #include -#endif static cpu_set_t base_cpu_set; static int profiling_setup = 0; From noreply at buildbot.pypy.org Thu Feb 6 15:58:48 2014 From: noreply at buildbot.pypy.org (squeaky) Date: 
Thu, 6 Feb 2014 15:58:48 +0100 (CET) Subject: [pypy-commit] jitviewer default: Don't duplicate requirements from setup.py Message-ID: <20140206145848.8BD0E1C1504@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: Changeset: r254:709f1b155aaf Date: 2014-02-06 15:53 +0100 http://bitbucket.org/pypy/jitviewer/changeset/709f1b155aaf/ Log: Don't duplicate requirements from setup.py diff --git a/requirements.txt b/requirements.txt --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,1 @@ -flask -pygments -simplejson +-e . From noreply at buildbot.pypy.org Thu Feb 6 16:47:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 6 Feb 2014 16:47:46 +0100 (CET) Subject: [pypy-commit] pypy default: Add __pypy__.locals_to_fast(), from an idea by Fabio Zadrozny to allow a Message-ID: <20140206154746.AB3111C14F2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69087:e4876660324a Date: 2014-02-06 16:46 +0100 http://bitbucket.org/pypy/pypy/changeset/e4876660324a/ Log: Add __pypy__.locals_to_fast(), from an idea by Fabio Zadrozny to allow a Python debugger to modify local variables more freely. The original is http://bugs.python.org/issue1654367 . diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -81,6 +81,7 @@ 'newdict' : 'interp_dict.newdict', 'dictstrategy' : 'interp_dict.dictstrategy', 'set_debug' : 'interp_magic.set_debug', + 'locals_to_fast' : 'interp_magic.locals_to_fast', } if sys.platform == 'win32': interpleveldefs['get_console_cp'] = 'interp_magic.get_console_cp' diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError, wrap_oserror from pypy.interpreter.gateway import unwrap_spec +from pypy.interpreter.pyframe import PyFrame from rpython.rlib.objectmodel import we_are_translated from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.typeobject import MethodCache @@ -111,3 +112,8 @@ @unwrap_spec(estimate=int) def add_memory_pressure(estimate): rgc.add_memory_pressure(estimate) + + at unwrap_spec(w_frame=PyFrame) +def locals_to_fast(space, w_frame): + assert isinstance(w_frame, PyFrame) + w_frame.locals2fast() diff --git a/pypy/module/__pypy__/test/test_locals2fast.py b/pypy/module/__pypy__/test/test_locals2fast.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/test/test_locals2fast.py @@ -0,0 +1,81 @@ +# Tests from Fabio Zadrozny + + +class AppTestLocals2Fast: + """ + Test setting locals in one function from another function + using several approaches. + """ + + def setup_class(cls): + cls.w_save_locals = cls.space.appexec([], """(): + import sys + if '__pypy__' in sys.builtin_module_names: + import __pypy__ + save_locals = __pypy__.locals_to_fast + else: + # CPython version + import ctypes + @staticmethod + def save_locals(frame): + ctypes.pythonapi.PyFrame_LocalsToFast( + ctypes.py_object(frame), ctypes.c_int(0)) + return save_locals + """) + + def test_set_locals_using_save_locals(self): + import sys + def use_save_locals(name, value): + frame = sys._getframe().f_back + locals_dict = frame.f_locals + locals_dict[name] = value + self.save_locals(frame) + def test_method(fn): + x = 1 + # The method 'fn' should attempt to set x = 2 in the current frame. 
+ fn('x', 2) + return x + x = test_method(use_save_locals) + assert x == 2 + + def test_frame_simple_change(self): + import sys + frame = sys._getframe() + a = 20 + frame.f_locals['a'] = 50 + self.save_locals(frame) + assert a == 50 + + def test_frame_co_freevars(self): + import sys + outer_var = 20 + def func(): + frame = sys._getframe() + frame.f_locals['outer_var'] = 50 + self.save_locals(frame) + assert outer_var == 50 + func() + + def test_frame_co_cellvars(self): + import sys + def check_co_vars(a): + frame = sys._getframe() + def function2(): + print a + assert 'a' in frame.f_code.co_cellvars + frame = sys._getframe() + frame.f_locals['a'] = 50 + self.save_locals(frame) + assert a == 50 + check_co_vars(1) + + def test_frame_change_in_inner_frame(self): + import sys + def change(f): + assert f is not sys._getframe() + f.f_locals['a'] = 50 + self.save_locals(f) + frame = sys._getframe() + a = 20 + change(frame) + assert a == 50 From noreply at buildbot.pypy.org Thu Feb 6 17:42:07 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Thu, 6 Feb 2014 17:42:07 +0100 (CET) Subject: [pypy-commit] cffi alex_gaynor/handle-the-case-where-someone-has-made-i-1391634819444: Handle the case where someone has made `import weakref` return a weird proxy. Message-ID: <20140206164207.2B3D31C1413@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: alex_gaynor/handle-the-case-where-someone-has-made-i-1391634819444 Changeset: r1461:b90a5f638975 Date: 2014-02-05 21:13 +0000 http://bitbucket.org/cffi/cffi/changeset/b90a5f638975/ Log: Handle the case where someone has made `import weakref` return a weird proxy. Fixes: https://bugs.pypy.org/issue1688 diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -1,4 +1,6 @@ +import types import weakref + from .lock import allocate_lock @@ -469,8 +471,7 @@ # initialize the __typecache attribute, either at the module level # if ffi._backend is a module, or at the class level if ffi._backend # is some instance. - ModuleType = type(weakref) - if isinstance(ffi._backend, ModuleType): + if isinstance(ffi._backend, types.ModuleType): ffi._backend.__typecache = weakref.WeakValueDictionary() else: type(ffi._backend).__typecache = weakref.WeakValueDictionary() From noreply at buildbot.pypy.org Thu Feb 6 17:42:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 6 Feb 2014 17:42:08 +0100 (CET) Subject: [pypy-commit] cffi default: Merged in alex_gaynor/cffi-2/alex_gaynor/handle-the-case-where-someone-has-made-i-1391634819444 (pull request #26) Message-ID: <20140206164208.459CC1C1413@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1462:9f3caeed71db Date: 2014-02-06 17:41 +0100 http://bitbucket.org/cffi/cffi/changeset/9f3caeed71db/ Log: Merged in alex_gaynor/cffi-2/alex_gaynor/handle-the-case-where- someone-has-made-i-1391634819444 (pull request #26) Handle the case where someone has made `import weakref` return a weird proxy. diff --git a/cffi/model.py b/cffi/model.py --- a/cffi/model.py +++ b/cffi/model.py @@ -1,4 +1,6 @@ +import types import weakref + from .lock import allocate_lock @@ -469,8 +471,7 @@ # initialize the __typecache attribute, either at the module level # if ffi._backend is a module, or at the class level if ffi._backend # is some instance. 
- ModuleType = type(weakref) - if isinstance(ffi._backend, ModuleType): + if isinstance(ffi._backend, types.ModuleType): ffi._backend.__typecache = weakref.WeakValueDictionary() else: type(ffi._backend).__typecache = weakref.WeakValueDictionary() From noreply at buildbot.pypy.org Thu Feb 6 18:56:08 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 6 Feb 2014 18:56:08 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: backed out changeset: 4853ab8f1bf9 Message-ID: <20140206175608.8BBF01C1504@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r69088:f9034e83c282 Date: 2014-02-06 06:34 +0200 http://bitbucket.org/pypy/pypy/changeset/f9034e83c282/ Log: backed out changeset: 4853ab8f1bf9 diff --git a/rpython/translator/c/src/profiling.c b/rpython/translator/c/src/profiling.c --- a/rpython/translator/c/src/profiling.c +++ b/rpython/translator/c/src/profiling.c @@ -3,7 +3,10 @@ /* Linux GCC implementation */ +#ifndef _GNU_SOURCE +#define _GNU_SOURCE #include +#endif static cpu_set_t base_cpu_set; static int profiling_setup = 0; From noreply at buildbot.pypy.org Thu Feb 6 18:56:09 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 6 Feb 2014 18:56:09 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: backed out changeset: 7fb7c9228605 Message-ID: <20140206175609.C20C31C1504@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r69089:bceb0005f00f Date: 2014-02-06 19:49 +0200 http://bitbucket.org/pypy/pypy/changeset/bceb0005f00f/ Log: backed out changeset: 7fb7c9228605 diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py --- a/rpython/translator/platform/posix.py +++ b/rpython/translator/platform/posix.py @@ -158,29 +158,16 @@ ('CC_LINK', eci.use_cpp_linker and 'g++' or '$(CC)'), ('LINKFILES', eci.link_files), ] + for args in definitions: + m.definition(*args) rules = [ ('all', '$(DEFAULT_TARGET)', []), ('$(TARGET)', '$(OBJECTS)', '$(CC_LINK) $(LDFLAGSEXTRA) -o $@ $(OBJECTS) $(LIBDIRS) $(LIBS) $(LINKFILES) $(LDFLAGS)'), + ('%.o', '%.c', '$(CC) $(CFLAGS) $(CFLAGSEXTRA) -o $@ -c $< $(INCLUDEDIRS)'), ('%.o', '%.cxx', '$(CXX) $(CFLAGS) $(CFLAGSEXTRA) -o $@ -c $< $(INCLUDEDIRS)'), ] - if len(headers_to_precompile)>0: - stdafx_h = path.join('stdafx.h') - txt = '#ifndef PYPY_STDAFX_H\n' - txt += '#define PYPY_STDAFX_H\n' - txt += '\n'.join(['#include "' + m.pathrel(c) + '"' for c in headers_to_precompile]) - txt += '\n#endif\n' - stdafx_h.write(txt) - rules.append(('$(OBJECTS)', 'stdafx.h.gch', [])) - rules.append(('%.h.gch', '%.h', - '$(CC) $(CFLAGS) $(CFLAGSEXTRA) -o $@ -c $< $(INCLUDEDIRS)')) - rules.append(('%.o', '%.c', '$(CC) $(CFLAGS) $(CFLAGSEXTRA) -include stdafx.h -o $@ -c $< $(INCLUDEDIRS)')) - else: - rules.append(('%.o', '%.c', '$(CC) $(CFLAGS) $(CFLAGSEXTRA) -o $@ -c $< $(INCLUDEDIRS)')) - - for args in definitions: - m.definition(*args) for rule in rules: m.rule(*rule) diff --git a/rpython/translator/platform/test/test_makefile.py b/rpython/translator/platform/test/test_makefile.py --- a/rpython/translator/platform/test/test_makefile.py +++ b/rpython/translator/platform/test/test_makefile.py @@ -6,13 +6,6 @@ from StringIO import StringIO import re, sys -import time -if sys.platform == 'win32': - get_time = time.clock -else: - get_time = time.time - - def test_simple_makefile(): m = Makefile() m.definition('CC', 'xxx') @@ -38,7 +31,7 @@ m.write(s) val = s.getvalue() assert not re.search('CC += +xxx', val, re.M) - assert re.search('CC += +yyy', val, 
re.M) + assert re.search('CC += +yyy', val, re.M) class TestMakefile(object): platform = host @@ -48,13 +41,13 @@ assert res.out == expected if self.strict_on_stderr: assert res.err == '' - assert res.returncode == 0 - + assert res.returncode == 0 + def test_900_files(self): txt = '#include \n' for i in range(900): txt += 'int func%03d();\n' % i - txt += 'int main() {\n int j=0;' + txt += 'int main() {\n int j=0;' for i in range(900): txt += ' j += func%03d();\n' % i txt += ' printf("%d\\n", j);\n' @@ -78,6 +71,7 @@ self.check_res(res, '%d\n' %sum(range(900))) def test_precompiled_headers(self): + import time tmpdir = udir.join('precompiled_headers').ensure(dir=1) # Create an eci that should not use precompiled headers eci = ExternalCompilationInfo(include_dirs=[tmpdir]) @@ -85,7 +79,7 @@ eci.separate_module_files = [main_c] ncfiles = 10 nprecompiled_headers = 20 - txt = '#include \n' + txt = '' for i in range(ncfiles): txt += "int func%03d();\n" % i txt += "\nint main(int argc, char * argv[])\n" @@ -103,8 +97,8 @@ for j in range(3000): txt += "int pcfunc%03d_%03d();\n" %(i, j) txt += '#endif' - pch_name.write(txt) - cfiles_precompiled_headers.append(pch_name) + pch_name.write(txt) + cfiles_precompiled_headers.append(pch_name) # Create some cfiles with headers we want precompiled cfiles = [] for i in range(ncfiles): @@ -114,18 +108,18 @@ txt += '#include "%s"\n' % pch_name txt += "int func%03d(){ return %d;};\n" % (i, i) c_name.write(txt) - cfiles.append(c_name) + cfiles.append(c_name) if sys.platform == 'win32': clean = ('clean', '', 'for %f in ( $(OBJECTS) $(TARGET) ) do @if exist %f del /f %f') - else: + else: clean = ('clean', '', 'rm -f $(OBJECTS) $(TARGET) ') #write a non-precompiled header makefile mk = self.platform.gen_makefile(cfiles, eci, path=tmpdir) mk.rule(*clean) mk.write() - t0 = get_time() + t0 = time.clock() self.platform.execute_makefile(mk) - t1 = get_time() + t1 = time.clock() t_normal = t1 - t0 self.platform.execute_makefile(mk, extra_opts=['clean']) # Write a super-duper makefile with precompiled headers @@ -133,13 +127,13 @@ headers_to_precompile=cfiles_precompiled_headers,) mk.rule(*clean) mk.write() - t0 = get_time() + t0 = time.clock() self.platform.execute_makefile(mk) - t1 = get_time() + t1 = time.clock() t_precompiled = t1 - t0 res = self.platform.execute(mk.exe_name) self.check_res(res, '%d\n' %sum(range(ncfiles))) print "precompiled haeder 'make' time %.2f, non-precompiled header time %.2f" %(t_precompiled, t_normal) - assert t_precompiled < t_normal * 0.8 + assert t_precompiled < t_normal * 0.5 - + diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -31,7 +31,7 @@ raise Exception("Win64 is not supported. 
You must either build for Win32" " or contribute the missing support in PyPy.") return _get_compiler_type(cc, True) - + def _get_msvc_env(vsver, x64flag): try: toolsdir = os.environ['VS%sCOMNTOOLS' % vsver] @@ -94,7 +94,7 @@ name = "msvc" so_ext = 'dll' exe_ext = 'exe' - + relevant_environ = ('PATH', 'INCLUDE', 'LIB') cc = 'cl.exe' @@ -105,7 +105,7 @@ standalone_only = () shared_only = () environ = None - + def __init__(self, cc=None, x64=False): self.x64 = x64 msvc_compiler_environ = find_msvc_env(x64) @@ -136,7 +136,7 @@ else: masm32 = 'ml.exe' masm64 = 'ml64.exe' - + if x64: self.masm = masm64 else: @@ -336,10 +336,10 @@ definitions.append(('CREATE_PCH', '/Ycstdafx.h /Fpstdafx.pch /FIstdafx.h')) definitions.append(('USE_PCH', '/Yustdafx.h /Fpstdafx.pch /FIstdafx.h')) rules.append(('$(OBJECTS)', 'stdafx.pch', [])) - rules.append(('stdafx.pch', 'stdafx.h', + rules.append(('stdafx.pch', 'stdafx.h', '$(CC) stdafx.c /c /nologo $(CFLAGS) $(CFLAGSEXTRA) ' '$(CREATE_PCH) $(INCLUDEDIRS)')) - rules.append(('.c.obj', '', + rules.append(('.c.obj', '', '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) $(USE_PCH) ' '/Fo$@ /c $< $(INCLUDEDIRS)')) #Do not use precompiled headers for some files @@ -359,7 +359,7 @@ '/Fo%s /c %s $(INCLUDEDIRS)' %(target, f))) else: - rules.append(('.c.obj', '', + rules.append(('.c.obj', '', '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) ' '/Fo$@ /c $< $(INCLUDEDIRS)')) @@ -369,7 +369,7 @@ for rule in rules: m.rule(*rule) - + objects = ' $(OBJECTS)' create_obj_response_file = [] if len(' '.join(rel_ofiles)) > 4000: @@ -378,7 +378,7 @@ for i in range(len(rel_ofiles) - 1): create_obj_response_file.append('echo %s >> obj_names.rsp' % \ rel_ofiles[i]) - # use cmd /c for the last one so that the file is flushed + # use cmd /c for the last one so that the file is flushed create_obj_response_file.append('cmd /c echo %s >> obj_names.rsp' % \ rel_ofiles[-1]) objects = ' @obj_names.rsp' From noreply at buildbot.pypy.org Thu Feb 6 21:36:14 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 6 Feb 2014 21:36:14 +0100 (CET) Subject: [pypy-commit] pypy default: fix test_pypy_c:test_struct_module after 8e51b6cb4481 Message-ID: <20140206203614.0C9E01C0153@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69090:dc128e49e02e Date: 2014-02-06 15:33 -0500 http://bitbucket.org/pypy/pypy/changeset/dc128e49e02e/ Log: fix test_pypy_c:test_struct_module after 8e51b6cb4481 diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -333,8 +333,8 @@ loop, = log.loops_by_id("struct") if sys.maxint == 2 ** 63 - 1: extra = """ - i8 = int_lt(i4, -2147483648) - guard_false(i8, descr=...) + i8 = int_ge(i4, -2147483648) + guard_true(i8, descr=...) 
""" else: extra = "" From noreply at buildbot.pypy.org Fri Feb 7 09:55:33 2014 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 7 Feb 2014 09:55:33 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: fix COPY_BITS prim if we call the rpython plugin Message-ID: <20140207085533.4F60C1C1190@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r596:2d1aef2a70a3 Date: 2014-02-05 10:15 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/2d1aef2a70a3/ Log: fix COPY_BITS prim if we call the rpython plugin diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -659,7 +659,8 @@ return w_rcvr except shadow.MethodNotFound: from spyvm.plugins.bitblt import BitBltPlugin - return BitBltPlugin.call("primitiveCopyBits", interp, s_frame, argcount, s_method) + BitBltPlugin.call("primitiveCopyBits", interp, s_frame, argcount, s_method) + return w_rcvr @expose_primitive(BE_CURSOR) def func(interp, s_frame, argcount): From noreply at buildbot.pypy.org Fri Feb 7 09:55:32 2014 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 7 Feb 2014 09:55:32 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: dynamically get the pixelbuffer, because realloc may move it Message-ID: <20140207085532.136791C1190@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r595:1bcfc3ded0b0 Date: 2014-02-05 10:14 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/1bcfc3ded0b0/ Log: dynamically get the pixelbuffer, because realloc may move it diff --git a/spyvm/display.py b/spyvm/display.py --- a/spyvm/display.py +++ b/spyvm/display.py @@ -39,7 +39,8 @@ class SDLDisplay(object): _attrs_ = ["screen", "width", "height", "depth", "surface", "has_surface", "mouse_position", "button", "key", "interrupt_key", "_defer_updates", - "_deferred_event"] + "_deferred_event", "pixelbuffer"] + _immutable_fields_ = ["pixelbuffer?"] def __init__(self, title): assert RSDL.Init(RSDL.INIT_VIDEO) >= 0 @@ -69,9 +70,10 @@ raise RuntimeError elif d == 8: self.set_squeak_colormap(self.screen) + self.pixelbuffer = rffi.cast(rffi.UINTP, self.screen.c_pixels) def get_pixelbuffer(self): - return rffi.cast(rffi.ULONGP, self.screen.c_pixels) + return jit.promote(self.pixelbuffer) def defer_updates(self, flag): self._defer_updates = flag diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -991,7 +991,6 @@ def __init__(self, space, w_class, size, depth, display): W_AbstractObjectWithClassReference.__init__(self, space, w_class) self._real_depth_buffer = lltype.malloc(rffi.CArray(rffi.UINT), size, flavor='raw') - self.pixelbuffer = display.get_pixelbuffer() self._realsize = size self.display = display self._depth = depth @@ -1027,7 +1026,7 @@ def setword(self, n, word): self._real_depth_buffer[n] = word - self.pixelbuffer[n] = word + self.display.get_pixelbuffer()[n] = word def is_array_object(self): return True @@ -1061,13 +1060,13 @@ ((msb & mask) << 11) ) - self.pixelbuffer[n] = r_uint(lsb | (msb << 16)) + self.display.get_pixelbuffer()[n] = r_uint(lsb | (msb << 16)) class W_8BitDisplayBitmap(W_DisplayBitmap): def setword(self, n, word): self._real_depth_buffer[n] = word - self.pixelbuffer[n] = r_uint( + self.display.get_pixelbuffer()[n] = r_uint( (word >> 24) | ((word >> 8) & 0x0000ff00) | ((word << 8) & 0x00ff0000) | @@ -1092,7 +1091,7 @@ pixel = r_uint(word) >> rshift mapword |= (r_uint(pixel) << (i * 8)) word <<= self._depth - self.pixelbuffer[pos] = mapword + self.display.get_pixelbuffer()[pos] = 
mapword pos += 1 def compute_pos(self, n): From noreply at buildbot.pypy.org Fri Feb 7 09:55:35 2014 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 7 Feb 2014 09:55:35 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: try to improve byteobject comparisons Message-ID: <20140207085535.C716E1C1190@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r597:4830ce6194cb Date: 2014-02-05 10:15 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/4830ce6194cb/ Log: try to improve byteobject comparisons diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -796,13 +796,15 @@ return True def is_same_object(self, other): + if self is other: + return True # XXX this sounds very wrong to me - if not isinstance(other, W_BytesObject): + elif not isinstance(other, W_BytesObject): return False size = self.size() if size != other.size(): return False - if size > 256 and self.bytes is not None and other.bytes is not None: + elif size > 256 and self.bytes is not None and other.bytes is not None: return self.bytes == other.bytes else: return self.has_same_chars(other, size) From noreply at buildbot.pypy.org Fri Feb 7 09:55:38 2014 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 7 Feb 2014 09:55:38 +0100 (CET) Subject: [pypy-commit] lang-smalltalk default: merge remote Message-ID: <20140207085538.10A581C1190@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: Changeset: r598:e4c2e5637146 Date: 2014-02-07 09:54 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/e4c2e5637146/ Log: merge remote diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py old mode 100644 new mode 100755 --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -1,3 +1,4 @@ +#! /usr/bin/env python import sys, time import os diff --git a/targettinybenchsmalltalk.py b/targettinybenchsmalltalk.py old mode 100644 new mode 100755 --- a/targettinybenchsmalltalk.py +++ b/targettinybenchsmalltalk.py @@ -1,3 +1,4 @@ +#! 
/usr/bin/env python import os, sys from spyvm import model, interpreter, primitives, shadow, constants from spyvm.tool.analyseimage import create_squeakimage, create_testimage From noreply at buildbot.pypy.org Fri Feb 7 10:45:46 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 7 Feb 2014 10:45:46 +0100 (CET) Subject: [pypy-commit] stmgc c7: small changes Message-ID: <20140207094546.B0D5F1C0500@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r706:a4bc64e1d240 Date: 2014-02-07 10:46 +0100 http://bitbucket.org/pypy/stmgc/changeset/a4bc64e1d240/ Log: small changes diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -205,6 +205,7 @@ assert(READMARKER_START < READMARKER_END); assert(READMARKER_END <= 4096UL * FIRST_OBJECT_PAGE); assert(FIRST_OBJECT_PAGE < NB_PAGES); + assert((NB_NURSERY_PAGES * 4096) % NURSERY_SECTION == 0); object_pages = mmap(NULL, TOTAL_MEMORY, PROT_READ | PROT_WRITE, diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -134,7 +134,11 @@ /* ==================== HELPERS ==================== */ - +#ifdef NDEBUG +#define OPT_ASSERT(cond) do { if (!(cond)) __builtin_unreachable(); } while (0) +#else +#define OPT_ASSERT(cond) assert(cond) +#endif #define LIKELY(x) __builtin_expect(x, true) #define UNLIKELY(x) __builtin_expect(x, false) #define IMPLY(a, b) (!(a) || (b)) diff --git a/duhton/demo/synth.duh b/duhton/demo/synth.duh --- a/duhton/demo/synth.duh +++ b/duhton/demo/synth.duh @@ -68,9 +68,14 @@ (setq RAND_MAX 8) (setq CONFL_IF_BELOW 1) +(print (quote N:) N) +(print (quote RAND_MAX:) RAND_MAX) +(print (quote CONFL_IF_BELOW:) CONFL_IF_BELOW) + +(setq timer (time)) +(print (quote setup-transactions:) timer) (setq shared (container 0)) - (setq rand-list (random_list N RAND_MAX)) (setq i 0) (while (< i N) @@ -85,6 +90,9 @@ (setq i (+ i 1)) ) +(print (quote setup-time-diff:) (- (time) timer)) +(setq timer (time)) (run-transactions) +(print (quote run-time-diff:) (- (time) timer)) (print (quote shared) (get shared)) From noreply at buildbot.pypy.org Fri Feb 7 11:08:03 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 7 Feb 2014 11:08:03 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: skip test on gcc Message-ID: <20140207100803.D2B621C0500@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r69091:b6aba01a6fc2 Date: 2014-02-07 09:07 +0200 http://bitbucket.org/pypy/pypy/changeset/b6aba01a6fc2/ Log: skip test on gcc diff --git a/rpython/translator/platform/test/test_makefile.py b/rpython/translator/platform/test/test_makefile.py --- a/rpython/translator/platform/test/test_makefile.py +++ b/rpython/translator/platform/test/test_makefile.py @@ -4,7 +4,7 @@ from rpython.tool.udir import udir from rpython.translator.tool.cbuild import ExternalCompilationInfo from StringIO import StringIO -import re, sys +import re, sys, py def test_simple_makefile(): m = Makefile() @@ -71,6 +71,8 @@ self.check_res(res, '%d\n' %sum(range(900))) def test_precompiled_headers(self): + if self.platform.cc != 'cl.exe': + py.test.skip("Only MSVC profits from precompiled headers") import time tmpdir = udir.join('precompiled_headers').ensure(dir=1) # Create an eci that should not use precompiled headers @@ -111,15 +113,17 @@ cfiles.append(c_name) if sys.platform == 'win32': clean = ('clean', '', 'for %f in ( $(OBJECTS) $(TARGET) ) do @if exist %f del /f %f') + get_time = time.clock else: clean = ('clean', '', 'rm -f $(OBJECTS) $(TARGET) ') + get_time = time.time #write a 
non-precompiled header makefile mk = self.platform.gen_makefile(cfiles, eci, path=tmpdir) mk.rule(*clean) mk.write() - t0 = time.clock() + t0 = get_time() self.platform.execute_makefile(mk) - t1 = time.clock() + t1 = get_time() t_normal = t1 - t0 self.platform.execute_makefile(mk, extra_opts=['clean']) # Write a super-duper makefile with precompiled headers @@ -127,9 +131,9 @@ headers_to_precompile=cfiles_precompiled_headers,) mk.rule(*clean) mk.write() - t0 = time.clock() + t0 = get_time() self.platform.execute_makefile(mk) - t1 = time.clock() + t1 = get_time() t_precompiled = t1 - t0 res = self.platform.execute(mk.exe_name) self.check_res(res, '%d\n' %sum(range(ncfiles))) From noreply at buildbot.pypy.org Fri Feb 7 11:08:05 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 7 Feb 2014 11:08:05 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: merge default into branch Message-ID: <20140207100805.74DA71C0500@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r69092:b238bc9d56d5 Date: 2014-02-07 09:08 +0200 http://bitbucket.org/pypy/pypy/changeset/b238bc9d56d5/ Log: merge default into branch diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -81,6 +81,7 @@ 'newdict' : 'interp_dict.newdict', 'dictstrategy' : 'interp_dict.dictstrategy', 'set_debug' : 'interp_magic.set_debug', + 'locals_to_fast' : 'interp_magic.locals_to_fast', } if sys.platform == 'win32': interpleveldefs['get_console_cp'] = 'interp_magic.get_console_cp' diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError, wrap_oserror from pypy.interpreter.gateway import unwrap_spec +from pypy.interpreter.pyframe import PyFrame from rpython.rlib.objectmodel import we_are_translated from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.typeobject import MethodCache @@ -111,3 +112,8 @@ @unwrap_spec(estimate=int) def add_memory_pressure(estimate): rgc.add_memory_pressure(estimate) + + at unwrap_spec(w_frame=PyFrame) +def locals_to_fast(space, w_frame): + assert isinstance(w_frame, PyFrame) + w_frame.locals2fast() diff --git a/pypy/module/__pypy__/test/test_locals2fast.py b/pypy/module/__pypy__/test/test_locals2fast.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/test/test_locals2fast.py @@ -0,0 +1,81 @@ +# Tests from Fabio Zadrozny + + +class AppTestLocals2Fast: + """ + Test setting locals in one function from another function + using several approaches. + """ + + def setup_class(cls): + cls.w_save_locals = cls.space.appexec([], """(): + import sys + if '__pypy__' in sys.builtin_module_names: + import __pypy__ + save_locals = __pypy__.locals_to_fast + else: + # CPython version + import ctypes + @staticmethod + def save_locals(frame): + ctypes.pythonapi.PyFrame_LocalsToFast( + ctypes.py_object(frame), ctypes.c_int(0)) + return save_locals + """) + + def test_set_locals_using_save_locals(self): + import sys + def use_save_locals(name, value): + frame = sys._getframe().f_back + locals_dict = frame.f_locals + locals_dict[name] = value + self.save_locals(frame) + def test_method(fn): + x = 1 + # The method 'fn' should attempt to set x = 2 in the current frame. 
+ fn('x', 2) + return x + x = test_method(use_save_locals) + assert x == 2 + + def test_frame_simple_change(self): + import sys + frame = sys._getframe() + a = 20 + frame.f_locals['a'] = 50 + self.save_locals(frame) + assert a == 50 + + def test_frame_co_freevars(self): + import sys + outer_var = 20 + def func(): + frame = sys._getframe() + frame.f_locals['outer_var'] = 50 + self.save_locals(frame) + assert outer_var == 50 + func() + + def test_frame_co_cellvars(self): + import sys + def check_co_vars(a): + frame = sys._getframe() + def function2(): + print a + assert 'a' in frame.f_code.co_cellvars + frame = sys._getframe() + frame.f_locals['a'] = 50 + self.save_locals(frame) + assert a == 50 + check_co_vars(1) + + def test_frame_change_in_inner_frame(self): + import sys + def change(f): + assert f is not sys._getframe() + f.f_locals['a'] = 50 + self.save_locals(f) + frame = sys._getframe() + a = 20 + change(frame) + assert a == 50 diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -333,8 +333,8 @@ loop, = log.loops_by_id("struct") if sys.maxint == 2 ** 63 - 1: extra = """ - i8 = int_lt(i4, -2147483648) - guard_false(i8, descr=...) + i8 = int_ge(i4, -2147483648) + guard_true(i8, descr=...) """ else: extra = "" diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -582,18 +582,18 @@ def consider_op(self, block, opindex): op = block.operations[opindex] - argcells = [self.binding(a) for a in op.args] + try: + argcells = [self.binding(a) for a in op.args] - # let's be careful about avoiding propagated SomeImpossibleValues - # to enter an op; the latter can result in violations of the - # more general results invariant: e.g. if SomeImpossibleValue enters is_ - # is_(SomeImpossibleValue, None) -> SomeBool - # is_(SomeInstance(not None), None) -> SomeBool(const=False) ... - # boom -- in the assert of setbinding() - for arg in argcells: - if isinstance(arg, annmodel.SomeImpossibleValue): - raise BlockedInference(self, op, opindex) - try: + # let's be careful about avoiding propagated SomeImpossibleValues + # to enter an op; the latter can result in violations of the + # more general results invariant: e.g. if SomeImpossibleValue enters is_ + # is_(SomeImpossibleValue, None) -> SomeBool + # is_(SomeInstance(not None), None) -> SomeBool(const=False) ... 
+ # boom -- in the assert of setbinding() + for arg in argcells: + if isinstance(arg, annmodel.SomeImpossibleValue): + raise BlockedInference(self, op, opindex) resultcell = op.consider(self, *argcells) except annmodel.AnnotatorError as e: # note that UnionError is a subclass graph = self.bookkeeper.position_key[0] diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4139,6 +4139,16 @@ a.build_types(f, [str]) assert ("Cannot prove that the object is callable" in exc.value.msg) + def test_UnionError_on_PBC(self): + l = ['a', 1] + def f(x): + l.append(x) + a = self.RPythonAnnotator() + with py.test.raises(annmodel.UnionError) as excinfo: + a.build_types(f, [int]) + assert 'Happened at file' in excinfo.value.source + assert 'Known variable annotations:' in excinfo.value.source + def test_str_format_error(self): def f(s, x): return s.format(x) diff --git a/rpython/tool/sourcetools.py b/rpython/tool/sourcetools.py --- a/rpython/tool/sourcetools.py +++ b/rpython/tool/sourcetools.py @@ -6,7 +6,6 @@ # XXX We should try to generalize and single out one approach to dynamic # XXX code compilation. -import types import sys, os, inspect, new import py @@ -296,40 +295,3 @@ result.func_defaults = f.func_defaults result.func_dict.update(f.func_dict) return result - - -def _convert_const_maybe(x, encoding): - if isinstance(x, str): - return x.decode(encoding) - elif isinstance(x, tuple): - items = [_convert_const_maybe(item, encoding) for item in x] - return tuple(items) - return x - -def with_unicode_literals(fn=None, **kwds): - """Decorator that replace all string literals with unicode literals. - Similar to 'from __future__ import string literals' at function level. - Useful to limit changes in the py3k branch. 
- """ - encoding = kwds.pop('encoding', 'ascii') - if kwds: - raise TypeError("Unexpected keyword argument(s): %s" % ', '.join(kwds.keys())) - def decorator(fn): - co = fn.func_code - new_consts = [] - for const in co.co_consts: - new_consts.append(_convert_const_maybe(const, encoding)) - new_consts = tuple(new_consts) - new_code = types.CodeType(co.co_argcount, co.co_nlocals, co.co_stacksize, - co.co_flags, co.co_code, new_consts, co.co_names, - co.co_varnames, co.co_filename, co.co_name, - co.co_firstlineno, co.co_lnotab) - fn.func_code = new_code - return fn - # - # support the usage of @with_unicode_literals instead of @with_unicode_literals() - if fn is not None: - assert type(fn) is types.FunctionType - return decorator(fn) - else: - return decorator diff --git a/rpython/tool/test/test_sourcetools.py b/rpython/tool/test/test_sourcetools.py --- a/rpython/tool/test/test_sourcetools.py +++ b/rpython/tool/test/test_sourcetools.py @@ -1,7 +1,5 @@ -# -*- encoding: utf-8 -*- -import py from rpython.tool.sourcetools import ( - func_with_new_name, func_renamer, rpython_wrapper, with_unicode_literals) + func_renamer, func_with_new_name, rpython_wrapper) def test_rename(): def f(x, y=5): @@ -57,30 +55,3 @@ ('decorated', 40, 2), ('bar', 40, 2), ] - - -def test_with_unicode_literals(): - @with_unicode_literals() - def foo(): - return 'hello' - assert type(foo()) is unicode - # - @with_unicode_literals - def foo(): - return 'hello' - assert type(foo()) is unicode - # - def foo(): - return 'hello àèì' - py.test.raises(UnicodeDecodeError, "with_unicode_literals(foo)") - # - @with_unicode_literals(encoding='utf-8') - def foo(): - return 'hello àèì' - assert foo() == u'hello àèì' - # - @with_unicode_literals - def foo(): - return ('a', 'b') - assert type(foo()[0]) is unicode - From noreply at buildbot.pypy.org Fri Feb 7 11:08:06 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 7 Feb 2014 11:08:06 +0100 (CET) Subject: [pypy-commit] pypy precompiled-headers: close branch to be merged Message-ID: <20140207100806.A7ED31C0500@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: precompiled-headers Changeset: r69093:c82a167147b1 Date: 2014-02-07 09:11 +0200 http://bitbucket.org/pypy/pypy/changeset/c82a167147b1/ Log: close branch to be merged From noreply at buildbot.pypy.org Fri Feb 7 11:08:08 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 7 Feb 2014 11:08:08 +0100 (CET) Subject: [pypy-commit] pypy default: merge precompiled-headers which uses Makefile with precompiled headers on MSVC Message-ID: <20140207100808.2AF381C0500@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r69094:506a567ec761 Date: 2014-02-07 09:14 +0200 http://bitbucket.org/pypy/pypy/changeset/506a567ec761/ Log: merge precompiled-headers which uses Makefile with precompiled headers on MSVC diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -910,6 +910,8 @@ # implement function callbacks and generate function decls functions = [] pypy_decls = [] + pypy_decls.append("#ifndef _PYPY_PYPY_DECL_H\n") + pypy_decls.append("#define _PYPY_PYPY_DECL_H\n") pypy_decls.append("#ifndef PYPY_STANDALONE\n") pypy_decls.append("#ifdef __cplusplus") pypy_decls.append("extern \"C\" {") @@ -953,6 +955,7 @@ pypy_decls.append("}") pypy_decls.append("#endif") pypy_decls.append("#endif /*PYPY_STANDALONE*/\n") + pypy_decls.append("#endif /*_PYPY_PYPY_DECL_H*/\n") pypy_decl_h = udir.join('pypy_decl.h') 
pypy_decl_h.write('\n'.join(pypy_decls)) diff --git a/rpython/translator/c/dlltool.py b/rpython/translator/c/dlltool.py --- a/rpython/translator/c/dlltool.py +++ b/rpython/translator/c/dlltool.py @@ -21,7 +21,8 @@ entrypoints.append(getfunctionptr(graph)) return entrypoints - def gen_makefile(self, targetdir, exe_name=None): + def gen_makefile(self, targetdir, exe_name=None, + headers_to_precompile=[]): pass # XXX finish def compile(self): @@ -30,6 +31,8 @@ extsymeci = ExternalCompilationInfo(export_symbols=export_symbols) self.eci = self.eci.merge(extsymeci) files = [self.c_source_filename] + self.extrafiles + files += self.eventually_copy(self.eci.separate_module_files) + self.eci.separate_module_files = () oname = self.name self.so_name = self.translator.platform.compile(files, self.eci, standalone=False, diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -260,12 +260,13 @@ defines['PYPY_MAIN_FUNCTION'] = "pypy_main_startup" self.eci = self.eci.merge(ExternalCompilationInfo( export_symbols=["pypy_main_startup", "pypy_debug_file"])) - self.eci, cfile, extra = gen_source(db, modulename, targetdir, - self.eci, defines=defines, - split=self.split) + self.eci, cfile, extra, headers_to_precompile = \ + gen_source(db, modulename, targetdir, + self.eci, defines=defines, split=self.split) self.c_source_filename = py.path.local(cfile) self.extrafiles = self.eventually_copy(extra) - self.gen_makefile(targetdir, exe_name=exe_name) + self.gen_makefile(targetdir, exe_name=exe_name, + headers_to_precompile=headers_to_precompile) return cfile def eventually_copy(self, cfiles): @@ -375,18 +376,22 @@ self._compiled = True return self.executable_name - def gen_makefile(self, targetdir, exe_name=None): - cfiles = [self.c_source_filename] + self.extrafiles + def gen_makefile(self, targetdir, exe_name=None, headers_to_precompile=[]): + module_files = self.eventually_copy(self.eci.separate_module_files) + self.eci.separate_module_files = [] + cfiles = [self.c_source_filename] + self.extrafiles + list(module_files) if exe_name is not None: exe_name = targetdir.join(exe_name) mk = self.translator.platform.gen_makefile( cfiles, self.eci, path=targetdir, exe_name=exe_name, + headers_to_precompile=headers_to_precompile, + no_precompile_cfiles = module_files, shared=self.config.translation.shared) if self.has_profopt(): profopt = self.config.translation.profopt - mk.definition('ABS_TARGET', '$(shell python -c "import sys,os; print os.path.abspath(sys.argv[1])" $(TARGET))') + mk.definition('ABS_TARGET', str(targetdir.join('$(TARGET)'))) mk.definition('DEFAULT_TARGET', 'profopt') mk.definition('PROFOPT', profopt) @@ -511,6 +516,7 @@ def __init__(self, database): self.database = database self.extrafiles = [] + self.headers_to_precompile = [] self.path = None self.namespace = NameManager() @@ -539,6 +545,8 @@ filepath = self.path.join(name) if name.endswith('.c'): self.extrafiles.append(filepath) + if name.endswith('.h'): + self.headers_to_precompile.append(filepath) return filepath.open('w') def getextrafiles(self): @@ -686,11 +694,11 @@ print >> fc, '/***********************************************************/' print >> fc, '/*** Implementations ***/' print >> fc - print >> fc, '#define PYPY_FILE_NAME "%s"' % name print >> fc, '#include "common_header.h"' print >> fc, '#include "structdef.h"' print >> fc, '#include "forwarddecl.h"' print >> fc, '#include "preimpl.h"' + print >> fc, '#define PYPY_FILE_NAME "%s"' % name 
print >> fc, '#include "src/g_include.h"' print >> fc print >> fc, MARKER @@ -732,12 +740,14 @@ print >> f, "#endif" def gen_preimpl(f, database): + f.write('#ifndef _PY_PREIMPLE_H\n#define _PY_PREIMPL_H\n') if database.translator is None or database.translator.rtyper is None: return preimplementationlines = pre_include_code_lines( database, database.translator.rtyper) for line in preimplementationlines: print >> f, line + f.write('#endif /* _PY_PREIMPL_H */\n') def gen_startupcode(f, database): # generate the start-up code and put it into a function @@ -799,6 +809,7 @@ f = filename.open('w') incfilename = targetdir.join('common_header.h') fi = incfilename.open('w') + fi.write('#ifndef _PY_COMMON_HEADER_H\n#define _PY_COMMON_HEADER_H\n') # # Header @@ -811,6 +822,7 @@ eci.write_c_header(fi) print >> fi, '#include "src/g_prerequisite.h"' + fi.write('#endif /* _PY_COMMON_HEADER_H*/\n') fi.close() @@ -822,6 +834,8 @@ sg.set_strategy(targetdir, split) database.prepare_inline_helpers() sg.gen_readable_parts_of_source(f) + headers_to_precompile = sg.headers_to_precompile[:] + headers_to_precompile.insert(0, incfilename) gen_startupcode(f, database) f.close() @@ -834,5 +848,4 @@ eci = add_extra_files(eci) eci = eci.convert_sources_to_files() - files, eci = eci.get_module_files() - return eci, filename, sg.getextrafiles() + list(files) + return eci, filename, sg.getextrafiles(), headers_to_precompile diff --git a/rpython/translator/platform/__init__.py b/rpython/translator/platform/__init__.py --- a/rpython/translator/platform/__init__.py +++ b/rpython/translator/platform/__init__.py @@ -100,7 +100,8 @@ return ExecutionResult(returncode, stdout, stderr) def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False): + shared=False, headers_to_precompile=[], + no_precompile_cfiles = []): raise NotImplementedError("Pure abstract baseclass") def __repr__(self): diff --git a/rpython/translator/platform/darwin.py b/rpython/translator/platform/darwin.py --- a/rpython/translator/platform/darwin.py +++ b/rpython/translator/platform/darwin.py @@ -50,14 +50,17 @@ return ["-Wl,-exported_symbols_list,%s" % (response_file,)] def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False): + shared=False, headers_to_precompile=[], + no_precompile_cfiles = []): # ensure frameworks are passed in the Makefile fs = self._frameworks(eci.frameworks) if len(fs) > 0: # concat (-framework, FrameworkName) pairs self.extra_libs += tuple(map(" ".join, zip(fs[::2], fs[1::2]))) mk = super(Darwin, self).gen_makefile(cfiles, eci, exe_name, path, - shared) + shared=shared, + headers_to_precompile=headers_to_precompile, + no_precompile_cfiles = no_precompile_cfiles) return mk diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py --- a/rpython/translator/platform/posix.py +++ b/rpython/translator/platform/posix.py @@ -83,7 +83,8 @@ return [entry[2:] for entry in out.split()] def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False): + shared=False, headers_to_precompile=[], + no_precompile_cfiles = []): cfiles = self._all_cfiles(cfiles, eci) if path is None: diff --git a/rpython/translator/platform/test/test_distutils.py b/rpython/translator/platform/test/test_distutils.py --- a/rpython/translator/platform/test/test_distutils.py +++ b/rpython/translator/platform/test/test_distutils.py @@ -11,3 +11,7 @@ def test_900_files(self): py.test.skip('Makefiles not suppoerted') + + def test_precompiled_headers(self): + py.test.skip('Makefiles not 
suppoerted') + diff --git a/rpython/translator/platform/test/test_makefile.py b/rpython/translator/platform/test/test_makefile.py --- a/rpython/translator/platform/test/test_makefile.py +++ b/rpython/translator/platform/test/test_makefile.py @@ -1,7 +1,10 @@ from rpython.translator.platform.posix import GnuMakefile as Makefile +from rpython.translator.platform import host +from rpython.tool.udir import udir +from rpython.translator.tool.cbuild import ExternalCompilationInfo from StringIO import StringIO -import re +import re, sys, py def test_simple_makefile(): m = Makefile() @@ -29,3 +32,112 @@ val = s.getvalue() assert not re.search('CC += +xxx', val, re.M) assert re.search('CC += +yyy', val, re.M) + +class TestMakefile(object): + platform = host + strict_on_stderr = True + + def check_res(self, res, expected='42\n'): + assert res.out == expected + if self.strict_on_stderr: + assert res.err == '' + assert res.returncode == 0 + + def test_900_files(self): + txt = '#include \n' + for i in range(900): + txt += 'int func%03d();\n' % i + txt += 'int main() {\n int j=0;' + for i in range(900): + txt += ' j += func%03d();\n' % i + txt += ' printf("%d\\n", j);\n' + txt += ' return 0;};\n' + cfile = udir.join('test_900_files.c') + cfile.write(txt) + cfiles = [cfile] + for i in range(900): + cfile2 = udir.join('implement%03d.c' %i) + cfile2.write(''' + int func%03d() + { + return %d; + } + ''' % (i, i)) + cfiles.append(cfile2) + mk = self.platform.gen_makefile(cfiles, ExternalCompilationInfo(), path=udir) + mk.write() + self.platform.execute_makefile(mk) + res = self.platform.execute(udir.join('test_900_files')) + self.check_res(res, '%d\n' %sum(range(900))) + + def test_precompiled_headers(self): + if self.platform.cc != 'cl.exe': + py.test.skip("Only MSVC profits from precompiled headers") + import time + tmpdir = udir.join('precompiled_headers').ensure(dir=1) + # Create an eci that should not use precompiled headers + eci = ExternalCompilationInfo(include_dirs=[tmpdir]) + main_c = tmpdir.join('main_no_pch.c') + eci.separate_module_files = [main_c] + ncfiles = 10 + nprecompiled_headers = 20 + txt = '' + for i in range(ncfiles): + txt += "int func%03d();\n" % i + txt += "\nint main(int argc, char * argv[])\n" + txt += "{\n int i=0;\n" + for i in range(ncfiles): + txt += " i += func%03d();\n" % i + txt += ' printf("%d\\n", i);\n' + txt += " return 0;\n};\n" + main_c.write(txt) + # Create some large headers with dummy functions to be precompiled + cfiles_precompiled_headers = [] + for i in range(nprecompiled_headers): + pch_name =tmpdir.join('pcheader%03d.h' % i) + txt = '#ifndef PCHEADER%03d_H\n#define PCHEADER%03d_H\n' %(i, i) + for j in range(3000): + txt += "int pcfunc%03d_%03d();\n" %(i, j) + txt += '#endif' + pch_name.write(txt) + cfiles_precompiled_headers.append(pch_name) + # Create some cfiles with headers we want precompiled + cfiles = [] + for i in range(ncfiles): + c_name =tmpdir.join('implement%03d.c' % i) + txt = '' + for pch_name in cfiles_precompiled_headers: + txt += '#include "%s"\n' % pch_name + txt += "int func%03d(){ return %d;};\n" % (i, i) + c_name.write(txt) + cfiles.append(c_name) + if sys.platform == 'win32': + clean = ('clean', '', 'for %f in ( $(OBJECTS) $(TARGET) ) do @if exist %f del /f %f') + get_time = time.clock + else: + clean = ('clean', '', 'rm -f $(OBJECTS) $(TARGET) ') + get_time = time.time + #write a non-precompiled header makefile + mk = self.platform.gen_makefile(cfiles, eci, path=tmpdir) + mk.rule(*clean) + mk.write() + t0 = get_time() + 
self.platform.execute_makefile(mk) + t1 = get_time() + t_normal = t1 - t0 + self.platform.execute_makefile(mk, extra_opts=['clean']) + # Write a super-duper makefile with precompiled headers + mk = self.platform.gen_makefile(cfiles, eci, path=tmpdir, + headers_to_precompile=cfiles_precompiled_headers,) + mk.rule(*clean) + mk.write() + t0 = get_time() + self.platform.execute_makefile(mk) + t1 = get_time() + t_precompiled = t1 - t0 + res = self.platform.execute(mk.exe_name) + self.check_res(res, '%d\n' %sum(range(ncfiles))) + print "precompiled haeder 'make' time %.2f, non-precompiled header time %.2f" %(t_precompiled, t_normal) + assert t_precompiled < t_normal * 0.5 + + diff --git a/rpython/translator/platform/test/test_platform.py b/rpython/translator/platform/test/test_platform.py --- a/rpython/translator/platform/test/test_platform.py +++ b/rpython/translator/platform/test/test_platform.py @@ -59,34 +59,6 @@ res = self.platform.execute(executable) self.check_res(res) - def test_900_files(self): - txt = '#include \n' - for i in range(900): - txt += 'int func%03d();\n' % i - txt += 'int main() {\n int j=0;' - for i in range(900): - txt += ' j += func%03d();\n' % i - txt += ' printf("%d\\n", j);\n' - txt += ' return 0;};\n' - cfile = udir.join('test_900_files.c') - cfile.write(txt) - cfiles = [cfile] - for i in range(900): - cfile2 = udir.join('implement%03d.c' %i) - cfile2.write(''' - int func%03d() - { - return %d; - } - ''' % (i, i)) - cfiles.append(cfile2) - mk = self.platform.gen_makefile(cfiles, ExternalCompilationInfo(), path=udir) - mk.write() - self.platform.execute_makefile(mk) - res = self.platform.execute(udir.join('test_900_files')) - self.check_res(res, '%d\n' %sum(range(900))) - - def test_nice_errors(self): cfile = udir.join('test_nice_errors.c') cfile.write('') diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -249,7 +249,8 @@ def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False): + shared=False, headers_to_precompile=[], + no_precompile_cfiles = []): cfiles = self._all_cfiles(cfiles, eci) if path is None: @@ -318,15 +319,54 @@ if self.x64: definitions.append(('_WIN64', '1')) + rules = [ + ('all', '$(DEFAULT_TARGET)', []), + ('.asm.obj', '', '$(MASM) /nologo /Fo$@ /c $< $(INCLUDEDIRS)'), + ] + + if len(headers_to_precompile)>0: + stdafx_h = path.join('stdafx.h') + txt = '#ifndef PYPY_STDAFX_H\n' + txt += '#define PYPY_STDAFX_H\n' + txt += '\n'.join(['#include "' + m.pathrel(c) + '"' for c in headers_to_precompile]) + txt += '\n#endif\n' + stdafx_h.write(txt) + stdafx_c = path.join('stdafx.c') + stdafx_c.write('#include "stdafx.h"\n') + definitions.append(('CREATE_PCH', '/Ycstdafx.h /Fpstdafx.pch /FIstdafx.h')) + definitions.append(('USE_PCH', '/Yustdafx.h /Fpstdafx.pch /FIstdafx.h')) + rules.append(('$(OBJECTS)', 'stdafx.pch', [])) + rules.append(('stdafx.pch', 'stdafx.h', + '$(CC) stdafx.c /c /nologo $(CFLAGS) $(CFLAGSEXTRA) ' + '$(CREATE_PCH) $(INCLUDEDIRS)')) + rules.append(('.c.obj', '', + '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) $(USE_PCH) ' + '/Fo$@ /c $< $(INCLUDEDIRS)')) + #Do not use precompiled headers for some files + #rules.append((r'{..\module_cache}.c{..\module_cache}.obj', '', + # '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) /Fo$@ /c $< $(INCLUDEDIRS)')) + # nmake cannot handle wildcard target specifications, so we must + # create a rule for compiling each file from eci since they cannot use + # precompiled 
headers :( + no_precompile = [] + for f in list(no_precompile_cfiles): + f = m.pathrel(py.path.local(f)) + if f not in no_precompile and f.endswith('.c'): + no_precompile.append(f) + target = f[:-1] + 'obj' + rules.append((target, f, + '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) ' + '/Fo%s /c %s $(INCLUDEDIRS)' %(target, f))) + + else: + rules.append(('.c.obj', '', + '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) ' + '/Fo$@ /c $< $(INCLUDEDIRS)')) + + for args in definitions: m.definition(*args) - rules = [ - ('all', '$(DEFAULT_TARGET)', []), - ('.c.obj', '', '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) /Fo$@ /c $< $(INCLUDEDIRS)'), - ('.asm.obj', '', '$(MASM) /nologo /Fo$@ /c $< $(INCLUDEDIRS)'), - ] - for rule in rules: m.rule(*rule) @@ -392,6 +432,25 @@ self._handle_error(returncode, stdout, stderr, path.join('make')) +class WinDefinition(posix.Definition): + def write(self, f): + def write_list(prefix, lst): + lst = lst or [''] + for i, fn in enumerate(lst): + print >> f, prefix, fn, + if i < len(lst)-1: + print >> f, '\\' + else: + print >> f + prefix = ' ' * len(prefix) + name, value = self.name, self.value + if isinstance(value, str): + f.write('%s = %s\n' % (name, value)) + else: + write_list('%s =' % (name,), value) + f.write('\n') + + class NMakefile(posix.GnuMakefile): def write(self, out=None): # nmake expands macros when it parses rules. @@ -410,6 +469,14 @@ if out is None: f.close() + def definition(self, name, value): + defs = self.defs + defn = WinDefinition(name, value) + if name in defs: + self.lines[defs[name]] = defn + else: + defs[name] = len(self.lines) + self.lines.append(defn) class MingwPlatform(posix.BasePosix): name = 'mingw32' From noreply at buildbot.pypy.org Fri Feb 7 11:08:09 2014 From: noreply at buildbot.pypy.org (mattip) Date: Fri, 7 Feb 2014 11:08:09 +0100 (CET) Subject: [pypy-commit] pypy default: document merged branch Message-ID: <20140207100809.758AB1C0500@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r69095:62009b849a14 Date: 2014-02-07 12:06 +0200 http://bitbucket.org/pypy/pypy/changeset/62009b849a14/ Log: document merged branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -57,3 +57,10 @@ mapdicts keep track of whether or not an attribute is every assigned to multiple times. If it's only assigned once then an elidable lookup is used when possible. + +.. branch: precompiled-headers +Create a Makefile using precompiled headers for MSVC platforms. +The downside is a messy nmake-compatible Makefile. Since gcc shows minimal +speedup, it was not implemented. 
+ + From noreply at buildbot.pypy.org Fri Feb 7 13:12:41 2014 From: noreply at buildbot.pypy.org (timfel) Date: Fri, 7 Feb 2014 13:12:41 +0100 (CET) Subject: [pypy-commit] lang-smalltalk 64bit-c2: graft changes from 64bit branch onto master Message-ID: <20140207121241.EA69C1C0153@cobra.cs.uni-duesseldorf.de> Author: Tim Felgentreff Branch: 64bit-c2 Changeset: r599:8343cdb32ec6 Date: 2014-02-07 13:12 +0100 http://bitbucket.org/pypy/lang-smalltalk/changeset/8343cdb32ec6/ Log: graft changes from 64bit branch onto master diff --git a/spyvm/constants.py b/spyvm/constants.py --- a/spyvm/constants.py +++ b/spyvm/constants.py @@ -144,7 +144,7 @@ "timerSemaphore" : SO_TIMER_SEMAPHORE, } -LONG_BIT = 32 +from rpython.rlib.rarithmetic import LONG_BIT TAGGED_MAXINT = 2 ** (LONG_BIT - 2) - 1 TAGGED_MININT = -2 ** (LONG_BIT - 2) diff --git a/spyvm/display.py b/spyvm/display.py --- a/spyvm/display.py +++ b/spyvm/display.py @@ -1,4 +1,3 @@ -from rpython.rlib.rarithmetic import r_uint from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.runicode import unicode_encode_utf_8 from rpython.rlib import jit diff --git a/spyvm/model.py b/spyvm/model.py --- a/spyvm/model.py +++ b/spyvm/model.py @@ -15,15 +15,23 @@ that create W_PointersObjects of correct size with attached shadows. """ import sys, weakref -from spyvm import constants, error +from spyvm import constants, error, system from rpython.rlib import rrandom, objectmodel, jit, signature -from rpython.rlib.rarithmetic import intmask, r_uint, r_int +from rpython.rlib.rarithmetic import intmask, r_uint32, r_uint, r_int from rpython.tool.pairtype import extendabletype from rpython.rlib.objectmodel import instantiate, compute_hash from rpython.rtyper.lltypesystem import lltype, rffi from rsdl import RSDL, RSDL_helper + +if system.IS_64BIT: + from rpython.rlib.rarithmetic import widen +else: + def widen(x): + return x + + class W_Object(object): """Root of Squeak model, abstract.""" _attrs_ = [] # no RPython-level instance variables allowed in W_Object @@ -168,7 +176,7 @@ return isinstance(self.value, int) and self.value < 0x8000 def lshift(self, space, shift): - from rpython.rlib.rarithmetic import ovfcheck, intmask, r_uint + from rpython.rlib.rarithmetic import ovfcheck, intmask # shift > 0, therefore the highest bit of upperbound is not set, # i.e. 
upperbound is positive upperbound = intmask(r_uint(-1) >> shift) @@ -294,7 +302,6 @@ return space.wrap_int((self.value >> shift) & mask) def unwrap_uint(self, space): - from rpython.rlib.rarithmetic import r_uint return r_uint(self.value) def clone(self, space): @@ -396,11 +403,11 @@ from rpython.rlib.rstruct.ieee import float_pack r = float_pack(self.value, 8) # C double if n0 == 0: - return space.wrap_uint(r_uint(intmask(r >> 32))) + return space.wrap_uint(r_uint32(intmask(r >> 32))) else: # bounds-check for primitive access is done in the primitive assert n0 == 1 - return space.wrap_uint(r_uint(intmask(r))) + return space.wrap_uint(r_uint32(intmask(r))) def store(self, space, n0, w_obj): from rpython.rlib.rstruct.ieee import float_unpack, float_pack @@ -757,14 +764,19 @@ byte0 = ord(self.getchar(byte_index0)) byte1 = ord(self.getchar(byte_index0 + 1)) << 8 if byte1 & 0x8000 != 0: - byte1 = intmask(r_uint(0xffff0000) | r_uint(byte1)) + byte1 = intmask(widen(r_uint32(0xffff0000)) | widen(r_uint32(byte1))) return space.wrap_int(byte1 | byte0) def short_atput0(self, space, index0, w_value): from rpython.rlib.rarithmetic import int_between i_value = space.unwrap_int(w_value) - if not int_between(-0x8000, i_value, 0x8000): - raise error.PrimitiveFailedError + if constants.LONG_BIT == 64: + if (not int_between(0, i_value, 0x8000) and + not int_between(0, i_value ^ (0xffffffff), 0x8000)): + raise error.PrimitiveFailedError + else: + if not int_between(-0x8000, i_value, 0x8000): + raise error.PrimitiveFailedError byte_index0 = index0 * 2 byte0 = i_value & 0xff byte1 = (i_value & 0xff00) >> 8 @@ -897,20 +909,25 @@ else: short = (word >> 16) & 0xffff if short & 0x8000 != 0: - short = r_uint(0xffff0000) | r_uint(short) + short = widen(r_uint32(0xffff0000)) | short return space.wrap_int(intmask(short)) def short_atput0(self, space, index0, w_value): from rpython.rlib.rarithmetic import int_between i_value = space.unwrap_int(w_value) - if not int_between(-0x8000, i_value, 0x8000): - raise error.PrimitiveFailedError + if constants.LONG_BIT == 64: + if (not int_between(0, i_value, 0x8000) and + not int_between(0, i_value ^ (0xffffffff), 0x8000)): + raise error.PrimitiveFailedError + else: + if not int_between(-0x8000, i_value, 0x8000): + raise error.PrimitiveFailedError word_index0 = index0 / 2 - word = intmask(self.getword(word_index0)) + word = intmask(r_uint32(self.getword(word_index0))) if index0 % 2 == 0: - word = intmask(r_uint(word) & r_uint(0xffff0000)) | (i_value & 0xffff) + word = intmask(widen(r_uint32(word)) & widen(r_uint32(0xffff0000))) | (i_value & 0xffff) else: - word = (i_value << 16) | (word & 0xffff) + word = intmask(r_uint32((i_value << 16) | (word & 0xffff))) value = r_uint(word) self.setword(word_index0, value) @@ -977,7 +994,7 @@ class W_DisplayBitmap(W_AbstractObjectWithClassReference): _attrs_ = ['pixelbuffer', '_realsize', '_real_depth_buffer', 'display', '_depth'] - _immutable_fields_ = ['_realsize', 'display', '_depth'] + _immutable_fields_ = ['_realsize', 'display', '_depth', '_real_depth_buffer'] @staticmethod def create(space, w_class, size, depth, display): @@ -992,7 +1009,7 @@ def __init__(self, space, w_class, size, depth, display): W_AbstractObjectWithClassReference.__init__(self, space, w_class) - self._real_depth_buffer = lltype.malloc(rffi.CArray(rffi.UINT), size, flavor='raw') + self._real_depth_buffer = [r_uint(0)] * size self._realsize = size self.display = display self._depth = depth @@ -1003,7 +1020,7 @@ def atput0(self, space, index0, w_value): word = 
space.unwrap_uint(w_value) - self.setword(index0, word) + self.setword(index0, r_uint(word)) def flush_to_screen(self): self.display.flip() @@ -1028,7 +1045,7 @@ def setword(self, n, word): self._real_depth_buffer[n] = word - self.display.get_pixelbuffer()[n] = word + self.display.get_pixelbuffer()[n] = r_uint32(word) def is_array_object(self): return True @@ -1062,13 +1079,13 @@ ((msb & mask) << 11) ) - self.display.get_pixelbuffer()[n] = r_uint(lsb | (msb << 16)) + self.display.get_pixelbuffer()[n] = r_uint32(lsb | (msb << 16)) class W_8BitDisplayBitmap(W_DisplayBitmap): def setword(self, n, word): self._real_depth_buffer[n] = word - self.display.get_pixelbuffer()[n] = r_uint( + self.display.get_pixelbuffer()[n] = r_uint32( (word >> 24) | ((word >> 8) & 0x0000ff00) | ((word << 8) & 0x00ff0000) | @@ -1081,7 +1098,7 @@ @jit.unroll_safe def setword(self, n, word): self._real_depth_buffer[n] = word - word = r_uint(word) + nWord = r_uint(word) pos = self.compute_pos(n) assert self._depth <= 4 rshift = 32 - self._depth @@ -1090,10 +1107,10 @@ return mapword = r_uint(0) for i in xrange(4): - pixel = r_uint(word) >> rshift + pixel = r_uint(nWord) >> rshift mapword |= (r_uint(pixel) << (i * 8)) - word <<= self._depth - self.display.get_pixelbuffer()[pos] = mapword + nWord <<= self._depth + self.display.get_pixelbuffer()[pos] = r_uint32(mapword) pos += 1 def compute_pos(self, n): diff --git a/spyvm/objspace.py b/spyvm/objspace.py --- a/spyvm/objspace.py +++ b/spyvm/objspace.py @@ -1,6 +1,6 @@ import os -from spyvm import constants, model, shadow, wrapper +from spyvm import constants, model, shadow, wrapper, system from spyvm.error import UnwrappingError, WrappingError, PrimitiveFailedError from rpython.rlib import jit, rpath from rpython.rlib.objectmodel import instantiate, specialize @@ -15,7 +15,7 @@ self.make_bootstrap_objects() def find_executable(self, executable): - if os.sep in executable or (os.name == "nt" and ":" in executable): + if os.sep in executable or (system.IS_WINDOWS and ":" in executable): return executable path = os.environ.get("PATH") if path: @@ -198,9 +198,8 @@ # methods for wrapping and unwrapping stuff def wrap_int(self, val): - from spyvm import constants - assert isinstance(val, int) - # we don't do tagging + if not isinstance(val, int): + raise WrappingError return model.W_SmallInteger(val) def wrap_uint(self, val): diff --git a/spyvm/plugins/bitblt.py b/spyvm/plugins/bitblt.py --- a/spyvm/plugins/bitblt.py +++ b/spyvm/plugins/bitblt.py @@ -17,7 +17,7 @@ raise PrimitiveFailedError("BitBlt primitive not called in BitBlt object!") # only allow combinationRules 0-41 - combinationRule = interp.space.unwrap_positive_32bit_int(w_rcvr.fetch(interp.space, 3)) + combinationRule = interp.space.unwrap_int(w_rcvr.fetch(interp.space, 3)) if combinationRule > 41: raise PrimitiveFailedError("Missing combinationRule %d" % combinationRule) diff --git a/spyvm/primitives.py b/spyvm/primitives.py --- a/spyvm/primitives.py +++ b/spyvm/primitives.py @@ -5,7 +5,7 @@ from spyvm import model, shadow from spyvm import constants, display from spyvm.error import PrimitiveFailedError, \ - PrimitiveNotYetWrittenError + PrimitiveNotYetWrittenError, WrappingError from spyvm import wrapper from rpython.rlib import rarithmetic, rfloat, unroll, jit @@ -296,9 +296,13 @@ @expose_primitive(FLOAT_TRUNCATED, unwrap_spec=[float]) def func(interp, s_frame, f): try: - return interp.space.wrap_int(rarithmetic.ovfcheck_float_to_int(f)) + integer = rarithmetic.ovfcheck_float_to_int(f) except OverflowError: raise 
PrimitiveFailedError + try: + return interp.space.wrap_int(integer) # in 64bit VMs, this may fail + except WrappingError: + raise PrimitiveFailedError @expose_primitive(FLOAT_TIMES_TWO_POWER, unwrap_spec=[float, int]) def func(interp, s_frame, rcvr, arg): @@ -647,17 +651,22 @@ def func(interp, s_frame, argcount, s_method): from spyvm.interpreter import Return w_rcvr = s_frame.peek(0) - try: - s_frame._sendSelfSelector(interp.image.w_simulateCopyBits, 0, interp) - except Return: - w_dest_form = w_rcvr.fetch(interp.space, 0) - w_display = interp.space.objtable['w_display'] - if w_dest_form.is_same_object(w_display): - w_bitmap = w_display.fetch(interp.space, 0) - assert isinstance(w_bitmap, model.W_DisplayBitmap) - w_bitmap.flush_to_screen() - return w_rcvr - except shadow.MethodNotFound: + w_display = interp.space.objtable['w_display'] + if interp.space.unwrap_int(w_display.fetch(interp.space, 3)) == 1: + try: + s_frame._sendSelfSelector(interp.image.w_simulateCopyBits, 0, interp) + except Return: + w_dest_form = w_rcvr.fetch(interp.space, 0) + if w_dest_form.is_same_object(w_display): + w_bitmap = w_display.fetch(interp.space, 0) + assert isinstance(w_bitmap, model.W_DisplayBitmap) + w_bitmap.flush_to_screen() + return w_rcvr + except shadow.MethodNotFound: + from spyvm.plugins.bitblt import BitBltPlugin + BitBltPlugin.call("primitiveCopyBits", interp, s_frame, argcount, s_method) + return w_rcvr + else: from spyvm.plugins.bitblt import BitBltPlugin BitBltPlugin.call("primitiveCopyBits", interp, s_frame, argcount, s_method) return w_rcvr @@ -872,6 +881,15 @@ w_rcvr.s_class = w_arg.s_class + +if constants.LONG_BIT == 32: + def callIProxy(signature, interp, s_frame, argcount, s_method): + from spyvm.interpreter_proxy import IProxy + return IProxy.call(signature, interp, s_frame, argcount, s_method) +else: + def callIProxy(signature, interp, s_frame, argcount, s_method): + raise PrimitiveFailedError + @expose_primitive(EXTERNAL_CALL, clean_stack=False, no_result=True, compiled_method=True) def func(interp, s_frame, argcount, s_method): space = interp.space @@ -898,8 +916,7 @@ from spyvm.plugins.vmdebugging import DebuggingPlugin return DebuggingPlugin.call(signature[1], interp, s_frame, argcount, s_method) else: - from spyvm.interpreter_proxy import IProxy - return IProxy.call(signature, interp, s_frame, argcount, s_method) + return callIProxy(signature, interp, s_frame, argcount, s_method) raise PrimitiveFailedError @expose_primitive(COMPILED_METHOD_FLUSH_CACHE, unwrap_spec=[object]) @@ -1074,7 +1091,7 @@ sec_since_epoch = rarithmetic.r_uint(time.time()) # XXX: overflow check necessary? 
sec_since_1901 = sec_since_epoch + secs_between_1901_and_1970 - return interp.space.wrap_uint(sec_since_1901) + return interp.space.wrap_uint(rarithmetic.r_uint(sec_since_1901)) #____________________________________________________________________________ @@ -1118,7 +1135,7 @@ w_arg.setchar(i, chr(new_value)) elif isinstance(w_arg, model.W_WordsObject) or isinstance(w_arg, model.W_DisplayBitmap): for i in xrange(w_arg.size()): - w_arg.setword(i, new_value) + w_arg.setword(i, rarithmetic.r_uint(new_value)) else: raise PrimitiveFailedError return w_arg diff --git a/spyvm/squeakimage.py b/spyvm/squeakimage.py --- a/spyvm/squeakimage.py +++ b/spyvm/squeakimage.py @@ -383,12 +383,11 @@ self.startup_time = time.time() def run_spy_hacks(self, space): - pass - # w_display = space.objtable["w_display"] - # if w_display is not None and w_display is not space.w_nil: - # if space.unwrap_int(w_display.fetch(space, 3)) < 8: - # # non-native indexed color depth not well supported - # w_display.store(space, 3, space.wrap_int(8)) + if constants.LONG_BIT == 64: + w_display = space.objtable["w_display"] + if w_display is not None and w_display is not space.w_nil: + if space.unwrap_int(w_display.fetch(space, 3)) < 32: + w_display.store(space, 3, space.wrap_int(32)) def find_symbol(self, space, reader, symbol): w_dnu = self.special(constants.SO_DOES_NOT_UNDERSTAND) diff --git a/targetimageloadingsmalltalk.py b/targetimageloadingsmalltalk.py --- a/targetimageloadingsmalltalk.py +++ b/targetimageloadingsmalltalk.py @@ -6,9 +6,8 @@ from rpython.rlib import jit, rpath from spyvm import model, interpreter, squeakimage, objspace, wrapper,\ - error, shadow + error, shadow, system from spyvm.tool.analyseimage import create_image -from spyvm.interpreter_proxy import VirtualMachine def _run_benchmark(interp, number, benchmark, arg): @@ -222,6 +221,9 @@ # driver.config.translation.gc = "stmgc" # driver.config.translation.gcrootfinder = "stm" from rpython.rlib import rgc + driver.exe_name = "rsqueakvm" + if system.IS_64BIT: + driver.exe_name += "-64" if hasattr(rgc, "stm_is_enabled"): driver.config.translation.stm = True driver.config.translation.thread = True From noreply at buildbot.pypy.org Fri Feb 7 13:43:30 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 7 Feb 2014 13:43:30 +0100 (CET) Subject: [pypy-commit] stmgc c7: Try to be extra careful around the "lock" in nursery_current Message-ID: <20140207124330.ABD851C0153@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7 Changeset: r707:e6bce725abb4 Date: 2014-02-07 13:43 +0100 http://bitbucket.org/pypy/stmgc/changeset/e6bce725abb4/ Log: Try to be extra careful around the "lock" in nursery_current diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -175,7 +175,7 @@ _stm_restore_local_state(thread_num); _STM_TL->nursery_current = (localchar_t*)(FIRST_NURSERY_PAGE * 4096); - memset((void*)real_address((object_t*)CLEAR_SYNC_REQUEST(_STM_TL->nursery_current)), + memset((void*)real_address((object_t*)NURSERY_CURRENT(_STM_TL)), 0x0, (FIRST_AFTER_NURSERY_PAGE - FIRST_NURSERY_PAGE) * 4096); /* clear nursery */ _STM_TL->shadow_stack = NULL; diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -5,6 +5,20 @@ #include #include #include +#include + +#if LONG_MAX == 2147483647 +# error "Requires a 64-bit environment" +#endif + +#if BYTE_ORDER == 1234 +# define LENDIAN 1 // little endian +#elif BYTE_ORDER == 4321 +# define LENDIAN 0 // big endian +#else +# error "Unsupported endianness" +#endif + #define NB_PAGES (6*256*256) // 
6*256MB #define NB_THREADS 2 diff --git a/c7/nursery.c b/c7/nursery.c --- a/c7/nursery.c +++ b/c7/nursery.c @@ -166,8 +166,8 @@ /* clear nursery */ localchar_t *nursery_base = (localchar_t*)(FIRST_NURSERY_PAGE * 4096); memset((void*)real_address((object_t*)nursery_base), 0x0, - CLEAR_SYNC_REQUEST(_STM_TL->nursery_current) - nursery_base); - _STM_TL->nursery_current = nursery_base; + NURSERY_CURRENT(_STM_TL) - nursery_base); + SET_NURSERY_CURRENT(_STM_TL, nursery_base); } void _stm_minor_collect() @@ -180,9 +180,9 @@ localchar_t *new_current = _STM_TL->nursery_current; while (((uintptr_t)new_current > FIRST_AFTER_NURSERY_PAGE * 4096) - && _STM_TL->nursery_current_halfwords[1]) { + && _STM_TL->nursery_current_halfwords[LENDIAN]) { - _STM_TL->nursery_current_halfwords[1] = 0; + _STM_TL->nursery_current_halfwords[LENDIAN] = 0; _stm_start_safe_point(0); /* no collect, it would mess with nursery_current */ _stm_stop_safe_point(0); @@ -196,15 +196,16 @@ } /* reset nursery_current (left invalid by the caller) */ - _STM_TL->nursery_current -= size; + SET_NURSERY_CURRENT(_STM_TL, new_current - size); minor_collect(); /* XXX: if we_want_major_collect: acquire EXCLUSIVE & COLLECT lock and do it */ - localchar_t *current = CLEAR_SYNC_REQUEST(_STM_TL->nursery_current); - _STM_TL->nursery_current = current + size; + localchar_t *current = NURSERY_CURRENT(_STM_TL); + assert((uintptr_t)current + size <= FIRST_AFTER_NURSERY_PAGE * 4096); + SET_NURSERY_CURRENT(_STM_TL, current + size); return current; } @@ -240,7 +241,7 @@ localchar_t *current = _STM_TL->nursery_current; localchar_t *new_current = current + size; - _STM_TL->nursery_current = new_current; + SET_NURSERY_CURRENT(_STM_TL, new_current); if ((uintptr_t)new_current > FIRST_AFTER_NURSERY_PAGE * 4096) { current = collect_and_reserve(size); @@ -321,8 +322,8 @@ /* clear the nursery */ localchar_t *nursery_base = (localchar_t*)(FIRST_NURSERY_PAGE * 4096); memset((void*)real_address((object_t*)nursery_base), 0x0, - CLEAR_SYNC_REQUEST(_STM_TL->nursery_current) - nursery_base); - _STM_TL->nursery_current = nursery_base; + NURSERY_CURRENT(_STM_TL) - nursery_base); + SET_NURSERY_CURRENT(_STM_TL, nursery_base); /* reset the alloc-pages to the state at the start of the transaction */ diff --git a/c7/stmsync.c b/c7/stmsync.c --- a/c7/stmsync.c +++ b/c7/stmsync.c @@ -108,7 +108,7 @@ assert(!_STM_TL->active); /* assert(!_STM_TL->need_abort); may happen, but will be cleared by start_transaction() */ - assert(CLEAR_SYNC_REQUEST(_STM_TL->nursery_current) == (localchar_t*)(FIRST_NURSERY_PAGE * 4096)); + assert(NURSERY_CURRENT(_STM_TL) == (localchar_t*)(FIRST_NURSERY_PAGE * 4096)); } void _stm_acquire_tl_segment() @@ -301,6 +301,6 @@ void stm_request_safe_point(int thread_num) { struct _thread_local1_s* other_tl = _stm_dbg_get_tl(thread_num); - other_tl->nursery_current_halfwords[1] = 1; + other_tl->nursery_current_halfwords[LENDIAN] = 1; } diff --git a/c7/stmsync.h b/c7/stmsync.h --- a/c7/stmsync.h +++ b/c7/stmsync.h @@ -20,5 +20,11 @@ void stm_request_safe_point(int thread_num); -#define CLEAR_SYNC_REQUEST(nursery_current) ((localchar_t*)(((uintptr_t)(nursery_current)) & 0xffffffff)) +#define NURSERY_CURRENT(tls) \ + ((localchar_t *)(uintptr_t)( \ + (tls)->nursery_current_halfwords[1-LENDIAN])) + +#define SET_NURSERY_CURRENT(tls, new_value) \ + ((tls)->nursery_current_halfwords[1-LENDIAN] = \ + (uintptr_t)(new_value)) From noreply at buildbot.pypy.org Fri Feb 7 15:46:17 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 7 Feb 2014 15:46:17 +0100 (CET) 
Subject: [pypy-commit] stmgc c7: another comment Message-ID: <20140207144617.2A9A21C02D4@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r709:1a1f89d3003c Date: 2014-02-07 15:46 +0100 http://bitbucket.org/pypy/stmgc/changeset/1a1f89d3003c/ Log: another comment diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -200,6 +200,10 @@ static inline void write_fence(void) { #if defined(__amd64__) || defined(__i386__) + /* this is only a compiler barrier + use __sync_synchronize(...) or other __sync_OPs that + are locked by the CPU if you need to prevent + loads to be moved before stores to different locations */ asm("" : : : "memory"); #else # error "Define write_fence() for your architecture" From noreply at buildbot.pypy.org Fri Feb 7 15:46:16 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 7 Feb 2014 15:46:16 +0100 (CET) Subject: [pypy-commit] stmgc c7: add a comment Message-ID: <20140207144616.10F9D1C02D4@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7 Changeset: r708:e281aed3728a Date: 2014-02-07 15:35 +0100 http://bitbucket.org/pypy/stmgc/changeset/e281aed3728a/ Log: add a comment diff --git a/c7/core.c b/c7/core.c --- a/c7/core.c +++ b/c7/core.c @@ -388,6 +388,9 @@ _STM_TL->need_abort = 0; /* global_age is approximate -> no synchronization required */ _STM_TL->age = global_age++; + /* XXX: only increment our age on commit, not abort? that way we + are more likely to succeed next time, thus prevent starvation + (may be fairer, but should probably be done per pthread??) */ fprintf(stderr, "%c", 'S'+_STM_TL->thread_num*32); } diff --git a/c7/pages.c b/c7/pages.c --- a/c7/pages.c +++ b/c7/pages.c @@ -25,7 +25,7 @@ uintptr_t index_page_never_used; uint8_t flag_page_private[NB_PAGES]; -uint8_t list_lock = 0; +volatile uint8_t list_lock = 0; struct stm_list_s *single_page_list; From noreply at buildbot.pypy.org Fri Feb 7 17:26:54 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 7 Feb 2014 17:26:54 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: first draft for the abstract Message-ID: <20140207162654.DEA3A1C0500@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: extradoc Changeset: r5145:dc92848ce8b4 Date: 2014-02-07 17:26 +0100 http://bitbucket.org/pypy/extradoc/changeset/dc92848ce8b4/ Log: first draft for the abstract diff --git a/talk/ep2013/status/abstract.rst b/talk/ep2013/status/abstract.rst new file mode 100644 --- /dev/null +++ b/talk/ep2013/status/abstract.rst @@ -0,0 +1,16 @@ +PyPy status talk (a.k.a.: no no, PyPy is not dead) +=================================================== + +In this talk we will present the current status of PyPy, with a particular +focus on what happened in the last two years, since the last EuroPython PyPy +talk. 
We will give a brief overview of the current speed and the on-going +development efforts, including but not limited to: + +- the status of the JIT and PyPy performance in general + +- the improvements on the Garbage Collector + +- the status of numpy and Python 3 compatibility + +- the status and ideas of the STM (Software Transactional Memory) research + project, which aims to solve the GIL problem From noreply at buildbot.pypy.org Fri Feb 7 17:29:52 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 7 Feb 2014 17:29:52 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: bah Message-ID: <20140207162952.853F61C02D4@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: extradoc Changeset: r5146:40c945b881d2 Date: 2014-02-07 17:29 +0100 http://bitbucket.org/pypy/extradoc/changeset/40c945b881d2/ Log: bah diff --git a/talk/ep2013/status/abstract.rst b/talk/ep2014/status/abstract.rst rename from talk/ep2013/status/abstract.rst rename to talk/ep2014/status/abstract.rst From noreply at buildbot.pypy.org Fri Feb 7 18:10:52 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 7 Feb 2014 18:10:52 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: STM talk abstract, first version Message-ID: <20140207171052.6771A1C02D4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5147:d1c12ea246a5 Date: 2014-02-07 18:10 +0100 http://bitbucket.org/pypy/extradoc/changeset/d1c12ea246a5/ Log: STM talk abstract, first version diff --git a/talk/ep2014/stm/abstract.rst b/talk/ep2014/stm/abstract.rst new file mode 100644 --- /dev/null +++ b/talk/ep2014/stm/abstract.rst @@ -0,0 +1,41 @@ +Using All These Cores: Transactional Memory in PyPy +=================================================== + +Abstract +-------- + +PyPy, the Python implementation written in Python, experimentally +supports Transactional Memory (TM). The strength of TM is to enable a +novel use of multithreading, inheritently safe, and not limited to +special use cases like other approaches. This talk will focus on how it +works under the hood. + + +Description +----------- + +PyPy is a fast alternative Python implementation. Software +Transactional Memory (STM) is a current academic research topic. Put +the two together --brew for a couple of years-- and we get a version of +PyPy that runs on multiple cores, without the infamous Global +Interpreter Lock (GIL). + +The current research is based on a recent new insight that promises to +give really good performance. The speed of STM is generally measured by +two factors: the ability to scale with the number of CPUs, and the +amount of overhead when compared with other approach in a single CPU (in +this case, with the regular PyPy with the GIL). Scaling is not really a +problem here, but single-CPU performance is --- or used to be. This new +approach gives a single-threaded overhead that should be very low --- +maybe 20%, which would definitely be news for STM systems. Right now +(February 2014) we are still implementing it, so we cannot give final +numbers yet, but early results on a small interpreter for a custom +language are around 15%. This might be a deal-changer for STM. + +In the talk, I will describe our progress, hopefully along with real +numbers and demos. I will then dive under the hood of PyPy to give an +idea about how it works. I will conclude with a picture of how the +future of multi-threaded programming might looks like, for high-level +languages like Python. 
I will also mention CPython: how hard (or not) +it would be to change the CPython source code to use the same approach. + From noreply at buildbot.pypy.org Fri Feb 7 18:22:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 7 Feb 2014 18:22:50 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Light rewrite of this para Message-ID: <20140207172250.345AB1C02D4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5148:b53fb9b9054a Date: 2014-02-07 18:22 +0100 http://bitbucket.org/pypy/extradoc/changeset/b53fb9b9054a/ Log: Light rewrite of this para diff --git a/talk/ep2014/stm/abstract.rst b/talk/ep2014/stm/abstract.rst --- a/talk/ep2014/stm/abstract.rst +++ b/talk/ep2014/stm/abstract.rst @@ -23,14 +23,14 @@ The current research is based on a recent new insight that promises to give really good performance. The speed of STM is generally measured by two factors: the ability to scale with the number of CPUs, and the -amount of overhead when compared with other approach in a single CPU (in -this case, with the regular PyPy with the GIL). Scaling is not really a -problem here, but single-CPU performance is --- or used to be. This new -approach gives a single-threaded overhead that should be very low --- -maybe 20%, which would definitely be news for STM systems. Right now -(February 2014) we are still implementing it, so we cannot give final -numbers yet, but early results on a small interpreter for a custom -language are around 15%. This might be a deal-changer for STM. +amount of overhead when compared with other approaches in a single CPU +(in this case, with the regular PyPy with the GIL). Scaling is not +really a problem here, but single-CPU performance is --or used to be. +This new approach gives a single-threaded overhead that should be very +low, maybe 20%, which would definitely be news for STM systems. Right +now (February 2014) we are still implementing it, so we cannot give +final numbers yet, but early results on a small interpreter for a custom +language are around 15%. This looks like a deal-changer for STM. In the talk, I will describe our progress, hopefully along with real numbers and demos. I will then dive under the hood of PyPy to give an From noreply at buildbot.pypy.org Fri Feb 7 18:33:23 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 7 Feb 2014 18:33:23 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Expand and add CFFI Message-ID: <20140207173323.B6C6A1C0A5B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5149:550b7a914c24 Date: 2014-02-07 18:33 +0100 http://bitbucket.org/pypy/extradoc/changeset/550b7a914c24/ Log: Expand and add CFFI diff --git a/talk/ep2014/status/abstract.rst b/talk/ep2014/status/abstract.rst --- a/talk/ep2014/status/abstract.rst +++ b/talk/ep2014/status/abstract.rst @@ -1,16 +1,36 @@ PyPy status talk (a.k.a.: no no, PyPy is not dead) =================================================== -In this talk we will present the current status of PyPy, with a particular -focus on what happened in the last two years, since the last EuroPython PyPy -talk. We will give a brief overview of the current speed and the on-going -development efforts, including but not limited to: +Abstract +-------- -- the status of the JIT and PyPy performance in general +The current status of PyPy, with a particular focus on what happened in +the last two years, since the last EuroPython PyPy talk. 
We will give a +brief overview of the current speed and the on-going development efforts +on the JIT, the GC, NumPy, Python 3 compatibility, CFFI, STM... -- the improvements on the Garbage Collector -- the status of numpy and Python 3 compatibility +Description +----------- -- the status and ideas of the STM (Software Transactional Memory) research - project, which aims to solve the GIL problem +In this talk we will present the current status of PyPy, with a +particular focus on what happened in the last two years, since the last +EuroPython PyPy talk. We will give an overview of the current speed and +the on-going development efforts, including but not limited to: + +- the status of the Just-in-Time Compiler (JIT) and PyPy performance in + general; + +- the improvements on the Garbage Collector (GC); + +- the status of the NumPy and Python 3 compatibility subprojects; + +- CFFI, which aims to be a general C interface mechanism for both + CPython and PyPy; + +- a quick overview of the STM (Software Transactional Memory) research + project, which aims to solve the GIL problem. + +This is the "general PyPy status talk" that we give every year at +EuroPython (except last year; hence the "no no, PyPy is not dead" part +of the title of this talk). From noreply at buildbot.pypy.org Fri Feb 7 20:24:41 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 7 Feb 2014 20:24:41 +0100 (CET) Subject: [pypy-commit] stmgc c7: Use __sync_synchronize() as a general fall-back. Update comments. Message-ID: <20140207192441.791891C02D4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7 Changeset: r710:f59e497a6ec5 Date: 2014-02-07 20:24 +0100 http://bitbucket.org/pypy/stmgc/changeset/f59e497a6ec5/ Log: Use __sync_synchronize() as a general fall-back. Update comments. diff --git a/c7/core.h b/c7/core.h --- a/c7/core.h +++ b/c7/core.h @@ -199,14 +199,18 @@ static inline void write_fence(void) { + /* This function inserts a "write fence". The goal is to make + sure that past writes are really pushed to memory before + the future writes. We assume that the corresponding "read + fence" effect is done automatically by a corresponding + __sync_bool_compare_and_swap(). */ #if defined(__amd64__) || defined(__i386__) - /* this is only a compiler barrier - use __sync_synchronize(...) or other __sync_OPs that - are locked by the CPU if you need to prevent - loads to be moved before stores to different locations */ + /* this is only a compiler barrier, which is enough on x86 */ asm("" : : : "memory"); #else -# error "Define write_fence() for your architecture" + /* general fall-back, but we might have more efficient + alternative on some other platforms too */ + __sync_synchronize(); #endif } From noreply at buildbot.pypy.org Fri Feb 7 20:36:03 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 7 Feb 2014 20:36:03 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: reword Message-ID: <20140207193603.125EE1C0153@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: extradoc Changeset: r5150:74f17fa92c47 Date: 2013-11-12 15:14 -0800 http://bitbucket.org/pypy/extradoc/changeset/74f17fa92c47/ Log: reword diff --git a/blog/draft/py3k-status-update-12.rst b/blog/draft/py3k-status-update-12.rst --- a/blog/draft/py3k-status-update-12.rst +++ b/blog/draft/py3k-status-update-12.rst @@ -31,8 +31,7 @@ operations, so this came with a regression in performance in this area. We're now in the process of solving this. 
Part of this work also involves some -house cleaning on these numeric types which will also benefit the default -branch. +house cleaning on these numeric types which also benefits the default branch. cheers, Phil From noreply at buildbot.pypy.org Fri Feb 7 20:36:05 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 7 Feb 2014 20:36:05 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: merge upstream Message-ID: <20140207193605.089031C0153@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: extradoc Changeset: r5151:1694aba0aca8 Date: 2014-02-07 11:35 -0800 http://bitbucket.org/pypy/extradoc/changeset/1694aba0aca8/ Log: merge upstream diff --git a/planning/jit.txt b/planning/jit.txt --- a/planning/jit.txt +++ b/planning/jit.txt @@ -48,6 +48,18 @@ - ovfcheck(a << b) will do ``result >> b`` and check that the result is equal to ``a``, instead of looking at the x86 flags. +- Track whether floats in the JIT could possibly have overflowed into + ``inf``/``nan`` + + f81 = cast_int_to_float(i79) + f82 = float_add(f81, 11235582092889474423308157442431404585112356118389416079589380072358292237843810195794279832650471001320007117491962084853674360550901038905802964414967132773610493339054092829768888725077880882465817684505312860552384417646403930092119569408801702322709406917786643639996702871154982269052209770601514008576.000000) + i83 = float_eq(f82, f81) + guard_false(i83, descr=) + + For example, here this is the test for ``isinf(i81)``, but it's impossible + for ``i81`` to be ``inf`` because ``float(sys.maxint)`` is a finite value. + + OPTIMIZATIONS ------------- diff --git a/sprintinfo/leysin-winter-2014/announcement.txt b/sprintinfo/leysin-winter-2014/announcement.txt new file mode 100644 --- /dev/null +++ b/sprintinfo/leysin-winter-2014/announcement.txt @@ -0,0 +1,62 @@ +===================================================================== + PyPy Leysin Winter Sprint (11-19st January 2014) +===================================================================== + +The next PyPy sprint will be in Leysin, Switzerland, for the ninth time. +This is a fully public sprint: newcomers and topics other than those +proposed below are welcome. + +------------------------------ +Goals and topics of the sprint +------------------------------ + +* Py3k: work towards supporting Python 3 in PyPy + +* NumPyPy: work towards supporting the numpy module in PyPy + +* STM: work towards supporting Software Transactional Memory + +* And as usual, the main side goal is to have fun in winter sports :-) + We can take a day off for ski. + +----------- +Exact times +----------- + +For a change, and as an attempt to simplify things, I specified the +dates as 11-19 January 2014, where 11 and 19 are travel days. We will +work full days between the 12 and the 18. You are of course allowed to +show up for a part of that time only, too. + +----------------------- +Location & Accomodation +----------------------- + +Leysin, Switzerland, "same place as before". Let me refresh your +memory: both the sprint venue and the lodging will be in a very spacious +pair of chalets built specifically for bed & breakfast: +http://www.ermina.ch/. The place has a good ADSL Internet connexion +with wireless installed. 
You can of course arrange your own lodging +anywhere (as long as you are in Leysin, you cannot be more than a 15 +minutes walk away from the sprint venue), but I definitely recommend +lodging there too -- you won't find a better view anywhere else (though +you probably won't get much worse ones easily, either :-) + +Please *confirm* that you are coming so that we can adjust the +reservations as appropriate. The rate so far has been around 60 CHF a +night all included in 2-person rooms, with breakfast. There are larger +rooms too (less expensive per person) and maybe the possibility to get a +single room if you really want to. + +Please register by Mercurial:: + + https://bitbucket.org/pypy/extradoc/ + https://bitbucket.org/pypy/extradoc/raw/extradoc/sprintinfo/leysin-winter-2014 + +or on the pypy-dev mailing list if you do not yet have check-in rights: + + http://mail.python.org/mailman/listinfo/pypy-dev + +You need a Swiss-to-(insert country here) power adapter. There will be +some Swiss-to-EU adapters around -- bring a EU-format power strip if you +have one. diff --git a/sprintinfo/leysin-winter-2014/people.txt b/sprintinfo/leysin-winter-2014/people.txt new file mode 100644 --- /dev/null +++ b/sprintinfo/leysin-winter-2014/people.txt @@ -0,0 +1,63 @@ + +People coming to the Leysin sprint Winter 2014 +================================================== + +People who have a ``?`` in their arrive/depart or accomodation +column are known to be coming but there are no details +available yet from them. + + +==================== ============== ======================= + Name Arrive/Depart Accomodation +==================== ============== ======================= +Armin Rigo private +Romain Guillebert 11-19 Ermina +Christian Clauss 11-12 & 18-19 I live nearby +Maciej Fijalkowski 11-18 Ermina +Remi Meier 11-19 Ermina +Johan Råde 11-18 Ermina +Antonio Cuni 14-18 Ermina +Manuel Jacob 12-19 private +==================== ============== ======================= + + +People on the following list were present at previous sprints: + +==================== ============== ===================== + Name Arrive/Depart Accomodation +==================== ============== ===================== +Romain Guillebert ? ? +Michael Foord ? ? +David Schneider ? ? +Jacob Hallen ? ? +Laura Creighton ? ? +Hakan Ardo ? ? +Carl Friedrich Bolz ? ? +Samuele Pedroni ? ? +Anders Hammarquist ? ? +Christian Tismer ? ? +Niko Matsakis ? ? +Toby Watson ? ? +Paul deGrandis ? ? +Michael Hudson ? ? +Anders Lehmann ? ? +Niklaus Haldimann ? ? +Lene Wagner ? ? +Amaury Forgeot d'Arc ? ? +Valentino Volonghi ? ? +Boris Feigin ? ? +Andrew Thompson ? ? +Bert Freudenberg ? ? +Beatrice Duering ? ? +Richard Emslie ? ? +Johan Hahn ? ? +Stephan Diehl ? ? +Alexander Schremmer ? ? +Anders Chrigstroem ? ? +Eric van Riet Paap ? ? +Holger Krekel ? ? +Guido Wesdorp ? ? +Leonardo Santagada ? ? +Alexandre Fayolle ? ? +Sylvain Thénault ? ? 
+==================== ============== ===================== diff --git a/sprintinfo/leysin-winter-2014/planning.txt b/sprintinfo/leysin-winter-2014/planning.txt new file mode 100644 --- /dev/null +++ b/sprintinfo/leysin-winter-2014/planning.txt @@ -0,0 +1,46 @@ + +People +------ + +Johan Rade +Remi Meier +Maciej Fijalkowski +Romain Guillebert +Armin Rigo +Manuel Jacob +Antonio Cuni + +Topics +------ + +* numpy stuff, fix bugs from bug tracker (rguillebert, antocuni around) + +* look at codespeed2 + +* resume-refactor branch (fijal, rguillebert) MORE PROGRESS + +* GC pinning + +* asmgcc bug with greenlets and --shared (FIXED) + +* think about --shared by default + +* CFFI 1.0 + +* STM (remi, armin) DONE in transaction breaks, started c7 + +* discuss about C++ / cppyy, look into importing pyshiboken (johan pessimistic, ?) + +* try cppyy to run on windows (johan) IN PROGRESS + +* ctypes: https://bugs.pypy.org/issue1671 DONE + +* longs multiplication: patch at https://bugs.pypy.org/issue892 + +* look into merging refactor-str-types (mjacob, antocuni) FIX TRANSLATION + +* tweaking ast classes: https://bugs.pypy.org/issue1673 (mjacob) + +* skiing (fijal, DONE) + +* add jit_merge_point to tuple_contains (anybody) diff --git a/talk/ep2014/status/abstract.rst b/talk/ep2014/status/abstract.rst new file mode 100644 --- /dev/null +++ b/talk/ep2014/status/abstract.rst @@ -0,0 +1,36 @@ +PyPy status talk (a.k.a.: no no, PyPy is not dead) +=================================================== + +Abstract +-------- + +The current status of PyPy, with a particular focus on what happened in +the last two years, since the last EuroPython PyPy talk. We will give a +brief overview of the current speed and the on-going development efforts +on the JIT, the GC, NumPy, Python 3 compatibility, CFFI, STM... + + +Description +----------- + +In this talk we will present the current status of PyPy, with a +particular focus on what happened in the last two years, since the last +EuroPython PyPy talk. We will give an overview of the current speed and +the on-going development efforts, including but not limited to: + +- the status of the Just-in-Time Compiler (JIT) and PyPy performance in + general; + +- the improvements on the Garbage Collector (GC); + +- the status of the NumPy and Python 3 compatibility subprojects; + +- CFFI, which aims to be a general C interface mechanism for both + CPython and PyPy; + +- a quick overview of the STM (Software Transactional Memory) research + project, which aims to solve the GIL problem. + +This is the "general PyPy status talk" that we give every year at +EuroPython (except last year; hence the "no no, PyPy is not dead" part +of the title of this talk). diff --git a/talk/ep2014/stm/abstract.rst b/talk/ep2014/stm/abstract.rst new file mode 100644 --- /dev/null +++ b/talk/ep2014/stm/abstract.rst @@ -0,0 +1,41 @@ +Using All These Cores: Transactional Memory in PyPy +=================================================== + +Abstract +-------- + +PyPy, the Python implementation written in Python, experimentally +supports Transactional Memory (TM). The strength of TM is to enable a +novel use of multithreading, inheritently safe, and not limited to +special use cases like other approaches. This talk will focus on how it +works under the hood. + + +Description +----------- + +PyPy is a fast alternative Python implementation. Software +Transactional Memory (STM) is a current academic research topic. 
Put +the two together --brew for a couple of years-- and we get a version of +PyPy that runs on multiple cores, without the infamous Global +Interpreter Lock (GIL). + +The current research is based on a recent new insight that promises to +give really good performance. The speed of STM is generally measured by +two factors: the ability to scale with the number of CPUs, and the +amount of overhead when compared with other approaches in a single CPU +(in this case, with the regular PyPy with the GIL). Scaling is not +really a problem here, but single-CPU performance is --or used to be. +This new approach gives a single-threaded overhead that should be very +low, maybe 20%, which would definitely be news for STM systems. Right +now (February 2014) we are still implementing it, so we cannot give +final numbers yet, but early results on a small interpreter for a custom +language are around 15%. This looks like a deal-changer for STM. + +In the talk, I will describe our progress, hopefully along with real +numbers and demos. I will then dive under the hood of PyPy to give an +idea about how it works. I will conclude with a picture of how the +future of multi-threaded programming might looks like, for high-level +languages like Python. I will also mention CPython: how hard (or not) +it would be to change the CPython source code to use the same approach. + diff --git a/talk/fosdem2014/Makefile b/talk/fosdem2014/Makefile new file mode 100644 --- /dev/null +++ b/talk/fosdem2014/Makefile @@ -0,0 +1,10 @@ +# Note to myself (arigo): run in the 64-bit environment + +pypy-stm.pdf: pypy-stm.tex + pdflatex pypy-stm.tex + +pypy-stm.tex: pypy-stm.rst + rst2beamer.py --stylesheet=stylesheet.latex --documentoptions=14pt --input-encoding=utf8 --output-encoding=utf8 $< | python expand-itemize.py > pypy-stm.tex + +clean: + rm -f pypy-stm.tex pypy-stm.pdf diff --git a/talk/fosdem2014/expand-itemize.py b/talk/fosdem2014/expand-itemize.py new file mode 100644 --- /dev/null +++ b/talk/fosdem2014/expand-itemize.py @@ -0,0 +1,10 @@ +import sys + +def expand(in_file, out_file): + for line in in_file: + line = line.replace(r'\begin{itemize}', + r'\begin{itemize}\setlength{\itemsep}{10pt}') + out_file.write(line) + +if __name__ == '__main__': + expand(sys.stdin, sys.stdout) diff --git a/talk/fosdem2014/pypy-jit/Makefile b/talk/fosdem2014/pypy-jit/Makefile new file mode 100644 --- /dev/null +++ b/talk/fosdem2014/pypy-jit/Makefile @@ -0,0 +1,16 @@ +# you can find rst2beamer.py and inkscapeslide.py here: +# http://bitbucket.org/antocuni/env/src/619f486c4fad/bin/rst2beamer.py +# http://bitbucket.org/antocuni/env/src/619f486c4fad/bin/inkscapeslide.py + + +talk.pdf: talk.rst author.latex stylesheet.latex + rst2beamer.py --input-encoding=utf8 --output-encoding=utf8 --stylesheet=stylesheet.latex --documentoptions=14pt talk.rst talk.latex || exit + sed 's/\\date{}/\\input{author.latex}/' -i talk.latex || exit + #sed 's/\\maketitle/\\input{title.latex}/' -i talk.latex || exit + pdflatex talk.latex || exit + +view: talk.pdf + evince talk.pdf > /dev/null 2>&1 & + +xpdf: talk.pdf + xpdf talk.pdf & diff --git a/talk/fosdem2014/pypy-jit/Speed.png b/talk/fosdem2014/pypy-jit/Speed.png new file mode 100644 index 0000000000000000000000000000000000000000..796a1ed2ef8f48d701a54242e78694ac16a70762 GIT binary patch [cut] diff --git a/talk/fosdem2014/pypy-jit/author.latex b/talk/fosdem2014/pypy-jit/author.latex new file mode 100644 --- /dev/null +++ b/talk/fosdem2014/pypy-jit/author.latex @@ -0,0 +1,8 @@ +\definecolor{rrblitbackground}{rgb}{0.0, 
0.0, 0.0} + +\title[How PyPy makes your code run fast]{How PyPy makes your code run fast} +\author[rguillebert] +{Romain Guillebert} + +\institute{FOSDEM} +\date{February 2nd, 2014} diff --git a/talk/fosdem2014/pypy-jit/beamerdefs.txt b/talk/fosdem2014/pypy-jit/beamerdefs.txt new file mode 100644 --- /dev/null +++ b/talk/fosdem2014/pypy-jit/beamerdefs.txt @@ -0,0 +1,108 @@ +.. colors +.. =========================== + +.. role:: green +.. role:: red + + +.. general useful commands +.. =========================== + +.. |pause| raw:: latex + + \pause + +.. |small| raw:: latex + + {\small + +.. |end_small| raw:: latex + + } + +.. |scriptsize| raw:: latex + + {\scriptsize + +.. |end_scriptsize| raw:: latex + + } + +.. |strike<| raw:: latex + + \sout{ + +.. closed bracket +.. =========================== + +.. |>| raw:: latex + + } + + +.. example block +.. =========================== + +.. |example<| raw:: latex + + \begin{exampleblock}{ + + +.. |end_example| raw:: latex + + \end{exampleblock} + + + +.. alert block +.. =========================== + +.. |alert<| raw:: latex + + \begin{alertblock}{ + + +.. |end_alert| raw:: latex + + \end{alertblock} + + + +.. columns +.. =========================== + +.. |column1| raw:: latex + + \begin{columns} + \begin{column}{0.45\textwidth} + +.. |column2| raw:: latex + + \end{column} + \begin{column}{0.45\textwidth} + + +.. |end_columns| raw:: latex + + \end{column} + \end{columns} + + + +.. |snake| image:: ../../img/py-web-new.png + :scale: 15% + + + +.. nested blocks +.. =========================== + +.. |nested| raw:: latex + + \begin{columns} + \begin{column}{0.85\textwidth} + +.. |end_nested| raw:: latex + + \end{column} + \end{columns} diff --git a/talk/fosdem2014/pypy-jit/rst2beamer.py b/talk/fosdem2014/pypy-jit/rst2beamer.py new file mode 100755 --- /dev/null +++ b/talk/fosdem2014/pypy-jit/rst2beamer.py @@ -0,0 +1,267 @@ +#!/usr/bin/env python +# encoding: utf-8 +""" +A docutils script converting restructured text into Beamer-flavoured LaTeX. + +Beamer is a LaTeX document class for presentations. Via this script, ReST can +be used to prepare slides. It can be called:: + + rst2beamer.py infile.txt > outfile.tex + +where ``infile.tex`` contains the produced Beamer LaTeX. + +See for more details. + +""" +# TODO: modifications for handout sections? +# TOOD: sections and subsections? +# TODO: enable beamer themes? +# TODO: convert document metadata to front page fields? +# TODO: toc-conversion? +# TODO: fix descriptions + +# Unless otherwise stated, created by P-M Agapow on 2007-08-21 +# and open for academic & non-commercial use and modification . + +__docformat__ = 'restructuredtext en' +__author__ = "Paul-Michael Agapow " +__version__ = "0.2" + + +### IMPORTS ### + +import locale +from docutils.core import publish_cmdline, default_description +from docutils.writers.latex2e import Writer as Latex2eWriter +from docutils.writers.latex2e import LaTeXTranslator, DocumentClass +from docutils import nodes + +## Syntax highlighting: + +""" + .. sourcecode:: python + + My code goes here. + + + :copyright: 2007 by Georg Brandl. + :license: BSD, see LICENSE for more details. 
+""" + +from pygments.formatters import HtmlFormatter, LatexFormatter + +# The default formatter +DEFAULT = LatexFormatter() + + +from docutils.parsers.rst import directives + +from pygments import highlight +from pygments.lexers import get_lexer_by_name, TextLexer + +VARIANTS = { + 'linenos': LatexFormatter(linenos=True), +} + +def pygments_directive(name, arguments, options, content, lineno, + content_offset, block_text, state, state_machine): + try: + lexer = get_lexer_by_name(arguments[0]) + except ValueError: + # no lexer found - use the text one instead of an exception + lexer = TextLexer() + formatter = DEFAULT + parsed = highlight(u'\n'.join(content), lexer, formatter) + return [nodes.raw('', parsed, format='latex')] + +pygments_directive.arguments = (1, 0, 1) +pygments_directive.content = 1 +pygments_directive.options = dict([(key, directives.flag) for key in VARIANTS]) + +directives.register_directive('sourcecode', pygments_directive) + + +## multiple images as a single animation + +""" + .. animage:: foo-p*.pdf + :align: center + :scale: 50% +""" + +from glob import glob +import copy +from docutils.parsers.rst import directives +from docutils.parsers.rst.directives.images import Image +import docutils + +class Animage(Image): # Animated Image :-) + + def run(self): + def raw(text): + return docutils.nodes.raw('', text, format='latex') + + nodes = Image.run(self) + img = nodes[0] + if not isinstance(img, docutils.nodes.image): + return nodes # not an image, WTF? + newnodes = [] + pattern = img.attributes['uri'] + filenames = sorted(glob(pattern)) + for i, filename in enumerate(filenames): + newimg = copy.deepcopy(img) + newimg.attributes['uri'] = filename + newnodes += [raw(r'\only<%d>{' % (i+1)), + newimg, + raw('}')] + return newnodes + +directives.register_directive('animage', Animage) + + + + +## CONSTANTS & DEFINES: ### + +BEAMER_SPEC = ( + 'Beamer options', + 'These are derived almost entirely from the LaTeX2e options', + tuple ( + [ + ( + 'Specify theme.', + ['--theme'], + {'default': '', } + ), + ( + 'Specify document options. Multiple options can be given, ' + 'separated by commas. Default is "10pt,a4paper".', + ['--documentoptions'], + {'default': '', } + ), + ] + list (Latex2eWriter.settings_spec[2][2:]) + ), +) + +BEAMER_DEFAULTS = { + 'output_encoding': 'latin-1', + 'documentclass': 'beamer', +} + + +### IMPLEMENTATION ### + +try: + locale.setlocale (locale.LC_ALL, '') +except: + pass + +class BeamerTranslator (LaTeXTranslator): + """ + A converter for docutils elements to beamer-flavoured latex. 
+ """ + + def __init__ (self, document): + LaTeXTranslator.__init__ (self, document) + self.head_prefix = [x for x in self.head_prefix if ('{typearea}' not in x)] + hyperref_posn = [i for i in range (len (self.head_prefix)) if ('{hyperref}' in self.head_prefix[i])] + if not hyperref_posn: + self.head_prefix.append(None) + hyperref_posn = [-1] # XXX hack + self.head_prefix[hyperref_posn[0]] = ('\\usepackage{hyperref}\n' + + '\\usepackage{fancyvrb}\n' + + LatexFormatter(style="manni").get_style_defs() + + "\n") + + self.head_prefix.extend ([ + '\\definecolor{rrblitbackground}{rgb}{0.55, 0.3, 0.1}\n', + '\\newenvironment{rtbliteral}{\n', + '\\begin{ttfamily}\n', + '\\color{rrblitbackground}\n', + '}{\n', + '\\end{ttfamily}\n', + '}\n', + ]) + # this fixes the hardcoded section titles in docutils 0.4 + self.d_class = DocumentClass ('article') + + def begin_frametag (self, node): + if "verbatim" in str(node).lower(): + return '\\begin{frame}[containsverbatim,fragile]\n' + else: + return '\\begin{frame}\n' + + def end_frametag (self): + return '\\end{frame}\n' + + def visit_section (self, node): + if (self.section_level == 0): + self.body.append (self.begin_frametag(node)) + LaTeXTranslator.visit_section (self, node) + + def depart_section (self, node): + # Remove counter for potential subsections: + LaTeXTranslator.depart_section (self, node) + if (self.section_level == 0): + self.body.append (self.end_frametag()) + + def visit_title (self, node): + if (self.section_level == 1): + self.body.append ('\\frametitle{%s}\n\n' % self.encode(node.astext())) + raise nodes.SkipNode + else: + LaTeXTranslator.visit_title (self, node) + + def depart_title (self, node): + if (self.section_level != 1): + LaTeXTranslator.depart_title (self, node) + + def visit_literal_block(self, node): + if not self.active_table.is_open(): + self.body.append('\n\n\\smallskip\n\\begin{rtbliteral}\n') + self.context.append('\\end{rtbliteral}\n\\smallskip\n\n') + else: + self.body.append('\n') + self.context.append('\n') + if (self.settings.use_verbatim_when_possible and (len(node) == 1) + # in case of a parsed-literal containing just a "**bold**" word: + and isinstance(node[0], nodes.Text)): + self.verbatim = 1 + self.body.append('\\begin{verbatim}\n') + else: + self.literal_block = 1 + self.insert_none_breaking_blanks = 1 + + def depart_literal_block(self, node): + if self.verbatim: + self.body.append('\n\\end{verbatim}\n') + self.verbatim = 0 + else: + self.body.append('\n') + self.insert_none_breaking_blanks = 0 + self.literal_block = 0 + self.body.append(self.context.pop()) + + +class BeamerWriter (Latex2eWriter): + """ + A docutils writer that modifies the translator and settings for beamer. + """ + settings_spec = BEAMER_SPEC + settings_defaults = BEAMER_DEFAULTS + + def __init__(self): + Latex2eWriter.__init__(self) + self.translator_class = BeamerTranslator + + + + +if __name__ == '__main__': + description = ( + "Generates Beamer-flavoured LaTeX for PDF-based presentations." 
+ default_description) + publish_cmdline (writer=BeamerWriter(), description=description) + + +### END ###################################################################### + diff --git a/talk/fosdem2014/pypy-jit/stylesheet.latex b/talk/fosdem2014/pypy-jit/stylesheet.latex new file mode 100644 --- /dev/null +++ b/talk/fosdem2014/pypy-jit/stylesheet.latex @@ -0,0 +1,11 @@ +\usetheme{Boadilla} +\usecolortheme{whale} +\setbeamercovered{transparent} +\setbeamertemplate{navigation symbols}{} + +\definecolor{darkgreen}{rgb}{0, 0.5, 0.0} +\newcommand{\docutilsrolegreen}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\docutilsrolered}[1]{\color{red}#1\normalcolor} + +\newcommand{\green}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\red}[1]{\color{red}#1\normalcolor} diff --git a/talk/fosdem2014/pypy-jit/talk.pdf b/talk/fosdem2014/pypy-jit/talk.pdf new file mode 100644 index 0000000000000000000000000000000000000000..78643be7cc20a8371fc7ae6eebf74543936b90f7 GIT binary patch [cut] diff --git a/talk/fosdem2014/pypy-jit/talk.rst b/talk/fosdem2014/pypy-jit/talk.rst new file mode 100644 --- /dev/null +++ b/talk/fosdem2014/pypy-jit/talk.rst @@ -0,0 +1,116 @@ +================================= +How PyPy makes your code run fast +================================= + +Introduction +============ + +* Romain Guillebert, @rguillebert + +* PyPy contributor for ~3 years + +* NumPyPy contributor + +* Please interrupt me + +* How the PyPy JIT works (kind of) + +* Warning : May contain traces of machine code + +speed.pypy.org +============== + +.. image:: Speed.png + :scale: 40% + :align: center + +AOT +=== + +* Ahead of time compilation + +* GCC + +* Can optimize only on what it knows before running the program + +Interpreter +=========== + +* CPython, PyPy + +* Executes an abstract representation of the program + +* Not very smart + +JIT +=== + +* PyPy + +* Gathers information at runtime + +* Produces optimized machine code + +RPython +======= + +* Statically typed subset of Python + +* The RPython compiler automatically generates the JIT from the annotated RPython code + +* The JIT can be added with just one line of code + +* More hints are needed to have an efficient JIT + +Tracing JIT +=========== + +* Optimizes loops + +* Traces one iteration of a loop + +* Produces a linear trace of execution + +* Inlines almost everything + +* The trace is then optimized and compiled + +Guard +===== + +* The JIT produces a linear trace, but the code isn't + +* The JIT can make assumptions that are not always true + +* Guard : If this is true, continue, otherwise return to the interpreter + +* guard_true, guard_class, guard_no_exception, ... + +Bridge +====== + +* After a guard has failed X times, the other path is traced, compiled and attached to the trace + +Optimizations +============= + +* Virtuals + +* Virtualizables + +* Promotion + +Jitviewer +========= + +* Jitviewer demo + +Demo +==== + +* Edge detection algorithm + +Questions +========= + +* Questions ? diff --git a/talk/fosdem2014/pypy-stm.pdf b/talk/fosdem2014/pypy-stm.pdf new file mode 100644 index 0000000000000000000000000000000000000000..78ca0e1e4a6f24dd1d5ebb89e45fe4c8f6bf95e8 GIT binary patch [cut] diff --git a/talk/fosdem2014/pypy-stm.rst b/talk/fosdem2014/pypy-stm.rst new file mode 100644 --- /dev/null +++ b/talk/fosdem2014/pypy-stm.rst @@ -0,0 +1,246 @@ +========================================================== +Using All These Cores: Transactional Memory under the hood +========================================================== + + +.. 
summary: + - Intro + - Using multiple threads: C++, Java; Jython, IronPython + - the GIL in CPython + - "bytecode" is uninteresting for the Python programmer + - but larger blocks are + - if we can make these larger blocks atomic, we win + - "with atomic:" + - theoretical only so far! + - best example: event-driven *non-multithreaded* systems + - under the hood: transactional memory + + +Introduction +============ + +* Armin Rigo, PyPy dev, CPython dev + +* Co-author: Remi Meier, ETHZ + +* This talk applies to Python or any similar language + + +Problem +======= + +* Most computer's CPUs today have multiple cores + +* How to use them? + + +Multithread programming +======================= + +* C, C++, Java, .NET, ... + +* Jython, IronPython + + +CPython, PyPy +============= + +* No story so far + +* Alternatives for various cases + +* Some fine and some horrible + + +The GIL +======= + +* Global Interpreter Lock + +* "Each bytecode is executed atomically" + + +Transactional Memory +==================== + +* Recent research (past ~10 years) + +* Optimistically runs multiple threads even if they + are supposed to be waiting on the same lock + +* Usually, high overheads + + +Expected results +================ + +* Runs multiple threads despite a single GIL + +* Does not remove the GIL, but solves the original problem anyway + + +Transactional Memory +==================== + +* STM: Software Transactional Memory + +* HTM: Hardware Transactional Memory + +* Hybrids + + +Status +====== + +* STM is still at least 2x slower (on one core) + +* HTM: tested in Ruby with Intel Haswell CPUs, not bad but + still disappointing (imo) + + +STM C7 +====== + +* c7 is our group's research (there were a lot of previous + research that failed to give good results) + +* Hope: much less than 2x slower for "PyPy-like" usages + +* (insert description here) + + +Atomic sections +=============== + +* GIL = "each bytecode is atomic" + +* One bytecode? Obscure for the regular Python programmer + +* Larger atomic sections: + +:: + + with atomic: + ... + + +Larger atomic sections +====================== + +* New way to synchronize multiple threads + +* All ``atomic`` blocks appear to run serialized + +* With STM/HTM, they actually run in parallel as far as possible + + +No threads? +=========== + +* Works even if you don't use threads! + +* If the Twisted reactor (say) was modified to start a pool of threads, + and to run all events in "``with atomic:``" + +* ...Then the end result is the same, for any Twisted application + + +Behind-the-scene threads +======================== + +* The thread pool added behind the scene lets a STM/HTM-enabled + Python run on several cores + +* The "``with atomic:``" means that the semantics of the Twisted + application didn't change + + +Summary (optimistic) +==================== + +* If you are using Twisted... 
+ +* ...Your program will run on multiple cores ``:-)`` + + +Conflicts +========= + +* Actually, your program will likely fail to use multiple cores + out of the box + +* ...Because of "conflicts": each event should be "often" independent, + but may not be + +* Example: incrementing a global counter, or otherwise changing some + global object systematically + + +Some work left for you to do +============================ + +* You need to figure out where the conficts are + +* Maybe using some debugger-like tools that report conflicts + +* Then you need (hopefully small) rewrites to avoid them + + +Some work left for us to do, first +================================== + +* Additional conflicts come from Twisted itself + +* Example: the logging system, which may need to use queues + +* This means that some of the core Python data structures (dicts, + queues...) may need refactorings too + + +What is the point? +================== + +* The point is that with STM/HTM your program is always *correct* + (as much as the single-core version is) + +* You need to work in order to fix the most obvious conflicts + +* If you don't, it won't be faster than the single-core original + + +What did we win? +================ + +* Regular approach to multithreading: your program is always *fast* + +* You need to work in order to fix the bugs (races, deadlocks...) + +* You need to find and fix *all* bugs -- as opposed to the STM/HTM + version where you only fix *some* issues until it is fast enough + + +Scope +===== + +* Twisted / Tornado / Eventlet / Stackless / etc.: event-driven programming + +* Any program computing something complicated, e.g. over all items in + a dictionary, occasionally updating a shared state, etc. + +* In general, any CPU-bound program with identifiable sections that + have a good chance to be parallelizable: "a good chance" is enough + + +Conclusion +========== + +* Mostly theoretical for now: there is a risk it won't work in + practice [1] + +* Expect progress in the following months: http://morepypy.blogspot.com/ + +:: + + - + +[1] I bet it will, eventually ``:-)`` diff --git a/talk/fosdem2014/stylesheet.latex b/talk/fosdem2014/stylesheet.latex new file mode 100644 --- /dev/null +++ b/talk/fosdem2014/stylesheet.latex @@ -0,0 +1,10 @@ +\usetheme{Warsaw} +\usecolortheme{whale} +\setbeamercovered{transparent} +\definecolor{darkgreen}{rgb}{0, 0.5, 0.0} +\newcommand{\docutilsrolegreen}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\docutilsrolered}[1]{\color{red}#1\normalcolor} +\addtobeamertemplate{block begin}{}{\setlength{\parskip}{35pt plus 1pt minus 1pt}} + +\newcommand{\green}[1]{\color{darkgreen}#1\normalcolor} +\newcommand{\red}[1]{\color{red}#1\normalcolor} diff --git a/talk/pycon2014/language-summit.rst b/talk/pycon2014/language-summit.rst new file mode 100644 --- /dev/null +++ b/talk/pycon2014/language-summit.rst @@ -0,0 +1,7 @@ +---------------------------- +Language summit presentation +---------------------------- + +We should give a ~10 minute presentation about the status of PyPy. 
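To make the "conflicts" point from the pypy-stm slides above concrete, here is a
small stand-in sketch in plain Python. The ``atomic`` object is emulated with an
ordinary lock, which only reproduces the serialized semantics of the atomic
blocks described in the slides, not the parallel execution that STM/HTM aims
for; the real pypy-stm interface is not used here::

    import threading

    atomic = threading.Lock()      # stand-in for the atomic blocks of the slides
    counter = 0

    def handle_event(n, results):
        # every event increments the same global: exactly the kind of
        # systematic conflict the slides warn about
        global counter
        with atomic:
            counter += 1
            results.append(n * n)  # the per-event work itself could run in parallel

    results = []
    threads = [threading.Thread(target=handle_event, args=(i, results))
               for i in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

Keeping a per-thread tally and merging it only occasionally is the kind of small
rewrite that removes such a conflict.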
+ +(Asked by Michael Foord) From noreply at buildbot.pypy.org Sat Feb 8 11:14:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Feb 2014 11:14:43 +0100 (CET) Subject: [pypy-commit] pypy default: Add tests Message-ID: <20140208101443.966851C05CE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69096:4d5c3e138c63 Date: 2014-02-08 11:01 +0100 http://bitbucket.org/pypy/pypy/changeset/4d5c3e138c63/ Log: Add tests diff --git a/rpython/jit/metainterp/test/test_history.py b/rpython/jit/metainterp/test/test_history.py --- a/rpython/jit/metainterp/test/test_history.py +++ b/rpython/jit/metainterp/test/test_history.py @@ -1,5 +1,6 @@ from rpython.jit.metainterp.history import * from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.rlib.rfloat import NAN, INFINITY def test_repr(): @@ -38,3 +39,20 @@ assert not c3a.same_constant(c1b) assert not c3a.same_constant(c2b) assert c3a.same_constant(c3b) + +def test_same_constant_float(): + c1 = Const._new(12.34) + c2 = Const._new(12.34) + c3 = Const._new(NAN) + c4 = Const._new(NAN) + c5 = Const._new(INFINITY) + c6 = Const._new(INFINITY) + assert c1.same_constant(c2) + assert c3.same_constant(c4) + assert c5.same_constant(c6) + assert not c1.same_constant(c4) + assert not c1.same_constant(c6) + assert not c3.same_constant(c2) + assert not c3.same_constant(c6) + assert not c5.same_constant(c2) + assert not c5.same_constant(c4) From noreply at buildbot.pypy.org Sat Feb 8 11:14:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Feb 2014 11:14:44 +0100 (CET) Subject: [pypy-commit] pypy default: An extra test Message-ID: <20140208101444.C48581C05CE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69097:f6c9256018a2 Date: 2014-02-08 11:11 +0100 http://bitbucket.org/pypy/pypy/changeset/f6c9256018a2/ Log: An extra test diff --git a/rpython/jit/metainterp/test/test_history.py b/rpython/jit/metainterp/test/test_history.py --- a/rpython/jit/metainterp/test/test_history.py +++ b/rpython/jit/metainterp/test/test_history.py @@ -1,6 +1,8 @@ from rpython.jit.metainterp.history import * from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rlib.rfloat import NAN, INFINITY +from rpython.jit.codewriter import longlong +from rpython.translator.c.test.test_standalone import StandaloneTests def test_repr(): @@ -56,3 +58,19 @@ assert not c3.same_constant(c6) assert not c5.same_constant(c2) assert not c5.same_constant(c4) + + +class TestZTranslated(StandaloneTests): + def test_ztranslated_same_constant_float(self): + def fn(args): + n = INFINITY + c1 = ConstFloat(longlong.getfloatstorage(n - INFINITY)) + c2 = ConstFloat(longlong.getfloatstorage(n - INFINITY)) + c3 = ConstFloat(longlong.getfloatstorage(12.34)) + if c1.same_constant(c2): + print "ok!" + return 0 + + t, cbuilder = self.compile(fn) + data = cbuilder.cmdexec('') + assert "ok!\n" in data From noreply at buildbot.pypy.org Sat Feb 8 11:14:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Feb 2014 11:14:47 +0100 (CET) Subject: [pypy-commit] pypy default: Backout b58a2c01fd59, adding this check again. I'm not sure, but I think Message-ID: <20140208101447.690F71C05CE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69099:966942db58df Date: 2014-02-08 11:13 +0100 http://bitbucket.org/pypy/pypy/changeset/966942db58df/ Log: Backout b58a2c01fd59, adding this check again. I'm not sure, but I think it should be fixed by 3a0ef8f31265. 
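The tests added a little further up assert that two ConstFloats built from NAN
count as the same constant; plain ``==`` on floats cannot express that, because
an IEEE 754 NaN never compares equal to anything, so the changesets around here
compare the raw 64-bit pattern instead (``extract_bits``, which maps to
``float2longlong`` on 64-bit). A minimal pure-Python sketch of the idea, with
the bit extraction re-implemented via ``struct`` only for this illustration:

    import struct

    def float2longlong(x):
        # reinterpret the 8 bytes of a double as a signed 64-bit integer,
        # in the spirit of rpython.rlib.longlong2float.float2longlong
        return struct.unpack("<q", struct.pack("<d", x))[0]

    nan = float("nan")
    assert nan != nan                                    # NaN never equals itself
    assert float2longlong(nan) == float2longlong(nan)    # but its bits are stable
    assert float2longlong(12.34) != float2longlong(nan)  # distinct values, distinct bits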
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -594,11 +594,9 @@ if tobox is not None: # sanity check: see whether the current struct value # corresponds to what the cache thinks the value is - # XXX pypy with the following check fails on micronumpy, - # XXX investigate - #resbox = executor.execute(self.metainterp.cpu, self.metainterp, - # rop.GETFIELD_GC, fielddescr, box) - #assert resbox.constbox().same_constant(tobox.constbox()) + resbox = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETFIELD_GC, fielddescr, box) + assert resbox.constbox().same_constant(tobox.constbox()) return tobox resbox = self.execute_with_descr(opnum, fielddescr, box) self.metainterp.heapcache.getfield_now_known(box, fielddescr, resbox) From noreply at buildbot.pypy.org Sat Feb 8 11:14:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Feb 2014 11:14:46 +0100 (CET) Subject: [pypy-commit] pypy default: Fix the test on 64-bit Message-ID: <20140208101446.2AE261C05CE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69098:3a0ef8f31265 Date: 2014-02-08 11:11 +0100 http://bitbucket.org/pypy/pypy/changeset/3a0ef8f31265/ Log: Fix the test on 64-bit diff --git a/rpython/jit/codewriter/longlong.py b/rpython/jit/codewriter/longlong.py --- a/rpython/jit/codewriter/longlong.py +++ b/rpython/jit/codewriter/longlong.py @@ -26,6 +26,7 @@ getrealfloat = lambda x: x gethash = compute_hash gethash_fast = longlong2float.float2longlong + extract_bits = longlong2float.float2longlong is_longlong = lambda TYPE: False # ------------------------------------- @@ -42,6 +43,7 @@ getrealfloat = longlong2float.longlong2float gethash = lambda xll: rarithmetic.intmask(xll - (xll >> 32)) gethash_fast = gethash + extract_bits = lambda x: x is_longlong = lambda TYPE: (TYPE is lltype.SignedLongLong or TYPE is lltype.UnsignedLongLong) diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -275,7 +275,8 @@ def same_constant(self, other): if isinstance(other, ConstFloat): - return self.value == other.value + return (longlong.extract_bits(self.value) == + longlong.extract_bits(other.value)) return False def nonnull(self): From noreply at buildbot.pypy.org Sat Feb 8 11:20:18 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Feb 2014 11:20:18 +0100 (CET) Subject: [pypy-commit] pypy default: Add comment Message-ID: <20140208102018.53A541C0F86@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69100:dd43ba74cfeb Date: 2014-02-08 11:19 +0100 http://bitbucket.org/pypy/pypy/changeset/dd43ba74cfeb/ Log: Add comment diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -275,6 +275,9 @@ def same_constant(self, other): if isinstance(other, ConstFloat): + # careful in this comparison: if self.value and other.value + # are both NaN, stored as regular floats, then it will turn + # out to be false... 
return (longlong.extract_bits(self.value) == longlong.extract_bits(other.value)) return False From noreply at buildbot.pypy.org Sat Feb 8 11:36:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Feb 2014 11:36:46 +0100 (CET) Subject: [pypy-commit] pypy default: Expand the comment Message-ID: <20140208103646.216821C05CE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69101:61a9f16c3e8b Date: 2014-02-08 11:35 +0100 http://bitbucket.org/pypy/pypy/changeset/61a9f16c3e8b/ Log: Expand the comment diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -276,8 +276,9 @@ def same_constant(self, other): if isinstance(other, ConstFloat): # careful in this comparison: if self.value and other.value - # are both NaN, stored as regular floats, then it will turn - # out to be false... + # are both NaN, stored as regular floats (i.e. on 64-bit), + # then just using "==" would say False: two NaNs are always + # different from each other. return (longlong.extract_bits(self.value) == longlong.extract_bits(other.value)) return False From noreply at buildbot.pypy.org Sat Feb 8 23:12:21 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 8 Feb 2014 23:12:21 +0100 (CET) Subject: [pypy-commit] pypy default: Merged in krono/pypy/camelot (pull request #205) Message-ID: <20140208221221.B6A121C05CE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69103:8d9c30585d33 Date: 2014-02-08 23:04 +0100 http://bitbucket.org/pypy/pypy/changeset/8d9c30585d33/ Log: Merged in krono/pypy/camelot (pull request #205) Essential! Thanks Tobias. diff --git a/rpython/tool/ansi_mandelbrot.py b/rpython/tool/ansi_mandelbrot.py --- a/rpython/tool/ansi_mandelbrot.py +++ b/rpython/tool/ansi_mandelbrot.py @@ -14,8 +14,12 @@ """ -palette = [39, 34, 35, 36, 31, 33, 32, 37] - +import os +if os.environ.get('TERM', 'dumb').find('256') > 0: + from ansiramp import ansi_ramp80 + palette = map(lambda x: "38;5;%d" % x, ansi_ramp80) +else: + palette = [39, 34, 35, 36, 31, 33, 32, 37] colour_range = None # used for debugging diff --git a/rpython/tool/ansiramp.py b/rpython/tool/ansiramp.py new file mode 100755 --- /dev/null +++ b/rpython/tool/ansiramp.py @@ -0,0 +1,20 @@ +#! 
/usr/bin/env python +import colorsys + +def hsv2ansi(h, s, v): + # h: 0..1, s/v: 0..1 + if s < 0.001: + return int(v * 23) + 232 + r, g, b = map(lambda x: int(x * 5), colorsys.hsv_to_rgb(h, s, v)) + return 16 + (r * 36) + (g * 6) + b + +def ramp_idx(i, num): + h = 0.57 + float(i)/num + s = float(num - i) / i if i > (num * 0.85) else 1 + v = 1 + return hsv2ansi(h, s, v) + +def ansi_ramp(num): + return [ramp_idx(i, num) for i in range(num)] + +ansi_ramp80 = ansi_ramp(80) From noreply at buildbot.pypy.org Sat Feb 8 23:12:20 2014 From: noreply at buildbot.pypy.org (krono) Date: Sat, 8 Feb 2014 23:12:20 +0100 (CET) Subject: [pypy-commit] pypy camelot: Color change for mandelbrot on 256 color terminals Message-ID: <20140208221220.8D8EF1C05CE@cobra.cs.uni-duesseldorf.de> Author: Tobias Pape Branch: camelot Changeset: r69102:3be1ce5533b4 Date: 2014-02-08 22:50 +0100 http://bitbucket.org/pypy/pypy/changeset/3be1ce5533b4/ Log: Color change for mandelbrot on 256 color terminals diff --git a/rpython/tool/ansi_mandelbrot.py b/rpython/tool/ansi_mandelbrot.py --- a/rpython/tool/ansi_mandelbrot.py +++ b/rpython/tool/ansi_mandelbrot.py @@ -14,8 +14,12 @@ """ -palette = [39, 34, 35, 36, 31, 33, 32, 37] - +import os +if os.environ.get('TERM', 'dumb').find('256') > 0: + from ansiramp import ansi_ramp80 + palette = map(lambda x: "38;5;%d" % x, ansi_ramp80) +else: + palette = [39, 34, 35, 36, 31, 33, 32, 37] colour_range = None # used for debugging diff --git a/rpython/tool/ansiramp.py b/rpython/tool/ansiramp.py new file mode 100755 --- /dev/null +++ b/rpython/tool/ansiramp.py @@ -0,0 +1,20 @@ +#! /usr/bin/env python +import colorsys + +def hsv2ansi(h, s, v): + # h: 0..1, s/v: 0..1 + if s < 0.001: + return int(v * 23) + 232 + r, g, b = map(lambda x: int(x * 5), colorsys.hsv_to_rgb(h, s, v)) + return 16 + (r * 36) + (g * 6) + b + +def ramp_idx(i, num): + h = 0.57 + float(i)/num + s = float(num - i) / i if i > (num * 0.85) else 1 + v = 1 + return hsv2ansi(h, s, v) + +def ansi_ramp(num): + return [ramp_idx(i, num) for i in range(num)] + +ansi_ramp80 = ansi_ramp(80) From noreply at buildbot.pypy.org Sun Feb 9 09:52:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Feb 2014 09:52:02 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: Add an "Update" paragraph at the beginning of each donation page, Message-ID: <20140209085202.6122E1C025A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r473:ee04263f4041 Date: 2014-02-09 09:51 +0100 http://bitbucket.org/pypy/pypy.org/changeset/ee04263f4041/ Log: Add an "Update" paragraph at the beginning of each donation page, as discussed on pypy-z. diff --git a/numpydonate.html b/numpydonate.html --- a/numpydonate.html +++ b/numpydonate.html @@ -45,6 +45,17 @@

 Call for donations - PyPy to support Numpy!
 
+UPDATE (February 2014):
+
+Thanks to our donors, we have raised 77% of
+the total so far. Work on this topic has been happening, and
+continues to happen, within the budget – even if not within the
+timeline described below. We have simply not found enough time to
+work on it as much as we wanted, and thus did not consume the money as
+quickly as predicted. The ratio “progress / $ used” so far
+corresponds roughly to what we expected. The document below is the
+original call for proposal, and we still accept donations for this
+topic.

This is a proposal to provide a fully compatible working NumPy implementation for PyPy. This has long been a very commonly requested feature for PyPy as well as a worthy goal given that PyPy performs extremely well on numeric diff --git a/py3donate.html b/py3donate.html --- a/py3donate.html +++ b/py3donate.html @@ -45,6 +45,17 @@

 Call for donations - PyPy to support Python3!
 
+UPDATE (February 2014):
+
+Thanks to our donors, we have raised 45% of
+the total so far. Work on this topic has been happening, and
+continues to happen, within the budget – even if not within the
+timeline described below. We have simply not found enough time to
+work on it as much as we wanted, and thus did not consume the money as
+quickly as predicted. The ratio “progress / $ used” so far
+corresponds roughly to what we expected. The document below is the
+original call for proposal, and we still accept donations for this
+topic.

The release of Python 3 has been a major undertaking for the Python community, both technically and socially. So far the PyPy interpreter implements only version 2 of the Python language and is increasingly diff --git a/source/numpydonate.txt b/source/numpydonate.txt --- a/source/numpydonate.txt +++ b/source/numpydonate.txt @@ -3,6 +3,19 @@ title: Call for donations - PyPy to support Numpy! --- +UPDATE (February 2014): + + *Thanks to our donors, we have raised 77% of + the total so far. Work on this topic has been happening, and + continues to happen, within the budget --- even if not within the + timeline described below. We have simply not found enough time to + work on it as much as we wanted, and thus did not consume the money as + quickly as predicted. The ratio "progress / $ used" so far + corresponds roughly to what we expected. The document below is the + original call for proposal, and we still accept donations for this + topic.* + + This is a proposal to provide a fully compatible working `NumPy`_ implementation for PyPy. This has long been a very commonly `requested feature`_ for PyPy as well as a worthy goal given that PyPy performs extremely well on numeric diff --git a/source/py3donate.txt b/source/py3donate.txt --- a/source/py3donate.txt +++ b/source/py3donate.txt @@ -3,6 +3,19 @@ title: Call for donations - PyPy to support Python3! --- +UPDATE (February 2014): + + *Thanks to our donors, we have raised 45% of + the total so far. Work on this topic has been happening, and + continues to happen, within the budget --- even if not within the + timeline described below. We have simply not found enough time to + work on it as much as we wanted, and thus did not consume the money as + quickly as predicted. The ratio "progress / $ used" so far + corresponds roughly to what we expected. The document below is the + original call for proposal, and we still accept donations for this + topic.* + + The release of Python 3 has been a major undertaking for the Python community, both technically and socially. So far the PyPy interpreter implements only version 2 of the Python language and is increasingly diff --git a/source/tmdonate.txt b/source/tmdonate.txt --- a/source/tmdonate.txt +++ b/source/tmdonate.txt @@ -7,6 +7,18 @@ Transactional Memory / Automatic Mutual Exclusion ================================================= +UPDATE (February 2014): + + *Thanks to our donors, we have raised 52% of + the total so far. Work on this topic has been happening, and + continues to happen, within the budget --- even if not within the + timeline described below. We have simply not found enough time to + work on it as much as we wanted, and thus did not consume the money as + quickly as predicted. The ratio "progress / $ used" so far + corresponds roughly to what we expected. The document below is the + original call for proposal, and we still accept donations for this + topic.* + Introduction ============ diff --git a/tmdonate.html b/tmdonate.html --- a/tmdonate.html +++ b/tmdonate.html @@ -45,6 +45,17 @@

 Call for donations - Transactional Memory / Automatic Mutual Exclusion in PyPy
 
+UPDATE (February 2014):
+
+Thanks to our donors, we have raised 52% of
+the total so far. Work on this topic has been happening, and
+continues to happen, within the budget – even if not within the
+timeline described below. We have simply not found enough time to
+work on it as much as we wanted, and thus did not consume the money as
+quickly as predicted. The ratio “progress / $ used” so far
+corresponds roughly to what we expected. The document below is the
+original call for proposal, and we still accept donations for this
+topic.

Introduction

In the presence of today's machines with multiple processors, Python From noreply at buildbot.pypy.org Sun Feb 9 10:02:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Feb 2014 10:02:01 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: Link to all posts tagged "stm". Message-ID: <20140209090201.A4FD01C025A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r474:7ee79d52e112 Date: 2014-02-09 10:01 +0100 http://bitbucket.org/pypy/pypy.org/changeset/7ee79d52e112/ Log: Link to all posts tagged "stm". diff --git a/source/tmdonate.txt b/source/tmdonate.txt --- a/source/tmdonate.txt +++ b/source/tmdonate.txt @@ -281,12 +281,12 @@ * `Original blog post`__ * pypy-dev mails `[1]`__ `[2]`__ -* `The most recent blog post`__ +* `All our blog posts about stm`__ .. __: http://morepypy.blogspot.com/2011/08/we-need-software-transactional-memory.html .. __: http://mail.python.org/pipermail/pypy-dev/2011-August/008153.html .. __: http://mail.python.org/pipermail/pypy-dev/2012-January/009034.html -.. __: http://morepypy.blogspot.com/2012/01/transactional-memory-ii.html +.. __: http://morepypy.blogspot.com/search/label/stm Work plan diff --git a/tmdonate.html b/tmdonate.html --- a/tmdonate.html +++ b/tmdonate.html @@ -255,7 +255,7 @@

From noreply at buildbot.pypy.org Sun Feb 9 10:26:26 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Feb 2014 10:26:26 +0100 (CET) Subject: [pypy-commit] stmgc c7: Mention other OSes Message-ID: <20140209092626.3B66D1C025A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7 Changeset: r711:c6989ce5e182 Date: 2014-02-09 10:26 +0100 http://bitbucket.org/pypy/stmgc/changeset/c6989ce5e182/ Log: Mention other OSes diff --git a/c7/README.txt b/c7/README.txt --- a/c7/README.txt +++ b/c7/README.txt @@ -45,6 +45,11 @@ memory, which gives a zero-cost way to share data at different addresses. +NOTE: this functionality is only available on Linux. There are +potential ideas for other OSes, like a Windows device driver that would +tweak the OS' page tables. But it would need serious research to know +if it is feasible. + Memory organization ------------------- From noreply at buildbot.pypy.org Sun Feb 9 10:54:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Feb 2014 10:54:47 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Draft blog post about the current status of stm Message-ID: <20140209095447.92B041C01F2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5152:b05e14b5a97b Date: 2014-02-09 10:54 +0100 http://bitbucket.org/pypy/extradoc/changeset/b05e14b5a97b/ Log: Draft blog post about the current status of stm diff --git a/blog/draft/stm-feb2014.txt b/blog/draft/stm-feb2014.txt new file mode 100644 --- /dev/null +++ b/blog/draft/stm-feb2014.txt @@ -0,0 +1,45 @@ +Hi all, + +A quick note about the Software Transactional Memory (STM) front. + +Since the previous +post, we believe we progressed a lot by discovering an alternative +core model for software transactions. Why do I say "believe"? It's +because it means again that we have to rewrite from scratch the C +library handling STM. This is currently work in progress. Once this is +done, we should be able to adapt the existing pypy-stm to run on top of +it without much rewriting efforts; in fact it should simplify the +difficult issues we ran into for the JIT. + +You can read about the basic ideas of this new C library here. +It is still STM-only, not HTM, but because it doesn't constantly move +objects around in memory, it would be easier to adapt an HTM version. +There are even potential ideas about a hybrid TM, like using HTM but +only to speed up the commits. It is based on a Linux-only system call, remap_file_pages() +(poll: who heard about it before? :-). As previously, the work is done +by Remi Meier and myself. + +Currently, the C library is incomplete, but early experiments show good +results in running duhton, +the interpreter for a minimal language created for the purpose of +testing STM. Good results means we brough down the slow-downs from +60-80% to around 15%, from the non-STM-enabled to the STM-enabled +version on one thread (of course, the idea is that the STM version +scales when using more than one CPU core). + +This means that we are looking forward to a result that is much better +than originally predicted. The pypy-stm has chances to run at a +one-thread speed that is only "n%" slower than the regular pypy-jit, for +a value of "n" that is optimistically 15 --- but more likely some number +around 25 or 50. This is seriously better than the original estimate, +which was "between 2x and 5x". It would mean that using pypy-stm is +worthwhile even with two cores already. + +More updates later... 
+ +Armin From noreply at buildbot.pypy.org Sun Feb 9 18:17:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Feb 2014 18:17:14 +0100 (CET) Subject: [pypy-commit] pypy default: Document the branch Message-ID: <20140209171714.7A7381D2520@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69105:0897cf9a54c9 Date: 2014-02-09 18:16 +0100 http://bitbucket.org/pypy/pypy/changeset/0897cf9a54c9/ Log: Document the branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -63,4 +63,7 @@ The downside is a messy nmake-compatible Makefile. Since gcc shows minimal speedup, it was not implemented. - +.. branch: camelot +With a properly configured 256-color terminal (TERM=...-256color), the +Mandelbrot set shown during translation now uses a range of 50 colours. +Essential! From noreply at buildbot.pypy.org Sun Feb 9 18:17:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Feb 2014 18:17:49 +0100 (CET) Subject: [pypy-commit] pypy default: merge Message-ID: <20140209171749.DE69B1D2520@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69106:d1d0468da79f Date: 2014-02-09 18:17 +0100 http://bitbucket.org/pypy/pypy/changeset/d1d0468da79f/ Log: merge From noreply at buildbot.pypy.org Sun Feb 9 18:31:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Feb 2014 18:31:27 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: A branch to refactor c7. Message-ID: <20140209173127.9A2901D253A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r712:c5fdbd84fd92 Date: 2014-02-09 18:29 +0100 http://bitbucket.org/pypy/stmgc/changeset/c5fdbd84fd92/ Log: A branch to refactor c7. From noreply at buildbot.pypy.org Sun Feb 9 18:31:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Feb 2014 18:31:28 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Start a refactoring step. Goal: the user program includes only "stmgc.h", Message-ID: <20140209173128.B36CB1D253A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r713:d8932faafda2 Date: 2014-02-09 14:51 +0100 http://bitbucket.org/pypy/stmgc/changeset/d8932faafda2/ Log: Start a refactoring step. Goal: the user program includes only "stmgc.h", and compiles only "stmgc.c". diff --git a/c7/stmgc.h b/c7/stmgc.h new file mode 100644 --- /dev/null +++ b/c7/stmgc.h @@ -0,0 +1,147 @@ +#ifndef _STMGC_H +#define _STMGC_H + + +/* ==================== INTERNAL ==================== */ + +/* See "API" below. */ + + +#include +#include +#include +#include + +#if LONG_MAX == 2147483647 +# error "Requires a 64-bit environment" +#endif + +#if BYTE_ORDER == 1234 +# define LENDIAN 1 // little endian +#elif BYTE_ORDER == 4321 +# define LENDIAN 0 // big endian +#else +# error "Unsupported endianness" +#endif + + +enum { + /* set if the write-barrier slowpath needs to trigger. set on all + old objects if there was no write-barrier on it in the same + transaction and no collection inbetween. 
*/ + GCFLAG_WRITE_BARRIER = (1 << 0), +}; + + +#define TLPREFIX __attribute__((address_space(256))) + +typedef TLPREFIX struct object_s object_t; +typedef TLPREFIX struct stm_pub_region_info_s stm_pub_region_info_t; +typedef TLPREFIX struct stm_read_marker_s stm_read_marker_t; +typedef TLPREFIX char stm_char; +typedef void* stm_jmpbufptr_t[5]; /* for use with __builtin_setjmp() */ + +struct stm_read_marker_s { + uint8_t rm; +}; + +struct stm_pub_region_info_s { + uint8_t transaction_read_version; + stm_char *nursery_current; + uint64_t nursery_block_end; +}; +#define STM_PRINFO ((stm_pub_region_info_t *)4352) + +struct stm_thread_local_s { + object_t **shadowstack, **shadowstack_base; + stm_jmpbufptr_t jmpbuf; + /* internal fields follow */ + int _flags; + struct stm_thread_local_s *_prev, *_next; +} stm_thread_local_t; + +/* this should use llvm's coldcc calling convention, + but it's not exposed to C code so far */ +void _stm_write_slowpath(object_t *); + + +/* ==================== HELPERS ==================== */ +#ifdef NDEBUG +#define OPT_ASSERT(cond) do { if (!(cond)) __builtin_unreachable(); } while (0) +#else +#define OPT_ASSERT(cond) assert(cond) +#endif +#define LIKELY(x) __builtin_expect(x, true) +#define UNLIKELY(x) __builtin_expect(x, false) +#define IMPLY(a, b) (!(a) || (b)) + + +/* ==================== API ==================== */ + +/* Structure of objects + -------------------- + + Objects manipulated by the user program, and managed by this library, + must start with a "struct object_s" field. Pointers to any user object + must use the "TLPREFIX struct foo *" type --- don't forget TLPREFIX. + The best is to use typedefs like above. + + The object_s part contains some fields reserved for the STM library. + Right now this is only one byte. 
+*/ + +struct object_s { + uint8_t stm_flags; /* reserved for the STM library */ +}; + +static inline void stm_read(object_t *obj) +{ + ((stm_read_marker_t *)(((uintptr_t)obj) >> 4))->rm = + STM_PRINFO->transaction_read_version; +} + +static inline void stm_write(object_t *obj) +{ + if (UNLIKELY(obj->stm_flags & GCFLAG_WRITE_BARRIER)) + _stm_write_slowpath(obj); +} + +/* must be provided by the user of this library */ +extern ssize_t stmcb_size(struct object_s *); +extern void stmcb_trace(struct object_s *, void (object_t **)); + + +stm_char *_stm_allocate_slowpath(ssize_t); + +static inline object_t *stm_allocate(ssize_t size) +{ + assert((size % 8) == 0); + assert(size >= 16); + + stm_char *p = STM_PRINFO->nursery_current; + stm_char *end = p + size; + STM_PRINFO->nursery_current = end; + if (UNLIKELY((uint64_t)end > STM_PRINFO->nursery_block_end)) + p = _stm_allocate_slowpath(size); + return (object_t *)p; +} + +object_t *stm_allocate_prebuilt(ssize_t size); + +void stm_setup(void); +void stm_teardown(void); +void stm_register_thread_local(stm_thread_local_t *tl); +void stm_unregister_thread_local(stm_thread_local_t *tl); + +void stm_start_transaction(stm_thread_local_t *tl); +void stm_start_inevitable_transaction(stm_thread_local_t *tl); +void stm_commit_transaction(void); +void stm_abort_transaction(void); +void stm_become_inevitable(char* msg); + +stm_thread_local_t *_stm_test_switch(stm_thread_local_t *tl); + + +/* ==================== END ==================== */ + +#endif From noreply at buildbot.pypy.org Sun Feb 9 18:31:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Feb 2014 18:31:29 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: progress Message-ID: <20140209173129.B4B6C1D253A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r714:3ffcf3c3473c Date: 2014-02-09 16:00 +0100 http://bitbucket.org/pypy/stmgc/changeset/3ffcf3c3473c/ Log: progress diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -36,7 +36,7 @@ #define TLPREFIX __attribute__((address_space(256))) typedef TLPREFIX struct object_s object_t; -typedef TLPREFIX struct stm_pub_region_info_s stm_pub_region_info_t; +typedef TLPREFIX struct stm_region_info_s stm_region_info_t; typedef TLPREFIX struct stm_read_marker_s stm_read_marker_t; typedef TLPREFIX char stm_char; typedef void* stm_jmpbufptr_t[5]; /* for use with __builtin_setjmp() */ @@ -45,24 +45,32 @@ uint8_t rm; }; -struct stm_pub_region_info_s { +struct stm_region_info_s { uint8_t transaction_read_version; + uint8_t active; /* 0 = no, 1 = active, 2 = inevitable */ stm_char *nursery_current; uint64_t nursery_block_end; + char *thread_base; }; -#define STM_PRINFO ((stm_pub_region_info_t *)4352) +#define STM_REGION ((stm_region_info_t *)4352) -struct stm_thread_local_s { +typedef struct stm_thread_local_s { object_t **shadowstack, **shadowstack_base; stm_jmpbufptr_t jmpbuf; - /* internal fields follow */ - int _flags; - struct stm_thread_local_s *_prev, *_next; + /* the following fields are handled automatically by the library */ + int region_number; + struct stm_thread_local_s *prev, *next; } stm_thread_local_t; /* this should use llvm's coldcc calling convention, but it's not exposed to C code so far */ void _stm_write_slowpath(object_t *); +stm_char *_stm_allocate_slowpath(ssize_t); +void _stm_become_inevitable(char*); + +bool _stm_was_read(object_t *object); +bool _stm_was_written(object_t *object); +stm_thread_local_t *_stm_test_switch(stm_thread_local_t *); /* ==================== HELPERS 
==================== */ @@ -97,7 +105,7 @@ static inline void stm_read(object_t *obj) { ((stm_read_marker_t *)(((uintptr_t)obj) >> 4))->rm = - STM_PRINFO->transaction_read_version; + STM_REGION->transaction_read_version; } static inline void stm_write(object_t *obj) @@ -106,27 +114,26 @@ _stm_write_slowpath(obj); } -/* must be provided by the user of this library */ -extern ssize_t stmcb_size(struct object_s *); +/* Must be provided by the user of this library. + The "size rounded up" must be a multiple of 8 and at least 16. */ +extern ssize_t stmcb_size_rounded_up(struct object_s *); extern void stmcb_trace(struct object_s *, void (object_t **)); -stm_char *_stm_allocate_slowpath(ssize_t); +static inline object_t *stm_allocate(ssize_t size_rounded_up) +{ + OPT_ASSERT(size_rounded_up >= 16); + OPT_ASSERT((size_rounded_up & 7) == 0); -static inline object_t *stm_allocate(ssize_t size) -{ - assert((size % 8) == 0); - assert(size >= 16); - - stm_char *p = STM_PRINFO->nursery_current; - stm_char *end = p + size; - STM_PRINFO->nursery_current = end; - if (UNLIKELY((uint64_t)end > STM_PRINFO->nursery_block_end)) - p = _stm_allocate_slowpath(size); + stm_char *p = STM_REGION->nursery_current; + stm_char *end = p + size_rounded_up; + STM_REGION->nursery_current = end; + if (UNLIKELY((uint64_t)end > STM_REGION->nursery_block_end)) + p = _stm_allocate_slowpath(size_rounded_up); return (object_t *)p; } -object_t *stm_allocate_prebuilt(ssize_t size); +object_t *stm_allocate_prebuilt(ssize_t size_rounded_up); void stm_setup(void); void stm_teardown(void); @@ -137,9 +144,16 @@ void stm_start_inevitable_transaction(stm_thread_local_t *tl); void stm_commit_transaction(void); void stm_abort_transaction(void); -void stm_become_inevitable(char* msg); -stm_thread_local_t *_stm_test_switch(stm_thread_local_t *tl); +#define STM_START_TRANSACTION(tl) ({ \ + int _restart = __builtin_setjmp((tl)->jmpbuf); \ + stm_start_transaction(tl); \ + _restart; }) + +static inline void stm_become_inevitable(char* msg) { + if (STM_REGION->active == 1) + _stm_become_inevitable(msg); +} /* ==================== END ==================== */ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -1,21 +1,16 @@ import os import cffi +import sys +assert sys.maxint == 9223372036854775807, "requires a 64-bit environment" # ---------- os.environ['CC'] = 'clang' parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -header_files = [os.path.join(parent_dir, _n) for _n in - """core.h pagecopy.h list.h - reader_writer_lock.h - nursery.h pages.h - stmsync.h largemalloc.h""".split()] -source_files = [os.path.join(parent_dir, _n) for _n in - """core.c pagecopy.c list.c - reader_writer_lock.c - nursery.c pages.c - stmsync.c largemalloc.c""".split()] +source_files = [os.path.join(parent_dir, "stmgc.c")] +all_files = [os.path.join(parent_dir, _n) for _n in os.listdir(parent_dir) + if _n.endswith('.h') or _n.endswith('.c')] _pycache_ = os.path.join(parent_dir, 'test', '__pycache__') if os.path.exists(_pycache_): @@ -23,8 +18,7 @@ if _fs: _fsmtime = min(os.stat(os.path.join(_pycache_, _f)).st_mtime for _f in _fs) - if any(os.stat(src).st_mtime >= _fsmtime - for src in header_files + source_files): + if any(os.stat(src).st_mtime >= _fsmtime for src in all_files): import shutil shutil.rmtree(_pycache_) @@ -36,31 +30,40 @@ typedef ... jmpbufptr_t; #define SIZEOF_MYOBJ ... -#define NB_NURSERY_PAGES ... -#define NURSERY_SECTION ... 
+typedef struct { + object_t **shadowstack, **shadowstack_base; + stm_jmpbufptr_t jmpbuf; + ...; +} stm_thread_local_t; + +void stm_read(object_t *obj); +void stm_write(object_t *obj); +object_t *stm_allocate(ssize_t size_rounded_up); +object_t *stm_allocate_prebuilt(ssize_t size_rounded_up); void stm_setup(void); -void stm_setup_pthread(void); +void stm_teardown(void); +void stm_register_thread_local(stm_thread_local_t *tl); +void stm_unregister_thread_local(stm_thread_local_t *tl); -void stm_start_transaction(jmpbufptr_t *); -bool _stm_stop_transaction(void); -object_t *stm_allocate(size_t size); +void stm_start_transaction(stm_thread_local_t *tl); +void stm_start_inevitable_transaction(stm_thread_local_t *tl); +void stm_commit_transaction(void); +void stm_abort_transaction(void); +void stm_become_inevitable(char* msg); -void stm_read(object_t *object); -void stm_write(object_t *object); bool _checked_stm_write(object_t *object); -_Bool _stm_was_read(object_t *object); -_Bool _stm_was_written(object_t *object); - -void _stm_restore_local_state(int thread_num); -void stm_teardown(void); -void stm_teardown_pthread(void); +bool _stm_was_read(object_t *object); +bool _stm_was_written(object_t *object); +stm_thread_local_t *_stm_test_switch(stm_thread_local_t *); char *_stm_real_address(object_t *o); -object_t *_stm_tl_address(char *ptr); +object_t *_stm_region_address(char *ptr); bool _stm_is_young(object_t *o); -object_t *_stm_allocate_old(size_t size); +""") + +TEMPORARILY_DISABLED = """ void _stm_start_safe_point(uint8_t); void _stm_stop_safe_point(uint8_t); bool _stm_check_stop_safe_point(void); @@ -120,18 +123,14 @@ void stm_become_inevitable(char* msg); void stm_start_inevitable_transaction(); bool _checked_stm_become_inevitable(); +""" -""") lib = ffi.verify(''' #include #include -#include "core.h" -#include "pages.h" -#include "nursery.h" -#include "stmsync.h" -#include "largemalloc.h" +#include "../stmgc.h" struct myobj_s { struct object_s hdr; @@ -140,10 +139,6 @@ typedef TLPREFIX struct myobj_s myobj_t; #define SIZEOF_MYOBJ sizeof(struct myobj_s) -size_t stm_object_size_rounded_up(object_t * obj) { - return 16; -} - uint8_t _stm_get_flags(object_t *obj) { return obj->stm_flags; @@ -250,12 +245,14 @@ } -size_t stmcb_size(struct object_s *obj) +ssize_t stmcb_size_rounded_up(struct object_s *obj) { struct myobj_s *myobj = (struct myobj_s*)obj; if (myobj->type_id < 421420) { /* basic case: tid equals 42 plus the size of the object */ assert(myobj->type_id >= 42 + sizeof(struct myobj_s)); + assert((myobj->type_id - 42) >= 16); + assert(((myobj->type_id - 42) & 7) == 0); return myobj->type_id - 42; } else { @@ -289,13 +286,9 @@ force_generic_engine=True) -import sys -if sys.maxint > 2**32: - WORD = 8 -else: - WORD = 4 - +WORD = 8 HDR = lib.SIZEOF_MYOBJ +assert HDR == 8 class Conflict(Exception): pass From noreply at buildbot.pypy.org Sun Feb 9 18:31:30 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Feb 2014 18:31:30 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: in-progress Message-ID: <20140209173130.D1AC41D253A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r715:3650a3e0e16c Date: 2014-02-09 17:50 +0100 http://bitbucket.org/pypy/stmgc/changeset/3650a3e0e16c/ Log: in-progress diff --git a/c7/core.c b/c7/core.c deleted file mode 100644 --- a/c7/core.c +++ /dev/null @@ -1,511 +0,0 @@ -#define _GNU_SOURCE -#include -#include -#include -#include -#include -#include - -#include - -#include "core.h" -#include "list.h" -#include 
"reader_writer_lock.h" -#include "nursery.h" -#include "pages.h" -#include "stmsync.h" -#include "largemalloc.h" - - -char *object_pages; -static int num_threads_started; -uint8_t write_locks[READMARKER_END - READMARKER_START]; -volatile uint8_t inevitable_lock __attribute__((aligned(64))); /* cache-line alignment */ -long global_age = 0; - -struct _thread_local1_s* _stm_dbg_get_tl(int thread) -{ - if (thread == -1) - return (struct _thread_local1_s*)real_address((object_t*)_STM_TL); - return (struct _thread_local1_s*)REAL_ADDRESS(get_thread_base(thread), _STM_TL); -} - -bool _stm_was_read_remote(char *base, object_t *obj) -{ - struct read_marker_s *marker = (struct read_marker_s *) - (base + (((uintptr_t)obj) >> 4)); - struct _thread_local1_s *other_TL1 = (struct _thread_local1_s*) - (base + (uintptr_t)_STM_TL); - return (marker->rm == other_TL1->transaction_read_version); -} - -bool _stm_was_read(object_t *obj) -{ - read_marker_t *marker = (read_marker_t *)(((uintptr_t)obj) >> 4); - return (marker->rm == _STM_TL->transaction_read_version); -} - -bool _stm_was_written(object_t *obj) -{ - /* if the obj was written to in the current transaction - and doesn't trigger the write-barrier slowpath */ - return !(obj->stm_flags & GCFLAG_WRITE_BARRIER); -} - - - -static void push_modified_to_other_threads() -{ - /* WE HAVE THE EXCLUSIVE LOCK HERE */ - - struct stm_list_s *modified = _STM_TL->modified_objects; - char *local_base = _STM_TL->thread_base; - char *remote_base = get_thread_base(1 - _STM_TL->thread_num); - bool conflicted = 0; - - STM_LIST_FOREACH( - modified, - ({ - if (!conflicted) - conflicted = _stm_was_read_remote(remote_base, item); - - /* clear the write-lock */ - uintptr_t lock_idx = (((uintptr_t)item) >> 4) - READMARKER_START; - assert(write_locks[lock_idx] == _STM_TL->thread_num + 1); - write_locks[lock_idx] = 0; - - _stm_move_object(item, - REAL_ADDRESS(local_base, item), - REAL_ADDRESS(remote_base, item)); - })); - - if (conflicted) { - struct _thread_local1_s *remote_TL = (struct _thread_local1_s *) - REAL_ADDRESS(remote_base, _STM_TL); - remote_TL->need_abort = 1; - } -} - - - -void _stm_write_slowpath(object_t *obj) -{ - uintptr_t pagenum = ((uintptr_t)obj) / 4096; - assert(pagenum < NB_PAGES); - assert(!_stm_is_young(obj)); - - LIST_APPEND(_STM_TL->old_objects_to_trace, obj); - - /* for old objects from the same transaction we don't need - to privatize the pages */ - if (obj->stm_flags & GCFLAG_NOT_COMMITTED) { - obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; - return; - } - - /* privatize if SHARED_PAGE */ - uintptr_t pagenum2, pages; - if (obj->stm_flags & GCFLAG_SMALL) { - pagenum2 = pagenum; - pages = 1; - } else { - _stm_chunk_pages((struct object_s*)REAL_ADDRESS(get_thread_base(0), obj), - &pagenum2, &pages); - assert(pagenum == pagenum2); - /* assert(pages == (stmcb_size(real_address(obj)) + 4095) / 4096); - not true if obj spans two pages, but is itself smaller than 1 */ - } - - for (pagenum2 += pages - 1; pagenum2 >= pagenum; pagenum2--) - stm_pages_privatize(pagenum2); - - - /* claim the write-lock for this object (XXX: maybe a fastpath - for prev_owner == lock_num?) 
*/ - uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - READMARKER_START; - uint8_t lock_num = _STM_TL->thread_num + 1; - uint8_t prev_owner; - uint8_t retries = 0; - retry: - do { - prev_owner = __sync_val_compare_and_swap(&write_locks[lock_idx], - 0, lock_num); - - /* if there was no lock-holder or we already have the lock */ - if ((!prev_owner) || (prev_owner == lock_num)) - break; - - struct _thread_local1_s* other_tl = _stm_dbg_get_tl(prev_owner - 1); - if ((_STM_TL->age < other_tl->age) || (_STM_TL->active == 2)) { - /* we must succeed! */ - other_tl->need_abort = 1; - _stm_start_safe_point(0); - /* XXX: not good, maybe should be signalled by other thread */ - usleep(1); - _stm_stop_safe_point(0); - goto retry; - } else if (retries < 1) { - _stm_start_safe_point(0); - usleep(1); - _stm_stop_safe_point(0); - retries++; - goto retry; - } - - stm_abort_transaction(); - /* XXX: only abort if we are younger */ - } while (1); - - /* remove the write-barrier ONLY if we have the write-lock */ - obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; - - if (prev_owner == 0) { - /* otherwise, we have the lock and already added it to - modified_objects / read-marker */ - stm_read(obj); - LIST_APPEND(_STM_TL->modified_objects, obj); - } -} - -void _stm_setup_static_thread(void) -{ - int thread_num = __sync_fetch_and_add(&num_threads_started, 1); - assert(thread_num < 2); /* only 2 threads for now */ - - _stm_restore_local_state(thread_num); - - _STM_TL->nursery_current = (localchar_t*)(FIRST_NURSERY_PAGE * 4096); - memset((void*)real_address((object_t*)NURSERY_CURRENT(_STM_TL)), - 0x0, (FIRST_AFTER_NURSERY_PAGE - FIRST_NURSERY_PAGE) * 4096); /* clear nursery */ - - _STM_TL->shadow_stack = NULL; - _STM_TL->shadow_stack_base = NULL; - - _STM_TL->old_objects_to_trace = stm_list_create(); - - _STM_TL->modified_objects = stm_list_create(); - _STM_TL->uncommitted_objects = stm_list_create(); - assert(!_STM_TL->active); - _stm_assert_clean_tl(); -} - -void stm_setup(void) -{ - _stm_reset_shared_lock(); - _stm_reset_pages(); - - inevitable_lock = 0; - - /* Check that some values are acceptable */ - assert(4096 <= ((uintptr_t)_STM_TL)); - assert(((uintptr_t)_STM_TL) == ((uintptr_t)_STM_TL)); - assert(((uintptr_t)_STM_TL) + sizeof(*_STM_TL) <= 8192); - assert(2 <= FIRST_READMARKER_PAGE); - assert(FIRST_READMARKER_PAGE * 4096UL <= READMARKER_START); - assert(READMARKER_START < READMARKER_END); - assert(READMARKER_END <= 4096UL * FIRST_OBJECT_PAGE); - assert(FIRST_OBJECT_PAGE < NB_PAGES); - assert((NB_NURSERY_PAGES * 4096) % NURSERY_SECTION == 0); - - object_pages = mmap(NULL, TOTAL_MEMORY, - PROT_READ | PROT_WRITE, - MAP_PAGES_FLAGS, -1, 0); - if (object_pages == MAP_FAILED) { - perror("object_pages mmap"); - abort(); - } - - long i; - for (i = 0; i < NB_THREADS; i++) { - char *thread_base = get_thread_base(i); - - /* In each thread's section, the first page is where TLPREFIX'ed - NULL accesses land. We mprotect it so that accesses fail. 
*/ - mprotect(thread_base, 4096, PROT_NONE); - - /* Fill the TLS page (page 1) with 0xDD */ - memset(REAL_ADDRESS(thread_base, 4096), 0xDD, 4096); - /* Make a "hole" at _STM_TL / _STM_TL */ - memset(REAL_ADDRESS(thread_base, _STM_TL), 0, sizeof(*_STM_TL)); - - /* Pages in range(2, FIRST_READMARKER_PAGE) are never used */ - if (FIRST_READMARKER_PAGE > 2) - mprotect(thread_base + 8192, (FIRST_READMARKER_PAGE - 2) * 4096UL, - PROT_NONE); - - struct _thread_local1_s *th = - (struct _thread_local1_s *)REAL_ADDRESS(thread_base, _STM_TL); - - th->thread_num = i; - th->thread_base = thread_base; - - if (i > 0) { - int res; - res = remap_file_pages( - thread_base + FIRST_AFTER_NURSERY_PAGE * 4096UL, - (NB_PAGES - FIRST_AFTER_NURSERY_PAGE) * 4096UL, - 0, FIRST_AFTER_NURSERY_PAGE, 0); - - if (res != 0) { - perror("remap_file_pages"); - abort(); - } - } - } - - for (i = FIRST_NURSERY_PAGE; i < FIRST_AFTER_NURSERY_PAGE; i++) - stm_set_page_flag(i, PRIVATE_PAGE); /* nursery is private. - or should it be UNCOMMITTED??? */ - - num_threads_started = 0; - - assert(HEAP_PAGES < NB_PAGES - FIRST_AFTER_NURSERY_PAGE); - assert(HEAP_PAGES > 10); - - uintptr_t first_heap = stm_pages_reserve(HEAP_PAGES); - char *heap = REAL_ADDRESS(get_thread_base(0), first_heap * 4096UL); - assert(memset(heap, 0xcd, HEAP_PAGES * 4096)); // testing - stm_largemalloc_init(heap, HEAP_PAGES * 4096UL); - - for (i = 0; i < NB_THREADS; i++) { - _stm_setup_static_thread(); - } -} - - - -void _stm_teardown_static_thread(int thread_num) -{ - _stm_restore_local_state(thread_num); - - _stm_assert_clean_tl(); - _stm_reset_shared_lock(); - - stm_list_free(_STM_TL->modified_objects); - _STM_TL->modified_objects = NULL; - - assert(stm_list_is_empty(_STM_TL->uncommitted_objects)); - stm_list_free(_STM_TL->uncommitted_objects); - - assert(_STM_TL->old_objects_to_trace->count == 0); - stm_list_free(_STM_TL->old_objects_to_trace); - - _stm_restore_local_state(-1); // invalid -} - -void stm_teardown(void) -{ - for (; num_threads_started > 0; num_threads_started--) { - _stm_teardown_static_thread(num_threads_started - 1); - } - - assert(inevitable_lock == 0); - munmap(object_pages, TOTAL_MEMORY); - _stm_reset_pages(); - memset(write_locks, 0, sizeof(write_locks)); - object_pages = NULL; -} - - - -static void reset_transaction_read_version(void) -{ - /* force-reset all read markers to 0 */ - - /* XXX measure the time taken by this madvise() and the following - zeroing of pages done lazily by the kernel; compare it with using - 16-bit read_versions. - */ - /* XXX try to use madvise() on smaller ranges of memory. In my - measures, we could gain a factor 2 --- not really more, even if - the range of virtual addresses below is very large, as long as it - is already mostly non-reserved pages. (The following call keeps - them non-reserved; apparently the kernel just skips them very - quickly.) 
- */ - int res = madvise((void*)real_address - ((object_t*) (FIRST_READMARKER_PAGE * 4096UL)), - (FIRST_OBJECT_PAGE - FIRST_READMARKER_PAGE) * 4096UL, - MADV_DONTNEED); - if (res < 0) { - perror("madvise"); - abort(); - } - _STM_TL->transaction_read_version = 1; -} - - -void stm_become_inevitable(char* msg) -{ - if (_STM_TL->active == 2) - return; - assert(_STM_TL->active == 1); - fprintf(stderr, "%c", 'I'+_STM_TL->thread_num*32); - - uint8_t our_lock = _STM_TL->thread_num + 1; - do { - _stm_start_safe_point(LOCK_COLLECT); - _stm_stop_safe_point(LOCK_COLLECT|LOCK_EXCLUSIVE); - - if (!inevitable_lock) - break; - - _stm_start_safe_point(LOCK_EXCLUSIVE|LOCK_COLLECT); - _stm_stop_safe_point(LOCK_COLLECT); - } while (1); - - inevitable_lock = our_lock; - _STM_TL->active = 2; - - _stm_start_safe_point(LOCK_EXCLUSIVE|LOCK_COLLECT); - _stm_stop_safe_point(LOCK_COLLECT); -} - -void stm_start_inevitable_transaction() -{ - stm_start_transaction(NULL); - stm_become_inevitable("stm_start_inevitable_transaction"); -} - -void stm_start_transaction(jmpbufptr_t *jmpbufptr) -{ - /* GS invalid before this point! */ - _stm_stop_safe_point(LOCK_COLLECT|THREAD_YIELD); - - assert(!_STM_TL->active); - - uint8_t old_rv = _STM_TL->transaction_read_version; - _STM_TL->transaction_read_version = old_rv + 1; - if (UNLIKELY(old_rv == 0xff)) - reset_transaction_read_version(); - - assert(stm_list_is_empty(_STM_TL->modified_objects)); - - nursery_on_start(); - - _STM_TL->jmpbufptr = jmpbufptr; - _STM_TL->active = 1; - _STM_TL->need_abort = 0; - /* global_age is approximate -> no synchronization required */ - _STM_TL->age = global_age++; - /* XXX: only increment our age on commit, not abort? that way we - are more likely to succeed next time, thus prevent starvation - (may be fairer, but should probably be done per pthread??) */ - - fprintf(stderr, "%c", 'S'+_STM_TL->thread_num*32); -} - - -void stm_stop_transaction(void) -{ - assert(_STM_TL->active); - - /* do the minor_collection here and not in nursery_on_commit, - since here we can still run concurrently with other threads - as we don't hold the exclusive lock yet. */ - _stm_minor_collect(); - - /* Some operations require us to have the EXCLUSIVE lock */ - if (_STM_TL->active == 1) { - while (1) { - _stm_start_safe_point(LOCK_COLLECT); - usleep(1); /* XXX: better algorithm that allows - for waiting on a mutex */ - _stm_stop_safe_point(LOCK_COLLECT|LOCK_EXCLUSIVE); - - if (!inevitable_lock) - break; - - _stm_start_safe_point(LOCK_COLLECT|LOCK_EXCLUSIVE); - _stm_stop_safe_point(LOCK_COLLECT); - } - /* we have the exclusive lock */ - } else { - /* inevitable! no other transaction could have committed - or aborted us */ - _stm_start_safe_point(LOCK_COLLECT); - _stm_stop_safe_point(LOCK_EXCLUSIVE|LOCK_COLLECT); - inevitable_lock = 0; - } - - _STM_TL->jmpbufptr = NULL; /* cannot abort any more */ - - /* push uncommitted objects to other threads */ - nursery_on_commit(); - - /* copy modified object versions to other threads */ - push_modified_to_other_threads(); - stm_list_clear(_STM_TL->modified_objects); - - - _STM_TL->active = 0; - - fprintf(stderr, "%c", 'C'+_STM_TL->thread_num*32); - - _stm_start_safe_point(LOCK_EXCLUSIVE|LOCK_COLLECT|THREAD_YIELD); - /* GS invalid after this point! 
*/ -} - - -static void reset_modified_from_other_threads() -{ - /* pull the right versions from other threads in order - to reset our pages as part of an abort */ - - struct stm_list_s *modified = _STM_TL->modified_objects; - char *local_base = _STM_TL->thread_base; - char *remote_base = get_thread_base(1 - _STM_TL->thread_num); - - STM_LIST_FOREACH( - modified, - ({ - /* note: same as push_modified_to... but src/dst swapped - TODO: unify both... */ - - /* check at least the first page (required by move_obj() */ - assert(stm_get_page_flag((uintptr_t)item / 4096) == PRIVATE_PAGE); - - _stm_move_object(item, - REAL_ADDRESS(remote_base, item), - REAL_ADDRESS(local_base, item)); - - /* copying from the other thread re-added the - WRITE_BARRIER flag */ - assert(item->stm_flags & GCFLAG_WRITE_BARRIER); - - /* write all changes to the object before we release the - write lock below */ - write_fence(); - - /* clear the write-lock */ - uintptr_t lock_idx = (((uintptr_t)item) >> 4) - READMARKER_START; - assert(write_locks[lock_idx]); - write_locks[lock_idx] = 0; - })); -} - - -void stm_abort_transaction(void) -{ - /* here we hold the shared lock as a reader or writer */ - assert(_STM_TL->active == 1); - - nursery_on_abort(); - - assert(_STM_TL->jmpbufptr != NULL); - assert(_STM_TL->jmpbufptr != (jmpbufptr_t *)-1); /* for tests only */ - _STM_TL->active = 0; - _STM_TL->need_abort = 0; - - /* reset all the modified objects (incl. re-adding GCFLAG_WRITE_BARRIER) */ - reset_modified_from_other_threads(); - stm_list_clear(_STM_TL->modified_objects); - - jmpbufptr_t *buf = _STM_TL->jmpbufptr; /* _STM_TL not valid during safe-point */ - fprintf(stderr, "%c", 'A'+_STM_TL->thread_num*32); - - _stm_start_safe_point(LOCK_COLLECT|THREAD_YIELD); - /* GS invalid after this point! */ - - __builtin_longjmp(*buf, 1); -} diff --git a/c7/core.h b/c7/core.h deleted file mode 100644 --- a/c7/core.h +++ /dev/null @@ -1,277 +0,0 @@ -#ifndef _STM_CORE_H -#define _STM_CORE_H - -#include -#include -#include -#include -#include - -#if LONG_MAX == 2147483647 -# error "Requires a 64-bit environment" -#endif - -#if BYTE_ORDER == 1234 -# define LENDIAN 1 // little endian -#elif BYTE_ORDER == 4321 -# define LENDIAN 0 // big endian -#else -# error "Unsupported endianness" -#endif - - -#define NB_PAGES (6*256*256) // 6*256MB -#define NB_THREADS 2 -#define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) -#define LARGE_OBJECT_WORDS 36 -#define NB_NURSERY_PAGES 1024 // 4MB -#define LENGTH_SHADOW_STACK 163840 - -#define NURSERY_SECTION (32*4096) -/* (NB_NURSERY_PAGE * 4096) % NURSERY_SECTION == 0 */ - - -#define TOTAL_MEMORY (NB_PAGES * 4096UL * NB_THREADS) -#define READMARKER_END ((NB_PAGES * 4096UL) >> 4) -#define FIRST_OBJECT_PAGE ((READMARKER_END + 4095) / 4096UL) -#define FIRST_NURSERY_PAGE FIRST_OBJECT_PAGE -#define READMARKER_START ((FIRST_OBJECT_PAGE * 4096UL) >> 4) -#define FIRST_READMARKER_PAGE (READMARKER_START / 4096UL) -#define FIRST_AFTER_NURSERY_PAGE (FIRST_OBJECT_PAGE + NB_NURSERY_PAGES) -#define HEAP_PAGES (((NB_PAGES - FIRST_AFTER_NURSERY_PAGE) * 3) / 4) - - - -enum { - /* set if the write-barrier slowpath needs to trigger. set on all - old objects if there was no write-barrier on it in the same - transaction and no collection inbetween. */ - GCFLAG_WRITE_BARRIER = (1 << 0), - /* set on objects which are in pages visible to others (SHARED - or PRIVATE), but not committed yet. So only visible from - this transaction. 
*/ - GCFLAG_NOT_COMMITTED = (1 << 1), - /* only used during collections to mark an obj as moved out of the - generation it was in */ - GCFLAG_MOVED = (1 << 2), - /* objects smaller than one page and even smaller than - LARGE_OBJECT_WORDS * 8 bytes */ - GCFLAG_SMALL = (1 << 3), -}; - - - -#define TLPREFIX __attribute__((address_space(256))) - -typedef TLPREFIX struct _thread_local1_s _thread_local1_t; -typedef TLPREFIX struct object_s object_t; -typedef TLPREFIX struct alloc_for_size_s alloc_for_size_t; -typedef TLPREFIX struct read_marker_s read_marker_t; -typedef TLPREFIX char localchar_t; -typedef void* jmpbufptr_t[5]; /* for use with __builtin_setjmp() */ - -/* Structure of objects - -------------------- - - Objects manipulated by the user program, and managed by this library, - must start with a "struct object_s" field. Pointers to any user object - must use the "TLPREFIX struct foo *" type --- don't forget TLPREFIX. - The best is to use typedefs like above. - - The object_s part contains some fields reserved for the STM library, - as well as a 32-bit integer field that can be freely used by the user - program. However, right now this field must be read-only --- i.e. it - must never be modified on any object that may already belong to a - past transaction; you can only set it on just-allocated objects. The - best is to consider it as a field that is written to only once on - newly allocated objects. -*/ - - -struct object_s { - uint8_t stm_flags; /* reserved for the STM library */ - /* make sure it doesn't get bigger than 4 bytes for performance - reasons */ -}; - -struct read_marker_s { - uint8_t rm; -}; - -struct alloc_for_size_s { - localchar_t *next; - uint16_t start, stop; - bool flag_partial_page; -}; - -struct _thread_local1_s { - jmpbufptr_t *jmpbufptr; - uint8_t transaction_read_version; - - /* unsynchronized/inaccurate start age of transaction - XXX: may be replaced with size_of(read/write-set) */ - long age; - - /* static threads, not pthreads */ - int thread_num; - char *thread_base; - - uint8_t active; /* 1 normal, 2 inevitable, 0 no trans. 
*/ - bool need_abort; - - object_t **old_shadow_stack; - object_t **shadow_stack; - object_t **shadow_stack_base; - - union { - localchar_t *nursery_current; - uint32_t nursery_current_halfwords[2]; - }; - - struct stm_list_s *modified_objects; - - struct alloc_for_size_s alloc[LARGE_OBJECT_WORDS]; - struct stm_list_s *uncommitted_objects; - - struct stm_list_s *old_objects_to_trace; -}; -#define _STM_TL ((_thread_local1_t *)4352) - - - -extern char *object_pages; /* start of MMAP region */ -extern uint8_t write_locks[READMARKER_END - READMARKER_START]; - -/* this should use llvm's coldcc calling convention, - but it's not exposed to C code so far */ -void _stm_write_slowpath(object_t *); - - -/* ==================== HELPERS ==================== */ -#ifdef NDEBUG -#define OPT_ASSERT(cond) do { if (!(cond)) __builtin_unreachable(); } while (0) -#else -#define OPT_ASSERT(cond) assert(cond) -#endif -#define LIKELY(x) __builtin_expect(x, true) -#define UNLIKELY(x) __builtin_expect(x, false) -#define IMPLY(a, b) (!(a) || (b)) - -#define REAL_ADDRESS(object_pages, src) ((object_pages) + (uintptr_t)(src)) - - -static inline struct object_s *real_address(object_t *src) -{ - return (struct object_s*)REAL_ADDRESS(_STM_TL->thread_base, src); -} - -static inline char *_stm_real_address(object_t *o) -{ - if (o == NULL) - return NULL; - assert(FIRST_OBJECT_PAGE * 4096 <= (uintptr_t)o - && (uintptr_t)o < NB_PAGES * 4096); - return (char*)real_address(o); -} - -static inline object_t *_stm_tl_address(char *ptr) -{ - if (ptr == NULL) - return NULL; - - uintptr_t res = ptr - _STM_TL->thread_base; - assert(FIRST_OBJECT_PAGE * 4096 <= res - && res < NB_PAGES * 4096); - return (object_t*)res; -} - -static inline char *get_thread_base(long thread_num) -{ - return object_pages + thread_num * (NB_PAGES * 4096UL); -} - - -static inline void spin_loop(void) -{ - asm("pause" : : : "memory"); -} - - -static inline void write_fence(void) -{ - /* This function inserts a "write fence". The goal is to make - sure that past writes are really pushed to memory before - the future writes. We assume that the corresponding "read - fence" effect is done automatically by a corresponding - __sync_bool_compare_and_swap(). 
*/ -#if defined(__amd64__) || defined(__i386__) - /* this is only a compiler barrier, which is enough on x86 */ - asm("" : : : "memory"); -#else - /* general fall-back, but we might have more efficient - alternative on some other platforms too */ - __sync_synchronize(); -#endif -} - - - -/* ==================== API ==================== */ - -static inline void stm_read(object_t *obj) -{ - ((read_marker_t *)(((uintptr_t)obj) >> 4))->rm = - _STM_TL->transaction_read_version; -} - -static inline void stm_write(object_t *obj) -{ - if (UNLIKELY(obj->stm_flags & GCFLAG_WRITE_BARRIER)) - _stm_write_slowpath(obj); -} - -static inline void stm_push_root(object_t *obj) -{ - *(_STM_TL->shadow_stack++) = obj; -} - -static inline object_t *stm_pop_root(void) -{ - return *(--_STM_TL->shadow_stack); -} - -/* must be provided by the user of this library */ -extern size_t stmcb_size(struct object_s *); -extern void stmcb_trace(struct object_s *, void (object_t **)); - -char* _stm_restore_local_state(int thread_num); -void stm_teardown(void); -void stm_teardown_pthread(void); -bool _stm_is_in_transaction(void); -void _stm_assert_clean_tl(void); - -bool _stm_was_read(object_t *obj); -bool _stm_was_written(object_t *obj); - -object_t *stm_allocate(size_t size); -void stm_setup(void); -void stm_setup_pthread(void); - -void stm_start_transaction(jmpbufptr_t *jmpbufptr); -void stm_stop_transaction(void); - - -object_t *_stm_allocate_old(size_t size); - -object_t *stm_allocate_prebuilt(size_t size); - -void stm_abort_transaction(void); - -void _stm_minor_collect(); -void stm_become_inevitable(char* msg); -void stm_start_inevitable_transaction(); - -struct _thread_local1_s* _stm_dbg_get_tl(int thread); /* -1 is current thread */ - - -#endif diff --git a/c7/stm/core.c b/c7/stm/core.c new file mode 100644 --- /dev/null +++ b/c7/stm/core.c @@ -0,0 +1,6 @@ + + +void _stm_write_slowpath(object_t *obj) +{ + abort(); +} diff --git a/c7/stm/core.h b/c7/stm/core.h new file mode 100644 --- /dev/null +++ b/c7/stm/core.h @@ -0,0 +1,47 @@ + +#define NB_PAGES (1500*256) // 1500MB +#define NB_THREADS 2 +#define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) +#define LARGE_OBJECT_WORDS 36 +#define NB_NURSERY_PAGES 1024 // 4MB + +#define NURSERY_SECTION_SIZE (24*4096) + + +#define TOTAL_MEMORY (NB_PAGES * 4096UL * NB_THREADS) +#define READMARKER_END ((NB_PAGES * 4096UL) >> 4) +#define START_OBJECT_PAGE ((READMARKER_END + 4095) / 4096UL) +#define START_NURSERY_PAGE START_OBJECT_PAGE +#define READMARKER_START ((START_OBJECT_PAGE * 4096UL) >> 4) +#define START_READMARKER_PAGE (READMARKER_START / 4096UL) +#define STOP_NURSERY_PAGE (START_NURSERY_PAGE + NB_NURSERY_PAGES) + + +enum { + /* set if the write-barrier slowpath needs to trigger. set on all + old objects if there was no write-barrier on it in the same + transaction and no collection inbetween. */ + GCFLAG_WRITE_BARRIER = _STM_GCFLAG_WRITE_BARRIER, + /* set on objects which are in pages visible to others (SHARED + or PRIVATE), but not committed yet. So only visible from + this transaction. 
*/ + //GCFLAG_NOT_COMMITTED = _STM_GCFLAG_WRITE_BARRIER << 1, + /* only used during collections to mark an obj as moved out of the + generation it was in */ + //GCFLAG_MOVED = _STM_GCFLAG_WRITE_BARRIER << 2, + /* objects smaller than one page and even smaller than + LARGE_OBJECT_WORDS * 8 bytes */ + //GCFLAG_SMALL = _STM_GCFLAG_WRITE_BARRIER << 3, +}; + + +#define STM_PREGION ((stm_priv_region_info_t *)STM_REGION) + +typedef TLPREFIX struct stm_priv_region_info_s stm_priv_region_info_t; + +struct stm_priv_region_info_s { + struct stm_region_info_s pub; +}; + + +#define REAL_ADDRESS(thread_base, src) ((thread_base) + (uintptr_t)(src)) diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c new file mode 100644 --- /dev/null +++ b/c7/stm/gcpage.c @@ -0,0 +1,11 @@ + +stm_char *_stm_allocate_slowpath(ssize_t size_rounded_up) +{ + abort(); +} + + +object_t *stm_allocate_prebuilt(ssize_t size_rounded_up) +{ + abort(); +} diff --git a/c7/stm/misc.c b/c7/stm/misc.c new file mode 100644 --- /dev/null +++ b/c7/stm/misc.c @@ -0,0 +1,34 @@ +#include + + +char *_stm_real_address(object_t *o) +{ + if (o == NULL) + return NULL; + + assert(START_OBJECT_PAGE * 4096UL <= (uintptr_t)o + && (uintptr_t)o < NB_PAGES * 4096UL); + return REAL_ADDRESS(STM_REGION->region_base, o); +} + +object_t *_stm_region_address(char *ptr) +{ + if (ptr == NULL) + return NULL; + + uintptr_t res = ptr - STM_REGION->region_base; + assert(START_OBJECT_PAGE * 4096UL <= res + && res < NB_PAGES * 4096UL); + return (object_t*)res; +} + +bool _stm_was_read(object_t *obj) +{ + return ((stm_read_marker_t *)(((uintptr_t)obj) >> 4))->rm == + STM_REGION->transaction_read_version; +} + +bool _stm_was_written(object_t *obj) +{ + return !(obj->stm_flags & GCFLAG_WRITE_BARRIER); +} diff --git a/c7/stm/setup.c b/c7/stm/setup.c new file mode 100644 --- /dev/null +++ b/c7/stm/setup.c @@ -0,0 +1,86 @@ + + +void stm_setup(void) +{ +#if 0 + _stm_reset_shared_lock(); + _stm_reset_pages(); + + inevitable_lock = 0; + + /* Check that some values are acceptable */ + assert(4096 <= ((uintptr_t)_STM_TL)); + assert(((uintptr_t)_STM_TL) == ((uintptr_t)_STM_TL)); + assert(((uintptr_t)_STM_TL) + sizeof(*_STM_TL) <= 8192); + assert(2 <= FIRST_READMARKER_PAGE); + assert(FIRST_READMARKER_PAGE * 4096UL <= READMARKER_START); + assert(READMARKER_START < READMARKER_END); + assert(READMARKER_END <= 4096UL * FIRST_OBJECT_PAGE); + assert(FIRST_OBJECT_PAGE < NB_PAGES); + assert((NB_NURSERY_PAGES * 4096) % NURSERY_SECTION == 0); + + object_pages = mmap(NULL, TOTAL_MEMORY, + PROT_READ | PROT_WRITE, + MAP_PAGES_FLAGS, -1, 0); + if (object_pages == MAP_FAILED) { + perror("object_pages mmap"); + abort(); + } + + long i; + for (i = 0; i < NB_THREADS; i++) { + char *thread_base = get_thread_base(i); + + /* In each thread's section, the first page is where TLPREFIX'ed + NULL accesses land. We mprotect it so that accesses fail. 
*/ + mprotect(thread_base, 4096, PROT_NONE); + + /* Fill the TLS page (page 1) with 0xDD */ + memset(REAL_ADDRESS(thread_base, 4096), 0xDD, 4096); + /* Make a "hole" at _STM_TL / _STM_TL */ + memset(REAL_ADDRESS(thread_base, _STM_TL), 0, sizeof(*_STM_TL)); + + /* Pages in range(2, FIRST_READMARKER_PAGE) are never used */ + if (FIRST_READMARKER_PAGE > 2) + mprotect(thread_base + 8192, (FIRST_READMARKER_PAGE - 2) * 4096UL, + PROT_NONE); + + struct _thread_local1_s *th = + (struct _thread_local1_s *)REAL_ADDRESS(thread_base, _STM_TL); + + th->thread_num = i; + th->thread_base = thread_base; + + if (i > 0) { + int res; + res = remap_file_pages( + thread_base + FIRST_AFTER_NURSERY_PAGE * 4096UL, + (NB_PAGES - FIRST_AFTER_NURSERY_PAGE) * 4096UL, + 0, FIRST_AFTER_NURSERY_PAGE, 0); + + if (res != 0) { + perror("remap_file_pages"); + abort(); + } + } + } + + for (i = FIRST_NURSERY_PAGE; i < FIRST_AFTER_NURSERY_PAGE; i++) + stm_set_page_flag(i, PRIVATE_PAGE); /* nursery is private. + or should it be UNCOMMITTED??? */ + + num_threads_started = 0; + + assert(HEAP_PAGES < NB_PAGES - FIRST_AFTER_NURSERY_PAGE); + assert(HEAP_PAGES > 10); + + uintptr_t first_heap = stm_pages_reserve(HEAP_PAGES); + char *heap = REAL_ADDRESS(get_thread_base(0), first_heap * 4096UL); + assert(memset(heap, 0xcd, HEAP_PAGES * 4096)); // testing + stm_largemalloc_init(heap, HEAP_PAGES * 4096UL); + + for (i = 0; i < NB_THREADS; i++) { + _stm_setup_static_thread(); + } +#endif +} diff --git a/c7/stmgc.c b/c7/stmgc.c new file mode 100644 --- /dev/null +++ b/c7/stmgc.c @@ -0,0 +1,7 @@ +#include "stmgc.h" +#include "stm/core.h" + +#include "stm/misc.c" +#include "stm/core.c" +#include "stm/gcpage.c" +#include "stm/setup.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -9,8 +9,11 @@ #include #include +#include #include #include +#include +#include #if LONG_MAX == 2147483647 # error "Requires a 64-bit environment" @@ -25,21 +28,13 @@ #endif -enum { - /* set if the write-barrier slowpath needs to trigger. set on all - old objects if there was no write-barrier on it in the same - transaction and no collection inbetween. 
*/ - GCFLAG_WRITE_BARRIER = (1 << 0), -}; - - #define TLPREFIX __attribute__((address_space(256))) typedef TLPREFIX struct object_s object_t; typedef TLPREFIX struct stm_region_info_s stm_region_info_t; typedef TLPREFIX struct stm_read_marker_s stm_read_marker_t; typedef TLPREFIX char stm_char; -typedef void* stm_jmpbufptr_t[5]; /* for use with __builtin_setjmp() */ +typedef void* stm_jmpbuf_t[5]; /* for use with __builtin_setjmp() */ struct stm_read_marker_s { uint8_t rm; @@ -47,18 +42,19 @@ struct stm_region_info_s { uint8_t transaction_read_version; - uint8_t active; /* 0 = no, 1 = active, 2 = inevitable */ stm_char *nursery_current; - uint64_t nursery_block_end; - char *thread_base; + uintptr_t nursery_section_end; + char *region_base; + struct stm_thread_local_s *running_thread; + stm_jmpbuf_t *jmpbuf_ptr; }; #define STM_REGION ((stm_region_info_t *)4352) typedef struct stm_thread_local_s { + /* every thread should handle the shadow stack itself */ object_t **shadowstack, **shadowstack_base; - stm_jmpbufptr_t jmpbuf; - /* the following fields are handled automatically by the library */ - int region_number; + /* the next fields are handled automatically by the library */ + stm_region_info_t *running_in_region; struct stm_thread_local_s *prev, *next; } stm_thread_local_t; @@ -68,9 +64,15 @@ stm_char *_stm_allocate_slowpath(ssize_t); void _stm_become_inevitable(char*); -bool _stm_was_read(object_t *object); -bool _stm_was_written(object_t *object); -stm_thread_local_t *_stm_test_switch(stm_thread_local_t *); +#ifdef STM_TESTS +bool _stm_was_read(object_t *obj); +bool _stm_was_written(object_t *obj); +bool _stm_in_nursery(object_t *obj); +char *_stm_real_address(object_t *o); +object_t *_stm_region_address(char *ptr); +#endif + +#define _STM_GCFLAG_WRITE_BARRIER 0x01 /* ==================== HELPERS ==================== */ @@ -110,7 +112,7 @@ static inline void stm_write(object_t *obj) { - if (UNLIKELY(obj->stm_flags & GCFLAG_WRITE_BARRIER)) + if (UNLIKELY(obj->stm_flags & _STM_GCFLAG_WRITE_BARRIER)) _stm_write_slowpath(obj); } @@ -128,7 +130,7 @@ stm_char *p = STM_REGION->nursery_current; stm_char *end = p + size_rounded_up; STM_REGION->nursery_current = end; - if (UNLIKELY((uint64_t)end > STM_REGION->nursery_block_end)) + if (UNLIKELY((uintptr_t)end > STM_REGION->nursery_section_end)) p = _stm_allocate_slowpath(size_rounded_up); return (object_t *)p; } @@ -140,18 +142,20 @@ void stm_register_thread_local(stm_thread_local_t *tl); void stm_unregister_thread_local(stm_thread_local_t *tl); -void stm_start_transaction(stm_thread_local_t *tl); +void stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf); void stm_start_inevitable_transaction(stm_thread_local_t *tl); void stm_commit_transaction(void); void stm_abort_transaction(void); -#define STM_START_TRANSACTION(tl) ({ \ - int _restart = __builtin_setjmp((tl)->jmpbuf); \ - stm_start_transaction(tl); \ - _restart; }) +#define STM_START_TRANSACTION(tl) ({ \ + stm_jmpbuf_t _buf; \ + int _restart = __builtin_setjmp(_buf); \ + stm_start_transaction(tl, _buf); \ + _restart; \ +}) static inline void stm_become_inevitable(char* msg) { - if (STM_REGION->active == 1) + if (STM_REGION->jmpbuf_ptr != NULL) _stm_become_inevitable(msg); } diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -27,43 +27,40 @@ ffi = cffi.FFI() ffi.cdef(""" typedef ... object_t; -typedef ... jmpbufptr_t; #define SIZEOF_MYOBJ ... 
typedef struct { object_t **shadowstack, **shadowstack_base; - stm_jmpbufptr_t jmpbuf; ...; } stm_thread_local_t; void stm_read(object_t *obj); -void stm_write(object_t *obj); +/*void stm_write(object_t *obj); use _checked_stm_write() instead */ object_t *stm_allocate(ssize_t size_rounded_up); object_t *stm_allocate_prebuilt(ssize_t size_rounded_up); void stm_setup(void); + +bool _checked_stm_write(object_t *obj); +bool _stm_was_read(object_t *obj); +bool _stm_was_written(object_t *obj); +""") + + +TEMPORARILY_DISABLED = """ void stm_teardown(void); void stm_register_thread_local(stm_thread_local_t *tl); void stm_unregister_thread_local(stm_thread_local_t *tl); -void stm_start_transaction(stm_thread_local_t *tl); +void stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf); void stm_start_inevitable_transaction(stm_thread_local_t *tl); void stm_commit_transaction(void); void stm_abort_transaction(void); void stm_become_inevitable(char* msg); +bool _stm_in_nursery(object_t *obj); +char *_stm_real_address(object_t *obj); +object_t *_stm_region_address(char *ptr); -bool _checked_stm_write(object_t *object); -bool _stm_was_read(object_t *object); -bool _stm_was_written(object_t *object); -stm_thread_local_t *_stm_test_switch(stm_thread_local_t *); - -char *_stm_real_address(object_t *o); -object_t *_stm_region_address(char *ptr); -bool _stm_is_young(object_t *o); -""") - - -TEMPORARILY_DISABLED = """ void _stm_start_safe_point(uint8_t); void _stm_stop_safe_point(uint8_t); bool _stm_check_stop_safe_point(void); @@ -130,7 +127,7 @@ #include #include -#include "../stmgc.h" +#include "stmgc.h" struct myobj_s { struct object_s hdr; @@ -144,7 +141,7 @@ return obj->stm_flags; } - +#if 0 bool _checked_stm_become_inevitable() { jmpbufptr_t here; int tn = _STM_TL->thread_num; @@ -158,21 +155,23 @@ _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t*)-1; return 1; } +#endif bool _checked_stm_write(object_t *object) { - jmpbufptr_t here; - int tn = _STM_TL->thread_num; + stm_jmpbuf_t here; + stm_region_info_t *region = STM_REGION; if (__builtin_setjmp(here) == 0) { // returned directly - assert(_STM_TL->jmpbufptr == (jmpbufptr_t*)-1); - _STM_TL->jmpbufptr = &here; - stm_write(object); - _STM_TL->jmpbufptr = (jmpbufptr_t*)-1; - return 0; + assert(region->jmpbuf_ptr == (stm_jmpbuf_t *)-1); + region->jmpbuf_ptr = &here; + stm_write(object); + region->jmpbuf_ptr = (stm_jmpbuf_t *)-1; + return 0; } - _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t*)-1; + region->jmpbuf_ptr = (stm_jmpbuf_t *)-1; return 1; } +#if 0 bool _stm_stop_transaction(void) { jmpbufptr_t here; int tn = _STM_TL->thread_num; @@ -214,6 +213,7 @@ _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t*)-1; return 1; } +#endif void _set_type_id(object_t *obj, uint32_t h) @@ -228,7 +228,7 @@ void _set_ptr(object_t *obj, int n, object_t *v) { - localchar_t *field_addr = ((localchar_t*)obj); + stm_char *field_addr = ((stm_char*)obj); field_addr += SIZEOF_MYOBJ; /* header */ field_addr += n * sizeof(void*); /* field */ object_t * TLPREFIX * field = (object_t * TLPREFIX *)field_addr; @@ -237,7 +237,7 @@ object_t * _get_ptr(object_t *obj, int n) { - localchar_t *field_addr = ((localchar_t*)obj); + stm_char *field_addr = ((stm_char*)obj); field_addr += SIZEOF_MYOBJ; /* header */ field_addr += n * sizeof(void*); /* field */ object_t * TLPREFIX * field = (object_t * TLPREFIX *)field_addr; @@ -294,7 +294,7 @@ pass def is_in_nursery(o): - return lib._stm_is_young(o) + return lib._stm_in_nursery(o) def stm_allocate_old(size): o = lib._stm_allocate_old(size) @@ 
-333,8 +333,8 @@ def stm_get_real_address(obj): return lib._stm_real_address(ffi.cast('object_t*', obj)) -def stm_get_tl_address(ptr): - return int(ffi.cast('uintptr_t', lib._stm_tl_address(ptr))) +def stm_get_region_address(ptr): + return int(ffi.cast('uintptr_t', lib._stm_region_address(ptr))) def stm_read(o): lib.stm_read(o) From noreply at buildbot.pypy.org Sun Feb 9 18:31:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Feb 2014 18:31:31 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: in-progress Message-ID: <20140209173131.D3AEC1D253A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r716:e6463fbbe6f9 Date: 2014-02-09 18:27 +0100 http://bitbucket.org/pypy/stmgc/changeset/e6463fbbe6f9/ Log: in-progress diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -84,3 +84,7 @@ } #endif } + +void stm_teardown(void) +{ +} diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -40,6 +40,7 @@ object_t *stm_allocate_prebuilt(ssize_t size_rounded_up); void stm_setup(void); +void stm_teardown(void); bool _checked_stm_write(object_t *obj); bool _stm_was_read(object_t *obj); @@ -48,7 +49,6 @@ TEMPORARILY_DISABLED = """ -void stm_teardown(void); void stm_register_thread_local(stm_thread_local_t *tl); void stm_unregister_thread_local(stm_thread_local_t *tl); @@ -399,26 +399,26 @@ def setup_method(self, meth): lib.stm_setup() - lib.stm_setup_thread() - lib.stm_setup_thread() - lib._stm_restore_local_state(0) - self.current_thread = 0 +## lib.stm_setup_thread() +## lib.stm_setup_thread() +## lib._stm_restore_local_state(0) +## self.current_thread = 0 def teardown_method(self, meth): - if self.current_thread != 1: - self.switch(1) - if lib._stm_is_in_transaction(): - stm_stop_transaction() +## if self.current_thread != 1: +## self.switch(1) +## if lib._stm_is_in_transaction(): +## stm_stop_transaction() - self.switch(0) - if lib._stm_is_in_transaction(): - stm_stop_transaction() +## self.switch(0) +## if lib._stm_is_in_transaction(): +## stm_stop_transaction() - lib._stm_restore_local_state(1) - lib._stm_teardown_thread() - lib._stm_restore_local_state(0) - lib._stm_teardown_thread() - lib._stm_teardown() +## lib._stm_restore_local_state(1) +## lib._stm_teardown_thread() +## lib._stm_restore_local_state(0) +## lib._stm_teardown_thread() + lib.stm_teardown() def switch(self, thread_num): assert thread_num != self.current_thread From noreply at buildbot.pypy.org Sun Feb 9 21:13:41 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 9 Feb 2014 21:13:41 +0100 (CET) Subject: [pypy-commit] pypy default: msvc makefile fixes for debug builds Message-ID: <20140209201341.C7F021C01F2@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r69107:1f3ad486ca96 Date: 2014-02-09 17:13 +0200 http://bitbucket.org/pypy/pypy/changeset/1f3ad486ca96/ Log: msvc makefile fixes for debug builds diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -432,8 +432,8 @@ mk.definition('ASMFILES', sfiles) mk.definition('ASMLBLFILES', lblsfiles) mk.definition('GCMAPFILES', gcmapfiles) - if sys.platform == 'win32': - mk.definition('DEBUGFLAGS', '/MD /Zi') + if self.translator.platform.name == 'msvc': + mk.definition('DEBUGFLAGS', '-MD -Zi') else: if self.config.translation.shared: mk.definition('DEBUGFLAGS', '-O2 -fomit-frame-pointer -g -fPIC') @@ -489,11 +489,11 @@ mk.rule('.PRECIOUS', '%.s', 
"# don't remove .s files if Ctrl-C'ed") else: - if sys.platform == 'win32': - mk.definition('DEBUGFLAGS', '/MD /Zi') + if self.translator.platform.name == 'msvc': + mk.definition('DEBUGFLAGS', '-MD -Zi') else: mk.definition('DEBUGFLAGS', '-O1 -g') - if sys.platform == 'win32': + if self.translator.platform.name == 'msvc': mk.rule('debug_target', 'debugmode_$(DEFAULT_TARGET)', 'rem') else: mk.rule('debug_target', '$(TARGET)', '#') diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -314,6 +314,7 @@ ('CC_LINK', self.link), ('LINKFILES', eci.link_files), ('MASM', self.masm), + ('MAKE', 'nmake.exe'), ('_WIN32', '1'), ] if self.x64: From noreply at buildbot.pypy.org Sun Feb 9 21:13:43 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 9 Feb 2014 21:13:43 +0100 (CET) Subject: [pypy-commit] pypy default: skip select(file) tests on windows Message-ID: <20140209201343.068F81C01F2@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r69108:a6787c81b80b Date: 2014-02-09 21:59 +0200 http://bitbucket.org/pypy/pypy/changeset/a6787c81b80b/ Log: skip select(file) tests on windows diff --git a/rpython/rlib/test/test_rpoll.py b/rpython/rlib/test/test_rpoll.py --- a/rpython/rlib/test/test_rpoll.py +++ b/rpython/rlib/test/test_rpoll.py @@ -1,5 +1,6 @@ import os import errno +import py from rpython.rlib.rsocket import * from rpython.rlib.rpoll import * @@ -55,6 +56,8 @@ serv.close() def test_select(): + if os.name == 'nt': + py.test.skip('cannot select on file handles on windows') def f(): readend, writeend = os.pipe() try: @@ -72,6 +75,8 @@ interpret(f, []) def test_select_timeout(): + if os.name == 'nt': + py.test.skip('cannot select on file handles on windows') from time import time def f(): # once there was a bug where the sleeping time was doubled From noreply at buildbot.pypy.org Sun Feb 9 21:13:44 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 9 Feb 2014 21:13:44 +0100 (CET) Subject: [pypy-commit] pypy default: more fixes for debug build on windows Message-ID: <20140209201344.229F21C01F2@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r69109:682b833547c5 Date: 2014-02-09 22:11 +0200 http://bitbucket.org/pypy/pypy/changeset/682b833547c5/ Log: more fixes for debug build on windows diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -412,7 +412,7 @@ 'mt.exe -nologo -manifest $*.manifest -outputresource:$@;1', ]) m.rule('debugmode_$(DEFAULT_TARGET)', ['debugmode_$(TARGET)', 'main.obj'], - ['$(CC_LINK) /nologo /DEBUG main.obj $(SHARED_IMPORT_LIB) /out:$@' + ['$(CC_LINK) /nologo /DEBUG main.obj debugmode_$(SHARED_IMPORT_LIB) /out:$@' ]) return m From noreply at buildbot.pypy.org Sun Feb 9 23:11:33 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 9 Feb 2014 23:11:33 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Updates Message-ID: <20140209221133.862F91C1041@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5153:f63e8aa171a5 Date: 2014-02-09 23:11 +0100 http://bitbucket.org/pypy/extradoc/changeset/f63e8aa171a5/ Log: Updates diff --git a/blog/draft/stm-feb2014.txt b/blog/draft/stm-feb2014.txt --- a/blog/draft/stm-feb2014.txt +++ b/blog/draft/stm-feb2014.txt @@ -10,7 +10,11 @@ library handling STM. This is currently work in progress. 
Once this is done, we should be able to adapt the existing pypy-stm to run on top of it without much rewriting efforts; in fact it should simplify the -difficult issues we ran into for the JIT. +difficult issues we ran into for the JIT. So while this is basically +yet another restart similar to last +June's, the difference is that the work that we have put in the PyPy +part (as opposed to the C library) remains. You can read about the basic ideas of this new C library here. @@ -28,9 +32,10 @@ href="http://morepypy.blogspot.com/2013/07/software-transactional-memory-lisp.html">duhton, the interpreter for a minimal language created for the purpose of testing STM. Good results means we brough down the slow-downs from -60-80% to around 15%, from the non-STM-enabled to the STM-enabled -version on one thread (of course, the idea is that the STM version -scales when using more than one CPU core). +60-80% (previous version) to around 15% (current version). This number +measures the slow-down from the non-STM-enabled to the STM-enabled +version, on one CPU core; of course, the idea is that the STM version +scales up when using more than one core. This means that we are looking forward to a result that is much better than originally predicted. The pypy-stm has chances to run at a @@ -38,7 +43,7 @@ a value of "n" that is optimistically 15 --- but more likely some number around 25 or 50. This is seriously better than the original estimate, which was "between 2x and 5x". It would mean that using pypy-stm is -worthwhile even with two cores already. +quite worthwhile even with just two cores. More updates later... From noreply at buildbot.pypy.org Mon Feb 10 00:25:32 2014 From: noreply at buildbot.pypy.org (krono) Date: Mon, 10 Feb 2014 00:25:32 +0100 (CET) Subject: [pypy-commit] pypy default: Tiny color ramp refactoring. Message-ID: <20140209232532.73E881C05CE@cobra.cs.uni-duesseldorf.de> Author: Tobias Pape Branch: Changeset: r69110:3c8a727d84e0 Date: 2014-02-10 00:00 +0100 http://bitbucket.org/pypy/pypy/changeset/3c8a727d84e0/ Log: Tiny color ramp refactoring. 
I promise, it is the last diff --git a/rpython/tool/ansiramp.py b/rpython/tool/ansiramp.py --- a/rpython/tool/ansiramp.py +++ b/rpython/tool/ansiramp.py @@ -3,14 +3,16 @@ def hsv2ansi(h, s, v): # h: 0..1, s/v: 0..1 - if s < 0.001: + if s < 0.1: return int(v * 23) + 232 r, g, b = map(lambda x: int(x * 5), colorsys.hsv_to_rgb(h, s, v)) return 16 + (r * 36) + (g * 6) + b def ramp_idx(i, num): - h = 0.57 + float(i)/num - s = float(num - i) / i if i > (num * 0.85) else 1 + assert num > 0 + i0 = float(i) / num + h = 0.57 + i0 + s = 1 - pow(i0,3) v = 1 return hsv2ansi(h, s, v) @@ -18,3 +20,10 @@ return [ramp_idx(i, num) for i in range(num)] ansi_ramp80 = ansi_ramp(80) + +if __name__ == '__main__': + import sys + from py.io import ansi_print + colors = int(sys.argv[1]) if len(sys.argv) > 1 else 80 + for col in range(colors): + ansi_print('#', "38;5;%d" % ramp_idx(col, colors), newline=False, flush=True) From noreply at buildbot.pypy.org Mon Feb 10 00:25:33 2014 From: noreply at buildbot.pypy.org (krono) Date: Mon, 10 Feb 2014 00:25:33 +0100 (CET) Subject: [pypy-commit] pypy default: Automated merge with ssh://bitbucket.org/pypy/pypy Message-ID: <20140209232533.9E0CA1C05CE@cobra.cs.uni-duesseldorf.de> Author: Tobias Pape Branch: Changeset: r69111:b224e605f614 Date: 2014-02-10 00:00 +0100 http://bitbucket.org/pypy/pypy/changeset/b224e605f614/ Log: Automated merge with ssh://bitbucket.org/pypy/pypy diff --git a/rpython/tool/ansiramp.py b/rpython/tool/ansiramp.py --- a/rpython/tool/ansiramp.py +++ b/rpython/tool/ansiramp.py @@ -3,14 +3,16 @@ def hsv2ansi(h, s, v): # h: 0..1, s/v: 0..1 - if s < 0.001: + if s < 0.1: return int(v * 23) + 232 r, g, b = map(lambda x: int(x * 5), colorsys.hsv_to_rgb(h, s, v)) return 16 + (r * 36) + (g * 6) + b def ramp_idx(i, num): - h = 0.57 + float(i)/num - s = float(num - i) / i if i > (num * 0.85) else 1 + assert num > 0 + i0 = float(i) / num + h = 0.57 + i0 + s = 1 - pow(i0,3) v = 1 return hsv2ansi(h, s, v) @@ -18,3 +20,10 @@ return [ramp_idx(i, num) for i in range(num)] ansi_ramp80 = ansi_ramp(80) + +if __name__ == '__main__': + import sys + from py.io import ansi_print + colors = int(sys.argv[1]) if len(sys.argv) > 1 else 80 + for col in range(colors): + ansi_print('#', "38;5;%d" % ramp_idx(col, colors), newline=False, flush=True) From noreply at buildbot.pypy.org Mon Feb 10 09:16:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Feb 2014 09:16:02 +0100 (CET) Subject: [pypy-commit] pypy default: Argh. I forgot that the _Py_get_xxx_type() functions also need a Message-ID: <20140210081602.0D6501C01F2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69112:87a46a9756fb Date: 2014-02-10 09:15 +0100 http://bitbucket.org/pypy/pypy/changeset/87a46a9756fb/ Log: Argh. I forgot that the _Py_get_xxx_type() functions also need a declaration in the headers. Without it, it would compile and run fine (with warnings), *except* on OS/X, where pointers to built-ins are not within the first 2GB, so the value doesn't fit an "int"... 
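This is the classic implicit-declaration pitfall: without a prototype in scope, older C compilers assume an undeclared function returns 'int', so a 64-bit pointer return value is silently truncated to 32 bits; that is harmless as long as the pointer happens to fit, and fatal on OS/X where it does not. A minimal two-file sketch of the failure mode (illustrative code, not part of the commit below):

    /* provider.c: in a shared library, this object may be mapped above 4GB */
    char some_builtin[64];
    void *get_some_builtin_type(void) { return some_builtin; }

    /* caller.c: no prototype for get_some_builtin_type() anywhere */
    #include <stdio.h>
    int main(void)
    {
        /* the compiler assumes 'int get_some_builtin_type()' and only
           warns; the cast then keeps only the low 32 bits of the pointer */
        void *p = (void *)get_some_builtin_type();
        printf("%p\n", p);
        return 0;
    }

The commit below avoids this by declaring the _Py_get_*_type() helpers in the corresponding headers.
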
diff --git a/pypy/module/cpyext/include/bufferobject.h b/pypy/module/cpyext/include/bufferobject.h --- a/pypy/module/cpyext/include/bufferobject.h +++ b/pypy/module/cpyext/include/bufferobject.h @@ -37,7 +37,7 @@ PyObject* PyBuffer_New(Py_ssize_t size); -void _Py_init_bufferobject(void); +PyTypeObject *_Py_get_buffer_type(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pycapsule.h b/pypy/module/cpyext/include/pycapsule.h --- a/pypy/module/cpyext/include/pycapsule.h +++ b/pypy/module/cpyext/include/pycapsule.h @@ -50,7 +50,7 @@ PyAPI_FUNC(void *) PyCapsule_Import(const char *name, int no_block); -void _Py_init_capsule(void); +PyTypeObject *_Py_get_capsule_type(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pycobject.h b/pypy/module/cpyext/include/pycobject.h --- a/pypy/module/cpyext/include/pycobject.h +++ b/pypy/module/cpyext/include/pycobject.h @@ -48,7 +48,7 @@ } PyCObject; #endif -void _Py_init_pycobject(void); +PyTypeObject *_Py_get_cobject_type(void); #ifdef __cplusplus } From noreply at buildbot.pypy.org Mon Feb 10 09:21:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Feb 2014 09:21:02 +0100 (CET) Subject: [pypy-commit] pypy default: Oups Message-ID: <20140210082102.7CE5A1C0150@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69113:054bd73871cf Date: 2014-02-10 09:20 +0100 http://bitbucket.org/pypy/pypy/changeset/054bd73871cf/ Log: Oups diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -103,8 +103,7 @@ .. _`extension modules`: cpython_differences.html#extension-modules .. _`cpython differences`: cpython_differences.html -.. _`compatibility wiki`: -.. https://bitbucket.org/pypy/compatibility/wiki/Home +.. _`compatibility wiki`: https://bitbucket.org/pypy/compatibility/wiki/Home .. _cffi: http://cffi.readthedocs.org/ --------------------------------- From noreply at buildbot.pypy.org Mon Feb 10 11:00:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Feb 2014 11:00:59 +0100 (CET) Subject: [pypy-commit] pypy default: Test and fix. Message-ID: <20140210100059.4C6121C02A7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69114:8c8b45fbe536 Date: 2014-02-10 11:00 +0100 http://bitbucket.org/pypy/pypy/changeset/8c8b45fbe536/ Log: Test and fix. 
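The diff that follows replaces a manual write loop with a single checked fwrite() call and truncates fread()'s unsigned size_t result to a signed value before comparing it. A plain-C sketch of the same checked-write idea (the helper name is made up for illustration):

    #include <stdio.h>

    /* With an item size of 1, fwrite() returns the number of bytes it
       wrote; a short count only happens when a write error occurred. */
    static int write_all(FILE *f, const char *buf, size_t length)
    {
        size_t written = fwrite(buf, 1, length, f);
        return written == length ? 0 : -1;
    }
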
diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -29,9 +29,9 @@ OFF_T = CC['off_t'] c_open = llexternal('fopen', [rffi.CCHARP, rffi.CCHARP], lltype.Ptr(FILE)) c_close = llexternal('fclose', [lltype.Ptr(FILE)], rffi.INT) -c_write = llexternal('fwrite', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, +c_fwrite = llexternal('fwrite', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, lltype.Ptr(FILE)], rffi.SIZE_T) -c_read = llexternal('fread', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, +c_fread = llexternal('fread', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, lltype.Ptr(FILE)], rffi.SIZE_T) c_feof = llexternal('feof', [lltype.Ptr(FILE)], rffi.INT) c_ferror = llexternal('ferror', [lltype.Ptr(FILE)], rffi.INT) @@ -40,7 +40,7 @@ rffi.INT) c_tmpfile = llexternal('tmpfile', [], lltype.Ptr(FILE)) c_fileno = llexternal('fileno', [lltype.Ptr(FILE)], rffi.INT) -c_ftell = llexternal('ftell', [lltype.Ptr(FILE)], lltype.Signed) +c_ftell = llexternal('ftell', [lltype.Ptr(FILE)], rffi.LONG) c_fflush = llexternal('fflush', [lltype.Ptr(FILE)], rffi.INT) c_ftruncate = llexternal('ftruncate', [rffi.INT, OFF_T], rffi.INT, macro=True) @@ -89,18 +89,11 @@ try: # note that since we got a nonmoving buffer, it is either raw # or already cannot move, so the arithmetics below are fine - total_bytes = 0 - ll_current = ll_value - while total_bytes < len(value): - bytes = c_write(ll_current, 1, len(value) - r_uint(total_bytes), - ll_file) - if bytes == 0: - errno = rposix.get_errno() - raise OSError(errno, os.strerror(errno)) - total_bytes += bytes - ll_current = rffi.cast(rffi.CCHARP, - rffi.cast(lltype.Unsigned, ll_value) + - total_bytes) + length = len(value) + bytes = c_fwrite(ll_value, 1, length, ll_file) + if bytes != length: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) finally: rffi.free_nonmovingbuffer(value, ll_value) @@ -124,7 +117,8 @@ try: s = StringBuilder() while True: - returned_size = c_read(buf, 1, BASE_BUF_SIZE, ll_file) + returned_size = c_fread(buf, 1, BASE_BUF_SIZE, ll_file) + returned_size = intmask(returned_size) # is between 0 and BASE_BUF_SIZE if returned_size == 0: if c_feof(ll_file): # ok, finished @@ -138,13 +132,13 @@ else: raw_buf, gc_buf = rffi.alloc_buffer(size) try: - returned_size = c_read(raw_buf, 1, size, ll_file) + returned_size = c_fread(raw_buf, 1, size, ll_file) + returned_size = intmask(returned_size) # is between 0 and size if returned_size == 0: if not c_feof(ll_file): errno = c_ferror(ll_file) raise OSError(errno, os.strerror(errno)) - s = rffi.str_from_buffer(raw_buf, gc_buf, size, - rffi.cast(lltype.Signed, returned_size)) + s = rffi.str_from_buffer(raw_buf, gc_buf, size, returned_size) finally: rffi.keep_buffer_alive_until_here(raw_buf, gc_buf) return s diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -142,6 +142,15 @@ cls.tmpdir = udir.join('test_rfile_direct') cls.tmpdir.ensure(dir=True) + def test_read_a_lot(self): + fname = str(self.tmpdir.join('file_read_a_lot')) + with open(fname, 'w') as f: + f.write('dupa' * 999) + f = rfile.create_file(fname, 'r') + s = f.read() + assert s == 'dupa' * 999 + f.close() + def test_readline(self): fname = str(self.tmpdir.join('file_readline')) j = 0 From noreply at buildbot.pypy.org Mon Feb 10 11:10:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Feb 2014 11:10:12 +0100 (CET) Subject: [pypy-commit] pypy default: Add create_popen_file(), which 
calls directly the C library's popen() Message-ID: <20140210101012.482E21C0150@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69115:56fc72b18321 Date: 2014-02-10 11:09 +0100 http://bitbucket.org/pypy/pypy/changeset/56fc72b18321/ Log: Add create_popen_file(), which calls directly the C library's popen() function. diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -47,6 +47,9 @@ c_fgets = llexternal('fgets', [rffi.CCHARP, rffi.INT, lltype.Ptr(FILE)], rffi.CCHARP) +c_popen = llexternal('popen', [rffi.CCHARP, rffi.CCHARP], lltype.Ptr(FILE)) +c_pclose = llexternal('pclose', [lltype.Ptr(FILE)], rffi.INT) + BASE_BUF_SIZE = 4096 BASE_LINE_SIZE = 100 @@ -75,6 +78,21 @@ raise OSError(errno, os.strerror(errno)) return RFile(res) +def create_popen_file(command, type): + ll_command = rffi.str2charp(command) + try: + ll_type = rffi.str2charp(type) + try: + ll_f = c_popen(ll_command, ll_type) + if not ll_f: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) + finally: + lltype.free(ll_type, flavor='raw') + finally: + lltype.free(ll_command, flavor='raw') + return RPopenFile(ll_f) + class RFile(object): def __init__(self, ll_file): self.ll_file = ll_file @@ -100,12 +118,15 @@ def close(self): if self.ll_file: # double close is allowed - res = c_close(self.ll_file) + res = self._do_close() self.ll_file = lltype.nullptr(FILE) if res == -1: errno = rposix.get_errno() raise OSError(errno, os.strerror(errno)) + def _do_close(self): + return c_close(self.ll_file) + def read(self, size=-1): # XXX CPython uses a more delicate logic here ll_file = self.ll_file @@ -234,3 +255,9 @@ finally: rffi.keep_buffer_alive_until_here(raw_buf, gc_buf) raise ValueError("I/O operation on closed file") + + +class RPopenFile(RFile): + + def _do_close(self): + return c_pclose(self.ll_file) diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -1,5 +1,5 @@ -import os +import os, sys, py from rpython.rtyper.test.tool import BaseRtypingTest from rpython.tool.udir import udir from rpython.rlib import rfile @@ -184,3 +184,15 @@ got = f.readline() assert got == '' f.close() + + +class TestPopen: + def setup_class(cls): + if sys.platform == 'win32': + py.test.skip("not for win32") + + def test_popen(self): + f = rfile.create_popen_file("python -c 'print 42'", "r") + s = f.read() + f.close() + assert s == '42\n' From noreply at buildbot.pypy.org Mon Feb 10 11:14:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Feb 2014 11:14:34 +0100 (CET) Subject: [pypy-commit] pypy default: Prevent a race condition whereby thread B can still use an RFile Message-ID: <20140210101434.498AF1C01F2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69116:0388bb74d17c Date: 2014-02-10 11:13 +0100 http://bitbucket.org/pypy/pypy/changeset/0388bb74d17c/ Log: Prevent a race condition whereby thread B can still use an RFile while thread A is blocked in the call to fclose() or pclose(). 
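The fix below first detaches the FILE handle from the object and only then calls fclose()/pclose() on the local copy, so a second thread inspecting the object during the blocking close sees a NULL handle rather than one that is already being closed. A sketch of the same detach-then-close ordering in plain C (illustrative only; a real C version would also want an atomic swap of the field):

    #include <stdio.h>
    #include <stddef.h>

    struct rfile { FILE *f; };

    static int rfile_close(struct rfile *self)
    {
        FILE *f = self->f;
        if (f == NULL)
            return 0;          /* double close is allowed */
        self->f = NULL;        /* publish "closed" before blocking in fclose() */
        return fclose(f);
    }
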
diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -116,16 +116,16 @@ rffi.free_nonmovingbuffer(value, ll_value) def close(self): - if self.ll_file: + ll_f = self.ll_file + if ll_f: # double close is allowed - res = self._do_close() self.ll_file = lltype.nullptr(FILE) + res = self._do_close(ll_f) if res == -1: errno = rposix.get_errno() raise OSError(errno, os.strerror(errno)) - def _do_close(self): - return c_close(self.ll_file) + _do_close = staticmethod(c_close) # overridden in RPopenFile def read(self, size=-1): # XXX CPython uses a more delicate logic here @@ -258,6 +258,4 @@ class RPopenFile(RFile): - - def _do_close(self): - return c_pclose(self.ll_file) + _do_close = staticmethod(c_pclose) From noreply at buildbot.pypy.org Mon Feb 10 11:22:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Feb 2014 11:22:06 +0100 (CET) Subject: [pypy-commit] pypy default: c_clearerror() inconsistently called. Fix Message-ID: <20140210102206.9B5EB1C0150@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69117:0b6cac2a8414 Date: 2014-02-10 11:21 +0100 http://bitbucket.org/pypy/pypy/changeset/0b6cac2a8414/ Log: c_clearerror() inconsistently called. Fix diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -144,9 +144,7 @@ if c_feof(ll_file): # ok, finished return s.build() - errno = c_ferror(ll_file) - c_clearerror(ll_file) - raise OSError(errno, os.strerror(errno)) + raise _error(ll_file) s.append_charpsize(buf, returned_size) finally: lltype.free(buf, flavor='raw') @@ -157,8 +155,7 @@ returned_size = intmask(returned_size) # is between 0 and size if returned_size == 0: if not c_feof(ll_file): - errno = c_ferror(ll_file) - raise OSError(errno, os.strerror(errno)) + raise _error(ll_file) s = rffi.str_from_buffer(raw_buf, gc_buf, size, returned_size) finally: rffi.keep_buffer_alive_until_here(raw_buf, gc_buf) @@ -215,8 +212,7 @@ if not result: if c_feof(self.ll_file): # ok return 0 - errno = c_ferror(self.ll_file) - raise OSError(errno, os.strerror(errno)) + raise _error(self.ll_file) # # Assume that fgets() works as documented, and additionally # never writes beyond the final \0, which the CPython @@ -259,3 +255,9 @@ class RPopenFile(RFile): _do_close = staticmethod(c_pclose) + + +def _error(ll_file): + errno = c_ferror(ll_file) + c_clearerror(ll_file) + raise OSError(errno, os.strerror(errno)) From noreply at buildbot.pypy.org Mon Feb 10 12:03:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Feb 2014 12:03:13 +0100 (CET) Subject: [pypy-commit] pypy default: Kill this outdated comment line: it should be implemented on any Message-ID: <20140210110313.102A51C0F47@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69118:2c9c60b7e067 Date: 2014-02-10 12:02 +0100 http://bitbucket.org/pypy/pypy/changeset/2c9c60b7e067/ Log: Kill this outdated comment line: it should be implemented on any reasonable GC nowadays. diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -15,7 +15,6 @@ def set_max_heap_size(nbytes): """Limit the heap size to n bytes. - So far only implemented by the Boehm GC and the semispace/generation GCs. 
""" pass From noreply at buildbot.pypy.org Mon Feb 10 16:38:53 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Feb 2014 16:38:53 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: in-progress Message-ID: <20140210153853.D6C371C35D0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r717:9f7a1243b6ad Date: 2014-02-10 16:34 +0100 http://bitbucket.org/pypy/stmgc/changeset/9f7a1243b6ad/ Log: in-progress diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -1,6 +1,16 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif void _stm_write_slowpath(object_t *obj) { abort(); } + +void stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf) +{ + /* GS invalid before this point! */ + _stm_stop_safe_point(LOCK_COLLECT|THREAD_YIELD); + +} diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -1,6 +1,13 @@ +#define _STM_CORE_H_ + +#include +#include +#include +#include + #define NB_PAGES (1500*256) // 1500MB -#define NB_THREADS 2 +#define NB_REGIONS 2 #define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) #define LARGE_OBJECT_WORDS 36 #define NB_NURSERY_PAGES 1024 // 4MB @@ -8,13 +15,13 @@ #define NURSERY_SECTION_SIZE (24*4096) -#define TOTAL_MEMORY (NB_PAGES * 4096UL * NB_THREADS) +#define TOTAL_MEMORY (NB_PAGES * 4096UL * NB_REGIONS) #define READMARKER_END ((NB_PAGES * 4096UL) >> 4) -#define START_OBJECT_PAGE ((READMARKER_END + 4095) / 4096UL) -#define START_NURSERY_PAGE START_OBJECT_PAGE -#define READMARKER_START ((START_OBJECT_PAGE * 4096UL) >> 4) -#define START_READMARKER_PAGE (READMARKER_START / 4096UL) -#define STOP_NURSERY_PAGE (START_NURSERY_PAGE + NB_NURSERY_PAGES) +#define FIRST_OBJECT_PAGE ((READMARKER_END + 4095) / 4096UL) +#define FIRST_NURSERY_PAGE FIRST_OBJECT_PAGE +#define READMARKER_START ((FIRST_OBJECT_PAGE * 4096UL) >> 4) +#define FIRST_READMARKER_PAGE (READMARKER_START / 4096UL) +#define END_NURSERY_PAGE (FIRST_NURSERY_PAGE + NB_NURSERY_PAGES) enum { @@ -43,5 +50,22 @@ struct stm_region_info_s pub; }; +static char *stm_object_pages; +static stm_thread_local_t *stm_thread_locals = NULL; -#define REAL_ADDRESS(thread_base, src) ((thread_base) + (uintptr_t)(src)) + +#define REAL_ADDRESS(region_base, src) ((region_base) + (uintptr_t)(src)) + +static inline char *get_region_base(long region_num) { + return stm_object_pages + region_num * (NB_PAGES * 4096UL); +} + +static inline struct stm_region_info_s *get_region(long region_num) { + return (struct stm_region_info_s *)REAL_ADDRESS( + get_region_base(region_num), STM_PREGION); +} + +static inline struct stm_priv_region_info_s *get_priv_region(long region_num) { + return (struct stm_priv_region_info_s *)REAL_ADDRESS( + get_region_base(region_num), STM_PREGION); +} diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -1,3 +1,7 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + stm_char *_stm_allocate_slowpath(ssize_t size_rounded_up) { diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -1,4 +1,6 @@ -#include +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif char *_stm_real_address(object_t *o) @@ -6,7 +8,7 @@ if (o == NULL) return NULL; - assert(START_OBJECT_PAGE * 4096UL <= (uintptr_t)o + assert(FIRST_OBJECT_PAGE * 4096UL <= (uintptr_t)o && (uintptr_t)o < NB_PAGES * 4096UL); return REAL_ADDRESS(STM_REGION->region_base, o); } @@ -17,7 +19,7 @@ return NULL; uintptr_t res = 
ptr - STM_REGION->region_base; - assert(START_OBJECT_PAGE * 4096UL <= res + assert(FIRST_OBJECT_PAGE * 4096UL <= res && res < NB_PAGES * 4096UL); return (object_t*)res; } diff --git a/c7/stm/pages.c b/c7/stm/pages.c new file mode 100644 --- /dev/null +++ b/c7/stm/pages.c @@ -0,0 +1,30 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count) +{ + /* call remap_file_pages() to make all pages in the + range(pagenum, pagenum+count) refer to the same + physical range of pages from region 0 */ + long i; + for (i = 1; i < NB_REGIONS; i++) { + char *region_base = get_region_base(i); + int res = remap_file_pages(region_base + pagenum * 4096UL, + count * 4096UL, + 0, pagenum, 0); + if (res != 0) { + perror("remap_file_pages"); + abort(); + } + } + for (; count > 0; count--) { + flag_page_private[pagenum++] = SHARED_PAGE; + } +} + +static void _pages_privatize(uintptr_t pagenum, uintptr_t count) +{ + abort(); +} diff --git a/c7/stm/pages.h b/c7/stm/pages.h new file mode 100644 --- /dev/null +++ b/c7/stm/pages.h @@ -0,0 +1,31 @@ + +enum { + /* The page is not in use. Assume that each region sees its own copy. */ + FREE_PAGE=0, + + /* The page is shared by all threads. Each region sees the same + physical page (the one that is within the region 0 mmap address). */ + SHARED_PAGE, + + /* Page being in the process of privatization */ + REMAPPING_PAGE, + + /* Page private for each thread */ + PRIVATE_PAGE, + +}; /* used for flag_page_private */ + + +static uint8_t flag_page_private[NB_PAGES]; + + +static void _pages_privatize(uintptr_t pagenum, uintptr_t count); +static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); + +inline static void pages_privatize(uintptr_t pagenum, uintptr_t count) { + while (flag_page_private[pagenum + count - 1] == PRIVATE_PAGE) { + if (!--count) + return; + } + _pages_privatize(pagenum, count); +} diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -1,3 +1,6 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif void stm_setup(void) @@ -7,84 +10,93 @@ _stm_reset_pages(); inevitable_lock = 0; - +#endif + /* Check that some values are acceptable */ - assert(4096 <= ((uintptr_t)_STM_TL)); - assert(((uintptr_t)_STM_TL) == ((uintptr_t)_STM_TL)); - assert(((uintptr_t)_STM_TL) + sizeof(*_STM_TL) <= 8192); + assert(4096 <= ((uintptr_t)STM_REGION)); + assert((uintptr_t)STM_REGION == (uintptr_t)STM_PREGION); + assert(((uintptr_t)STM_PREGION) + sizeof(*STM_PREGION) <= 8192); assert(2 <= FIRST_READMARKER_PAGE); assert(FIRST_READMARKER_PAGE * 4096UL <= READMARKER_START); assert(READMARKER_START < READMARKER_END); assert(READMARKER_END <= 4096UL * FIRST_OBJECT_PAGE); assert(FIRST_OBJECT_PAGE < NB_PAGES); - assert((NB_NURSERY_PAGES * 4096) % NURSERY_SECTION == 0); - object_pages = mmap(NULL, TOTAL_MEMORY, - PROT_READ | PROT_WRITE, - MAP_PAGES_FLAGS, -1, 0); - if (object_pages == MAP_FAILED) { - perror("object_pages mmap"); + stm_object_pages = mmap(NULL, TOTAL_MEMORY, + PROT_READ | PROT_WRITE, + MAP_PAGES_FLAGS, -1, 0); + if (stm_object_pages == MAP_FAILED) { + perror("stm_object_pages mmap"); abort(); } long i; - for (i = 0; i < NB_THREADS; i++) { - char *thread_base = get_thread_base(i); + for (i = 0; i < NB_REGIONS; i++) { + char *region_base = get_region_base(i); - /* In each thread's section, the first page is where TLPREFIX'ed + /* In each region, the first page is where TLPREFIX'ed NULL accesses land. 
We mprotect it so that accesses fail. */ - mprotect(thread_base, 4096, PROT_NONE); + mprotect(region_base, 4096, PROT_NONE); - /* Fill the TLS page (page 1) with 0xDD */ - memset(REAL_ADDRESS(thread_base, 4096), 0xDD, 4096); - /* Make a "hole" at _STM_TL / _STM_TL */ - memset(REAL_ADDRESS(thread_base, _STM_TL), 0, sizeof(*_STM_TL)); + /* Fill the TLS page (page 1) with 0xDD, for debugging */ + memset(REAL_ADDRESS(region_base, 4096), 0xDD, 4096); + /* Make a "hole" at STM_PREGION */ + memset(REAL_ADDRESS(region_base, STM_PREGION), 0, + sizeof(*STM_PREGION)); /* Pages in range(2, FIRST_READMARKER_PAGE) are never used */ if (FIRST_READMARKER_PAGE > 2) - mprotect(thread_base + 8192, (FIRST_READMARKER_PAGE - 2) * 4096UL, - PROT_NONE); + mprotect(region_base + 8192, (FIRST_READMARKER_PAGE - 2) * 4096UL, + PROT_NONE); - struct _thread_local1_s *th = - (struct _thread_local1_s *)REAL_ADDRESS(thread_base, _STM_TL); - - th->thread_num = i; - th->thread_base = thread_base; - - if (i > 0) { - int res; - res = remap_file_pages( - thread_base + FIRST_AFTER_NURSERY_PAGE * 4096UL, - (NB_PAGES - FIRST_AFTER_NURSERY_PAGE) * 4096UL, - 0, FIRST_AFTER_NURSERY_PAGE, 0); - - if (res != 0) { - perror("remap_file_pages"); - abort(); - } - } + struct stm_priv_region_info_s *pr = get_priv_region(i); + pr->pub.region_num = i; + pr->pub.region_base = region_base; } - for (i = FIRST_NURSERY_PAGE; i < FIRST_AFTER_NURSERY_PAGE; i++) - stm_set_page_flag(i, PRIVATE_PAGE); /* nursery is private. - or should it be UNCOMMITTED??? */ - - num_threads_started = 0; + /* Make the nursery pages shared. The other pages are + shared lazily, as remap_file_pages() takes a relatively + long time for each page. */ + pages_initialize_shared(FIRST_NURSERY_PAGE, NB_NURSERY_PAGES); - assert(HEAP_PAGES < NB_PAGES - FIRST_AFTER_NURSERY_PAGE); - assert(HEAP_PAGES > 10); - - uintptr_t first_heap = stm_pages_reserve(HEAP_PAGES); - char *heap = REAL_ADDRESS(get_thread_base(0), first_heap * 4096UL); - assert(memset(heap, 0xcd, HEAP_PAGES * 4096)); // testing +#if 0 stm_largemalloc_init(heap, HEAP_PAGES * 4096UL); - - for (i = 0; i < NB_THREADS; i++) { - _stm_setup_static_thread(); - } #endif } void stm_teardown(void) { + /* This function is called during testing, but normal programs don't + need to call it. 
*/ + munmap(stm_object_pages, TOTAL_MEMORY); + stm_object_pages = NULL; + + memset(flag_page_private, 0, sizeof(flag_page_private)); } + +void stm_register_thread_local(stm_thread_local_t *tl) +{ + if (stm_thread_locals == NULL) { + stm_thread_locals = tl->next = tl->prev = tl; + } + else { + tl->next = stm_thread_locals; + tl->prev = stm_thread_locals->prev; + stm_thread_locals->prev->next = tl; + stm_thread_locals->prev = tl; + } + tl->associated_region = get_region(0); +} + +void stm_unregister_thread_local(stm_thread_local_t *tl) +{ + if (tl == stm_thread_locals) { + stm_thread_locals = stm_thread_locals->next; + if (tl == stm_thread_locals) { + stm_thread_locals = NULL; + return; + } + } + tl->prev->next = tl->next; + tl->next->prev = tl->prev; +} diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -1,7 +1,10 @@ +#define _GNU_SOURCE #include "stmgc.h" #include "stm/core.h" +#include "stm/pages.h" #include "stm/misc.c" #include "stm/core.c" +#include "stm/pages.c" #include "stm/gcpage.c" #include "stm/setup.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -42,9 +42,10 @@ struct stm_region_info_s { uint8_t transaction_read_version; + int region_num; + char *region_base; stm_char *nursery_current; uintptr_t nursery_section_end; - char *region_base; struct stm_thread_local_s *running_thread; stm_jmpbuf_t *jmpbuf_ptr; }; @@ -54,7 +55,7 @@ /* every thread should handle the shadow stack itself */ object_t **shadowstack, **shadowstack_base; /* the next fields are handled automatically by the library */ - stm_region_info_t *running_in_region; + struct stm_region_info_s *associated_region; struct stm_thread_local_s *prev, *next; } stm_thread_local_t; @@ -147,10 +148,9 @@ void stm_commit_transaction(void); void stm_abort_transaction(void); -#define STM_START_TRANSACTION(tl) ({ \ - stm_jmpbuf_t _buf; \ - int _restart = __builtin_setjmp(_buf); \ - stm_start_transaction(tl, _buf); \ +#define STM_START_TRANSACTION(tl, jmpbuf) ({ \ + int _restart = __builtin_setjmp(jmpbuf); \ + stm_start_transaction(tl, jmpbuf); \ _restart; \ }) diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -9,8 +9,11 @@ parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) source_files = [os.path.join(parent_dir, "stmgc.c")] -all_files = [os.path.join(parent_dir, _n) for _n in os.listdir(parent_dir) - if _n.endswith('.h') or _n.endswith('.c')] +all_files = [os.path.join(parent_dir, "stmgc.h"), + os.path.join(parent_dir, "stmgc.c")] + [ + os.path.join(parent_dir, 'stm', _n) + for _n in os.listdir(os.path.join(parent_dir, 'stm')) + if _n.endswith('.h') or _n.endswith('.c')] _pycache_ = os.path.join(parent_dir, 'test', '__pycache__') if os.path.exists(_pycache_): @@ -27,6 +30,7 @@ ffi = cffi.FFI() ffi.cdef(""" typedef ... object_t; +typedef ... stm_jmpbuf_t; #define SIZEOF_MYOBJ ... 
typedef struct { @@ -45,14 +49,15 @@ bool _checked_stm_write(object_t *obj); bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); -""") - -TEMPORARILY_DISABLED = """ void stm_register_thread_local(stm_thread_local_t *tl); void stm_unregister_thread_local(stm_thread_local_t *tl); void stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf); +""") + + +TEMPORARILY_DISABLED = """ void stm_start_inevitable_transaction(stm_thread_local_t *tl); void stm_commit_transaction(void); void stm_abort_transaction(void); @@ -355,9 +360,6 @@ def stm_pop_root(): return lib.stm_pop_root() -def stm_start_transaction(): - lib.stm_start_transaction(ffi.cast("jmpbufptr_t*", -1)) - def stm_stop_transaction(): if lib._stm_stop_transaction(): raise Conflict() @@ -394,31 +396,34 @@ def stm_get_flags(o): return lib._stm_get_flags(o) +def _allocate_thread_local(): + tl = ffi.new("stm_thread_local_t *") + lib.stm_register_thread_local(tl) + return tl + class BaseTest(object): def setup_method(self, meth): lib.stm_setup() -## lib.stm_setup_thread() -## lib.stm_setup_thread() -## lib._stm_restore_local_state(0) -## self.current_thread = 0 + self.tls = [_allocate_thread_local(), _allocate_thread_local()] + self.current_thread = 0 + self.running_transaction = set() def teardown_method(self, meth): -## if self.current_thread != 1: -## self.switch(1) -## if lib._stm_is_in_transaction(): -## stm_stop_transaction() + for n in sorted(self.running_transaction): + self.switch(n) + self.abort_transaction() + for tl in self.tls: + lib.stm_unregister_thread_local(tl) + lib.stm_teardown() -## self.switch(0) -## if lib._stm_is_in_transaction(): -## stm_stop_transaction() - -## lib._stm_restore_local_state(1) -## lib._stm_teardown_thread() -## lib._stm_restore_local_state(0) -## lib._stm_teardown_thread() - lib.stm_teardown() + def start_transaction(self): + n = self.current_thread + assert n not in self.running_transaction + tl = self.tls[n] + lib.stm_start_transaction(tl, ffi.cast("stm_jmpbuf_t *", -1)) + self.running_transaction.add(n) def switch(self, thread_num): assert thread_num != self.current_thread @@ -428,5 +433,3 @@ lib._stm_restore_local_state(thread_num) if lib._stm_is_in_transaction(): stm_stop_safe_point() # can raise Conflict - - diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -7,7 +7,7 @@ pass def test_thread_local_allocations(self): - stm_start_transaction() + self.start_transaction() lp1 = stm_allocate(16) lp2 = stm_allocate(16) assert is_in_nursery(lp1) From noreply at buildbot.pypy.org Mon Feb 10 16:38:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Feb 2014 16:38:54 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Rename "region" -> "segment". Message-ID: <20140210153854.D79EB1C35D0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r718:3b6d316ef539 Date: 2014-02-10 16:38 +0100 http://bitbucket.org/pypy/stmgc/changeset/3b6d316ef539/ Log: Rename "region" -> "segment". 
diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -7,7 +7,7 @@ #define NB_PAGES (1500*256) // 1500MB -#define NB_REGIONS 2 +#define NB_SEGMENTS 2 #define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) #define LARGE_OBJECT_WORDS 36 #define NB_NURSERY_PAGES 1024 // 4MB @@ -15,7 +15,7 @@ #define NURSERY_SECTION_SIZE (24*4096) -#define TOTAL_MEMORY (NB_PAGES * 4096UL * NB_REGIONS) +#define TOTAL_MEMORY (NB_PAGES * 4096UL * NB_SEGMENTS) #define READMARKER_END ((NB_PAGES * 4096UL) >> 4) #define FIRST_OBJECT_PAGE ((READMARKER_END + 4095) / 4096UL) #define FIRST_NURSERY_PAGE FIRST_OBJECT_PAGE @@ -42,30 +42,32 @@ }; -#define STM_PREGION ((stm_priv_region_info_t *)STM_REGION) +#define STM_PSEGMENT ((stm_priv_segment_info_t *)STM_SEGMENT) -typedef TLPREFIX struct stm_priv_region_info_s stm_priv_region_info_t; +typedef TLPREFIX struct stm_priv_segment_info_s stm_priv_segment_info_t; -struct stm_priv_region_info_s { - struct stm_region_info_s pub; +struct stm_priv_segment_info_s { + struct stm_segment_info_s pub; }; static char *stm_object_pages; static stm_thread_local_t *stm_thread_locals = NULL; -#define REAL_ADDRESS(region_base, src) ((region_base) + (uintptr_t)(src)) +#define REAL_ADDRESS(segment_base, src) ((segment_base) + (uintptr_t)(src)) -static inline char *get_region_base(long region_num) { - return stm_object_pages + region_num * (NB_PAGES * 4096UL); +static inline char *get_segment_base(long segment_num) { + return stm_object_pages + segment_num * (NB_PAGES * 4096UL); } -static inline struct stm_region_info_s *get_region(long region_num) { - return (struct stm_region_info_s *)REAL_ADDRESS( - get_region_base(region_num), STM_PREGION); +static inline +struct stm_segment_info_s *get_segment(long segment_num) { + return (struct stm_segment_info_s *)REAL_ADDRESS( + get_segment_base(segment_num), STM_PSEGMENT); } -static inline struct stm_priv_region_info_s *get_priv_region(long region_num) { - return (struct stm_priv_region_info_s *)REAL_ADDRESS( - get_region_base(region_num), STM_PREGION); +static inline +struct stm_priv_segment_info_s *get_priv_segment(long segment_num) { + return (struct stm_priv_segment_info_s *)REAL_ADDRESS( + get_segment_base(segment_num), STM_PSEGMENT); } diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -10,15 +10,15 @@ assert(FIRST_OBJECT_PAGE * 4096UL <= (uintptr_t)o && (uintptr_t)o < NB_PAGES * 4096UL); - return REAL_ADDRESS(STM_REGION->region_base, o); + return REAL_ADDRESS(STM_SEGMENT->segment_base, o); } -object_t *_stm_region_address(char *ptr) +object_t *_stm_segment_address(char *ptr) { if (ptr == NULL) return NULL; - uintptr_t res = ptr - STM_REGION->region_base; + uintptr_t res = ptr - STM_SEGMENT->segment_base; assert(FIRST_OBJECT_PAGE * 4096UL <= res && res < NB_PAGES * 4096UL); return (object_t*)res; @@ -27,7 +27,7 @@ bool _stm_was_read(object_t *obj) { return ((stm_read_marker_t *)(((uintptr_t)obj) >> 4))->rm == - STM_REGION->transaction_read_version; + STM_SEGMENT->transaction_read_version; } bool _stm_was_written(object_t *obj) diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -7,11 +7,11 @@ { /* call remap_file_pages() to make all pages in the range(pagenum, pagenum+count) refer to the same - physical range of pages from region 0 */ + physical range of pages from segment 0 */ long i; - for (i = 1; i < NB_REGIONS; i++) { - char *region_base = get_region_base(i); - int res = remap_file_pages(region_base + pagenum * 4096UL, + for 
(i = 1; i < NB_SEGMENTS; i++) { + char *segment_base = get_segment_base(i); + int res = remap_file_pages(segment_base + pagenum * 4096UL, count * 4096UL, 0, pagenum, 0); if (res != 0) { diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -1,10 +1,10 @@ enum { - /* The page is not in use. Assume that each region sees its own copy. */ + /* The page is not in use. Assume that each segment sees its own copy. */ FREE_PAGE=0, - /* The page is shared by all threads. Each region sees the same - physical page (the one that is within the region 0 mmap address). */ + /* The page is shared by all threads. Each segment sees the same + physical page (the one that is within the segment 0 mmap address). */ SHARED_PAGE, /* Page being in the process of privatization */ diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -13,9 +13,9 @@ #endif /* Check that some values are acceptable */ - assert(4096 <= ((uintptr_t)STM_REGION)); - assert((uintptr_t)STM_REGION == (uintptr_t)STM_PREGION); - assert(((uintptr_t)STM_PREGION) + sizeof(*STM_PREGION) <= 8192); + assert(4096 <= ((uintptr_t)STM_SEGMENT)); + assert((uintptr_t)STM_SEGMENT == (uintptr_t)STM_PSEGMENT); + assert(((uintptr_t)STM_PSEGMENT) + sizeof(*STM_PSEGMENT) <= 8192); assert(2 <= FIRST_READMARKER_PAGE); assert(FIRST_READMARKER_PAGE * 4096UL <= READMARKER_START); assert(READMARKER_START < READMARKER_END); @@ -31,27 +31,27 @@ } long i; - for (i = 0; i < NB_REGIONS; i++) { - char *region_base = get_region_base(i); + for (i = 0; i < NB_SEGMENTS; i++) { + char *segment_base = get_segment_base(i); - /* In each region, the first page is where TLPREFIX'ed + /* In each segment, the first page is where TLPREFIX'ed NULL accesses land. We mprotect it so that accesses fail. */ - mprotect(region_base, 4096, PROT_NONE); + mprotect(segment_base, 4096, PROT_NONE); /* Fill the TLS page (page 1) with 0xDD, for debugging */ - memset(REAL_ADDRESS(region_base, 4096), 0xDD, 4096); - /* Make a "hole" at STM_PREGION */ - memset(REAL_ADDRESS(region_base, STM_PREGION), 0, - sizeof(*STM_PREGION)); + memset(REAL_ADDRESS(segment_base, 4096), 0xDD, 4096); + /* Make a "hole" at STM_PSEGMENT */ + memset(REAL_ADDRESS(segment_base, STM_PSEGMENT), 0, + sizeof(*STM_PSEGMENT)); /* Pages in range(2, FIRST_READMARKER_PAGE) are never used */ if (FIRST_READMARKER_PAGE > 2) - mprotect(region_base + 8192, (FIRST_READMARKER_PAGE - 2) * 4096UL, + mprotect(segment_base + 8192, (FIRST_READMARKER_PAGE - 2) * 4096UL, PROT_NONE); - struct stm_priv_region_info_s *pr = get_priv_region(i); - pr->pub.region_num = i; - pr->pub.region_base = region_base; + struct stm_priv_segment_info_s *pr = get_priv_segment(i); + pr->pub.segment_num = i; + pr->pub.segment_base = segment_base; } /* Make the nursery pages shared. 
The other pages are @@ -85,7 +85,7 @@ stm_thread_locals->prev->next = tl; stm_thread_locals->prev = tl; } - tl->associated_region = get_region(0); + tl->associated_segment = get_segment(0); } void stm_unregister_thread_local(stm_thread_local_t *tl) diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -31,7 +31,7 @@ #define TLPREFIX __attribute__((address_space(256))) typedef TLPREFIX struct object_s object_t; -typedef TLPREFIX struct stm_region_info_s stm_region_info_t; +typedef TLPREFIX struct stm_segment_info_s stm_segment_info_t; typedef TLPREFIX struct stm_read_marker_s stm_read_marker_t; typedef TLPREFIX char stm_char; typedef void* stm_jmpbuf_t[5]; /* for use with __builtin_setjmp() */ @@ -40,22 +40,22 @@ uint8_t rm; }; -struct stm_region_info_s { +struct stm_segment_info_s { uint8_t transaction_read_version; - int region_num; - char *region_base; + int segment_num; + char *segment_base; stm_char *nursery_current; uintptr_t nursery_section_end; struct stm_thread_local_s *running_thread; stm_jmpbuf_t *jmpbuf_ptr; }; -#define STM_REGION ((stm_region_info_t *)4352) +#define STM_SEGMENT ((stm_segment_info_t *)4352) typedef struct stm_thread_local_s { /* every thread should handle the shadow stack itself */ object_t **shadowstack, **shadowstack_base; /* the next fields are handled automatically by the library */ - struct stm_region_info_s *associated_region; + struct stm_segment_info_s *associated_segment; struct stm_thread_local_s *prev, *next; } stm_thread_local_t; @@ -70,7 +70,7 @@ bool _stm_was_written(object_t *obj); bool _stm_in_nursery(object_t *obj); char *_stm_real_address(object_t *o); -object_t *_stm_region_address(char *ptr); +object_t *_stm_segment_address(char *ptr); #endif #define _STM_GCFLAG_WRITE_BARRIER 0x01 @@ -108,7 +108,7 @@ static inline void stm_read(object_t *obj) { ((stm_read_marker_t *)(((uintptr_t)obj) >> 4))->rm = - STM_REGION->transaction_read_version; + STM_SEGMENT->transaction_read_version; } static inline void stm_write(object_t *obj) @@ -128,10 +128,10 @@ OPT_ASSERT(size_rounded_up >= 16); OPT_ASSERT((size_rounded_up & 7) == 0); - stm_char *p = STM_REGION->nursery_current; + stm_char *p = STM_SEGMENT->nursery_current; stm_char *end = p + size_rounded_up; - STM_REGION->nursery_current = end; - if (UNLIKELY((uintptr_t)end > STM_REGION->nursery_section_end)) + STM_SEGMENT->nursery_current = end; + if (UNLIKELY((uintptr_t)end > STM_SEGMENT->nursery_section_end)) p = _stm_allocate_slowpath(size_rounded_up); return (object_t *)p; } @@ -155,7 +155,7 @@ }) static inline void stm_become_inevitable(char* msg) { - if (STM_REGION->jmpbuf_ptr != NULL) + if (STM_SEGMENT->jmpbuf_ptr != NULL) _stm_become_inevitable(msg); } diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -64,7 +64,7 @@ void stm_become_inevitable(char* msg); bool _stm_in_nursery(object_t *obj); char *_stm_real_address(object_t *obj); -object_t *_stm_region_address(char *ptr); +object_t *_stm_segment_address(char *ptr); void _stm_start_safe_point(uint8_t); void _stm_stop_safe_point(uint8_t); @@ -164,15 +164,15 @@ bool _checked_stm_write(object_t *object) { stm_jmpbuf_t here; - stm_region_info_t *region = STM_REGION; + stm_segment_info_t *segment = STM_SEGMENT; if (__builtin_setjmp(here) == 0) { // returned directly - assert(region->jmpbuf_ptr == (stm_jmpbuf_t *)-1); - region->jmpbuf_ptr = &here; + assert(segment->jmpbuf_ptr == (stm_jmpbuf_t *)-1); + segment->jmpbuf_ptr = &here; stm_write(object); - region->jmpbuf_ptr = 
(stm_jmpbuf_t *)-1; + segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; return 0; } - region->jmpbuf_ptr = (stm_jmpbuf_t *)-1; + segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; return 1; } @@ -338,8 +338,8 @@ def stm_get_real_address(obj): return lib._stm_real_address(ffi.cast('object_t*', obj)) -def stm_get_region_address(ptr): - return int(ffi.cast('uintptr_t', lib._stm_region_address(ptr))) +def stm_get_segment_address(ptr): + return int(ffi.cast('uintptr_t', lib._stm_segment_address(ptr))) def stm_read(o): lib.stm_read(o) From noreply at buildbot.pypy.org Mon Feb 10 18:00:51 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Feb 2014 18:00:51 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: in-progress Message-ID: <20140210170051.1FAFF1D2600@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r719:b107802731ce Date: 2014-02-10 18:00 +0100 http://bitbucket.org/pypy/stmgc/changeset/b107802731ce/ Log: in-progress diff --git a/c7/pages.c b/c7/pages.c deleted file mode 100644 --- a/c7/pages.c +++ /dev/null @@ -1,157 +0,0 @@ -#define _GNU_SOURCE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -#include "core.h" -#include "list.h" -#include "pages.h" -#include "pagecopy.h" - - -#if defined(__i386__) || defined(__x86_64__) -# define HAVE_FULL_EXCHANGE_INSN -#endif - - -uintptr_t index_page_never_used; -uint8_t flag_page_private[NB_PAGES]; - -volatile uint8_t list_lock = 0; -struct stm_list_s *single_page_list; - - -void _stm_reset_pages() -{ - assert(!list_lock); - if (!single_page_list) - single_page_list = stm_list_create(); - else - stm_list_clear(single_page_list); - - index_page_never_used = FIRST_AFTER_NURSERY_PAGE; - - memset(flag_page_private, 0, sizeof(flag_page_private)); -} - -uint8_t stm_get_page_flag(int pagenum) -{ - return flag_page_private[pagenum]; -} - -void stm_set_page_flag(int pagenum, uint8_t flag) -{ - assert(flag_page_private[pagenum] != flag); - flag_page_private[pagenum] = flag; -} - - -void stm_pages_privatize(uintptr_t pagenum) -{ - if (flag_page_private[pagenum] == PRIVATE_PAGE) - return; - -#ifdef HAVE_FULL_EXCHANGE_INSN - /* use __sync_lock_test_and_set() as a cheaper alternative to - __sync_bool_compare_and_swap(). 
*/ - int previous = __sync_lock_test_and_set(&flag_page_private[pagenum], - REMAPPING_PAGE); - if (previous == PRIVATE_PAGE) { - flag_page_private[pagenum] = PRIVATE_PAGE; - return; - } - bool was_shared = (previous == SHARED_PAGE); -#else - bool was_shared = __sync_bool_compare_and_swap(&flag_page_private[pagenum], - SHARED_PAGE, REMAPPING_PAGE); -#endif - if (!was_shared) { - while (1) { - uint8_t state = ((uint8_t volatile *)flag_page_private)[pagenum]; - if (state != REMAPPING_PAGE) { - assert(state == PRIVATE_PAGE); - break; - } - spin_loop(); - } - return; - } - - ssize_t pgoff1 = pagenum; - ssize_t pgoff2 = pagenum + NB_PAGES; - ssize_t localpgoff = pgoff1 + NB_PAGES * _STM_TL->thread_num; - ssize_t otherpgoff = pgoff1 + NB_PAGES * (1 - _STM_TL->thread_num); - - void *localpg = object_pages + localpgoff * 4096UL; - void *otherpg = object_pages + otherpgoff * 4096UL; - - // XXX should not use pgoff2, but instead the next unused page in - // thread 2, so that after major GCs the next dirty pages are the - // same as the old ones - int res = remap_file_pages(localpg, 4096, 0, pgoff2, 0); - if (res < 0) { - perror("remap_file_pages"); - abort(); - } - pagecopy(localpg, otherpg); - write_fence(); - assert(flag_page_private[pagenum] == REMAPPING_PAGE); - flag_page_private[pagenum] = PRIVATE_PAGE; -} - - - -uintptr_t stm_pages_reserve(int num) -{ - /* grab free, possibly uninitialized pages */ - if (num == 1 && !stm_list_is_empty(single_page_list)) { - uint8_t previous; - while ((previous = __sync_lock_test_and_set(&list_lock, 1))) - spin_loop(); - - if (!stm_list_is_empty(single_page_list)) { - uintptr_t res = (uintptr_t)stm_list_pop_item(single_page_list); - list_lock = 0; - return res; - } - - list_lock = 0; - } - - /* Return the index'th object page, which is so far never used. */ - uintptr_t index = __sync_fetch_and_add(&index_page_never_used, num); - - int i; - for (i = 0; i < num; i++) { - assert(flag_page_private[index+i] == SHARED_PAGE); - } - - if (index + num >= NB_PAGES) { - fprintf(stderr, "Out of mmap'ed memory!\n"); - abort(); - } - return index; -} - -void stm_pages_unreserve(uintptr_t pagenum) -{ - uint8_t previous; - while ((previous = __sync_lock_test_and_set(&list_lock, 1))) - spin_loop(); - - flag_page_private[pagenum] = SHARED_PAGE; - LIST_APPEND(single_page_list, (object_t*)pagenum); - - list_lock = 0; -} - - - diff --git a/c7/pages.h b/c7/pages.h deleted file mode 100644 --- a/c7/pages.h +++ /dev/null @@ -1,21 +0,0 @@ -enum { - /* unprivatized page seen by all threads */ - SHARED_PAGE=0, - - /* page being in the process of privatization */ - REMAPPING_PAGE, - - /* page private for each thread */ - PRIVATE_PAGE, -}; /* flag_page_private */ - - -void stm_pages_privatize(uintptr_t pagenum); -uintptr_t stm_pages_reserve(int num); -uint8_t stm_get_page_flag(int pagenum); -void stm_set_page_flag(int pagenum, uint8_t flag); -void _stm_reset_pages(void); -void stm_pages_unreserve(uintptr_t num); - - - diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -8,9 +8,48 @@ abort(); } +static void reset_transaction_read_version(void) +{ + /* force-reset all read markers to 0 */ + + /* XXX measure the time taken by this madvise() and the following + zeroing of pages done lazily by the kernel; compare it with using + 16-bit read_versions. + */ + /* XXX try to use madvise() on smaller ranges of memory. 
In my + measures, we could gain a factor 2 --- not really more, even if + the range of virtual addresses below is very large, as long as it + is already mostly non-reserved pages. (The following call keeps + them non-reserved; apparently the kernel just skips them very + quickly.) + */ + char *readmarkers = REAL_ADDRESS(STM_SEGMENT->segment_base, + FIRST_READMARKER_PAGE * 4096UL); + if (madvise(readmarkers, NB_READMARKER_PAGES * 4096UL, + MADV_DONTNEED) < 0) { + perror("madvise"); + abort(); + } + STM_SEGMENT->transaction_read_version = 1; +} + void stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf) { /* GS invalid before this point! */ - _stm_stop_safe_point(LOCK_COLLECT|THREAD_YIELD); - + acquire_thread_segment(tl); + + STM_SEGMENT->jmpbuf_ptr = jmpbuf; + + uint8_t old_rv = STM_SEGMENT->transaction_read_version; + STM_SEGMENT->transaction_read_version = old_rv + 1; + if (UNLIKELY(old_rv == 0xff)) + reset_transaction_read_version(); } + + +void stm_commit_transaction(void) +{ + stm_thread_local_t *tl = STM_SEGMENT->running_thread; + release_thread_segment(tl); + abort(); +} diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -4,6 +4,7 @@ #include #include #include +#include #define NB_PAGES (1500*256) // 1500MB @@ -12,16 +13,14 @@ #define LARGE_OBJECT_WORDS 36 #define NB_NURSERY_PAGES 1024 // 4MB -#define NURSERY_SECTION_SIZE (24*4096) - - #define TOTAL_MEMORY (NB_PAGES * 4096UL * NB_SEGMENTS) #define READMARKER_END ((NB_PAGES * 4096UL) >> 4) #define FIRST_OBJECT_PAGE ((READMARKER_END + 4095) / 4096UL) #define FIRST_NURSERY_PAGE FIRST_OBJECT_PAGE +#define END_NURSERY_PAGE (FIRST_NURSERY_PAGE + NB_NURSERY_PAGES) #define READMARKER_START ((FIRST_OBJECT_PAGE * 4096UL) >> 4) #define FIRST_READMARKER_PAGE (READMARKER_START / 4096UL) -#define END_NURSERY_PAGE (FIRST_NURSERY_PAGE + NB_NURSERY_PAGES) +#define NB_READMARKER_PAGES (FIRST_OBJECT_PAGE - FIRST_READMARKER_PAGE) enum { diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -3,12 +3,6 @@ #endif -stm_char *_stm_allocate_slowpath(ssize_t size_rounded_up) -{ - abort(); -} - - object_t *stm_allocate_prebuilt(ssize_t size_rounded_up) { abort(); diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -24,6 +24,17 @@ return (object_t*)res; } +struct stm_priv_segment_info_s *_stm_segment(void) +{ + char *info = REAL_ADDRESS(STM_SEGMENT->segment_base, STM_PSEGMENT); + return (struct stm_priv_segment_info_s *)info; +} + +stm_thread_local_t *_stm_thread(void) +{ + return STM_SEGMENT->running_thread; +} + bool _stm_was_read(object_t *obj) { return ((stm_read_marker_t *)(((uintptr_t)obj) >> 4))->rm == diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c new file mode 100644 --- /dev/null +++ b/c7/stm/nursery.c @@ -0,0 +1,63 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + +/************************************************************/ + +#define NURSERY_SIZE (NB_NURSERY_PAGES * 4096UL) + +/* an object larger than LARGE_OBJECT will never be allocated in + the nursery. */ +#define LARGE_OBJECT (65*1024) + +/* the nursery is divided in "sections" this big. Each section is + allocated to a single running thread. */ +#define NURSERY_SECTION_SIZE (128*1024) + +/* if objects are larger than this limit but smaller than LARGE_OBJECT, + then they might be allocted outside sections but still in the nursery. 
*/ +#define MEDIUM_OBJECT (9*1024) + +/************************************************************/ + +static union { + struct { + uint64_t used; /* number of bytes from the nursery used so far */ + }; + char reserved[64]; +} nursery_ctl __attribute__((aligned(64))); + +/************************************************************/ + +static void setup_nursery(void) +{ + assert(MEDIUM_OBJECT < LARGE_OBJECT); + assert(LARGE_OBJECT < NURSERY_SECTION_SIZE); + nursery_ctl.used = 0; +} + + +static stm_char *allocate_from_nursery(uint64_t bytes) +{ + /* thread-safe; allocate a chunk of memory from the nursery */ + uint64_t p = __sync_fetch_and_add(&nursery_ctl.used, bytes); + if (p + bytes > NURSERY_SIZE) { + //major_collection(); + abort(); + } + return (stm_char *)(FIRST_NURSERY_PAGE * 4096UL + p); +} + + +stm_char *_stm_allocate_slowpath(ssize_t size_rounded_up) +{ + if (size_rounded_up < MEDIUM_OBJECT) { + /* This is a small object. The current section is simply full. + Allocate the next section. */ + stm_char *p = allocate_from_nursery(NURSERY_SECTION_SIZE); + STM_SEGMENT->nursery_current = p + size_rounded_up; + STM_SEGMENT->nursery_section_end = (uintptr_t)p + NURSERY_SECTION_SIZE; + return p; + } + abort(); +} diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -19,9 +19,8 @@ abort(); } } - for (; count > 0; count--) { - flag_page_private[pagenum++] = SHARED_PAGE; - } + for (i = 0; i < count; i++) + flag_page_private[pagenum + i] = SHARED_PAGE; } static void _pages_privatize(uintptr_t pagenum, uintptr_t count) diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -59,6 +59,9 @@ long time for each page. */ pages_initialize_shared(FIRST_NURSERY_PAGE, NB_NURSERY_PAGES); + setup_sync(); + setup_nursery(); + #if 0 stm_largemalloc_init(heap, HEAP_PAGES * 4096UL); #endif @@ -72,6 +75,8 @@ stm_object_pages = NULL; memset(flag_page_private, 0, sizeof(flag_page_private)); + + teardown_sync(); } void stm_register_thread_local(stm_thread_local_t *tl) @@ -85,7 +90,7 @@ stm_thread_locals->prev->next = tl; stm_thread_locals->prev = tl; } - tl->associated_segment = get_segment(0); + tl->associated_segment_num = -1; } void stm_unregister_thread_local(stm_thread_local_t *tl) @@ -99,4 +104,7 @@ } tl->prev->next = tl->next; tl->next->prev = tl->prev; + tl->prev = NULL; + tl->next = NULL; + tl->associated_segment_num = -1; } diff --git a/c7/stm/sync.c b/c7/stm/sync.c new file mode 100644 --- /dev/null +++ b/c7/stm/sync.c @@ -0,0 +1,82 @@ +#include +#include +#include +#include + + +static union { + struct { + sem_t semaphore; + uint8_t in_use[NB_SEGMENTS]; /* 1 if running a pthread */ + }; + char reserved[64]; +} segments_ctl __attribute__((aligned(64))); + + +static void setup_sync(void) +{ + memset(segments_ctl.in_use, 0, sizeof(segments_ctl.in_use)); + if (sem_init(&segments_ctl.semaphore, 0, NB_SEGMENTS) != 0) { + perror("sem_init"); + abort(); + } +} + +static void teardown_sync(void) +{ + if (sem_destroy(&segments_ctl.semaphore) != 0) { + perror("sem_destroy"); + abort(); + } +} + +static void set_gs_register(char *value) +{ + if (syscall(SYS_arch_prctl, ARCH_SET_GS, (uint64_t)value) != 0) { + perror("syscall(arch_prctl, ARCH_SET_GS)"); + abort(); + } +} + +static void acquire_thread_segment(stm_thread_local_t *tl) +{ + /* This function acquires a segment for the currently running thread, + and set up the GS register if it changed. 
*/ + while (sem_wait(&segments_ctl.semaphore) != 0) { + if (errno != EINTR) { + perror("sem_wait"); + abort(); + } + } + int num = tl->associated_segment_num; + if (num >= 0) { + if (__sync_lock_test_and_set(&segments_ctl.in_use[num], 1) == 0) { + /* fast-path: reacquired the same segment number than the one + we had. The value stored in GS is still valid. */ + goto exit; + } + } + /* Look for the next free segment. There must be one, because we + acquired the semaphore above. */ + while (1) { + num = (num + 1) % NB_SEGMENTS; + if (__sync_lock_test_and_set(&segments_ctl.in_use[num], 1) == 0) + break; + } + tl->associated_segment_num = num; + set_gs_register(get_segment_base(num)); + + exit: + assert(STM_SEGMENT->running_thread == NULL); + STM_SEGMENT->running_thread = tl; +} + +static void release_thread_segment(stm_thread_local_t *tl) +{ + assert(STM_SEGMENT->running_thread == tl); + STM_SEGMENT->running_thread = NULL; + + int num = tl->associated_segment_num; + __sync_lock_release(&segments_ctl.in_use[num]); + sem_post(&segments_ctl.semaphore); +} diff --git a/c7/stm/sync.h b/c7/stm/sync.h new file mode 100644 --- /dev/null +++ b/c7/stm/sync.h @@ -0,0 +1,8 @@ + + +static void setup_sync(void); +static void teardown_sync(void); + +/* acquire and release one of the segments for running the given thread */ +static void acquire_thread_segment(stm_thread_local_t *tl); +static void release_thread_segment(stm_thread_local_t *tl); diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -2,9 +2,12 @@ #include "stmgc.h" #include "stm/core.h" #include "stm/pages.h" +#include "stm/sync.h" #include "stm/misc.c" -#include "stm/core.c" #include "stm/pages.c" #include "stm/gcpage.c" +#include "stm/nursery.c" +#include "stm/sync.c" #include "stm/setup.c" +#include "stm/core.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -55,7 +55,7 @@ /* every thread should handle the shadow stack itself */ object_t **shadowstack, **shadowstack_base; /* the next fields are handled automatically by the library */ - struct stm_segment_info_s *associated_segment; + int associated_segment_num; struct stm_thread_local_s *prev, *next; } stm_thread_local_t; From noreply at buildbot.pypy.org Mon Feb 10 18:12:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Feb 2014 18:12:01 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: in-progress Message-ID: <20140210171201.6E9391C35D0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r720:1da8e9545d83 Date: 2014-02-10 18:11 +0100 http://bitbucket.org/pypy/stmgc/changeset/1da8e9545d83/ Log: in-progress diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -4,6 +4,7 @@ /************************************************************/ +#define NURSERY_START (FIRST_NURSERY_PAGE * 4096UL) #define NURSERY_SIZE (NB_NURSERY_PAGES * 4096UL) /* an object larger than LARGE_OBJECT will never be allocated in @@ -36,6 +37,12 @@ nursery_ctl.used = 0; } +bool _stm_in_nursery(object_t *obj) +{ + uint64_t p = (uint64_t)obj; + return (p - NURSERY_START) < NURSERY_SIZE; +} + static stm_char *allocate_from_nursery(uint64_t bytes) { @@ -45,7 +52,7 @@ //major_collection(); abort(); } - return (stm_char *)(FIRST_NURSERY_PAGE * 4096UL + p); + return (stm_char *)(NURSERY_START + p); } @@ -53,8 +60,10 @@ { if (size_rounded_up < MEDIUM_OBJECT) { /* This is a small object. The current section is simply full. - Allocate the next section. 
*/ + Allocate the next section and initialize it with zeroes. */ stm_char *p = allocate_from_nursery(NURSERY_SECTION_SIZE); + memset(REAL_ADDRESS(STM_SEGMENT->segment_base, p), 0, + NURSERY_SECTION_SIZE); STM_SEGMENT->nursery_current = p + size_rounded_up; STM_SEGMENT->nursery_section_end = (uintptr_t)p + NURSERY_SECTION_SIZE; return p; diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -80,3 +80,8 @@ __sync_lock_release(&segments_ctl.in_use[num]); sem_post(&segments_ctl.semaphore); } + +bool _stm_in_transaction(void) +{ + return STM_SEGMENT->running_thread != NULL; +} diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -69,6 +69,7 @@ bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); bool _stm_in_nursery(object_t *obj); +bool _stm_in_transaction(void); char *_stm_real_address(object_t *o); object_t *_stm_segment_address(char *ptr); #endif diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -49,11 +49,18 @@ bool _checked_stm_write(object_t *obj); bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); +bool _stm_in_nursery(object_t *obj); +char *_stm_real_address(object_t *obj); +object_t *_stm_segment_address(char *ptr); +bool _stm_in_transaction(void); void stm_register_thread_local(stm_thread_local_t *tl); void stm_unregister_thread_local(stm_thread_local_t *tl); void stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf); + +void _set_type_id(object_t *obj, uint32_t h); +uint32_t _get_type_id(object_t *obj); """) @@ -62,18 +69,11 @@ void stm_commit_transaction(void); void stm_abort_transaction(void); void stm_become_inevitable(char* msg); -bool _stm_in_nursery(object_t *obj); -char *_stm_real_address(object_t *obj); -object_t *_stm_segment_address(char *ptr); void _stm_start_safe_point(uint8_t); void _stm_stop_safe_point(uint8_t); bool _stm_check_stop_safe_point(void); -void _set_type_id(object_t *obj, uint32_t h); -uint32_t _get_type_id(object_t *obj); -bool _stm_is_in_transaction(void); - void stm_push_root(object_t *obj); object_t *stm_pop_root(void); @@ -426,10 +426,14 @@ self.running_transaction.add(n) def switch(self, thread_num): + tr = lib._stm_in_transaction() + assert tr == (self.current_thread in self.running_transaction) assert thread_num != self.current_thread + if tr: + stm_start_safe_point() self.current_thread = thread_num - if lib._stm_is_in_transaction(): - stm_start_safe_point() lib._stm_restore_local_state(thread_num) - if lib._stm_is_in_transaction(): + tr = lib._stm_in_transaction() + assert tr == (self.current_thread in self.running_transaction) + if tr: stm_stop_safe_point() # can raise Conflict From noreply at buildbot.pypy.org Mon Feb 10 19:10:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Feb 2014 19:10:19 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Enough to pass the first test. Message-ID: <20140210181019.EEEC31C35D2@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r721:0aa39c737d7f Date: 2014-02-10 19:10 +0100 http://bitbucket.org/pypy/stmgc/changeset/0aa39c737d7f/ Log: Enough to pass the first test. 
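The sync.c code added earlier in this series gives each running transaction one of NB_SEGMENTS per-thread views of the shared pages: a counting semaphore bounds how many transactions may run at once, a per-segment test-and-set flag claims a free slot, and set_gs_register() installs that segment's base address in %gs. The diff below continues refining that bookkeeping. As a rough standalone sketch of the acquire/release protocol, with illustrative names only (segments_init, segment_acquire, segment_release are not part of the stmgc API) and error handling left out:

    /* minimal model of acquire/release_thread_segment: a counting
       semaphore limits concurrency, per-segment flags pick a slot */
    #include <semaphore.h>
    #include <stdint.h>

    #define N_SEGMENTS 2

    static sem_t free_segments;              /* counts free segments */
    static uint8_t in_use[N_SEGMENTS];       /* 1 while a thread runs in it */

    void segments_init(void)
    {
        sem_init(&free_segments, 0, N_SEGMENTS);
    }

    int segment_acquire(int last_num)        /* pass -1 if no previous segment */
    {
        sem_wait(&free_segments);            /* block until a segment is free */
        int num = last_num;
        /* fast path: try to take back the segment used last time, so the
           base address already stored in %gs would still be valid */
        if (0 <= num && num < N_SEGMENTS &&
                __sync_lock_test_and_set(&in_use[num], 1) == 0)
            return num;
        /* slow path: scan for any free segment; one must exist because
           the semaphore was decremented above */
        while (1) {
            num = (num + 1) % N_SEGMENTS;
            if (__sync_lock_test_and_set(&in_use[num], 1) == 0)
                return num;                  /* caller would now set %gs */
        }
    }

    void segment_release(int num)
    {
        __sync_lock_release(&in_use[num]);
        sem_post(&free_segments);
    }

    int main(void)                           /* tiny usage check */
    {
        segments_init();
        int a = segment_acquire(-1);         /* first transaction: slow path */
        segment_release(a);
        int b = segment_acquire(a);          /* reacquire: fast path */
        segment_release(b);
        return a == b ? 0 : 1;
    }

The point of the fast path is that reacquiring the same segment number keeps the value stored in %gs valid, so it does not need to be reset on every transaction start; only when that slot is already taken does the loop look for another free segment.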
diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -70,3 +70,5 @@ return (struct stm_priv_segment_info_s *)REAL_ADDRESS( get_segment_base(segment_num), STM_PSEGMENT); } + +static bool _is_tl_registered(stm_thread_local_t *tl); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -90,7 +90,7 @@ stm_thread_locals->prev->next = tl; stm_thread_locals->prev = tl; } - tl->associated_segment_num = -1; + tl->associated_segment_num = NB_SEGMENTS; } void stm_unregister_thread_local(stm_thread_local_t *tl) @@ -106,5 +106,9 @@ tl->next->prev = tl->prev; tl->prev = NULL; tl->next = NULL; - tl->associated_segment_num = -1; } + +static bool _is_tl_registered(stm_thread_local_t *tl) +{ + return tl->next != NULL; +} diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -7,7 +7,7 @@ static union { struct { sem_t semaphore; - uint8_t in_use[NB_SEGMENTS]; /* 1 if running a pthread */ + uint8_t in_use[NB_SEGMENTS + 1]; /* 1 if running a pthread */ }; char reserved[64]; } segments_ctl __attribute__((aligned(64))); @@ -16,6 +16,7 @@ static void setup_sync(void) { memset(segments_ctl.in_use, 0, sizeof(segments_ctl.in_use)); + segments_ctl.in_use[NB_SEGMENTS] = 0xff; if (sem_init(&segments_ctl.semaphore, 0, NB_SEGMENTS) != 0) { perror("sem_init"); abort(); @@ -48,13 +49,12 @@ abort(); } } + assert(_is_tl_registered(tl)); int num = tl->associated_segment_num; - if (num >= 0) { - if (__sync_lock_test_and_set(&segments_ctl.in_use[num], 1) == 0) { - /* fast-path: reacquired the same segment number than the one - we had. The value stored in GS is still valid. */ - goto exit; - } + if (__sync_lock_test_and_set(&segments_ctl.in_use[num], 1) == 0) { + /* fast-path: reacquired the same segment number than the one + we had before. The value stored in GS is still valid. */ + goto exit; } /* Look for the next free segment. There must be one, because we acquired the semaphore above. */ @@ -85,3 +85,21 @@ { return STM_SEGMENT->running_thread != NULL; } + +void _stm_test_switch(stm_thread_local_t *tl) +{ + int num = tl->associated_segment_num; + assert(segments_ctl.in_use[num] == 1); + set_gs_register(get_segment_base(num)); + assert(STM_SEGMENT->running_thread == tl); +} + +void stm_start_safe_point(int flags) +{ + //... +} + +void stm_stop_safe_point(int flags) +{ + //... +} diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -72,6 +72,7 @@ bool _stm_in_transaction(void); char *_stm_real_address(object_t *o); object_t *_stm_segment_address(char *ptr); +void _stm_test_switch(stm_thread_local_t *tl); #endif #define _STM_GCFLAG_WRITE_BARRIER 0x01 @@ -160,6 +161,8 @@ _stm_become_inevitable(msg); } +void stm_start_safe_point(int flags); +void stm_stop_safe_point(int flags); /* ==================== END ==================== */ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -53,6 +53,7 @@ char *_stm_real_address(object_t *obj); object_t *_stm_segment_address(char *ptr); bool _stm_in_transaction(void); +void _stm_test_switch(stm_thread_local_t *tl); void stm_register_thread_local(stm_thread_local_t *tl); void stm_unregister_thread_local(stm_thread_local_t *tl); @@ -61,6 +62,13 @@ void _set_type_id(object_t *obj, uint32_t h); uint32_t _get_type_id(object_t *obj); + +#define LOCK_COLLECT ... +#define LOCK_EXCLUSIVE ... +#define THREAD_YIELD ... 
+ +void stm_start_safe_point(int); +bool _check_stop_safe_point(int); """) @@ -70,10 +78,6 @@ void stm_abort_transaction(void); void stm_become_inevitable(char* msg); -void _stm_start_safe_point(uint8_t); -void _stm_stop_safe_point(uint8_t); -bool _stm_check_stop_safe_point(void); - void stm_push_root(object_t *obj); object_t *stm_pop_root(void); @@ -102,13 +106,6 @@ GCFLAG_MOVED = 4, }; -enum { - LOCK_COLLECT = 1, - LOCK_EXCLUSIVE = 2, - THREAD_YIELD = 4, -}; - - void stm_largemalloc_init(char *data_start, size_t data_size); int stm_largemalloc_resize_arena(size_t new_size); @@ -141,6 +138,12 @@ typedef TLPREFIX struct myobj_s myobj_t; #define SIZEOF_MYOBJ sizeof(struct myobj_s) +enum { + LOCK_COLLECT = 1, + LOCK_EXCLUSIVE = 2, + THREAD_YIELD = 4, +}; + uint8_t _stm_get_flags(object_t *obj) { return obj->stm_flags; @@ -190,21 +193,23 @@ _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t*)-1; return 1; } +#endif -bool _stm_check_stop_safe_point(void) { - jmpbufptr_t here; - int tn = _STM_TL->thread_num; +bool _check_stop_safe_point(int flags) { + stm_jmpbuf_t here; + stm_segment_info_t *segment = STM_SEGMENT; if (__builtin_setjmp(here) == 0) { // returned directly - assert(_STM_TL->jmpbufptr == (jmpbufptr_t*)-1); - _STM_TL->jmpbufptr = &here; - _stm_stop_safe_point(LOCK_COLLECT); - _STM_TL->jmpbufptr = (jmpbufptr_t*)-1; + assert(segment->jmpbuf_ptr == (stm_jmpbuf_t *)-1); + segment->jmpbuf_ptr = &here; + stm_stop_safe_point(flags); + segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; return 0; } - _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t*)-1; + segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; return 1; } +#if 0 bool _stm_check_abort_transaction(void) { jmpbufptr_t here; int tn = _STM_TL->thread_num; @@ -369,10 +374,10 @@ def stm_start_safe_point(): - lib._stm_start_safe_point(lib.LOCK_COLLECT) + lib.stm_start_safe_point(lib.LOCK_COLLECT) def stm_stop_safe_point(): - if lib._stm_check_stop_safe_point(): + if lib._check_stop_safe_point(lib.LOCK_COLLECT): raise Conflict() def stm_become_inevitable(): @@ -412,7 +417,8 @@ def teardown_method(self, meth): for n in sorted(self.running_transaction): - self.switch(n) + if self.current_thread != n: + self.switch(n) self.abort_transaction() for tl in self.tls: lib.stm_unregister_thread_local(tl) @@ -432,8 +438,7 @@ if tr: stm_start_safe_point() self.current_thread = thread_num - lib._stm_restore_local_state(thread_num) - tr = lib._stm_in_transaction() - assert tr == (self.current_thread in self.running_transaction) - if tr: + if thread_num in self.running_transaction: + tl = self.tls[thread_num] + lib._stm_test_switch(tl) stm_stop_safe_point() # can raise Conflict diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -18,7 +18,7 @@ assert p3 - stm_get_real_address(lp2) == 16 # self.switch(1) - stm_start_transaction() + self.start_transaction() lp1s = stm_allocate(16) assert is_in_nursery(lp1s) assert abs(stm_get_real_address(lp1s) - p3) >= 4000 From noreply at buildbot.pypy.org Mon Feb 10 19:50:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Feb 2014 19:50:16 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Align nursery sections Message-ID: <20140210185016.609E41C35DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r722:f4d87872cb45 Date: 2014-02-10 19:46 +0100 http://bitbucket.org/pypy/stmgc/changeset/f4d87872cb45/ Log: Align nursery sections diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -19,6 
+19,9 @@ then they might be allocted outside sections but still in the nursery. */ #define MEDIUM_OBJECT (9*1024) +/* size in bytes of the alignment of any section requested */ +#define NURSERY_ALIGNMENT 64 + /************************************************************/ static union { @@ -44,9 +47,13 @@ } +#define NURSERY_ALIGN(bytes) \ + (((bytes) + NURSERY_ALIGNMENT - 1) & ~(NURSERY_ALIGNMENT - 1)) + static stm_char *allocate_from_nursery(uint64_t bytes) { /* thread-safe; allocate a chunk of memory from the nursery */ + bytes = NURSERY_ALIGN(bytes); uint64_t p = __sync_fetch_and_add(&nursery_ctl.used, bytes); if (p + bytes > NURSERY_SIZE) { //major_collection(); From noreply at buildbot.pypy.org Mon Feb 10 19:50:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Feb 2014 19:50:17 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: in-progress Message-ID: <20140210185017.7A8A81C35DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r723:77132519fe37 Date: 2014-02-10 19:47 +0100 http://bitbucket.org/pypy/stmgc/changeset/77132519fe37/ Log: in-progress diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -53,3 +53,14 @@ release_thread_segment(tl); abort(); } + +void stm_abort_transaction(void) +{ + stm_thread_local_t *tl = STM_SEGMENT->running_thread; + stm_jmpbuf_t *jmpbuf_ptr = STM_SEGMENT->jmpbuf_ptr; + release_thread_segment(tl); + + assert(jmpbuf_ptr != NULL); + assert(jmpbuf_ptr != (stm_jmpbuf_t *)-1); /* for tests only */ + __builtin_longjmp(*jmpbuf_ptr, 1); +} diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -81,16 +81,19 @@ sem_post(&segments_ctl.semaphore); } -bool _stm_in_transaction(void) +bool _stm_in_transaction(stm_thread_local_t *tl) { - return STM_SEGMENT->running_thread != NULL; + int num = tl->associated_segment_num; + if (num < NB_SEGMENTS) + return get_segment(num)->running_thread == tl; + else + return false; } void _stm_test_switch(stm_thread_local_t *tl) { - int num = tl->associated_segment_num; - assert(segments_ctl.in_use[num] == 1); - set_gs_register(get_segment_base(num)); + assert(_stm_in_transaction(tl)); + set_gs_register(get_segment_base(tl->associated_segment_num)); assert(STM_SEGMENT->running_thread == tl); } diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -69,7 +69,7 @@ bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); bool _stm_in_nursery(object_t *obj); -bool _stm_in_transaction(void); +bool _stm_in_transaction(stm_thread_local_t *tl); char *_stm_real_address(object_t *o); object_t *_stm_segment_address(char *ptr); void _stm_test_switch(stm_thread_local_t *tl); @@ -148,7 +148,7 @@ void stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf); void stm_start_inevitable_transaction(stm_thread_local_t *tl); void stm_commit_transaction(void); -void stm_abort_transaction(void); +void stm_abort_transaction(void) __attribute__((noreturn)); #define STM_START_TRANSACTION(tl, jmpbuf) ({ \ int _restart = __builtin_setjmp(jmpbuf); \ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -52,13 +52,15 @@ bool _stm_in_nursery(object_t *obj); char *_stm_real_address(object_t *obj); object_t *_stm_segment_address(char *ptr); -bool _stm_in_transaction(void); +bool _stm_in_transaction(stm_thread_local_t *tl); void _stm_test_switch(stm_thread_local_t *tl); void stm_register_thread_local(stm_thread_local_t *tl); void 
stm_unregister_thread_local(stm_thread_local_t *tl); void stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf); +void stm_commit_transaction(void); +bool _check_abort_transaction(void); void _set_type_id(object_t *obj, uint32_t h); uint32_t _get_type_id(object_t *obj); @@ -74,8 +76,6 @@ TEMPORARILY_DISABLED = """ void stm_start_inevitable_transaction(stm_thread_local_t *tl); -void stm_commit_transaction(void); -void stm_abort_transaction(void); void stm_become_inevitable(char* msg); void stm_push_root(object_t *obj); @@ -86,8 +86,6 @@ void _stm_minor_collect(); -bool _stm_check_abort_transaction(void); - void *memset(void *s, int c, size_t n); extern size_t stmcb_size(struct object_s *); extern void stmcb_trace(struct object_s *, void (object_t **)); @@ -126,6 +124,7 @@ lib = ffi.verify(''' +#include #include #include @@ -209,21 +208,19 @@ return 1; } -#if 0 -bool _stm_check_abort_transaction(void) { - jmpbufptr_t here; - int tn = _STM_TL->thread_num; +int _check_abort_transaction(void) { + stm_jmpbuf_t here; + stm_segment_info_t *segment = STM_SEGMENT; if (__builtin_setjmp(here) == 0) { // returned directly - assert(_STM_TL->jmpbufptr == (jmpbufptr_t*)-1); - _STM_TL->jmpbufptr = &here; + assert(segment->jmpbuf_ptr == (stm_jmpbuf_t *)-1); + segment->jmpbuf_ptr = &here; stm_abort_transaction(); - _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t*)-1; - return 0; + segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; + return 0; // but should be unreachable in this case } - _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t*)-1; + segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; return 1; } -#endif void _set_type_id(object_t *obj, uint32_t h) @@ -369,9 +366,6 @@ if lib._stm_stop_transaction(): raise Conflict() -def stm_abort_transaction(): - return lib._stm_check_abort_transaction() - def stm_start_safe_point(): lib.stm_start_safe_point(lib.LOCK_COLLECT) @@ -413,32 +407,45 @@ lib.stm_setup() self.tls = [_allocate_thread_local(), _allocate_thread_local()] self.current_thread = 0 - self.running_transaction = set() def teardown_method(self, meth): - for n in sorted(self.running_transaction): - if self.current_thread != n: - self.switch(n) - self.abort_transaction() + for n, tl in enumerate(self.tls): + if lib._stm_in_transaction(tl): + if self.current_thread != n: + self.switch(n) + self.abort_transaction() for tl in self.tls: lib.stm_unregister_thread_local(tl) lib.stm_teardown() def start_transaction(self): - n = self.current_thread - assert n not in self.running_transaction - tl = self.tls[n] + tl = self.tls[self.current_thread] + assert not lib._stm_in_transaction(tl) lib.stm_start_transaction(tl, ffi.cast("stm_jmpbuf_t *", -1)) - self.running_transaction.add(n) + assert lib._stm_in_transaction(tl) + + def commit_transaction(self): + tl = self.tls[self.current_thread] + assert lib._stm_in_transaction(tl) + lib.stm_commit_transaction() + assert not lib._stm_in_transaction(tl) + + def abort_transaction(self): + tl = self.tls[self.current_thread] + assert lib._stm_in_transaction(tl) + res = lib._check_abort_transaction() + assert res # abort_transaction() didn't abort! 
+ assert not lib._stm_in_transaction(tl) def switch(self, thread_num): - tr = lib._stm_in_transaction() - assert tr == (self.current_thread in self.running_transaction) assert thread_num != self.current_thread - if tr: + tl = self.tls[self.current_thread] + if lib._stm_in_transaction(tl): stm_start_safe_point() + # self.current_thread = thread_num - if thread_num in self.running_transaction: - tl = self.tls[thread_num] - lib._stm_test_switch(tl) + tl2 = self.tls[thread_num] + # + if lib._stm_in_transaction(tl2): + lib._stm_test_switch(tl2) stm_stop_safe_point() # can raise Conflict From noreply at buildbot.pypy.org Mon Feb 10 19:50:18 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Feb 2014 19:50:18 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: fixing some more tests Message-ID: <20140210185018.86F5A1C35DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r724:efd9d37da8af Date: 2014-02-10 19:50 +0100 http://bitbucket.org/pypy/stmgc/changeset/efd9d37da8af/ Log: fixing some more tests diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -51,7 +51,6 @@ { stm_thread_local_t *tl = STM_SEGMENT->running_thread; release_thread_segment(tl); - abort(); } void stm_abort_transaction(void) diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -28,29 +28,29 @@ assert stm_get_real_address(lp4) - p3 == 16 def test_transaction_start_stop(self): - stm_start_transaction() + self.start_transaction() self.switch(1) - stm_start_transaction() - stm_stop_transaction() + self.start_transaction() + self.commit_transaction() self.switch(0) - stm_stop_transaction() + self.commit_transaction() def test_simple_read(self): - stm_start_transaction() + self.start_transaction() lp1 = stm_allocate(16) stm_read(lp1) assert stm_was_read(lp1) - stm_stop_transaction() + self.commit_transaction() def test_simple_write(self): - stm_start_transaction() + self.start_transaction() lp1 = stm_allocate(16) assert stm_was_written(lp1) stm_write(lp1) assert stm_was_written(lp1) - stm_stop_transaction() + self.commit_transaction() def test_allocate_old(self): lp1 = stm_allocate_old(16) @@ -60,66 +60,66 @@ def test_write_on_old(self): lp1 = stm_allocate_old(16) - stm_start_transaction() + self.start_transaction() stm_write(lp1) assert stm_was_written(lp1) stm_set_char(lp1, 'a') self.switch(1) - stm_start_transaction() + self.start_transaction() stm_read(lp1) assert stm_was_read(lp1) assert stm_get_char(lp1) == '\0' - stm_stop_transaction() + self.commit_transaction() def test_read_write_1(self): lp1 = stm_allocate_old(16) stm_get_real_address(lp1)[HDR] = 'a' #setchar - stm_start_transaction() - stm_stop_transaction() + self.start_transaction() + self.commit_transaction() # self.switch(1) - stm_start_transaction() + self.start_transaction() stm_write(lp1) assert stm_get_char(lp1) == 'a' stm_set_char(lp1, 'b') # self.switch(0) - stm_start_transaction() + self.start_transaction() stm_read(lp1) assert stm_get_char(lp1) == 'a' # self.switch(1) - stm_stop_transaction() + self.commit_transaction() # py.test.raises(Conflict, self.switch, 0) # detects rw conflict def test_commit_fresh_objects(self): - stm_start_transaction() + self.start_transaction() lp = stm_allocate(16) stm_set_char(lp, 'u') p = stm_get_real_address(lp) stm_push_root(lp) - stm_stop_transaction() + self.commit_transaction() lp = stm_pop_root() p1 = stm_get_real_address(lp) assert p != p1 self.switch(1) - stm_start_transaction() + 
self.start_transaction() stm_write(lp) # privatize page p_ = stm_get_real_address(lp) assert p != p_ assert p1 != p_ assert stm_get_char(lp) == 'u' - stm_stop_transaction() + self.commit_transaction() def test_commit_fresh_objects2(self): self.switch(1) - stm_start_transaction() + self.start_transaction() lp = stm_allocate(16) stm_set_char(lp, 'u') lp2 = stm_allocate(16) @@ -131,31 +131,31 @@ stm_read(lp2) # test not crash stm_push_root(lp) stm_push_root(lp2) - stm_stop_transaction() + self.commit_transaction() lp2 = stm_pop_root() lp = stm_pop_root() self.switch(0) - stm_start_transaction() + self.start_transaction() stm_write(lp) # privatize page assert stm_get_char(lp) == 'u' stm_set_char(lp, 'x') stm_write(lp2) assert stm_get_char(lp2) == 'v' stm_set_char(lp2, 'y') - stm_stop_transaction() + self.commit_transaction() self.switch(1) - stm_start_transaction() + self.start_transaction() stm_write(lp) assert stm_get_char(lp) == 'x' assert stm_get_char(lp2) == 'y' - stm_stop_transaction() + self.commit_transaction() def test_simple_refs(self): - stm_start_transaction() + self.start_transaction() lp = stm_allocate_refs(3) lq = stm_allocate(16) lr = stm_allocate(16) @@ -164,12 +164,12 @@ stm_set_ref(lp, 0, lq) stm_set_ref(lp, 1, lr) stm_push_root(lp) - stm_stop_transaction() + self.commit_transaction() lp = stm_pop_root() self.switch(1) - stm_start_transaction() + self.start_transaction() stm_write(lp) lq = stm_get_ref(lp, 0) lr = stm_get_ref(lp, 1) @@ -177,60 +177,60 @@ stm_read(lr) assert stm_get_char(lq) == 'x' assert stm_get_char(lr) == 'y' - stm_stop_transaction() + self.commit_transaction() def test_start_transaction_updates(self): - stm_start_transaction() + self.start_transaction() lp1 = stm_allocate(16) stm_set_char(lp1, 'a') stm_push_root(lp1) - stm_stop_transaction() + self.commit_transaction() lp1 = stm_pop_root() # self.switch(1) - stm_start_transaction() + self.start_transaction() stm_write(lp1) assert stm_get_char(lp1) == 'a' stm_set_char(lp1, 'b') - stm_stop_transaction() + self.commit_transaction() # self.switch(0) - stm_start_transaction() + self.start_transaction() assert stm_get_char(lp1) == 'b' def test_resolve_no_conflict_empty(self): - stm_start_transaction() + self.start_transaction() # self.switch(1) - stm_start_transaction() - stm_stop_transaction() + self.start_transaction() + self.commit_transaction() # self.switch(0) - stm_stop_transaction() + self.commit_transaction() def test_resolve_no_conflict_write_only_in_already_committed(self): - stm_start_transaction() + self.start_transaction() lp1 = stm_allocate(16) p1 = stm_get_real_address(lp1) p1[HDR] = 'a' stm_push_root(lp1) - stm_stop_transaction() + self.commit_transaction() lp1 = stm_pop_root() # 'a' in SHARED_PAGE - stm_start_transaction() + self.start_transaction() self.switch(1) - stm_start_transaction() + self.start_transaction() stm_write(lp1) # privatize page p1 = stm_get_real_address(lp1) assert p1[HDR] == 'a' p1[HDR] = 'b' - stm_stop_transaction() + self.commit_transaction() # 'b' both private pages # self.switch(0) @@ -238,84 +238,84 @@ assert p1[HDR] == 'b' p1 = stm_get_real_address(lp1) assert p1[HDR] == 'b' - stm_stop_transaction() + self.commit_transaction() assert p1[HDR] == 'b' def test_not_resolve_write_read_conflict(self): - stm_start_transaction() + self.start_transaction() lp1 = stm_allocate(16) stm_set_char(lp1, 'a') stm_push_root(lp1) - stm_stop_transaction() + self.commit_transaction() lp1 = stm_pop_root() - stm_start_transaction() + self.start_transaction() stm_read(lp1) # self.switch(1) - 
stm_start_transaction() + self.start_transaction() stm_write(lp1) stm_set_char(lp1, 'b') - stm_stop_transaction() + self.commit_transaction() # py.test.raises(Conflict, self.switch, 0) - stm_start_transaction() + self.start_transaction() assert stm_get_char(lp1) == 'b' def test_resolve_write_read_conflict(self): - stm_start_transaction() + self.start_transaction() lp1 = stm_allocate(16) stm_set_char(lp1, 'a') stm_push_root(lp1) - stm_stop_transaction() + self.commit_transaction() lp1 = stm_pop_root() - stm_start_transaction() + self.start_transaction() # self.switch(1) - stm_start_transaction() + self.start_transaction() stm_write(lp1) stm_set_char(lp1, 'b') - stm_stop_transaction() + self.commit_transaction() # self.switch(0) assert stm_get_char(lp1) == 'b' def test_resolve_write_write_conflict(self): - stm_start_transaction() + self.start_transaction() lp1 = stm_allocate(16) stm_set_char(lp1, 'a') stm_push_root(lp1) - stm_stop_transaction() + self.commit_transaction() lp1 = stm_pop_root() - stm_start_transaction() + self.start_transaction() stm_write(lp1) # acquire lock # self.switch(1) - stm_start_transaction() + self.start_transaction() py.test.raises(Conflict, stm_write, lp1) # write-write conflict def test_abort_cleanup(self): - stm_start_transaction() + self.start_transaction() lp1 = stm_allocate(16) stm_set_char(lp1, 'a') stm_push_root(lp1) - stm_stop_transaction() + self.commit_transaction() lp1 = stm_pop_root() - stm_start_transaction() + self.start_transaction() stm_set_char(lp1, 'x') assert stm_abort_transaction() - stm_start_transaction() + self.start_transaction() assert stm_get_char(lp1) == 'a' def test_many_allocs(self): obj_size = 1024 num = (lib.NB_NURSERY_PAGES * 4096) / obj_size + 100 # more than what fits in the nursery - stm_start_transaction() + self.start_transaction() for i in range(num): new = stm_allocate(obj_size) stm_push_root(new) @@ -335,7 +335,7 @@ def test_larger_than_section(self): obj_size = lib.NURSERY_SECTION + 16 - stm_start_transaction() + self.start_transaction() new = stm_allocate(obj_size) assert not is_in_nursery(new) @@ -349,7 +349,7 @@ assert obj_size > 4096 # we want more than 1 page assert obj_size < 4096 * 1024 # in the nursery - stm_start_transaction() + self.start_transaction() new = stm_allocate(obj_size) assert is_in_nursery(new) assert len(stm_get_obj_pages(new)) == 2 @@ -373,17 +373,17 @@ assert obj_size > 4096 # we want more than 1 page assert obj_size < 4096 * 1024 # in the nursery - stm_start_transaction() + self.start_transaction() new = stm_allocate(obj_size) assert is_in_nursery(new) stm_push_root(new) - stm_stop_transaction() + self.commit_transaction() new = stm_pop_root() assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] == [lib.SHARED_PAGE]*2) - stm_start_transaction() + self.start_transaction() stm_write(new) assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] == [lib.PRIVATE_PAGE]*2) @@ -393,13 +393,13 @@ wnew[4097] = 'x' self.switch(1) - stm_start_transaction() + self.start_transaction() stm_read(new) rnew = stm_get_real_address(new) assert rnew[4097] == '\0' def test_partial_alloced_pages(self): - stm_start_transaction() + self.start_transaction() new = stm_allocate(16) stm_push_root(new) stm_minor_collect() @@ -407,11 +407,11 @@ # assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == lib.UNCOMMITTED_SHARED_PAGE # assert not (stm_get_flags(new) & lib.GCFLAG_NOT_COMMITTED) - stm_stop_transaction() + self.commit_transaction() assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == lib.SHARED_PAGE 
assert not (stm_get_flags(new) & lib.GCFLAG_NOT_COMMITTED) - stm_start_transaction() + self.start_transaction() newer = stm_allocate(16) stm_push_root(newer) stm_minor_collect() @@ -425,13 +425,13 @@ assert stm_get_flags(newer) & lib.GCFLAG_NOT_COMMITTED stm_write(newer) # does not privatize assert stm_get_page_flag(stm_get_obj_pages(newer)[0]) == lib.SHARED_PAGE - stm_stop_transaction() + self.commit_transaction() assert stm_get_page_flag(stm_get_obj_pages(newer)[0]) == lib.SHARED_PAGE assert not (stm_get_flags(newer) & lib.GCFLAG_NOT_COMMITTED) def test_reset_partial_alloc_pages(self): - stm_start_transaction() + self.start_transaction() new = stm_allocate(16) stm_set_char(new, 'a') stm_push_root(new) @@ -439,7 +439,7 @@ new = stm_pop_root() stm_abort_transaction() - stm_start_transaction() + self.start_transaction() newer = stm_allocate(16) stm_push_root(newer) stm_minor_collect() @@ -448,7 +448,7 @@ assert stm_get_char(newer) == '\0' def test_reuse_page(self): - stm_start_transaction() + self.start_transaction() new = stm_allocate(16) stm_push_root(new) stm_minor_collect() @@ -456,7 +456,7 @@ # assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == lib.UNCOMMITTED_SHARED_PAGE stm_abort_transaction() - stm_start_transaction() + self.start_transaction() newer = stm_allocate(16) stm_push_root(newer) stm_minor_collect() @@ -464,106 +464,106 @@ assert new == newer def test_write_to_old_after_minor(self): - stm_start_transaction() + self.start_transaction() new = stm_allocate(16) stm_push_root(new) stm_minor_collect() old = stm_pop_root() - stm_stop_transaction() + self.commit_transaction() - stm_start_transaction() + self.start_transaction() stm_write(old) # old objs to trace stm_set_char(old, 'x') stm_minor_collect() stm_write(old) # old objs to trace stm_set_char(old, 'y') - stm_stop_transaction() + self.commit_transaction() def test_inevitable_transaction(self): - py.test.skip("stm_write and stm_stop_transaction" + py.test.skip("stm_write and self.commit_transaction" " of an inevitable tr. 
is not testable" " since they wait for the other thread" " to synchronize and possibly abort") old = stm_allocate_old(16) - stm_start_transaction() + self.start_transaction() self.switch(1) - stm_start_transaction() + self.start_transaction() stm_write(old) self.switch(0) stm_become_inevitable() stm_write(old) # t1 needs to abort, not us - stm_stop_transaction() + self.commit_transaction() py.test.raises(Conflict, self.switch, 1) # def test_resolve_write_write_no_conflict(self): - # stm_start_transaction() + # self.start_transaction() # p1 = stm_allocate(16) # p2 = stm_allocate(16) # p1[8] = 'a' # p2[8] = 'A' - # stm_stop_transaction(False) - # stm_start_transaction() + # self.commit_transaction(False) + # self.start_transaction() # # # self.switch(1) - # stm_start_transaction() + # self.start_transaction() # stm_write(p1) # p1[8] = 'b' - # stm_stop_transaction(False) + # self.commit_transaction(False) # # # self.switch(0) # stm_write(p2) # p2[8] = 'C' - # stm_stop_transaction(False) + # self.commit_transaction(False) # assert p1[8] == 'b' # assert p2[8] == 'C' # def test_page_extra_malloc_unchanged_page(self): - # stm_start_transaction() + # self.start_transaction() # p1 = stm_allocate(16) # p2 = stm_allocate(16) # p1[8] = 'A' # p2[8] = 'a' - # stm_stop_transaction(False) - # stm_start_transaction() + # self.commit_transaction(False) + # self.start_transaction() # # # self.switch(1) - # stm_start_transaction() + # self.start_transaction() # stm_write(p1) # assert p1[8] == 'A' # p1[8] = 'B' - # stm_stop_transaction(False) + # self.commit_transaction(False) # # # self.switch(0) # stm_read(p2) # assert p2[8] == 'a' # p3 = stm_allocate(16) # goes into the same page, which is # p3[8] = ':' # not otherwise modified - # stm_stop_transaction(False) + # self.commit_transaction(False) # # # assert p1[8] == 'B' # assert p2[8] == 'a' # assert p3[8] == ':' # def test_page_extra_malloc_changed_page_before(self): - # stm_start_transaction() + # self.start_transaction() # p1 = stm_allocate(16) # p2 = stm_allocate(16) # p1[8] = 'A' # p2[8] = 'a' - # stm_stop_transaction(False) - # stm_start_transaction() + # self.commit_transaction(False) + # self.start_transaction() # # # self.switch(1) - # stm_start_transaction() + # self.start_transaction() # stm_write(p1) # assert p1[8] == 'A' # p1[8] = 'B' - # stm_stop_transaction(False) + # self.commit_transaction(False) # # # self.switch(0) # stm_write(p2) @@ -571,27 +571,27 @@ # p2[8] = 'b' # p3 = stm_allocate(16) # goes into the same page, which I already # p3[8] = ':' # modified just above - # stm_stop_transaction(False) + # self.commit_transaction(False) # # # assert p1[8] == 'B' # assert p2[8] == 'b' # assert p3[8] == ':' # def test_page_extra_malloc_changed_page_after(self): - # stm_start_transaction() + # self.start_transaction() # p1 = stm_allocate(16) # p2 = stm_allocate(16) # p1[8] = 'A' # p2[8] = 'a' - # stm_stop_transaction(False) - # stm_start_transaction() + # self.commit_transaction(False) + # self.start_transaction() # # # self.switch(1) - # stm_start_transaction() + # self.start_transaction() # stm_write(p1) # assert p1[8] == 'A' # p1[8] = 'B' - # stm_stop_transaction(False) + # self.commit_transaction(False) # # # self.switch(0) # p3 = stm_allocate(16) # goes into the same page, which I will @@ -599,19 +599,19 @@ # stm_write(p2) # assert p2[8] == 'a' # p2[8] = 'b' - # stm_stop_transaction(False) + # self.commit_transaction(False) # # # assert p1[8] == 'B' # assert p2[8] == 'b' # assert p3[8] == ':' # def test_overflow_write_history(self): - # 
stm_start_transaction() + # self.start_transaction() # plist = [stm_allocate(n) for n in range(16, 256, 8)] - # stm_stop_transaction(False) + # self.commit_transaction(False) # # # for i in range(20): - # stm_start_transaction() + # self.start_transaction() # for p in plist: # stm_write(p) - # stm_stop_transaction(False) + # self.commit_transaction(False) From noreply at buildbot.pypy.org Mon Feb 10 20:30:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 10 Feb 2014 20:30:31 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Next test passes. Message-ID: <20140210193031.51BD51C35DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r725:ef01288963ea Date: 2014-02-10 20:30 +0100 http://bitbucket.org/pypy/stmgc/changeset/ef01288963ea/ Log: Next test passes. diff --git a/c7/list.c b/c7/list.c deleted file mode 100644 --- a/c7/list.c +++ /dev/null @@ -1,39 +0,0 @@ -#include -#include -#include -#include - -#include "list.h" - - -#define SETSIZE(n) (sizeof(struct stm_list_s) + ITEMSSIZE(n)) -#define ITEMSSIZE(n) ((n) * sizeof(object_t*)) -#define OVERCNT(n) (33 + ((((n) / 2) * 3) | 1)) - -struct stm_list_s *stm_list_create(void) -{ - uintptr_t initial_allocation = 32; - struct stm_list_s *lst = malloc(SETSIZE(initial_allocation)); - if (lst == NULL) { - perror("out of memory in stm_list_create"); - abort(); - } - lst->count = 0; - lst->last_allocated = initial_allocation - 1; - assert(lst->last_allocated & 1); - return lst; -} - -struct stm_list_s *_stm_list_grow(struct stm_list_s *lst, uintptr_t nalloc) -{ - assert(lst->last_allocated & 1); - nalloc = OVERCNT(nalloc); - lst = realloc(lst, SETSIZE(nalloc)); - if (lst == NULL) { - perror("out of memory in _stm_list_grow"); - abort(); - } - lst->last_allocated = nalloc - 1; - assert(lst->last_allocated & 1); - return lst; -} diff --git a/c7/list.h b/c7/list.h deleted file mode 100644 --- a/c7/list.h +++ /dev/null @@ -1,76 +0,0 @@ -#ifndef _STM_LIST_H -#define _STM_LIST_H - -#include "core.h" -#include - -struct stm_list_s { - uintptr_t count; - union { - uintptr_t last_allocated; /* always odd */ - //struct stm_list_s *nextlist; /* always even */ - }; - object_t *items[]; -}; - -struct stm_list_s *stm_list_create(void); - -static inline void stm_list_free(struct stm_list_s *lst) -{ - free(lst); -} - - -struct stm_list_s *_stm_list_grow(struct stm_list_s *, uintptr_t); - -static inline struct stm_list_s * -stm_list_append(struct stm_list_s *lst, object_t *item) -{ - uintptr_t index = lst->count++; - if (UNLIKELY(index > lst->last_allocated)) - lst = _stm_list_grow(lst, index); - lst->items[index] = item; - return lst; -} - -#define LIST_APPEND(lst, e) { \ - lst = stm_list_append(lst, e); \ - } - -static inline void stm_list_clear(struct stm_list_s *lst) -{ - lst->count = 0; -} - -static inline bool stm_list_is_empty(struct stm_list_s *lst) -{ - return (lst->count == 0); -} - -static inline bool stm_list_count(struct stm_list_s *lst) -{ - return lst->count; -} - -static inline object_t *stm_list_pop_item(struct stm_list_s *lst) -{ - return lst->items[--lst->count]; -} - -static inline object_t *stm_list_item(struct stm_list_s *lst, uintptr_t index) -{ - return lst->items[index]; -} - -#define STM_LIST_FOREACH(lst, CODE) \ - do { \ - struct stm_list_s *_lst = (lst); \ - uintptr_t _i; \ - for (_i = _lst->count; _i--; ) { \ - object_t *item = _lst->items[_i]; \ - CODE; \ - } \ - } while (0) - - -#endif diff --git a/c7/nursery.c b/c7/nursery.c deleted file mode 100644 --- a/c7/nursery.c +++ /dev/null @@ -1,355 
+0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -#include "core.h" -#include "list.h" -#include "nursery.h" -#include "pages.h" -#include "stmsync.h" -#include "largemalloc.h" - -void stm_major_collection(void) -{ - assert(_STM_TL->active); - abort(); -} - - -bool _stm_is_young(object_t *o) -{ - assert((uintptr_t)o >= FIRST_NURSERY_PAGE * 4096); - return (uintptr_t)o < FIRST_AFTER_NURSERY_PAGE * 4096; -} - - -object_t *_stm_allocate_old(size_t size) -{ - object_t* o = stm_large_malloc(size); - memset(real_address(o), 0, size); - o->stm_flags |= GCFLAG_WRITE_BARRIER; - return o; -} - -object_t *stm_allocate_prebuilt(size_t size) -{ - object_t* res = _stm_allocate_old(size); /* XXX */ - return res; -} - -localchar_t *_stm_alloc_next_page(size_t size_class) -{ - /* may return uninitialized pages */ - - /* 'alloc->next' points to where the next allocation should go. The - present function is called instead when this next allocation is - equal to 'alloc->stop'. As we know that 'start', 'next' and - 'stop' are always nearby pointers, we play tricks and only store - the lower 16 bits of 'start' and 'stop', so that the three - variables plus some flags fit in 16 bytes. - */ - uintptr_t page; - localchar_t *result; - alloc_for_size_t *alloc = &_STM_TL->alloc[size_class]; - size_t size = size_class * 8; - - /* reserve a fresh new page (XXX: from the end!) */ - page = stm_pages_reserve(1); - - assert(memset(real_address((object_t*)(page * 4096)), 0xdd, 4096)); - - result = (localchar_t *)(page * 4096UL); - alloc->start = (uintptr_t)result; - alloc->stop = alloc->start + (4096 / size) * size; - alloc->next = result + size; - alloc->flag_partial_page = false; - return result; -} - -object_t *stm_big_small_alloc_old(size_t size, bool *is_small) -{ - /* may return uninitialized objects */ - object_t *result; - size_t size_class = size / 8; - assert(size_class >= 2); - - if (size_class >= LARGE_OBJECT_WORDS) { - result = stm_large_malloc(size); - *is_small = 0; - } else { - *is_small = 1; - alloc_for_size_t *alloc = &_STM_TL->alloc[size_class]; - - if ((uint16_t)((uintptr_t)alloc->next) == alloc->stop) { - result = (object_t *)_stm_alloc_next_page(size_class); - } else { - result = (object_t *)alloc->next; - alloc->next += size; - } - } - return result; -} - - - -void trace_if_young(object_t **pobj) -{ - /* takes a normal pointer to a thread-local pointer to an object */ - if (*pobj == NULL) - return; - if (!_stm_is_young(*pobj)) - return; - - /* the location the object moved to is at an 8b offset */ - localchar_t *temp = ((localchar_t *)(*pobj)) + 8; - object_t * TLPREFIX *pforwarded = (object_t* TLPREFIX *)temp; - if ((*pobj)->stm_flags & GCFLAG_MOVED) { - *pobj = *pforwarded; - return; - } - - /* move obj to somewhere else */ - size_t size = stmcb_size(real_address(*pobj)); - bool is_small; - object_t *moved = stm_big_small_alloc_old(size, &is_small); - - memcpy((void*)real_address(moved), - (void*)real_address(*pobj), - size); - - /* object is not committed yet */ - moved->stm_flags |= GCFLAG_NOT_COMMITTED; - if (is_small) /* means, not allocated by large-malloc */ - moved->stm_flags |= GCFLAG_SMALL; - assert(size == _stm_data_size((struct object_s*)REAL_ADDRESS(get_thread_base(0), moved))); - LIST_APPEND(_STM_TL->uncommitted_objects, moved); - - (*pobj)->stm_flags |= GCFLAG_MOVED; - *pforwarded = moved; - *pobj = moved; - - LIST_APPEND(_STM_TL->old_objects_to_trace, moved); -} - -void minor_collect() -{ - /* visit shadowstack & add to 
old_obj_to_trace */ - object_t **current = _STM_TL->shadow_stack; - object_t **base = _STM_TL->shadow_stack_base; - while (current-- != base) { - trace_if_young(current); - } - - /* visit old_obj_to_trace until empty */ - struct stm_list_s *old_objs = _STM_TL->old_objects_to_trace; - while (!stm_list_is_empty(old_objs)) { - object_t *item = stm_list_pop_item(old_objs); - - assert(!_stm_is_young(item)); - assert(!(item->stm_flags & GCFLAG_WRITE_BARRIER)); - - /* re-add write-barrier */ - item->stm_flags |= GCFLAG_WRITE_BARRIER; - - stmcb_trace(real_address(item), trace_if_young); - old_objs = _STM_TL->old_objects_to_trace; - } - - /* clear nursery */ - localchar_t *nursery_base = (localchar_t*)(FIRST_NURSERY_PAGE * 4096); - memset((void*)real_address((object_t*)nursery_base), 0x0, - NURSERY_CURRENT(_STM_TL) - nursery_base); - SET_NURSERY_CURRENT(_STM_TL, nursery_base); -} - -void _stm_minor_collect() -{ - minor_collect(); -} - -localchar_t *collect_and_reserve(size_t size) -{ - localchar_t *new_current = _STM_TL->nursery_current; - - while (((uintptr_t)new_current > FIRST_AFTER_NURSERY_PAGE * 4096) - && _STM_TL->nursery_current_halfwords[LENDIAN]) { - - _STM_TL->nursery_current_halfwords[LENDIAN] = 0; - _stm_start_safe_point(0); - /* no collect, it would mess with nursery_current */ - _stm_stop_safe_point(0); - - new_current = _STM_TL->nursery_current; - } - - if (!((uintptr_t)new_current > FIRST_AFTER_NURSERY_PAGE * 4096)) { - /* after safe-point, new_current is actually fine again */ - return new_current - size; - } - - /* reset nursery_current (left invalid by the caller) */ - SET_NURSERY_CURRENT(_STM_TL, new_current - size); - - minor_collect(); - - /* XXX: if we_want_major_collect: acquire EXCLUSIVE & COLLECT lock - and do it */ - - localchar_t *current = NURSERY_CURRENT(_STM_TL); - assert((uintptr_t)current + size <= FIRST_AFTER_NURSERY_PAGE * 4096); - SET_NURSERY_CURRENT(_STM_TL, current + size); - return current; -} - - -object_t *stm_allocate(size_t size) -{ - object_t *result; - - assert(_STM_TL->active); - assert(size % 8 == 0); - assert(16 <= size); - - /* XXX move out of fastpath */ - if (UNLIKELY(size >= NURSERY_SECTION)) { - /* allocate large objects outside the nursery immediately, - otherwise they may trigger too many minor collections - and degrade performance */ - bool is_small; - result = stm_big_small_alloc_old(size, &is_small); - - memset((void*)real_address(result), 0, size); - - /* object is not committed yet */ - result->stm_flags |= GCFLAG_NOT_COMMITTED; - if (is_small) /* means, not allocated by large-malloc */ - result->stm_flags |= GCFLAG_SMALL; - assert(size == _stm_data_size((struct object_s*)REAL_ADDRESS(get_thread_base(0), result))); - - LIST_APPEND(_STM_TL->uncommitted_objects, result); - LIST_APPEND(_STM_TL->old_objects_to_trace, result); - return result; - } - - localchar_t *current = _STM_TL->nursery_current; - localchar_t *new_current = current + size; - SET_NURSERY_CURRENT(_STM_TL, new_current); - - if ((uintptr_t)new_current > FIRST_AFTER_NURSERY_PAGE * 4096) { - current = collect_and_reserve(size); - } - - result = (object_t *)current; - return result; -} - - -void push_uncommitted_to_other_threads() -{ - /* WE HAVE THE EXCLUSIVE LOCK HERE */ - - struct stm_list_s *uncommitted = _STM_TL->uncommitted_objects; - char *local_base = _STM_TL->thread_base; - char *remote_base = get_thread_base(1 - _STM_TL->thread_num); - - STM_LIST_FOREACH( - uncommitted, - ({ - /* write-lock always cleared for these objects */ - uintptr_t lock_idx; - assert(lock_idx = 
(((uintptr_t)item) >> 4) - READMARKER_START); - assert(!write_locks[lock_idx]); - - /* remove the flag (they are now committed) */ - item->stm_flags &= ~GCFLAG_NOT_COMMITTED; - - _stm_move_object(item, - REAL_ADDRESS(local_base, item), - REAL_ADDRESS(remote_base, item)); - })); -} - -void nursery_on_start() -{ - assert(stm_list_is_empty(_STM_TL->old_objects_to_trace)); - - _STM_TL->old_shadow_stack = _STM_TL->shadow_stack; -} - -void nursery_on_commit() -{ - /* DON'T do a minor_collect. This is already done in - the caller (optimization) */ - /* minor_collect(); */ - - /* uncommitted objects */ - push_uncommitted_to_other_threads(); - stm_list_clear(_STM_TL->uncommitted_objects); - - /* for small alloc classes, set the partial flag */ - long j; - for (j = 2; j < LARGE_OBJECT_WORDS; j++) { - alloc_for_size_t *alloc = &_STM_TL->alloc[j]; - uint16_t start = alloc->start; - uint16_t cur = (uintptr_t)alloc->next; - - if (start == cur) - continue; /* page full -> will be replaced automatically */ - - alloc->start = cur; /* next transaction has different 'start' to - reset in case of an abort */ - alloc->flag_partial_page = 1; - } -} - -void nursery_on_abort() -{ - /* reset shadowstack */ - _STM_TL->shadow_stack = _STM_TL->old_shadow_stack; - - /* clear old_objects_to_trace (they will have the WRITE_BARRIER flag - set because the ones we care about are also in modified_objects) */ - stm_list_clear(_STM_TL->old_objects_to_trace); - - /* clear the nursery */ - localchar_t *nursery_base = (localchar_t*)(FIRST_NURSERY_PAGE * 4096); - memset((void*)real_address((object_t*)nursery_base), 0x0, - NURSERY_CURRENT(_STM_TL) - nursery_base); - SET_NURSERY_CURRENT(_STM_TL, nursery_base); - - - /* reset the alloc-pages to the state at the start of the transaction */ - long j; - for (j = 2; j < LARGE_OBJECT_WORDS; j++) { - alloc_for_size_t *alloc = &_STM_TL->alloc[j]; - uint16_t num_allocated = ((uintptr_t)alloc->next) - alloc->start; - - if (num_allocated) { - /* forget about all non-committed objects */ - alloc->next -= num_allocated; - } - } - - /* free uncommitted objects */ - struct stm_list_s *uncommitted = _STM_TL->uncommitted_objects; - - STM_LIST_FOREACH( - uncommitted, - ({ - if (!(item->stm_flags & GCFLAG_SMALL)) - stm_large_free(item); - })); - - stm_list_clear(uncommitted); -} - - - diff --git a/c7/nursery.h b/c7/nursery.h deleted file mode 100644 --- a/c7/nursery.h +++ /dev/null @@ -1,19 +0,0 @@ - - - -object_t *stm_allocate_prebuilt(size_t size); -object_t *_stm_allocate_old(size_t size); -object_t *stm_allocate(size_t size); - -void _stm_minor_collect(); -bool _stm_is_young(object_t *o); - -void nursery_on_abort(); -void nursery_on_commit(); -void nursery_on_start(); - - - -extern uintptr_t index_page_never_used; - - diff --git a/c7/pagecopy.c b/c7/pagecopy.c deleted file mode 100644 --- a/c7/pagecopy.c +++ /dev/null @@ -1,57 +0,0 @@ - -void pagecopy(void *dest, const void *src) -{ - unsigned long i; - for (i=0; i<4096/128; i++) { - asm volatile("movdqa (%0), %%xmm0\n" - "movdqa 16(%0), %%xmm1\n" - "movdqa 32(%0), %%xmm2\n" - "movdqa 48(%0), %%xmm3\n" - "movdqa %%xmm0, (%1)\n" - "movdqa %%xmm1, 16(%1)\n" - "movdqa %%xmm2, 32(%1)\n" - "movdqa %%xmm3, 48(%1)\n" - "movdqa 64(%0), %%xmm0\n" - "movdqa 80(%0), %%xmm1\n" - "movdqa 96(%0), %%xmm2\n" - "movdqa 112(%0), %%xmm3\n" - "movdqa %%xmm0, 64(%1)\n" - "movdqa %%xmm1, 80(%1)\n" - "movdqa %%xmm2, 96(%1)\n" - "movdqa %%xmm3, 112(%1)\n" - : - : "r"(src + 128*i), "r"(dest + 128*i) - : "xmm0", "xmm1", "xmm2", "xmm3", "memory"); - } -} - -#if 0 /* XXX 
enable if detected on the cpu */ -void pagecopy_ymm8(void *dest, const void *src) -{ - asm volatile("0:\n" - "vmovdqa (%0), %%ymm0\n" - "vmovdqa 32(%0), %%ymm1\n" - "vmovdqa 64(%0), %%ymm2\n" - "vmovdqa 96(%0), %%ymm3\n" - "vmovdqa 128(%0), %%ymm4\n" - "vmovdqa 160(%0), %%ymm5\n" - "vmovdqa 192(%0), %%ymm6\n" - "vmovdqa 224(%0), %%ymm7\n" - "addq $256, %0\n" - "vmovdqa %%ymm0, (%1)\n" - "vmovdqa %%ymm1, 32(%1)\n" - "vmovdqa %%ymm2, 64(%1)\n" - "vmovdqa %%ymm3, 96(%1)\n" - "vmovdqa %%ymm4, 128(%1)\n" - "vmovdqa %%ymm5, 160(%1)\n" - "vmovdqa %%ymm6, 192(%1)\n" - "vmovdqa %%ymm7, 224(%1)\n" - "addq $256, %1\n" - "cmpq %2, %0\n" - "jne 0b" - : "=r"(src), "=r"(dest) - : "r"((char *)src + 4096), "0"(src), "1"(dest) - : "xmm0", "xmm1", "xmm2", "xmm3", - "xmm4", "xmm5", "xmm6", "xmm7"); -} -#endif diff --git a/c7/pagecopy.h b/c7/pagecopy.h deleted file mode 100644 --- a/c7/pagecopy.h +++ /dev/null @@ -1,2 +0,0 @@ - -void pagecopy(void *dest, const void *src); diff --git a/c7/reader_writer_lock.c b/c7/reader_writer_lock.c deleted file mode 100644 --- a/c7/reader_writer_lock.c +++ /dev/null @@ -1,97 +0,0 @@ -/* Taken from: http://locklessinc.com/articles/locks/ - - Sticking to semi-portable C code, we can still do a little better. - There exists a form of the ticket lock that is designed for read-write - locks. An example written in assembly was posted to the Linux kernel - mailing list in 2002 by David Howells from RedHat. This was a highly - optimized version of a read-write ticket lock developed at IBM in the - early 90's by Joseph Seigh. Note that a similar (but not identical) - algorithm was published by John Mellor-Crummey and Michael Scott in - their landmark paper "Scalable Reader-Writer Synchronization for - Shared-Memory Multiprocessors". Converting the algorithm from - assembly language to C yields: -*/ -#include -#include "reader_writer_lock.h" - - -#define EBUSY 1 -#define atomic_xadd(P, V) __sync_fetch_and_add((P), (V)) -#define cmpxchg(P, O, N) __sync_val_compare_and_swap((P), (O), (N)) -#define atomic_inc(P) __sync_add_and_fetch((P), 1) -#define atomic_dec(P) __sync_add_and_fetch((P), -1) -#define atomic_add(P, V) __sync_add_and_fetch((P), (V)) -#define atomic_set_bit(P, V) __sync_or_and_fetch((P), 1<<(V)) -#define atomic_clear_bit(P, V) __sync_and_and_fetch((P), ~(1<<(V))) -/* Compile read-write barrier */ -#define barrier() asm volatile("": : :"memory") - -/* Pause instruction to prevent excess processor bus usage */ -#define cpu_relax() asm volatile("pause\n": : :"memory") - - - -void rwticket_wrlock(rwticket *l) -{ - unsigned me = atomic_xadd(&l->u, (1<<16)); - unsigned char val = me >> 16; - - while (val != l->s.write) cpu_relax(); -} - -int rwticket_wrunlock(rwticket *l) -{ - rwticket t = *l; - - barrier(); - - t.s.write++; - t.s.read++; - - *(unsigned short *) l = t.us; - return 0; -} - -int rwticket_wrtrylock(rwticket *l) -{ - unsigned cmp = l->u; - - unsigned me = cmp & 0xff;//l->s.users; - unsigned char menew = me + 1; - // unsigned read = (cmp & 0xffff) >> 8;//l->s.read << 8; - //unsigned cmp = (me << 16) + read + me; - unsigned cmpnew = (menew << 16) | (cmp & 0x0000ffff); //(menew << 16) + read + me; - - if (cmpxchg(&l->u, cmp, cmpnew) == cmp) return 0; - - return EBUSY; -} - -void rwticket_rdlock(rwticket *l) -{ - unsigned me = atomic_xadd(&l->u, (1<<16)); - unsigned char val = me >> 16; - - while (val != l->s.read) cpu_relax(); - l->s.read++; -} - -void rwticket_rdunlock(rwticket *l) -{ - atomic_inc(&l->s.write); -} - -int rwticket_rdtrylock(rwticket *l) -{ - assert(0); 
- /* XXX implement like wrtrylock */ - unsigned me = l->s.users; - unsigned write = l->s.write; - unsigned char menew = me + 1; - unsigned cmp = (me << 16) + (me << 8) + write; - unsigned cmpnew = ((unsigned) menew << 16) + (menew << 8) + write; - - if (cmpxchg(&l->u, cmp, cmpnew) == cmp) return 0; - - return EBUSY; -} diff --git a/c7/reader_writer_lock.h b/c7/reader_writer_lock.h deleted file mode 100644 --- a/c7/reader_writer_lock.h +++ /dev/null @@ -1,22 +0,0 @@ - -typedef union rwticket rwticket; -union rwticket -{ - unsigned u; - unsigned short us; - struct - { - unsigned char write; - unsigned char read; - unsigned char users; - } s; -}; - -void rwticket_wrlock(rwticket *l); -int rwticket_wrunlock(rwticket *l); -int rwticket_wrtrylock(rwticket *l); -void rwticket_rdlock(rwticket *l); -void rwticket_rdunlock(rwticket *l); -int rwticket_rdtrylock(rwticket *l); - - diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -3,7 +3,23 @@ #endif +static void setup_gcpage(void) +{ + largemalloc_init_arena(stm_object_pages + END_NURSERY_PAGE * 4096UL, + (NB_PAGES - END_NURSERY_PAGE) * 4096UL); +} + object_t *stm_allocate_prebuilt(ssize_t size_rounded_up) { abort(); } + +object_t *_stm_allocate_old(ssize_t size_rounded_up) +{ + char *addr = large_malloc(size_rounded_up); + memset(addr, 0, size_rounded_up); + + object_t* o = (object_t *)(addr - stm_object_pages); + o->stm_flags = GCFLAG_WRITE_BARRIER; + return o; +} diff --git a/c7/largemalloc.c b/c7/stm/largemalloc.c rename from c7/largemalloc.c rename to c7/stm/largemalloc.c --- a/c7/largemalloc.c +++ b/c7/stm/largemalloc.c @@ -1,3 +1,7 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + /* This contains a lot of inspiration from malloc() in the GNU C Library. More precisely, this is (a subset of) the part that handles large blocks, which in our case means at least 288 bytes. It is actually @@ -5,16 +9,6 @@ or medium-block support that are also present in the GNU C Library. */ -#include -#include -#include -#include -#include "largemalloc.h" -#include "pages.h" -#include "pagecopy.h" - -#define MMAP_LIMIT (1280*1024) - #define largebin_index(sz) \ (((sz) < (48 << 6)) ? ((sz) >> 6): /* 0 - 47 */ \ ((sz) < (24 << 9)) ? 42 + ((sz) >> 9): /* 48 - 65 */ \ @@ -82,7 +76,7 @@ list are some unsorted chunks. All unsorted chunks are after all sorted chunks. The flag 'FLAG_SORTED' distinguishes them. - Note that if the user always calls stm_large_malloc() with a large + Note that if the user always calls large_malloc() with a large enough argument, then the few bins corresponding to smaller values will never be sorted at all. They are still populated with the fragments of space between bigger allocations. @@ -90,62 +84,7 @@ static dlist_t largebins[N_BINS]; static mchunk_t *first_chunk, *last_chunk; -uint8_t alloc_lock = 0; -void _stm_chunk_pages(struct object_s *data, uintptr_t *start, uintptr_t *num) -{ - /* expects object_s in thread0-space */ - /* returns the start page and number of pages that the *payload* - spans over. 
the CHUNK_HEADER is not included in the calculations */ - mchunk_t *chunk = data2chunk((char*)data); - *start = (((char*)data) - get_thread_base(0)) / 4096UL; - assert(*start < NB_PAGES); - size_t offset_into_page = ((uintptr_t)data) & 4095UL; // % 4096 - *num = ((chunk->size & ~FLAG_SORTED) + offset_into_page + 4095) / 4096UL; - assert(*num < NB_PAGES); -} - -size_t _stm_data_size(struct object_s *data) -{ - if (data->stm_flags & GCFLAG_SMALL) - return stmcb_size(data); /* XXX: inefficient */ - - mchunk_t *chunk = data2chunk((char*)data); - return chunk->size & ~FLAG_SORTED; -} - -void _stm_move_object(object_t* obj, char *src, char *dst) -{ - /* XXX: should be thread-safe... */ - - /* only copies if page is PRIVATE - XXX: various optimizations for objects with - multiple pages. E.g. using pagecopy or - memcpy over multiple PRIVATE pages. */ - char *end = src + _stm_data_size((struct object_s*)REAL_ADDRESS(get_thread_base(0), obj)); - uintptr_t pagenum, num; - struct object_s *t0_obj = (struct object_s*)REAL_ADDRESS(get_thread_base(0), obj); - - if (obj->stm_flags & GCFLAG_SMALL) { - pagenum = (uintptr_t)obj / 4096UL; - num = 1; - } else { - _stm_chunk_pages(t0_obj, &pagenum, &num); - } - - while (src < end) { - size_t to_copy = 4096UL - ((uintptr_t)src & 4095UL); - if (to_copy > end - src) - to_copy = end - src; - if (stm_get_page_flag(pagenum) == PRIVATE_PAGE) { - memcpy(dst, src, to_copy); - } - - pagenum++; - src += to_copy; - dst += to_copy; - } -} static void insert_unsorted(mchunk_t *new) { @@ -231,11 +170,8 @@ really_sort_bin(index); } -object_t *stm_large_malloc(size_t request_size) +static char *large_malloc(size_t request_size) { - while (__sync_lock_test_and_set(&alloc_lock, 1)) - spin_loop(); - /* 'request_size' should already be a multiple of the word size here */ assert((request_size & (sizeof(char *)-1)) == 0); @@ -272,7 +208,7 @@ } /* not enough memory. */ - alloc_lock = 0; + fprintf(stderr, "not enough memory!\n"); abort(); return NULL; @@ -303,19 +239,12 @@ } mscan->size = request_size; mscan->prev_size = BOTH_CHUNKS_USED; - - alloc_lock = 0; - return (object_t *)(((char *)&mscan->d) - get_thread_base(0)); + + return (char *)&mscan->d; } -void stm_large_free(object_t *tldata) +static void large_free(char *data) { - assert(!(tldata->stm_flags & GCFLAG_SMALL)); - - while (__sync_lock_test_and_set(&alloc_lock, 1)) - spin_loop(); - - char *data = _stm_real_address(tldata); mchunk_t *chunk = data2chunk(data); assert((chunk->size & (sizeof(char *) - 1)) == 0); assert(chunk->prev_size != THIS_CHUNK_FREE); @@ -373,8 +302,6 @@ } insert_unsorted(chunk); - - alloc_lock = 0; } @@ -414,16 +341,13 @@ assert(data - 16 == (char *)last_chunk); } -char *_stm_largemalloc_data_start() -{ - return (char*)first_chunk; -} - -void stm_largemalloc_init(char *data_start, size_t data_size) +static void largemalloc_init_arena(char *data_start, size_t data_size) { int i; - for (i = 0; i < N_BINS; i++) - largebins[i].prev = largebins[i].next = &largebins[i]; + for (i = 0; i < N_BINS; i++) { + largebins[i].prev = &largebins[i]; + largebins[i].next = &largebins[i]; + } assert(data_size >= 2 * sizeof(struct malloc_chunk)); assert((data_size & 31) == 0); @@ -438,10 +362,8 @@ insert_unsorted(first_chunk); } -int stm_largemalloc_resize_arena(size_t new_size) +static int largemalloc_resize_arena(size_t new_size) { - /* XXX not thread-safe regarding all functions here... 
*/ - assert(new_size >= 2 * sizeof(struct malloc_chunk)); assert((new_size & 31) == 0); @@ -490,7 +412,7 @@ assert(last_chunk == next_chunk_u(old_last_chunk)); /* then free the last_chunk (turn it from "used" to "free) */ - stm_large_free((object_t *)(((char *)&old_last_chunk->d) - get_thread_base(0))); + large_free((char *)&old_last_chunk->d); } return 1; } diff --git a/c7/largemalloc.h b/c7/stm/largemalloc.h rename from c7/largemalloc.h rename to c7/stm/largemalloc.h --- a/c7/largemalloc.h +++ b/c7/stm/largemalloc.h @@ -1,17 +1,11 @@ -#include -#include "core.h" -void stm_largemalloc_init(char *data_start, size_t data_size); -int stm_largemalloc_resize_arena(size_t new_size); +/* all addresses passed to this interface should be "char *" pointers + in the segment 0. */ +static void largemalloc_init_arena(char *data_start, size_t data_size); +static int largemalloc_resize_arena(size_t new_size) __attribute__((unused)); -object_t *stm_large_malloc(size_t request_size); -void stm_large_free(object_t *data); - -void _stm_large_dump(void); -char *_stm_largemalloc_data_start(void); - -void _stm_move_object(object_t *obj, char *src, char *dst); -size_t _stm_data_size(struct object_s *data); -void _stm_chunk_pages(struct object_s *data, uintptr_t *start, uintptr_t *num); - - +/* large_malloc() and large_free() are not thread-safe. This is + due to the fact that they should be mostly called during minor or + major collections, which have their own synchronization mecanisms. */ +static char *large_malloc(size_t request_size); +static void large_free(char *data) __attribute__((unused)); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -42,8 +42,8 @@ bool _stm_in_nursery(object_t *obj) { - uint64_t p = (uint64_t)obj; - return (p - NURSERY_START) < NURSERY_SIZE; + assert((uintptr_t)obj >= NURSERY_START); + return (uintptr_t)obj < NURSERY_START + NURSERY_SIZE; } diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -61,6 +61,7 @@ setup_sync(); setup_nursery(); + setup_gcpage(); #if 0 stm_largemalloc_init(heap, HEAP_PAGES * 4096UL); diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -3,10 +3,12 @@ #include "stm/core.h" #include "stm/pages.h" #include "stm/sync.h" +#include "stm/largemalloc.h" #include "stm/misc.c" #include "stm/pages.c" #include "stm/gcpage.c" +#include "stm/largemalloc.c" #include "stm/nursery.c" #include "stm/sync.c" #include "stm/setup.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -73,6 +73,8 @@ char *_stm_real_address(object_t *o); object_t *_stm_segment_address(char *ptr); void _stm_test_switch(stm_thread_local_t *tl); +object_t *_stm_allocate_old(ssize_t size_rounded_up); +void _stm_large_dump(void); #endif #define _STM_GCFLAG_WRITE_BARRIER 0x01 diff --git a/c7/stmsync.c b/c7/stmsync.c deleted file mode 100644 --- a/c7/stmsync.c +++ /dev/null @@ -1,306 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include - -#include "stmsync.h" -#include "core.h" -#include "reader_writer_lock.h" -#include "list.h" - -#define INVALID_GS_VALUE 0x6D6D6D6D - -/* a multi-reader, single-writer lock: transactions normally take a reader - lock, so don't conflict with each other; when we need to do a global GC, - we take a writer lock to "stop the world". 
*/ - -rwticket rw_shared_lock __attribute__((aligned(64))); /* the "GIL" */ -rwticket rw_collection_lock __attribute__((aligned(64))); /* for major collections */ - -sem_t static_thread_semaphore __attribute__((aligned(64))); -uint8_t static_threads[NB_THREADS]; /* 1 if running a pthread */ -__thread struct _thread_local1_s *pthread_tl = NULL; - - - - -void _stm_acquire_tl_segment(); -void _stm_release_tl_segment(); - -static void set_gs_register(uint64_t value) -{ - int result = syscall(SYS_arch_prctl, ARCH_SET_GS, value); - assert(result == 0); -} - -bool _stm_is_in_transaction(void) -{ - return pthread_tl->active; -} - - -char* _stm_restore_local_state(int thread_num) -{ - if (thread_num == -1) { /* mostly for debugging */ - set_gs_register(INVALID_GS_VALUE); - return (char*)1; - } - - char *thread_base = get_thread_base(thread_num); - set_gs_register((uintptr_t)thread_base); - - assert(_STM_TL->thread_num == thread_num); - assert(_STM_TL->thread_base == thread_base); - return thread_base; -} - - -void _stm_yield_thread_segment() -{ - _stm_release_tl_segment(); - - /* release our static thread: */ - static_threads[_STM_TL->thread_num] = 0; - sem_post(&static_thread_semaphore); - - assert(_stm_restore_local_state(-1)); /* invalid */ -} - -void _stm_grab_thread_segment() -{ - /* acquire a static thread: */ - sem_wait(&static_thread_semaphore); - int thread_num = 0; - while (1) { - if (!__sync_lock_test_and_set(&static_threads[thread_num], 1)) - break; - thread_num = (thread_num + 1) % NB_THREADS; - } - - _stm_restore_local_state(thread_num); - _stm_acquire_tl_segment(); -} - - -void _stm_assert_clean_tl() -{ - /* between a pthread switch, these are the things - that must be guaranteed */ - - /* already set are - thread_num, thread_base: to the current static thread - nursery_current: nursery should be cleared - active, need_abort: no transaction running - modified_objects: empty - alloc: re-usable by this thread - uncommitted_objects: empty - old_objects_to_trace: empty - !!shadow_stack...: still belongs to previous thread - */ - assert(stm_list_is_empty(_STM_TL->modified_objects)); - assert(stm_list_is_empty(_STM_TL->uncommitted_objects)); - assert(stm_list_is_empty(_STM_TL->old_objects_to_trace)); - - assert(!_STM_TL->active); - /* assert(!_STM_TL->need_abort); may happen, but will be cleared by - start_transaction() */ - assert(NURSERY_CURRENT(_STM_TL) == (localchar_t*)(FIRST_NURSERY_PAGE * 4096)); -} - -void _stm_acquire_tl_segment() -{ - /* makes tl-segment ours! */ - _stm_assert_clean_tl(); - - _STM_TL->shadow_stack = pthread_tl->shadow_stack; - _STM_TL->shadow_stack_base = pthread_tl->shadow_stack_base; - _STM_TL->old_shadow_stack = pthread_tl->old_shadow_stack; -} - -void _stm_release_tl_segment() -{ - /* makes tl-segment ours! 
*/ - _stm_assert_clean_tl(); - - pthread_tl->shadow_stack = _STM_TL->shadow_stack; - pthread_tl->shadow_stack_base = _STM_TL->shadow_stack_base; - pthread_tl->old_shadow_stack = _STM_TL->old_shadow_stack; -} - -void stm_setup_pthread(void) -{ - struct _thread_local1_s* tl = malloc(sizeof(struct _thread_local1_s)); - assert(!pthread_tl); - pthread_tl = tl; - - /* get us a clean thread segment */ - _stm_grab_thread_segment(); - _stm_assert_clean_tl(); - - /* allocate shadow stack for this thread */ - _STM_TL->shadow_stack = (object_t**)malloc(LENGTH_SHADOW_STACK * sizeof(void*)); - _STM_TL->shadow_stack_base = _STM_TL->shadow_stack; - - /* copy everything from _STM_TL */ - memcpy(tl, REAL_ADDRESS(get_thread_base(_STM_TL->thread_num), _STM_TL), - sizeof(struct _thread_local1_s)); - - /* go into safe-point again: */ - _stm_yield_thread_segment(); -} - - -void stm_teardown_pthread(void) -{ - free(pthread_tl->shadow_stack_base); - - free(pthread_tl); - pthread_tl = NULL; -} - - - - - -void _stm_reset_shared_lock() -{ - assert(!rwticket_wrtrylock(&rw_shared_lock)); - assert(!rwticket_wrunlock(&rw_shared_lock)); - - memset(&rw_shared_lock, 0, sizeof(rwticket)); - - assert(!rwticket_wrtrylock(&rw_collection_lock)); - assert(!rwticket_wrunlock(&rw_collection_lock)); - - memset(&rw_collection_lock, 0, sizeof(rwticket)); - - int i; - for (i = 0; i < NB_THREADS; i++) - assert(static_threads[i] == 0); - memset(static_threads, 0, sizeof(static_threads)); - sem_init(&static_thread_semaphore, 0, NB_THREADS); -} - -/* void stm_acquire_collection_lock() */ -/* { */ -/* /\* we must have the exclusive lock here and */ -/* not the colletion lock!! *\/ */ -/* /\* XXX: for more than 2 threads, need a way */ -/* to signal other threads with need_major_collect */ -/* so that they don't leave COLLECT-safe-points */ -/* when this flag is set. Otherwise we simply */ -/* wait arbitrarily long until all threads reach */ -/* COLLECT-safe-points by chance at the same time. *\/ */ -/* while (1) { */ -/* if (!rwticket_wrtrylock(&rw_collection_lock)) */ -/* break; /\* acquired! *\/ */ - -/* stm_stop_exclusive_lock(); */ -/* usleep(1); */ -/* stm_start_exclusive_lock(); */ -/* if (_STM_TL->need_abort) { */ -/* stm_stop_exclusive_lock(); */ -/* stm_start_shared_lock(); */ -/* stm_abort_transaction(); */ -/* } */ -/* } */ -/* } */ - -void stm_start_shared_lock(void) -{ - rwticket_rdlock(&rw_shared_lock); -} - -void stm_stop_shared_lock() -{ - rwticket_rdunlock(&rw_shared_lock); -} - -void stm_start_exclusive_lock(void) -{ - rwticket_wrlock(&rw_shared_lock); -} - -void stm_stop_exclusive_lock(void) -{ - rwticket_wrunlock(&rw_shared_lock); -} - -/* _stm_start_safe_point(LOCK_EXCLUSIVE|LOCK_COLLECT) - -> release the exclusive lock and also the collect-read-lock - - THREAD_YIELD: gives up its (current thread's) GS segment - so that other threads can grab it and run. This will - make _STM_TL and all thread-local addresses unusable - for the current thread. 
(requires LOCK_COLLECT) -*/ -void _stm_start_safe_point(uint8_t flags) -{ - assert(IMPLY(flags & THREAD_YIELD, flags & LOCK_COLLECT)); - - if (flags & LOCK_EXCLUSIVE) - stm_stop_exclusive_lock(); - else - stm_stop_shared_lock(); - - if (flags & LOCK_COLLECT) { - rwticket_rdunlock(&rw_collection_lock); - - if (flags & THREAD_YIELD) { - _stm_yield_thread_segment(); - } - } -} - -/* - _stm_stop_safe_point(LOCK_COLLECT|LOCK_EXCLUSIVE); - -> reacquire the collect-read-lock and the exclusive lock - - THREAD_YIELD: wait until we get a GS segment assigned - and then continue (requires LOCK_COLLECT) - */ -void _stm_stop_safe_point(uint8_t flags) -{ - assert(IMPLY(flags & THREAD_YIELD, flags & LOCK_COLLECT)); - if (flags & THREAD_YIELD) { - _stm_grab_thread_segment(); - } - - if (flags & LOCK_EXCLUSIVE) { - stm_request_safe_point(1 - _STM_TL->thread_num); - stm_start_exclusive_lock(); - } else { - stm_start_shared_lock(); - } - - if (flags & LOCK_COLLECT) { /* if we released the collection lock */ - /* acquire read-collection. always succeeds because - if there was a write-collection holder we would - also not have gotten the shared_lock */ - rwticket_rdlock(&rw_collection_lock); - } - - if (_STM_TL->active && _STM_TL->need_abort) { - if (flags & LOCK_EXCLUSIVE) { - /* restore to shared-mode with the collection lock */ - stm_stop_exclusive_lock(); - stm_start_shared_lock(); - stm_abort_transaction(); - } else { - stm_abort_transaction(); - } - } -} - - - -void stm_request_safe_point(int thread_num) -{ - struct _thread_local1_s* other_tl = _stm_dbg_get_tl(thread_num); - other_tl->nursery_current_halfwords[LENDIAN] = 1; -} - diff --git a/c7/stmsync.h b/c7/stmsync.h deleted file mode 100644 --- a/c7/stmsync.h +++ /dev/null @@ -1,30 +0,0 @@ - -#include - -void stm_start_shared_lock(void); -void stm_stop_shared_lock(void); -void stm_stop_exclusive_lock(void); -void stm_start_exclusive_lock(void); -void _stm_start_safe_point(uint8_t flags); -void _stm_stop_safe_point(uint8_t flags); -void _stm_reset_shared_lock(void); -void _stm_grab_thread_segment(void); -void _stm_yield_thread_segment(void); - -enum { - LOCK_COLLECT = (1 << 0), - LOCK_EXCLUSIVE = (1 << 1), - THREAD_YIELD = (1 << 2), -}; - - -void stm_request_safe_point(int thread_num); - - -#define NURSERY_CURRENT(tls) \ - ((localchar_t *)(uintptr_t)( \ - (tls)->nursery_current_halfwords[1-LENDIAN])) - -#define SET_NURSERY_CURRENT(tls, new_value) \ - ((tls)->nursery_current_halfwords[1-LENDIAN] = \ - (uintptr_t)(new_value)) diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -42,6 +42,7 @@ /*void stm_write(object_t *obj); use _checked_stm_write() instead */ object_t *stm_allocate(ssize_t size_rounded_up); object_t *stm_allocate_prebuilt(ssize_t size_rounded_up); +object_t *_stm_allocate_old(ssize_t size_rounded_up); void stm_setup(void); void stm_teardown(void); From noreply at buildbot.pypy.org Mon Feb 10 21:29:42 2014 From: noreply at buildbot.pypy.org (mattip) Date: Mon, 10 Feb 2014 21:29:42 +0100 (CET) Subject: [pypy-commit] pypy default: random test fixes for windows Message-ID: <20140210202942.AA3141C35DE@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: Changeset: r69119:5a5b32cdebb3 Date: 2014-02-10 22:28 +0200 http://bitbucket.org/pypy/pypy/changeset/5a5b32cdebb3/ Log: random test fixes for windows diff --git a/rpython/translator/c/test/test_extfunc.py b/rpython/translator/c/test/test_extfunc.py --- a/rpython/translator/c/test/test_extfunc.py +++ 
b/rpython/translator/c/test/test_extfunc.py @@ -65,7 +65,8 @@ f1 = compile(does_stuff, []) f1() - assert open(filename, 'r').read() == "hello world\n" + with open(filename, 'r') as fid: + assert fid.read() == "hello world\n" os.unlink(filename) def test_big_read(): @@ -296,8 +297,10 @@ os.chdir(path) return os.getcwd() f1 = compile(does_stuff, [str]) - # different on windows please - assert f1('/tmp') == os.path.realpath('/tmp') + if os.name == 'nt': + assert f1(os.environment['TEMP']) == os.path.realpath(os.environment['TEMP']) + else: + assert f1('/tmp') == os.path.realpath('/tmp') def test_mkdir_rmdir(): def does_stuff(path, delete): diff --git a/rpython/translator/c/test/test_newgc.py b/rpython/translator/c/test/test_newgc.py --- a/rpython/translator/c/test/test_newgc.py +++ b/rpython/translator/c/test/test_newgc.py @@ -658,7 +658,8 @@ def test_open_read_write_seek_close(self): self.run('open_read_write_seek_close') - assert open(self.filename, 'r').read() == "hello world\n" + with open(self.filename, 'r') as fid: + assert fid.read() == "hello world\n" os.unlink(self.filename) def define_callback_with_collect(cls): From noreply at buildbot.pypy.org Tue Feb 11 10:18:41 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Feb 2014 10:18:41 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: Add the (very lightweight) stanford talk here, because we cannot point Message-ID: <20140211091841.9F3221C03D5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r475:842edcb84f59 Date: 2014-02-11 10:18 +0100 http://bitbucket.org/pypy/pypy.org/changeset/842edcb84f59/ Log: Add the (very lightweight) stanford talk here, because we cannot point people directly to the bitbucket --- it's an HTML file containing slides. diff too long, truncating to 2000 out of 2750 lines diff --git a/talk/stanford-ee380-2011/abstract.txt b/talk/stanford-ee380-2011/abstract.txt new file mode 100644 --- /dev/null +++ b/talk/stanford-ee380-2011/abstract.txt @@ -0,0 +1,48 @@ +--------------------------------- +Python in Python: the PyPy system +--------------------------------- + +PyPy is a complete Python implementation in Python, in the old +tradition of Squeak and Scheme48 --- but there is more to PyPy +than just this. + +During this talk I will describe what PyPy is: a mature, +8 year old project of roughly 200K lines of code and 150K lines +of tests, implementing the full Python language. I will show our +results: faster execution of most programs (by a factor between +1.5x and 20x), and smaller total memory usage for large programs. + +I will then focus on the architecture of PyPy. On the one hand, +we have written a straightforward interpreter for the Python +language, using a (large) subset of Python called RPython. On +the other hand, we have a complex translation toolchain which is +able to compile interpreters from RPython to efficient C code. +(We also have experimental backends for producing JVM and .NET +code.) + +There are two distinct benefits from keeping the interpreter and +the translation toolchain separate. On the one hand, we keep our +interpreter simple, and we can easily write interpreters for +other languages. We have a complete Prolog interpreter and have +at least played with versions for Smalltalk and JavaScript. On +the other hand, the fact that our source interpreter does not +contain any architectural choices makes for unprecedented +flexibility. 
Our toolchain "weaves" into the final executable +various aspects including the object memory layout, choice of garbage +collection (GC), of execution model (regular vs. "Stackless"), +choice of backend (C/JVM/.NET), and even the +Just-in-Time Compiler (JIT). There are great practical benefits +to this. For example, CPython's GC is stuck with using reference +counting, while we offer a number of choices. + +I will explain how this is done, and describe in more details the +JIT Compiler. It is a "tracing JIT", as pioneered in recent +years for Java and JavaScript. However, it is also a "meta JIT": +it works for any language, by tracing at the level of the +language's interpreter. In other words, from an interpreter for +any language, we produce quasi-automatically a JIT suited to this +language. + +I will conclude by comparing PyPy to other projects, old and new: +Squeak, CPython, Jython and IronPython, the Jikes RVM, as well as +the various recent tracing JITs such as TraceMonkey. diff --git a/talk/stanford-ee380-2011/speed.png b/talk/stanford-ee380-2011/speed.png new file mode 100644 index 0000000000000000000000000000000000000000..f43135cd4f4e26589d57eb85bea7ec42cbc5220e GIT binary patch [cut] diff --git a/talk/stanford-ee380-2011/talk.html b/talk/stanford-ee380-2011/talk.html new file mode 100644 --- /dev/null +++ b/talk/stanford-ee380-2011/talk.html @@ -0,0 +1,952 @@ + + + + + + + +Python in Python: the PyPy system + + + + + + + + + + + + + + +
    [talk.html: 952 lines of S5/HTML slide markup, not reproduced here; its
     slide content is identical to talk.txt, added in full below.]
+ + diff --git a/talk/stanford-ee380-2011/talk.txt b/talk/stanford-ee380-2011/talk.txt new file mode 100644 --- /dev/null +++ b/talk/stanford-ee380-2011/talk.txt @@ -0,0 +1,771 @@ +.. include:: + +========================================================= +Python in Python: the PyPy system +========================================================= + + +.. admonition:: Armin Rigo + + - *Heinrich-Heine Universit�t, Germany* + - *Open End AB, Sweden* + + March 2011 + + + + +What is Python +--------------------------------------------------------- + + +What is Python +--------------------- + +:: + + class Foo(object): + + def __init__(self, value): + self.value = value + + def double(self): + return Foo(self.value * 2) + + print Foo(42).double().value + print Foo("hello").double().value + + +In two points +------------- + +* Strongly, trivially, dynamically typed language + +* Ints, floats, longs, string, unicode, + lists, tuples, dicts, iterators, + functions, classes... + + +Python likes its dictionaries +----------------------------- + +:: + + d = {} + for i in [1, 2, 3, 4]: + d[i] = i*i + print d + +* in this example, we get ``{1:1, 2:4, 3:9, 4:16}`` + + +Python is not Java +------------------ + +:: + + for name in ["add", "sub", "mul"]: + def f(x, y): + ... + globals()[name] = f + + +Python is complicated +--------------------- + +How ``a + b`` works (simplified!): + +* look up the method __add__ on the type of a + +* if there is one, call it + +* if it returns NotImplemented, or if there is none, + look up the method __radd__ on the type of b + +* if there is one, call it + +* if there is none, or we get NotImplemented again, + raise an exception TypeError + + +Python is a mess +---------------- + +How ``obj.attr`` or ``obj.method()`` works: + +* ... + +* no way to write it down in just one slide + + +What this talk is about +----------------------- + +* The PyPy project: a framework in which to write interpreters for + this kind of language + +* "Python in Python" -- roughly + +* From the user's point of view (i.e. the programmer in Python), + PyPy is very similar to CPython. + + + + +CPython and PyPy +-------------------------------------------------------------------- + + +CPython and PyPy +---------------- + +* Two implementations + +* Two interpreters + +* CPython is written in C, PyPy is written in Python + +* PyPy tries to be equivalent to CPython + + +...and Jython and IronPython +---------------------------- + +* Jython: Python for the Java VM + +* IronPython: Python for .NET + +* Both try to integrate well with their VM + + +What is PyPy +------------ + +* A project started in 2003 + +* An Open Source effort of volunteers + +* With some funding support: 2 years from the European Union (2005-2007), + and now from Germany and Sweden (2010-2011). + + +What is PyPy +------------ + +* Test-driven development + +* Now contains about 200 KLoC, and 150 KLoc of tests + + +A bit of history +---------------- + +* Squeak and Scheme48 are also interpreters written in themselves + +* Or more precisely, like PyPy, a subset of themselves + +* But in PyPy, the subset is at a higher level + +* General rule: *every aspect that is independent from the high-level + description of the interpreter is left out of it* + + +What is the point of PyPy? +-------------------------- + +* CPython is older, it's the "official" version + +* PyPy is just a replacement, so why? + +* Moreover PyPy is not quite complete (e.g. 
C extension + modules are only partially supported) + + +Speed +----- + +* First answer: PyPy is faster, and may use less memory + +* ...or at least, it is "often" the case + + +http://speed.pypy.org/ +---------------------- + +.. image:: speed.png + + +And (optionally) extra features +------------------------------- + +* "Stackless" + +* Non-Python interpreters + +* and many smaller experiments + +* it is a better experimentation platform than CPython + + +Multi-threading +--------------- + +* Bad support on CPython (GIL) + +* PyPy has no answer to this question (there is also a GIL) + + + + +Architecture +------------------------------------------------------------------------ + + +Architecture +------------ + +PyPy has two parts: + +* A Python interpreter, written in *RPython* + +* A compilation toolchain -- the "translator" -- that translates + RPython code into C code (mainly) + + +PyPy's Python interpreter +------------------------- + +* A priori similar to CPython, but written in RPython. + +* RPython is also valid Python: we test extensively by running + it on top of CPython + +* See demo (py.py) + + +The translation toolchain +------------------------- + +* Takes a program written in RPython, a custom subset of Python + +* Outputs the "same" program written in C + +* See demo + + +RPython is still mostly Python +------------------------------ + +* Completely valid Python (can be tested directly) + +* Can use lists, dicts, tuples, classes and instances, and so on, + but it must be type-safe + +* Contains no garbage collection detail (Py_INCREF/Py_DECREF in CPython) + +* Really a subset of Python: roughly "how a Java programmer writes his + first Python program" + +* ...well, plus tons of tricks ``:-)`` + + +RPython meta-programming +------------------------ + +* RPython is actually only a restriction on the code after being imported, + so we can build up everything in (normal) full Python:: + + for name in ["add", "sub", "mul"]: + def f(x, y): + ... + globals()[name] = f + +* here, the code in ``f()`` is RPython, but the loop around it is not. + + + + +Architecture: the interpreter +-------------------------------------------------------------------------- + + +Overview of the interpreter +--------------------------- + +* A compiler that produces a custom bytecode format + +* An interpreter for this bytecode + +* A large library of object types (the "object space") + +* A collection of extension modules + + +The bytecode interpreter +------------------------ + +* A straightforward, recursive interpreter + +* Stack-based + +* Every call to a Python function makes a frame object + +* Then the interpreter is written as methods on this frame object + + +The object space +---------------- + +* Implements all the built-in types + +* Structure more flexible than CPython's family of C functions + +* Very open to experimentation + + +Separation of levels +-------------------- + +* Important: *all* objects that appear in the interpreted program are, + in the interpreter, instances of W_XxxObject. + +* Again, similar to CPython: an object in Python is implemented, + in the interpreter, as a C structure PyXxxObject. + + +Example: smalllong +------------------ + +* Standard Python types: int (32/64-bit) and long + (integer of unlimited size) + +* In CPython, the type is directly linked to its (single) implementation in C. + In PyPy, it is not. 
+ +* So we could easily add an implementation W_SmallLongObject for + integers that happen to fit in 64 bits + +* And there is also W_LongObject for the general case + + +Example: smallint +----------------- + +* *Tagged integers,* common in interpreters (but not in CPython) + +* Idea, in C terms: take the integer objects whose value fits in 31/63 + bits, and encode them as odd-valued pseudo-pointers, instead of + pointers to separately-allocated integer objects + +* We did it in PyPy, but it's disabled now because it does not give + the expected performance gain + + +Example: multidict +------------------ + +* Similarly, we have several implementations of dict + +* For the different typical usage patterns of dicts in Python + +* E.g. module dicts (containing all global names of a module), + class dicts, instance dicts, user dicts (typically containing + non-string keys) + + +Example: mapdict +---------------- + +* An instance in Python uses a dictionary to store attributes:: + + >>> x = MyClass() + >>> x.a = 5 + >>> x.__dict__ + {'a': 5} + >>> x.__dict__ = {'b': 6} + >>> x.b + 6 + + +Example: mapdict +---------------- + +* An instance is thus two objects: a dict and a wrapper around it + +* Requires a lot of memory + +* This is different than Java, Smalltalk or C++, where the class + enforces the exact set of attributes of its instances + +* But it is like Self and JavaScript + + +Maps +---------------- + +* We can reuse the technique introduced in Self: "maps" + +* The JavaScript engine V8 also uses them, calling them "hidden classes" + +* Idea: it is likely that a lot of instances of a given class will + have the same set of attributes + +* So we split the attributes into a per-instance part (just an array of + field values) and a shared part (giving the attribute names, and their + indices in the arrays of the individual instances). + + + + +Architecture: the translation toolchain +--------------------------------------------------------------------- + + +Overview +-------- + +* "Translation toolchain": statically compiles RPython code + +* Produces C code (or JVM or .NET code, experimentally) + +* Every aspect that is independent from the high-level + description of the interpreter is left out of RPython + +* Instead, they are added during translation + +* PyPy = hybrid "research base" + "production-ready" + + +Translation overview (1) +------------------------ + +* Start with the live RPython program + +* Build the Control Flow Graphs (CFGs) of the functions + +* Perform global type inference + +* We get a type-annotated version of the CFGs + +* Demo + + +Translation overview (2) +------------------------ + +* "Lower" the level of the CFGs: transform their Python-like operations + into C-like operations + +* Do a number of additional transformations to insert the selected "aspects" + +* Generate C code from the low-level CFGs + + +Various aspects +--------------- + +* The object model, e.g. 
how to turn RPython classes and instances + to C structs + +* Garbage collection + +* Execution model: regular or stackless + +* Just-in-Time compiler + + +The object model +---------------- + +* Called "RTyping" internally + +* Can target "lltype" or "ootype" + +* "lltype" = low-level types = C-like structs and arrays + +* "ootype" = object-oriented types, for JVM or .NET + + +The execution model +------------------- + +* Optionally do a "stackless transformation" + +* We get microthread capabilities (soft threads) + +* Even if the source code of the interpreter is just recursive + + + + +Garbage collection +--------------------------------------------------------------------- + + +Purpose +------- + +* RPython assumes automatic memory management, like Python + +* But of course C code does not + +* We can use the Boehm GC, but it is far too slow + +* Remember that our GC needs to support both allocating Python-visible + objects and internal objects of the interpreter (lists, instances...) + + +Overview +-------- + +* We wrote our own GCs, and each alloc operation in the CFGs is replaced + with a call to the GC + +* Handles finding and freeing unused memory + +* The GC is written in RPython, too + +* Analyzed like the rest of the program during translation + +* This approach allows testing at all levels + + +The GCs we have written +----------------------- + +* Currently used: "minimark", a generational GC with one young generation + and using mark-and-sweep for the old generation + +* Previously: a hybrid collector using generational semi-space collection + and mark-and-sweep for the oldest generation (too complicated) + +* Pretty standard, non-concurrent, non-thread-safe collectors + + +Old experiments +--------------- + +* Reference counting (like CPython)... Does not work well. + +* Mark-and-sweep, a fully non-moving collector + +* Mark-and-compact, a fully compacting, generationless collector, + similar to Squeak. + +* Lesson learned: using a generational collector is essential for + dynamic languages like Python + + +GC transformer +-------------- + +* Inserting a GC in a program being translated is handled by the "GC + transformer" + +* Easy to customize, no fixed API + + +API example (minimark GC) +------------------------- + +* The GC provides functions like "malloc" + +* Plus a number of others: hash, identity_hash, weakref support, + finalizer support + +* The GC transformer inserts tables describing the structure of + RPython objects: sizes, location of further references, etc. 
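
For a rough idea of what such generated tables describe, here is a minimal
sketch; the field names are invented for illustration, and the real output
is C data emitted at translation time, not a Python dictionary::

    # Hypothetical per-type layout descriptors, as the GC needs them:
    # total size, where the GC pointers are, and whether a finalizer exists.
    TYPE_LAYOUTS = {
        "W_ListObject": {
            "fixed_size": 24,               # bytes occupied by the struct
            "gc_ptr_offsets": [8, 16],      # fields that hold references
            "has_finalizer": False,
        },
    }

    def trace_one_object(addr, layout, visit):
        # Walk every reference slot of one object; the collector calls
        # visit() to mark or copy whatever the slot points to.
        for ofs in layout["gc_ptr_offsets"]:
            visit(addr + ofs)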
+ + +Finding the stack roots +----------------------- + +* The hard part: finding all pointers to GC objects from local variables + in the C stack + +* ANSI C solution: all pointers are copied to and from some custom stack + +* Not-ANSI-C-at-all: parse the assembler produced by GCC to build tables + + + + +Just-in-Time Compiler +--------------------------------------------------------------------- + + +Goal +---- + +* Speed up the interpreter written in RPython + +* Independent of the language that is being interpreted + +* Let us call it the P-interpreter (P = Python or other) + + +What is a JIT +------------- + +* A JIT selects pieces of the user program (in language P) that would benefit + from compilation instead of interpretation + +* A "method JIT" selects individual P functions and compiles them, + possibly doing some inlining to improve performance (HotSpot, Psyco) + +* A "tracing JIT" selects individual code paths from loops and compiles + them, inlining aggressively (TraceMonkey, PyPy) + + +Tracing +------- + +* Run the user program, and do some lightweight profiling of loops + +* When a loop is run often enough, enter "Tracing Mode" + +* Run one more iteration of the loop in this mode + +* In addition to actually running the next iteration, it records a "trace" + + +Tracing (2) +----------- + +* The trace is then turned into a machine code loop, and directly executed + +* Runs all the further iterations of the loop + + +Tracing (3) +----------- + +* The machine code contains "guards" checking that all conditions met + during tracing are still valid + +* When a guard fails (latest: at the end of the loop), we fall back to + the regular P-interpreter + + +Meta-Tracing in PyPy +-------------------- + +* The explanation above assumes a tracing JIT for the full Python + language + +* Would need to be maintained whenever we change the Python version we + support + +* Instead, we have a "meta-tracing JIT" + +* We trace the P-interpreter's main loop (running N times) interpreting + a P loop (running once) + + +Demo +---- + + +Architecture of the PyPy JIT +---------------------------- + +* In advance, turn the CFGs of the P-interpreter into some bytecode + representation called "jitcode" + +* Uses some hints provided by the P-interpreter author (but not many) + +* "Links" into the P-interpreter's bytecode dispatch loop + +* In this way we add lightweight profiling code + + +Meta-Tracing +------------ + +* When thresholds are reached, we start tracing + +* Tracing is done by running the "jitcodes" in a custom interpreter, + and recording a trace of all operations performed + +* Tracing is slow (double interpretation) but only runs for one iteration + of the loop + + +Optimization +------------ + +* Advanced optimizations of the trace: escaping analysis, integer bounds, + store sinking, string handling, FFI calls, unrolling, virtualrefs... + + +Machine Code Backend +-------------------- + +* Turns a trace into machine code + +* Simple register allocation (linear code) + +* x86, x86-64, (ARM) + +* Guards compiled as conditional jumps to code that restores the full state + + +Blackhole interpreter +--------------------- + +* When a guard fails, we need to go back to the regular P-interpreter + +* Cannot easily re-enter the P-interpreter from anywhere, because it + is just C code + +* Instead we use one more interpreter, the "blackhole interpreter". 
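
As a toy sketch in plain Python (nothing like the real tracer or backend): a
trace is a straight-line list of operations and guards recorded from one
iteration, and the generated loop simply replays it until a guard fails and
control returns to the regular interpreter::

    def record_trace():
        # recorded while interpreting ONE iteration of a counting loop
        ops = []
        ops.append(lambda env: env.__setitem__('i', env['i'] + 1))  # i = i + 1
        ops.append(lambda env: env['i'] < 1000)                     # guard: i < 1000
        return ops

    def run_compiled_loop(trace, env):
        # stands in for the machine code emitted from the trace
        while True:
            for op in trace:
                if op(env) is False:   # a guard failed
                    return env         # bail out to the regular interpreter

    env = {'i': 0}
    run_compiled_loop(record_trace(), env)
    assert env['i'] == 1000
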
+ + +Bridges +------- + +* When a guard fails often enough, run again the JIT from there + +* Meta-trace, optimize, generate machine code, run it + +* Such extra traces are called "bridges" instead of "loops" + +* In practice, most loops end up needing some number of bridges + +* We get "trees" of machine code + + +More topics +----------- + +* Loops, bridges and "preamble loops" + +* Virtualizables + +* GC integration + +* Memory management of machine code + +* ... + + + + +Conclusion +--------------------------------------------------------------------- + + +Conclusion +---------- + +* PyPy is a platform for writing efficient interpreters for + dynamic languages + +* http://pypy.org/ + +* http://speed.pypy.org/ + +* irc: ``#pypy at freenode.net`` + +* noisebridge sprint this weekend (from 10am): + https://www.noisebridge.net/wiki/Getting_Here diff --git a/talk/stanford-ee380-2011/ui/default/blank.gif b/talk/stanford-ee380-2011/ui/default/blank.gif new file mode 100644 index 0000000000000000000000000000000000000000..75b945d2553848b8b6f41fe5e24599c0687b8472 GIT binary patch [cut] diff --git a/talk/stanford-ee380-2011/ui/default/framing.css b/talk/stanford-ee380-2011/ui/default/framing.css new file mode 100644 --- /dev/null +++ b/talk/stanford-ee380-2011/ui/default/framing.css @@ -0,0 +1,25 @@ +/* This file has been placed in the public domain. */ +/* The following styles size, place, and layer the slide components. + Edit these if you want to change the overall slide layout. + The commented lines can be uncommented (and modified, if necessary) + to help you with the rearrangement process. */ + +/* target = 1024x768 */ + +div#header, div#footer, .slide {width: 100%; top: 0; left: 0;} +div#header {position: fixed; top: 0; height: 3em; z-index: 1;} +div#footer {top: auto; bottom: 0; height: 2.5em; z-index: 5;} +.slide {top: 0; width: 92%; padding: 2.5em 4% 4%; z-index: 2;} +div#controls {left: 50%; bottom: 0; width: 50%; z-index: 100;} +div#controls form {position: absolute; bottom: 0; right: 0; width: 100%; + margin: 0;} +#currentSlide {position: absolute; width: 10%; left: 45%; bottom: 1em; + z-index: 10;} +html>body #currentSlide {position: fixed;} + +/* +div#header {background: #FCC;} +div#footer {background: #CCF;} +div#controls {background: #BBD;} +div#currentSlide {background: #FFC;} +*/ diff --git a/talk/stanford-ee380-2011/ui/default/iepngfix.htc b/talk/stanford-ee380-2011/ui/default/iepngfix.htc new file mode 100644 --- /dev/null +++ b/talk/stanford-ee380-2011/ui/default/iepngfix.htc @@ -0,0 +1,42 @@ + + + + + \ No newline at end of file diff --git a/talk/stanford-ee380-2011/ui/default/opera.css b/talk/stanford-ee380-2011/ui/default/opera.css new file mode 100644 --- /dev/null +++ b/talk/stanford-ee380-2011/ui/default/opera.css @@ -0,0 +1,8 @@ +/* This file has been placed in the public domain. */ +/* DO NOT CHANGE THESE unless you really want to break Opera Show */ +.slide { + visibility: visible !important; + position: static !important; + page-break-before: always; +} +#slide0 {page-break-before: avoid;} diff --git a/talk/stanford-ee380-2011/ui/default/outline.css b/talk/stanford-ee380-2011/ui/default/outline.css new file mode 100644 --- /dev/null +++ b/talk/stanford-ee380-2011/ui/default/outline.css @@ -0,0 +1,16 @@ +/* This file has been placed in the public domain. */ +/* Don't change this unless you want the layout stuff to show up in the + outline view! 
*/ + +.layout div, #footer *, #controlForm * {display: none;} +#footer, #controls, #controlForm, #navLinks, #toggle { + display: block; visibility: visible; margin: 0; padding: 0;} +#toggle {float: right; padding: 0.5em;} +html>body #toggle {position: fixed; top: 0; right: 0;} + +/* making the outline look pretty-ish */ + +#slide0 h1, #slide0 h2, #slide0 h3, #slide0 h4 {border: none; margin: 0;} +#toggle {border: 1px solid; border-width: 0 0 1px 1px; background: #FFF;} + +.outline {display: inline ! important;} diff --git a/talk/stanford-ee380-2011/ui/default/pretty.css b/talk/stanford-ee380-2011/ui/default/pretty.css new file mode 100644 --- /dev/null +++ b/talk/stanford-ee380-2011/ui/default/pretty.css @@ -0,0 +1,121 @@ +/* This file has been placed in the public domain. */ +/* Following are the presentation styles -- edit away! */ + +html, body {margin: 0; padding: 0;} +body {background: #fff color: #222; font-size: 2em;} +/* Replace the background style above with the style below (and again for + div#header) for a graphic: */ +/* background: white url(bodybg.gif) -16px 0 no-repeat; */ +:link, :visited {text-decoration: none; color: #00C;} +#controls :active {color: #88A !important;} +#controls :focus {outline: 1px dotted #227;} +h1, h2, h3, h4 {font-size: 100%; margin: 0; padding: 0; font-weight: inherit;} + +blockquote {padding: 0 2em 0.5em; margin: 0 1.5em 0.5em;} +blockquote p {margin: 0;} + +kbd {font-weight: bold; font-size: 1em;} +sup {font-size: smaller; line-height: 1px;} + +.slide pre {padding: 0; margin-left: 0; margin-right: 0; font-size: 90%;} +.slide ul ul li {list-style: square; } +.slide img.leader {display: block; margin: 0 auto;} +.slide tt {font-size: 90%;} + +div#header, div#footer {background: #005; color: #AAB; font-family: sans-serif;} +/* background: #005 url(bodybg.gif) -16px 0 no-repeat; */ +div#footer {font-size: 0.5em; font-weight: bold; padding: 1em 0;} +#footer h1 {display: block; padding: 0 1em;} +#footer h2 {display: block; padding: 0.8em 1em 0;} + +.slide {font-size: 1.5em;} +.slide li {font-size: 1.0em; padding-bottom: 0.2em;} +.slide h1 {position: absolute; top: 0.45em; z-index: 1; + margin: 0; padding-left: 0.7em; white-space: nowrap; + font: bold 110% sans-serif; color: #DDE; background: #005;} +.slide h2 {font: bold 120%/1em sans-serif; padding-top: 0.5em;} +.slide h3 {font: bold 100% sans-serif; padding-top: 0.5em;} +h1 abbr {font-variant: small-caps;} + +div#controls {position: absolute; left: 50%; bottom: 0; + width: 50%; text-align: right; font: bold 0.9em sans-serif;} +html>body div#controls {position: fixed; padding: 0 0 1em 0; top: auto;} +div#controls form {position: absolute; bottom: 0; right: 0; width: 100%; + margin: 0; padding: 0;} +#controls #navLinks a {padding: 0; margin: 0 0.5em; + background: #005; border: none; color: #779; cursor: pointer;} +#controls #navList {height: 1em;} +#controls #navList #jumplist {position: absolute; bottom: 0; right: 0; + background: #DDD; color: #227;} + +#currentSlide {text-align: center; font-size: 0.5em; color: #449; + font-family: sans-serif; font-weight: bold;} + +#slide0 {padding-top: 1.5em} +#slide0 h1 {position: static; margin: 1em 0 0; padding: 0; color: #000; + font: bold 2em sans-serif; white-space: normal; background: transparent;} +#slide0 h2 {font: bold italic 1em sans-serif; margin: 0.25em;} +#slide0 h3 {margin-top: 1.5em; font-size: 1.5em;} +#slide0 h4 {margin-top: 0; font-size: 1em;} + +ul.urls {list-style: none; display: inline; margin: 0;} +.urls li {display: inline; margin: 0;} 
+.external {border-bottom: 1px dotted gray;} +html>body .external {border-bottom: none;} +.external:after {content: " \274F"; font-size: smaller; color: #77B;} + +.incremental, .incremental *, .incremental *:after {visibility: visible; + color: white; border: 0;} +img.incremental {visibility: hidden;} +.slide .current {color: green;} + +.slide-display {display: inline ! important;} + +.huge {font-family: sans-serif; font-weight: bold; font-size: 150%;} +.big {font-family: sans-serif; font-weight: bold; font-size: 120%;} +.small {font-size: 75%;} +.tiny {font-size: 50%;} +.huge tt, .big tt, .small tt, .tiny tt {font-size: 115%;} +.huge pre, .big pre, .small pre, .tiny pre {font-size: 115%;} + +.maroon {color: maroon;} +.red {color: red;} +.magenta {color: magenta;} +.fuchsia {color: fuchsia;} +.pink {color: #FAA;} From noreply at buildbot.pypy.org Tue Feb 11 15:50:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Feb 2014 15:50:15 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Replace stm_allocate_prebuilt() with stm_copy_prebuilt_objects() Message-ID: <20140211145015.49C2B1C03D5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r726:a73a0beed91e Date: 2014-02-11 15:49 +0100 http://bitbucket.org/pypy/stmgc/changeset/a73a0beed91e/ Log: Replace stm_allocate_prebuilt() with stm_copy_prebuilt_objects() which is probably a better fit for PyPy. diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -9,11 +9,6 @@ (NB_PAGES - END_NURSERY_PAGE) * 4096UL); } -object_t *stm_allocate_prebuilt(ssize_t size_rounded_up) -{ - abort(); -} - object_t *_stm_allocate_old(ssize_t size_rounded_up) { char *addr = large_malloc(size_rounded_up); diff --git a/c7/stm/prebuilt.c b/c7/stm/prebuilt.c new file mode 100644 --- /dev/null +++ b/c7/stm/prebuilt.c @@ -0,0 +1,41 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +void stm_copy_prebuilt_objects(object_t *target, char *source, ssize_t size) +{ + /* Initialize a region of 'size' bytes at the 'target' address, + containing prebuilt objects copied from 'source'. The caller + must ensure that the 'target' address is valid. It might be + called several times but care must be taken not to overlap the + ranges. The exact rules are a bit complicated: + + - the range [target, target + size] must be inside the + range [131072, FIRST_READMARKER_PAGE*4096] + + - the range [target / 16, (target + size) / 16] will be + used by read markers, so it must be fully before the + range [target, target + size]. + + The objects themselves can contain more pointers to other + prebuilt objects. Their stm_flags field must be initialized + with STM_FLAGS_PREBUILT. 
+ */ + + uintptr_t utarget = (uintptr_t)target; + if (utarget / 16 < 8192 || + utarget + size > FIRST_READMARKER_PAGE * 4096UL || + (utarget + size + 15) / 16 > utarget) { + fprintf(stderr, + "stm_copy_prebuilt_objects: invalid range (%ld, %ld)", + (long)utarget, (long)size); + abort(); + } + uintptr_t start_page = utarget / 4096; + uintptr_t end_page = (utarget + size + 4095) / 4096; + pages_initialize_shared(start_page, end_page - start_page); + + char *segment_base = get_segment_base(0); + memcpy(REAL_ADDRESS(segment_base, utarget), source, size); +} diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -7,6 +7,7 @@ #include "stm/misc.c" #include "stm/pages.c" +#include "stm/prebuilt.c" #include "stm/gcpage.c" #include "stm/largemalloc.c" #include "stm/nursery.c" diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -78,6 +78,7 @@ #endif #define _STM_GCFLAG_WRITE_BARRIER 0x01 +#define STM_FLAGS_PREBUILT _STM_GCFLAG_WRITE_BARRIER /* ==================== HELPERS ==================== */ @@ -140,12 +141,11 @@ return (object_t *)p; } -object_t *stm_allocate_prebuilt(ssize_t size_rounded_up); - void stm_setup(void); void stm_teardown(void); void stm_register_thread_local(stm_thread_local_t *tl); void stm_unregister_thread_local(stm_thread_local_t *tl); +void stm_copy_prebuilt_objects(object_t *target, char *source, ssize_t size); void stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf); void stm_start_inevitable_transaction(stm_thread_local_t *tl); diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -41,11 +41,13 @@ void stm_read(object_t *obj); /*void stm_write(object_t *obj); use _checked_stm_write() instead */ object_t *stm_allocate(ssize_t size_rounded_up); -object_t *stm_allocate_prebuilt(ssize_t size_rounded_up); object_t *_stm_allocate_old(ssize_t size_rounded_up); void stm_setup(void); void stm_teardown(void); +void stm_register_thread_local(stm_thread_local_t *tl); +void stm_unregister_thread_local(stm_thread_local_t *tl); +void stm_copy_prebuilt_objects(object_t *target, char *source, ssize_t size); bool _checked_stm_write(object_t *obj); bool _stm_was_read(object_t *obj); @@ -56,9 +58,6 @@ bool _stm_in_transaction(stm_thread_local_t *tl); void _stm_test_switch(stm_thread_local_t *tl); -void stm_register_thread_local(stm_thread_local_t *tl); -void stm_unregister_thread_local(stm_thread_local_t *tl); - void stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf); void stm_commit_transaction(void); bool _check_abort_transaction(void); From noreply at buildbot.pypy.org Tue Feb 11 21:51:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Feb 2014 21:51:46 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Tweak I did in the published version Message-ID: <20140211205146.A322D1C1361@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5155:9c8a438818d6 Date: 2014-02-11 21:51 +0100 http://bitbucket.org/pypy/extradoc/changeset/9c8a438818d6/ Log: Tweak I did in the published version diff --git a/blog/draft/stm-feb2014.txt b/blog/draft/stm-feb2014.txt --- a/blog/draft/stm-feb2014.txt +++ b/blog/draft/stm-feb2014.txt @@ -13,8 +13,8 @@ difficult issues we ran into for the JIT. So while this is basically yet another restart similar to last -June's, the difference is that the work that we have put in the PyPy -part (as opposed to the C library) remains. 
+June's, the difference is that the work that we have already put in +the PyPy part (as opposed to the C library) remains. You can read about the basic ideas of this new C library here. From noreply at buildbot.pypy.org Tue Feb 11 21:51:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 11 Feb 2014 21:51:45 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Add the demos, for what they are worth. Message-ID: <20140211205145.863571C1361@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5154:7029195255d7 Date: 2014-02-11 21:51 +0100 http://bitbucket.org/pypy/extradoc/changeset/7029195255d7/ Log: Add the demos, for what they are worth. diff --git a/talk/stanford-ee380-2011/demo-jit.py b/talk/stanford-ee380-2011/demo-jit.py new file mode 100644 --- /dev/null +++ b/talk/stanford-ee380-2011/demo-jit.py @@ -0,0 +1,87 @@ +import sys +from pypy.rlib import jit + + +class Object: + pass + + +class Integer(Object): + + def __init__(self, value): + self.value = value + + def next(self): + return Integer(self.value + 1) + + def small(self): + return self.value < 1000000 + + def as_str(self): + return str(self.value) + + +class String(Object): + + def __init__(self, str): + self.str = str + + def next(self): + return String(self.str + "x") + + def small(self): + return len(self.str) < 50 + + def as_str(self): + return self.str + + +jitdriver = jit.JitDriver(greens=['pc', 'bytecode'], reds=['input']) + + +def interpret(bytecode, input): + pc = 0 + while True: + jitdriver.jit_merge_point(bytecode=bytecode, input=input, pc=pc) + + nextop = bytecode[pc] + pc += 1 + + if nextop == 'n': + input = input.next() + + elif nextop == 'l': + if input.small(): + pc = 0 + else: + return input.as_str() + + +# ____________________________________________________________ + + +if __name__ == '__main__': + print interpret("nl", String("x")) + print interpret("nl", Integer(1)) + sys.exit() + + +# ____________________________________________________________ + + +def main(argv): + num = int(argv[2]) + print interpret(argv[1], Integer(num)) + return 0 + +def target(*args): + return main, None + + +# ____________________________________________________________ + + +from pypy.jit.codewriter.policy import JitPolicy + +def jitpolicy(driver): + return JitPolicy() diff --git a/talk/stanford-ee380-2011/demo1 b/talk/stanford-ee380-2011/demo1 new file mode 100644 --- /dev/null +++ b/talk/stanford-ee380-2011/demo1 @@ -0,0 +1,8 @@ + + +>>>> for i in range(10): +.... 
print i**2 + + +>>>> x = 5 +Ctrl-C diff --git a/talk/stanford-ee380-2011/demo2.py b/talk/stanford-ee380-2011/demo2.py new file mode 100644 --- /dev/null +++ b/talk/stanford-ee380-2011/demo2.py @@ -0,0 +1,27 @@ + + +class Foo(object): + + def __init__(self, value): + self.value = value + + def double(self): + return Foo(self.value * 2) + + +def main(argv): + if len(argv) <= 1: + n = 22 + else: + n = int(argv[1]) + + lst = [Foo(i) for i in range(n)] + + print lst[-1].double().value + return 0 + + +# ____________________________________________________________ + +def target(*args): + return main, None From noreply at buildbot.pypy.org Wed Feb 12 13:26:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Feb 2014 13:26:14 +0100 (CET) Subject: [pypy-commit] pypy default: Use "int" instead of "Signed" in these functions meant to be called from Message-ID: <20140212122614.6574A1C3CD8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69120:69b6a25067f8 Date: 2014-02-12 13:25 +0100 http://bitbucket.org/pypy/pypy/changeset/69b6a25067f8/ Log: Use "int" instead of "Signed" in these functions meant to be called from C code. diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -90,9 +90,10 @@ return f """) - @entrypoint('main', [rffi.CCHARP, lltype.Signed], c_name='pypy_setup_home') + @entrypoint('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): from pypy.module.sys.initpath import pypy_find_stdlib + verbose = rffi.cast(lltype.Signed, verbose) if ll_home: home = rffi.charp2str(ll_home) else: @@ -120,7 +121,8 @@ @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): source = rffi.charp2str(ll_source) - return _pypy_execute_source(source) + res = _pypy_execute_source(source) + return rffi.cast(rffi.INT, res) @entrypoint('main', [], c_name='pypy_init_threads') def pypy_init_threads(): diff --git a/pypy/interpreter/test/test_targetpypy.py b/pypy/interpreter/test/test_targetpypy.py --- a/pypy/interpreter/test/test_targetpypy.py +++ b/pypy/interpreter/test/test_targetpypy.py @@ -12,8 +12,10 @@ _, d = create_entry_point(space, None) execute_source = d['pypy_execute_source'] lls = rffi.str2charp("import sys; sys.modules['xyz'] = 3") - execute_source(lls) + res = execute_source(lls) lltype.free(lls, flavor='raw') + assert lltype.typeOf(res) == rffi.INT + assert rffi.cast(lltype.Signed, res) == 0 x = space.int_w(space.getitem(space.getattr(space.builtin_modules['sys'], space.wrap('modules')), space.wrap('xyz'))) @@ -24,5 +26,5 @@ # did not crash - the same globals pypy_setup_home = d['pypy_setup_home'] lls = rffi.str2charp(__file__) - pypy_setup_home(lls, 1) + pypy_setup_home(lls, rffi.cast(rffi.INT, 1)) lltype.free(lls, flavor='raw') From noreply at buildbot.pypy.org Wed Feb 12 13:51:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Feb 2014 13:51:34 +0100 (CET) Subject: [pypy-commit] pypy default: Add an official header with comments for using libpypy.so. Message-ID: <20140212125134.4DE9A1C35CC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69121:c4cd6eca9358 Date: 2014-02-12 13:50 +0100 http://bitbucket.org/pypy/pypy/changeset/c4cd6eca9358/ Log: Add an official header with comments for using libpypy.so. 
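
For context, rpython_startup_code(), pypy_setup_home() and
pypy_execute_source() together form the embedding interface being shaped in
these changesets; a minimal sketch of driving it from another Python process
through cffi could look as follows (the library filename and the home path
are made-up placeholders, not something the commits define):

    import cffi

    ffi = cffi.FFI()
    ffi.cdef("""
        void rpython_startup_code(void);
        int pypy_setup_home(char *home, int verbose);
        int pypy_execute_source(char *source);
    """)
    lib = ffi.dlopen("libpypy-c.so")   # placeholder filename

    lib.rpython_startup_code()
    # point PyPy at its lib-python/lib_pypy; the path below is a placeholder
    if lib.pypy_setup_home(b"/opt/pypy/bin/libpypy-c.so", 1) != 0:
        raise RuntimeError("could not locate the PyPy standard library")
    if lib.pypy_execute_source(b"import sys; sys.modules['embedded'] = 42") != 0:
        raise RuntimeError("uncaught exception in the executed source")
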
diff --git a/include/PyPy.h b/include/PyPy.h new file mode 100644 --- /dev/null +++ b/include/PyPy.h @@ -0,0 +1,54 @@ +#ifndef _PYPY_H_ +#define _PYPY_H_ + +/* This header is meant to be included in programs that use PyPy as an + embedded library. */ + +#ifdef __cplusplus +extern "C" { +#endif + + +/* You should call this first once. */ +void rpython_startup_code(void); + + +/* Initialize the home directory of PyPy. It is necessary to call this. + + Call it with "home" being the file name of the libpypy.so, for + example; it will be used as a starting point when searching for the + lib-python and lib_pypy directories. They are searched from + "home/..", "home/../..", etc. Returns 0 if everything was fine. If + an error occurs, returns 1 and (if verbose != 0) prints some + information to stderr. + */ +int pypy_setup_home(char *home, int verbose); + + +/* If your program has multiple threads, then you need to call + pypy_init_threads() once at init time, and then pypy_thread_attach() + once in each other thread that just started and in which you want to + use pypy_execute_source(). + */ +void pypy_init_threads(void); +void pypy_thread_attach(void); + + +/* The main entry point: executes "source" as plain Python code. + Returns 0 if everything was fine. If a Python exception is + uncaught, it is printed to stderr and 1 is returned. + + Usually, the Python code from "source" should use cffi to fill in + global variables of "function pointer" type in your program. Use + cffi callbacks to do so. Once it is done, there is no need to call + pypy_execute_source() any more: from C, you call directly the + functions (which are "callbacks" from the point of view of Python). + */ +int pypy_execute_source(char *source); + + +#ifdef __cplusplus +} +#endif + +#endif From noreply at buildbot.pypy.org Wed Feb 12 14:07:57 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 12 Feb 2014 14:07:57 +0100 (CET) Subject: [pypy-commit] pypy default: Fix the comment here Message-ID: <20140212130757.ACE071C3CD8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69122:b7deb1f65630 Date: 2014-02-12 14:07 +0100 http://bitbucket.org/pypy/pypy/changeset/b7deb1f65630/ Log: Fix the comment here diff --git a/include/PyPy.h b/include/PyPy.h --- a/include/PyPy.h +++ b/include/PyPy.h @@ -28,7 +28,7 @@ /* If your program has multiple threads, then you need to call pypy_init_threads() once at init time, and then pypy_thread_attach() once in each other thread that just started and in which you want to - use pypy_execute_source(). + run Python code (including via callbacks, see below). */ void pypy_init_threads(void); void pypy_thread_attach(void); From noreply at buildbot.pypy.org Wed Feb 12 15:30:49 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 12 Feb 2014 15:30:49 +0100 (CET) Subject: [pypy-commit] pypy default: Remove trailing whitespaces. Message-ID: <20140212143049.142041D2411@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r69123:9b8d5bb9504b Date: 2014-02-12 15:28 +0100 http://bitbucket.org/pypy/pypy/changeset/9b8d5bb9504b/ Log: Remove trailing whitespaces. diff --git a/include/PyPy.h b/include/PyPy.h --- a/include/PyPy.h +++ b/include/PyPy.h @@ -14,13 +14,13 @@ /* Initialize the home directory of PyPy. It is necessary to call this. - + Call it with "home" being the file name of the libpypy.so, for example; it will be used as a starting point when searching for the lib-python and lib_pypy directories. They are searched from "home/..", "home/../..", etc. 
Returns 0 if everything was fine. If an error occurs, returns 1 and (if verbose != 0) prints some - information to stderr. + information to stderr. */ int pypy_setup_home(char *home, int verbose); From noreply at buildbot.pypy.org Wed Feb 12 20:29:42 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 12 Feb 2014 20:29:42 +0100 (CET) Subject: [pypy-commit] pypy default: Remove the LLVM entry from the project ideas document -- we have a LLVM translation backend in-tree. Message-ID: <20140212192942.A21F21D22CC@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: Changeset: r69124:4dd6b84f03d6 Date: 2014-02-12 20:27 +0100 http://bitbucket.org/pypy/pypy/changeset/4dd6b84f03d6/ Log: Remove the LLVM entry from the project ideas document -- we have a LLVM translation backend in-tree. diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -142,20 +142,6 @@ * `hg` -Experiment (again) with LLVM backend for RPython compilation ------------------------------------------------------------- - -We already tried working with LLVM and at the time, LLVM was not mature enough -for our needs. It's possible that this has changed, reviving the LLVM backend -(or writing new from scratch) for static compilation would be a good project. - -(On the other hand, just generating C code and using clang might be enough. -The issue with that is the so-called "asmgcc GC root finder", which has tons -of issues of this own. In my opinion (arigo), it would be definitely a -better project to try to optimize the alternative, the "shadowstack" GC root -finder, which is nicely portable. So far it gives a pypy that is around -7% slower.) - Embedding PyPy ---------------------------------------- From noreply at buildbot.pypy.org Thu Feb 13 17:20:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Feb 2014 17:20:29 +0100 (CET) Subject: [pypy-commit] pypy default: Another attempt to make it so that weakrefs in RPython are cleared as Message-ID: <20140213162029.7D2F71D24E9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69125:dc505e09021c Date: 2014-02-13 17:18 +0100 http://bitbucket.org/pypy/pypy/changeset/dc505e09021c/ Log: Another attempt to make it so that weakrefs in RPython are cleared as soon as the finalizer is enqueued. So if the __del__ is called, we know that we really have no more strong references to the object anywhere at that point in time. Before, due to the incremental GC, it would be possible to fetch the content of the weakref at just the wrong point in time, and still have the finalizer called afterwards. This is explained in https://bugs.pypy.org/issue1687, where this causes surprizes. 
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1837,6 +1837,11 @@ # if self.objects_with_finalizers.non_empty(): self.deal_with_objects_with_finalizers() + elif self.old_objects_with_weakrefs.non_empty(): + # Weakref support: clear the weak pointers to dying objects + # (if we call deal_with_objects_with_finalizers(), it will + # invoke invalidate_old_weakrefs() itself directly) + self.invalidate_old_weakrefs() ll_assert(not self.objects_to_trace.non_empty(), "objects_to_trace should be empty") @@ -1846,9 +1851,7 @@ self.more_objects_to_trace.delete() # - # Weakref support: clear the weak pointers to dying objects - if self.old_objects_with_weakrefs.non_empty(): - self.invalidate_old_weakrefs() + # Light finalizers if self.old_objects_with_light_finalizers.non_empty(): self.deal_with_old_objects_with_finalizers() #objects_to_trace processed fully, can move on to sweeping @@ -2206,6 +2209,12 @@ self._recursively_bump_finalization_state_from_2_to_3(y) self._recursively_bump_finalization_state_from_1_to_2(x) + # Clear the weak pointers to dying objects. Also clears them if + # they point to objects which have the GCFLAG_FINALIZATION_ORDERING + # bit set here. These are objects which will be added to + # run_finalizers(). + self.invalidate_old_weakrefs() + while marked.non_empty(): x = marked.popleft() state = self._finalization_state(x) @@ -2333,7 +2342,9 @@ ll_assert((self.header(pointing_to).tid & GCFLAG_NO_HEAP_PTRS) == 0, "registered old weakref should not " "point to a NO_HEAP_PTRS obj") - if self.header(pointing_to).tid & GCFLAG_VISITED: + tid = self.header(pointing_to).tid + if ((tid & (GCFLAG_VISITED | GCFLAG_FINALIZATION_ORDERING)) == + GCFLAG_VISITED): new_with_weakref.append(obj) else: (obj + offset).address[0] = llmemory.NULL diff --git a/rpython/memory/test/gc_test_base.py b/rpython/memory/test/gc_test_base.py --- a/rpython/memory/test/gc_test_base.py +++ b/rpython/memory/test/gc_test_base.py @@ -29,6 +29,7 @@ GC_CAN_SHRINK_ARRAY = False GC_CAN_SHRINK_BIG_ARRAY = False BUT_HOW_BIG_IS_A_BIG_STRING = 3*WORD + WREF_IS_INVALID_BEFORE_DEL_IS_CALLED = False def setup_class(cls): cls._saved_logstate = py.log._getstate() @@ -370,15 +371,23 @@ class A(object): count = 0 a = A() + expected_invalid = self.WREF_IS_INVALID_BEFORE_DEL_IS_CALLED class B(object): def __del__(self): # when __del__ is called, the weakref to myself is still valid - # in RPython (at least with most GCs; this test might be - # skipped for specific GCs) - if self.ref() is self: - a.count += 10 # ok + # in RPython with most GCs. However, this can lead to strange + # bugs with incminimark. https://bugs.pypy.org/issue1687 + # So with incminimark, we expect the opposite. 
+ if expected_invalid: + if self.ref() is None: + a.count += 10 # ok + else: + a.count = 666 # not ok else: - a.count = 666 # not ok + if self.ref() is self: + a.count += 10 # ok + else: + a.count = 666 # not ok def g(): b = B() ref = weakref.ref(b) diff --git a/rpython/memory/test/test_incminimark_gc.py b/rpython/memory/test/test_incminimark_gc.py --- a/rpython/memory/test/test_incminimark_gc.py +++ b/rpython/memory/test/test_incminimark_gc.py @@ -1,6 +1,38 @@ -from rpython.rlib.rarithmetic import LONG_BIT +from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.memory.test import test_minimark_gc class TestIncrementalMiniMarkGC(test_minimark_gc.TestMiniMarkGC): from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass + WREF_IS_INVALID_BEFORE_DEL_IS_CALLED = True + + def test_weakref_not_in_stack(self): + import weakref + class A(object): + pass + class B(object): + def __init__(self, next): + self.next = next + def g(): + a = A() + a.x = 5 + wr = weakref.ref(a) + llop.gc__collect(lltype.Void) # make everything old + assert wr() is not None + assert a.x == 5 + return wr + def f(): + ref = g() + llop.gc__collect(lltype.Void, 1) # start a major cycle + # at this point the stack is scanned, and the weakref points + # to an object not found, but still reachable: + b = ref() + llop.debug_print(lltype.Void, b) + assert b is not None + llop.gc__collect(lltype.Void) # finish the major cycle + # assert does not crash, because 'b' is still kept alive + b.x = 42 + return ref() is b + res = self.interpret(f, []) + assert res == True From noreply at buildbot.pypy.org Thu Feb 13 17:20:30 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Feb 2014 17:20:30 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140213162030.C504E1D24E9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69126:c67f0c163386 Date: 2014-02-13 17:19 +0100 http://bitbucket.org/pypy/pypy/changeset/c67f0c163386/ Log: merge heads diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -142,20 +142,6 @@ * `hg` -Experiment (again) with LLVM backend for RPython compilation ------------------------------------------------------------- - -We already tried working with LLVM and at the time, LLVM was not mature enough -for our needs. It's possible that this has changed, reviving the LLVM backend -(or writing new from scratch) for static compilation would be a good project. - -(On the other hand, just generating C code and using clang might be enough. -The issue with that is the so-called "asmgcc GC root finder", which has tons -of issues of this own. In my opinion (arigo), it would be definitely a -better project to try to optimize the alternative, the "shadowstack" GC root -finder, which is nicely portable. So far it gives a pypy that is around -7% slower.) 
- Embedding PyPy ---------------------------------------- From noreply at buildbot.pypy.org Thu Feb 13 17:34:41 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Feb 2014 17:34:41 +0100 (CET) Subject: [pypy-commit] pypy default: Rare ordering issue Message-ID: <20140213163441.2D5CF1D24EE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69127:ba1bf5e55066 Date: 2014-02-13 17:31 +0100 http://bitbucket.org/pypy/pypy/changeset/ba1bf5e55066/ Log: Rare ordering issue diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -366,6 +366,9 @@ def compute_result_annotation(self, s_l, s_sizehint): from rpython.annotator import model as annmodel + if annmodel.s_None.contains(s_l): + return # first argument is only None so far, but we + # expect a generalization later if not isinstance(s_l, annmodel.SomeList): raise annmodel.AnnotatorError("First argument must be a list") if not isinstance(s_sizehint, annmodel.SomeInteger): From noreply at buildbot.pypy.org Thu Feb 13 17:55:51 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Feb 2014 17:55:51 +0100 (CET) Subject: [pypy-commit] pypy default: Silence a warning Message-ID: <20140213165551.43C791D24EE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69128:21ab114115aa Date: 2014-02-13 17:54 +0100 http://bitbucket.org/pypy/pypy/changeset/21ab114115aa/ Log: Silence a warning diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -31,7 +31,7 @@ _compilation_info_ = eci calling_conv = 'c' - CHECK_LIBRARY = platform.Has('dump("x", (int)&BZ2_bzCompress)') + CHECK_LIBRARY = platform.Has('dump("x", (long)&BZ2_bzCompress)') off_t = platform.SimpleType("off_t", rffi.LONGLONG) size_t = platform.SimpleType("size_t", rffi.ULONG) From noreply at buildbot.pypy.org Thu Feb 13 19:38:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 13 Feb 2014 19:38:13 +0100 (CET) Subject: [pypy-commit] pypy default: Update ''Embedding PyPy'' Message-ID: <20140213183813.9D4441C3CF3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69129:979a9e29354f Date: 2014-02-13 19:37 +0100 http://bitbucket.org/pypy/pypy/changeset/979a9e29354f/ Log: Update ''Embedding PyPy'' diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -148,12 +148,11 @@ Note: there is a basic proof-of-concept for that as a `uwsgi pypy plugin`_ Being able to embed PyPy, say with its own limited C API, would be -useful. But here is the most interesting variant, straight from -EuroPython live discussion :-) We can have a generic "libpypy.so" that -can be used as a placeholder dynamic library, and when it gets loaded, -it runs a .py module that installs (via ctypes) the interface it wants -exported. This would give us a one-size-fits-all generic .so file to be -imported by any application that wants to load .so files :-) +useful. But there is a possibly better variant: use CFFI. With some +minimal tools atop CFFI, it would be possible to write a pure Python +library, and then compile automatically from it an .so/.dll file that is +a dynamic-link library with whatever C API we want. This gives us a +one-size-fits-all generic way to make .so/.dll files from Python. .. 
_`uwsgi pypy plugin`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html From noreply at buildbot.pypy.org Thu Feb 13 20:22:32 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 13 Feb 2014 20:22:32 +0100 (CET) Subject: [pypy-commit] pypy align_float_cast: first pass - mark raw_load as xxx and remove non-aligned fast path Message-ID: <20140213192232.BA3C31C3CF3@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: align_float_cast Changeset: r69130:43345880f21c Date: 2013-12-27 12:30 +0200 http://bitbucket.org/pypy/pypy/changeset/43345880f21c/ Log: first pass - mark raw_load as xxx and remove non-aligned fast path diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -1,3 +1,5 @@ - - fix jitted assembler + - add test for unaligned OP_RAW_LOAD in translator/c/funcgen + - fix jitted assembler, new opcode? - add fast path for aligned float - - test non-aligned write, fix + - repeat all this for raw_store + - use in pypy/module/_rawffi instead of write_ptr, read_ptr diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -766,6 +766,7 @@ emit_op_getarrayitem_gc_pure = emit_op_getarrayitem_gc def emit_op_raw_load(self, op, arglocs, regalloc, fcond): + xxx res_loc, base_loc, ofs_loc, scale, ofs = arglocs assert ofs_loc.is_core_reg() # no base offset diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1409,6 +1409,7 @@ genop_getarrayitem_raw_pure = genop_getarrayitem_gc def genop_raw_load(self, op, arglocs, resloc): + xxx # This seems to relate to pointers? base_loc, ofs_loc, size_loc, ofs, sign_loc = arglocs assert isinstance(ofs, ImmedLoc) src_addr = addr_add(base_loc, ofs_loc, ofs.value, 0) diff --git a/rpython/rlib/rawstorage.py b/rpython/rlib/rawstorage.py --- a/rpython/rlib/rawstorage.py +++ b/rpython/rlib/rawstorage.py @@ -34,23 +34,6 @@ def free_raw_storage(storage, track_allocation=True): lltype.free(storage, flavor='raw', track_allocation=track_allocation) -class RawStorageGetitemEntryUnaligned(ExtRegistryEntry): - _about_ = raw_storage_getitem_unaligned - - def compute_result_annotation(self, s_TP, s_storage, s_index): - assert s_TP.is_constant() - return annmodel.lltype_to_annotation(s_TP.const) - - def specialize_call(self, hop): - assert hop.args_r[1].lowleveltype == RAW_STORAGE_PTR - v_storage = hop.inputarg(hop.args_r[1], arg=1) - v_index = hop.inputarg(lltype.Signed, arg=2) - hop.exception_cannot_occur() - v_addr = hop.genop('cast_ptr_to_adr', [v_storage], - resulttype=llmemory.Address) - return hop.genop('raw_load', [v_addr, v_index], - resulttype=hop.r_result.lowleveltype) - class RawStorageGetitemEntry(ExtRegistryEntry): _about_ = raw_storage_getitem diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -954,6 +954,7 @@ op_raw_memmove = op_raw_memcopy # this is essentially the same here def op_raw_load(self, RESTYPE, addr, offset): + xxx checkadr(addr) if isinstance(offset, int): from rpython.rtyper.lltypesystem import rffi diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -670,6 +670,7 @@ p[0] = newvalue def op_raw_load(TVAL, p, ofs): + xxx from rpython.rtyper.lltypesystem import rffi p = 
rffi.cast(llmemory.Address, p) p = rffi.cast(rffi.CArrayPtr(TVAL), p + ofs) diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -695,6 +695,7 @@ OP_BARE_RAW_STORE = OP_RAW_STORE def OP_RAW_LOAD(self, op): + xxx addr = self.expr(op.args[0]) offset = self.expr(op.args[1]) result = self.expr(op.result) From noreply at buildbot.pypy.org Thu Feb 13 20:22:34 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 13 Feb 2014 20:22:34 +0100 (CET) Subject: [pypy-commit] pypy align_float_cast: do not mark x86 for fixing Message-ID: <20140213192234.022EB1C3CF3@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: align_float_cast Changeset: r69131:b6e353d16f97 Date: 2013-12-28 22:00 +0200 http://bitbucket.org/pypy/pypy/changeset/b6e353d16f97/ Log: do not mark x86 for fixing diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1409,7 +1409,6 @@ genop_getarrayitem_raw_pure = genop_getarrayitem_gc def genop_raw_load(self, op, arglocs, resloc): - xxx # This seems to relate to pointers? base_loc, ofs_loc, size_loc, ofs, sign_loc = arglocs assert isinstance(ofs, ImmedLoc) src_addr = addr_add(base_loc, ofs_loc, ofs.value, 0) From noreply at buildbot.pypy.org Thu Feb 13 20:22:35 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 13 Feb 2014 20:22:35 +0100 (CET) Subject: [pypy-commit] pypy align_float_cast: mark places that need to check float alignment with xxx Message-ID: <20140213192235.25F2D1C3CF3@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: align_float_cast Changeset: r69132:cedf7339c28d Date: 2014-02-13 18:59 +0200 http://bitbucket.org/pypy/pypy/changeset/cedf7339c28d/ Log: mark places that need to check float alignment with xxx diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -766,6 +766,7 @@ emit_op_getarrayitem_gc_pure = emit_op_getarrayitem_gc def emit_op_raw_load(self, op, arglocs, regalloc, fcond): + # Called when compiling a trace or loop xxx res_loc, base_loc, ofs_loc, scale, ofs = arglocs assert ofs_loc.is_core_reg() diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -402,6 +402,7 @@ @specialize.argtype(1) def read_float_at_mem(self, gcref, ofs): + xxx return llop.raw_load(longlong.FLOATSTORAGE, gcref, ofs) @specialize.argtype(1) diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -954,7 +954,6 @@ op_raw_memmove = op_raw_memcopy # this is essentially the same here def op_raw_load(self, RESTYPE, addr, offset): - xxx checkadr(addr) if isinstance(offset, int): from rpython.rtyper.lltypesystem import rffi diff --git a/rpython/rtyper/lltypesystem/opimpl.py b/rpython/rtyper/lltypesystem/opimpl.py --- a/rpython/rtyper/lltypesystem/opimpl.py +++ b/rpython/rtyper/lltypesystem/opimpl.py @@ -670,7 +670,6 @@ p[0] = newvalue def op_raw_load(TVAL, p, ofs): - xxx from rpython.rtyper.lltypesystem import rffi p = rffi.cast(llmemory.Address, p) p = rffi.cast(rffi.CArrayPtr(TVAL), p + ofs) diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- 
a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -695,15 +695,17 @@ OP_BARE_RAW_STORE = OP_RAW_STORE def OP_RAW_LOAD(self, op): - xxx addr = self.expr(op.args[0]) offset = self.expr(op.args[1]) result = self.expr(op.result) TYPE = op.result.concretetype typename = cdecl(self.db.gettype(TYPE).replace('@', '*@'), '') - return ( + res = ( "%(result)s = ((%(typename)s) (((char *)%(addr)s) + %(offset)s))[0];" % locals()) + if 'float' in res or 'double' in res: + xxx + return res def OP_CAST_PRIMITIVE(self, op): TYPE = self.lltypemap(op.result) From noreply at buildbot.pypy.org Thu Feb 13 20:22:45 2014 From: noreply at buildbot.pypy.org (mattip) Date: Thu, 13 Feb 2014 20:22:45 +0100 (CET) Subject: [pypy-commit] pypy align_float_cast: merge default into branch Message-ID: <20140213192245.EB59A1C3CF3@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: align_float_cast Changeset: r69133:cd30cf395ac4 Date: 2014-02-13 19:31 +0200 http://bitbucket.org/pypy/pypy/changeset/cd30cf395ac4/ Log: merge default into branch diff too long, truncating to 2000 out of 36874 lines diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -28,7 +28,7 @@ DEALINGS IN THE SOFTWARE. -PyPy Copyright holders 2003-2013 +PyPy Copyright holders 2003-2014 ----------------------------------- Except when otherwise stated (look for LICENSE files or information at diff --git a/dotviewer/graphdisplay.py b/dotviewer/graphdisplay.py --- a/dotviewer/graphdisplay.py +++ b/dotviewer/graphdisplay.py @@ -136,6 +136,7 @@ Click on objects to move around Drag with the left mouse button to zoom in/out Drag with the right mouse button to scroll + Use scroll wheel do scroll up or down """.replace('\n ', '\n').strip() # poor man's dedent @@ -613,6 +614,19 @@ def process_MouseButtonUp(self, event): self.dragging = None pygame.event.set_grab(False) + # handle directional scrolling + if event.button == 4: + self.pan((0, -1)) + return + if event.button == 5: + self.pan((0, 1)) + return + if event.button == 6: + self.pan((-1, 0)) + return + if event.button == 7: + self.pan((1, 0)) + return if self.click_time is not None and abs(time.time() - self.click_time) < 1: # click (no significant dragging) self.notifyclick(self.click_origin) diff --git a/include/PyPy.h b/include/PyPy.h new file mode 100644 --- /dev/null +++ b/include/PyPy.h @@ -0,0 +1,54 @@ +#ifndef _PYPY_H_ +#define _PYPY_H_ + +/* This header is meant to be included in programs that use PyPy as an + embedded library. */ + +#ifdef __cplusplus +extern "C" { +#endif + + +/* You should call this first once. */ +void rpython_startup_code(void); + + +/* Initialize the home directory of PyPy. It is necessary to call this. + + Call it with "home" being the file name of the libpypy.so, for + example; it will be used as a starting point when searching for the + lib-python and lib_pypy directories. They are searched from + "home/..", "home/../..", etc. Returns 0 if everything was fine. If + an error occurs, returns 1 and (if verbose != 0) prints some + information to stderr. + */ +int pypy_setup_home(char *home, int verbose); + + +/* If your program has multiple threads, then you need to call + pypy_init_threads() once at init time, and then pypy_thread_attach() + once in each other thread that just started and in which you want to + run Python code (including via callbacks, see below). + */ +void pypy_init_threads(void); +void pypy_thread_attach(void); + + +/* The main entry point: executes "source" as plain Python code. + Returns 0 if everything was fine. 
If a Python exception is + uncaught, it is printed to stderr and 1 is returned. + + Usually, the Python code from "source" should use cffi to fill in + global variables of "function pointer" type in your program. Use + cffi callbacks to do so. Once it is done, there is no need to call + pypy_execute_source() any more: from C, you call directly the + functions (which are "callbacks" from the point of view of Python). + */ +int pypy_execute_source(char *source); + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/lib-python/2.7/ctypes/__init__.py b/lib-python/2.7/ctypes/__init__.py --- a/lib-python/2.7/ctypes/__init__.py +++ b/lib-python/2.7/ctypes/__init__.py @@ -371,10 +371,9 @@ self._handle = handle def __repr__(self): - return "<%s '%s', handle %r at %x>" % \ - (self.__class__.__name__, self._name, - (self._handle), - id(self) & (_sys.maxint*2 + 1)) + return "<%s '%s', handle %r at 0x%x>" % ( + self.__class__.__name__, self._name, self._handle, + id(self) & (_sys.maxint * 2 + 1)) def __getattr__(self, name): diff --git a/lib-python/2.7/test/test_memoryview.py b/lib-python/2.7/test/test_memoryview.py --- a/lib-python/2.7/test/test_memoryview.py +++ b/lib-python/2.7/test/test_memoryview.py @@ -166,11 +166,18 @@ self.assertTrue(m[0:6] == m[:]) self.assertFalse(m[0:5] == m) - # Comparison with objects which don't support the buffer API - self.assertFalse(m == u"abcdef") - self.assertTrue(m != u"abcdef") - self.assertFalse(u"abcdef" == m) - self.assertTrue(u"abcdef" != m) + if test_support.check_impl_detail(cpython=True): + # what is supported and what is not supported by memoryview is + # very inconsisten on CPython. In PyPy, memoryview supports + # the buffer interface, and thus the following comparison + # succeeds. See also the comment in + # pypy.modules.__builtin__.interp_memoryview.W_MemoryView.descr_buffer + # + # Comparison with objects which don't support the buffer API + self.assertFalse(m == u"abcdef", "%s %s" % (self, tp)) + self.assertTrue(m != u"abcdef") + self.assertFalse(u"abcdef" == m) + self.assertTrue(u"abcdef" != m) # Unordered comparisons are unimplemented, and therefore give # arbitrary results (they raise a TypeError in py3k) diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -993,7 +993,7 @@ try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED) - try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True) + try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False) diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -109,7 +109,7 @@ RegrTest('test_asynchat.py', usemodules='select fcntl'), RegrTest('test_asyncore.py', usemodules='select fcntl'), RegrTest('test_atexit.py', core=True), - RegrTest('test_audioop.py', skip="unsupported extension module"), + RegrTest('test_audioop.py', skip="incomplete module"), RegrTest('test_augassign.py', core=True), RegrTest('test_base64.py', usemodules='struct'), RegrTest('test_bastion.py'), diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py --- a/lib_pypy/_ctypes/array.py +++ b/lib_pypy/_ctypes/array.py @@ -1,4 +1,4 @@ 
-import _ffi +from _rawffi import alt as _ffi import _rawffi from _ctypes.basics import _CData, cdata_from_address, _CDataMeta, sizeof @@ -20,10 +20,13 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) - for i in range(len(val)): - self[i] = val[i] + if isinstance(val, str): + _rawffi.rawstring2charp(self._buffer.buffer, val) + else: + for i in range(len(val)): + self[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + self._buffer[len(val)] = '\x00' res.value = property(getvalue, setvalue) def getraw(self): @@ -33,8 +36,7 @@ def setraw(self, buffer): if len(buffer) > self._length_: raise ValueError("%r too long" % (buffer,)) - for i in range(len(buffer)): - self[i] = buffer[i] + _rawffi.rawstring2charp(self._buffer.buffer, buffer) res.raw = property(getraw, setraw) elif subletter == 'u': def getvalue(self): @@ -45,10 +47,14 @@ # we don't want to have buffers here if len(val) > self._length_: raise ValueError("%r too long" % (val,)) + if isinstance(val, unicode): + target = self._buffer + else: + target = self for i in range(len(val)): - self[i] = val[i] + target[i] = val[i] if len(val) < self._length_: - self[len(val)] = '\x00' + target[len(val)] = u'\x00' res.value = property(getvalue, setvalue) if '_length_' in typedict: diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py --- a/lib_pypy/_ctypes/basics.py +++ b/lib_pypy/_ctypes/basics.py @@ -1,6 +1,6 @@ import _rawffi -import _ffi +from _rawffi import alt as _ffi import sys try: from __pypy__ import builtinify diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py --- a/lib_pypy/_ctypes/function.py +++ b/lib_pypy/_ctypes/function.py @@ -5,7 +5,7 @@ from _ctypes.basics import is_struct_shape from _ctypes.builtin import get_errno, set_errno, get_last_error, set_last_error import _rawffi -import _ffi +from _rawffi import alt as _ffi import sys import traceback diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py --- a/lib_pypy/_ctypes/pointer.py +++ b/lib_pypy/_ctypes/pointer.py @@ -1,6 +1,6 @@ import _rawffi -import _ffi +from _rawffi import alt as _ffi from _ctypes.basics import _CData, _CDataMeta, cdata_from_address, ArgumentError from _ctypes.basics import keepalive_key, store_reference, ensure_objects from _ctypes.basics import sizeof, byref, as_ffi_pointer diff --git a/lib_pypy/_ctypes/primitive.py b/lib_pypy/_ctypes/primitive.py --- a/lib_pypy/_ctypes/primitive.py +++ b/lib_pypy/_ctypes/primitive.py @@ -1,4 +1,4 @@ -import _ffi +from _rawffi import alt as _ffi import _rawffi import weakref import sys diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -2,6 +2,8 @@ import _rawffi from _ctypes.basics import _CData, _CDataMeta, keepalive_key,\ store_reference, ensure_objects, CArgObject +from _ctypes.array import Array +from _ctypes.pointer import _Pointer import inspect def names_and_fields(self, _fields_, superclass, anonymous_fields=None): @@ -104,8 +106,11 @@ def __set__(self, obj, value): fieldtype = self.ctype cobj = fieldtype.from_param(value) - if ensure_objects(cobj) is not None: - key = keepalive_key(self.num) + key = keepalive_key(self.num) + if issubclass(fieldtype, _Pointer) and isinstance(cobj, Array): + # if our value is an Array we need the whole thing alive + store_reference(obj, key, cobj) + elif ensure_objects(cobj) is not None: store_reference(obj, key, cobj._objects) arg = 
cobj._get_buffer_value() if fieldtype._fficompositesize is not None: diff --git a/lib_pypy/_ffi.py b/lib_pypy/_ffi.py new file mode 100644 --- /dev/null +++ b/lib_pypy/_ffi.py @@ -0,0 +1,2 @@ +# Backward compatibility hack +from _rawffi.alt import * diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py --- a/lib_pypy/_sha.py +++ b/lib_pypy/_sha.py @@ -115,14 +115,14 @@ ] class sha: - "An implementation of the MD5 hash function in pure Python." + "An implementation of the SHA hash function in pure Python." digest_size = digestsize = 20 - block_size = 1 + block_size = 512 // 8 def __init__(self): "Initialisation." - + # Initial message length in bits(!). self.length = 0 self.count = [0, 0] @@ -209,7 +209,7 @@ self.H2 = (self.H2 + C) & 0xffffffff self.H3 = (self.H3 + D) & 0xffffffff self.H4 = (self.H4 + E) & 0xffffffff - + # Down from here all methods follow the Python Standard Library # API of the sha module. @@ -295,13 +295,13 @@ _long2bytesBigEndian(self.H3, 4) + \ _long2bytesBigEndian(self.H4, 4) - self.H0 = H0 - self.H1 = H1 + self.H0 = H0 + self.H1 = H1 self.H2 = H2 self.H3 = H3 self.H4 = H4 - self.input = input - self.count = count + self.input = input + self.count = count return digest diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -330,6 +330,14 @@ # SQLite version information sqlite_version = str(_ffi.string(_lib.sqlite3_libversion()).decode('ascii')) +_STMT_TYPE_UPDATE = 0 +_STMT_TYPE_DELETE = 1 +_STMT_TYPE_INSERT = 2 +_STMT_TYPE_REPLACE = 3 +_STMT_TYPE_OTHER = 4 +_STMT_TYPE_SELECT = 5 +_STMT_TYPE_INVALID = 6 + class Error(StandardError): pass @@ -992,13 +1000,18 @@ self.__statement = self.__connection._statement_cache.get(sql) if self.__connection._isolation_level is not None: - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if not self.__connection._in_transaction: self.__connection._begin() - elif self.__statement._type == "OTHER": + elif self.__statement._type == _STMT_TYPE_OTHER: if self.__connection._in_transaction: self.__connection.commit() - elif self.__statement._type == "SELECT": + elif self.__statement._type == _STMT_TYPE_SELECT: if multiple: raise ProgrammingError("You cannot execute SELECT " "statements in executemany().") @@ -1021,12 +1034,17 @@ self.__statement._reset() raise self.__connection._get_exception(ret) - if self.__statement._type in ("UPDATE", "DELETE", "INSERT", "REPLACE"): + if self.__statement._type in ( + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_INSERT, + _STMT_TYPE_REPLACE + ): if self.__rowcount == -1: self.__rowcount = 0 self.__rowcount += _lib.sqlite3_changes(self.__connection._db) - if not multiple and self.__statement._type == "INSERT": + if not multiple and self.__statement._type == _STMT_TYPE_INSERT: self.__lastrowid = _lib.sqlite3_last_insert_rowid(self.__connection._db) else: self.__lastrowid = None @@ -1176,11 +1194,19 @@ first_word = sql.lstrip().split(" ")[0].upper() if first_word == "": - self._type = "INVALID" - elif first_word in ("SELECT", "INSERT", "UPDATE", "DELETE", "REPLACE"): - self._type = first_word + self._type = _STMT_TYPE_INVALID + elif first_word == "SELECT": + self._type = _STMT_TYPE_SELECT + elif first_word == "INSERT": + self._type = _STMT_TYPE_INSERT + elif first_word == "UPDATE": + self._type = _STMT_TYPE_UPDATE + elif first_word == "DELETE": + self._type = _STMT_TYPE_DELETE + elif first_word == 
"REPLACE": + self._type = _STMT_TYPE_REPLACE else: - self._type = "OTHER" + self._type = _STMT_TYPE_OTHER if isinstance(sql, unicode): sql = sql.encode('utf-8') @@ -1193,7 +1219,7 @@ if ret == _lib.SQLITE_OK and not self._statement: # an empty statement, work around that, as it's the least trouble - self._type = "SELECT" + self._type = _STMT_TYPE_SELECT c_sql = _ffi.new("char[]", b"select 42") ret = _lib.sqlite3_prepare_v2(self.__con._db, c_sql, -1, statement_star, next_char) @@ -1312,7 +1338,12 @@ raise ValueError("parameters are of unsupported type") def _get_description(self): - if self._type in ("INSERT", "UPDATE", "DELETE", "REPLACE"): + if self._type in ( + _STMT_TYPE_INSERT, + _STMT_TYPE_UPDATE, + _STMT_TYPE_DELETE, + _STMT_TYPE_REPLACE + ): return None desc = [] for i in xrange(_lib.sqlite3_column_count(self._statement)): diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py new file mode 100644 --- /dev/null +++ b/lib_pypy/audioop.py @@ -0,0 +1,29 @@ + +import struct + + +class error(Exception): + pass + + +def _check_size(size): + if size != 1 and size != 2 and size != 4: + raise error("Size should be 1, 2 or 4") + + +def _check_params(length, size): + _check_size(size) + if length % size != 0: + raise error("not a whole number of frames") + + +def getsample(cp, size, i): + _check_params(len(cp), size) + if not (0 <= i < len(cp) / size): + raise error("Index out of range") + if size == 1: + return struct.unpack_from("B", buffer(cp)[i:])[0] + elif size == 2: + return struct.unpack_from("H", buffer(cp)[i * 2:])[0] + elif size == 4: + return struct.unpack_from("I", buffer(cp)[i * 4:])[0] diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py --- a/lib_pypy/datetime.py +++ b/lib_pypy/datetime.py @@ -878,7 +878,6 @@ month = self._month if day is None: day = self._day - year, month, day = _check_date_fields(year, month, day) return date(year, month, day) # Comparisons of date objects with other. 
@@ -1389,8 +1388,6 @@ microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) - _check_tzinfo_arg(tzinfo) return time(hour, minute, second, microsecond, tzinfo) def __nonzero__(self): @@ -1608,9 +1605,6 @@ microsecond = self.microsecond if tzinfo is True: tzinfo = self.tzinfo - year, month, day = _check_date_fields(year, month, day) - hour, minute, second, microsecond = _check_time_fields(hour, minute, second, microsecond) - _check_tzinfo_arg(tzinfo) return datetime(year, month, day, hour, minute, second, microsecond, tzinfo) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -34,14 +34,14 @@ "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO", "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', - "_collections", "_multibytecodec", "micronumpy", "_ffi", + "_collections", "_multibytecodec", "micronumpy", "_continuation", "_cffi_backend", "_csv", "cppyy", "_pypyjson"] )) translation_modules = default_modules.copy() translation_modules.update(dict.fromkeys( ["fcntl", "rctime", "select", "signal", "_rawffi", "zlib", - "struct", "_md5", "cStringIO", "array", "_ffi", + "struct", "_md5", "cStringIO", "array", "binascii", # the following are needed for pyrepl (and hence for the # interactive prompt/pdb) @@ -96,7 +96,6 @@ # no _rawffi if importing rpython.rlib.clibffi raises ImportError # or CompilationError or py.test.skip.Exception "_rawffi" : ["rpython.rlib.clibffi"], - "_ffi" : ["rpython.rlib.clibffi"], "zlib" : ["rpython.rlib.rzlib"], "bz2" : ["pypy.module.bz2.interp_bz2"], diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -109,6 +109,4 @@ .. _`rpython/translator/c/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/ .. _`rpython/translator/c/src/stacklet/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/src/stacklet/ .. _`rpython/translator/c/src/stacklet/stacklet.h`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/c/src/stacklet/stacklet.h -.. _`rpython/translator/cli/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/cli/ -.. _`rpython/translator/jvm/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/jvm/ .. _`rpython/translator/tool/`: https://bitbucket.org/pypy/pypy/src/default/rpython/translator/tool/ diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -38,7 +38,7 @@ # General information about the project. project = u'PyPy' -copyright = u'2013, The PyPy Project' +copyright = u'2014, The PyPy Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/pypy/doc/config/translation.lldebug0.txt b/pypy/doc/config/translation.lldebug0.txt new file mode 100644 --- /dev/null +++ b/pypy/doc/config/translation.lldebug0.txt @@ -0,0 +1,1 @@ +Like lldebug, but in addition compile C files with -O0 diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -103,8 +103,7 @@ .. _`extension modules`: cpython_differences.html#extension-modules .. _`cpython differences`: cpython_differences.html -.. _`compatibility wiki`: -.. https://bitbucket.org/pypy/compatibility/wiki/Home +.. 
_`compatibility wiki`: https://bitbucket.org/pypy/compatibility/wiki/Home .. _cffi: http://cffi.readthedocs.org/ --------------------------------- @@ -426,25 +425,12 @@ Could we use LLVM? ------------------ -In theory yes. But we tried to use it 5 or 6 times already, as a -translation backend or as a JIT backend --- and failed each time. +There is a (static) translation backend using LLVM in the branch +``llvm-translation-backend``. It can translate PyPy with or without the JIT on +Linux. -In more details: using LLVM as a (static) translation backend is -pointless nowadays because you can generate C code and compile it with -clang. (Note that compiling PyPy with clang gives a result that is not -faster than compiling it with gcc.) We might in theory get extra -benefits from LLVM's GC integration, but this requires more work on the -LLVM side before it would be remotely useful. Anyway, it could be -interfaced via a custom primitive in the C code. - -On the other hand, using LLVM as our JIT backend looks interesting as -well --- but again we made an attempt, and it failed: LLVM has no way to -patch the generated machine code. - -So the position of the core PyPy developers is that if anyone wants to -make an N+1'th attempt with LLVM, they are welcome, and will be happy to -provide help in the IRC channel, but they are left with the burden of proof -that (a) it works and (b) it gives important benefits. +Using LLVM as our JIT backend looks interesting as well -- we made an attempt, +but it failed: LLVM has no way to patch the generated machine code. ---------------------- How do I compile PyPy? diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst --- a/pypy/doc/garbage_collection.rst +++ b/pypy/doc/garbage_collection.rst @@ -210,4 +210,12 @@ are preserved. If the object dies then the pre-reserved location becomes free garbage, to be collected at the next major collection. +The exact name of this GC is either `minimark` or `incminimark`. The +latter is a version that does major collections incrementally (i.e. one +major collection is split along some number of minor collections, rather +than being done all at once after a specific minor collection). The +default is `incminimark`, as it seems to have a very minimal impact on +performance and memory usage at the benefit of avoiding the long pauses +of `minimark`. + .. include:: _ref.txt diff --git a/pypy/doc/gc_info.rst b/pypy/doc/gc_info.rst --- a/pypy/doc/gc_info.rst +++ b/pypy/doc/gc_info.rst @@ -6,7 +6,7 @@ Minimark -------- -PyPy's default ``minimark`` garbage collector is configurable through +PyPy's default ``incminimark`` garbage collector is configurable through several environment variables: ``PYPY_GC_NURSERY`` @@ -14,6 +14,17 @@ Defaults to 1/2 of your cache or ``4M``. Small values (like 1 or 1KB) are useful for debugging. +``PYPY_GC_NURSERY_CLEANUP`` + The interval at which nursery is cleaned up. Must + be smaller than the nursery size and bigger than the + biggest object we can allotate in the nursery. + +``PYPY_GC_INCREMENT_STEP`` + The size of memory marked during the marking step. Default is size of + nursery times 2. If you mark it too high your GC is not incremental at + all. The minimum is set to size that survives minor collection times + 1.5 so we reclaim anything all the time. + ``PYPY_GC_MAJOR_COLLECT`` Major collection memory factor. 
Default is ``1.82``, which means trigger a major collection when the diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -74,6 +74,10 @@ The actual details would be rather differen in PyPy, but we would like to have the same optimization implemented. +Or maybe not. We can also play around with the idea of using a single +representation: as a byte string in utf-8. (This idea needs some extra logic +for efficient indexing, like a cache.) + .. _`optimized unicode representation`: http://www.python.org/dev/peps/pep-0393/ Translation Toolchain @@ -138,20 +142,6 @@ * `hg` -Experiment (again) with LLVM backend for RPython compilation ------------------------------------------------------------- - -We already tried working with LLVM and at the time, LLVM was not mature enough -for our needs. It's possible that this has changed, reviving the LLVM backend -(or writing new from scratch) for static compilation would be a good project. - -(On the other hand, just generating C code and using clang might be enough. -The issue with that is the so-called "asmgcc GC root finder", which has tons -of issues of this own. In my opinion (arigo), it would be definitely a -better project to try to optimize the alternative, the "shadowstack" GC root -finder, which is nicely portable. So far it gives a pypy that is around -7% slower.) - Embedding PyPy ---------------------------------------- diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -39,3 +39,31 @@ .. branch: OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 Fix 3 broken links on PyPy published papers in docs. + +.. branch: jit-ordereddict + +.. branch: refactor-str-types +Remove multimethods on str/unicode/bytearray and make the implementations share code. + +.. branch: remove-del-from-generatoriterator +Speed up generators that don't yield inside try or wait blocks by skipping +unnecessary cleanup. + +.. branch: annotator +Remove FlowObjSpace. +Improve cohesion between rpython.flowspace and rpython.annotator. + +.. branch: detect-immutable-fields +mapdicts keep track of whether or not an attribute is every assigned to +multiple times. If it's only assigned once then an elidable lookup is used when +possible. + +.. branch: precompiled-headers +Create a Makefile using precompiled headers for MSVC platforms. +The downside is a messy nmake-compatible Makefile. Since gcc shows minimal +speedup, it was not implemented. + +.. branch: camelot +With a properly configured 256-color terminal (TERM=...-256color), the +Mandelbrot set shown during translation now uses a range of 50 colours. +Essential! 
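
The project-ideas.rst hunk above floats a single utf-8 byte-string representation for unicode, needing "some extra logic for efficient indexing, like a cache". As a rough sketch only of what such a cache could look like (plain Python; the class, helper and cache granularity are invented here and are not from the PyPy sources): the string remembers the byte offset of every 64th codepoint, so indexing walks forward from the nearest remembered offset instead of from byte 0.

    def _utf8_char_len(first_byte):
        # number of bytes in the utf-8 sequence that starts with this byte
        b = ord(first_byte)
        if b < 0x80:
            return 1
        elif b < 0xE0:
            return 2
        elif b < 0xF0:
            return 3
        else:
            return 4

    class Utf8String(object):
        _STEP = 64    # arbitrary cache granularity

        def __init__(self, u):
            self._bytes = u.encode('utf-8')
            self._length = len(u)
            self._offsets = [0]    # byte offset of codepoints 0, _STEP, 2*_STEP, ...
            pos = 0
            for i in range(self._length):
                if i and i % self._STEP == 0:
                    self._offsets.append(pos)
                pos += len(u[i].encode('utf-8'))

        def __len__(self):
            return self._length

        def __getitem__(self, index):
            if not 0 <= index < self._length:
                raise IndexError(index)
            # start from the nearest cached offset, then walk forward
            pos = self._offsets[index // self._STEP]
            for _ in range(index % self._STEP):
                pos += _utf8_char_len(self._bytes[pos])
            end = pos + _utf8_char_len(self._bytes[pos])
            return self._bytes[pos:end].decode('utf-8')

    s = Utf8String(u"caf\xe9 au lait")
    assert len(s) == 12 and s[3] == u"\xe9"

With the cache, indexing costs at most _STEP forward steps rather than a scan from the start; handling slices, surrogates and cache invalidation is exactly the "extra logic" the note alludes to.
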
diff --git a/pypy/goal/getnightly.py b/pypy/goal/getnightly.py --- a/pypy/goal/getnightly.py +++ b/pypy/goal/getnightly.py @@ -26,7 +26,12 @@ if branch == 'default': branch = 'trunk' -filename = 'pypy-c-jit-latest-%s.tar.bz2' % arch +if '--nojit' in sys.argv: + kind = 'nojit' +else: + kind = 'jit' + +filename = 'pypy-c-%s-latest-%s.tar.bz2' % (kind, arch) url = 'http://buildbot.pypy.org/nightly/%s/%s' % (branch, filename) tmp = py.path.local.mkdtemp() mydir = tmp.chdir() diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -90,9 +90,10 @@ return f """) - @entrypoint('main', [rffi.CCHARP, lltype.Signed], c_name='pypy_setup_home') + @entrypoint('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): from pypy.module.sys.initpath import pypy_find_stdlib + verbose = rffi.cast(lltype.Signed, verbose) if ll_home: home = rffi.charp2str(ll_home) else: @@ -120,7 +121,8 @@ @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): source = rffi.charp2str(ll_source) - return _pypy_execute_source(source) + res = _pypy_execute_source(source) + return rffi.cast(rffi.INT, res) @entrypoint('main', [], c_name='pypy_init_threads') def pypy_init_threads(): diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py --- a/pypy/interpreter/argument.py +++ b/pypy/interpreter/argument.py @@ -1,11 +1,11 @@ """ Arguments objects. """ - -from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.debug import make_sure_not_resized from rpython.rlib import jit +from pypy.interpreter.error import OperationError, oefmt + class Arguments(object): """ @@ -86,9 +86,9 @@ args_w = space.fixedview(w_stararg) except OperationError, e: if e.match(space, space.w_TypeError): - raise operationerrfmt( - space.w_TypeError, - "argument after * must be a sequence, not %T", w_stararg) + raise oefmt(space.w_TypeError, + "argument after * must be a sequence, not %T", + w_stararg) raise self.arguments_w = self.arguments_w + args_w @@ -113,10 +113,9 @@ w_keys = space.call_method(w_starstararg, "keys") except OperationError, e: if e.match(space, space.w_AttributeError): - raise operationerrfmt( - space.w_TypeError, - "argument after ** must be a mapping, not %T", - w_starstararg) + raise oefmt(space.w_TypeError, + "argument after ** must be a mapping, not %T", + w_starstararg) raise keys_w = space.unpackiterable(w_keys) keywords_w = [None] * len(keys_w) @@ -281,8 +280,7 @@ self._match_signature(w_firstarg, scope_w, signature, defaults_w, 0) except ArgErr, e: - raise operationerrfmt(self.space.w_TypeError, - "%s() %s", fnname, e.getmsg()) + raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg()) return signature.scope_length() def _parse(self, w_firstarg, signature, defaults_w, blindargs=0): @@ -304,8 +302,7 @@ try: return self._parse(w_firstarg, signature, defaults_w, blindargs) except ArgErr, e: - raise operationerrfmt(self.space.w_TypeError, - "%s() %s", fnname, e.getmsg()) + raise oefmt(self.space.w_TypeError, "%s() %s", fnname, e.getmsg()) @staticmethod def frompacked(space, w_args=None, w_kwds=None): @@ -344,10 +341,9 @@ for key in keywords: for otherkey in existingkeywords: if otherkey == key: - raise operationerrfmt(space.w_TypeError, - "got multiple values " - "for keyword argument " - "'%s'", key) + raise oefmt(space.w_TypeError, + "got multiple values for keyword argument '%s'", + key) 
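
The targetpypystandalone.py hunks above declare the embedding entrypoint arguments and results as C int (rffi.INT) and cast explicitly, since lltype.Signed corresponds to a C long, which is wider than int on most 64-bit platforms. A minimal sketch of that pattern, assuming the entrypoint decorator lives in rpython.rlib.entrypoint; the entrypoint name and body here are invented for illustration and are not part of the commit:

    from rpython.rlib.entrypoint import entrypoint       # assumed import location
    from rpython.rtyper.lltypesystem import rffi, lltype

    @entrypoint('main', [rffi.INT], c_name='pypy_example_flag')   # invented name
    def pypy_example_flag(flag):
        # C int -> RPython Signed before using the value
        flag = rffi.cast(lltype.Signed, flag)
        if flag:
            res = 1
        else:
            res = 0
        # RPython Signed -> C int on the way back out to the caller
        return rffi.cast(rffi.INT, res)
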
def _do_combine_starstarargs_wrapped(space, keys_w, w_starstararg, keywords, keywords_w, existingkeywords): @@ -367,10 +363,9 @@ raise else: if existingkeywords and key in existingkeywords: - raise operationerrfmt(space.w_TypeError, - "got multiple values " - "for keyword argument " - "'%s'", key) + raise oefmt(space.w_TypeError, + "got multiple values for keyword argument '%s'", + key) keywords[i] = key keywords_w[i] = space.getitem(w_starstararg, w_key) i += 1 diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py --- a/pypy/interpreter/astcompiler/ast.py +++ b/pypy/interpreter/astcompiler/ast.py @@ -1,12 +1,18 @@ # Generated by tools/asdl_py.py -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter import typedef -from pypy.interpreter.gateway import interp2app -from pypy.interpreter.error import OperationError, operationerrfmt from rpython.rlib.unroll import unrolling_iterable from rpython.tool.pairtype import extendabletype from rpython.tool.sourcetools import func_with_new_name +from pypy.interpreter import typedef +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import interp2app + + +def raise_attriberr(space, w_obj, name): + raise oefmt(space.w_AttributeError, + "'%T' object has no attribute '%s'", w_obj, name) + def check_string(space, w_obj): if not (space.isinstance_w(w_obj, space.w_str) or @@ -70,11 +76,13 @@ continue # field is optional w_obj = self.getdictvalue(space, missing) if w_obj is None: - err = "required field \"%s\" missing from %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "required field \"%s\" missing from %s", + missing, host) else: - err = "incorrect type for field \"%s\" in %s" - raise operationerrfmt(space.w_TypeError, err, missing, host) + raise oefmt(space.w_TypeError, + "incorrect type for field \"%s\" in %s", + missing, host) raise AssertionError("should not reach here") @@ -2793,7 +2801,7 @@ def Module_get_body(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2834,7 +2842,7 @@ def Interactive_get_body(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2879,7 +2887,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def Expression_set_body(space, w_self, w_new_value): @@ -2922,7 +2930,7 @@ def Suite_get_body(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -2967,7 +2975,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') + raise_attriberr(space, w_self, 'lineno') return space.wrap(w_self.lineno) def 
stmt_set_lineno(space, w_self, w_new_value): @@ -2988,7 +2996,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') + raise_attriberr(space, w_self, 'col_offset') return space.wrap(w_self.col_offset) def stmt_set_col_offset(space, w_self, w_new_value): @@ -3018,7 +3026,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') return space.wrap(w_self.name) def FunctionDef_set_name(space, w_self, w_new_value): @@ -3039,7 +3047,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') return space.wrap(w_self.args) def FunctionDef_set_args(space, w_self, w_new_value): @@ -3056,7 +3064,7 @@ def FunctionDef_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3072,7 +3080,7 @@ def FunctionDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'decorator_list') + raise_attriberr(space, w_self, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: list_w = [] @@ -3121,7 +3129,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') return space.wrap(w_self.name) def ClassDef_set_name(space, w_self, w_new_value): @@ -3138,7 +3146,7 @@ def ClassDef_get_bases(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'bases') + raise_attriberr(space, w_self, 'bases') if w_self.w_bases is None: if w_self.bases is None: list_w = [] @@ -3154,7 +3162,7 @@ def ClassDef_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3170,7 +3178,7 @@ def ClassDef_get_decorator_list(space, w_self): if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'decorator_list') + raise_attriberr(space, w_self, 'decorator_list') if w_self.w_decorator_list is None: if w_self.decorator_list is None: list_w = [] @@ -3220,7 +3228,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Return_set_value(space, w_self, w_new_value): @@ -3263,7 +3271,7 @@ def Delete_get_targets(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'targets') + raise_attriberr(space, w_self, 
'targets') if w_self.w_targets is None: if w_self.targets is None: list_w = [] @@ -3304,7 +3312,7 @@ def Assign_get_targets(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'targets') + raise_attriberr(space, w_self, 'targets') if w_self.w_targets is None: if w_self.targets is None: list_w = [] @@ -3324,7 +3332,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Assign_set_value(space, w_self, w_new_value): @@ -3373,7 +3381,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') + raise_attriberr(space, w_self, 'target') return space.wrap(w_self.target) def AugAssign_set_target(space, w_self, w_new_value): @@ -3396,7 +3404,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return operator_to_class[w_self.op - 1]() def AugAssign_set_op(space, w_self, w_new_value): @@ -3419,7 +3427,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def AugAssign_set_value(space, w_self, w_new_value): @@ -3468,7 +3476,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'dest') + raise_attriberr(space, w_self, 'dest') return space.wrap(w_self.dest) def Print_set_dest(space, w_self, w_new_value): @@ -3487,7 +3495,7 @@ def Print_get_values(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') + raise_attriberr(space, w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -3507,7 +3515,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'nl') + raise_attriberr(space, w_self, 'nl') return space.wrap(w_self.nl) def Print_set_nl(space, w_self, w_new_value): @@ -3555,7 +3563,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') + raise_attriberr(space, w_self, 'target') return space.wrap(w_self.target) def For_set_target(space, w_self, w_new_value): @@ -3578,7 +3586,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'iter') + raise_attriberr(space, w_self, 'iter') return space.wrap(w_self.iter) def For_set_iter(space, w_self, w_new_value): @@ -3597,7 +3605,7 @@ def For_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if 
w_self.body is None: list_w = [] @@ -3613,7 +3621,7 @@ def For_get_orelse(space, w_self): if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3662,7 +3670,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return space.wrap(w_self.test) def While_set_test(space, w_self, w_new_value): @@ -3681,7 +3689,7 @@ def While_get_body(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3697,7 +3705,7 @@ def While_get_orelse(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3745,7 +3753,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return space.wrap(w_self.test) def If_set_test(space, w_self, w_new_value): @@ -3764,7 +3772,7 @@ def If_get_body(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3780,7 +3788,7 @@ def If_get_orelse(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -3828,7 +3836,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'context_expr') + raise_attriberr(space, w_self, 'context_expr') return space.wrap(w_self.context_expr) def With_set_context_expr(space, w_self, w_new_value): @@ -3851,7 +3859,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'optional_vars') + raise_attriberr(space, w_self, 'optional_vars') return space.wrap(w_self.optional_vars) def With_set_optional_vars(space, w_self, w_new_value): @@ -3870,7 +3878,7 @@ def With_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -3917,7 +3925,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'type') + raise_attriberr(space, w_self, 'type') return space.wrap(w_self.type) def Raise_set_type(space, w_self, w_new_value): @@ -3940,7 +3948,7 @@ if w_obj is not 
None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'inst') + raise_attriberr(space, w_self, 'inst') return space.wrap(w_self.inst) def Raise_set_inst(space, w_self, w_new_value): @@ -3963,7 +3971,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'tback') + raise_attriberr(space, w_self, 'tback') return space.wrap(w_self.tback) def Raise_set_tback(space, w_self, w_new_value): @@ -4008,7 +4016,7 @@ def TryExcept_get_body(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -4024,7 +4032,7 @@ def TryExcept_get_handlers(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'handlers') + raise_attriberr(space, w_self, 'handlers') if w_self.w_handlers is None: if w_self.handlers is None: list_w = [] @@ -4040,7 +4048,7 @@ def TryExcept_get_orelse(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') if w_self.w_orelse is None: if w_self.orelse is None: list_w = [] @@ -4085,7 +4093,7 @@ def TryFinally_get_body(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -4101,7 +4109,7 @@ def TryFinally_get_finalbody(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'finalbody') + raise_attriberr(space, w_self, 'finalbody') if w_self.w_finalbody is None: if w_self.finalbody is None: list_w = [] @@ -4148,7 +4156,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return space.wrap(w_self.test) def Assert_set_test(space, w_self, w_new_value): @@ -4171,7 +4179,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'msg') + raise_attriberr(space, w_self, 'msg') return space.wrap(w_self.msg) def Assert_set_msg(space, w_self, w_new_value): @@ -4215,7 +4223,7 @@ def Import_get_names(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') + raise_attriberr(space, w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4260,7 +4268,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'module') + raise_attriberr(space, w_self, 'module') return space.wrap(w_self.module) def ImportFrom_set_module(space, w_self, w_new_value): @@ -4280,7 +4288,7 @@ def ImportFrom_get_names(space, w_self): if not 
w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') + raise_attriberr(space, w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4300,7 +4308,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'level') + raise_attriberr(space, w_self, 'level') return space.wrap(w_self.level) def ImportFrom_set_level(space, w_self, w_new_value): @@ -4348,7 +4356,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def Exec_set_body(space, w_self, w_new_value): @@ -4371,7 +4379,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'globals') + raise_attriberr(space, w_self, 'globals') return space.wrap(w_self.globals) def Exec_set_globals(space, w_self, w_new_value): @@ -4394,7 +4402,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'locals') + raise_attriberr(space, w_self, 'locals') return space.wrap(w_self.locals) def Exec_set_locals(space, w_self, w_new_value): @@ -4439,7 +4447,7 @@ def Global_get_names(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'names') + raise_attriberr(space, w_self, 'names') if w_self.w_names is None: if w_self.names is None: list_w = [] @@ -4484,7 +4492,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Expr_set_value(space, w_self, w_new_value): @@ -4582,7 +4590,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') + raise_attriberr(space, w_self, 'lineno') return space.wrap(w_self.lineno) def expr_set_lineno(space, w_self, w_new_value): @@ -4603,7 +4611,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') + raise_attriberr(space, w_self, 'col_offset') return space.wrap(w_self.col_offset) def expr_set_col_offset(space, w_self, w_new_value): @@ -4633,7 +4641,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return boolop_to_class[w_self.op - 1]() def BoolOp_set_op(space, w_self, w_new_value): @@ -4652,7 +4660,7 @@ def BoolOp_get_values(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') + raise_attriberr(space, w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -4698,7 +4706,7 @@ if w_obj is not None: return w_obj if not 
w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'left') + raise_attriberr(space, w_self, 'left') return space.wrap(w_self.left) def BinOp_set_left(space, w_self, w_new_value): @@ -4721,7 +4729,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return operator_to_class[w_self.op - 1]() def BinOp_set_op(space, w_self, w_new_value): @@ -4744,7 +4752,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'right') + raise_attriberr(space, w_self, 'right') return space.wrap(w_self.right) def BinOp_set_right(space, w_self, w_new_value): @@ -4793,7 +4801,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'op') + raise_attriberr(space, w_self, 'op') return unaryop_to_class[w_self.op - 1]() def UnaryOp_set_op(space, w_self, w_new_value): @@ -4816,7 +4824,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'operand') + raise_attriberr(space, w_self, 'operand') return space.wrap(w_self.operand) def UnaryOp_set_operand(space, w_self, w_new_value): @@ -4864,7 +4872,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') return space.wrap(w_self.args) def Lambda_set_args(space, w_self, w_new_value): @@ -4885,7 +4893,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def Lambda_set_body(space, w_self, w_new_value): @@ -4933,7 +4941,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'test') + raise_attriberr(space, w_self, 'test') return space.wrap(w_self.test) def IfExp_set_test(space, w_self, w_new_value): @@ -4956,7 +4964,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') return space.wrap(w_self.body) def IfExp_set_body(space, w_self, w_new_value): @@ -4979,7 +4987,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'orelse') + raise_attriberr(space, w_self, 'orelse') return space.wrap(w_self.orelse) def IfExp_set_orelse(space, w_self, w_new_value): @@ -5024,7 +5032,7 @@ def Dict_get_keys(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'keys') + raise_attriberr(space, w_self, 'keys') if w_self.w_keys is None: if w_self.keys is None: list_w = [] @@ -5040,7 +5048,7 @@ def Dict_get_values(space, w_self): if not w_self.initialization_state & 8: 
- raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'values') + raise_attriberr(space, w_self, 'values') if w_self.w_values is None: if w_self.values is None: list_w = [] @@ -5083,7 +5091,7 @@ def Set_get_elts(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') + raise_attriberr(space, w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -5128,7 +5136,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') + raise_attriberr(space, w_self, 'elt') return space.wrap(w_self.elt) def ListComp_set_elt(space, w_self, w_new_value): @@ -5147,7 +5155,7 @@ def ListComp_get_generators(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5193,7 +5201,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') + raise_attriberr(space, w_self, 'elt') return space.wrap(w_self.elt) def SetComp_set_elt(space, w_self, w_new_value): @@ -5212,7 +5220,7 @@ def SetComp_get_generators(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5258,7 +5266,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'key') + raise_attriberr(space, w_self, 'key') return space.wrap(w_self.key) def DictComp_set_key(space, w_self, w_new_value): @@ -5281,7 +5289,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def DictComp_set_value(space, w_self, w_new_value): @@ -5300,7 +5308,7 @@ def DictComp_get_generators(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5347,7 +5355,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elt') + raise_attriberr(space, w_self, 'elt') return space.wrap(w_self.elt) def GeneratorExp_set_elt(space, w_self, w_new_value): @@ -5366,7 +5374,7 @@ def GeneratorExp_get_generators(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'generators') + raise_attriberr(space, w_self, 'generators') if w_self.w_generators is None: if w_self.generators is None: list_w = [] @@ -5412,7 +5420,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise 
operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Yield_set_value(space, w_self, w_new_value): @@ -5459,7 +5467,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'left') + raise_attriberr(space, w_self, 'left') return space.wrap(w_self.left) def Compare_set_left(space, w_self, w_new_value): @@ -5478,7 +5486,7 @@ def Compare_get_ops(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ops') + raise_attriberr(space, w_self, 'ops') if w_self.w_ops is None: if w_self.ops is None: list_w = [] @@ -5494,7 +5502,7 @@ def Compare_get_comparators(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'comparators') + raise_attriberr(space, w_self, 'comparators') if w_self.w_comparators is None: if w_self.comparators is None: list_w = [] @@ -5542,7 +5550,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'func') + raise_attriberr(space, w_self, 'func') return space.wrap(w_self.func) def Call_set_func(space, w_self, w_new_value): @@ -5561,7 +5569,7 @@ def Call_get_args(space, w_self): if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') if w_self.w_args is None: if w_self.args is None: list_w = [] @@ -5577,7 +5585,7 @@ def Call_get_keywords(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'keywords') + raise_attriberr(space, w_self, 'keywords') if w_self.w_keywords is None: if w_self.keywords is None: list_w = [] @@ -5597,7 +5605,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 32: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'starargs') + raise_attriberr(space, w_self, 'starargs') return space.wrap(w_self.starargs) def Call_set_starargs(space, w_self, w_new_value): @@ -5620,7 +5628,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 64: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'kwargs') + raise_attriberr(space, w_self, 'kwargs') return space.wrap(w_self.kwargs) def Call_set_kwargs(space, w_self, w_new_value): @@ -5673,7 +5681,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Repr_set_value(space, w_self, w_new_value): @@ -5720,7 +5728,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'n') + raise_attriberr(space, w_self, 'n') return w_self.n def Num_set_n(space, w_self, w_new_value): @@ -5765,7 +5773,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no 
attribute '%s'", w_self, 's') + raise_attriberr(space, w_self, 's') return w_self.s def Str_set_s(space, w_self, w_new_value): @@ -5810,7 +5818,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Attribute_set_value(space, w_self, w_new_value): @@ -5833,7 +5841,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'attr') + raise_attriberr(space, w_self, 'attr') return space.wrap(w_self.attr) def Attribute_set_attr(space, w_self, w_new_value): @@ -5854,7 +5862,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Attribute_set_ctx(space, w_self, w_new_value): @@ -5903,7 +5911,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Subscript_set_value(space, w_self, w_new_value): @@ -5926,7 +5934,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'slice') + raise_attriberr(space, w_self, 'slice') return space.wrap(w_self.slice) def Subscript_set_slice(space, w_self, w_new_value): @@ -5949,7 +5957,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Subscript_set_ctx(space, w_self, w_new_value): @@ -5998,7 +6006,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'id') + raise_attriberr(space, w_self, 'id') return space.wrap(w_self.id) def Name_set_id(space, w_self, w_new_value): @@ -6019,7 +6027,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Name_set_ctx(space, w_self, w_new_value): @@ -6063,7 +6071,7 @@ def List_get_elts(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'elts') + raise_attriberr(space, w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -6083,7 +6091,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def List_set_ctx(space, w_self, w_new_value): @@ -6128,7 +6136,7 @@ def Tuple_get_elts(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", 
w_self, 'elts') + raise_attriberr(space, w_self, 'elts') if w_self.w_elts is None: if w_self.elts is None: list_w = [] @@ -6148,7 +6156,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ctx') + raise_attriberr(space, w_self, 'ctx') return expr_context_to_class[w_self.ctx - 1]() def Tuple_set_ctx(space, w_self, w_new_value): @@ -6197,7 +6205,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return w_self.value def Const_set_value(space, w_self, w_new_value): @@ -6315,7 +6323,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lower') + raise_attriberr(space, w_self, 'lower') return space.wrap(w_self.lower) def Slice_set_lower(space, w_self, w_new_value): @@ -6338,7 +6346,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'upper') + raise_attriberr(space, w_self, 'upper') return space.wrap(w_self.upper) def Slice_set_upper(space, w_self, w_new_value): @@ -6361,7 +6369,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'step') + raise_attriberr(space, w_self, 'step') return space.wrap(w_self.step) def Slice_set_step(space, w_self, w_new_value): @@ -6406,7 +6414,7 @@ def ExtSlice_get_dims(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'dims') + raise_attriberr(space, w_self, 'dims') if w_self.w_dims is None: if w_self.dims is None: list_w = [] @@ -6451,7 +6459,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'value') + raise_attriberr(space, w_self, 'value') return space.wrap(w_self.value) def Index_set_value(space, w_self, w_new_value): @@ -6722,7 +6730,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'target') + raise_attriberr(space, w_self, 'target') return space.wrap(w_self.target) def comprehension_set_target(space, w_self, w_new_value): @@ -6745,7 +6753,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'iter') + raise_attriberr(space, w_self, 'iter') return space.wrap(w_self.iter) def comprehension_set_iter(space, w_self, w_new_value): @@ -6764,7 +6772,7 @@ def comprehension_get_ifs(space, w_self): if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'ifs') + raise_attriberr(space, w_self, 'ifs') if w_self.w_ifs is None: if w_self.ifs is None: list_w = [] @@ -6811,7 +6819,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'lineno') + raise_attriberr(space, w_self, 'lineno') 
return space.wrap(w_self.lineno) def excepthandler_set_lineno(space, w_self, w_new_value): @@ -6832,7 +6840,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 2: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'col_offset') + raise_attriberr(space, w_self, 'col_offset') return space.wrap(w_self.col_offset) def excepthandler_set_col_offset(space, w_self, w_new_value): @@ -6862,7 +6870,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 4: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'type') + raise_attriberr(space, w_self, 'type') return space.wrap(w_self.type) def ExceptHandler_set_type(space, w_self, w_new_value): @@ -6885,7 +6893,7 @@ if w_obj is not None: return w_obj if not w_self.initialization_state & 8: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'name') + raise_attriberr(space, w_self, 'name') return space.wrap(w_self.name) def ExceptHandler_set_name(space, w_self, w_new_value): @@ -6904,7 +6912,7 @@ def ExceptHandler_get_body(space, w_self): if not w_self.initialization_state & 16: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'body') + raise_attriberr(space, w_self, 'body') if w_self.w_body is None: if w_self.body is None: list_w = [] @@ -6947,7 +6955,7 @@ def arguments_get_args(space, w_self): if not w_self.initialization_state & 1: - raise operationerrfmt(space.w_AttributeError, "'%T' object has no attribute '%s'", w_self, 'args') + raise_attriberr(space, w_self, 'args') if w_self.w_args is None: if w_self.args is None: From noreply at buildbot.pypy.org Thu Feb 13 21:00:26 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 13 Feb 2014 21:00:26 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20140213200026.2164C1D24E9@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r69134:75ef172c843b Date: 2014-02-10 18:07 -0800 http://bitbucket.org/pypy/pypy/changeset/75ef172c843b/ Log: cleanup diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -1,3 +1,10 @@ +"""The builtin dict implementation""" + +from rpython.rlib import jit, rerased +from rpython.rlib.debug import mark_dict_non_null +from rpython.rlib.objectmodel import newlist_hint, r_dict, specialize +from rpython.tool.sourcetools import func_renamer, func_with_new_name + from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import ( @@ -7,18 +14,10 @@ from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.util import negate -from rpython.rlib import jit, rerased -from rpython.rlib.debug import mark_dict_non_null -from rpython.rlib.objectmodel import newlist_hint, r_dict, specialize -from rpython.tool.sourcetools import func_renamer, func_with_new_name - UNROLL_CUTOFF = 5 -def _is_str(space, w_key): - return space.is_w(space.type(w_key), space.w_str) - def _never_equal_to_string(space, w_lookup_type): """Handles the case of a non string key lookup. 
Types that have a sane hash/eq function should allow us to return True @@ -29,8 +28,8 @@ return (space.is_w(w_lookup_type, space.w_NoneType) or space.is_w(w_lookup_type, space.w_int) or space.is_w(w_lookup_type, space.w_bool) or - space.is_w(w_lookup_type, space.w_float) - ) + space.is_w(w_lookup_type, space.w_float)) + @specialize.call_location() def w_dict_unrolling_heuristic(w_dct): @@ -69,19 +68,18 @@ w_type = space.w_dict storage = strategy.get_empty_storage() - w_self = space.allocate_instance(W_DictMultiObject, w_type) - W_DictMultiObject.__init__(w_self, space, strategy, storage) - return w_self + w_obj = space.allocate_instance(W_DictMultiObject, w_type) + W_DictMultiObject.__init__(w_obj, space, strategy, storage) + return w_obj def __init__(self, space, strategy, storage): self.space = space self.strategy = strategy self.dstorage = storage - def __repr__(w_self): + def __repr__(self): """representation for debugging purposes""" - #print('XXXXXXX', w_self.dstorage) - return "%s(%s)" % (w_self.__class__.__name__, w_self.strategy) + return "%s(%s)" % (self.__class__.__name__, self.strategy) def unwrap(w_dict, space): result = {} @@ -98,9 +96,9 @@ return space.get_and_call_function(w_missing, w_dict, w_key) return None - def initialize_content(w_self, list_pairs_w): + def initialize_content(self, list_pairs_w): for w_k, w_v in list_pairs_w: - w_self.setitem(w_k, w_v) + self.setitem(w_k, w_v) def setitem_str(self, key, w_value): self.strategy.setitem_str(self, key, w_value) @@ -115,7 +113,8 @@ if w_fill is None: w_fill = space.w_None if space.is_w(w_type, space.w_dict): - w_dict = W_DictMultiObject.allocate_and_init_instance(space, w_type) + w_dict = W_DictMultiObject.allocate_and_init_instance(space, + w_type) byteslist = space.listview_bytes(w_keys) if byteslist is not None: @@ -312,8 +311,7 @@ try: w_key, w_value = self.popitem() except KeyError: - raise OperationError(space.w_KeyError, - space.wrap("popitem(): dictionary is empty")) + raise oefmt(space.w_KeyError, "popitem(): dictionary is empty") return space.newtuple([w_key, w_value]) @unwrap_spec(w_default=WrappedDefault(None)) @@ -597,6 +595,7 @@ def getiterkeys(self, w_dict): return iter([None]) getitervalues = getiterkeys + def getiteritems(self, w_dict): return iter([(None, None)]) @@ -615,8 +614,8 @@ space = self.space if self.len != self.dictimplementation.length(): self.len = -1 # Make this error state sticky - msg = "dictionary changed size during iteration" - raise OperationError(space.w_RuntimeError, space.wrap(msg)) + raise oefmt(space.w_RuntimeError, + "dictionary changed size during iteration") # look for the next entry if self.pos < self.len: @@ -635,14 +634,15 @@ w_value = self.dictimplementation.getitem(w_key) if w_value is None: self.len = -1 # Make this error state sticky - msg = "dictionary changed during iteration" - raise OperationError(space.w_RuntimeError, space.wrap(msg)) + raise oefmt(space.w_RuntimeError, + "dictionary changed during iteration") return (w_key, w_value) # no more entries self.dictimplementation = None return EMPTY return func_with_new_name(next, 'next_' + TP) + class BaseIteratorImplementation(object): def __init__(self, space, strategy, implementation): self.space = space @@ -665,13 +665,14 @@ class BaseItemIterator(BaseIteratorImplementation): next_item = _new_next('item') + def create_iterator_classes(dictimpl, override_next_item=None): if not hasattr(dictimpl, 'wrapkey'): - wrapkey = lambda space, key : key + wrapkey = lambda space, key: key else: wrapkey = dictimpl.wrapkey.im_func 
if not hasattr(dictimpl, 'wrapvalue'): - wrapvalue = lambda space, key : key + wrapvalue = lambda space, key: key else: wrapvalue = dictimpl.wrapvalue.im_func @@ -800,7 +801,8 @@ return w_dict.getitem(w_key) def w_keys(self, w_dict): - l = [self.wrap(key) for key in self.unerase(w_dict.dstorage).iterkeys()] + l = [self.wrap(key) + for key in self.unerase(w_dict.dstorage).iterkeys()] return self.space.newlist(l) def values(self, w_dict): @@ -1036,7 +1038,8 @@ def wrapkey(space, key): return space.wrap(key) - # XXX there is no space.newlist_int yet to implement w_keys more efficiently + # XXX there is no space.newlist_int yet to implement w_keys more + # efficiently create_iterator_classes(IntDictStrategy) @@ -1071,8 +1074,7 @@ for w_pair in data_w: pair = space.fixedview(w_pair) if len(pair) != 2: - raise OperationError(space.w_ValueError, - space.wrap("sequence of pairs expected")) + raise oefmt(space.w_ValueError, "sequence of pairs expected") w_key, w_value = pair w_dict.setitem(w_key, w_value) @@ -1128,9 +1130,9 @@ ignore_for_isinstance_cache = True - def __init__(w_self, space, iteratorimplementation): - w_self.space = space - w_self.iteratorimplementation = iteratorimplementation + def __init__(self, space, iteratorimplementation): + self.space = space + self.iteratorimplementation = iteratorimplementation def descr_iter(self, space): return self @@ -1158,9 +1160,8 @@ new_inst = mod.get('dictiter_surrogate_new') w_typeobj = space.type(self) - raise OperationError( - space.w_TypeError, - space.wrap("can't pickle dictionary-keyiterator objects")) + raise oefmt(space.w_TypeError, + "can't pickle dictionary-keyiterator objects") # XXXXXX get that working again # we cannot call __init__ since we don't have the original dict @@ -1174,8 +1175,8 @@ w_clone = space.allocate_instance(W_DictMultiIterItemsObject, w_typeobj) else: - msg = "unsupported dictiter type '%s' during pickling" % (self,) - raise OperationError(space.w_TypeError, space.wrap(msg)) + raise oefmt(space.w_TypeError, + "unsupported dictiter type '%R' during pickling", self) w_clone.space = space w_clone.content = self.content w_clone.len = self.len @@ -1244,8 +1245,8 @@ # Views class W_DictViewObject(W_Root): - def __init__(w_self, space, w_dict): - w_self.w_dict = w_dict + def __init__(self, space, w_dict): + self.w_dict = w_dict def descr_repr(self, space): w_seq = space.call_function(space.w_list, self) From noreply at buildbot.pypy.org Thu Feb 13 21:00:28 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 13 Feb 2014 21:00:28 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140213200028.B80F31D24E9@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69136:3b71e90dc818 Date: 2014-02-10 18:24 -0800 http://bitbucket.org/pypy/pypy/changeset/3b71e90dc818/ Log: merge default diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -1,3 +1,10 @@ +"""The builtin dict implementation""" + +from rpython.rlib import jit, rerased +from rpython.rlib.debug import mark_dict_non_null +from rpython.rlib.objectmodel import newlist_hint, r_dict, specialize +from rpython.tool.sourcetools import func_renamer, func_with_new_name + from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import ( @@ -7,18 +14,10 @@ from pypy.objspace.std.stdtypedef import StdTypeDef from 
pypy.objspace.std.util import negate -from rpython.rlib import jit, rerased -from rpython.rlib.debug import mark_dict_non_null -from rpython.rlib.objectmodel import newlist_hint, r_dict, specialize -from rpython.tool.sourcetools import func_renamer, func_with_new_name - UNROLL_CUTOFF = 5 -def _is_str(space, w_key): - return space.is_w(space.type(w_key), space.w_str) - def _never_equal_to_string(space, w_lookup_type): """Handles the case of a non string key lookup. Types that have a sane hash/eq function should allow us to return True @@ -29,8 +28,8 @@ return (space.is_w(w_lookup_type, space.w_NoneType) or space.is_w(w_lookup_type, space.w_int) or space.is_w(w_lookup_type, space.w_bool) or - space.is_w(w_lookup_type, space.w_float) - ) + space.is_w(w_lookup_type, space.w_float)) + @specialize.call_location() def w_dict_unrolling_heuristic(w_dct): @@ -69,19 +68,18 @@ w_type = space.w_dict storage = strategy.get_empty_storage() - w_self = space.allocate_instance(W_DictMultiObject, w_type) - W_DictMultiObject.__init__(w_self, space, strategy, storage) - return w_self + w_obj = space.allocate_instance(W_DictMultiObject, w_type) + W_DictMultiObject.__init__(w_obj, space, strategy, storage) + return w_obj def __init__(self, space, strategy, storage): self.space = space self.strategy = strategy self.dstorage = storage - def __repr__(w_self): + def __repr__(self): """representation for debugging purposes""" - #print('XXXXXXX', w_self.dstorage) - return "%s(%s)" % (w_self.__class__.__name__, w_self.strategy) + return "%s(%s)" % (self.__class__.__name__, self.strategy) def unwrap(w_dict, space): result = {} @@ -98,9 +96,9 @@ return space.get_and_call_function(w_missing, w_dict, w_key) return None - def initialize_content(w_self, list_pairs_w): + def initialize_content(self, list_pairs_w): for w_k, w_v in list_pairs_w: - w_self.setitem(w_k, w_v) + self.setitem(w_k, w_v) def setitem_str(self, key, w_value): self.strategy.setitem_str(self, key, w_value) @@ -115,7 +113,8 @@ if w_fill is None: w_fill = space.w_None if space.is_w(w_type, space.w_dict): - w_dict = W_DictMultiObject.allocate_and_init_instance(space, w_type) + w_dict = W_DictMultiObject.allocate_and_init_instance(space, + w_type) byteslist = space.listview_bytes(w_keys) if byteslist is not None: @@ -250,8 +249,7 @@ try: w_key, w_value = self.popitem() except KeyError: - raise OperationError(space.w_KeyError, - space.wrap("popitem(): dictionary is empty")) + raise oefmt(space.w_KeyError, "popitem(): dictionary is empty") return space.newtuple([w_key, w_value]) @unwrap_spec(w_default=WrappedDefault(None)) @@ -527,6 +525,7 @@ def getiterkeys(self, w_dict): return iter([None]) getitervalues = getiterkeys + def getiteritems(self, w_dict): return iter([(None, None)]) @@ -545,8 +544,8 @@ space = self.space if self.len != self.dictimplementation.length(): self.len = -1 # Make this error state sticky - msg = "dictionary changed size during iteration" - raise OperationError(space.w_RuntimeError, space.wrap(msg)) + raise oefmt(space.w_RuntimeError, + "dictionary changed size during iteration") # look for the next entry if self.pos < self.len: @@ -565,14 +564,15 @@ w_value = self.dictimplementation.getitem(w_key) if w_value is None: self.len = -1 # Make this error state sticky - msg = "dictionary changed during iteration" - raise OperationError(space.w_RuntimeError, space.wrap(msg)) + raise oefmt(space.w_RuntimeError, + "dictionary changed during iteration") return (w_key, w_value) # no more entries self.dictimplementation = None return EMPTY return 
func_with_new_name(next, 'next_' + TP) + class BaseIteratorImplementation(object): def __init__(self, space, strategy, implementation): self.space = space @@ -595,13 +595,14 @@ class BaseItemIterator(BaseIteratorImplementation): next_item = _new_next('item') + def create_iterator_classes(dictimpl, override_next_item=None): if not hasattr(dictimpl, 'wrapkey'): - wrapkey = lambda space, key : key + wrapkey = lambda space, key: key else: wrapkey = dictimpl.wrapkey.im_func if not hasattr(dictimpl, 'wrapvalue'): - wrapvalue = lambda space, key : key + wrapvalue = lambda space, key: key else: wrapvalue = dictimpl.wrapvalue.im_func @@ -730,7 +731,8 @@ return w_dict.getitem(w_key) def w_keys(self, w_dict): - l = [self.wrap(key) for key in self.unerase(w_dict.dstorage).iterkeys()] + l = [self.wrap(key) + for key in self.unerase(w_dict.dstorage).iterkeys()] return self.space.newlist(l) def values(self, w_dict): @@ -967,7 +969,8 @@ def wrapkey(space, key): return space.wrap(key) - # XXX there is no space.newlist_int yet to implement w_keys more efficiently + # XXX there is no space.newlist_int yet to implement w_keys more + # efficiently create_iterator_classes(IntDictStrategy) @@ -1002,8 +1005,7 @@ for w_pair in data_w: pair = space.fixedview(w_pair) if len(pair) != 2: - raise OperationError(space.w_ValueError, - space.wrap("sequence of pairs expected")) + raise oefmt(space.w_ValueError, "sequence of pairs expected") w_key, w_value = pair w_dict.setitem(w_key, w_value) @@ -1036,9 +1038,9 @@ ignore_for_isinstance_cache = True - def __init__(w_self, space, iteratorimplementation): - w_self.space = space - w_self.iteratorimplementation = iteratorimplementation + def __init__(self, space, iteratorimplementation): + self.space = space + self.iteratorimplementation = iteratorimplementation def descr_iter(self, space): return self @@ -1066,9 +1068,8 @@ new_inst = mod.get('dictiter_surrogate_new') w_typeobj = space.type(self) - raise OperationError( - space.w_TypeError, - space.wrap("can't pickle dictionary-keyiterator objects")) + raise oefmt(space.w_TypeError, + "can't pickle dictionary-keyiterator objects") # XXXXXX get that working again # we cannot call __init__ since we don't have the original dict @@ -1082,8 +1083,8 @@ w_clone = space.allocate_instance(W_DictMultiIterItemsObject, w_typeobj) else: - msg = "unsupported dictiter type '%s' during pickling" % (self,) - raise OperationError(space.w_TypeError, space.wrap(msg)) + raise oefmt(space.w_TypeError, + "unsupported dictiter type '%R' during pickling", self) w_clone.space = space w_clone.content = self.content w_clone.len = self.len @@ -1152,8 +1153,8 @@ # Views class W_DictViewObject(W_Root): - def __init__(w_self, space, w_dict): - w_self.w_dict = w_dict + def __init__(self, space, w_dict): + self.w_dict = w_dict def descr_repr(self, space): typename = space.type(self).getname(space) From noreply at buildbot.pypy.org Thu Feb 13 21:00:29 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 13 Feb 2014 21:00:29 +0100 (CET) Subject: [pypy-commit] pypy default: merge upstream Message-ID: <20140213200029.D8A9E1D24E9@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: Changeset: r69137:2d71669b2c3c Date: 2014-02-13 11:59 -0800 http://bitbucket.org/pypy/pypy/changeset/2d71669b2c3c/ Log: merge upstream diff --git a/include/PyPy.h b/include/PyPy.h new file mode 100644 --- /dev/null +++ b/include/PyPy.h @@ -0,0 +1,54 @@ +#ifndef _PYPY_H_ +#define _PYPY_H_ + +/* This header is meant to be included in programs that use PyPy as an + embedded 
library. */ + +#ifdef __cplusplus +extern "C" { +#endif + + +/* You should call this first once. */ +void rpython_startup_code(void); + + +/* Initialize the home directory of PyPy. It is necessary to call this. + + Call it with "home" being the file name of the libpypy.so, for + example; it will be used as a starting point when searching for the + lib-python and lib_pypy directories. They are searched from + "home/..", "home/../..", etc. Returns 0 if everything was fine. If + an error occurs, returns 1 and (if verbose != 0) prints some + information to stderr. + */ +int pypy_setup_home(char *home, int verbose); + + +/* If your program has multiple threads, then you need to call + pypy_init_threads() once at init time, and then pypy_thread_attach() + once in each other thread that just started and in which you want to + run Python code (including via callbacks, see below). + */ +void pypy_init_threads(void); +void pypy_thread_attach(void); + + +/* The main entry point: executes "source" as plain Python code. + Returns 0 if everything was fine. If a Python exception is + uncaught, it is printed to stderr and 1 is returned. + + Usually, the Python code from "source" should use cffi to fill in + global variables of "function pointer" type in your program. Use + cffi callbacks to do so. Once it is done, there is no need to call + pypy_execute_source() any more: from C, you call directly the + functions (which are "callbacks" from the point of view of Python). + */ +int pypy_execute_source(char *source); + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -142,32 +142,17 @@ * `hg` -Experiment (again) with LLVM backend for RPython compilation ------------------------------------------------------------- - -We already tried working with LLVM and at the time, LLVM was not mature enough -for our needs. It's possible that this has changed, reviving the LLVM backend -(or writing new from scratch) for static compilation would be a good project. - -(On the other hand, just generating C code and using clang might be enough. -The issue with that is the so-called "asmgcc GC root finder", which has tons -of issues of this own. In my opinion (arigo), it would be definitely a -better project to try to optimize the alternative, the "shadowstack" GC root -finder, which is nicely portable. So far it gives a pypy that is around -7% slower.) - Embedding PyPy ---------------------------------------- Note: there is a basic proof-of-concept for that as a `uwsgi pypy plugin`_ Being able to embed PyPy, say with its own limited C API, would be -useful. But here is the most interesting variant, straight from -EuroPython live discussion :-) We can have a generic "libpypy.so" that -can be used as a placeholder dynamic library, and when it gets loaded, -it runs a .py module that installs (via ctypes) the interface it wants -exported. This would give us a one-size-fits-all generic .so file to be -imported by any application that wants to load .so files :-) +useful. But there is a possibly better variant: use CFFI. With some +minimal tools atop CFFI, it would be possible to write a pure Python +library, and then compile automatically from it an .so/.dll file that is +a dynamic-link library with whatever C API we want. This gives us a +one-size-fits-all generic way to make .so/.dll files from Python. .. 
_`uwsgi pypy plugin`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -90,9 +90,10 @@ return f """) - @entrypoint('main', [rffi.CCHARP, lltype.Signed], c_name='pypy_setup_home') + @entrypoint('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): from pypy.module.sys.initpath import pypy_find_stdlib + verbose = rffi.cast(lltype.Signed, verbose) if ll_home: home = rffi.charp2str(ll_home) else: @@ -120,7 +121,8 @@ @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): source = rffi.charp2str(ll_source) - return _pypy_execute_source(source) + res = _pypy_execute_source(source) + return rffi.cast(rffi.INT, res) @entrypoint('main', [], c_name='pypy_init_threads') def pypy_init_threads(): diff --git a/pypy/interpreter/test/test_targetpypy.py b/pypy/interpreter/test/test_targetpypy.py --- a/pypy/interpreter/test/test_targetpypy.py +++ b/pypy/interpreter/test/test_targetpypy.py @@ -12,8 +12,10 @@ _, d = create_entry_point(space, None) execute_source = d['pypy_execute_source'] lls = rffi.str2charp("import sys; sys.modules['xyz'] = 3") - execute_source(lls) + res = execute_source(lls) lltype.free(lls, flavor='raw') + assert lltype.typeOf(res) == rffi.INT + assert rffi.cast(lltype.Signed, res) == 0 x = space.int_w(space.getitem(space.getattr(space.builtin_modules['sys'], space.wrap('modules')), space.wrap('xyz'))) @@ -24,5 +26,5 @@ # did not crash - the same globals pypy_setup_home = d['pypy_setup_home'] lls = rffi.str2charp(__file__) - pypy_setup_home(lls, 1) + pypy_setup_home(lls, rffi.cast(rffi.INT, 1)) lltype.free(lls, flavor='raw') diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -31,7 +31,7 @@ _compilation_info_ = eci calling_conv = 'c' - CHECK_LIBRARY = platform.Has('dump("x", (int)&BZ2_bzCompress)') + CHECK_LIBRARY = platform.Has('dump("x", (long)&BZ2_bzCompress)') off_t = platform.SimpleType("off_t", rffi.LONGLONG) size_t = platform.SimpleType("size_t", rffi.ULONG) diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1837,6 +1837,11 @@ # if self.objects_with_finalizers.non_empty(): self.deal_with_objects_with_finalizers() + elif self.old_objects_with_weakrefs.non_empty(): + # Weakref support: clear the weak pointers to dying objects + # (if we call deal_with_objects_with_finalizers(), it will + # invoke invalidate_old_weakrefs() itself directly) + self.invalidate_old_weakrefs() ll_assert(not self.objects_to_trace.non_empty(), "objects_to_trace should be empty") @@ -1846,9 +1851,7 @@ self.more_objects_to_trace.delete() # - # Weakref support: clear the weak pointers to dying objects - if self.old_objects_with_weakrefs.non_empty(): - self.invalidate_old_weakrefs() + # Light finalizers if self.old_objects_with_light_finalizers.non_empty(): self.deal_with_old_objects_with_finalizers() #objects_to_trace processed fully, can move on to sweeping @@ -2206,6 +2209,12 @@ self._recursively_bump_finalization_state_from_2_to_3(y) self._recursively_bump_finalization_state_from_1_to_2(x) + # Clear the weak pointers to dying objects. Also clears them if + # they point to objects which have the GCFLAG_FINALIZATION_ORDERING + # bit set here. 
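
[Editor's aside - illustrative only, not part of any changeset quoted above or below. The new include/PyPy.h and the pypy_setup_home / pypy_execute_source entrypoints shown in the preceding diffs describe a small C-level embedding API. A minimal, hypothetical C caller might look like the sketch below; the libpypy-c path, the verbosity flag and the Python snippet are placeholder assumptions, and error handling is reduced to the 0/1 return codes documented in the header.]

#include <stdio.h>
#include "PyPy.h"

int main(void)
{
    /* one-time runtime initialisation, before any other API call */
    rpython_startup_code();

    /* point PyPy at its lib-python/lib_pypy directories; the path below
       is a placeholder - pass the real location of libpypy-c.so */
    if (pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1) != 0) {
        fprintf(stderr, "pypy_setup_home failed\n");
        return 1;
    }

    /* only needed if the host program runs Python from several threads */
    pypy_init_threads();

    /* returns 0 on success, 1 if the source raised an uncaught exception */
    return pypy_execute_source("print 'hello from embedded pypy'");
}

[The RPython-side counterpart of this flow is exercised by test_targetpypy.py in the same changeset. End of editorial aside; the incminimark.py diff continues below.]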
These are objects which will be added to + # run_finalizers(). + self.invalidate_old_weakrefs() + while marked.non_empty(): x = marked.popleft() state = self._finalization_state(x) @@ -2333,7 +2342,9 @@ ll_assert((self.header(pointing_to).tid & GCFLAG_NO_HEAP_PTRS) == 0, "registered old weakref should not " "point to a NO_HEAP_PTRS obj") - if self.header(pointing_to).tid & GCFLAG_VISITED: + tid = self.header(pointing_to).tid + if ((tid & (GCFLAG_VISITED | GCFLAG_FINALIZATION_ORDERING)) == + GCFLAG_VISITED): new_with_weakref.append(obj) else: (obj + offset).address[0] = llmemory.NULL diff --git a/rpython/memory/test/gc_test_base.py b/rpython/memory/test/gc_test_base.py --- a/rpython/memory/test/gc_test_base.py +++ b/rpython/memory/test/gc_test_base.py @@ -29,6 +29,7 @@ GC_CAN_SHRINK_ARRAY = False GC_CAN_SHRINK_BIG_ARRAY = False BUT_HOW_BIG_IS_A_BIG_STRING = 3*WORD + WREF_IS_INVALID_BEFORE_DEL_IS_CALLED = False def setup_class(cls): cls._saved_logstate = py.log._getstate() @@ -370,15 +371,23 @@ class A(object): count = 0 a = A() + expected_invalid = self.WREF_IS_INVALID_BEFORE_DEL_IS_CALLED class B(object): def __del__(self): # when __del__ is called, the weakref to myself is still valid - # in RPython (at least with most GCs; this test might be - # skipped for specific GCs) - if self.ref() is self: - a.count += 10 # ok + # in RPython with most GCs. However, this can lead to strange + # bugs with incminimark. https://bugs.pypy.org/issue1687 + # So with incminimark, we expect the opposite. + if expected_invalid: + if self.ref() is None: + a.count += 10 # ok + else: + a.count = 666 # not ok else: - a.count = 666 # not ok + if self.ref() is self: + a.count += 10 # ok + else: + a.count = 666 # not ok def g(): b = B() ref = weakref.ref(b) diff --git a/rpython/memory/test/test_incminimark_gc.py b/rpython/memory/test/test_incminimark_gc.py --- a/rpython/memory/test/test_incminimark_gc.py +++ b/rpython/memory/test/test_incminimark_gc.py @@ -1,6 +1,38 @@ -from rpython.rlib.rarithmetic import LONG_BIT +from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.memory.test import test_minimark_gc class TestIncrementalMiniMarkGC(test_minimark_gc.TestMiniMarkGC): from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass + WREF_IS_INVALID_BEFORE_DEL_IS_CALLED = True + + def test_weakref_not_in_stack(self): + import weakref + class A(object): + pass + class B(object): + def __init__(self, next): + self.next = next + def g(): + a = A() + a.x = 5 + wr = weakref.ref(a) + llop.gc__collect(lltype.Void) # make everything old + assert wr() is not None + assert a.x == 5 + return wr + def f(): + ref = g() + llop.gc__collect(lltype.Void, 1) # start a major cycle + # at this point the stack is scanned, and the weakref points + # to an object not found, but still reachable: + b = ref() + llop.debug_print(lltype.Void, b) + assert b is not None + llop.gc__collect(lltype.Void) # finish the major cycle + # assert does not crash, because 'b' is still kept alive + b.x = 42 + return ref() is b + res = self.interpret(f, []) + assert res == True diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -366,6 +366,9 @@ def compute_result_annotation(self, s_l, s_sizehint): from rpython.annotator import model as annmodel + if annmodel.s_None.contains(s_l): + return # first argument is only None so far, but we + # expect a generalization later if not isinstance(s_l, 
annmodel.SomeList): raise annmodel.AnnotatorError("First argument must be a list") if not isinstance(s_sizehint, annmodel.SomeInteger): From noreply at buildbot.pypy.org Thu Feb 13 21:00:27 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 13 Feb 2014 21:00:27 +0100 (CET) Subject: [pypy-commit] pypy py3k: merge default Message-ID: <20140213200027.93E291D24E9@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69135:305f4623907b Date: 2014-02-10 14:42 -0800 http://bitbucket.org/pypy/pypy/changeset/305f4623907b/ Log: merge default diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -103,8 +103,7 @@ .. _`extension modules`: cpython_differences.html#extension-modules .. _`cpython differences`: cpython_differences.html -.. _`compatibility wiki`: -.. https://bitbucket.org/pypy/compatibility/wiki/Home +.. _`compatibility wiki`: https://bitbucket.org/pypy/compatibility/wiki/Home .. _cffi: http://cffi.readthedocs.org/ --------------------------------- diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -57,3 +57,13 @@ mapdicts keep track of whether or not an attribute is every assigned to multiple times. If it's only assigned once then an elidable lookup is used when possible. + +.. branch: precompiled-headers +Create a Makefile using precompiled headers for MSVC platforms. +The downside is a messy nmake-compatible Makefile. Since gcc shows minimal +speedup, it was not implemented. + +.. branch: camelot +With a properly configured 256-color terminal (TERM=...-256color), the +Mandelbrot set shown during translation now uses a range of 50 colours. +Essential! diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -82,6 +82,7 @@ 'newdict' : 'interp_dict.newdict', 'dictstrategy' : 'interp_dict.dictstrategy', 'set_debug' : 'interp_magic.set_debug', + 'locals_to_fast' : 'interp_magic.locals_to_fast', 'normalize_exc' : 'interp_magic.normalize_exc', } diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError, wrap_oserror from pypy.interpreter.gateway import WrappedDefault, unwrap_spec +from pypy.interpreter.pyframe import PyFrame from rpython.rlib.objectmodel import we_are_translated from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.typeobject import MethodCache @@ -109,6 +110,11 @@ def add_memory_pressure(estimate): rgc.add_memory_pressure(estimate) + at unwrap_spec(w_frame=PyFrame) +def locals_to_fast(space, w_frame): + assert isinstance(w_frame, PyFrame) + w_frame.locals2fast() + @unwrap_spec(w_value=WrappedDefault(None), w_tb=WrappedDefault(None)) def normalize_exc(space, w_type, w_value=None, w_tb=None): operr = OperationError(w_type, w_value, w_tb) diff --git a/pypy/module/__pypy__/test/test_locals2fast.py b/pypy/module/__pypy__/test/test_locals2fast.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/test/test_locals2fast.py @@ -0,0 +1,81 @@ +# Tests from Fabio Zadrozny + + +class AppTestLocals2Fast: + """ + Test setting locals in one function from another function + using several approaches. 
+ """ + + def setup_class(cls): + cls.w_save_locals = cls.space.appexec([], """(): + import sys + if '__pypy__' in sys.builtin_module_names: + import __pypy__ + save_locals = __pypy__.locals_to_fast + else: + # CPython version + import ctypes + @staticmethod + def save_locals(frame): + ctypes.pythonapi.PyFrame_LocalsToFast( + ctypes.py_object(frame), ctypes.c_int(0)) + return save_locals + """) + + def test_set_locals_using_save_locals(self): + import sys + def use_save_locals(name, value): + frame = sys._getframe().f_back + locals_dict = frame.f_locals + locals_dict[name] = value + self.save_locals(frame) + def test_method(fn): + x = 1 + # The method 'fn' should attempt to set x = 2 in the current frame. + fn('x', 2) + return x + x = test_method(use_save_locals) + assert x == 2 + + def test_frame_simple_change(self): + import sys + frame = sys._getframe() + a = 20 + frame.f_locals['a'] = 50 + self.save_locals(frame) + assert a == 50 + + def test_frame_co_freevars(self): + import sys + outer_var = 20 + def func(): + frame = sys._getframe() + frame.f_locals['outer_var'] = 50 + self.save_locals(frame) + assert outer_var == 50 + func() + + def test_frame_co_cellvars(self): + import sys + def check_co_vars(a): + frame = sys._getframe() + def function2(): + print a + assert 'a' in frame.f_code.co_cellvars + frame = sys._getframe() + frame.f_locals['a'] = 50 + self.save_locals(frame) + assert a == 50 + check_co_vars(1) + + def test_frame_change_in_inner_frame(self): + import sys + def change(f): + assert f is not sys._getframe() + f.f_locals['a'] = 50 + self.save_locals(f) + frame = sys._getframe() + a = 20 + change(frame) + assert a == 50 diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -911,6 +911,8 @@ # implement function callbacks and generate function decls functions = [] pypy_decls = [] + pypy_decls.append("#ifndef _PYPY_PYPY_DECL_H\n") + pypy_decls.append("#define _PYPY_PYPY_DECL_H\n") pypy_decls.append("#ifndef PYPY_STANDALONE\n") pypy_decls.append("#ifdef __cplusplus") pypy_decls.append("extern \"C\" {") @@ -954,6 +956,7 @@ pypy_decls.append("}") pypy_decls.append("#endif") pypy_decls.append("#endif /*PYPY_STANDALONE*/\n") + pypy_decls.append("#endif /*_PYPY_PYPY_DECL_H*/\n") pypy_decl_h = udir.join('pypy_decl.h') pypy_decl_h.write('\n'.join(pypy_decls)) diff --git a/pypy/module/cpyext/include/pycapsule.h b/pypy/module/cpyext/include/pycapsule.h --- a/pypy/module/cpyext/include/pycapsule.h +++ b/pypy/module/cpyext/include/pycapsule.h @@ -50,7 +50,7 @@ PyAPI_FUNC(void *) PyCapsule_Import(const char *name, int no_block); -void _Py_init_capsule(void); +PyTypeObject *_Py_get_capsule_type(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pycobject.h b/pypy/module/cpyext/include/pycobject.h --- a/pypy/module/cpyext/include/pycobject.h +++ b/pypy/module/cpyext/include/pycobject.h @@ -48,7 +48,7 @@ } PyCObject; #endif -void _Py_init_pycobject(void); +PyTypeObject *_Py_get_cobject_type(void); #ifdef __cplusplus } diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -333,8 +333,8 @@ loop, = log.loops_by_id("struct") if sys.maxint == 2 ** 63 - 1: extra = """ - i8 = int_lt(i4, -2147483648) - guard_false(i8, descr=...) + i8 = int_ge(i4, -2147483648) + guard_true(i8, descr=...) 
""" else: extra = "" diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -27,11 +27,11 @@ w_obj = values_w[i] val_type = typetuple[i] if val_type == int: - unwrapped = space.int_w(w_obj) + unwrapped = w_obj.int_w(space) elif val_type == float: - unwrapped = space.float_w(w_obj) + unwrapped = w_obj.float_w(space) elif val_type == str: - unwrapped = space.str_w(w_obj) + unwrapped = w_obj.str_w(space) elif val_type == object: unwrapped = w_obj else: @@ -129,7 +129,7 @@ def is_int_w(space, w_obj): """Determine if obj can be safely casted to an int_w""" try: - space.int_w(w_obj) + w_obj.int_w(space) except OperationError, e: if not (e.match(space, space.w_OverflowError) or e.match(space, space.w_TypeError)): @@ -138,16 +138,16 @@ return True def makespecialisedtuple(space, list_w): + # XXX: hardcoded to W_LongObject until py3k W_IntObject is restored + from pypy.objspace.std.longobject import W_LongObject + from pypy.objspace.std.floatobject import W_FloatObject if len(list_w) == 2: w_arg1, w_arg2 = list_w - w_type1 = space.type(w_arg1) - if w_type1 is space.w_int and is_int_w(space, w_arg1): - w_type2 = space.type(w_arg2) - if w_type2 is space.w_int and is_int_w(space, w_arg2): + if type(w_arg1) is W_LongObject and is_int_w(space, w_arg1): + if type(w_arg2) is W_LongObject and is_int_w(space, w_arg2): return Cls_ii(space, w_arg1, w_arg2) - elif w_type1 is space.w_float: - w_type2 = space.type(w_arg2) - if w_type2 is space.w_float: + elif type(w_arg1) is W_FloatObject: + if type(w_arg2) is W_FloatObject: return Cls_ff(space, w_arg1, w_arg2) return Cls_oo(space, w_arg1, w_arg2) else: diff --git a/pypy/objspace/std/test/test_specialisedtupleobject.py b/pypy/objspace/std/test/test_specialisedtupleobject.py --- a/pypy/objspace/std/test/test_specialisedtupleobject.py +++ b/pypy/objspace/std/test/test_specialisedtupleobject.py @@ -214,6 +214,14 @@ assert a == (1, 2.2,) + b assert not a != (1, 2.2) + b + def test_subclasses(self): + class I(int): pass + class F(float): pass + t = (I(42), I(43)) + assert type(t[0]) is I + t = (F(42), F(43)) + assert type(t[0]) is F + def test_ovfl_bug(self): # previously failed a = (0xffffffffffffffff, 0) diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -582,18 +582,18 @@ def consider_op(self, block, opindex): op = block.operations[opindex] - argcells = [self.binding(a) for a in op.args] + try: + argcells = [self.binding(a) for a in op.args] - # let's be careful about avoiding propagated SomeImpossibleValues - # to enter an op; the latter can result in violations of the - # more general results invariant: e.g. if SomeImpossibleValue enters is_ - # is_(SomeImpossibleValue, None) -> SomeBool - # is_(SomeInstance(not None), None) -> SomeBool(const=False) ... - # boom -- in the assert of setbinding() - for arg in argcells: - if isinstance(arg, annmodel.SomeImpossibleValue): - raise BlockedInference(self, op, opindex) - try: + # let's be careful about avoiding propagated SomeImpossibleValues + # to enter an op; the latter can result in violations of the + # more general results invariant: e.g. if SomeImpossibleValue enters is_ + # is_(SomeImpossibleValue, None) -> SomeBool + # is_(SomeInstance(not None), None) -> SomeBool(const=False) ... 
+ # boom -- in the assert of setbinding() + for arg in argcells: + if isinstance(arg, annmodel.SomeImpossibleValue): + raise BlockedInference(self, op, opindex) resultcell = op.consider(self, *argcells) except annmodel.AnnotatorError as e: # note that UnionError is a subclass graph = self.bookkeeper.position_key[0] diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4139,6 +4139,16 @@ a.build_types(f, [str]) assert ("Cannot prove that the object is callable" in exc.value.msg) + def test_UnionError_on_PBC(self): + l = ['a', 1] + def f(x): + l.append(x) + a = self.RPythonAnnotator() + with py.test.raises(annmodel.UnionError) as excinfo: + a.build_types(f, [int]) + assert 'Happened at file' in excinfo.value.source + assert 'Known variable annotations:' in excinfo.value.source + def test_str_format_error(self): def f(s, x): return s.format(x) diff --git a/rpython/jit/codewriter/longlong.py b/rpython/jit/codewriter/longlong.py --- a/rpython/jit/codewriter/longlong.py +++ b/rpython/jit/codewriter/longlong.py @@ -26,6 +26,7 @@ getrealfloat = lambda x: x gethash = compute_hash gethash_fast = longlong2float.float2longlong + extract_bits = longlong2float.float2longlong is_longlong = lambda TYPE: False # ------------------------------------- @@ -42,6 +43,7 @@ getrealfloat = longlong2float.longlong2float gethash = lambda xll: rarithmetic.intmask(xll - (xll >> 32)) gethash_fast = gethash + extract_bits = lambda x: x is_longlong = lambda TYPE: (TYPE is lltype.SignedLongLong or TYPE is lltype.UnsignedLongLong) diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -275,7 +275,12 @@ def same_constant(self, other): if isinstance(other, ConstFloat): - return self.value == other.value + # careful in this comparison: if self.value and other.value + # are both NaN, stored as regular floats (i.e. on 64-bit), + # then just using "==" would say False: two NaNs are always + # different from each other. 
+ return (longlong.extract_bits(self.value) == + longlong.extract_bits(other.value)) return False def nonnull(self): diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -594,11 +594,9 @@ if tobox is not None: # sanity check: see whether the current struct value # corresponds to what the cache thinks the value is - # XXX pypy with the following check fails on micronumpy, - # XXX investigate - #resbox = executor.execute(self.metainterp.cpu, self.metainterp, - # rop.GETFIELD_GC, fielddescr, box) - #assert resbox.constbox().same_constant(tobox.constbox()) + resbox = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETFIELD_GC, fielddescr, box) + assert resbox.constbox().same_constant(tobox.constbox()) return tobox resbox = self.execute_with_descr(opnum, fielddescr, box) self.metainterp.heapcache.getfield_now_known(box, fielddescr, resbox) diff --git a/rpython/jit/metainterp/test/test_history.py b/rpython/jit/metainterp/test/test_history.py --- a/rpython/jit/metainterp/test/test_history.py +++ b/rpython/jit/metainterp/test/test_history.py @@ -1,5 +1,8 @@ from rpython.jit.metainterp.history import * from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.rlib.rfloat import NAN, INFINITY +from rpython.jit.codewriter import longlong +from rpython.translator.c.test.test_standalone import StandaloneTests def test_repr(): @@ -38,3 +41,36 @@ assert not c3a.same_constant(c1b) assert not c3a.same_constant(c2b) assert c3a.same_constant(c3b) + +def test_same_constant_float(): + c1 = Const._new(12.34) + c2 = Const._new(12.34) + c3 = Const._new(NAN) + c4 = Const._new(NAN) + c5 = Const._new(INFINITY) + c6 = Const._new(INFINITY) + assert c1.same_constant(c2) + assert c3.same_constant(c4) + assert c5.same_constant(c6) + assert not c1.same_constant(c4) + assert not c1.same_constant(c6) + assert not c3.same_constant(c2) + assert not c3.same_constant(c6) + assert not c5.same_constant(c2) + assert not c5.same_constant(c4) + + +class TestZTranslated(StandaloneTests): + def test_ztranslated_same_constant_float(self): + def fn(args): + n = INFINITY + c1 = ConstFloat(longlong.getfloatstorage(n - INFINITY)) + c2 = ConstFloat(longlong.getfloatstorage(n - INFINITY)) + c3 = ConstFloat(longlong.getfloatstorage(12.34)) + if c1.same_constant(c2): + print "ok!" 
+ return 0 + + t, cbuilder = self.compile(fn) + data = cbuilder.cmdexec('') + assert "ok!\n" in data diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -338,9 +338,10 @@ _about_ = newlist_hint def compute_result_annotation(self, s_sizehint): - from rpython.annotator.model import SomeInteger + from rpython.annotator.model import SomeInteger, AnnotatorError - assert isinstance(s_sizehint, SomeInteger) + if not isinstance(s_sizehint, SomeInteger): + raise AnnotatorError("newlist_hint() argument must be an int") s_l = self.bookkeeper.newlist() s_l.listdef.listitem.resize() return s_l @@ -365,8 +366,10 @@ def compute_result_annotation(self, s_l, s_sizehint): from rpython.annotator import model as annmodel - assert isinstance(s_l, annmodel.SomeList) - assert isinstance(s_sizehint, annmodel.SomeInteger) + if not isinstance(s_l, annmodel.SomeList): + raise annmodel.AnnotatorError("First argument must be a list") + if not isinstance(s_sizehint, annmodel.SomeInteger): + raise annmodel.AnnotatorError("Second argument must be an integer") s_l.listdef.listitem.resize() def specialize_call(self, hop): diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -29,9 +29,9 @@ OFF_T = CC['off_t'] c_open = llexternal('fopen', [rffi.CCHARP, rffi.CCHARP], lltype.Ptr(FILE)) c_close = llexternal('fclose', [lltype.Ptr(FILE)], rffi.INT) -c_write = llexternal('fwrite', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, +c_fwrite = llexternal('fwrite', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, lltype.Ptr(FILE)], rffi.SIZE_T) -c_read = llexternal('fread', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, +c_fread = llexternal('fread', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, lltype.Ptr(FILE)], rffi.SIZE_T) c_feof = llexternal('feof', [lltype.Ptr(FILE)], rffi.INT) c_ferror = llexternal('ferror', [lltype.Ptr(FILE)], rffi.INT) @@ -40,13 +40,16 @@ rffi.INT) c_tmpfile = llexternal('tmpfile', [], lltype.Ptr(FILE)) c_fileno = llexternal('fileno', [lltype.Ptr(FILE)], rffi.INT) -c_ftell = llexternal('ftell', [lltype.Ptr(FILE)], lltype.Signed) +c_ftell = llexternal('ftell', [lltype.Ptr(FILE)], rffi.LONG) c_fflush = llexternal('fflush', [lltype.Ptr(FILE)], rffi.INT) c_ftruncate = llexternal('ftruncate', [rffi.INT, OFF_T], rffi.INT, macro=True) c_fgets = llexternal('fgets', [rffi.CCHARP, rffi.INT, lltype.Ptr(FILE)], rffi.CCHARP) +c_popen = llexternal('popen', [rffi.CCHARP, rffi.CCHARP], lltype.Ptr(FILE)) +c_pclose = llexternal('pclose', [lltype.Ptr(FILE)], rffi.INT) + BASE_BUF_SIZE = 4096 BASE_LINE_SIZE = 100 @@ -75,6 +78,21 @@ raise OSError(errno, os.strerror(errno)) return RFile(res) +def create_popen_file(command, type): + ll_command = rffi.str2charp(command) + try: + ll_type = rffi.str2charp(type) + try: + ll_f = c_popen(ll_command, ll_type) + if not ll_f: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) + finally: + lltype.free(ll_type, flavor='raw') + finally: + lltype.free(ll_command, flavor='raw') + return RPopenFile(ll_f) + class RFile(object): def __init__(self, ll_file): self.ll_file = ll_file @@ -89,30 +107,26 @@ try: # note that since we got a nonmoving buffer, it is either raw # or already cannot move, so the arithmetics below are fine - total_bytes = 0 - ll_current = ll_value - while total_bytes < len(value): - bytes = c_write(ll_current, 1, len(value) - r_uint(total_bytes), - ll_file) - if bytes == 0: - errno = rposix.get_errno() - raise OSError(errno, 
os.strerror(errno)) - total_bytes += bytes - ll_current = rffi.cast(rffi.CCHARP, - rffi.cast(lltype.Unsigned, ll_value) + - total_bytes) + length = len(value) + bytes = c_fwrite(ll_value, 1, length, ll_file) + if bytes != length: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) finally: rffi.free_nonmovingbuffer(value, ll_value) def close(self): - if self.ll_file: + ll_f = self.ll_file + if ll_f: # double close is allowed - res = c_close(self.ll_file) self.ll_file = lltype.nullptr(FILE) + res = self._do_close(ll_f) if res == -1: errno = rposix.get_errno() raise OSError(errno, os.strerror(errno)) + _do_close = staticmethod(c_close) # overridden in RPopenFile + def read(self, size=-1): # XXX CPython uses a more delicate logic here ll_file = self.ll_file @@ -124,27 +138,25 @@ try: s = StringBuilder() while True: - returned_size = c_read(buf, 1, BASE_BUF_SIZE, ll_file) + returned_size = c_fread(buf, 1, BASE_BUF_SIZE, ll_file) + returned_size = intmask(returned_size) # is between 0 and BASE_BUF_SIZE if returned_size == 0: if c_feof(ll_file): # ok, finished return s.build() - errno = c_ferror(ll_file) - c_clearerror(ll_file) - raise OSError(errno, os.strerror(errno)) + raise _error(ll_file) s.append_charpsize(buf, returned_size) finally: lltype.free(buf, flavor='raw') else: raw_buf, gc_buf = rffi.alloc_buffer(size) try: - returned_size = c_read(raw_buf, 1, size, ll_file) + returned_size = c_fread(raw_buf, 1, size, ll_file) + returned_size = intmask(returned_size) # is between 0 and size if returned_size == 0: if not c_feof(ll_file): - errno = c_ferror(ll_file) - raise OSError(errno, os.strerror(errno)) - s = rffi.str_from_buffer(raw_buf, gc_buf, size, - rffi.cast(lltype.Signed, returned_size)) + raise _error(ll_file) + s = rffi.str_from_buffer(raw_buf, gc_buf, size, returned_size) finally: rffi.keep_buffer_alive_until_here(raw_buf, gc_buf) return s @@ -200,8 +212,7 @@ if not result: if c_feof(self.ll_file): # ok return 0 - errno = c_ferror(self.ll_file) - raise OSError(errno, os.strerror(errno)) + raise _error(self.ll_file) # # Assume that fgets() works as documented, and additionally # never writes beyond the final \0, which the CPython @@ -240,3 +251,13 @@ finally: rffi.keep_buffer_alive_until_here(raw_buf, gc_buf) raise ValueError("I/O operation on closed file") + + +class RPopenFile(RFile): + _do_close = staticmethod(c_pclose) + + +def _error(ll_file): + errno = c_ferror(ll_file) + c_clearerror(ll_file) + raise OSError(errno, os.strerror(errno)) diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -15,7 +15,6 @@ def set_max_heap_size(nbytes): """Limit the heap size to n bytes. - So far only implemented by the Boehm GC and the semispace/generation GCs. 
""" pass diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -1,5 +1,5 @@ -import os +import os, sys, py from rpython.rtyper.test.tool import BaseRtypingTest from rpython.tool.udir import udir from rpython.rlib import rfile @@ -142,6 +142,15 @@ cls.tmpdir = udir.join('test_rfile_direct') cls.tmpdir.ensure(dir=True) + def test_read_a_lot(self): + fname = str(self.tmpdir.join('file_read_a_lot')) + with open(fname, 'w') as f: + f.write('dupa' * 999) + f = rfile.create_file(fname, 'r') + s = f.read() + assert s == 'dupa' * 999 + f.close() + def test_readline(self): fname = str(self.tmpdir.join('file_readline')) j = 0 @@ -175,3 +184,15 @@ got = f.readline() assert got == '' f.close() + + +class TestPopen: + def setup_class(cls): + if sys.platform == 'win32': + py.test.skip("not for win32") + + def test_popen(self): + f = rfile.create_popen_file("python -c 'print 42'", "r") + s = f.read() + f.close() + assert s == '42\n' diff --git a/rpython/rlib/test/test_rpoll.py b/rpython/rlib/test/test_rpoll.py --- a/rpython/rlib/test/test_rpoll.py +++ b/rpython/rlib/test/test_rpoll.py @@ -1,5 +1,6 @@ import os import errno +import py from rpython.rlib.rsocket import * from rpython.rlib.rpoll import * @@ -55,6 +56,8 @@ serv.close() def test_select(): + if os.name == 'nt': + py.test.skip('cannot select on file handles on windows') def f(): readend, writeend = os.pipe() try: @@ -72,6 +75,8 @@ interpret(f, []) def test_select_timeout(): + if os.name == 'nt': + py.test.skip('cannot select on file handles on windows') from time import time def f(): # once there was a bug where the sleeping time was doubled diff --git a/rpython/tool/ansi_mandelbrot.py b/rpython/tool/ansi_mandelbrot.py --- a/rpython/tool/ansi_mandelbrot.py +++ b/rpython/tool/ansi_mandelbrot.py @@ -14,8 +14,12 @@ """ -palette = [39, 34, 35, 36, 31, 33, 32, 37] - +import os +if os.environ.get('TERM', 'dumb').find('256') > 0: + from ansiramp import ansi_ramp80 + palette = map(lambda x: "38;5;%d" % x, ansi_ramp80) +else: + palette = [39, 34, 35, 36, 31, 33, 32, 37] colour_range = None # used for debugging diff --git a/rpython/tool/ansiramp.py b/rpython/tool/ansiramp.py new file mode 100755 --- /dev/null +++ b/rpython/tool/ansiramp.py @@ -0,0 +1,29 @@ +#! /usr/bin/env python +import colorsys + +def hsv2ansi(h, s, v): + # h: 0..1, s/v: 0..1 + if s < 0.1: + return int(v * 23) + 232 + r, g, b = map(lambda x: int(x * 5), colorsys.hsv_to_rgb(h, s, v)) + return 16 + (r * 36) + (g * 6) + b + +def ramp_idx(i, num): + assert num > 0 + i0 = float(i) / num + h = 0.57 + i0 + s = 1 - pow(i0,3) + v = 1 + return hsv2ansi(h, s, v) + +def ansi_ramp(num): + return [ramp_idx(i, num) for i in range(num)] + +ansi_ramp80 = ansi_ramp(80) + +if __name__ == '__main__': + import sys + from py.io import ansi_print + colors = int(sys.argv[1]) if len(sys.argv) > 1 else 80 + for col in range(colors): + ansi_print('#', "38;5;%d" % ramp_idx(col, colors), newline=False, flush=True) diff --git a/rpython/tool/sourcetools.py b/rpython/tool/sourcetools.py --- a/rpython/tool/sourcetools.py +++ b/rpython/tool/sourcetools.py @@ -6,7 +6,6 @@ # XXX We should try to generalize and single out one approach to dynamic # XXX code compilation. 
-import types import sys, os, inspect, new import py @@ -296,40 +295,3 @@ result.func_defaults = f.func_defaults result.func_dict.update(f.func_dict) return result - - -def _convert_const_maybe(x, encoding): - if isinstance(x, str): - return x.decode(encoding) - elif isinstance(x, tuple): - items = [_convert_const_maybe(item, encoding) for item in x] - return tuple(items) - return x - -def with_unicode_literals(fn=None, **kwds): - """Decorator that replace all string literals with unicode literals. - Similar to 'from __future__ import string literals' at function level. - Useful to limit changes in the py3k branch. - """ - encoding = kwds.pop('encoding', 'ascii') - if kwds: - raise TypeError("Unexpected keyword argument(s): %s" % ', '.join(kwds.keys())) - def decorator(fn): - co = fn.func_code - new_consts = [] - for const in co.co_consts: - new_consts.append(_convert_const_maybe(const, encoding)) - new_consts = tuple(new_consts) - new_code = types.CodeType(co.co_argcount, co.co_nlocals, co.co_stacksize, - co.co_flags, co.co_code, new_consts, co.co_names, - co.co_varnames, co.co_filename, co.co_name, - co.co_firstlineno, co.co_lnotab) - fn.func_code = new_code - return fn - # - # support the usage of @with_unicode_literals instead of @with_unicode_literals() - if fn is not None: - assert type(fn) is types.FunctionType - return decorator(fn) - else: - return decorator diff --git a/rpython/tool/test/test_sourcetools.py b/rpython/tool/test/test_sourcetools.py --- a/rpython/tool/test/test_sourcetools.py +++ b/rpython/tool/test/test_sourcetools.py @@ -1,7 +1,5 @@ -# -*- encoding: utf-8 -*- -import py from rpython.tool.sourcetools import ( - func_with_new_name, func_renamer, rpython_wrapper, with_unicode_literals) + func_renamer, func_with_new_name, rpython_wrapper) def test_rename(): def f(x, y=5): @@ -57,30 +55,3 @@ ('decorated', 40, 2), ('bar', 40, 2), ] - - -def test_with_unicode_literals(): - @with_unicode_literals() - def foo(): - return 'hello' - assert type(foo()) is unicode - # - @with_unicode_literals - def foo(): - return 'hello' - assert type(foo()) is unicode - # - def foo(): - return 'hello àèì' - py.test.raises(UnicodeDecodeError, "with_unicode_literals(foo)") - # - @with_unicode_literals(encoding='utf-8') - def foo(): - return 'hello àèì' - assert foo() == u'hello àèì' - # - @with_unicode_literals - def foo(): - return ('a', 'b') - assert type(foo()[0]) is unicode - diff --git a/rpython/translator/c/dlltool.py b/rpython/translator/c/dlltool.py --- a/rpython/translator/c/dlltool.py +++ b/rpython/translator/c/dlltool.py @@ -21,7 +21,8 @@ entrypoints.append(getfunctionptr(graph)) return entrypoints - def gen_makefile(self, targetdir, exe_name=None): + def gen_makefile(self, targetdir, exe_name=None, + headers_to_precompile=[]): pass # XXX finish def compile(self): @@ -30,6 +31,8 @@ extsymeci = ExternalCompilationInfo(export_symbols=export_symbols) self.eci = self.eci.merge(extsymeci) files = [self.c_source_filename] + self.extrafiles + files += self.eventually_copy(self.eci.separate_module_files) + self.eci.separate_module_files = () oname = self.name self.so_name = self.translator.platform.compile(files, self.eci, standalone=False, diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -260,12 +260,13 @@ defines['PYPY_MAIN_FUNCTION'] = "pypy_main_startup" self.eci = self.eci.merge(ExternalCompilationInfo( export_symbols=["pypy_main_startup", "pypy_debug_file"])) - self.eci, cfile, extra = 
gen_source(db, modulename, targetdir, - self.eci, defines=defines, - split=self.split) + self.eci, cfile, extra, headers_to_precompile = \ + gen_source(db, modulename, targetdir, + self.eci, defines=defines, split=self.split) self.c_source_filename = py.path.local(cfile) self.extrafiles = self.eventually_copy(extra) - self.gen_makefile(targetdir, exe_name=exe_name) + self.gen_makefile(targetdir, exe_name=exe_name, + headers_to_precompile=headers_to_precompile) return cfile def eventually_copy(self, cfiles): @@ -375,18 +376,22 @@ self._compiled = True return self.executable_name - def gen_makefile(self, targetdir, exe_name=None): - cfiles = [self.c_source_filename] + self.extrafiles + def gen_makefile(self, targetdir, exe_name=None, headers_to_precompile=[]): + module_files = self.eventually_copy(self.eci.separate_module_files) + self.eci.separate_module_files = [] + cfiles = [self.c_source_filename] + self.extrafiles + list(module_files) if exe_name is not None: exe_name = targetdir.join(exe_name) mk = self.translator.platform.gen_makefile( cfiles, self.eci, path=targetdir, exe_name=exe_name, + headers_to_precompile=headers_to_precompile, + no_precompile_cfiles = module_files, shared=self.config.translation.shared) if self.has_profopt(): profopt = self.config.translation.profopt - mk.definition('ABS_TARGET', '$(shell python -c "import sys,os; print os.path.abspath(sys.argv[1])" $(TARGET))') + mk.definition('ABS_TARGET', str(targetdir.join('$(TARGET)'))) mk.definition('DEFAULT_TARGET', 'profopt') mk.definition('PROFOPT', profopt) @@ -427,8 +432,8 @@ mk.definition('ASMFILES', sfiles) mk.definition('ASMLBLFILES', lblsfiles) mk.definition('GCMAPFILES', gcmapfiles) - if sys.platform == 'win32': - mk.definition('DEBUGFLAGS', '/MD /Zi') + if self.translator.platform.name == 'msvc': + mk.definition('DEBUGFLAGS', '-MD -Zi') else: if self.config.translation.shared: mk.definition('DEBUGFLAGS', '-O2 -fomit-frame-pointer -g -fPIC') @@ -484,11 +489,11 @@ mk.rule('.PRECIOUS', '%.s', "# don't remove .s files if Ctrl-C'ed") else: - if sys.platform == 'win32': - mk.definition('DEBUGFLAGS', '/MD /Zi') + if self.translator.platform.name == 'msvc': + mk.definition('DEBUGFLAGS', '-MD -Zi') else: mk.definition('DEBUGFLAGS', '-O1 -g') - if sys.platform == 'win32': + if self.translator.platform.name == 'msvc': mk.rule('debug_target', 'debugmode_$(DEFAULT_TARGET)', 'rem') else: mk.rule('debug_target', '$(TARGET)', '#') @@ -511,6 +516,7 @@ def __init__(self, database): self.database = database self.extrafiles = [] + self.headers_to_precompile = [] self.path = None self.namespace = NameManager() @@ -539,6 +545,8 @@ filepath = self.path.join(name) if name.endswith('.c'): self.extrafiles.append(filepath) + if name.endswith('.h'): + self.headers_to_precompile.append(filepath) return filepath.open('w') def getextrafiles(self): @@ -686,11 +694,11 @@ print >> fc, '/***********************************************************/' print >> fc, '/*** Implementations ***/' print >> fc - print >> fc, '#define PYPY_FILE_NAME "%s"' % name print >> fc, '#include "common_header.h"' print >> fc, '#include "structdef.h"' print >> fc, '#include "forwarddecl.h"' print >> fc, '#include "preimpl.h"' + print >> fc, '#define PYPY_FILE_NAME "%s"' % name print >> fc, '#include "src/g_include.h"' print >> fc print >> fc, MARKER @@ -732,12 +740,14 @@ print >> f, "#endif" def gen_preimpl(f, database): + f.write('#ifndef _PY_PREIMPLE_H\n#define _PY_PREIMPL_H\n') if database.translator is None or database.translator.rtyper is None: return 
preimplementationlines = pre_include_code_lines( database, database.translator.rtyper) for line in preimplementationlines: print >> f, line + f.write('#endif /* _PY_PREIMPL_H */\n') def gen_startupcode(f, database): # generate the start-up code and put it into a function @@ -799,6 +809,7 @@ f = filename.open('w') incfilename = targetdir.join('common_header.h') fi = incfilename.open('w') + fi.write('#ifndef _PY_COMMON_HEADER_H\n#define _PY_COMMON_HEADER_H\n') # # Header @@ -811,6 +822,7 @@ eci.write_c_header(fi) print >> fi, '#include "src/g_prerequisite.h"' + fi.write('#endif /* _PY_COMMON_HEADER_H*/\n') fi.close() @@ -822,6 +834,8 @@ sg.set_strategy(targetdir, split) database.prepare_inline_helpers() sg.gen_readable_parts_of_source(f) + headers_to_precompile = sg.headers_to_precompile[:] + headers_to_precompile.insert(0, incfilename) gen_startupcode(f, database) f.close() @@ -834,5 +848,4 @@ eci = add_extra_files(eci) eci = eci.convert_sources_to_files() - files, eci = eci.get_module_files() - return eci, filename, sg.getextrafiles() + list(files) + return eci, filename, sg.getextrafiles(), headers_to_precompile diff --git a/rpython/translator/c/test/test_extfunc.py b/rpython/translator/c/test/test_extfunc.py --- a/rpython/translator/c/test/test_extfunc.py +++ b/rpython/translator/c/test/test_extfunc.py @@ -65,7 +65,8 @@ f1 = compile(does_stuff, []) f1() - assert open(filename, 'r').read() == "hello world\n" + with open(filename, 'r') as fid: + assert fid.read() == "hello world\n" os.unlink(filename) def test_big_read(): @@ -296,8 +297,10 @@ os.chdir(path) return os.getcwd() f1 = compile(does_stuff, [str]) - # different on windows please - assert f1('/tmp') == os.path.realpath('/tmp') + if os.name == 'nt': + assert f1(os.environment['TEMP']) == os.path.realpath(os.environment['TEMP']) + else: + assert f1('/tmp') == os.path.realpath('/tmp') def test_mkdir_rmdir(): def does_stuff(path, delete): diff --git a/rpython/translator/c/test/test_newgc.py b/rpython/translator/c/test/test_newgc.py --- a/rpython/translator/c/test/test_newgc.py +++ b/rpython/translator/c/test/test_newgc.py @@ -658,7 +658,8 @@ def test_open_read_write_seek_close(self): self.run('open_read_write_seek_close') - assert open(self.filename, 'r').read() == "hello world\n" + with open(self.filename, 'r') as fid: + assert fid.read() == "hello world\n" os.unlink(self.filename) def define_callback_with_collect(cls): diff --git a/rpython/translator/platform/__init__.py b/rpython/translator/platform/__init__.py --- a/rpython/translator/platform/__init__.py +++ b/rpython/translator/platform/__init__.py @@ -100,7 +100,8 @@ return ExecutionResult(returncode, stdout, stderr) def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False): + shared=False, headers_to_precompile=[], + no_precompile_cfiles = []): raise NotImplementedError("Pure abstract baseclass") def __repr__(self): diff --git a/rpython/translator/platform/darwin.py b/rpython/translator/platform/darwin.py --- a/rpython/translator/platform/darwin.py +++ b/rpython/translator/platform/darwin.py @@ -50,14 +50,17 @@ return ["-Wl,-exported_symbols_list,%s" % (response_file,)] def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False): + shared=False, headers_to_precompile=[], + no_precompile_cfiles = []): # ensure frameworks are passed in the Makefile fs = self._frameworks(eci.frameworks) if len(fs) > 0: # concat (-framework, FrameworkName) pairs self.extra_libs += tuple(map(" ".join, zip(fs[::2], fs[1::2]))) mk = super(Darwin, 
self).gen_makefile(cfiles, eci, exe_name, path, - shared) + shared=shared, + headers_to_precompile=headers_to_precompile, + no_precompile_cfiles = no_precompile_cfiles) return mk diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py --- a/rpython/translator/platform/posix.py +++ b/rpython/translator/platform/posix.py @@ -83,7 +83,8 @@ return [entry[2:] for entry in out.split()] def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False): + shared=False, headers_to_precompile=[], + no_precompile_cfiles = []): cfiles = self._all_cfiles(cfiles, eci) if path is None: diff --git a/rpython/translator/platform/test/test_distutils.py b/rpython/translator/platform/test/test_distutils.py --- a/rpython/translator/platform/test/test_distutils.py +++ b/rpython/translator/platform/test/test_distutils.py @@ -11,3 +11,7 @@ def test_900_files(self): py.test.skip('Makefiles not suppoerted') + + def test_precompiled_headers(self): + py.test.skip('Makefiles not suppoerted') + diff --git a/rpython/translator/platform/test/test_makefile.py b/rpython/translator/platform/test/test_makefile.py --- a/rpython/translator/platform/test/test_makefile.py +++ b/rpython/translator/platform/test/test_makefile.py @@ -1,7 +1,10 @@ from rpython.translator.platform.posix import GnuMakefile as Makefile +from rpython.translator.platform import host +from rpython.tool.udir import udir +from rpython.translator.tool.cbuild import ExternalCompilationInfo from StringIO import StringIO -import re +import re, sys, py def test_simple_makefile(): m = Makefile() @@ -29,3 +32,112 @@ val = s.getvalue() assert not re.search('CC += +xxx', val, re.M) assert re.search('CC += +yyy', val, re.M) + +class TestMakefile(object): + platform = host + strict_on_stderr = True + + def check_res(self, res, expected='42\n'): + assert res.out == expected + if self.strict_on_stderr: + assert res.err == '' + assert res.returncode == 0 + + def test_900_files(self): + txt = '#include \n' + for i in range(900): + txt += 'int func%03d();\n' % i + txt += 'int main() {\n int j=0;' + for i in range(900): + txt += ' j += func%03d();\n' % i + txt += ' printf("%d\\n", j);\n' + txt += ' return 0;};\n' + cfile = udir.join('test_900_files.c') + cfile.write(txt) + cfiles = [cfile] + for i in range(900): + cfile2 = udir.join('implement%03d.c' %i) + cfile2.write(''' + int func%03d() + { + return %d; + } + ''' % (i, i)) + cfiles.append(cfile2) + mk = self.platform.gen_makefile(cfiles, ExternalCompilationInfo(), path=udir) + mk.write() + self.platform.execute_makefile(mk) + res = self.platform.execute(udir.join('test_900_files')) + self.check_res(res, '%d\n' %sum(range(900))) + + def test_precompiled_headers(self): + if self.platform.cc != 'cl.exe': + py.test.skip("Only MSVC profits from precompiled headers") + import time + tmpdir = udir.join('precompiled_headers').ensure(dir=1) + # Create an eci that should not use precompiled headers + eci = ExternalCompilationInfo(include_dirs=[tmpdir]) + main_c = tmpdir.join('main_no_pch.c') + eci.separate_module_files = [main_c] + ncfiles = 10 + nprecompiled_headers = 20 + txt = '' + for i in range(ncfiles): + txt += "int func%03d();\n" % i + txt += "\nint main(int argc, char * argv[])\n" + txt += "{\n int i=0;\n" + for i in range(ncfiles): + txt += " i += func%03d();\n" % i + txt += ' printf("%d\\n", i);\n' + txt += " return 0;\n};\n" + main_c.write(txt) + # Create some large headers with dummy functions to be precompiled + cfiles_precompiled_headers = [] + for i in 
range(nprecompiled_headers): + pch_name =tmpdir.join('pcheader%03d.h' % i) + txt = '#ifndef PCHEADER%03d_H\n#define PCHEADER%03d_H\n' %(i, i) + for j in range(3000): + txt += "int pcfunc%03d_%03d();\n" %(i, j) + txt += '#endif' + pch_name.write(txt) + cfiles_precompiled_headers.append(pch_name) + # Create some cfiles with headers we want precompiled + cfiles = [] + for i in range(ncfiles): + c_name =tmpdir.join('implement%03d.c' % i) + txt = '' + for pch_name in cfiles_precompiled_headers: + txt += '#include "%s"\n' % pch_name + txt += "int func%03d(){ return %d;};\n" % (i, i) + c_name.write(txt) + cfiles.append(c_name) + if sys.platform == 'win32': + clean = ('clean', '', 'for %f in ( $(OBJECTS) $(TARGET) ) do @if exist %f del /f %f') + get_time = time.clock + else: + clean = ('clean', '', 'rm -f $(OBJECTS) $(TARGET) ') + get_time = time.time + #write a non-precompiled header makefile + mk = self.platform.gen_makefile(cfiles, eci, path=tmpdir) + mk.rule(*clean) + mk.write() + t0 = get_time() + self.platform.execute_makefile(mk) + t1 = get_time() + t_normal = t1 - t0 + self.platform.execute_makefile(mk, extra_opts=['clean']) + # Write a super-duper makefile with precompiled headers + mk = self.platform.gen_makefile(cfiles, eci, path=tmpdir, + headers_to_precompile=cfiles_precompiled_headers,) + mk.rule(*clean) + mk.write() + t0 = get_time() + self.platform.execute_makefile(mk) + t1 = get_time() + t_precompiled = t1 - t0 + res = self.platform.execute(mk.exe_name) + self.check_res(res, '%d\n' %sum(range(ncfiles))) + print "precompiled haeder 'make' time %.2f, non-precompiled header time %.2f" %(t_precompiled, t_normal) + assert t_precompiled < t_normal * 0.5 + + diff --git a/rpython/translator/platform/test/test_platform.py b/rpython/translator/platform/test/test_platform.py --- a/rpython/translator/platform/test/test_platform.py +++ b/rpython/translator/platform/test/test_platform.py @@ -59,34 +59,6 @@ res = self.platform.execute(executable) self.check_res(res) - def test_900_files(self): - txt = '#include \n' - for i in range(900): - txt += 'int func%03d();\n' % i - txt += 'int main() {\n int j=0;' - for i in range(900): - txt += ' j += func%03d();\n' % i - txt += ' printf("%d\\n", j);\n' - txt += ' return 0;};\n' - cfile = udir.join('test_900_files.c') - cfile.write(txt) - cfiles = [cfile] - for i in range(900): - cfile2 = udir.join('implement%03d.c' %i) - cfile2.write(''' - int func%03d() - { - return %d; - } - ''' % (i, i)) - cfiles.append(cfile2) - mk = self.platform.gen_makefile(cfiles, ExternalCompilationInfo(), path=udir) - mk.write() - self.platform.execute_makefile(mk) - res = self.platform.execute(udir.join('test_900_files')) - self.check_res(res, '%d\n' %sum(range(900))) - - def test_nice_errors(self): cfile = udir.join('test_nice_errors.c') cfile.write('') diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -249,7 +249,8 @@ def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False): + shared=False, headers_to_precompile=[], + no_precompile_cfiles = []): cfiles = self._all_cfiles(cfiles, eci) if path is None: @@ -313,20 +314,60 @@ ('CC_LINK', self.link), ('LINKFILES', eci.link_files), ('MASM', self.masm), + ('MAKE', 'nmake.exe'), ('_WIN32', '1'), ] if self.x64: definitions.append(('_WIN64', '1')) + rules = [ + ('all', '$(DEFAULT_TARGET)', []), + ('.asm.obj', '', '$(MASM) /nologo /Fo$@ /c $< $(INCLUDEDIRS)'), + ] + + if 
len(headers_to_precompile)>0: + stdafx_h = path.join('stdafx.h') + txt = '#ifndef PYPY_STDAFX_H\n' + txt += '#define PYPY_STDAFX_H\n' + txt += '\n'.join(['#include "' + m.pathrel(c) + '"' for c in headers_to_precompile]) + txt += '\n#endif\n' + stdafx_h.write(txt) + stdafx_c = path.join('stdafx.c') + stdafx_c.write('#include "stdafx.h"\n') + definitions.append(('CREATE_PCH', '/Ycstdafx.h /Fpstdafx.pch /FIstdafx.h')) + definitions.append(('USE_PCH', '/Yustdafx.h /Fpstdafx.pch /FIstdafx.h')) + rules.append(('$(OBJECTS)', 'stdafx.pch', [])) + rules.append(('stdafx.pch', 'stdafx.h', + '$(CC) stdafx.c /c /nologo $(CFLAGS) $(CFLAGSEXTRA) ' + '$(CREATE_PCH) $(INCLUDEDIRS)')) + rules.append(('.c.obj', '', + '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) $(USE_PCH) ' + '/Fo$@ /c $< $(INCLUDEDIRS)')) + #Do not use precompiled headers for some files + #rules.append((r'{..\module_cache}.c{..\module_cache}.obj', '', + # '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) /Fo$@ /c $< $(INCLUDEDIRS)')) + # nmake cannot handle wildcard target specifications, so we must + # create a rule for compiling each file from eci since they cannot use + # precompiled headers :( + no_precompile = [] + for f in list(no_precompile_cfiles): + f = m.pathrel(py.path.local(f)) + if f not in no_precompile and f.endswith('.c'): + no_precompile.append(f) + target = f[:-1] + 'obj' + rules.append((target, f, + '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) ' + '/Fo%s /c %s $(INCLUDEDIRS)' %(target, f))) + + else: + rules.append(('.c.obj', '', + '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) ' + '/Fo$@ /c $< $(INCLUDEDIRS)')) + + for args in definitions: m.definition(*args) - rules = [ - ('all', '$(DEFAULT_TARGET)', []), - ('.c.obj', '', '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) /Fo$@ /c $< $(INCLUDEDIRS)'), - ('.asm.obj', '', '$(MASM) /nologo /Fo$@ /c $< $(INCLUDEDIRS)'), - ] - for rule in rules: m.rule(*rule) @@ -371,7 +412,7 @@ 'mt.exe -nologo -manifest $*.manifest -outputresource:$@;1', ]) m.rule('debugmode_$(DEFAULT_TARGET)', ['debugmode_$(TARGET)', 'main.obj'], - ['$(CC_LINK) /nologo /DEBUG main.obj $(SHARED_IMPORT_LIB) /out:$@' + ['$(CC_LINK) /nologo /DEBUG main.obj debugmode_$(SHARED_IMPORT_LIB) /out:$@' ]) return m @@ -392,6 +433,25 @@ self._handle_error(returncode, stdout, stderr, path.join('make')) +class WinDefinition(posix.Definition): + def write(self, f): + def write_list(prefix, lst): + lst = lst or [''] + for i, fn in enumerate(lst): + print >> f, prefix, fn, + if i < len(lst)-1: + print >> f, '\\' + else: + print >> f + prefix = ' ' * len(prefix) + name, value = self.name, self.value + if isinstance(value, str): + f.write('%s = %s\n' % (name, value)) + else: + write_list('%s =' % (name,), value) + f.write('\n') + + class NMakefile(posix.GnuMakefile): def write(self, out=None): # nmake expands macros when it parses rules. @@ -410,6 +470,14 @@ if out is None: f.close() + def definition(self, name, value): + defs = self.defs + defn = WinDefinition(name, value) + if name in defs: + self.lines[defs[name]] = defn + else: + defs[name] = len(self.lines) + self.lines.append(defn) class MingwPlatform(posix.BasePosix): name = 'mingw32' From noreply at buildbot.pypy.org Fri Feb 14 14:22:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Feb 2014 14:22:27 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: More prebuilt stuff from two days ago. Might get changed soon. 
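[Editor's note -- not part of the archived messages: the nmake rules generated by the windows.py change above follow the standard MSVC precompiled-header scheme. A hand-written equivalent, using the same compiler flags and the file names from the new test, would look roughly like this; it is an illustration only.]

    /* stdafx.h -- aggregates the large, rarely-changing headers */
    #ifndef PYPY_STDAFX_H
    #define PYPY_STDAFX_H
    #include "pcheader000.h"
    #include "pcheader001.h"
    /* ... one #include per header to precompile ... */
    #endif

    /* stdafx.c -- compiled once to produce stdafx.pch:
     *     cl /nologo /c /Ycstdafx.h /Fpstdafx.pch /FIstdafx.h stdafx.c
     * every other .c file is then compiled against that .pch:
     *     cl /nologo /c /Yustdafx.h /Fpstdafx.pch /FIstdafx.h implement000.c
     */
    #include "stdafx.h"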
Message-ID: <20140214132227.168371C1154@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r728:f1f127c63039 Date: 2014-02-13 13:49 +0100 http://bitbucket.org/pypy/stmgc/changeset/f1f127c63039/ Log: More prebuilt stuff from two days ago. Might get changed soon. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -30,6 +30,7 @@ perror("madvise"); abort(); } + reset_transaction_read_version_prebuilt(); STM_SEGMENT->transaction_read_version = 1; } diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -12,9 +12,9 @@ object_t *_stm_allocate_old(ssize_t size_rounded_up) { char *addr = large_malloc(size_rounded_up); - memset(addr, 0, size_rounded_up); + object_t* o = (object_t *)(addr - stm_object_pages); - object_t* o = (object_t *)(addr - stm_object_pages); + memset(REAL_ADDRESS(STM_SEGMENT->segment_base, o), 0, size_rounded_up); o->stm_flags = GCFLAG_WRITE_BARRIER; return o; } diff --git a/c7/stm/prebuilt.c b/c7/stm/prebuilt.c --- a/c7/stm/prebuilt.c +++ b/c7/stm/prebuilt.c @@ -3,6 +3,11 @@ #endif +static uint64_t prebuilt_readmarkers_start = 0; +static uint64_t prebuilt_readmarkers_end = 0; +static uint64_t prebuilt_objects_start = 0; + + void stm_copy_prebuilt_objects(object_t *target, char *source, ssize_t size) { /* Initialize a region of 'size' bytes at the 'target' address, @@ -23,19 +28,51 @@ with STM_FLAGS_PREBUILT. */ - uintptr_t utarget = (uintptr_t)target; - if (utarget / 16 < 8192 || - utarget + size > FIRST_READMARKER_PAGE * 4096UL || - (utarget + size + 15) / 16 > utarget) { + uint64_t utarget = (uint64_t)target; + uint64_t rm_start = utarget / 16; + uint64_t rm_end = (utarget + size + 15) / 16; + + if (rm_start < 8192 || rm_end > (utarget & ~4095) || + utarget + size > FIRST_READMARKER_PAGE * 4096UL) { fprintf(stderr, - "stm_copy_prebuilt_objects: invalid range (%ld, %ld)", + "stm_copy_prebuilt_objects: invalid range (0x%lx, 0x%lx)\n", (long)utarget, (long)size); abort(); } - uintptr_t start_page = utarget / 4096; - uintptr_t end_page = (utarget + size + 4095) / 4096; + + if (prebuilt_readmarkers_start == 0) { + prebuilt_readmarkers_start = rm_start; + prebuilt_readmarkers_end = rm_end; + prebuilt_objects_start = utarget & ~4095; + } + else { + if (prebuilt_readmarkers_start > rm_start) + prebuilt_readmarkers_start = rm_start; + if (prebuilt_readmarkers_end < rm_end) + prebuilt_readmarkers_end = rm_end; + if (prebuilt_objects_start > (utarget & ~4095)) + prebuilt_objects_start = utarget & ~4095; + + if (prebuilt_readmarkers_end > prebuilt_objects_start) { + fprintf(stderr, + "stm_copy_prebuilt_objects: read markers ending at 0x%lx " + "overlap with prebuilt objects starting at 0x%lx\n", + (long)prebuilt_readmarkers_end, + (long)prebuilt_objects_start); + abort(); + } + } + + uint64_t start_page = utarget / 4096; + uint64_t end_page = (utarget + size + 4095) / 4096; pages_initialize_shared(start_page, end_page - start_page); char *segment_base = get_segment_base(0); memcpy(REAL_ADDRESS(segment_base, utarget), source, size); } + +static void reset_transaction_read_version_prebuilt(void) +{ + memset(REAL_ADDRESS(STM_SEGMENT->segment_base, prebuilt_readmarkers_start), + 0, prebuilt_readmarkers_end - prebuilt_readmarkers_start); +} From noreply at buildbot.pypy.org Fri Feb 14 14:22:25 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Feb 2014 14:22:25 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Two days to convince myself that this version of stm_write() gives 
Message-ID: <20140214132225.E3A381C1154@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r727:3ddbe9c6d224 Date: 2014-02-13 13:48 +0100 http://bitbucket.org/pypy/stmgc/changeset/3ddbe9c6d224/ Log: Two days to convince myself that this version of stm_write() gives the best trade-offs diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -24,20 +24,20 @@ enum { - /* set if the write-barrier slowpath needs to trigger. set on all - old objects if there was no write-barrier on it in the same - transaction and no collection inbetween. */ - GCFLAG_WRITE_BARRIER = _STM_GCFLAG_WRITE_BARRIER, - /* set on objects which are in pages visible to others (SHARED - or PRIVATE), but not committed yet. So only visible from - this transaction. */ - //GCFLAG_NOT_COMMITTED = _STM_GCFLAG_WRITE_BARRIER << 1, + /* this flag is not set on most objects. when stm_write() is called + on an object that is not from the current transaction, then + _stm_write_slowpath() is called, and then the flag is set to + say "called once already, no need to call again". */ + GCFLAG_WRITE_BARRIER_CALLED = _STM_GCFLAG_WRITE_BARRIER_CALLED, + /* set if the object can be seen by all threads. If unset, we know + it is only visible from the current thread. */ + //GCFLAG_ALL_THREADS = 0x04, /* only used during collections to mark an obj as moved out of the generation it was in */ - //GCFLAG_MOVED = _STM_GCFLAG_WRITE_BARRIER << 2, + //GCFLAG_MOVED = 0x01, /* objects smaller than one page and even smaller than LARGE_OBJECT_WORDS * 8 bytes */ - //GCFLAG_SMALL = _STM_GCFLAG_WRITE_BARRIER << 3, + //GCFLAG_SMALL = 0x02, }; diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -33,6 +33,7 @@ typedef TLPREFIX struct object_s object_t; typedef TLPREFIX struct stm_segment_info_s stm_segment_info_t; typedef TLPREFIX struct stm_read_marker_s stm_read_marker_t; +typedef TLPREFIX struct stm_current_transaction_s stm_current_transaction_t; typedef TLPREFIX char stm_char; typedef void* stm_jmpbuf_t[5]; /* for use with __builtin_setjmp() */ @@ -40,6 +41,10 @@ uint8_t rm; }; +struct stm_current_transaction_s { + uint8_t ct; +}; + struct stm_segment_info_s { uint8_t transaction_read_version; int segment_num; @@ -77,8 +82,8 @@ void _stm_large_dump(void); #endif -#define _STM_GCFLAG_WRITE_BARRIER 0x01 -#define STM_FLAGS_PREBUILT _STM_GCFLAG_WRITE_BARRIER +#define _STM_GCFLAG_WRITE_BARRIER_CALLED 0x80 +#define STM_FLAGS_PREBUILT 0 /* ==================== HELPERS ==================== */ @@ -118,7 +123,12 @@ static inline void stm_write(object_t *obj) { - if (UNLIKELY(obj->stm_flags & _STM_GCFLAG_WRITE_BARRIER)) + /* this is: + 'if (ct == 0 && (stm_flags & WRITE_BARRIER_CALLED) == 0)' + assuming that 'ct' is either 0 (no, not current transaction) + or 0xff (yes) */ + if (UNLIKELY(!(((stm_current_transaction_t *)(((uintptr_t)obj) >> 8)->ct | + obj->stm_flags) & _STM_GCFLAG_WRITE_BARRIER_CALLED))) _stm_write_slowpath(obj); } From noreply at buildbot.pypy.org Fri Feb 14 14:22:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Feb 2014 14:22:28 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Rename "stm_current_transaction_t" into "stm_creation_marker_t". Message-ID: <20140214132228.258531C1154@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r729:508eed0d9371 Date: 2014-02-14 14:22 +0100 http://bitbucket.org/pypy/stmgc/changeset/508eed0d9371/ Log: Rename "stm_current_transaction_t" into "stm_creation_marker_t". 
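[Editor's note -- not part of the archived changesets: the creation marker renamed here is the byte consulted by the stm_write() fast path introduced in r727 above. The sketch below only illustrates that check; the helper name needs_write_slowpath() is invented for the example, and the real code reads the marker through the segment's thread-local address space rather than a raw pointer.]

    #include <stdint.h>
    #include <stdbool.h>

    #define GCFLAG_WRITE_BARRIER_CALLED  0x80  /* value of _STM_GCFLAG_WRITE_BARRIER_CALLED */

    /* The creation marker at (obj >> 8) is 0x00 for objects that predate the
       current transaction and 0xff for objects it created.  Since 0xff has
       the 0x80 bit set, OR-ing the marker with the object's flag byte lets a
       single branch skip the slow path both for freshly created objects and
       for objects whose write barrier already ran. */
    static inline bool needs_write_slowpath(uintptr_t obj, uint8_t stm_flags)
    {
        uint8_t cm = *(uint8_t *)(obj >> 8);          /* creation marker byte */
        return ((cm | stm_flags) & GCFLAG_WRITE_BARRIER_CALLED) == 0;
    }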
diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -18,10 +18,13 @@ #define FIRST_OBJECT_PAGE ((READMARKER_END + 4095) / 4096UL) #define FIRST_NURSERY_PAGE FIRST_OBJECT_PAGE #define END_NURSERY_PAGE (FIRST_NURSERY_PAGE + NB_NURSERY_PAGES) + #define READMARKER_START ((FIRST_OBJECT_PAGE * 4096UL) >> 4) #define FIRST_READMARKER_PAGE (READMARKER_START / 4096UL) #define NB_READMARKER_PAGES (FIRST_OBJECT_PAGE - FIRST_READMARKER_PAGE) +#define CURTRANS_START ((FIRST_OBJECT_PAGE * 4096UL) >> 8) + enum { /* this flag is not set on most objects. when stm_write() is called diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -17,10 +17,10 @@ /* if objects are larger than this limit but smaller than LARGE_OBJECT, then they might be allocted outside sections but still in the nursery. */ -#define MEDIUM_OBJECT (9*1024) +#define MEDIUM_OBJECT (8*1024) /* size in bytes of the alignment of any section requested */ -#define NURSERY_ALIGNMENT 64 +#define NURSERY_ALIGNMENT 256 /************************************************************/ diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -21,6 +21,10 @@ assert(READMARKER_START < READMARKER_END); assert(READMARKER_END <= 4096UL * FIRST_OBJECT_PAGE); assert(FIRST_OBJECT_PAGE < NB_PAGES); + assert(CURTRANS_START >= 8192); + assert((NB_PAGES * 4096UL) >> 8 <= (FIRST_OBJECT_PAGE * 4096UL) >> 4); + assert((END_NURSERY_PAGE * 4096UL) >> 8 <= + (FIRST_READMARKER_PAGE * 4096UL)); stm_object_pages = mmap(NULL, TOTAL_MEMORY, PROT_READ | PROT_WRITE, @@ -59,13 +63,19 @@ long time for each page. */ pages_initialize_shared(FIRST_NURSERY_PAGE, NB_NURSERY_PAGES); + /* The read markers are initially zero, which is correct: + STM_SEGMENT->transaction_read_version never contains zero, + so a null read marker means "not read" whatever the + current transaction_read_version is. + + The creation markers are initially zero, which is correct: + it means "objects of this group of 256 bytes have not been + allocated by the current transaction." + */ + setup_sync(); setup_nursery(); setup_gcpage(); - -#if 0 - stm_largemalloc_init(heap, HEAP_PAGES * 4096UL); -#endif } void stm_teardown(void) diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -33,16 +33,27 @@ typedef TLPREFIX struct object_s object_t; typedef TLPREFIX struct stm_segment_info_s stm_segment_info_t; typedef TLPREFIX struct stm_read_marker_s stm_read_marker_t; -typedef TLPREFIX struct stm_current_transaction_s stm_current_transaction_t; +typedef TLPREFIX struct stm_creation_marker_s stm_creation_marker_t; typedef TLPREFIX char stm_char; typedef void* stm_jmpbuf_t[5]; /* for use with __builtin_setjmp() */ struct stm_read_marker_s { + /* In every segment, every object has a corresponding read marker. + We assume that objects are at least 16 bytes long, and use + their address divided by 16. The read marker is equal to + 'STM_SEGMENT->transaction_read_version' if and only if the + object was read in the current transaction. */ uint8_t rm; }; -struct stm_current_transaction_s { - uint8_t ct; +struct stm_creation_marker_s { + /* In addition to read markers, every group of 256 bytes has one + extra byte, the creation marker, located at the address divided + by 256. The creation marker is either 0xff if all objects in + this group come have been allocated by the current transaction, + or 0x00 if none of them have been. Groups cannot contain a + mixture of both. 
*/ + uint8_t cm; }; struct stm_segment_info_s { @@ -125,9 +136,9 @@ { /* this is: 'if (ct == 0 && (stm_flags & WRITE_BARRIER_CALLED) == 0)' - assuming that 'ct' is either 0 (no, not current transaction) - or 0xff (yes) */ - if (UNLIKELY(!(((stm_current_transaction_t *)(((uintptr_t)obj) >> 8)->ct | + assuming that 'cm' is either 0 (not created in current transaction) + or 0xff (created in current transaction) */ + if (UNLIKELY(!(((stm_creation_marker_t *)(((uintptr_t)obj) >> 8)->cm | obj->stm_flags) & _STM_GCFLAG_WRITE_BARRIER_CALLED))) _stm_write_slowpath(obj); } From noreply at buildbot.pypy.org Fri Feb 14 15:07:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Feb 2014 15:07:10 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Comments Message-ID: <20140214140710.D1B331C1154@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r730:b2e798d333ed Date: 2014-02-14 15:06 +0100 http://bitbucket.org/pypy/stmgc/changeset/b2e798d333ed/ Log: Comments diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -69,7 +69,7 @@ current transaction_read_version is. The creation markers are initially zero, which is correct: - it means "objects of this group of 256 bytes have not been + it means "objects of this line of 256 bytes have not been allocated by the current transaction." */ diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -47,11 +47,11 @@ }; struct stm_creation_marker_s { - /* In addition to read markers, every group of 256 bytes has one + /* In addition to read markers, every "line" of 256 bytes has one extra byte, the creation marker, located at the address divided by 256. The creation marker is either 0xff if all objects in - this group come have been allocated by the current transaction, - or 0x00 if none of them have been. Groups cannot contain a + this line come have been allocated by the current transaction, + or 0x00 if none of them have been. Lines cannot contain a mixture of both. */ uint8_t cm; }; @@ -80,6 +80,9 @@ void _stm_write_slowpath(object_t *); stm_char *_stm_allocate_slowpath(ssize_t); void _stm_become_inevitable(char*); +void _stm_start_transaction(stm_thread_local_t *, stm_jmpbuf_t *); +void _stm_start_safe_point(int flags); +void _stm_stop_safe_point(int flags); #ifdef STM_TESTS bool _stm_was_read(object_t *obj); @@ -108,7 +111,7 @@ #define IMPLY(a, b) (!(a) || (b)) -/* ==================== API ==================== */ +/* ==================== PUBLIC API ==================== */ /* Structure of objects -------------------- @@ -126,12 +129,24 @@ uint8_t stm_flags; /* reserved for the STM library */ }; +/* The read barrier must be called whenever the object 'obj' is read. + It is not required to call it before reading: it can be called + during or after too, as long as we are in the same transaction. + If we might have finished the transaction and started the next + one, then stm_read() needs to be called again. +*/ static inline void stm_read(object_t *obj) { ((stm_read_marker_t *)(((uintptr_t)obj) >> 4))->rm = STM_SEGMENT->transaction_read_version; } +/* The write barrier must be called *before* doing any change to the + object 'obj'. If we might have finished the transaction and started + the next one, then stm_write() needs to be called again. + If stm_write() is called, it is not necessary to also call stm_read() + on the same object. +*/ static inline void stm_write(object_t *obj) { /* this is: @@ -144,11 +159,18 @@ } /* Must be provided by the user of this library. 
- The "size rounded up" must be a multiple of 8 and at least 16. */ + The "size rounded up" must be a multiple of 8 and at least 16. + "Tracing" an object means enumerating all GC references in it, + by invoking the callback passed as argument. +*/ extern ssize_t stmcb_size_rounded_up(struct object_s *); extern void stmcb_trace(struct object_s *, void (object_t **)); +/* Allocate an object of the given size, which must be a multiple + of 8 and at least 16. In the fast-path, this is inlined to just + a few assembler instructions. +*/ static inline object_t *stm_allocate(ssize_t size_rounded_up) { OPT_ASSERT(size_rounded_up >= 16); @@ -162,30 +184,53 @@ return (object_t *)p; } + +/* stm_setup() needs to be called once at the beginning of the program. + stm_teardown() can be called at the end, but that's not necessary + and rather meant for tests. + */ void stm_setup(void); void stm_teardown(void); + +/* Every thread needs to have a corresponding stm_thread_local_t + structure. It may be a "__thread" global variable or something else. + Use the following functions at the start and at the end of a thread. + The user of this library needs to maintain the two shadowstack fields; + at any call to stm_allocate(), these fields should point to a range + of memory that can be walked in order to find the stack roots. +*/ void stm_register_thread_local(stm_thread_local_t *tl); void stm_unregister_thread_local(stm_thread_local_t *tl); -void stm_copy_prebuilt_objects(object_t *target, char *source, ssize_t size); -void stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf); -void stm_start_inevitable_transaction(stm_thread_local_t *tl); -void stm_commit_transaction(void); -void stm_abort_transaction(void) __attribute__((noreturn)); - +/* Starting and ending transactions. You should only call stm_read(), + stm_write() and stm_allocate() from within a transaction. Use + the macro STM_START_TRANSACTION() to start a transaction that + can be restarted using the 'jmpbuf' (a pointer to a local variable + of type stm_jmpbuf_t). */ #define STM_START_TRANSACTION(tl, jmpbuf) ({ \ int _restart = __builtin_setjmp(jmpbuf); \ - stm_start_transaction(tl, jmpbuf); \ + _stm_start_transaction(tl, jmpbuf); \ _restart; \ }) +/* Start an inevitable transaction, if it's going to return from the + current function immediately. */ +void stm_start_inevitable_transaction(stm_thread_local_t *tl); + +/* Commit a transaction. */ +void stm_commit_transaction(void); + +/* Abort the currently running transaction. */ +void stm_abort_transaction(void) __attribute__((noreturn)); + +/* Turn the current transaction inevitable. The 'jmpbuf' passed to + STM_START_TRANSACTION() is not going to be used any more after + this call (but the stm_become_inevitable() itself may still abort). 
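   (Editor's illustration, not part of the changeset: a caller of this public
    API might look roughly like the sketch below.  The exact spelling of the
    jmpbuf argument is an assumption made for the example, and the user must
    still provide stmcb_size_rounded_up() and stmcb_trace() as stated above.

        stm_thread_local_t tl;
        stm_jmpbuf_t here;

        stm_register_thread_local(&tl);
        if (STM_START_TRANSACTION(&tl, here)) {
            /* non-zero return: we land here again after an abort, retrying */
        }
        object_t *obj = stm_allocate(16);   /* multiple of 8, at least 16 */
        stm_write(obj);                     /* before modifying obj        */
        /* stm_read(x) would guard reads of objects we do not write */
        stm_commit_transaction();
        stm_unregister_thread_local(&tl);
    )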
*/ static inline void stm_become_inevitable(char* msg) { if (STM_SEGMENT->jmpbuf_ptr != NULL) _stm_become_inevitable(msg); } -void stm_start_safe_point(int flags); -void stm_stop_safe_point(int flags); /* ==================== END ==================== */ From noreply at buildbot.pypy.org Fri Feb 14 15:11:40 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Feb 2014 15:11:40 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Fix Message-ID: <20140214141140.220C01C3364@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r731:9f3c9e6a0a31 Date: 2014-02-14 15:11 +0100 http://bitbucket.org/pypy/stmgc/changeset/9f3c9e6a0a31/ Log: Fix diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -153,7 +153,7 @@ 'if (ct == 0 && (stm_flags & WRITE_BARRIER_CALLED) == 0)' assuming that 'cm' is either 0 (not created in current transaction) or 0xff (created in current transaction) */ - if (UNLIKELY(!(((stm_creation_marker_t *)(((uintptr_t)obj) >> 8)->cm | + if (UNLIKELY(!((((stm_creation_marker_t *)(((uintptr_t)obj) >> 8))->cm | obj->stm_flags) & _STM_GCFLAG_WRITE_BARRIER_CALLED))) _stm_write_slowpath(obj); } From noreply at buildbot.pypy.org Fri Feb 14 16:25:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Feb 2014 16:25:37 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Fixes Message-ID: <20140214152537.AD03A1C1154@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r732:9c55d1e9505f Date: 2014-02-14 16:25 +0100 http://bitbucket.org/pypy/stmgc/changeset/9c55d1e9505f/ Log: Fixes diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -34,7 +34,7 @@ STM_SEGMENT->transaction_read_version = 1; } -void stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf) +void _stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf) { /* GS invalid before this point! */ acquire_thread_segment(tl); diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -15,6 +15,5 @@ object_t* o = (object_t *)(addr - stm_object_pages); memset(REAL_ADDRESS(STM_SEGMENT->segment_base, o), 0, size_rounded_up); - o->stm_flags = GCFLAG_WRITE_BARRIER; return o; } diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -43,5 +43,6 @@ bool _stm_was_written(object_t *obj) { - return !(obj->stm_flags & GCFLAG_WRITE_BARRIER); + return !!((((stm_creation_marker_t *)(((uintptr_t)obj) >> 8))->cm | + obj->stm_flags) & _STM_GCFLAG_WRITE_BARRIER_CALLED); } diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -97,12 +97,12 @@ assert(STM_SEGMENT->running_thread == tl); } -void stm_start_safe_point(int flags) +void _stm_start_safe_point(int flags) { //... } -void stm_stop_safe_point(int flags) +void _stm_stop_safe_point(int flags) { //... 
} diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -47,7 +47,7 @@ void stm_teardown(void); void stm_register_thread_local(stm_thread_local_t *tl); void stm_unregister_thread_local(stm_thread_local_t *tl); -void stm_copy_prebuilt_objects(object_t *target, char *source, ssize_t size); +//void stm_copy_prebuilt_objects(object_t *target, char *source, ssize_t size); bool _checked_stm_write(object_t *obj); bool _stm_was_read(object_t *obj); @@ -58,7 +58,7 @@ bool _stm_in_transaction(stm_thread_local_t *tl); void _stm_test_switch(stm_thread_local_t *tl); -void stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf); +void _stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf); void stm_commit_transaction(void); bool _check_abort_transaction(void); @@ -69,7 +69,7 @@ #define LOCK_EXCLUSIVE ... #define THREAD_YIELD ... -void stm_start_safe_point(int); +void _stm_start_safe_point(int); bool _check_stop_safe_point(int); """) @@ -200,7 +200,7 @@ if (__builtin_setjmp(here) == 0) { // returned directly assert(segment->jmpbuf_ptr == (stm_jmpbuf_t *)-1); segment->jmpbuf_ptr = &here; - stm_stop_safe_point(flags); + _stm_stop_safe_point(flags); segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; return 0; } @@ -368,7 +368,7 @@ def stm_start_safe_point(): - lib.stm_start_safe_point(lib.LOCK_COLLECT) + lib._stm_start_safe_point(lib.LOCK_COLLECT) def stm_stop_safe_point(): if lib._check_stop_safe_point(lib.LOCK_COLLECT): @@ -421,7 +421,7 @@ def start_transaction(self): tl = self.tls[self.current_thread] assert not lib._stm_in_transaction(tl) - lib.stm_start_transaction(tl, ffi.cast("stm_jmpbuf_t *", -1)) + lib._stm_start_transaction(tl, ffi.cast("stm_jmpbuf_t *", -1)) assert lib._stm_in_transaction(tl) def commit_transaction(self): From noreply at buildbot.pypy.org Fri Feb 14 16:38:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Feb 2014 16:38:38 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: in-progress Message-ID: <20140214153838.CABC91C0178@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r733:51a343395a8e Date: 2014-02-14 16:38 +0100 http://bitbucket.org/pypy/stmgc/changeset/51a343395a8e/ Log: in-progress diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -23,7 +23,8 @@ #define FIRST_READMARKER_PAGE (READMARKER_START / 4096UL) #define NB_READMARKER_PAGES (FIRST_OBJECT_PAGE - FIRST_READMARKER_PAGE) -#define CURTRANS_START ((FIRST_OBJECT_PAGE * 4096UL) >> 8) +#define CREATMARKER_START ((FIRST_OBJECT_PAGE * 4096UL) >> 8) +#define FIRST_CREATMARKER_PAGE (CREATMARKER_START / 4096UL) enum { diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -19,8 +19,10 @@ then they might be allocted outside sections but still in the nursery. */ #define MEDIUM_OBJECT (8*1024) -/* size in bytes of the alignment of any section requested */ -#define NURSERY_ALIGNMENT 256 +/* size in bytes of the "line". Should be equal to the line used by + stm_creation_marker_t. 
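   (Editor's worked example, not part of the changeset: with NURSERY_LINE_SHIFT
    set to 8 just below, one line is 1 << 8 == 256 bytes and owns exactly one
    creation-marker byte at the line's address >> 8; handing out a nursery
    section therefore means setting NURSERY_SECTION_SIZE >> 8 marker bytes to
    0xff, which is what the memset added further down in this file does.)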
*/ +#define NURSERY_LINE_SHIFT 8 +#define NURSERY_LINE (1 << NURSERY_LINE_SHIFT) /************************************************************/ @@ -35,6 +37,7 @@ static void setup_nursery(void) { + assert((NURSERY_SECTION_SIZE % NURSERY_LINE) == 0); assert(MEDIUM_OBJECT < LARGE_OBJECT); assert(LARGE_OBJECT < NURSERY_SECTION_SIZE); nursery_ctl.used = 0; @@ -48,7 +51,7 @@ #define NURSERY_ALIGN(bytes) \ - (((bytes) + NURSERY_ALIGNMENT - 1) & ~(NURSERY_ALIGNMENT - 1)) + (((bytes) + NURSERY_LINE - 1) & ~(NURSERY_LINE - 1)) static stm_char *allocate_from_nursery(uint64_t bytes) { @@ -73,6 +76,10 @@ NURSERY_SECTION_SIZE); STM_SEGMENT->nursery_current = p + size_rounded_up; STM_SEGMENT->nursery_section_end = (uintptr_t)p + NURSERY_SECTION_SIZE; + /* Also fill the corresponding creation markers with 0xff. */ + memset(REAL_ADDRESS(STM_SEGMENT->segment_base, + ((uintptr_t)p) >> NURSERY_LINE_SHIFT), + 0xff, NURSERY_SECTION_SIZE >> NURSERY_LINE_SHIFT); return p; } abort(); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -21,7 +21,9 @@ assert(READMARKER_START < READMARKER_END); assert(READMARKER_END <= 4096UL * FIRST_OBJECT_PAGE); assert(FIRST_OBJECT_PAGE < NB_PAGES); - assert(CURTRANS_START >= 8192); + assert(CREATMARKER_START >= 8192); + assert(2 <= FIRST_CREATMARKER_PAGE); + assert(FIRST_CREATMARKER_PAGE <= FIRST_READMARKER_PAGE); assert((NB_PAGES * 4096UL) >> 8 <= (FIRST_OBJECT_PAGE * 4096UL) >> 4); assert((END_NURSERY_PAGE * 4096UL) >> 8 <= (FIRST_READMARKER_PAGE * 4096UL)); @@ -48,9 +50,10 @@ memset(REAL_ADDRESS(segment_base, STM_PSEGMENT), 0, sizeof(*STM_PSEGMENT)); - /* Pages in range(2, FIRST_READMARKER_PAGE) are never used */ - if (FIRST_READMARKER_PAGE > 2) - mprotect(segment_base + 8192, (FIRST_READMARKER_PAGE - 2) * 4096UL, + /* Pages in range(2, FIRST_CREATMARKER_PAGE) are never used */ + if (FIRST_CREATMARKER_PAGE > 2) + mprotect(segment_base + 8192, + (FIRST_CREATMARKER_PAGE - 2) * 4096UL, PROT_NONE); struct stm_priv_segment_info_s *pr = get_priv_segment(i); diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -29,12 +29,12 @@ def test_transaction_start_stop(self): self.start_transaction() - + self.switch(1) self.start_transaction() self.commit_transaction() self.switch(0) - + self.commit_transaction() def test_simple_read(self): @@ -57,22 +57,21 @@ self.switch(1) lp2 = stm_allocate_old(16) assert lp1 != lp2 - + def test_write_on_old(self): lp1 = stm_allocate_old(16) self.start_transaction() stm_write(lp1) assert stm_was_written(lp1) stm_set_char(lp1, 'a') - + self.switch(1) self.start_transaction() stm_read(lp1) assert stm_was_read(lp1) assert stm_get_char(lp1) == '\0' self.commit_transaction() - - + def test_read_write_1(self): lp1 = stm_allocate_old(16) stm_get_real_address(lp1)[HDR] = 'a' #setchar From noreply at buildbot.pypy.org Fri Feb 14 17:21:57 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Feb 2014 17:21:57 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: in-progress Message-ID: <20140214162157.C1AE11C0178@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r734:b8d2ad0cb59c Date: 2014-02-14 17:21 +0100 http://bitbucket.org/pypy/stmgc/changeset/b8d2ad0cb59c/ Log: in-progress diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -5,7 +5,14 @@ void _stm_write_slowpath(object_t *obj) { - abort(); + assert(_running_transaction()); + + LIST_APPEND(STM_PSEGMENT->old_objects_to_trace, obj); + + 
obj->stm_flags |= GCFLAG_WRITE_BARRIER_CALLED; + stm_read(obj); + + //... } static void reset_transaction_read_version(void) @@ -45,6 +52,8 @@ STM_SEGMENT->transaction_read_version = old_rv + 1; if (UNLIKELY(old_rv == 0xff)) reset_transaction_read_version(); + + assert(list_is_empty(STM_PSEGMENT->old_objects_to_trace)); } diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -51,11 +51,16 @@ struct stm_priv_segment_info_s { struct stm_segment_info_s pub; + struct list_s *old_objects_to_trace; }; static char *stm_object_pages; static stm_thread_local_t *stm_thread_locals = NULL; +#ifdef STM_TESTS +static char *stm_other_pages; +#endif + #define REAL_ADDRESS(segment_base, src) ((segment_base) + (uintptr_t)(src)) @@ -76,3 +81,4 @@ } static bool _is_tl_registered(stm_thread_local_t *tl); +static bool _running_transaction(void); diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -14,6 +14,8 @@ char *addr = large_malloc(size_rounded_up); object_t* o = (object_t *)(addr - stm_object_pages); - memset(REAL_ADDRESS(STM_SEGMENT->segment_base, o), 0, size_rounded_up); + long i; + for (i = 0; i < NB_SEGMENTS; i++) + memset(REAL_ADDRESS(get_segment_base(i), o), 0, size_rounded_up); return o; } diff --git a/c7/stm/list.c b/c7/stm/list.c new file mode 100644 --- /dev/null +++ b/c7/stm/list.c @@ -0,0 +1,33 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +#define LIST_SETSIZE(n) (sizeof(struct list_s) + LIST_ITEMSSIZE(n)) +#define LIST_ITEMSSIZE(n) ((n) * sizeof(uintptr_t)) +#define LIST_OVERCNT(n) (33 + ((((n) / 2) * 3) | 1)) + +static struct list_s *list_create(void) +{ + uintptr_t initial_allocation = 32; + struct list_s *lst = malloc(LIST_SETSIZE(initial_allocation)); + if (lst == NULL) { + perror("out of memory in list_create"); + abort(); + } + lst->count = 0; + lst->last_allocated = initial_allocation - 1; + return lst; +} + +static struct list_s *_list_grow(struct list_s *lst, uintptr_t nalloc) +{ + nalloc = LIST_OVERCNT(nalloc); + lst = realloc(lst, LIST_SETSIZE(nalloc)); + if (lst == NULL) { + perror("out of memory in _list_grow"); + abort(); + } + lst->last_allocated = nalloc - 1; + return lst; +} diff --git a/c7/stm/list.h b/c7/stm/list.h new file mode 100644 --- /dev/null +++ b/c7/stm/list.h @@ -0,0 +1,64 @@ +#include + +struct list_s { + uintptr_t count; + uintptr_t last_allocated; + uintptr_t items[]; +}; + +static struct list_s *list_create(void); + +static inline void list_free(struct list_s *lst) +{ + free(lst); +} + + +static struct list_s *_list_grow(struct list_s *, uintptr_t); + +static inline struct list_s *list_append(struct list_s *lst, uintptr_t item) +{ + uintptr_t index = lst->count++; + if (UNLIKELY(index > lst->last_allocated)) + lst = _list_grow(lst, index); + lst->items[index] = item; + return lst; +} + +#define LIST_APPEND(lst, e) ((lst) = list_append((lst), (uintptr_t)(e))) + + +static inline void list_clear(struct list_s *lst) +{ + lst->count = 0; +} + +static inline bool list_is_empty(struct list_s *lst) +{ + return (lst->count == 0); +} + +static inline bool list_count(struct list_s *lst) +{ + return lst->count; +} + +static inline uintptr_t list_pop_item(struct list_s *lst) +{ + return lst->items[--lst->count]; +} + +static inline uintptr_t list_item(struct list_s *lst, uintptr_t index) +{ + return lst->items[index]; +} + +#define LIST_FOREACH_R(lst, TYPE, CODE) \ + do { \ + struct list_s *_lst = (lst); \ + uintptr_t _i; \ + for (_i = _lst->count; _i--; ) { \ + TYPE 
item = (TYPE)_lst->items[_i]; \ + CODE; \ + } \ + } while (0) diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -39,6 +39,9 @@ long i; for (i = 0; i < NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); +#ifdef STM_TESTS + stm_other_pages = segment_base; +#endif /* In each segment, the first page is where TLPREFIX'ed NULL accesses land. We mprotect it so that accesses fail. */ @@ -59,6 +62,7 @@ struct stm_priv_segment_info_s *pr = get_priv_segment(i); pr->pub.segment_num = i; pr->pub.segment_base = segment_base; + pr->old_objects_to_trace = list_create(); } /* Make the nursery pages shared. The other pages are @@ -85,6 +89,12 @@ { /* This function is called during testing, but normal programs don't need to call it. */ + long i; + for (i = 0; i < NB_SEGMENTS; i++) { + struct stm_priv_segment_info_s *pr = get_priv_segment(i); + list_free(pr->old_objects_to_trace); + } + munmap(stm_object_pages, TOTAL_MEMORY); stm_object_pages = NULL; diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -81,6 +81,11 @@ sem_post(&segments_ctl.semaphore); } +static bool _running_transaction(void) +{ + return (STM_SEGMENT->running_thread != NULL); +} + bool _stm_in_transaction(stm_thread_local_t *tl) { int num = tl->associated_segment_num; diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -1,11 +1,13 @@ #define _GNU_SOURCE #include "stmgc.h" +#include "stm/list.h" #include "stm/core.h" #include "stm/pages.h" #include "stm/sync.h" #include "stm/largemalloc.h" #include "stm/misc.c" +#include "stm/list.c" #include "stm/pages.c" #include "stm/prebuilt.c" #include "stm/gcpage.c" diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -35,6 +35,7 @@ typedef struct { object_t **shadowstack, **shadowstack_base; + int associated_segment_num; ...; } stm_thread_local_t; @@ -339,7 +340,7 @@ def stm_get_real_address(obj): return lib._stm_real_address(ffi.cast('object_t*', obj)) - + def stm_get_segment_address(ptr): return int(ffi.cast('uintptr_t', lib._stm_segment_address(ptr))) @@ -423,6 +424,12 @@ assert not lib._stm_in_transaction(tl) lib._stm_start_transaction(tl, ffi.cast("stm_jmpbuf_t *", -1)) assert lib._stm_in_transaction(tl) + # + seen = set() + for tl1 in self.tls: + if lib._stm_in_transaction(tl1): + assert tl1.associated_segment_num not in seen + seen.add(tl1.associated_segment_num) def commit_transaction(self): tl = self.tls[self.current_thread] diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -61,13 +61,16 @@ def test_write_on_old(self): lp1 = stm_allocate_old(16) self.start_transaction() + assert stm_get_char(lp1) == '\0' stm_write(lp1) assert stm_was_written(lp1) stm_set_char(lp1, 'a') + assert stm_get_char(lp1) == 'a' self.switch(1) self.start_transaction() - stm_read(lp1) + assert not stm_was_read(lp1) + assert stm_get_char(lp1) == '\0' assert stm_was_read(lp1) assert stm_get_char(lp1) == '\0' self.commit_transaction() From noreply at buildbot.pypy.org Fri Feb 14 18:30:33 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Feb 2014 18:30:33 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: In-progress: redoing the page privatization Message-ID: <20140214173033.B4B651D2524@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r736:3906e3d066b4 Date: 2014-02-14 18:30 +0100 http://bitbucket.org/pypy/stmgc/changeset/3906e3d066b4/ Log: In-progress: 
redoing the page privatization diff --git a/c7/stm/atomic.h b/c7/stm/atomic.h --- a/c7/stm/atomic.h +++ b/c7/stm/atomic.h @@ -1,12 +1,34 @@ -#if defined(__i386__) || defined(__x86_64__) +/* spin_loop() corresponds to the PAUSE instruction on x86. On + other architectures, we generate no instruction (but still need + the compiler barrier); if on another architecture you find the + corresponding instruction, feel free to add it here. +*/ + +/* write_fence() is a function that inserts a "write fence". The + goal is to make sure that past writes are really pushed to memory + before the future writes. We assume that the corresponding "read + fence" effect is done automatically by a corresponding + __sync_bool_compare_and_swap(). + + On x86, this is done automatically by the CPU; we only need a + compiler barrier (asm("memory")). + + On other architectures, we use __sync_synchronize() as a general + fall-back, but we might have more efficient alternative on some other + platforms too. +*/ + + +#if defined(__i386__) || defined(__amd64__) # define HAVE_FULL_EXCHANGE_INSN static inline void spin_loop(void) { asm("pause" : : : "memory"); } + static inline void write_fence(void) { asm("" : : : "memory"); } #else -# warn "Add a correct definition of spin_loop() for this platform?" static inline void spin_loop(void) { asm("" : : : "memory"); } + static inline void write_fence(void) { __sync_synchronize(); } #endif diff --git a/c7/stm/pagecopy.c b/c7/stm/pagecopy.c new file mode 100644 --- /dev/null +++ b/c7/stm/pagecopy.c @@ -0,0 +1,57 @@ + +static void pagecopy(void *dest, const void *src) +{ + unsigned long i; + for (i=0; i<4096/128; i++) { + asm volatile("movdqa (%0), %%xmm0\n" + "movdqa 16(%0), %%xmm1\n" + "movdqa 32(%0), %%xmm2\n" + "movdqa 48(%0), %%xmm3\n" + "movdqa %%xmm0, (%1)\n" + "movdqa %%xmm1, 16(%1)\n" + "movdqa %%xmm2, 32(%1)\n" + "movdqa %%xmm3, 48(%1)\n" + "movdqa 64(%0), %%xmm0\n" + "movdqa 80(%0), %%xmm1\n" + "movdqa 96(%0), %%xmm2\n" + "movdqa 112(%0), %%xmm3\n" + "movdqa %%xmm0, 64(%1)\n" + "movdqa %%xmm1, 80(%1)\n" + "movdqa %%xmm2, 96(%1)\n" + "movdqa %%xmm3, 112(%1)\n" + : + : "r"(src + 128*i), "r"(dest + 128*i) + : "xmm0", "xmm1", "xmm2", "xmm3", "memory"); + } +} + +#if 0 /* XXX enable if detected on the cpu */ +static void pagecopy_ymm8(void *dest, const void *src) +{ + asm volatile("0:\n" + "vmovdqa (%0), %%ymm0\n" + "vmovdqa 32(%0), %%ymm1\n" + "vmovdqa 64(%0), %%ymm2\n" + "vmovdqa 96(%0), %%ymm3\n" + "vmovdqa 128(%0), %%ymm4\n" + "vmovdqa 160(%0), %%ymm5\n" + "vmovdqa 192(%0), %%ymm6\n" + "vmovdqa 224(%0), %%ymm7\n" + "addq $256, %0\n" + "vmovdqa %%ymm0, (%1)\n" + "vmovdqa %%ymm1, 32(%1)\n" + "vmovdqa %%ymm2, 64(%1)\n" + "vmovdqa %%ymm3, 96(%1)\n" + "vmovdqa %%ymm4, 128(%1)\n" + "vmovdqa %%ymm5, 160(%1)\n" + "vmovdqa %%ymm6, 192(%1)\n" + "vmovdqa %%ymm7, 224(%1)\n" + "addq $256, %1\n" + "cmpq %2, %0\n" + "jne 0b" + : "=r"(src), "=r"(dest) + : "r"((char *)src + 4096), "0"(src), "1"(dest) + : "xmm0", "xmm1", "xmm2", "xmm3", + "xmm4", "xmm5", "xmm6", "xmm7"); +} +#endif diff --git a/c7/stm/pagecopy.h b/c7/stm/pagecopy.h new file mode 100644 --- /dev/null +++ b/c7/stm/pagecopy.h @@ -0,0 +1,2 @@ + +static void pagecopy(void *dest, const void *src); diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -58,11 +58,11 @@ ssize_t pgoff1 = pagenum; ssize_t pgoff2 = pagenum + NB_PAGES; - ssize_t localpgoff = pgoff1 + NB_PAGES * _STM_TL->thread_num; - ssize_t otherpgoff = pgoff1 + NB_PAGES * (1 - _STM_TL->thread_num); + ssize_t localpgoff = pgoff1 + 
NB_PAGES * STM_SEGMENT->segment_num; + ssize_t otherpgoff = pgoff1 + NB_PAGES * (1 - STM_SEGMENT->segment_num); - void *localpg = object_pages + localpgoff * 4096UL; - void *otherpg = object_pages + otherpgoff * 4096UL; + void *localpg = stm_object_pages + localpgoff * 4096UL; + void *otherpg = stm_object_pages + otherpgoff * 4096UL; // XXX should not use pgoff2, but instead the next unused page in // thread 2, so that after major GCs the next dirty pages are the diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -3,6 +3,7 @@ #include "stm/atomic.h" #include "stm/list.h" #include "stm/core.h" +#include "stm/pagecopy.h" #include "stm/pages.h" #include "stm/gcpage.h" #include "stm/sync.h" @@ -10,6 +11,7 @@ #include "stm/misc.c" #include "stm/list.c" +#include "stm/pagecopy.c" #include "stm/pages.c" #include "stm/prebuilt.c" #include "stm/gcpage.c" From noreply at buildbot.pypy.org Fri Feb 14 18:30:32 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Feb 2014 18:30:32 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: in-progress Message-ID: <20140214173032.A0B161D2524@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r735:35cc44d7afab Date: 2014-02-14 18:16 +0100 http://bitbucket.org/pypy/stmgc/changeset/35cc44d7afab/ Log: in-progress diff --git a/c7/stm/atomic.h b/c7/stm/atomic.h new file mode 100644 --- /dev/null +++ b/c7/stm/atomic.h @@ -0,0 +1,12 @@ + +#if defined(__i386__) || defined(__x86_64__) + +# define HAVE_FULL_EXCHANGE_INSN + static inline void spin_loop(void) { asm("pause" : : : "memory"); } + +#else + +# warn "Add a correct definition of spin_loop() for this platform?" + static inline void spin_loop(void) { asm("" : : : "memory"); } + +#endif diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -3,16 +3,33 @@ #endif +static uint8_t write_locks[READMARKER_END - READMARKER_START]; + + void _stm_write_slowpath(object_t *obj) { assert(_running_transaction()); LIST_APPEND(STM_PSEGMENT->old_objects_to_trace, obj); + obj->stm_flags |= GCFLAG_WRITE_BARRIER_CALLED; - obj->stm_flags |= GCFLAG_WRITE_BARRIER_CALLED; + /* for old objects from the same transaction, we are done now */ + if (obj_from_same_transaction(obj)) + return; + + /* otherwise, we need to privatize the pages containing the object, + if they are still SHARED_PAGE. The common case is that there is + only one page in total. */ + if (UNLIKELY((obj->stm_flags & GCFLAG_CROSS_PAGE) != 0)) { + abort(); + //... + } + else { + pages_privatize(((uintptr_t)obj) / 4096UL, 1); + } + + //... write_locks stm_read(obj); - - //... } static void reset_transaction_read_version(void) diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -6,6 +6,8 @@ #include #include +/************************************************************/ + #define NB_PAGES (1500*256) // 1500MB #define NB_SEGMENTS 2 @@ -33,17 +35,17 @@ _stm_write_slowpath() is called, and then the flag is set to say "called once already, no need to call again". */ GCFLAG_WRITE_BARRIER_CALLED = _STM_GCFLAG_WRITE_BARRIER_CALLED, - /* set if the object can be seen by all threads. If unset, we know - it is only visible from the current thread. 
*/ - //GCFLAG_ALL_THREADS = 0x04, - /* only used during collections to mark an obj as moved out of the - generation it was in */ - //GCFLAG_MOVED = 0x01, - /* objects smaller than one page and even smaller than - LARGE_OBJECT_WORDS * 8 bytes */ - //GCFLAG_SMALL = 0x02, + /* objects that are allocated crossing a page boundary have this + flag set */ + GCFLAG_CROSS_PAGE = 0x02, }; +#define CROSS_PAGE_BOUNDARY(start, stop) \ + (((uintptr_t)(start)) / 4096UL != ((uintptr_t)(stop)) / 4096UL) + + +/************************************************************/ + #define STM_PSEGMENT ((stm_priv_segment_info_t *)STM_SEGMENT) @@ -82,3 +84,7 @@ static bool _is_tl_registered(stm_thread_local_t *tl); static bool _running_transaction(void); + +static inline bool obj_from_same_transaction(object_t *obj) { + return ((stm_creation_marker_t *)(((uintptr_t)obj) >> 8))->cm != 0; +} diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -5,17 +5,38 @@ static void setup_gcpage(void) { - largemalloc_init_arena(stm_object_pages + END_NURSERY_PAGE * 4096UL, - (NB_PAGES - END_NURSERY_PAGE) * 4096UL); + char *base = stm_object_pages + END_NURSERY_PAGE * 4096UL; + uintptr_t length = (NB_PAGES - END_NURSERY_PAGE) * 4096UL; + largemalloc_init_arena(base, length); + + uninitialized_page_start = (stm_char *)(END_NURSERY_PAGE * 4096UL); + uninitialized_page_stop = (stm_char *)(NB_PAGES * 4096UL); } object_t *_stm_allocate_old(ssize_t size_rounded_up) { + /* XXX not thread-safe! */ char *addr = large_malloc(size_rounded_up); - object_t* o = (object_t *)(addr - stm_object_pages); + stm_char* o = (stm_char *)(addr - stm_object_pages); - long i; - for (i = 0; i < NB_SEGMENTS; i++) - memset(REAL_ADDRESS(get_segment_base(i), o), 0, size_rounded_up); - return o; + if (o + size_rounded_up > uninitialized_page_start) { + uintptr_t pagenum = + ((uint64_t)uninitialized_page_start) / 4096UL; + uintptr_t pagecount = + (o + size_rounded_up - uninitialized_page_start) / 4096UL + 20; + uintptr_t pagemax = + (uninitialized_page_stop - uninitialized_page_start) / 4096UL; + if (pagecount > pagemax) + pagecount = pagemax; + pages_initialize_shared(pagenum, pagecount); + + uninitialized_page_start += pagecount * 4096UL; + } + + memset(addr, 0, size_rounded_up); + + if (CROSS_PAGE_BOUNDARY(o, o + size_rounded_up)) + ((object_t *)o)->stm_flags = GCFLAG_CROSS_PAGE; + + return (object_t *)o; } diff --git a/c7/stm/gcpage.h b/c7/stm/gcpage.h new file mode 100644 --- /dev/null +++ b/c7/stm/gcpage.h @@ -0,0 +1,3 @@ + +static stm_char *uninitialized_page_start; +static stm_char *uninitialized_page_stop; diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -19,11 +19,61 @@ abort(); } } - for (i = 0; i < count; i++) + for (i = 0; i < count; i++) { + assert(flag_page_private[pagenum + i] == FREE_PAGE); flag_page_private[pagenum + i] = SHARED_PAGE; + } } static void _pages_privatize(uintptr_t pagenum, uintptr_t count) { - abort(); + assert(count == 1); /* XXX */ + +#ifdef HAVE_FULL_EXCHANGE_INSN + /* use __sync_lock_test_and_set() as a cheaper alternative to + __sync_bool_compare_and_swap(). 
*/ + int previous = __sync_lock_test_and_set(&flag_page_private[pagenum], + REMAPPING_PAGE); + assert(previous != FREE_PAGE); + if (previous == PRIVATE_PAGE) { + flag_page_private[pagenum] = PRIVATE_PAGE; + return; + } + bool was_shared = (previous == SHARED_PAGE); +#else + bool was_shared = __sync_bool_compare_and_swap(&flag_page_private[pagenum], + SHARED_PAGE, REMAPPING_PAGE); +#endif + if (!was_shared) { + while (1) { + uint8_t state = ((uint8_t volatile *)flag_page_private)[pagenum]; + if (state != REMAPPING_PAGE) { + assert(state == PRIVATE_PAGE); + break; + } + spin_loop(); + } + return; + } + + ssize_t pgoff1 = pagenum; + ssize_t pgoff2 = pagenum + NB_PAGES; + ssize_t localpgoff = pgoff1 + NB_PAGES * _STM_TL->thread_num; + ssize_t otherpgoff = pgoff1 + NB_PAGES * (1 - _STM_TL->thread_num); + + void *localpg = object_pages + localpgoff * 4096UL; + void *otherpg = object_pages + otherpgoff * 4096UL; + + // XXX should not use pgoff2, but instead the next unused page in + // thread 2, so that after major GCs the next dirty pages are the + // same as the old ones + int res = remap_file_pages(localpg, 4096, 0, pgoff2, 0); + if (res < 0) { + perror("remap_file_pages"); + abort(); + } + pagecopy(localpg, otherpg); + write_fence(); + assert(flag_page_private[pagenum] == REMAPPING_PAGE); + flag_page_private[pagenum] = PRIVATE_PAGE; } diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -3,14 +3,14 @@ /* The page is not in use. Assume that each segment sees its own copy. */ FREE_PAGE=0, - /* The page is shared by all threads. Each segment sees the same + /* The page is shared by all segments. Each segment sees the same physical page (the one that is within the segment 0 mmap address). */ SHARED_PAGE, /* Page being in the process of privatization */ REMAPPING_PAGE, - /* Page private for each thread */ + /* Page is private for each segment. 
*/ PRIVATE_PAGE, }; /* used for flag_page_private */ @@ -18,7 +18,6 @@ static uint8_t flag_page_private[NB_PAGES]; - static void _pages_privatize(uintptr_t pagenum, uintptr_t count); static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); diff --git a/c7/stm/prebuilt.c b/c7/stm/prebuilt.c --- a/c7/stm/prebuilt.c +++ b/c7/stm/prebuilt.c @@ -8,6 +8,8 @@ static uint64_t prebuilt_objects_start = 0; +/* XXX NOT TESTED, AND NOT WORKING RIGHT NOW */ + void stm_copy_prebuilt_objects(object_t *target, char *source, ssize_t size) { /* Initialize a region of 'size' bytes at the 'target' address, diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -1,8 +1,10 @@ #define _GNU_SOURCE #include "stmgc.h" +#include "stm/atomic.h" #include "stm/list.h" #include "stm/core.h" #include "stm/pages.h" +#include "stm/gcpage.h" #include "stm/sync.h" #include "stm/largemalloc.h" From noreply at buildbot.pypy.org Fri Feb 14 19:01:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Feb 2014 19:01:50 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Copy the complete logic for _stm_write_slowpath() Message-ID: <20140214180150.536591C318B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r737:c773165c8774 Date: 2014-02-14 19:01 +0100 http://bitbucket.org/pypy/stmgc/changeset/c773165c8774/ Log: Copy the complete logic for _stm_write_slowpath() diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -2,20 +2,60 @@ # error "must be compiled via stmgc.c" #endif +#include + static uint8_t write_locks[READMARKER_END - READMARKER_START]; +static void teardown_core(void) +{ + memset(write_locks, 0, sizeof(write_locks)); +} + + +static void contention_management(uint8_t current_lock_owner) +{ + /* A simple contention manager. Called when we do stm_write() + on an object, but some other thread already holds the write + lock on the same object. */ + + /* By construction it should not be possible that the owner + of the object is precisely us */ + assert(current_lock_owner != STM_PSEGMENT->write_lock_num); + + /* Who should abort here: this thread, or the other thread? */ + struct stm_priv_segment_info_s* other_pseg; + other_pseg = get_priv_segment(current_lock_owner - 1); + assert(other_pseg->write_lock_num == current_lock_owner); + + if ((STM_PSEGMENT->approximate_start_time < + other_pseg->approximate_start_time) || is_inevitable()) { + /* we are the thread that must succeed */ + other_pseg->need_abort = 1; + _stm_start_safe_point(0); + /* XXX: not good, maybe should be signalled by other thread */ + usleep(1); + _stm_stop_safe_point(0); + /* done, will retry */ + } + else { + /* we are the thread that must abort */ + stm_abort_transaction(); + } +} + void _stm_write_slowpath(object_t *obj) { assert(_running_transaction()); LIST_APPEND(STM_PSEGMENT->old_objects_to_trace, obj); - obj->stm_flags |= GCFLAG_WRITE_BARRIER_CALLED; /* for old objects from the same transaction, we are done now */ - if (obj_from_same_transaction(obj)) + if (obj_from_same_transaction(obj)) { + obj->stm_flags |= GCFLAG_WRITE_BARRIER_CALLED; return; + } /* otherwise, we need to privatize the pages containing the object, if they are still SHARED_PAGE. The common case is that there is @@ -28,8 +68,27 @@ pages_privatize(((uintptr_t)obj) / 4096UL, 1); } - //... 
write_locks + /* claim the write-lock for this object */ + do { + uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - READMARKER_START; + uint8_t lock_num = STM_PSEGMENT->write_lock_num; + uint8_t prev_owner; + prev_owner = __sync_val_compare_and_swap(&write_locks[lock_idx], + 0, lock_num); + + /* if there was no lock-holder, we are done */ + if (LIKELY(prev_owner == 0)) + break; + + /* otherwise, call the contention manager, and then possibly retry */ + contention_management(prev_owner); + } while (1); + + /* add the write-barrier-already-called flag ONLY if we succeeded in + getting the write-lock */ stm_read(obj); + obj->stm_flags |= GCFLAG_WRITE_BARRIER_CALLED; + LIST_APPEND(STM_PSEGMENT->modified_objects, obj); } static void reset_transaction_read_version(void) diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -54,6 +54,10 @@ struct stm_priv_segment_info_s { struct stm_segment_info_s pub; struct list_s *old_objects_to_trace; + struct list_s *modified_objects; + uint64_t approximate_start_time; + uint8_t write_lock_num; + uint8_t need_abort; }; static char *stm_object_pages; @@ -88,3 +92,9 @@ static inline bool obj_from_same_transaction(object_t *obj) { return ((stm_creation_marker_t *)(((uintptr_t)obj) >> 8))->cm != 0; } + +static inline bool is_inevitable(void) { + return STM_SEGMENT->jmpbuf_ptr == NULL; +} + +static void teardown_core(void); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -60,9 +60,12 @@ PROT_NONE); struct stm_priv_segment_info_s *pr = get_priv_segment(i); + assert(i + 1 < 256); + pr->write_lock_num = i + 1; pr->pub.segment_num = i; pr->pub.segment_base = segment_base; pr->old_objects_to_trace = list_create(); + pr->modified_objects = list_create(); } /* Make the nursery pages shared. 
The other pages are @@ -100,6 +103,7 @@ memset(flag_page_private, 0, sizeof(flag_page_private)); + teardown_core(); teardown_sync(); } diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -8,6 +8,7 @@ struct { sem_t semaphore; uint8_t in_use[NB_SEGMENTS + 1]; /* 1 if running a pthread */ + uint64_t global_time; /* approximate */ }; char reserved[64]; } segments_ctl __attribute__((aligned(64))); @@ -69,6 +70,9 @@ exit: assert(STM_SEGMENT->running_thread == NULL); STM_SEGMENT->running_thread = tl; + + /* global_time is approximate -> no synchronization required */ + STM_PSEGMENT->approximate_start_time = ++segments_ctl.global_time; } static void release_thread_segment(stm_thread_local_t *tl) From noreply at buildbot.pypy.org Fri Feb 14 19:19:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Feb 2014 19:19:28 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Resetting the creation markers between transactions Message-ID: <20140214181928.D77E21C318B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r738:a26f2c58f57e Date: 2014-02-14 19:19 +0100 http://bitbucket.org/pypy/stmgc/changeset/a26f2c58f57e/ Log: Resetting the creation markers between transactions diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -130,6 +130,8 @@ reset_transaction_read_version(); assert(list_is_empty(STM_PSEGMENT->old_objects_to_trace)); + assert(list_is_empty(STM_PSEGMENT->modified_objects)); + assert(list_is_empty(STM_PSEGMENT->creation_markers)); } @@ -137,6 +139,7 @@ { stm_thread_local_t *tl = STM_SEGMENT->running_thread; release_thread_segment(tl); + reset_all_creation_markers(); } void stm_abort_transaction(void) @@ -144,6 +147,7 @@ stm_thread_local_t *tl = STM_SEGMENT->running_thread; stm_jmpbuf_t *jmpbuf_ptr = STM_SEGMENT->jmpbuf_ptr; release_thread_segment(tl); + reset_all_creation_markers(); assert(jmpbuf_ptr != NULL); assert(jmpbuf_ptr != (stm_jmpbuf_t *)-1); /* for tests only */ diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -55,6 +55,7 @@ struct stm_segment_info_s pub; struct list_s *old_objects_to_trace; struct list_s *modified_objects; + struct list_s *creation_markers; uint64_t approximate_start_time; uint8_t write_lock_num; uint8_t need_abort; diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -5,12 +5,14 @@ static void setup_gcpage(void) { + /* NB. the very last page is not used, which allows a speed-up in + reset_all_creation_markers() */ char *base = stm_object_pages + END_NURSERY_PAGE * 4096UL; - uintptr_t length = (NB_PAGES - END_NURSERY_PAGE) * 4096UL; + uintptr_t length = (NB_PAGES - END_NURSERY_PAGE - 1) * 4096UL; largemalloc_init_arena(base, length); uninitialized_page_start = (stm_char *)(END_NURSERY_PAGE * 4096UL); - uninitialized_page_stop = (stm_char *)(NB_PAGES * 4096UL); + uninitialized_page_stop = (stm_char *)((NB_PAGES - 1) * 4096UL); } object_t *_stm_allocate_old(ssize_t size_rounded_up) diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -49,6 +49,37 @@ return (uintptr_t)obj < NURSERY_START + NURSERY_SIZE; } +static void set_creation_markers(stm_char *p, uint64_t size) +{ + /* Set the creation markers to 0xff for all lines from p to p+size. + Both p and size should be aligned to NURSERY_LINE. 
*/ + + assert((((uintptr_t)p) & (NURSERY_LINE - 1)) == 0); + assert((size & (NURSERY_LINE - 1)) == 0); + + char *addr = REAL_ADDRESS(STM_SEGMENT->segment_base, + ((uintptr_t)p) >> NURSERY_LINE_SHIFT); + memset(addr, 0xff, size >> NURSERY_LINE_SHIFT); + + LIST_APPEND(STM_PSEGMENT->creation_markers, addr); +} + +static void reset_all_creation_markers(void) +{ + /* Note that the page 'NB_PAGES - 1' is not actually used. This + ensures that the creation markers always end with some zeroes. + We reset the markers 8 at a time, by writing null integers + until we reach a place that is already null. + */ + LIST_FOREACH_R(STM_PSEGMENT->creation_markers, uintptr_t, ({ + uint64_t *p = (uint64_t *)(item & ~7); + while (*p != 0) + *p++ = 0; + })); + + list_clear(STM_PSEGMENT->creation_markers); +} + #define NURSERY_ALIGN(bytes) \ (((bytes) + NURSERY_LINE - 1) & ~(NURSERY_LINE - 1)) @@ -76,10 +107,10 @@ NURSERY_SECTION_SIZE); STM_SEGMENT->nursery_current = p + size_rounded_up; STM_SEGMENT->nursery_section_end = (uintptr_t)p + NURSERY_SECTION_SIZE; + /* Also fill the corresponding creation markers with 0xff. */ - memset(REAL_ADDRESS(STM_SEGMENT->segment_base, - ((uintptr_t)p) >> NURSERY_LINE_SHIFT), - 0xff, NURSERY_SECTION_SIZE >> NURSERY_LINE_SHIFT); + set_creation_markers(p, NURSERY_SECTION_SIZE); + return p; } abort(); diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h new file mode 100644 --- /dev/null +++ b/c7/stm/nursery.h @@ -0,0 +1,2 @@ + +static void reset_all_creation_markers(void); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -66,6 +66,7 @@ pr->pub.segment_base = segment_base; pr->old_objects_to_trace = list_create(); pr->modified_objects = list_create(); + pr->creation_markers = list_create(); } /* Make the nursery pages shared. 
The other pages are diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -8,6 +8,7 @@ #include "stm/gcpage.h" #include "stm/sync.h" #include "stm/largemalloc.h" +#include "stm/nursery.h" #include "stm/misc.c" #include "stm/list.c" From noreply at buildbot.pypy.org Fri Feb 14 21:12:34 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 14 Feb 2014 21:12:34 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: maybe less work for the typical path Message-ID: <20140214201234.D46681C1154@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r69139:47afe1f67e8f Date: 2014-02-14 12:03 -0800 http://bitbucket.org/pypy/pypy/changeset/47afe1f67e8f/ Log: maybe less work for the typical path diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -498,13 +498,11 @@ return float(self.intval) def int(self, space): - if (type(self) is not W_IntObject and - space.is_overloaded(self, space.w_int, '__int__')): - return W_Root.int(self, space) - if space.is_w(space.type(self), space.w_int): + if type(self) is W_IntObject: return self - a = self.intval - return space.newint(a) + if not space.is_overloaded(self, space.w_int, '__int__'): + return space.newint(self.intval) + return W_Root.int(self, space) def _recover_with_smalllong(space): From noreply at buildbot.pypy.org Fri Feb 14 21:12:36 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 14 Feb 2014 21:12:36 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: cleanup Message-ID: <20140214201236.058B91C1154@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r69140:a7974c1a5d68 Date: 2014-02-14 12:03 -0800 http://bitbucket.org/pypy/pypy/changeset/a7974c1a5d68/ Log: cleanup diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -234,7 +234,7 @@ def descr_pos(self, space): return self.int(space) - descr_trunc = descr_pos # XX: descr_index/conjugate + descr_index = descr_trunc = descr_conjugate = descr_pos def descr_neg(self, space): a = self.int_w(space) @@ -251,9 +251,6 @@ pos = self.int_w(space) >= 0 return self.int(space) if pos else self.descr_neg(space) - def descr_index(self, space): - return self.int(space) - def descr_float(self, space): a = self.int_w(space) x = float(a) @@ -268,9 +265,6 @@ def descr_getnewargs(self, space): return space.newtuple([wrapint(space, self.int_w(space))]) - def descr_conjugate(self, space): - return self.int(space) - def descr_bit_length(self, space): val = self.int_w(space) if val < 0: From noreply at buildbot.pypy.org Fri Feb 14 21:12:33 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 14 Feb 2014 21:12:33 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: improve interpreted (non-jit) perf: Message-ID: <20140214201233.7D5071C1154@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r69138:13c6b40389d3 Date: 2014-02-14 12:02 -0800 http://bitbucket.org/pypy/pypy/changeset/13c6b40389d3/ Log: improve interpreted (non-jit) perf: o specialize W_IntObject methods w/ import_from_mixin o try to fastpath pure int binops diff --git a/pypy/objspace/std/boolobject.py b/pypy/objspace/std/boolobject.py --- a/pypy/objspace/std/boolobject.py +++ b/pypy/objspace/std/boolobject.py @@ -1,15 +1,17 @@ import operator +from rpython.rlib.objectmodel import 
import_from_mixin from rpython.rlib.rarithmetic import r_uint from rpython.rlib.rbigint import rbigint from rpython.tool.sourcetools import func_renamer, func_with_new_name from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec -from pypy.objspace.std.intobject import W_AbstractIntObject +from pypy.objspace.std.intobject import IntMethods, W_AbstractIntObject from pypy.objspace.std.stdtypedef import StdTypeDef class W_BoolObject(W_AbstractIntObject): + import_from_mixin(IntMethods) _immutable_fields_ = ['boolval'] diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -9,7 +9,7 @@ import sys from rpython.rlib import jit -from rpython.rlib.objectmodel import instantiate, specialize +from rpython.rlib.objectmodel import instantiate, import_from_mixin, specialize from rpython.rlib.rarithmetic import ( LONG_BIT, is_valid_int, ovfcheck, r_uint, string_to_int) from rpython.rlib.rbigint import rbigint @@ -29,27 +29,197 @@ from pypy.objspace.std.stdtypedef import StdTypeDef +SENTINEL = object() + + class W_AbstractIntObject(W_Root): __slots__ = () def int(self, space): """x.__int__() <==> int(x)""" - raise NotImplementedError + + def descr_format(self, space, w_format_spec): + pass def descr_coerce(self, space, w_other): """x.__coerce__(y) <==> coerce(x, y)""" + + def descr_pow(self, space, w_exponent, w_modulus=None): + """x.__pow__(y[, z]) <==> pow(x, y[, z])""" + descr_rpow = func_with_new_name(descr_pow, 'descr_rpow') + descr_rpow.__doc__ = "y.__rpow__(x[, z]) <==> pow(x, y[, z])" + + def _abstract_unaryop(opname, doc=SENTINEL): + if doc is SENTINEL: + doc = 'x.__%s__() <==> %s(x)' % (opname, opname) + @func_renamer('descr_' + opname) + def descr_unaryop(self, space): + pass + descr_unaryop.__doc__ = doc + return descr_unaryop + + descr_repr = _abstract_unaryop('repr') + descr_str = _abstract_unaryop('str') + + descr_conjugate = _abstract_unaryop( + 'conjugate', "Returns self, the complex conjugate of any int.") + descr_bit_length = _abstract_unaryop('bit_length', """\ + int.bit_length() -> int + + Number of bits necessary to represent self in binary. 
+ >>> bin(37) + '0b100101' + >>> (37).bit_length() + 6""") + descr_hash = _abstract_unaryop('hash') + descr_oct = _abstract_unaryop('oct') + descr_hex = _abstract_unaryop('hex') + descr_getnewargs = _abstract_unaryop('getnewargs', None) + + descr_long = _abstract_unaryop('long') + descr_index = _abstract_unaryop( + 'index', "x[y:z] <==> x[y.__index__():z.__index__()]") + descr_trunc = _abstract_unaryop('trunc', + "Truncating an Integral returns itself.") + descr_float = _abstract_unaryop('float') + + descr_pos = _abstract_unaryop('pos', "x.__pos__() <==> +x") + descr_neg = _abstract_unaryop('neg', "x.__neg__() <==> -x") + descr_abs = _abstract_unaryop('abs') + descr_nonzero = _abstract_unaryop('nonzero', "x.__nonzero__() <==> x != 0") + descr_invert = _abstract_unaryop('invert', "x.__invert__() <==> ~x") + + def _abstract_cmpop(opname): + @func_renamer('descr_' + opname) + def descr_cmp(self, space, w_other): + pass + descr_cmp.__doc__ = 'x.__%s__(y) <==> x%sy' % (opname, CMP_OPS[opname]) + return descr_cmp + + descr_lt = _abstract_cmpop('lt') + descr_le = _abstract_cmpop('le') + descr_eq = _abstract_cmpop('eq') + descr_ne = _abstract_cmpop('ne') + descr_gt = _abstract_cmpop('gt') + descr_ge = _abstract_cmpop('ge') + + def _abstract_binop(opname): + oper = BINARY_OPS.get(opname) + if oper == '%': + oper = '%%' + oper = '%s(%%s, %%s)' % opname if not oper else '%%s%s%%s' % oper + @func_renamer('descr_' + opname) + def descr_binop(self, space, w_other): + pass + descr_binop.__doc__ = "x.__%s__(y) <==> %s" % (opname, + oper % ('x', 'y')) + descr_rbinop = func_with_new_name(descr_binop, 'descr_r' + opname) + descr_rbinop.__doc__ = "x.__r%s__(y) <==> %s" % (opname, + oper % ('y', 'x')) + return descr_binop, descr_rbinop + + descr_add, descr_radd = _abstract_binop('add') + descr_sub, descr_rsub = _abstract_binop('sub') + descr_mul, descr_rmul = _abstract_binop('mul') + + descr_and, descr_rand = _abstract_binop('and') + descr_or, descr_ror = _abstract_binop('or') + descr_xor, descr_rxor = _abstract_binop('xor') + + descr_lshift, descr_rlshift = _abstract_binop('lshift') + descr_rshift, descr_rrshift = _abstract_binop('rshift') + + descr_floordiv, descr_rfloordiv = _abstract_binop('floordiv') + descr_div, descr_rdiv = _abstract_binop('div') + descr_truediv, descr_rtruediv = _abstract_binop('truediv') + descr_mod, descr_rmod = _abstract_binop('mod') + descr_divmod, descr_rdivmod = _abstract_binop('divmod') + + def descr_get_numerator(self, space): + return self.int(space) + descr_get_real = descr_get_numerator + + def descr_get_denominator(self, space): + return wrapint(space, 1) + + def descr_get_imag(self, space): + return wrapint(space, 0) + + +def _floordiv(space, x, y): + try: + z = ovfcheck(x // y) + except ZeroDivisionError: + raise oefmt(space.w_ZeroDivisionError, "integer division by zero") + return wrapint(space, z) +_div = _floordiv + + +def _truediv(space, x, y): + a = float(x) + b = float(y) + if b == 0.0: + raise oefmt(space.w_ZeroDivisionError, "division by zero") + return space.wrap(a / b) + + +def _mod(space, x, y): + try: + z = ovfcheck(x % y) + except ZeroDivisionError: + raise oefmt(space.w_ZeroDivisionError, "integer modulo by zero") + return wrapint(space, z) + + +def _divmod(space, x, y): + try: + z = ovfcheck(x // y) + except ZeroDivisionError: + raise oefmt(space.w_ZeroDivisionError, "integer divmod by zero") + # no overflow possible + m = x % y + w = space.wrap + return space.newtuple([w(z), w(m)]) + + +def _lshift(space, a, b): + if r_uint(b) < LONG_BIT: # 0 <= b < 
LONG_BIT + c = ovfcheck(a << b) + return wrapint(space, c) + if b < 0: + raise oefmt(space.w_ValueError, "negative shift count") + # b >= LONG_BIT + if a == 0: + return wrapint(space, a) + raise OverflowError + + +def _rshift(space, a, b): + if r_uint(b) >= LONG_BIT: # not (0 <= b < LONG_BIT) + if b < 0: + raise oefmt(space.w_ValueError, "negative shift count") + # b >= LONG_BIT + if a == 0: + return wrapint(space, a) + a = -1 if a < 0 else 0 + else: + a = a >> b + return wrapint(space, a) + + +class IntMethods(object): + + def descr_coerce(self, space, w_other): if not isinstance(w_other, W_AbstractIntObject): return space.w_NotImplemented return space.newtuple([self, w_other]) def descr_long(self, space): - """x.__long__() <==> long(x)""" from pypy.objspace.std.longobject import W_LongObject return W_LongObject.fromint(space, self.int_w(space)) def descr_hash(self, space): - """x.__hash__() <==> hash(x)""" # unlike CPython, we don't special-case the value -1 in most of # our hash functions, so there is not much sense special-casing # it here either. Make sure this is consistent with the hash of @@ -57,22 +227,17 @@ return self.int(space) def descr_nonzero(self, space): - """x.__nonzero__() <==> x != 0""" - return space.newbool(space.int_w(self) != 0) + return space.newbool(self.int_w(space) != 0) def descr_invert(self, space): - """x.__invert__() <==> ~x""" - return wrapint(space, ~space.int_w(self)) + return wrapint(space, ~self.int_w(space)) def descr_pos(self, space): - """x.__pos__() <==> +x""" return self.int(space) - descr_trunc = func_with_new_name(descr_pos, 'descr_trunc') - descr_trunc.__doc__ = 'Truncating an Integral returns itself.' + descr_trunc = descr_pos # XX: descr_index/conjugate def descr_neg(self, space): - """x.__neg__() <==> -x""" - a = space.int_w(self) + a = self.int_w(space) try: x = ovfcheck(-a) except OverflowError: @@ -83,45 +248,31 @@ return wrapint(space, x) def descr_abs(self, space): - """x.__abs__() <==> abs(x)""" - pos = space.int_w(self) >= 0 + pos = self.int_w(space) >= 0 return self.int(space) if pos else self.descr_neg(space) def descr_index(self, space): - """x[y:z] <==> x[y.__index__():z.__index__()]""" return self.int(space) def descr_float(self, space): - """x.__float__() <==> float(x)""" - a = space.int_w(self) + a = self.int_w(space) x = float(a) return space.newfloat(x) def descr_oct(self, space): - """x.__oct__() <==> oct(x)""" - return space.wrap(oct(space.int_w(self))) + return space.wrap(oct(self.int_w(space))) def descr_hex(self, space): - """x.__hex__() <==> hex(x)""" - return space.wrap(hex(space.int_w(self))) + return space.wrap(hex(self.int_w(space))) def descr_getnewargs(self, space): - return space.newtuple([wrapint(space, space.int_w(self))]) + return space.newtuple([wrapint(space, self.int_w(space))]) def descr_conjugate(self, space): - """Returns self, the complex conjugate of any int.""" - return space.int(self) + return self.int(space) def descr_bit_length(self, space): - """int.bit_length() -> int - - Number of bits necessary to represent self in binary. 
- >>> bin(37) - '0b100101' - >>> (37).bit_length() - 6 - """ - val = space.int_w(self) + val = self.int_w(space) if val < 0: val = -val bits = 0 @@ -131,35 +282,24 @@ return space.wrap(bits) def descr_repr(self, space): - """x.__repr__() <==> repr(x)""" res = str(self.int_w(space)) return space.wrap(res) - descr_str = func_with_new_name(descr_repr, 'descr_str') - descr_str.__doc__ = "x.__str__() <==> str(x)" + descr_str = descr_repr def descr_format(self, space, w_format_spec): return newformat.run_formatter(space, w_format_spec, "format_int_or_long", self, newformat.INT_KIND) - def descr_get_denominator(self, space): - return space.wrap(1) - - def descr_get_imag(self, space): - return space.wrap(0) - - descr_get_numerator = descr_get_real = descr_conjugate - @unwrap_spec(w_modulus=WrappedDefault(None)) def descr_pow(self, space, w_exponent, w_modulus=None): - """x.__pow__(y[, z]) <==> pow(x, y[, z])""" if not isinstance(w_exponent, W_AbstractIntObject): return space.w_NotImplemented if space.is_none(w_modulus): z = 0 elif isinstance(w_modulus, W_AbstractIntObject): - z = space.int_w(w_modulus) + z = w_modulus.int_w(space) if z == 0: raise oefmt(space.w_ValueError, "pow() 3rd argument cannot be 0") @@ -169,8 +309,8 @@ # handle it ourselves return self._ovfpow2long(space, w_exponent, w_modulus) - x = space.int_w(self) - y = space.int_w(w_exponent) + x = self.int_w(space) + y = w_exponent.int_w(space) try: result = _pow_impl(space, x, y, z) except (OverflowError, ValueError): @@ -179,7 +319,6 @@ @unwrap_spec(w_modulus=WrappedDefault(None)) def descr_rpow(self, space, w_base, w_modulus=None): - """y.__rpow__(x[, z]) <==> pow(x, y[, z])""" if not isinstance(w_base, W_AbstractIntObject): return space.w_NotImplemented return w_base.descr_pow(space, self, w_modulus) @@ -195,12 +334,14 @@ op = getattr(operator, opname) @func_renamer('descr_' + opname) def descr_cmp(self, space, w_other): - if not isinstance(w_other, W_AbstractIntObject): + i = self.int_w(space) + if isinstance(w_other, W_IntObject): + j = w_other.intval + elif isinstance(w_other, W_AbstractIntObject): + j = w_other.int_w(space) + else: return space.w_NotImplemented - i = space.int_w(self) - j = space.int_w(w_other) return space.newbool(op(i, j)) - descr_cmp.__doc__ = 'x.__%s__(y) <==> x%sy' % (opname, CMP_OPS[opname]) return descr_cmp descr_lt = _make_descr_cmp('lt') @@ -213,17 +354,16 @@ def _make_generic_descr_binop(opname, ovf=True): op = getattr(operator, opname + '_' if opname in ('and', 'or') else opname) - oper = BINARY_OPS.get(opname) - doc = "x.__%s__(y) <==> x%sy" % (opname, oper) - rdoc = "x.__r%s__(y) <==> y%sx" % (opname, oper) @func_renamer('descr_' + opname) def descr_binop(self, space, w_other): - if not isinstance(w_other, W_AbstractIntObject): + x = self.int_w(space) + if isinstance(w_other, W_IntObject): + y = w_other.intval + elif isinstance(w_other, W_AbstractIntObject): + y = w_other.int_w(space) + else: return space.w_NotImplemented - - x = space.int_w(self) - y = space.int_w(w_other) if ovf: try: z = ovfcheck(op(x, y)) @@ -232,27 +372,28 @@ else: z = op(x, y) return wrapint(space, z) - descr_binop.__doc__ = doc if opname in COMMUTATIVE_OPS: - descr_rbinop = func_with_new_name(descr_binop, 'descr_r' + opname) - else: - @func_renamer('descr_r' + opname) - def descr_rbinop(self, space, w_other): - if not isinstance(w_other, W_AbstractIntObject): - return space.w_NotImplemented + return descr_binop, func_with_new_name(descr_binop, + 'descr_r' + opname) - x = space.int_w(self) - y = space.int_w(w_other) - if 
ovf: - try: - z = ovfcheck(op(y, x)) - except OverflowError: - return _ovf2long(space, opname, w_other, self) - else: - z = op(y, x) - return wrapint(space, z) - descr_rbinop.__doc__ = rdoc + @func_renamer('descr_r' + opname) + def descr_rbinop(self, space, w_other): + x = self.int_w(space) + if isinstance(w_other, W_IntObject): + y = w_other.intval + elif isinstance(w_other, W_AbstractIntObject): + y = w_other.int_w(space) + else: + return space.w_NotImplemented + if ovf: + try: + z = ovfcheck(op(y, x)) + except OverflowError: + return _ovf2long(space, opname, w_other, self) + else: + z = op(y, x) + return wrapint(space, z) return descr_binop, descr_rbinop @@ -266,116 +407,53 @@ def _make_descr_binop(func, ovf=True): opname = func.__name__[1:] - oper = BINARY_OPS.get(opname) - if oper == '%': - oper = '%%' - oper = '%s(%%s, %%s)' % opname if not oper else '%%s%s%%s' % oper @func_renamer('descr_' + opname) def descr_binop(self, space, w_other): - if not isinstance(w_other, W_AbstractIntObject): + x = self.int_w(space) + if isinstance(w_other, W_IntObject): + y = w_other.intval + elif isinstance(w_other, W_AbstractIntObject): + y = w_other.int_w(space) + else: return space.w_NotImplemented if ovf: try: - return func(self, space, w_other) + return func(space, x, y) except OverflowError: return _ovf2long(space, opname, self, w_other) else: - return func(self, space, w_other) - descr_binop.__doc__ = "x.__%s__(y) <==> %s" % (opname, - oper % ('x', 'y')) + return func(space, x, y) @func_renamer('descr_r' + opname) def descr_rbinop(self, space, w_other): - if not isinstance(w_other, W_AbstractIntObject): + x = self.int_w(space) + if isinstance(w_other, W_IntObject): + y = w_other.intval + elif isinstance(w_other, W_AbstractIntObject): + y = w_other.int_w(space) + else: return space.w_NotImplemented if ovf: try: - return func(w_other, space, self) + return func(space, y, x) except OverflowError: return _ovf2long(space, opname, w_other, self) else: - return func(w_other, space, self) - descr_rbinop.__doc__ = "x.__r%s__(y) <==> %s" % (opname, - oper % ('y', 'x')) - + return func(space, y, x) return descr_binop, descr_rbinop - def _floordiv(self, space, w_other): - x = space.int_w(self) - y = space.int_w(w_other) - try: - z = ovfcheck(x // y) - except ZeroDivisionError: - raise oefmt(space.w_ZeroDivisionError, "integer division by zero") - return wrapint(space, z) descr_floordiv, descr_rfloordiv = _make_descr_binop(_floordiv) - - _div = func_with_new_name(_floordiv, '_div') descr_div, descr_rdiv = _make_descr_binop(_div) - - def _truediv(self, space, w_other): - x = float(space.int_w(self)) - y = float(space.int_w(w_other)) - if y == 0.0: - raise oefmt(space.w_ZeroDivisionError, "division by zero") - return space.wrap(x / y) descr_truediv, descr_rtruediv = _make_descr_binop(_truediv, ovf=False) - - def _mod(self, space, w_other): - x = space.int_w(self) - y = space.int_w(w_other) - try: - z = ovfcheck(x % y) - except ZeroDivisionError: - raise oefmt(space.w_ZeroDivisionError, "integer modulo by zero") - return wrapint(space, z) descr_mod, descr_rmod = _make_descr_binop(_mod) - - def _divmod(self, space, w_other): - x = space.int_w(self) - y = space.int_w(w_other) - try: - z = ovfcheck(x // y) - except ZeroDivisionError: - raise oefmt(space.w_ZeroDivisionError, "integer divmod by zero") - # no overflow possible - m = x % y - w = space.wrap - return space.newtuple([w(z), w(m)]) descr_divmod, descr_rdivmod = _make_descr_binop(_divmod) - - def _lshift(self, space, w_other): - a = space.int_w(self) - b = 
space.int_w(w_other) - if r_uint(b) < LONG_BIT: # 0 <= b < LONG_BIT - c = ovfcheck(a << b) - return wrapint(space, c) - if b < 0: - raise oefmt(space.w_ValueError, "negative shift count") - # b >= LONG_BIT - if a == 0: - return self.int(space) - raise OverflowError descr_lshift, descr_rlshift = _make_descr_binop(_lshift) - - def _rshift(self, space, w_other): - a = space.int_w(self) - b = space.int_w(w_other) - if r_uint(b) >= LONG_BIT: # not (0 <= b < LONG_BIT) - if b < 0: - raise oefmt(space.w_ValueError, "negative shift count") - # b >= LONG_BIT - if a == 0: - return self.int(space) - a = -1 if a < 0 else 0 - else: - a = a >> b - return wrapint(space, a) descr_rshift, descr_rrshift = _make_descr_binop(_rshift, ovf=False) class W_IntObject(W_AbstractIntObject): + import_from_mixin(IntMethods) __slots__ = 'intval' _immutable_fields_ = ['intval'] @@ -393,7 +471,7 @@ return False if self.user_overridden_class or w_other.user_overridden_class: return self is w_other - return space.int_w(self) == space.int_w(w_other) + return self.int_w(space) == w_other.int_w(space) def immutable_unique_id(self, space): if self.user_overridden_class: @@ -634,8 +712,8 @@ W_AbstractIntObject.descr_get_imag, doc="the imaginary part of a complex number"), - __repr__ = interp2app(W_AbstractIntObject.descr_repr), - __str__ = interp2app(W_AbstractIntObject.descr_str), + __repr__ = interpindirect2app(W_AbstractIntObject.descr_repr), + __str__ = interpindirect2app(W_AbstractIntObject.descr_str), conjugate = interpindirect2app(W_AbstractIntObject.descr_conjugate), bit_length = interpindirect2app(W_AbstractIntObject.descr_bit_length), From noreply at buildbot.pypy.org Fri Feb 14 22:07:41 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 14 Feb 2014 22:07:41 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Fixes, and issues Message-ID: <20140214210741.2A6E91C0178@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r739:8f59f0e3f05b Date: 2014-02-14 22:07 +0100 http://bitbucket.org/pypy/stmgc/changeset/8f59f0e3f05b/ Log: Fixes, and issues diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -20,7 +20,7 @@ lock on the same object. */ /* By construction it should not be possible that the owner - of the object is precisely us */ + of the object is already us */ assert(current_lock_owner != STM_PSEGMENT->write_lock_num); /* Who should abort here: this thread, or the other thread? */ @@ -31,6 +31,8 @@ if ((STM_PSEGMENT->approximate_start_time < other_pseg->approximate_start_time) || is_inevitable()) { /* we are the thread that must succeed */ + XXX /* don't go here if the other thread is inevitable! */ + ... 
other_pseg->need_abort = 1; _stm_start_safe_point(0); /* XXX: not good, maybe should be signalled by other thread */ @@ -68,6 +70,10 @@ pages_privatize(((uintptr_t)obj) / 4096UL, 1); } + /* do a read-barrier *before* the safepoints that may be issued in + contention_management() */ + stm_read(obj); + /* claim the write-lock for this object */ do { uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - READMARKER_START; @@ -86,7 +92,6 @@ /* add the write-barrier-already-called flag ONLY if we succeeded in getting the write-lock */ - stm_read(obj); obj->stm_flags |= GCFLAG_WRITE_BARRIER_CALLED; LIST_APPEND(STM_PSEGMENT->modified_objects, obj); } @@ -135,9 +140,53 @@ } +static void push_modified_to_other_threads() +{ + long remote_num = 1 - STM_SEGMENT->segment_num; + char *local_base = STM_SEGMENT->segment_base; + char *remote_base = get_segment_base(remote_num); + bool conflicted = false; + uint8_t remote_version = get_segment(remote_num)->transaction_read_version; + + LIST_FOREACH_R( + STM_PSEGMENT->modified_objects, + object_t * /*item*/, + ({ + if (!conflicted) + conflicted = was_read_remote(remote_base, item, + remote_version); + + /* clear the write-lock */ + uintptr_t lock_idx = (((uintptr_t)item) >> 4) - READMARKER_START; + assert(write_locks[lock_idx] == _STM_TL->thread_num + 1); + write_locks[lock_idx] = 0; + + _stm_move_object(item, + REAL_ADDRESS(local_base, item), + REAL_ADDRESS(remote_base, item)); + })); + + list_clear(STM_PSEGMENT->modified_objects); + + if (conflicted) { + struct _thread_local1_s *remote_TL = (struct _thread_local1_s *) + REAL_ADDRESS(remote_base, _STM_TL); + remote_TL->need_abort = 1; + } +} + void stm_commit_transaction(void) { stm_thread_local_t *tl = STM_SEGMENT->running_thread; + + /* cannot abort any more */ + STM_SEGMENT->jmpbuf_ptr = NULL; + + ... + + /* copy modified object versions to other threads */ + push_modified_to_other_threads(); + release_thread_segment(tl); reset_all_creation_markers(); } @@ -146,6 +195,7 @@ { stm_thread_local_t *tl = STM_SEGMENT->running_thread; stm_jmpbuf_t *jmpbuf_ptr = STM_SEGMENT->jmpbuf_ptr; + STM_SEGMENT->need_abort = 0; release_thread_segment(tl); reset_all_creation_markers(); diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -46,3 +46,11 @@ return !!((((stm_creation_marker_t *)(((uintptr_t)obj) >> 8))->cm | obj->stm_flags) & _STM_GCFLAG_WRITE_BARRIER_CALLED); } + +static inline bool was_read_remote(char *base, object_t *obj, + uint8_t other_transaction_read_version) +{ + struct read_marker_s *marker = (struct read_marker_s *) + (base + (((uintptr_t)obj) >> 4)); + return (marker->rm == other_transaction_read_version); +} diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -71,11 +71,14 @@ We reset the markers 8 at a time, by writing null integers until we reach a place that is already null. */ - LIST_FOREACH_R(STM_PSEGMENT->creation_markers, uintptr_t, ({ - uint64_t *p = (uint64_t *)(item & ~7); - while (*p != 0) - *p++ = 0; - })); + LIST_FOREACH_R( + STM_PSEGMENT->creation_markers, + uintptr_t /*item*/, + ({ + uint64_t *p = (uint64_t *)(item & ~7); + while (*p != 0) + *p++ = 0; + })); list_clear(STM_PSEGMENT->creation_markers); } diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -130,10 +130,12 @@ }; /* The read barrier must be called whenever the object 'obj' is read. - It is not required to call it before reading: it can be called - during or after too, as long as we are in the same transaction. 
- If we might have finished the transaction and started the next - one, then stm_read() needs to be called again. + It is not required to call it before reading: it can be delayed for a + bit, but we must still be in the same "scope": no allocation, no + transaction commit, nothing that can potentially collect or do a safe + point (like stm_write() on a different object). Also, if we might + have finished the transaction and started the next one, then + stm_read() needs to be called again. */ static inline void stm_read(object_t *obj) { @@ -150,7 +152,7 @@ static inline void stm_write(object_t *obj) { /* this is: - 'if (ct == 0 && (stm_flags & WRITE_BARRIER_CALLED) == 0)' + 'if (cm == 0 && (stm_flags & WRITE_BARRIER_CALLED) == 0)' assuming that 'cm' is either 0 (not created in current transaction) or 0xff (created in current transaction) */ if (UNLIKELY(!((((stm_creation_marker_t *)(((uintptr_t)obj) >> 8))->cm | From noreply at buildbot.pypy.org Sat Feb 15 00:23:45 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 15 Feb 2014 00:23:45 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: merge default Message-ID: <20140214232345.46EC11C1154@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r69141:3f9dc297088e Date: 2014-02-14 15:22 -0800 http://bitbucket.org/pypy/pypy/changeset/3f9dc297088e/ Log: merge default diff --git a/include/PyPy.h b/include/PyPy.h new file mode 100644 --- /dev/null +++ b/include/PyPy.h @@ -0,0 +1,54 @@ +#ifndef _PYPY_H_ +#define _PYPY_H_ + +/* This header is meant to be included in programs that use PyPy as an + embedded library. */ + +#ifdef __cplusplus +extern "C" { +#endif + + +/* You should call this first once. */ +void rpython_startup_code(void); + + +/* Initialize the home directory of PyPy. It is necessary to call this. + + Call it with "home" being the file name of the libpypy.so, for + example; it will be used as a starting point when searching for the + lib-python and lib_pypy directories. They are searched from + "home/..", "home/../..", etc. Returns 0 if everything was fine. If + an error occurs, returns 1 and (if verbose != 0) prints some + information to stderr. + */ +int pypy_setup_home(char *home, int verbose); + + +/* If your program has multiple threads, then you need to call + pypy_init_threads() once at init time, and then pypy_thread_attach() + once in each other thread that just started and in which you want to + run Python code (including via callbacks, see below). + */ +void pypy_init_threads(void); +void pypy_thread_attach(void); + + +/* The main entry point: executes "source" as plain Python code. + Returns 0 if everything was fine. If a Python exception is + uncaught, it is printed to stderr and 1 is returned. + + Usually, the Python code from "source" should use cffi to fill in + global variables of "function pointer" type in your program. Use + cffi callbacks to do so. Once it is done, there is no need to call + pypy_execute_source() any more: from C, you call directly the + functions (which are "callbacks" from the point of view of Python). + */ +int pypy_execute_source(char *source); + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -103,8 +103,7 @@ .. _`extension modules`: cpython_differences.html#extension-modules .. _`cpython differences`: cpython_differences.html -.. _`compatibility wiki`: -.. https://bitbucket.org/pypy/compatibility/wiki/Home +.. 
_`compatibility wiki`: https://bitbucket.org/pypy/compatibility/wiki/Home
 .. _cffi: http://cffi.readthedocs.org/
 ---------------------------------
diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst
--- a/pypy/doc/project-ideas.rst
+++ b/pypy/doc/project-ideas.rst
@@ -142,32 +142,17 @@
 * `hg`
-Experiment (again) with LLVM backend for RPython compilation
--------------------------------------------------------------
-
-We already tried working with LLVM and at the time, LLVM was not mature enough
-for our needs. It's possible that this has changed, reviving the LLVM backend
-(or writing new from scratch) for static compilation would be a good project.
-
-(On the other hand, just generating C code and using clang might be enough.
-The issue with that is the so-called "asmgcc GC root finder", which has tons
-of issues of this own. In my opinion (arigo), it would be definitely a
-better project to try to optimize the alternative, the "shadowstack" GC root
-finder, which is nicely portable. So far it gives a pypy that is around
-7% slower.)
-
 Embedding PyPy
 ----------------------------------------
 Note: there is a basic proof-of-concept for that as a `uwsgi pypy plugin`_
 Being able to embed PyPy, say with its own limited C API, would be
-useful. But here is the most interesting variant, straight from
-EuroPython live discussion :-) We can have a generic "libpypy.so" that
-can be used as a placeholder dynamic library, and when it gets loaded,
-it runs a .py module that installs (via ctypes) the interface it wants
-exported. This would give us a one-size-fits-all generic .so file to be
-imported by any application that wants to load .so files :-)
+useful. But there is a possibly better variant: use CFFI. With some
+minimal tools atop CFFI, it would be possible to write a pure Python
+library, and then compile automatically from it an .so/.dll file that is
+a dynamic-link library with whatever C API we want. This gives us a
+one-size-fits-all generic way to make .so/.dll files from Python.
 .. _`uwsgi pypy plugin`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -57,3 +57,13 @@
 mapdicts keep track of whether or not an attribute is every assigned to
 multiple times. If it's only assigned once then an elidable lookup is
 used when possible.
+
+.. branch: precompiled-headers
+Create a Makefile using precompiled headers for MSVC platforms.
+The downside is a messy nmake-compatible Makefile. Since gcc shows minimal
+speedup, it was not implemented.
+
+.. branch: camelot
+With a properly configured 256-color terminal (TERM=...-256color), the
+Mandelbrot set shown during translation now uses a range of 50 colours.
+Essential!
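
(Editorial aside, not part of the committed diffs in this archive: the include/PyPy.h
header added earlier in this merge documents a small embedding API --
rpython_startup_code(), pypy_setup_home(), pypy_execute_source(), plus the thread
helpers. The sketch below shows how a host C program might drive it, going only by
the comments in that header; the library path and the Python source string are
made-up placeholders, and error handling is reduced to the return-code checks the
header describes.)

    /* Minimal embedding sketch based on the PyPy.h comments above.
       Assumes PyPy was translated as a shared library and that PyPy.h is
       on the include path; the path below is a placeholder. */
    #include <stdio.h>
    #include "PyPy.h"

    int main(void)
    {
        /* placeholder location of the libpypy shared object */
        char home[] = "/opt/pypy/bin/libpypy-c.so";
        /* plain Python (2.x) code to run inside the embedded interpreter */
        char src[]  = "print 'hello from embedded pypy'";

        rpython_startup_code();               /* must be called first, once */

        if (pypy_setup_home(home, 1) != 0) {  /* verbose=1: errors go to stderr */
            fprintf(stderr, "pypy_setup_home failed\n");
            return 1;
        }
        if (pypy_execute_source(src) != 0) {  /* non-zero: uncaught Python exception */
            fprintf(stderr, "pypy_execute_source failed\n");
            return 1;
        }
        return 0;
    }

If the host program uses multiple threads, the header additionally asks for
pypy_init_threads() once at init time and pypy_thread_attach() in every other thread
before running Python code; the program is linked against the libpypy shared library
produced by translation (typically libpypy-c.so).
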
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -90,9 +90,10 @@ return f """) - @entrypoint('main', [rffi.CCHARP, lltype.Signed], c_name='pypy_setup_home') + @entrypoint('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): from pypy.module.sys.initpath import pypy_find_stdlib + verbose = rffi.cast(lltype.Signed, verbose) if ll_home: home = rffi.charp2str(ll_home) else: @@ -120,7 +121,8 @@ @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): source = rffi.charp2str(ll_source) - return _pypy_execute_source(source) + res = _pypy_execute_source(source) + return rffi.cast(rffi.INT, res) @entrypoint('main', [], c_name='pypy_init_threads') def pypy_init_threads(): diff --git a/pypy/interpreter/test/test_targetpypy.py b/pypy/interpreter/test/test_targetpypy.py --- a/pypy/interpreter/test/test_targetpypy.py +++ b/pypy/interpreter/test/test_targetpypy.py @@ -12,8 +12,10 @@ _, d = create_entry_point(space, None) execute_source = d['pypy_execute_source'] lls = rffi.str2charp("import sys; sys.modules['xyz'] = 3") - execute_source(lls) + res = execute_source(lls) lltype.free(lls, flavor='raw') + assert lltype.typeOf(res) == rffi.INT + assert rffi.cast(lltype.Signed, res) == 0 x = space.int_w(space.getitem(space.getattr(space.builtin_modules['sys'], space.wrap('modules')), space.wrap('xyz'))) @@ -24,5 +26,5 @@ # did not crash - the same globals pypy_setup_home = d['pypy_setup_home'] lls = rffi.str2charp(__file__) - pypy_setup_home(lls, 1) + pypy_setup_home(lls, rffi.cast(rffi.INT, 1)) lltype.free(lls, flavor='raw') diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -81,6 +81,7 @@ 'newdict' : 'interp_dict.newdict', 'dictstrategy' : 'interp_dict.dictstrategy', 'set_debug' : 'interp_magic.set_debug', + 'locals_to_fast' : 'interp_magic.locals_to_fast', } if sys.platform == 'win32': interpleveldefs['get_console_cp'] = 'interp_magic.get_console_cp' diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError, wrap_oserror from pypy.interpreter.gateway import unwrap_spec +from pypy.interpreter.pyframe import PyFrame from rpython.rlib.objectmodel import we_are_translated from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.typeobject import MethodCache @@ -111,3 +112,8 @@ @unwrap_spec(estimate=int) def add_memory_pressure(estimate): rgc.add_memory_pressure(estimate) + + at unwrap_spec(w_frame=PyFrame) +def locals_to_fast(space, w_frame): + assert isinstance(w_frame, PyFrame) + w_frame.locals2fast() diff --git a/pypy/module/__pypy__/test/test_locals2fast.py b/pypy/module/__pypy__/test/test_locals2fast.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/test/test_locals2fast.py @@ -0,0 +1,81 @@ +# Tests from Fabio Zadrozny + + +class AppTestLocals2Fast: + """ + Test setting locals in one function from another function + using several approaches. 
+ """ + + def setup_class(cls): + cls.w_save_locals = cls.space.appexec([], """(): + import sys + if '__pypy__' in sys.builtin_module_names: + import __pypy__ + save_locals = __pypy__.locals_to_fast + else: + # CPython version + import ctypes + @staticmethod + def save_locals(frame): + ctypes.pythonapi.PyFrame_LocalsToFast( + ctypes.py_object(frame), ctypes.c_int(0)) + return save_locals + """) + + def test_set_locals_using_save_locals(self): + import sys + def use_save_locals(name, value): + frame = sys._getframe().f_back + locals_dict = frame.f_locals + locals_dict[name] = value + self.save_locals(frame) + def test_method(fn): + x = 1 + # The method 'fn' should attempt to set x = 2 in the current frame. + fn('x', 2) + return x + x = test_method(use_save_locals) + assert x == 2 + + def test_frame_simple_change(self): + import sys + frame = sys._getframe() + a = 20 + frame.f_locals['a'] = 50 + self.save_locals(frame) + assert a == 50 + + def test_frame_co_freevars(self): + import sys + outer_var = 20 + def func(): + frame = sys._getframe() + frame.f_locals['outer_var'] = 50 + self.save_locals(frame) + assert outer_var == 50 + func() + + def test_frame_co_cellvars(self): + import sys + def check_co_vars(a): + frame = sys._getframe() + def function2(): + print a + assert 'a' in frame.f_code.co_cellvars + frame = sys._getframe() + frame.f_locals['a'] = 50 + self.save_locals(frame) + assert a == 50 + check_co_vars(1) + + def test_frame_change_in_inner_frame(self): + import sys + def change(f): + assert f is not sys._getframe() + f.f_locals['a'] = 50 + self.save_locals(f) + frame = sys._getframe() + a = 20 + change(frame) + assert a == 50 diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -31,7 +31,7 @@ _compilation_info_ = eci calling_conv = 'c' - CHECK_LIBRARY = platform.Has('dump("x", (int)&BZ2_bzCompress)') + CHECK_LIBRARY = platform.Has('dump("x", (long)&BZ2_bzCompress)') off_t = platform.SimpleType("off_t", rffi.LONGLONG) size_t = platform.SimpleType("size_t", rffi.ULONG) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -910,6 +910,8 @@ # implement function callbacks and generate function decls functions = [] pypy_decls = [] + pypy_decls.append("#ifndef _PYPY_PYPY_DECL_H\n") + pypy_decls.append("#define _PYPY_PYPY_DECL_H\n") pypy_decls.append("#ifndef PYPY_STANDALONE\n") pypy_decls.append("#ifdef __cplusplus") pypy_decls.append("extern \"C\" {") @@ -953,6 +955,7 @@ pypy_decls.append("}") pypy_decls.append("#endif") pypy_decls.append("#endif /*PYPY_STANDALONE*/\n") + pypy_decls.append("#endif /*_PYPY_PYPY_DECL_H*/\n") pypy_decl_h = udir.join('pypy_decl.h') pypy_decl_h.write('\n'.join(pypy_decls)) diff --git a/pypy/module/cpyext/include/bufferobject.h b/pypy/module/cpyext/include/bufferobject.h --- a/pypy/module/cpyext/include/bufferobject.h +++ b/pypy/module/cpyext/include/bufferobject.h @@ -37,7 +37,7 @@ PyObject* PyBuffer_New(Py_ssize_t size); -void _Py_init_bufferobject(void); +PyTypeObject *_Py_get_buffer_type(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pycapsule.h b/pypy/module/cpyext/include/pycapsule.h --- a/pypy/module/cpyext/include/pycapsule.h +++ b/pypy/module/cpyext/include/pycapsule.h @@ -50,7 +50,7 @@ PyAPI_FUNC(void *) PyCapsule_Import(const char *name, int no_block); -void _Py_init_capsule(void); +PyTypeObject *_Py_get_capsule_type(void); #ifdef __cplusplus } diff --git 
a/pypy/module/cpyext/include/pycobject.h b/pypy/module/cpyext/include/pycobject.h --- a/pypy/module/cpyext/include/pycobject.h +++ b/pypy/module/cpyext/include/pycobject.h @@ -48,7 +48,7 @@ } PyCObject; #endif -void _Py_init_pycobject(void); +PyTypeObject *_Py_get_cobject_type(void); #ifdef __cplusplus } diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -333,8 +333,8 @@ loop, = log.loops_by_id("struct") if sys.maxint == 2 ** 63 - 1: extra = """ - i8 = int_lt(i4, -2147483648) - guard_false(i8, descr=...) + i8 = int_ge(i4, -2147483648) + guard_true(i8, descr=...) """ else: extra = "" diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -1,3 +1,10 @@ +"""The builtin dict implementation""" + +from rpython.rlib import jit, rerased +from rpython.rlib.debug import mark_dict_non_null +from rpython.rlib.objectmodel import newlist_hint, r_dict, specialize +from rpython.tool.sourcetools import func_renamer, func_with_new_name + from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import ( @@ -7,18 +14,10 @@ from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.util import negate -from rpython.rlib import jit, rerased -from rpython.rlib.debug import mark_dict_non_null -from rpython.rlib.objectmodel import newlist_hint, r_dict, specialize -from rpython.tool.sourcetools import func_renamer, func_with_new_name - UNROLL_CUTOFF = 5 -def _is_str(space, w_key): - return space.is_w(space.type(w_key), space.w_str) - def _never_equal_to_string(space, w_lookup_type): """Handles the case of a non string key lookup. 
Types that have a sane hash/eq function should allow us to return True @@ -29,8 +28,8 @@ return (space.is_w(w_lookup_type, space.w_NoneType) or space.is_w(w_lookup_type, space.w_int) or space.is_w(w_lookup_type, space.w_bool) or - space.is_w(w_lookup_type, space.w_float) - ) + space.is_w(w_lookup_type, space.w_float)) + @specialize.call_location() def w_dict_unrolling_heuristic(w_dct): @@ -69,19 +68,18 @@ w_type = space.w_dict storage = strategy.get_empty_storage() - w_self = space.allocate_instance(W_DictMultiObject, w_type) - W_DictMultiObject.__init__(w_self, space, strategy, storage) - return w_self + w_obj = space.allocate_instance(W_DictMultiObject, w_type) + W_DictMultiObject.__init__(w_obj, space, strategy, storage) + return w_obj def __init__(self, space, strategy, storage): self.space = space self.strategy = strategy self.dstorage = storage - def __repr__(w_self): + def __repr__(self): """representation for debugging purposes""" - #print('XXXXXXX', w_self.dstorage) - return "%s(%s)" % (w_self.__class__.__name__, w_self.strategy) + return "%s(%s)" % (self.__class__.__name__, self.strategy) def unwrap(w_dict, space): result = {} @@ -98,9 +96,9 @@ return space.get_and_call_function(w_missing, w_dict, w_key) return None - def initialize_content(w_self, list_pairs_w): + def initialize_content(self, list_pairs_w): for w_k, w_v in list_pairs_w: - w_self.setitem(w_k, w_v) + self.setitem(w_k, w_v) def setitem_str(self, key, w_value): self.strategy.setitem_str(self, key, w_value) @@ -115,7 +113,8 @@ if w_fill is None: w_fill = space.w_None if space.is_w(w_type, space.w_dict): - w_dict = W_DictMultiObject.allocate_and_init_instance(space, w_type) + w_dict = W_DictMultiObject.allocate_and_init_instance(space, + w_type) byteslist = space.listview_bytes(w_keys) if byteslist is not None: @@ -312,8 +311,7 @@ try: w_key, w_value = self.popitem() except KeyError: - raise OperationError(space.w_KeyError, - space.wrap("popitem(): dictionary is empty")) + raise oefmt(space.w_KeyError, "popitem(): dictionary is empty") return space.newtuple([w_key, w_value]) @unwrap_spec(w_default=WrappedDefault(None)) @@ -597,6 +595,7 @@ def getiterkeys(self, w_dict): return iter([None]) getitervalues = getiterkeys + def getiteritems(self, w_dict): return iter([(None, None)]) @@ -615,8 +614,8 @@ space = self.space if self.len != self.dictimplementation.length(): self.len = -1 # Make this error state sticky - msg = "dictionary changed size during iteration" - raise OperationError(space.w_RuntimeError, space.wrap(msg)) + raise oefmt(space.w_RuntimeError, + "dictionary changed size during iteration") # look for the next entry if self.pos < self.len: @@ -635,14 +634,15 @@ w_value = self.dictimplementation.getitem(w_key) if w_value is None: self.len = -1 # Make this error state sticky - msg = "dictionary changed during iteration" - raise OperationError(space.w_RuntimeError, space.wrap(msg)) + raise oefmt(space.w_RuntimeError, + "dictionary changed during iteration") return (w_key, w_value) # no more entries self.dictimplementation = None return EMPTY return func_with_new_name(next, 'next_' + TP) + class BaseIteratorImplementation(object): def __init__(self, space, strategy, implementation): self.space = space @@ -665,13 +665,14 @@ class BaseItemIterator(BaseIteratorImplementation): next_item = _new_next('item') + def create_iterator_classes(dictimpl, override_next_item=None): if not hasattr(dictimpl, 'wrapkey'): - wrapkey = lambda space, key : key + wrapkey = lambda space, key: key else: wrapkey = dictimpl.wrapkey.im_func 
if not hasattr(dictimpl, 'wrapvalue'): - wrapvalue = lambda space, key : key + wrapvalue = lambda space, key: key else: wrapvalue = dictimpl.wrapvalue.im_func @@ -800,7 +801,8 @@ return w_dict.getitem(w_key) def w_keys(self, w_dict): - l = [self.wrap(key) for key in self.unerase(w_dict.dstorage).iterkeys()] + l = [self.wrap(key) + for key in self.unerase(w_dict.dstorage).iterkeys()] return self.space.newlist(l) def values(self, w_dict): @@ -1036,7 +1038,8 @@ def wrapkey(space, key): return space.wrap(key) - # XXX there is no space.newlist_int yet to implement w_keys more efficiently + # XXX there is no space.newlist_int yet to implement w_keys more + # efficiently create_iterator_classes(IntDictStrategy) @@ -1071,8 +1074,7 @@ for w_pair in data_w: pair = space.fixedview(w_pair) if len(pair) != 2: - raise OperationError(space.w_ValueError, - space.wrap("sequence of pairs expected")) + raise oefmt(space.w_ValueError, "sequence of pairs expected") w_key, w_value = pair w_dict.setitem(w_key, w_value) @@ -1128,9 +1130,9 @@ ignore_for_isinstance_cache = True - def __init__(w_self, space, iteratorimplementation): - w_self.space = space - w_self.iteratorimplementation = iteratorimplementation + def __init__(self, space, iteratorimplementation): + self.space = space + self.iteratorimplementation = iteratorimplementation def descr_iter(self, space): return self @@ -1158,9 +1160,8 @@ new_inst = mod.get('dictiter_surrogate_new') w_typeobj = space.type(self) - raise OperationError( - space.w_TypeError, - space.wrap("can't pickle dictionary-keyiterator objects")) + raise oefmt(space.w_TypeError, + "can't pickle dictionary-keyiterator objects") # XXXXXX get that working again # we cannot call __init__ since we don't have the original dict @@ -1174,8 +1175,8 @@ w_clone = space.allocate_instance(W_DictMultiIterItemsObject, w_typeobj) else: - msg = "unsupported dictiter type '%s' during pickling" % (self,) - raise OperationError(space.w_TypeError, space.wrap(msg)) + raise oefmt(space.w_TypeError, + "unsupported dictiter type '%R' during pickling", self) w_clone.space = space w_clone.content = self.content w_clone.len = self.len @@ -1244,8 +1245,8 @@ # Views class W_DictViewObject(W_Root): - def __init__(w_self, space, w_dict): - w_self.w_dict = w_dict + def __init__(self, space, w_dict): + self.w_dict = w_dict def descr_repr(self, space): w_seq = space.call_function(space.w_list, self) diff --git a/pypy/objspace/std/specialisedtupleobject.py b/pypy/objspace/std/specialisedtupleobject.py --- a/pypy/objspace/std/specialisedtupleobject.py +++ b/pypy/objspace/std/specialisedtupleobject.py @@ -27,11 +27,11 @@ w_obj = values_w[i] val_type = typetuple[i] if val_type == int: - unwrapped = space.int_w(w_obj) + unwrapped = w_obj.int_w(space) elif val_type == float: - unwrapped = space.float_w(w_obj) + unwrapped = w_obj.float_w(space) elif val_type == str: - unwrapped = space.str_w(w_obj) + unwrapped = w_obj.str_w(space) elif val_type == object: unwrapped = w_obj else: @@ -127,16 +127,15 @@ Cls_ff = make_specialised_class((float, float)) def makespecialisedtuple(space, list_w): + from pypy.objspace.std.intobject import W_IntObject + from pypy.objspace.std.floatobject import W_FloatObject if len(list_w) == 2: w_arg1, w_arg2 = list_w - w_type1 = space.type(w_arg1) - if w_type1 is space.w_int: - w_type2 = space.type(w_arg2) - if w_type2 is space.w_int: + if type(w_arg1) is W_IntObject: + if type(w_arg2) is W_IntObject: return Cls_ii(space, w_arg1, w_arg2) - elif w_type1 is space.w_float: - w_type2 = space.type(w_arg2) - 
if w_type2 is space.w_float: + elif type(w_arg1) is W_FloatObject: + if type(w_arg2) is W_FloatObject: return Cls_ff(space, w_arg1, w_arg2) return Cls_oo(space, w_arg1, w_arg2) else: diff --git a/pypy/objspace/std/test/test_specialisedtupleobject.py b/pypy/objspace/std/test/test_specialisedtupleobject.py --- a/pypy/objspace/std/test/test_specialisedtupleobject.py +++ b/pypy/objspace/std/test/test_specialisedtupleobject.py @@ -214,6 +214,14 @@ assert a == (1, 2.2,) + b assert not a != (1, 2.2) + b + def test_subclasses(self): + class I(int): pass + class F(float): pass + t = (I(42), I(43)) + assert type(t[0]) is I + t = (F(42), F(43)) + assert type(t[0]) is F + class AppTestAll(test_tupleobject.AppTestW_TupleObject): spaceconfig = {"objspace.std.withspecialisedtuple": True} diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -582,18 +582,18 @@ def consider_op(self, block, opindex): op = block.operations[opindex] - argcells = [self.binding(a) for a in op.args] + try: + argcells = [self.binding(a) for a in op.args] - # let's be careful about avoiding propagated SomeImpossibleValues - # to enter an op; the latter can result in violations of the - # more general results invariant: e.g. if SomeImpossibleValue enters is_ - # is_(SomeImpossibleValue, None) -> SomeBool - # is_(SomeInstance(not None), None) -> SomeBool(const=False) ... - # boom -- in the assert of setbinding() - for arg in argcells: - if isinstance(arg, annmodel.SomeImpossibleValue): - raise BlockedInference(self, op, opindex) - try: + # let's be careful about avoiding propagated SomeImpossibleValues + # to enter an op; the latter can result in violations of the + # more general results invariant: e.g. if SomeImpossibleValue enters is_ + # is_(SomeImpossibleValue, None) -> SomeBool + # is_(SomeInstance(not None), None) -> SomeBool(const=False) ... 
+ # boom -- in the assert of setbinding() + for arg in argcells: + if isinstance(arg, annmodel.SomeImpossibleValue): + raise BlockedInference(self, op, opindex) resultcell = op.consider(self, *argcells) except annmodel.AnnotatorError as e: # note that UnionError is a subclass graph = self.bookkeeper.position_key[0] diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4139,6 +4139,16 @@ a.build_types(f, [str]) assert ("Cannot prove that the object is callable" in exc.value.msg) + def test_UnionError_on_PBC(self): + l = ['a', 1] + def f(x): + l.append(x) + a = self.RPythonAnnotator() + with py.test.raises(annmodel.UnionError) as excinfo: + a.build_types(f, [int]) + assert 'Happened at file' in excinfo.value.source + assert 'Known variable annotations:' in excinfo.value.source + def test_str_format_error(self): def f(s, x): return s.format(x) diff --git a/rpython/jit/codewriter/longlong.py b/rpython/jit/codewriter/longlong.py --- a/rpython/jit/codewriter/longlong.py +++ b/rpython/jit/codewriter/longlong.py @@ -26,6 +26,7 @@ getrealfloat = lambda x: x gethash = compute_hash gethash_fast = longlong2float.float2longlong + extract_bits = longlong2float.float2longlong is_longlong = lambda TYPE: False # ------------------------------------- @@ -42,6 +43,7 @@ getrealfloat = longlong2float.longlong2float gethash = lambda xll: rarithmetic.intmask(xll - (xll >> 32)) gethash_fast = gethash + extract_bits = lambda x: x is_longlong = lambda TYPE: (TYPE is lltype.SignedLongLong or TYPE is lltype.UnsignedLongLong) diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -275,7 +275,12 @@ def same_constant(self, other): if isinstance(other, ConstFloat): - return self.value == other.value + # careful in this comparison: if self.value and other.value + # are both NaN, stored as regular floats (i.e. on 64-bit), + # then just using "==" would say False: two NaNs are always + # different from each other. 
+ return (longlong.extract_bits(self.value) == + longlong.extract_bits(other.value)) return False def nonnull(self): diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -594,11 +594,9 @@ if tobox is not None: # sanity check: see whether the current struct value # corresponds to what the cache thinks the value is - # XXX pypy with the following check fails on micronumpy, - # XXX investigate - #resbox = executor.execute(self.metainterp.cpu, self.metainterp, - # rop.GETFIELD_GC, fielddescr, box) - #assert resbox.constbox().same_constant(tobox.constbox()) + resbox = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETFIELD_GC, fielddescr, box) + assert resbox.constbox().same_constant(tobox.constbox()) return tobox resbox = self.execute_with_descr(opnum, fielddescr, box) self.metainterp.heapcache.getfield_now_known(box, fielddescr, resbox) diff --git a/rpython/jit/metainterp/test/test_history.py b/rpython/jit/metainterp/test/test_history.py --- a/rpython/jit/metainterp/test/test_history.py +++ b/rpython/jit/metainterp/test/test_history.py @@ -1,5 +1,8 @@ from rpython.jit.metainterp.history import * from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.rlib.rfloat import NAN, INFINITY +from rpython.jit.codewriter import longlong +from rpython.translator.c.test.test_standalone import StandaloneTests def test_repr(): @@ -38,3 +41,36 @@ assert not c3a.same_constant(c1b) assert not c3a.same_constant(c2b) assert c3a.same_constant(c3b) + +def test_same_constant_float(): + c1 = Const._new(12.34) + c2 = Const._new(12.34) + c3 = Const._new(NAN) + c4 = Const._new(NAN) + c5 = Const._new(INFINITY) + c6 = Const._new(INFINITY) + assert c1.same_constant(c2) + assert c3.same_constant(c4) + assert c5.same_constant(c6) + assert not c1.same_constant(c4) + assert not c1.same_constant(c6) + assert not c3.same_constant(c2) + assert not c3.same_constant(c6) + assert not c5.same_constant(c2) + assert not c5.same_constant(c4) + + +class TestZTranslated(StandaloneTests): + def test_ztranslated_same_constant_float(self): + def fn(args): + n = INFINITY + c1 = ConstFloat(longlong.getfloatstorage(n - INFINITY)) + c2 = ConstFloat(longlong.getfloatstorage(n - INFINITY)) + c3 = ConstFloat(longlong.getfloatstorage(12.34)) + if c1.same_constant(c2): + print "ok!" 
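(The translated test continues just below.) The same_constant() change above is driven by IEEE-754 semantics: a NaN never compares equal to anything, itself included, so two ConstFloats that both hold a NaN would be reported as different constants under plain '=='; comparing the raw 64-bit patterns instead makes identical NaNs match. A plain-Python illustration of the effect, using struct as a rough stand-in for longlong.extract_bits:

import struct

def float_bits(x):
    # view the IEEE-754 double 'x' as a signed 64-bit integer,
    # roughly what extract_bits yields on 64-bit builds
    return struct.unpack('<q', struct.pack('<d', x))[0]

inf = float('inf')
nan1 = inf - inf      # obtain a NaN through arithmetic, as the test above does
nan2 = inf - inf
assert nan1 != nan2                            # '==' never holds for a NaN
assert float_bits(nan1) == float_bits(nan2)    # but the bit patterns agree
assert float_bits(12.34) == float_bits(12.34)  # identical ordinary floats still match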
+ return 0 + + t, cbuilder = self.compile(fn) + data = cbuilder.cmdexec('') + assert "ok!\n" in data diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1837,6 +1837,11 @@ # if self.objects_with_finalizers.non_empty(): self.deal_with_objects_with_finalizers() + elif self.old_objects_with_weakrefs.non_empty(): + # Weakref support: clear the weak pointers to dying objects + # (if we call deal_with_objects_with_finalizers(), it will + # invoke invalidate_old_weakrefs() itself directly) + self.invalidate_old_weakrefs() ll_assert(not self.objects_to_trace.non_empty(), "objects_to_trace should be empty") @@ -1846,9 +1851,7 @@ self.more_objects_to_trace.delete() # - # Weakref support: clear the weak pointers to dying objects - if self.old_objects_with_weakrefs.non_empty(): - self.invalidate_old_weakrefs() + # Light finalizers if self.old_objects_with_light_finalizers.non_empty(): self.deal_with_old_objects_with_finalizers() #objects_to_trace processed fully, can move on to sweeping @@ -2206,6 +2209,12 @@ self._recursively_bump_finalization_state_from_2_to_3(y) self._recursively_bump_finalization_state_from_1_to_2(x) + # Clear the weak pointers to dying objects. Also clears them if + # they point to objects which have the GCFLAG_FINALIZATION_ORDERING + # bit set here. These are objects which will be added to + # run_finalizers(). + self.invalidate_old_weakrefs() + while marked.non_empty(): x = marked.popleft() state = self._finalization_state(x) @@ -2333,7 +2342,9 @@ ll_assert((self.header(pointing_to).tid & GCFLAG_NO_HEAP_PTRS) == 0, "registered old weakref should not " "point to a NO_HEAP_PTRS obj") - if self.header(pointing_to).tid & GCFLAG_VISITED: + tid = self.header(pointing_to).tid + if ((tid & (GCFLAG_VISITED | GCFLAG_FINALIZATION_ORDERING)) == + GCFLAG_VISITED): new_with_weakref.append(obj) else: (obj + offset).address[0] = llmemory.NULL diff --git a/rpython/memory/test/gc_test_base.py b/rpython/memory/test/gc_test_base.py --- a/rpython/memory/test/gc_test_base.py +++ b/rpython/memory/test/gc_test_base.py @@ -29,6 +29,7 @@ GC_CAN_SHRINK_ARRAY = False GC_CAN_SHRINK_BIG_ARRAY = False BUT_HOW_BIG_IS_A_BIG_STRING = 3*WORD + WREF_IS_INVALID_BEFORE_DEL_IS_CALLED = False def setup_class(cls): cls._saved_logstate = py.log._getstate() @@ -370,15 +371,23 @@ class A(object): count = 0 a = A() + expected_invalid = self.WREF_IS_INVALID_BEFORE_DEL_IS_CALLED class B(object): def __del__(self): # when __del__ is called, the weakref to myself is still valid - # in RPython (at least with most GCs; this test might be - # skipped for specific GCs) - if self.ref() is self: - a.count += 10 # ok + # in RPython with most GCs. However, this can lead to strange + # bugs with incminimark. https://bugs.pypy.org/issue1687 + # So with incminimark, we expect the opposite. 
+ if expected_invalid: + if self.ref() is None: + a.count += 10 # ok + else: + a.count = 666 # not ok else: - a.count = 666 # not ok + if self.ref() is self: + a.count += 10 # ok + else: + a.count = 666 # not ok def g(): b = B() ref = weakref.ref(b) diff --git a/rpython/memory/test/test_incminimark_gc.py b/rpython/memory/test/test_incminimark_gc.py --- a/rpython/memory/test/test_incminimark_gc.py +++ b/rpython/memory/test/test_incminimark_gc.py @@ -1,6 +1,38 @@ -from rpython.rlib.rarithmetic import LONG_BIT +from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.memory.test import test_minimark_gc class TestIncrementalMiniMarkGC(test_minimark_gc.TestMiniMarkGC): from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass + WREF_IS_INVALID_BEFORE_DEL_IS_CALLED = True + + def test_weakref_not_in_stack(self): + import weakref + class A(object): + pass + class B(object): + def __init__(self, next): + self.next = next + def g(): + a = A() + a.x = 5 + wr = weakref.ref(a) + llop.gc__collect(lltype.Void) # make everything old + assert wr() is not None + assert a.x == 5 + return wr + def f(): + ref = g() + llop.gc__collect(lltype.Void, 1) # start a major cycle + # at this point the stack is scanned, and the weakref points + # to an object not found, but still reachable: + b = ref() + llop.debug_print(lltype.Void, b) + assert b is not None + llop.gc__collect(lltype.Void) # finish the major cycle + # assert does not crash, because 'b' is still kept alive + b.x = 42 + return ref() is b + res = self.interpret(f, []) + assert res == True diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -338,9 +338,10 @@ _about_ = newlist_hint def compute_result_annotation(self, s_sizehint): - from rpython.annotator.model import SomeInteger + from rpython.annotator.model import SomeInteger, AnnotatorError - assert isinstance(s_sizehint, SomeInteger) + if not isinstance(s_sizehint, SomeInteger): + raise AnnotatorError("newlist_hint() argument must be an int") s_l = self.bookkeeper.newlist() s_l.listdef.listitem.resize() return s_l @@ -365,8 +366,13 @@ def compute_result_annotation(self, s_l, s_sizehint): from rpython.annotator import model as annmodel - assert isinstance(s_l, annmodel.SomeList) - assert isinstance(s_sizehint, annmodel.SomeInteger) + if annmodel.s_None.contains(s_l): + return # first argument is only None so far, but we + # expect a generalization later + if not isinstance(s_l, annmodel.SomeList): + raise annmodel.AnnotatorError("First argument must be a list") + if not isinstance(s_sizehint, annmodel.SomeInteger): + raise annmodel.AnnotatorError("Second argument must be an integer") s_l.listdef.listitem.resize() def specialize_call(self, hop): diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -29,9 +29,9 @@ OFF_T = CC['off_t'] c_open = llexternal('fopen', [rffi.CCHARP, rffi.CCHARP], lltype.Ptr(FILE)) c_close = llexternal('fclose', [lltype.Ptr(FILE)], rffi.INT) -c_write = llexternal('fwrite', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, +c_fwrite = llexternal('fwrite', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, lltype.Ptr(FILE)], rffi.SIZE_T) -c_read = llexternal('fread', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, +c_fread = llexternal('fread', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, lltype.Ptr(FILE)], rffi.SIZE_T) c_feof = llexternal('feof', [lltype.Ptr(FILE)], rffi.INT) c_ferror = 
llexternal('ferror', [lltype.Ptr(FILE)], rffi.INT) @@ -40,13 +40,16 @@ rffi.INT) c_tmpfile = llexternal('tmpfile', [], lltype.Ptr(FILE)) c_fileno = llexternal('fileno', [lltype.Ptr(FILE)], rffi.INT) -c_ftell = llexternal('ftell', [lltype.Ptr(FILE)], lltype.Signed) +c_ftell = llexternal('ftell', [lltype.Ptr(FILE)], rffi.LONG) c_fflush = llexternal('fflush', [lltype.Ptr(FILE)], rffi.INT) c_ftruncate = llexternal('ftruncate', [rffi.INT, OFF_T], rffi.INT, macro=True) c_fgets = llexternal('fgets', [rffi.CCHARP, rffi.INT, lltype.Ptr(FILE)], rffi.CCHARP) +c_popen = llexternal('popen', [rffi.CCHARP, rffi.CCHARP], lltype.Ptr(FILE)) +c_pclose = llexternal('pclose', [lltype.Ptr(FILE)], rffi.INT) + BASE_BUF_SIZE = 4096 BASE_LINE_SIZE = 100 @@ -75,6 +78,21 @@ raise OSError(errno, os.strerror(errno)) return RFile(res) +def create_popen_file(command, type): + ll_command = rffi.str2charp(command) + try: + ll_type = rffi.str2charp(type) + try: + ll_f = c_popen(ll_command, ll_type) + if not ll_f: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) + finally: + lltype.free(ll_type, flavor='raw') + finally: + lltype.free(ll_command, flavor='raw') + return RPopenFile(ll_f) + class RFile(object): def __init__(self, ll_file): self.ll_file = ll_file @@ -89,30 +107,26 @@ try: # note that since we got a nonmoving buffer, it is either raw # or already cannot move, so the arithmetics below are fine - total_bytes = 0 - ll_current = ll_value - while total_bytes < len(value): - bytes = c_write(ll_current, 1, len(value) - r_uint(total_bytes), - ll_file) - if bytes == 0: - errno = rposix.get_errno() - raise OSError(errno, os.strerror(errno)) - total_bytes += bytes - ll_current = rffi.cast(rffi.CCHARP, - rffi.cast(lltype.Unsigned, ll_value) + - total_bytes) + length = len(value) + bytes = c_fwrite(ll_value, 1, length, ll_file) + if bytes != length: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) finally: rffi.free_nonmovingbuffer(value, ll_value) def close(self): - if self.ll_file: + ll_f = self.ll_file + if ll_f: # double close is allowed - res = c_close(self.ll_file) self.ll_file = lltype.nullptr(FILE) + res = self._do_close(ll_f) if res == -1: errno = rposix.get_errno() raise OSError(errno, os.strerror(errno)) + _do_close = staticmethod(c_close) # overridden in RPopenFile + def read(self, size=-1): # XXX CPython uses a more delicate logic here ll_file = self.ll_file @@ -124,27 +138,25 @@ try: s = StringBuilder() while True: - returned_size = c_read(buf, 1, BASE_BUF_SIZE, ll_file) + returned_size = c_fread(buf, 1, BASE_BUF_SIZE, ll_file) + returned_size = intmask(returned_size) # is between 0 and BASE_BUF_SIZE if returned_size == 0: if c_feof(ll_file): # ok, finished return s.build() - errno = c_ferror(ll_file) - c_clearerror(ll_file) - raise OSError(errno, os.strerror(errno)) + raise _error(ll_file) s.append_charpsize(buf, returned_size) finally: lltype.free(buf, flavor='raw') else: raw_buf, gc_buf = rffi.alloc_buffer(size) try: - returned_size = c_read(raw_buf, 1, size, ll_file) + returned_size = c_fread(raw_buf, 1, size, ll_file) + returned_size = intmask(returned_size) # is between 0 and size if returned_size == 0: if not c_feof(ll_file): - errno = c_ferror(ll_file) - raise OSError(errno, os.strerror(errno)) - s = rffi.str_from_buffer(raw_buf, gc_buf, size, - rffi.cast(lltype.Signed, returned_size)) + raise _error(ll_file) + s = rffi.str_from_buffer(raw_buf, gc_buf, size, returned_size) finally: rffi.keep_buffer_alive_until_here(raw_buf, gc_buf) return s @@ -200,8 +212,7 @@ 
if not result: if c_feof(self.ll_file): # ok return 0 - errno = c_ferror(self.ll_file) - raise OSError(errno, os.strerror(errno)) + raise _error(self.ll_file) # # Assume that fgets() works as documented, and additionally # never writes beyond the final \0, which the CPython @@ -240,3 +251,13 @@ finally: rffi.keep_buffer_alive_until_here(raw_buf, gc_buf) raise ValueError("I/O operation on closed file") + + +class RPopenFile(RFile): + _do_close = staticmethod(c_pclose) + + +def _error(ll_file): + errno = c_ferror(ll_file) + c_clearerror(ll_file) + raise OSError(errno, os.strerror(errno)) diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -15,7 +15,6 @@ def set_max_heap_size(nbytes): """Limit the heap size to n bytes. - So far only implemented by the Boehm GC and the semispace/generation GCs. """ pass diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -1,5 +1,5 @@ -import os +import os, sys, py from rpython.rtyper.test.tool import BaseRtypingTest from rpython.tool.udir import udir from rpython.rlib import rfile @@ -142,6 +142,15 @@ cls.tmpdir = udir.join('test_rfile_direct') cls.tmpdir.ensure(dir=True) + def test_read_a_lot(self): + fname = str(self.tmpdir.join('file_read_a_lot')) + with open(fname, 'w') as f: + f.write('dupa' * 999) + f = rfile.create_file(fname, 'r') + s = f.read() + assert s == 'dupa' * 999 + f.close() + def test_readline(self): fname = str(self.tmpdir.join('file_readline')) j = 0 @@ -175,3 +184,15 @@ got = f.readline() assert got == '' f.close() + + +class TestPopen: + def setup_class(cls): + if sys.platform == 'win32': + py.test.skip("not for win32") + + def test_popen(self): + f = rfile.create_popen_file("python -c 'print 42'", "r") + s = f.read() + f.close() + assert s == '42\n' diff --git a/rpython/rlib/test/test_rpoll.py b/rpython/rlib/test/test_rpoll.py --- a/rpython/rlib/test/test_rpoll.py +++ b/rpython/rlib/test/test_rpoll.py @@ -1,5 +1,6 @@ import os import errno +import py from rpython.rlib.rsocket import * from rpython.rlib.rpoll import * @@ -55,6 +56,8 @@ serv.close() def test_select(): + if os.name == 'nt': + py.test.skip('cannot select on file handles on windows') def f(): readend, writeend = os.pipe() try: @@ -72,6 +75,8 @@ interpret(f, []) def test_select_timeout(): + if os.name == 'nt': + py.test.skip('cannot select on file handles on windows') from time import time def f(): # once there was a bug where the sleeping time was doubled diff --git a/rpython/tool/ansi_mandelbrot.py b/rpython/tool/ansi_mandelbrot.py --- a/rpython/tool/ansi_mandelbrot.py +++ b/rpython/tool/ansi_mandelbrot.py @@ -14,8 +14,12 @@ """ -palette = [39, 34, 35, 36, 31, 33, 32, 37] - +import os +if os.environ.get('TERM', 'dumb').find('256') > 0: + from ansiramp import ansi_ramp80 + palette = map(lambda x: "38;5;%d" % x, ansi_ramp80) +else: + palette = [39, 34, 35, 36, 31, 33, 32, 37] colour_range = None # used for debugging diff --git a/rpython/tool/ansiramp.py b/rpython/tool/ansiramp.py new file mode 100755 --- /dev/null +++ b/rpython/tool/ansiramp.py @@ -0,0 +1,29 @@ +#! 
/usr/bin/env python +import colorsys + +def hsv2ansi(h, s, v): + # h: 0..1, s/v: 0..1 + if s < 0.1: + return int(v * 23) + 232 + r, g, b = map(lambda x: int(x * 5), colorsys.hsv_to_rgb(h, s, v)) + return 16 + (r * 36) + (g * 6) + b + +def ramp_idx(i, num): + assert num > 0 + i0 = float(i) / num + h = 0.57 + i0 + s = 1 - pow(i0,3) + v = 1 + return hsv2ansi(h, s, v) + +def ansi_ramp(num): + return [ramp_idx(i, num) for i in range(num)] + +ansi_ramp80 = ansi_ramp(80) + +if __name__ == '__main__': + import sys + from py.io import ansi_print + colors = int(sys.argv[1]) if len(sys.argv) > 1 else 80 + for col in range(colors): + ansi_print('#', "38;5;%d" % ramp_idx(col, colors), newline=False, flush=True) diff --git a/rpython/tool/sourcetools.py b/rpython/tool/sourcetools.py --- a/rpython/tool/sourcetools.py +++ b/rpython/tool/sourcetools.py @@ -6,7 +6,6 @@ # XXX We should try to generalize and single out one approach to dynamic # XXX code compilation. -import types import sys, os, inspect, new import py @@ -296,40 +295,3 @@ result.func_defaults = f.func_defaults result.func_dict.update(f.func_dict) return result - - -def _convert_const_maybe(x, encoding): - if isinstance(x, str): - return x.decode(encoding) - elif isinstance(x, tuple): - items = [_convert_const_maybe(item, encoding) for item in x] - return tuple(items) - return x - -def with_unicode_literals(fn=None, **kwds): - """Decorator that replace all string literals with unicode literals. - Similar to 'from __future__ import string literals' at function level. - Useful to limit changes in the py3k branch. - """ - encoding = kwds.pop('encoding', 'ascii') - if kwds: - raise TypeError("Unexpected keyword argument(s): %s" % ', '.join(kwds.keys())) - def decorator(fn): - co = fn.func_code - new_consts = [] - for const in co.co_consts: - new_consts.append(_convert_const_maybe(const, encoding)) - new_consts = tuple(new_consts) - new_code = types.CodeType(co.co_argcount, co.co_nlocals, co.co_stacksize, - co.co_flags, co.co_code, new_consts, co.co_names, - co.co_varnames, co.co_filename, co.co_name, - co.co_firstlineno, co.co_lnotab) - fn.func_code = new_code - return fn - # - # support the usage of @with_unicode_literals instead of @with_unicode_literals() - if fn is not None: - assert type(fn) is types.FunctionType - return decorator(fn) - else: - return decorator diff --git a/rpython/tool/test/test_sourcetools.py b/rpython/tool/test/test_sourcetools.py --- a/rpython/tool/test/test_sourcetools.py +++ b/rpython/tool/test/test_sourcetools.py @@ -1,7 +1,5 @@ -# -*- encoding: utf-8 -*- -import py from rpython.tool.sourcetools import ( - func_with_new_name, func_renamer, rpython_wrapper, with_unicode_literals) + func_renamer, func_with_new_name, rpython_wrapper) def test_rename(): def f(x, y=5): @@ -57,30 +55,3 @@ ('decorated', 40, 2), ('bar', 40, 2), ] - - -def test_with_unicode_literals(): - @with_unicode_literals() - def foo(): - return 'hello' - assert type(foo()) is unicode - # - @with_unicode_literals - def foo(): - return 'hello' - assert type(foo()) is unicode - # - def foo(): - return 'hello àèì' - py.test.raises(UnicodeDecodeError, "with_unicode_literals(foo)") - # - @with_unicode_literals(encoding='utf-8') - def foo(): - return 'hello àèì' - assert foo() == u'hello àèì' - # - @with_unicode_literals - def foo(): - return ('a', 'b') - assert type(foo()[0]) is unicode - diff --git a/rpython/translator/c/dlltool.py b/rpython/translator/c/dlltool.py --- a/rpython/translator/c/dlltool.py +++ b/rpython/translator/c/dlltool.py @@ -21,7 +21,8 @@ 
entrypoints.append(getfunctionptr(graph)) return entrypoints - def gen_makefile(self, targetdir, exe_name=None): + def gen_makefile(self, targetdir, exe_name=None, + headers_to_precompile=[]): pass # XXX finish def compile(self): @@ -30,6 +31,8 @@ extsymeci = ExternalCompilationInfo(export_symbols=export_symbols) self.eci = self.eci.merge(extsymeci) files = [self.c_source_filename] + self.extrafiles + files += self.eventually_copy(self.eci.separate_module_files) + self.eci.separate_module_files = () oname = self.name self.so_name = self.translator.platform.compile(files, self.eci, standalone=False, diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -260,12 +260,13 @@ defines['PYPY_MAIN_FUNCTION'] = "pypy_main_startup" self.eci = self.eci.merge(ExternalCompilationInfo( export_symbols=["pypy_main_startup", "pypy_debug_file"])) - self.eci, cfile, extra = gen_source(db, modulename, targetdir, - self.eci, defines=defines, - split=self.split) + self.eci, cfile, extra, headers_to_precompile = \ + gen_source(db, modulename, targetdir, + self.eci, defines=defines, split=self.split) self.c_source_filename = py.path.local(cfile) self.extrafiles = self.eventually_copy(extra) - self.gen_makefile(targetdir, exe_name=exe_name) + self.gen_makefile(targetdir, exe_name=exe_name, + headers_to_precompile=headers_to_precompile) return cfile def eventually_copy(self, cfiles): @@ -375,18 +376,22 @@ self._compiled = True return self.executable_name - def gen_makefile(self, targetdir, exe_name=None): - cfiles = [self.c_source_filename] + self.extrafiles + def gen_makefile(self, targetdir, exe_name=None, headers_to_precompile=[]): + module_files = self.eventually_copy(self.eci.separate_module_files) + self.eci.separate_module_files = [] + cfiles = [self.c_source_filename] + self.extrafiles + list(module_files) if exe_name is not None: exe_name = targetdir.join(exe_name) mk = self.translator.platform.gen_makefile( cfiles, self.eci, path=targetdir, exe_name=exe_name, + headers_to_precompile=headers_to_precompile, + no_precompile_cfiles = module_files, shared=self.config.translation.shared) if self.has_profopt(): profopt = self.config.translation.profopt - mk.definition('ABS_TARGET', '$(shell python -c "import sys,os; print os.path.abspath(sys.argv[1])" $(TARGET))') + mk.definition('ABS_TARGET', str(targetdir.join('$(TARGET)'))) mk.definition('DEFAULT_TARGET', 'profopt') mk.definition('PROFOPT', profopt) @@ -427,8 +432,8 @@ mk.definition('ASMFILES', sfiles) mk.definition('ASMLBLFILES', lblsfiles) mk.definition('GCMAPFILES', gcmapfiles) - if sys.platform == 'win32': - mk.definition('DEBUGFLAGS', '/MD /Zi') + if self.translator.platform.name == 'msvc': + mk.definition('DEBUGFLAGS', '-MD -Zi') else: if self.config.translation.shared: mk.definition('DEBUGFLAGS', '-O2 -fomit-frame-pointer -g -fPIC') @@ -484,11 +489,11 @@ mk.rule('.PRECIOUS', '%.s', "# don't remove .s files if Ctrl-C'ed") else: - if sys.platform == 'win32': - mk.definition('DEBUGFLAGS', '/MD /Zi') + if self.translator.platform.name == 'msvc': + mk.definition('DEBUGFLAGS', '-MD -Zi') else: mk.definition('DEBUGFLAGS', '-O1 -g') - if sys.platform == 'win32': + if self.translator.platform.name == 'msvc': mk.rule('debug_target', 'debugmode_$(DEFAULT_TARGET)', 'rem') else: mk.rule('debug_target', '$(TARGET)', '#') @@ -511,6 +516,7 @@ def __init__(self, database): self.database = database self.extrafiles = [] + self.headers_to_precompile = [] self.path = None 
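(The genc.py hunk continues just below.) The thread running through these genc changes is plumbing: gen_source() now also returns the list of generated .h files, CBuilder.gen_makefile() forwards that list to the platform backend as headers_to_precompile, and the eci's separate_module_files are passed along as no_precompile_cfiles because, per the comment in the windows.py hunk further below, those files cannot use the precompiled headers. A simplified, hypothetical sketch of that flow (not the real genc API; the names here are invented for illustration):

import os

def collect_generated_headers(targetdir, generated_names):
    # in the real SourceGenerator, every .h file written under the
    # target directory is remembered for possible precompilation
    return [os.path.join(targetdir, name)
            for name in generated_names if name.endswith('.h')]

def gen_makefile_sketch(platform, cfiles, eci, targetdir, headers, modfiles):
    # the platform backend decides what to do with the extra arguments;
    # judging from the later hunks, only the MSVC backend acts on them
    return platform.gen_makefile(cfiles, eci, path=targetdir,
                                 headers_to_precompile=headers,
                                 no_precompile_cfiles=modfiles)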
self.namespace = NameManager() @@ -539,6 +545,8 @@ filepath = self.path.join(name) if name.endswith('.c'): self.extrafiles.append(filepath) + if name.endswith('.h'): + self.headers_to_precompile.append(filepath) return filepath.open('w') def getextrafiles(self): @@ -686,11 +694,11 @@ print >> fc, '/***********************************************************/' print >> fc, '/*** Implementations ***/' print >> fc - print >> fc, '#define PYPY_FILE_NAME "%s"' % name print >> fc, '#include "common_header.h"' print >> fc, '#include "structdef.h"' print >> fc, '#include "forwarddecl.h"' print >> fc, '#include "preimpl.h"' + print >> fc, '#define PYPY_FILE_NAME "%s"' % name print >> fc, '#include "src/g_include.h"' print >> fc print >> fc, MARKER @@ -732,12 +740,14 @@ print >> f, "#endif" def gen_preimpl(f, database): + f.write('#ifndef _PY_PREIMPLE_H\n#define _PY_PREIMPL_H\n') if database.translator is None or database.translator.rtyper is None: return preimplementationlines = pre_include_code_lines( database, database.translator.rtyper) for line in preimplementationlines: print >> f, line + f.write('#endif /* _PY_PREIMPL_H */\n') def gen_startupcode(f, database): # generate the start-up code and put it into a function @@ -799,6 +809,7 @@ f = filename.open('w') incfilename = targetdir.join('common_header.h') fi = incfilename.open('w') + fi.write('#ifndef _PY_COMMON_HEADER_H\n#define _PY_COMMON_HEADER_H\n') # # Header @@ -811,6 +822,7 @@ eci.write_c_header(fi) print >> fi, '#include "src/g_prerequisite.h"' + fi.write('#endif /* _PY_COMMON_HEADER_H*/\n') fi.close() @@ -822,6 +834,8 @@ sg.set_strategy(targetdir, split) database.prepare_inline_helpers() sg.gen_readable_parts_of_source(f) + headers_to_precompile = sg.headers_to_precompile[:] + headers_to_precompile.insert(0, incfilename) gen_startupcode(f, database) f.close() @@ -834,5 +848,4 @@ eci = add_extra_files(eci) eci = eci.convert_sources_to_files() - files, eci = eci.get_module_files() - return eci, filename, sg.getextrafiles() + list(files) + return eci, filename, sg.getextrafiles(), headers_to_precompile diff --git a/rpython/translator/c/test/test_extfunc.py b/rpython/translator/c/test/test_extfunc.py --- a/rpython/translator/c/test/test_extfunc.py +++ b/rpython/translator/c/test/test_extfunc.py @@ -65,7 +65,8 @@ f1 = compile(does_stuff, []) f1() - assert open(filename, 'r').read() == "hello world\n" + with open(filename, 'r') as fid: + assert fid.read() == "hello world\n" os.unlink(filename) def test_big_read(): @@ -296,8 +297,10 @@ os.chdir(path) return os.getcwd() f1 = compile(does_stuff, [str]) - # different on windows please - assert f1('/tmp') == os.path.realpath('/tmp') + if os.name == 'nt': + assert f1(os.environment['TEMP']) == os.path.realpath(os.environment['TEMP']) + else: + assert f1('/tmp') == os.path.realpath('/tmp') def test_mkdir_rmdir(): def does_stuff(path, delete): diff --git a/rpython/translator/c/test/test_newgc.py b/rpython/translator/c/test/test_newgc.py --- a/rpython/translator/c/test/test_newgc.py +++ b/rpython/translator/c/test/test_newgc.py @@ -658,7 +658,8 @@ def test_open_read_write_seek_close(self): self.run('open_read_write_seek_close') - assert open(self.filename, 'r').read() == "hello world\n" + with open(self.filename, 'r') as fid: + assert fid.read() == "hello world\n" os.unlink(self.filename) def define_callback_with_collect(cls): diff --git a/rpython/translator/platform/__init__.py b/rpython/translator/platform/__init__.py --- a/rpython/translator/platform/__init__.py +++ 
b/rpython/translator/platform/__init__.py @@ -100,7 +100,8 @@ return ExecutionResult(returncode, stdout, stderr) def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False): + shared=False, headers_to_precompile=[], + no_precompile_cfiles = []): raise NotImplementedError("Pure abstract baseclass") def __repr__(self): diff --git a/rpython/translator/platform/darwin.py b/rpython/translator/platform/darwin.py --- a/rpython/translator/platform/darwin.py +++ b/rpython/translator/platform/darwin.py @@ -50,14 +50,17 @@ return ["-Wl,-exported_symbols_list,%s" % (response_file,)] def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False): + shared=False, headers_to_precompile=[], + no_precompile_cfiles = []): # ensure frameworks are passed in the Makefile fs = self._frameworks(eci.frameworks) if len(fs) > 0: # concat (-framework, FrameworkName) pairs self.extra_libs += tuple(map(" ".join, zip(fs[::2], fs[1::2]))) mk = super(Darwin, self).gen_makefile(cfiles, eci, exe_name, path, - shared) + shared=shared, + headers_to_precompile=headers_to_precompile, + no_precompile_cfiles = no_precompile_cfiles) return mk diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py --- a/rpython/translator/platform/posix.py +++ b/rpython/translator/platform/posix.py @@ -83,7 +83,8 @@ return [entry[2:] for entry in out.split()] def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False): + shared=False, headers_to_precompile=[], + no_precompile_cfiles = []): cfiles = self._all_cfiles(cfiles, eci) if path is None: diff --git a/rpython/translator/platform/test/test_distutils.py b/rpython/translator/platform/test/test_distutils.py --- a/rpython/translator/platform/test/test_distutils.py +++ b/rpython/translator/platform/test/test_distutils.py @@ -11,3 +11,7 @@ def test_900_files(self): py.test.skip('Makefiles not suppoerted') + + def test_precompiled_headers(self): + py.test.skip('Makefiles not suppoerted') + diff --git a/rpython/translator/platform/test/test_makefile.py b/rpython/translator/platform/test/test_makefile.py --- a/rpython/translator/platform/test/test_makefile.py +++ b/rpython/translator/platform/test/test_makefile.py @@ -1,7 +1,10 @@ from rpython.translator.platform.posix import GnuMakefile as Makefile +from rpython.translator.platform import host +from rpython.tool.udir import udir +from rpython.translator.tool.cbuild import ExternalCompilationInfo from StringIO import StringIO -import re +import re, sys, py def test_simple_makefile(): m = Makefile() @@ -29,3 +32,112 @@ val = s.getvalue() assert not re.search('CC += +xxx', val, re.M) assert re.search('CC += +yyy', val, re.M) + +class TestMakefile(object): + platform = host + strict_on_stderr = True + + def check_res(self, res, expected='42\n'): + assert res.out == expected + if self.strict_on_stderr: + assert res.err == '' + assert res.returncode == 0 + + def test_900_files(self): + txt = '#include \n' + for i in range(900): + txt += 'int func%03d();\n' % i + txt += 'int main() {\n int j=0;' + for i in range(900): + txt += ' j += func%03d();\n' % i + txt += ' printf("%d\\n", j);\n' + txt += ' return 0;};\n' + cfile = udir.join('test_900_files.c') + cfile.write(txt) + cfiles = [cfile] + for i in range(900): + cfile2 = udir.join('implement%03d.c' %i) + cfile2.write(''' + int func%03d() + { + return %d; + } + ''' % (i, i)) + cfiles.append(cfile2) + mk = self.platform.gen_makefile(cfiles, ExternalCompilationInfo(), path=udir) + mk.write() + 
self.platform.execute_makefile(mk) + res = self.platform.execute(udir.join('test_900_files')) + self.check_res(res, '%d\n' %sum(range(900))) + + def test_precompiled_headers(self): + if self.platform.cc != 'cl.exe': + py.test.skip("Only MSVC profits from precompiled headers") + import time + tmpdir = udir.join('precompiled_headers').ensure(dir=1) + # Create an eci that should not use precompiled headers + eci = ExternalCompilationInfo(include_dirs=[tmpdir]) + main_c = tmpdir.join('main_no_pch.c') + eci.separate_module_files = [main_c] + ncfiles = 10 + nprecompiled_headers = 20 + txt = '' + for i in range(ncfiles): + txt += "int func%03d();\n" % i + txt += "\nint main(int argc, char * argv[])\n" + txt += "{\n int i=0;\n" + for i in range(ncfiles): + txt += " i += func%03d();\n" % i + txt += ' printf("%d\\n", i);\n' + txt += " return 0;\n};\n" + main_c.write(txt) + # Create some large headers with dummy functions to be precompiled + cfiles_precompiled_headers = [] + for i in range(nprecompiled_headers): + pch_name =tmpdir.join('pcheader%03d.h' % i) + txt = '#ifndef PCHEADER%03d_H\n#define PCHEADER%03d_H\n' %(i, i) + for j in range(3000): + txt += "int pcfunc%03d_%03d();\n" %(i, j) + txt += '#endif' + pch_name.write(txt) + cfiles_precompiled_headers.append(pch_name) + # Create some cfiles with headers we want precompiled + cfiles = [] + for i in range(ncfiles): + c_name =tmpdir.join('implement%03d.c' % i) + txt = '' + for pch_name in cfiles_precompiled_headers: + txt += '#include "%s"\n' % pch_name + txt += "int func%03d(){ return %d;};\n" % (i, i) + c_name.write(txt) + cfiles.append(c_name) + if sys.platform == 'win32': + clean = ('clean', '', 'for %f in ( $(OBJECTS) $(TARGET) ) do @if exist %f del /f %f') + get_time = time.clock + else: + clean = ('clean', '', 'rm -f $(OBJECTS) $(TARGET) ') + get_time = time.time + #write a non-precompiled header makefile + mk = self.platform.gen_makefile(cfiles, eci, path=tmpdir) + mk.rule(*clean) + mk.write() + t0 = get_time() + self.platform.execute_makefile(mk) + t1 = get_time() + t_normal = t1 - t0 + self.platform.execute_makefile(mk, extra_opts=['clean']) + # Write a super-duper makefile with precompiled headers + mk = self.platform.gen_makefile(cfiles, eci, path=tmpdir, + headers_to_precompile=cfiles_precompiled_headers,) + mk.rule(*clean) + mk.write() + t0 = get_time() + self.platform.execute_makefile(mk) + t1 = get_time() + t_precompiled = t1 - t0 + res = self.platform.execute(mk.exe_name) + self.check_res(res, '%d\n' %sum(range(ncfiles))) + print "precompiled haeder 'make' time %.2f, non-precompiled header time %.2f" %(t_precompiled, t_normal) + assert t_precompiled < t_normal * 0.5 + + diff --git a/rpython/translator/platform/test/test_platform.py b/rpython/translator/platform/test/test_platform.py --- a/rpython/translator/platform/test/test_platform.py +++ b/rpython/translator/platform/test/test_platform.py @@ -59,34 +59,6 @@ res = self.platform.execute(executable) self.check_res(res) - def test_900_files(self): - txt = '#include \n' - for i in range(900): - txt += 'int func%03d();\n' % i - txt += 'int main() {\n int j=0;' - for i in range(900): - txt += ' j += func%03d();\n' % i - txt += ' printf("%d\\n", j);\n' - txt += ' return 0;};\n' - cfile = udir.join('test_900_files.c') - cfile.write(txt) - cfiles = [cfile] - for i in range(900): - cfile2 = udir.join('implement%03d.c' %i) - cfile2.write(''' - int func%03d() - { - return %d; - } - ''' % (i, i)) - cfiles.append(cfile2) - mk = self.platform.gen_makefile(cfiles, ExternalCompilationInfo(), 
path=udir) - mk.write() - self.platform.execute_makefile(mk) - res = self.platform.execute(udir.join('test_900_files')) - self.check_res(res, '%d\n' %sum(range(900))) - - def test_nice_errors(self): cfile = udir.join('test_nice_errors.c') cfile.write('') diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -249,7 +249,8 @@ def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False): + shared=False, headers_to_precompile=[], + no_precompile_cfiles = []): cfiles = self._all_cfiles(cfiles, eci) if path is None: @@ -313,20 +314,60 @@ ('CC_LINK', self.link), ('LINKFILES', eci.link_files), ('MASM', self.masm), + ('MAKE', 'nmake.exe'), ('_WIN32', '1'), ] if self.x64: definitions.append(('_WIN64', '1')) + rules = [ + ('all', '$(DEFAULT_TARGET)', []), + ('.asm.obj', '', '$(MASM) /nologo /Fo$@ /c $< $(INCLUDEDIRS)'), + ] + + if len(headers_to_precompile)>0: + stdafx_h = path.join('stdafx.h') + txt = '#ifndef PYPY_STDAFX_H\n' + txt += '#define PYPY_STDAFX_H\n' + txt += '\n'.join(['#include "' + m.pathrel(c) + '"' for c in headers_to_precompile]) + txt += '\n#endif\n' + stdafx_h.write(txt) + stdafx_c = path.join('stdafx.c') + stdafx_c.write('#include "stdafx.h"\n') + definitions.append(('CREATE_PCH', '/Ycstdafx.h /Fpstdafx.pch /FIstdafx.h')) + definitions.append(('USE_PCH', '/Yustdafx.h /Fpstdafx.pch /FIstdafx.h')) + rules.append(('$(OBJECTS)', 'stdafx.pch', [])) + rules.append(('stdafx.pch', 'stdafx.h', + '$(CC) stdafx.c /c /nologo $(CFLAGS) $(CFLAGSEXTRA) ' + '$(CREATE_PCH) $(INCLUDEDIRS)')) + rules.append(('.c.obj', '', + '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) $(USE_PCH) ' + '/Fo$@ /c $< $(INCLUDEDIRS)')) + #Do not use precompiled headers for some files + #rules.append((r'{..\module_cache}.c{..\module_cache}.obj', '', + # '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) /Fo$@ /c $< $(INCLUDEDIRS)')) + # nmake cannot handle wildcard target specifications, so we must + # create a rule for compiling each file from eci since they cannot use + # precompiled headers :( + no_precompile = [] + for f in list(no_precompile_cfiles): + f = m.pathrel(py.path.local(f)) + if f not in no_precompile and f.endswith('.c'): + no_precompile.append(f) + target = f[:-1] + 'obj' + rules.append((target, f, + '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) ' + '/Fo%s /c %s $(INCLUDEDIRS)' %(target, f))) + + else: + rules.append(('.c.obj', '', + '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) ' + '/Fo$@ /c $< $(INCLUDEDIRS)')) + + for args in definitions: m.definition(*args) - rules = [ - ('all', '$(DEFAULT_TARGET)', []), - ('.c.obj', '', '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) /Fo$@ /c $< $(INCLUDEDIRS)'), - ('.asm.obj', '', '$(MASM) /nologo /Fo$@ /c $< $(INCLUDEDIRS)'), - ] - for rule in rules: m.rule(*rule) @@ -371,7 +412,7 @@ 'mt.exe -nologo -manifest $*.manifest -outputresource:$@;1', ]) m.rule('debugmode_$(DEFAULT_TARGET)', ['debugmode_$(TARGET)', 'main.obj'], - ['$(CC_LINK) /nologo /DEBUG main.obj $(SHARED_IMPORT_LIB) /out:$@' + ['$(CC_LINK) /nologo /DEBUG main.obj debugmode_$(SHARED_IMPORT_LIB) /out:$@' ]) return m @@ -392,6 +433,25 @@ self._handle_error(returncode, stdout, stderr, path.join('make')) +class WinDefinition(posix.Definition): + def write(self, f): + def write_list(prefix, lst): + lst = lst or [''] + for i, fn in enumerate(lst): + print >> f, prefix, fn, + if i < len(lst)-1: + print >> f, '\\' + else: + print >> f + prefix = ' ' * len(prefix) + name, value = self.name, self.value 
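(The NMakefile/WinDefinition part of this hunk continues just below.) The rules added above follow the usual cl.exe precompiled-header scheme: a synthetic stdafx.h includes every header in headers_to_precompile, stdafx.c is compiled once with /Yc to produce stdafx.pch, and the generic .c.obj rule then compiles everything else with /Yu plus /FI so each translation unit consumes the same .pch; files listed in no_precompile_cfiles get individual rules without those flags because, as the comment in the hunk notes, they cannot use the precompiled headers. A rough sketch of how the stdafx pair could be emitted (simplified from the windows.py code above; the relpath default here is an invented stand-in for the makefile's pathrel helper):

import os

def write_stdafx(path, headers_to_precompile, relpath=os.path.basename):
    # aggregate header: force-including it via /FIstdafx.h stands in for
    # including every generated header individually
    lines = ['#ifndef PYPY_STDAFX_H', '#define PYPY_STDAFX_H']
    lines += ['#include "%s"' % relpath(h) for h in headers_to_precompile]
    lines.append('#endif')
    with open(os.path.join(path, 'stdafx.h'), 'w') as f:
        f.write('\n'.join(lines) + '\n')
    # a minimal translation unit whose only job is to produce stdafx.pch
    with open(os.path.join(path, 'stdafx.c'), 'w') as f:
        f.write('#include "stdafx.h"\n')

The accompanying test_precompiled_headers above then checks that, on MSVC, the precompiled-header build is at least twice as fast as the plain one.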
+ if isinstance(value, str): + f.write('%s = %s\n' % (name, value)) + else: + write_list('%s =' % (name,), value) + f.write('\n') + + class NMakefile(posix.GnuMakefile): def write(self, out=None): # nmake expands macros when it parses rules. @@ -410,6 +470,14 @@ if out is None: f.close() + def definition(self, name, value): + defs = self.defs + defn = WinDefinition(name, value) + if name in defs: + self.lines[defs[name]] = defn + else: + defs[name] = len(self.lines) + self.lines.append(defn) class MingwPlatform(posix.BasePosix): name = 'mingw32' From noreply at buildbot.pypy.org Sat Feb 15 00:42:08 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 15 Feb 2014 00:42:08 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: fix Message-ID: <20140214234208.BFDBE1C318B@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r69142:d1533165b692 Date: 2014-02-14 15:41 -0800 http://bitbucket.org/pypy/pypy/changeset/d1533165b692/ Log: fix diff --git a/pypy/objspace/std/boolobject.py b/pypy/objspace/std/boolobject.py --- a/pypy/objspace/std/boolobject.py +++ b/pypy/objspace/std/boolobject.py @@ -6,7 +6,8 @@ from rpython.tool.sourcetools import func_renamer, func_with_new_name from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec -from pypy.objspace.std.intobject import IntMethods, W_AbstractIntObject +from pypy.objspace.std.intobject import ( + IntMethods, W_AbstractIntObject, W_IntObject) from pypy.objspace.std.stdtypedef import StdTypeDef @@ -53,13 +54,13 @@ def make_bitwise_binop(opname): descr_name = 'descr_' + opname - super_op = getattr(W_AbstractIntObject, descr_name) + int_op = getattr(W_IntObject, descr_name) op = getattr(operator, opname + '_' if opname in ('and', 'or') else opname) @func_renamer(descr_name) def descr_binop(self, space, w_other): if not isinstance(w_other, W_BoolObject): - return super_op(self, space, w_other) + return int_op(self.int(space), space, w_other) return space.newbool(op(self.boolval, w_other.boolval)) return descr_binop, func_with_new_name(descr_binop, 'descr_r' + opname) diff --git a/pypy/objspace/std/test/test_boolobject.py b/pypy/objspace/std/test/test_boolobject.py --- a/pypy/objspace/std/test/test_boolobject.py +++ b/pypy/objspace/std/test/test_boolobject.py @@ -60,6 +60,8 @@ assert True ^ True is False assert False ^ False is False assert True ^ False is True + assert True & 1 == 1 + assert False & 0 == 0 & 0 def test_new(self): assert bool.__new__(bool, "hi") is True From noreply at buildbot.pypy.org Sat Feb 15 02:00:57 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 15 Feb 2014 02:00:57 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: adapt from 0e0d08198110 which I previously missed Message-ID: <20140215010057.472131D22FA@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r69143:78aecefdc562 Date: 2014-02-14 17:00 -0800 http://bitbucket.org/pypy/pypy/changeset/78aecefdc562/ Log: adapt from 0e0d08198110 which I previously missed diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -536,7 +536,7 @@ def _string_to_w_long(space, w_longtype, w_source, string, base=10): try: - bigint = rbigint.fromstr(string, base) + bigint = rbigint.fromstr2(string, base) except ParseStringError as e: from pypy.objspace.std.intobject import wrap_parsestringerror raise wrap_parsestringerror(space, e, w_source) From noreply at 
buildbot.pypy.org Sat Feb 15 03:18:46 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Sat, 15 Feb 2014 03:18:46 +0100 (CET) Subject: [pypy-commit] pypy py3k: 2to3 Message-ID: <20140215021846.8440B1C0178@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: py3k Changeset: r69144:84b7dca9f936 Date: 2014-02-14 18:17 -0800 http://bitbucket.org/pypy/pypy/changeset/84b7dca9f936/ Log: 2to3 diff --git a/pypy/module/__pypy__/test/test_locals2fast.py b/pypy/module/__pypy__/test/test_locals2fast.py --- a/pypy/module/__pypy__/test/test_locals2fast.py +++ b/pypy/module/__pypy__/test/test_locals2fast.py @@ -61,7 +61,7 @@ def check_co_vars(a): frame = sys._getframe() def function2(): - print a + print(a) assert 'a' in frame.f_code.co_cellvars frame = sys._getframe() frame.f_locals['a'] = 50 From noreply at buildbot.pypy.org Sat Feb 15 10:31:20 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Feb 2014 10:31:20 +0100 (CET) Subject: [pypy-commit] pypy default: Uh. Long time we don't use "make linuxmemchk" any more. Message-ID: <20140215093120.395991C1504@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69145:8e754763d878 Date: 2014-02-15 10:30 +0100 http://bitbucket.org/pypy/pypy/changeset/8e754763d878/ Log: Uh. Long time we don't use "make linuxmemchk" any more. diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -402,7 +402,7 @@ ('debug_exc', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DDO_LOG_EXC" debug_target'), ('debug_mem', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DPYPY_USE_TRIVIAL_MALLOC" debug_target'), ('no_obmalloc', '', '$(MAKE) CFLAGS="-g -O2 -DRPY_ASSERT -DPYPY_NO_OBMALLOC" $(TARGET)'), - ('linuxmemchk', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DPPY_USE_LINUXMEMCHK" debug_target'), + ('linuxmemchk', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DPYPY_USE_LINUXMEMCHK" debug_target'), ('llsafer', '', '$(MAKE) CFLAGS="-O2 -DRPY_LL_ASSERT" $(TARGET)'), ('lldebug', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'), ('lldebug0','', '$(MAKE) CFLAGS="-O0 $(DEBUGFLAGS) -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'), From noreply at buildbot.pypy.org Sat Feb 15 15:49:12 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 15 Feb 2014 15:49:12 +0100 (CET) Subject: [pypy-commit] pypy NonConstant: hg merge default Message-ID: <20140215144912.4791D1C01DE@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: NonConstant Changeset: r69146:f61dac5d3b9e Date: 2014-02-15 14:48 +0000 http://bitbucket.org/pypy/pypy/changeset/f61dac5d3b9e/ Log: hg merge default diff --git a/include/PyPy.h b/include/PyPy.h new file mode 100644 --- /dev/null +++ b/include/PyPy.h @@ -0,0 +1,54 @@ +#ifndef _PYPY_H_ +#define _PYPY_H_ + +/* This header is meant to be included in programs that use PyPy as an + embedded library. */ + +#ifdef __cplusplus +extern "C" { +#endif + + +/* You should call this first once. */ +void rpython_startup_code(void); + + +/* Initialize the home directory of PyPy. It is necessary to call this. + + Call it with "home" being the file name of the libpypy.so, for + example; it will be used as a starting point when searching for the + lib-python and lib_pypy directories. They are searched from + "home/..", "home/../..", etc. Returns 0 if everything was fine. If + an error occurs, returns 1 and (if verbose != 0) prints some + information to stderr. 
+ */ +int pypy_setup_home(char *home, int verbose); + + +/* If your program has multiple threads, then you need to call + pypy_init_threads() once at init time, and then pypy_thread_attach() + once in each other thread that just started and in which you want to + run Python code (including via callbacks, see below). + */ +void pypy_init_threads(void); +void pypy_thread_attach(void); + + +/* The main entry point: executes "source" as plain Python code. + Returns 0 if everything was fine. If a Python exception is + uncaught, it is printed to stderr and 1 is returned. + + Usually, the Python code from "source" should use cffi to fill in + global variables of "function pointer" type in your program. Use + cffi callbacks to do so. Once it is done, there is no need to call + pypy_execute_source() any more: from C, you call directly the + functions (which are "callbacks" from the point of view of Python). + */ +int pypy_execute_source(char *source); + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -103,8 +103,7 @@ .. _`extension modules`: cpython_differences.html#extension-modules .. _`cpython differences`: cpython_differences.html -.. _`compatibility wiki`: -.. https://bitbucket.org/pypy/compatibility/wiki/Home +.. _`compatibility wiki`: https://bitbucket.org/pypy/compatibility/wiki/Home .. _cffi: http://cffi.readthedocs.org/ --------------------------------- diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -142,32 +142,17 @@ * `hg` -Experiment (again) with LLVM backend for RPython compilation ------------------------------------------------------------- - -We already tried working with LLVM and at the time, LLVM was not mature enough -for our needs. It's possible that this has changed, reviving the LLVM backend -(or writing new from scratch) for static compilation would be a good project. - -(On the other hand, just generating C code and using clang might be enough. -The issue with that is the so-called "asmgcc GC root finder", which has tons -of issues of this own. In my opinion (arigo), it would be definitely a -better project to try to optimize the alternative, the "shadowstack" GC root -finder, which is nicely portable. So far it gives a pypy that is around -7% slower.) - Embedding PyPy ---------------------------------------- Note: there is a basic proof-of-concept for that as a `uwsgi pypy plugin`_ Being able to embed PyPy, say with its own limited C API, would be -useful. But here is the most interesting variant, straight from -EuroPython live discussion :-) We can have a generic "libpypy.so" that -can be used as a placeholder dynamic library, and when it gets loaded, -it runs a .py module that installs (via ctypes) the interface it wants -exported. This would give us a one-size-fits-all generic .so file to be -imported by any application that wants to load .so files :-) +useful. But there is a possibly better variant: use CFFI. With some +minimal tools atop CFFI, it would be possible to write a pure Python +library, and then compile automatically from it an .so/.dll file that is +a dynamic-link library with whatever C API we want. This gives us a +one-size-fits-all generic way to make .so/.dll files from Python. .. 
_`uwsgi pypy plugin`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -57,3 +57,13 @@ mapdicts keep track of whether or not an attribute is every assigned to multiple times. If it's only assigned once then an elidable lookup is used when possible. + +.. branch: precompiled-headers +Create a Makefile using precompiled headers for MSVC platforms. +The downside is a messy nmake-compatible Makefile. Since gcc shows minimal +speedup, it was not implemented. + +.. branch: camelot +With a properly configured 256-color terminal (TERM=...-256color), the +Mandelbrot set shown during translation now uses a range of 50 colours. +Essential! diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -90,9 +90,10 @@ return f """) - @entrypoint('main', [rffi.CCHARP, lltype.Signed], c_name='pypy_setup_home') + @entrypoint('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): from pypy.module.sys.initpath import pypy_find_stdlib + verbose = rffi.cast(lltype.Signed, verbose) if ll_home: home = rffi.charp2str(ll_home) else: @@ -120,7 +121,8 @@ @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): source = rffi.charp2str(ll_source) - return _pypy_execute_source(source) + res = _pypy_execute_source(source) + return rffi.cast(rffi.INT, res) @entrypoint('main', [], c_name='pypy_init_threads') def pypy_init_threads(): diff --git a/pypy/interpreter/test/test_targetpypy.py b/pypy/interpreter/test/test_targetpypy.py --- a/pypy/interpreter/test/test_targetpypy.py +++ b/pypy/interpreter/test/test_targetpypy.py @@ -12,8 +12,10 @@ _, d = create_entry_point(space, None) execute_source = d['pypy_execute_source'] lls = rffi.str2charp("import sys; sys.modules['xyz'] = 3") - execute_source(lls) + res = execute_source(lls) lltype.free(lls, flavor='raw') + assert lltype.typeOf(res) == rffi.INT + assert rffi.cast(lltype.Signed, res) == 0 x = space.int_w(space.getitem(space.getattr(space.builtin_modules['sys'], space.wrap('modules')), space.wrap('xyz'))) @@ -24,5 +26,5 @@ # did not crash - the same globals pypy_setup_home = d['pypy_setup_home'] lls = rffi.str2charp(__file__) - pypy_setup_home(lls, 1) + pypy_setup_home(lls, rffi.cast(rffi.INT, 1)) lltype.free(lls, flavor='raw') diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -81,6 +81,7 @@ 'newdict' : 'interp_dict.newdict', 'dictstrategy' : 'interp_dict.dictstrategy', 'set_debug' : 'interp_magic.set_debug', + 'locals_to_fast' : 'interp_magic.locals_to_fast', } if sys.platform == 'win32': interpleveldefs['get_console_cp'] = 'interp_magic.get_console_cp' diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError, wrap_oserror from pypy.interpreter.gateway import unwrap_spec +from pypy.interpreter.pyframe import PyFrame from rpython.rlib.objectmodel import we_are_translated from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.typeobject import MethodCache @@ -111,3 +112,8 @@ @unwrap_spec(estimate=int) def 
add_memory_pressure(estimate): rgc.add_memory_pressure(estimate) + + at unwrap_spec(w_frame=PyFrame) +def locals_to_fast(space, w_frame): + assert isinstance(w_frame, PyFrame) + w_frame.locals2fast() diff --git a/pypy/module/__pypy__/test/test_locals2fast.py b/pypy/module/__pypy__/test/test_locals2fast.py new file mode 100644 --- /dev/null +++ b/pypy/module/__pypy__/test/test_locals2fast.py @@ -0,0 +1,81 @@ +# Tests from Fabio Zadrozny + + +class AppTestLocals2Fast: + """ + Test setting locals in one function from another function + using several approaches. + """ + + def setup_class(cls): + cls.w_save_locals = cls.space.appexec([], """(): + import sys + if '__pypy__' in sys.builtin_module_names: + import __pypy__ + save_locals = __pypy__.locals_to_fast + else: + # CPython version + import ctypes + @staticmethod + def save_locals(frame): + ctypes.pythonapi.PyFrame_LocalsToFast( + ctypes.py_object(frame), ctypes.c_int(0)) + return save_locals + """) + + def test_set_locals_using_save_locals(self): + import sys + def use_save_locals(name, value): + frame = sys._getframe().f_back + locals_dict = frame.f_locals + locals_dict[name] = value + self.save_locals(frame) + def test_method(fn): + x = 1 + # The method 'fn' should attempt to set x = 2 in the current frame. + fn('x', 2) + return x + x = test_method(use_save_locals) + assert x == 2 + + def test_frame_simple_change(self): + import sys + frame = sys._getframe() + a = 20 + frame.f_locals['a'] = 50 + self.save_locals(frame) + assert a == 50 + + def test_frame_co_freevars(self): + import sys + outer_var = 20 + def func(): + frame = sys._getframe() + frame.f_locals['outer_var'] = 50 + self.save_locals(frame) + assert outer_var == 50 + func() + + def test_frame_co_cellvars(self): + import sys + def check_co_vars(a): + frame = sys._getframe() + def function2(): + print a + assert 'a' in frame.f_code.co_cellvars + frame = sys._getframe() + frame.f_locals['a'] = 50 + self.save_locals(frame) + assert a == 50 + check_co_vars(1) + + def test_frame_change_in_inner_frame(self): + import sys + def change(f): + assert f is not sys._getframe() + f.f_locals['a'] = 50 + self.save_locals(f) + frame = sys._getframe() + a = 20 + change(frame) + assert a == 50 diff --git a/pypy/module/bz2/interp_bz2.py b/pypy/module/bz2/interp_bz2.py --- a/pypy/module/bz2/interp_bz2.py +++ b/pypy/module/bz2/interp_bz2.py @@ -31,7 +31,7 @@ _compilation_info_ = eci calling_conv = 'c' - CHECK_LIBRARY = platform.Has('dump("x", (int)&BZ2_bzCompress)') + CHECK_LIBRARY = platform.Has('dump("x", (long)&BZ2_bzCompress)') off_t = platform.SimpleType("off_t", rffi.LONGLONG) size_t = platform.SimpleType("size_t", rffi.ULONG) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -910,6 +910,8 @@ # implement function callbacks and generate function decls functions = [] pypy_decls = [] + pypy_decls.append("#ifndef _PYPY_PYPY_DECL_H\n") + pypy_decls.append("#define _PYPY_PYPY_DECL_H\n") pypy_decls.append("#ifndef PYPY_STANDALONE\n") pypy_decls.append("#ifdef __cplusplus") pypy_decls.append("extern \"C\" {") @@ -953,6 +955,7 @@ pypy_decls.append("}") pypy_decls.append("#endif") pypy_decls.append("#endif /*PYPY_STANDALONE*/\n") + pypy_decls.append("#endif /*_PYPY_PYPY_DECL_H*/\n") pypy_decl_h = udir.join('pypy_decl.h') pypy_decl_h.write('\n'.join(pypy_decls)) diff --git a/pypy/module/cpyext/include/bufferobject.h b/pypy/module/cpyext/include/bufferobject.h --- a/pypy/module/cpyext/include/bufferobject.h +++ 
b/pypy/module/cpyext/include/bufferobject.h @@ -37,7 +37,7 @@ PyObject* PyBuffer_New(Py_ssize_t size); -void _Py_init_bufferobject(void); +PyTypeObject *_Py_get_buffer_type(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pycapsule.h b/pypy/module/cpyext/include/pycapsule.h --- a/pypy/module/cpyext/include/pycapsule.h +++ b/pypy/module/cpyext/include/pycapsule.h @@ -50,7 +50,7 @@ PyAPI_FUNC(void *) PyCapsule_Import(const char *name, int no_block); -void _Py_init_capsule(void); +PyTypeObject *_Py_get_capsule_type(void); #ifdef __cplusplus } diff --git a/pypy/module/cpyext/include/pycobject.h b/pypy/module/cpyext/include/pycobject.h --- a/pypy/module/cpyext/include/pycobject.h +++ b/pypy/module/cpyext/include/pycobject.h @@ -48,7 +48,7 @@ } PyCObject; #endif -void _Py_init_pycobject(void); +PyTypeObject *_Py_get_cobject_type(void); #ifdef __cplusplus } diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py --- a/pypy/module/pypyjit/test_pypy_c/test_misc.py +++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py @@ -333,8 +333,8 @@ loop, = log.loops_by_id("struct") if sys.maxint == 2 ** 63 - 1: extra = """ - i8 = int_lt(i4, -2147483648) - guard_false(i8, descr=...) + i8 = int_ge(i4, -2147483648) + guard_true(i8, descr=...) """ else: extra = "" diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -1,3 +1,10 @@ +"""The builtin dict implementation""" + +from rpython.rlib import jit, rerased +from rpython.rlib.debug import mark_dict_non_null +from rpython.rlib.objectmodel import newlist_hint, r_dict, specialize +from rpython.tool.sourcetools import func_renamer, func_with_new_name + from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import ( @@ -7,18 +14,10 @@ from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.util import negate -from rpython.rlib import jit, rerased -from rpython.rlib.debug import mark_dict_non_null -from rpython.rlib.objectmodel import newlist_hint, r_dict, specialize -from rpython.tool.sourcetools import func_renamer, func_with_new_name - UNROLL_CUTOFF = 5 -def _is_str(space, w_key): - return space.is_w(space.type(w_key), space.w_str) - def _never_equal_to_string(space, w_lookup_type): """Handles the case of a non string key lookup. 
Types that have a sane hash/eq function should allow us to return True @@ -29,8 +28,8 @@ return (space.is_w(w_lookup_type, space.w_NoneType) or space.is_w(w_lookup_type, space.w_int) or space.is_w(w_lookup_type, space.w_bool) or - space.is_w(w_lookup_type, space.w_float) - ) + space.is_w(w_lookup_type, space.w_float)) + @specialize.call_location() def w_dict_unrolling_heuristic(w_dct): @@ -69,19 +68,18 @@ w_type = space.w_dict storage = strategy.get_empty_storage() - w_self = space.allocate_instance(W_DictMultiObject, w_type) - W_DictMultiObject.__init__(w_self, space, strategy, storage) - return w_self + w_obj = space.allocate_instance(W_DictMultiObject, w_type) + W_DictMultiObject.__init__(w_obj, space, strategy, storage) + return w_obj def __init__(self, space, strategy, storage): self.space = space self.strategy = strategy self.dstorage = storage - def __repr__(w_self): + def __repr__(self): """representation for debugging purposes""" - #print('XXXXXXX', w_self.dstorage) - return "%s(%s)" % (w_self.__class__.__name__, w_self.strategy) + return "%s(%s)" % (self.__class__.__name__, self.strategy) def unwrap(w_dict, space): result = {} @@ -98,9 +96,9 @@ return space.get_and_call_function(w_missing, w_dict, w_key) return None - def initialize_content(w_self, list_pairs_w): + def initialize_content(self, list_pairs_w): for w_k, w_v in list_pairs_w: - w_self.setitem(w_k, w_v) + self.setitem(w_k, w_v) def setitem_str(self, key, w_value): self.strategy.setitem_str(self, key, w_value) @@ -115,7 +113,8 @@ if w_fill is None: w_fill = space.w_None if space.is_w(w_type, space.w_dict): - w_dict = W_DictMultiObject.allocate_and_init_instance(space, w_type) + w_dict = W_DictMultiObject.allocate_and_init_instance(space, + w_type) byteslist = space.listview_bytes(w_keys) if byteslist is not None: @@ -312,8 +311,7 @@ try: w_key, w_value = self.popitem() except KeyError: - raise OperationError(space.w_KeyError, - space.wrap("popitem(): dictionary is empty")) + raise oefmt(space.w_KeyError, "popitem(): dictionary is empty") return space.newtuple([w_key, w_value]) @unwrap_spec(w_default=WrappedDefault(None)) @@ -597,6 +595,7 @@ def getiterkeys(self, w_dict): return iter([None]) getitervalues = getiterkeys + def getiteritems(self, w_dict): return iter([(None, None)]) @@ -615,8 +614,8 @@ space = self.space if self.len != self.dictimplementation.length(): self.len = -1 # Make this error state sticky - msg = "dictionary changed size during iteration" - raise OperationError(space.w_RuntimeError, space.wrap(msg)) + raise oefmt(space.w_RuntimeError, + "dictionary changed size during iteration") # look for the next entry if self.pos < self.len: @@ -635,14 +634,15 @@ w_value = self.dictimplementation.getitem(w_key) if w_value is None: self.len = -1 # Make this error state sticky - msg = "dictionary changed during iteration" - raise OperationError(space.w_RuntimeError, space.wrap(msg)) + raise oefmt(space.w_RuntimeError, + "dictionary changed during iteration") return (w_key, w_value) # no more entries self.dictimplementation = None return EMPTY return func_with_new_name(next, 'next_' + TP) + class BaseIteratorImplementation(object): def __init__(self, space, strategy, implementation): self.space = space @@ -665,13 +665,14 @@ class BaseItemIterator(BaseIteratorImplementation): next_item = _new_next('item') + def create_iterator_classes(dictimpl, override_next_item=None): if not hasattr(dictimpl, 'wrapkey'): - wrapkey = lambda space, key : key + wrapkey = lambda space, key: key else: wrapkey = dictimpl.wrapkey.im_func 
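(create_iterator_classes continues just below.) For context on the iterator machinery being reshuffled here: next_key, next_value and next_item are not written out three times; _new_next(TP) specialises a single template closure on the TP string and renames it with func_with_new_name, and create_iterator_classes() then supplies per-strategy wrapkey/wrapvalue defaults. A toy version of that closure-template pattern, illustrative only and not the actual dictmultiobject classes:

def make_next(kind):
    # one template specialised per 'kind', in the spirit of _new_next(TP)
    def next_specialised(self):
        key, value = self._advance()
        if kind == 'key':
            return key
        elif kind == 'value':
            return value
        return (key, value)
    next_specialised.__name__ = 'next_' + kind
    return next_specialised

class ToyIterator(object):
    def __init__(self, d):
        self._it = iter(d.items())
    def _advance(self):            # invented helper, not part of the real code
        return next(self._it)
    next_key = make_next('key')
    next_value = make_next('value')
    next_item = make_next('item')

For example, ToyIterator({'a': 1}).next_item() returns ('a', 1); the real classes do the same kind of thing but fetch entries from the dict strategy and wrap them into interpreter-level objects.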
if not hasattr(dictimpl, 'wrapvalue'): - wrapvalue = lambda space, key : key + wrapvalue = lambda space, key: key else: wrapvalue = dictimpl.wrapvalue.im_func @@ -800,7 +801,8 @@ return w_dict.getitem(w_key) def w_keys(self, w_dict): - l = [self.wrap(key) for key in self.unerase(w_dict.dstorage).iterkeys()] + l = [self.wrap(key) + for key in self.unerase(w_dict.dstorage).iterkeys()] return self.space.newlist(l) def values(self, w_dict): @@ -1036,7 +1038,8 @@ def wrapkey(space, key): return space.wrap(key) - # XXX there is no space.newlist_int yet to implement w_keys more efficiently + # XXX there is no space.newlist_int yet to implement w_keys more + # efficiently create_iterator_classes(IntDictStrategy) @@ -1071,8 +1074,7 @@ for w_pair in data_w: pair = space.fixedview(w_pair) if len(pair) != 2: - raise OperationError(space.w_ValueError, - space.wrap("sequence of pairs expected")) + raise oefmt(space.w_ValueError, "sequence of pairs expected") w_key, w_value = pair w_dict.setitem(w_key, w_value) @@ -1128,9 +1130,9 @@ ignore_for_isinstance_cache = True - def __init__(w_self, space, iteratorimplementation): - w_self.space = space - w_self.iteratorimplementation = iteratorimplementation + def __init__(self, space, iteratorimplementation): + self.space = space + self.iteratorimplementation = iteratorimplementation def descr_iter(self, space): return self @@ -1158,9 +1160,8 @@ new_inst = mod.get('dictiter_surrogate_new') w_typeobj = space.type(self) - raise OperationError( - space.w_TypeError, - space.wrap("can't pickle dictionary-keyiterator objects")) + raise oefmt(space.w_TypeError, + "can't pickle dictionary-keyiterator objects") # XXXXXX get that working again # we cannot call __init__ since we don't have the original dict @@ -1174,8 +1175,8 @@ w_clone = space.allocate_instance(W_DictMultiIterItemsObject, w_typeobj) else: - msg = "unsupported dictiter type '%s' during pickling" % (self,) - raise OperationError(space.w_TypeError, space.wrap(msg)) + raise oefmt(space.w_TypeError, + "unsupported dictiter type '%R' during pickling", self) w_clone.space = space w_clone.content = self.content w_clone.len = self.len @@ -1244,8 +1245,8 @@ # Views class W_DictViewObject(W_Root): - def __init__(w_self, space, w_dict): - w_self.w_dict = w_dict + def __init__(self, space, w_dict): + self.w_dict = w_dict def descr_repr(self, space): w_seq = space.call_function(space.w_list, self) diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -582,18 +582,18 @@ def consider_op(self, block, opindex): op = block.operations[opindex] - argcells = [self.binding(a) for a in op.args] + try: + argcells = [self.binding(a) for a in op.args] - # let's be careful about avoiding propagated SomeImpossibleValues - # to enter an op; the latter can result in violations of the - # more general results invariant: e.g. if SomeImpossibleValue enters is_ - # is_(SomeImpossibleValue, None) -> SomeBool - # is_(SomeInstance(not None), None) -> SomeBool(const=False) ... - # boom -- in the assert of setbinding() - for arg in argcells: - if isinstance(arg, annmodel.SomeImpossibleValue): - raise BlockedInference(self, op, opindex) - try: + # let's be careful about avoiding propagated SomeImpossibleValues + # to enter an op; the latter can result in violations of the + # more general results invariant: e.g. 
if SomeImpossibleValue enters is_ + # is_(SomeImpossibleValue, None) -> SomeBool + # is_(SomeInstance(not None), None) -> SomeBool(const=False) ... + # boom -- in the assert of setbinding() + for arg in argcells: + if isinstance(arg, annmodel.SomeImpossibleValue): + raise BlockedInference(self, op, opindex) resultcell = op.consider(self, *argcells) except annmodel.AnnotatorError as e: # note that UnionError is a subclass graph = self.bookkeeper.position_key[0] diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ b/rpython/annotator/test/test_annrpython.py @@ -4139,6 +4139,16 @@ a.build_types(f, [str]) assert ("Cannot prove that the object is callable" in exc.value.msg) + def test_UnionError_on_PBC(self): + l = ['a', 1] + def f(x): + l.append(x) + a = self.RPythonAnnotator() + with py.test.raises(annmodel.UnionError) as excinfo: + a.build_types(f, [int]) + assert 'Happened at file' in excinfo.value.source + assert 'Known variable annotations:' in excinfo.value.source + def test_str_format_error(self): def f(s, x): return s.format(x) diff --git a/rpython/jit/codewriter/longlong.py b/rpython/jit/codewriter/longlong.py --- a/rpython/jit/codewriter/longlong.py +++ b/rpython/jit/codewriter/longlong.py @@ -26,6 +26,7 @@ getrealfloat = lambda x: x gethash = compute_hash gethash_fast = longlong2float.float2longlong + extract_bits = longlong2float.float2longlong is_longlong = lambda TYPE: False # ------------------------------------- @@ -42,6 +43,7 @@ getrealfloat = longlong2float.longlong2float gethash = lambda xll: rarithmetic.intmask(xll - (xll >> 32)) gethash_fast = gethash + extract_bits = lambda x: x is_longlong = lambda TYPE: (TYPE is lltype.SignedLongLong or TYPE is lltype.UnsignedLongLong) diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -275,7 +275,12 @@ def same_constant(self, other): if isinstance(other, ConstFloat): - return self.value == other.value + # careful in this comparison: if self.value and other.value + # are both NaN, stored as regular floats (i.e. on 64-bit), + # then just using "==" would say False: two NaNs are always + # different from each other. 
+ return (longlong.extract_bits(self.value) == + longlong.extract_bits(other.value)) return False def nonnull(self): diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -594,11 +594,9 @@ if tobox is not None: # sanity check: see whether the current struct value # corresponds to what the cache thinks the value is - # XXX pypy with the following check fails on micronumpy, - # XXX investigate - #resbox = executor.execute(self.metainterp.cpu, self.metainterp, - # rop.GETFIELD_GC, fielddescr, box) - #assert resbox.constbox().same_constant(tobox.constbox()) + resbox = executor.execute(self.metainterp.cpu, self.metainterp, + rop.GETFIELD_GC, fielddescr, box) + assert resbox.constbox().same_constant(tobox.constbox()) return tobox resbox = self.execute_with_descr(opnum, fielddescr, box) self.metainterp.heapcache.getfield_now_known(box, fielddescr, resbox) diff --git a/rpython/jit/metainterp/test/test_history.py b/rpython/jit/metainterp/test/test_history.py --- a/rpython/jit/metainterp/test/test_history.py +++ b/rpython/jit/metainterp/test/test_history.py @@ -1,5 +1,8 @@ from rpython.jit.metainterp.history import * from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.rlib.rfloat import NAN, INFINITY +from rpython.jit.codewriter import longlong +from rpython.translator.c.test.test_standalone import StandaloneTests def test_repr(): @@ -38,3 +41,36 @@ assert not c3a.same_constant(c1b) assert not c3a.same_constant(c2b) assert c3a.same_constant(c3b) + +def test_same_constant_float(): + c1 = Const._new(12.34) + c2 = Const._new(12.34) + c3 = Const._new(NAN) + c4 = Const._new(NAN) + c5 = Const._new(INFINITY) + c6 = Const._new(INFINITY) + assert c1.same_constant(c2) + assert c3.same_constant(c4) + assert c5.same_constant(c6) + assert not c1.same_constant(c4) + assert not c1.same_constant(c6) + assert not c3.same_constant(c2) + assert not c3.same_constant(c6) + assert not c5.same_constant(c2) + assert not c5.same_constant(c4) + + +class TestZTranslated(StandaloneTests): + def test_ztranslated_same_constant_float(self): + def fn(args): + n = INFINITY + c1 = ConstFloat(longlong.getfloatstorage(n - INFINITY)) + c2 = ConstFloat(longlong.getfloatstorage(n - INFINITY)) + c3 = ConstFloat(longlong.getfloatstorage(12.34)) + if c1.same_constant(c2): + print "ok!" 
+ return 0 + + t, cbuilder = self.compile(fn) + data = cbuilder.cmdexec('') + assert "ok!\n" in data diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1837,6 +1837,11 @@ # if self.objects_with_finalizers.non_empty(): self.deal_with_objects_with_finalizers() + elif self.old_objects_with_weakrefs.non_empty(): + # Weakref support: clear the weak pointers to dying objects + # (if we call deal_with_objects_with_finalizers(), it will + # invoke invalidate_old_weakrefs() itself directly) + self.invalidate_old_weakrefs() ll_assert(not self.objects_to_trace.non_empty(), "objects_to_trace should be empty") @@ -1846,9 +1851,7 @@ self.more_objects_to_trace.delete() # - # Weakref support: clear the weak pointers to dying objects - if self.old_objects_with_weakrefs.non_empty(): - self.invalidate_old_weakrefs() + # Light finalizers if self.old_objects_with_light_finalizers.non_empty(): self.deal_with_old_objects_with_finalizers() #objects_to_trace processed fully, can move on to sweeping @@ -2206,6 +2209,12 @@ self._recursively_bump_finalization_state_from_2_to_3(y) self._recursively_bump_finalization_state_from_1_to_2(x) + # Clear the weak pointers to dying objects. Also clears them if + # they point to objects which have the GCFLAG_FINALIZATION_ORDERING + # bit set here. These are objects which will be added to + # run_finalizers(). + self.invalidate_old_weakrefs() + while marked.non_empty(): x = marked.popleft() state = self._finalization_state(x) @@ -2333,7 +2342,9 @@ ll_assert((self.header(pointing_to).tid & GCFLAG_NO_HEAP_PTRS) == 0, "registered old weakref should not " "point to a NO_HEAP_PTRS obj") - if self.header(pointing_to).tid & GCFLAG_VISITED: + tid = self.header(pointing_to).tid + if ((tid & (GCFLAG_VISITED | GCFLAG_FINALIZATION_ORDERING)) == + GCFLAG_VISITED): new_with_weakref.append(obj) else: (obj + offset).address[0] = llmemory.NULL diff --git a/rpython/memory/test/gc_test_base.py b/rpython/memory/test/gc_test_base.py --- a/rpython/memory/test/gc_test_base.py +++ b/rpython/memory/test/gc_test_base.py @@ -29,6 +29,7 @@ GC_CAN_SHRINK_ARRAY = False GC_CAN_SHRINK_BIG_ARRAY = False BUT_HOW_BIG_IS_A_BIG_STRING = 3*WORD + WREF_IS_INVALID_BEFORE_DEL_IS_CALLED = False def setup_class(cls): cls._saved_logstate = py.log._getstate() @@ -370,15 +371,23 @@ class A(object): count = 0 a = A() + expected_invalid = self.WREF_IS_INVALID_BEFORE_DEL_IS_CALLED class B(object): def __del__(self): # when __del__ is called, the weakref to myself is still valid - # in RPython (at least with most GCs; this test might be - # skipped for specific GCs) - if self.ref() is self: - a.count += 10 # ok + # in RPython with most GCs. However, this can lead to strange + # bugs with incminimark. https://bugs.pypy.org/issue1687 + # So with incminimark, we expect the opposite. 
+ if expected_invalid: + if self.ref() is None: + a.count += 10 # ok + else: + a.count = 666 # not ok else: - a.count = 666 # not ok + if self.ref() is self: + a.count += 10 # ok + else: + a.count = 666 # not ok def g(): b = B() ref = weakref.ref(b) diff --git a/rpython/memory/test/test_incminimark_gc.py b/rpython/memory/test/test_incminimark_gc.py --- a/rpython/memory/test/test_incminimark_gc.py +++ b/rpython/memory/test/test_incminimark_gc.py @@ -1,6 +1,38 @@ -from rpython.rlib.rarithmetic import LONG_BIT +from rpython.rtyper.lltypesystem import lltype +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.memory.test import test_minimark_gc class TestIncrementalMiniMarkGC(test_minimark_gc.TestMiniMarkGC): from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass + WREF_IS_INVALID_BEFORE_DEL_IS_CALLED = True + + def test_weakref_not_in_stack(self): + import weakref + class A(object): + pass + class B(object): + def __init__(self, next): + self.next = next + def g(): + a = A() + a.x = 5 + wr = weakref.ref(a) + llop.gc__collect(lltype.Void) # make everything old + assert wr() is not None + assert a.x == 5 + return wr + def f(): + ref = g() + llop.gc__collect(lltype.Void, 1) # start a major cycle + # at this point the stack is scanned, and the weakref points + # to an object not found, but still reachable: + b = ref() + llop.debug_print(lltype.Void, b) + assert b is not None + llop.gc__collect(lltype.Void) # finish the major cycle + # assert does not crash, because 'b' is still kept alive + b.x = 42 + return ref() is b + res = self.interpret(f, []) + assert res == True diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -366,6 +366,9 @@ def compute_result_annotation(self, s_l, s_sizehint): from rpython.annotator import model as annmodel + if annmodel.s_None.contains(s_l): + return # first argument is only None so far, but we + # expect a generalization later if not isinstance(s_l, annmodel.SomeList): raise annmodel.AnnotatorError("First argument must be a list") if not isinstance(s_sizehint, annmodel.SomeInteger): diff --git a/rpython/rlib/rfile.py b/rpython/rlib/rfile.py --- a/rpython/rlib/rfile.py +++ b/rpython/rlib/rfile.py @@ -29,9 +29,9 @@ OFF_T = CC['off_t'] c_open = llexternal('fopen', [rffi.CCHARP, rffi.CCHARP], lltype.Ptr(FILE)) c_close = llexternal('fclose', [lltype.Ptr(FILE)], rffi.INT) -c_write = llexternal('fwrite', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, +c_fwrite = llexternal('fwrite', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, lltype.Ptr(FILE)], rffi.SIZE_T) -c_read = llexternal('fread', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, +c_fread = llexternal('fread', [rffi.CCHARP, rffi.SIZE_T, rffi.SIZE_T, lltype.Ptr(FILE)], rffi.SIZE_T) c_feof = llexternal('feof', [lltype.Ptr(FILE)], rffi.INT) c_ferror = llexternal('ferror', [lltype.Ptr(FILE)], rffi.INT) @@ -40,13 +40,16 @@ rffi.INT) c_tmpfile = llexternal('tmpfile', [], lltype.Ptr(FILE)) c_fileno = llexternal('fileno', [lltype.Ptr(FILE)], rffi.INT) -c_ftell = llexternal('ftell', [lltype.Ptr(FILE)], lltype.Signed) +c_ftell = llexternal('ftell', [lltype.Ptr(FILE)], rffi.LONG) c_fflush = llexternal('fflush', [lltype.Ptr(FILE)], rffi.INT) c_ftruncate = llexternal('ftruncate', [rffi.INT, OFF_T], rffi.INT, macro=True) c_fgets = llexternal('fgets', [rffi.CCHARP, rffi.INT, lltype.Ptr(FILE)], rffi.CCHARP) +c_popen = llexternal('popen', [rffi.CCHARP, rffi.CCHARP], lltype.Ptr(FILE)) +c_pclose = llexternal('pclose', 
[lltype.Ptr(FILE)], rffi.INT) + BASE_BUF_SIZE = 4096 BASE_LINE_SIZE = 100 @@ -75,6 +78,21 @@ raise OSError(errno, os.strerror(errno)) return RFile(res) +def create_popen_file(command, type): + ll_command = rffi.str2charp(command) + try: + ll_type = rffi.str2charp(type) + try: + ll_f = c_popen(ll_command, ll_type) + if not ll_f: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) + finally: + lltype.free(ll_type, flavor='raw') + finally: + lltype.free(ll_command, flavor='raw') + return RPopenFile(ll_f) + class RFile(object): def __init__(self, ll_file): self.ll_file = ll_file @@ -89,30 +107,26 @@ try: # note that since we got a nonmoving buffer, it is either raw # or already cannot move, so the arithmetics below are fine - total_bytes = 0 - ll_current = ll_value - while total_bytes < len(value): - bytes = c_write(ll_current, 1, len(value) - r_uint(total_bytes), - ll_file) - if bytes == 0: - errno = rposix.get_errno() - raise OSError(errno, os.strerror(errno)) - total_bytes += bytes - ll_current = rffi.cast(rffi.CCHARP, - rffi.cast(lltype.Unsigned, ll_value) + - total_bytes) + length = len(value) + bytes = c_fwrite(ll_value, 1, length, ll_file) + if bytes != length: + errno = rposix.get_errno() + raise OSError(errno, os.strerror(errno)) finally: rffi.free_nonmovingbuffer(value, ll_value) def close(self): - if self.ll_file: + ll_f = self.ll_file + if ll_f: # double close is allowed - res = c_close(self.ll_file) self.ll_file = lltype.nullptr(FILE) + res = self._do_close(ll_f) if res == -1: errno = rposix.get_errno() raise OSError(errno, os.strerror(errno)) + _do_close = staticmethod(c_close) # overridden in RPopenFile + def read(self, size=-1): # XXX CPython uses a more delicate logic here ll_file = self.ll_file @@ -124,27 +138,25 @@ try: s = StringBuilder() while True: - returned_size = c_read(buf, 1, BASE_BUF_SIZE, ll_file) + returned_size = c_fread(buf, 1, BASE_BUF_SIZE, ll_file) + returned_size = intmask(returned_size) # is between 0 and BASE_BUF_SIZE if returned_size == 0: if c_feof(ll_file): # ok, finished return s.build() - errno = c_ferror(ll_file) - c_clearerror(ll_file) - raise OSError(errno, os.strerror(errno)) + raise _error(ll_file) s.append_charpsize(buf, returned_size) finally: lltype.free(buf, flavor='raw') else: raw_buf, gc_buf = rffi.alloc_buffer(size) try: - returned_size = c_read(raw_buf, 1, size, ll_file) + returned_size = c_fread(raw_buf, 1, size, ll_file) + returned_size = intmask(returned_size) # is between 0 and size if returned_size == 0: if not c_feof(ll_file): - errno = c_ferror(ll_file) - raise OSError(errno, os.strerror(errno)) - s = rffi.str_from_buffer(raw_buf, gc_buf, size, - rffi.cast(lltype.Signed, returned_size)) + raise _error(ll_file) + s = rffi.str_from_buffer(raw_buf, gc_buf, size, returned_size) finally: rffi.keep_buffer_alive_until_here(raw_buf, gc_buf) return s @@ -200,8 +212,7 @@ if not result: if c_feof(self.ll_file): # ok return 0 - errno = c_ferror(self.ll_file) - raise OSError(errno, os.strerror(errno)) + raise _error(self.ll_file) # # Assume that fgets() works as documented, and additionally # never writes beyond the final \0, which the CPython @@ -240,3 +251,13 @@ finally: rffi.keep_buffer_alive_until_here(raw_buf, gc_buf) raise ValueError("I/O operation on closed file") + + +class RPopenFile(RFile): + _do_close = staticmethod(c_pclose) + + +def _error(ll_file): + errno = c_ferror(ll_file) + c_clearerror(ll_file) + raise OSError(errno, os.strerror(errno)) diff --git a/rpython/rlib/rgc.py b/rpython/rlib/rgc.py --- 
a/rpython/rlib/rgc.py +++ b/rpython/rlib/rgc.py @@ -15,7 +15,6 @@ def set_max_heap_size(nbytes): """Limit the heap size to n bytes. - So far only implemented by the Boehm GC and the semispace/generation GCs. """ pass diff --git a/rpython/rlib/test/test_rfile.py b/rpython/rlib/test/test_rfile.py --- a/rpython/rlib/test/test_rfile.py +++ b/rpython/rlib/test/test_rfile.py @@ -1,5 +1,5 @@ -import os +import os, sys, py from rpython.rtyper.test.tool import BaseRtypingTest from rpython.tool.udir import udir from rpython.rlib import rfile @@ -142,6 +142,15 @@ cls.tmpdir = udir.join('test_rfile_direct') cls.tmpdir.ensure(dir=True) + def test_read_a_lot(self): + fname = str(self.tmpdir.join('file_read_a_lot')) + with open(fname, 'w') as f: + f.write('dupa' * 999) + f = rfile.create_file(fname, 'r') + s = f.read() + assert s == 'dupa' * 999 + f.close() + def test_readline(self): fname = str(self.tmpdir.join('file_readline')) j = 0 @@ -175,3 +184,15 @@ got = f.readline() assert got == '' f.close() + + +class TestPopen: + def setup_class(cls): + if sys.platform == 'win32': + py.test.skip("not for win32") + + def test_popen(self): + f = rfile.create_popen_file("python -c 'print 42'", "r") + s = f.read() + f.close() + assert s == '42\n' diff --git a/rpython/rlib/test/test_rpoll.py b/rpython/rlib/test/test_rpoll.py --- a/rpython/rlib/test/test_rpoll.py +++ b/rpython/rlib/test/test_rpoll.py @@ -1,5 +1,6 @@ import os import errno +import py from rpython.rlib.rsocket import * from rpython.rlib.rpoll import * @@ -55,6 +56,8 @@ serv.close() def test_select(): + if os.name == 'nt': + py.test.skip('cannot select on file handles on windows') def f(): readend, writeend = os.pipe() try: @@ -72,6 +75,8 @@ interpret(f, []) def test_select_timeout(): + if os.name == 'nt': + py.test.skip('cannot select on file handles on windows') from time import time def f(): # once there was a bug where the sleeping time was doubled diff --git a/rpython/tool/ansi_mandelbrot.py b/rpython/tool/ansi_mandelbrot.py --- a/rpython/tool/ansi_mandelbrot.py +++ b/rpython/tool/ansi_mandelbrot.py @@ -14,8 +14,12 @@ """ -palette = [39, 34, 35, 36, 31, 33, 32, 37] - +import os +if os.environ.get('TERM', 'dumb').find('256') > 0: + from ansiramp import ansi_ramp80 + palette = map(lambda x: "38;5;%d" % x, ansi_ramp80) +else: + palette = [39, 34, 35, 36, 31, 33, 32, 37] colour_range = None # used for debugging diff --git a/rpython/tool/ansiramp.py b/rpython/tool/ansiramp.py new file mode 100755 --- /dev/null +++ b/rpython/tool/ansiramp.py @@ -0,0 +1,29 @@ +#! /usr/bin/env python +import colorsys + +def hsv2ansi(h, s, v): + # h: 0..1, s/v: 0..1 + if s < 0.1: + return int(v * 23) + 232 + r, g, b = map(lambda x: int(x * 5), colorsys.hsv_to_rgb(h, s, v)) + return 16 + (r * 36) + (g * 6) + b + +def ramp_idx(i, num): + assert num > 0 + i0 = float(i) / num + h = 0.57 + i0 + s = 1 - pow(i0,3) + v = 1 + return hsv2ansi(h, s, v) + +def ansi_ramp(num): + return [ramp_idx(i, num) for i in range(num)] + +ansi_ramp80 = ansi_ramp(80) + +if __name__ == '__main__': + import sys + from py.io import ansi_print + colors = int(sys.argv[1]) if len(sys.argv) > 1 else 80 + for col in range(colors): + ansi_print('#', "38;5;%d" % ramp_idx(col, colors), newline=False, flush=True) diff --git a/rpython/tool/sourcetools.py b/rpython/tool/sourcetools.py --- a/rpython/tool/sourcetools.py +++ b/rpython/tool/sourcetools.py @@ -6,7 +6,6 @@ # XXX We should try to generalize and single out one approach to dynamic # XXX code compilation. 
-import types import sys, os, inspect, new import py @@ -296,40 +295,3 @@ result.func_defaults = f.func_defaults result.func_dict.update(f.func_dict) return result - - -def _convert_const_maybe(x, encoding): - if isinstance(x, str): - return x.decode(encoding) - elif isinstance(x, tuple): - items = [_convert_const_maybe(item, encoding) for item in x] - return tuple(items) - return x - -def with_unicode_literals(fn=None, **kwds): - """Decorator that replace all string literals with unicode literals. - Similar to 'from __future__ import string literals' at function level. - Useful to limit changes in the py3k branch. - """ - encoding = kwds.pop('encoding', 'ascii') - if kwds: - raise TypeError("Unexpected keyword argument(s): %s" % ', '.join(kwds.keys())) - def decorator(fn): - co = fn.func_code - new_consts = [] - for const in co.co_consts: - new_consts.append(_convert_const_maybe(const, encoding)) - new_consts = tuple(new_consts) - new_code = types.CodeType(co.co_argcount, co.co_nlocals, co.co_stacksize, - co.co_flags, co.co_code, new_consts, co.co_names, - co.co_varnames, co.co_filename, co.co_name, - co.co_firstlineno, co.co_lnotab) - fn.func_code = new_code - return fn - # - # support the usage of @with_unicode_literals instead of @with_unicode_literals() - if fn is not None: - assert type(fn) is types.FunctionType - return decorator(fn) - else: - return decorator diff --git a/rpython/tool/test/test_sourcetools.py b/rpython/tool/test/test_sourcetools.py --- a/rpython/tool/test/test_sourcetools.py +++ b/rpython/tool/test/test_sourcetools.py @@ -1,7 +1,5 @@ -# -*- encoding: utf-8 -*- -import py from rpython.tool.sourcetools import ( - func_with_new_name, func_renamer, rpython_wrapper, with_unicode_literals) + func_renamer, func_with_new_name, rpython_wrapper) def test_rename(): def f(x, y=5): @@ -57,30 +55,3 @@ ('decorated', 40, 2), ('bar', 40, 2), ] - - -def test_with_unicode_literals(): - @with_unicode_literals() - def foo(): - return 'hello' - assert type(foo()) is unicode - # - @with_unicode_literals - def foo(): - return 'hello' - assert type(foo()) is unicode - # - def foo(): - return 'hello àèì' - py.test.raises(UnicodeDecodeError, "with_unicode_literals(foo)") - # - @with_unicode_literals(encoding='utf-8') - def foo(): - return 'hello àèì' - assert foo() == u'hello àèì' - # - @with_unicode_literals - def foo(): - return ('a', 'b') - assert type(foo()[0]) is unicode - diff --git a/rpython/translator/c/dlltool.py b/rpython/translator/c/dlltool.py --- a/rpython/translator/c/dlltool.py +++ b/rpython/translator/c/dlltool.py @@ -21,7 +21,8 @@ entrypoints.append(getfunctionptr(graph)) return entrypoints - def gen_makefile(self, targetdir, exe_name=None): + def gen_makefile(self, targetdir, exe_name=None, + headers_to_precompile=[]): pass # XXX finish def compile(self): @@ -30,6 +31,8 @@ extsymeci = ExternalCompilationInfo(export_symbols=export_symbols) self.eci = self.eci.merge(extsymeci) files = [self.c_source_filename] + self.extrafiles + files += self.eventually_copy(self.eci.separate_module_files) + self.eci.separate_module_files = () oname = self.name self.so_name = self.translator.platform.compile(files, self.eci, standalone=False, diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -260,12 +260,13 @@ defines['PYPY_MAIN_FUNCTION'] = "pypy_main_startup" self.eci = self.eci.merge(ExternalCompilationInfo( export_symbols=["pypy_main_startup", "pypy_debug_file"])) - self.eci, cfile, extra = 
gen_source(db, modulename, targetdir, - self.eci, defines=defines, - split=self.split) + self.eci, cfile, extra, headers_to_precompile = \ + gen_source(db, modulename, targetdir, + self.eci, defines=defines, split=self.split) self.c_source_filename = py.path.local(cfile) self.extrafiles = self.eventually_copy(extra) - self.gen_makefile(targetdir, exe_name=exe_name) + self.gen_makefile(targetdir, exe_name=exe_name, + headers_to_precompile=headers_to_precompile) return cfile def eventually_copy(self, cfiles): @@ -375,18 +376,22 @@ self._compiled = True return self.executable_name - def gen_makefile(self, targetdir, exe_name=None): - cfiles = [self.c_source_filename] + self.extrafiles + def gen_makefile(self, targetdir, exe_name=None, headers_to_precompile=[]): + module_files = self.eventually_copy(self.eci.separate_module_files) + self.eci.separate_module_files = [] + cfiles = [self.c_source_filename] + self.extrafiles + list(module_files) if exe_name is not None: exe_name = targetdir.join(exe_name) mk = self.translator.platform.gen_makefile( cfiles, self.eci, path=targetdir, exe_name=exe_name, + headers_to_precompile=headers_to_precompile, + no_precompile_cfiles = module_files, shared=self.config.translation.shared) if self.has_profopt(): profopt = self.config.translation.profopt - mk.definition('ABS_TARGET', '$(shell python -c "import sys,os; print os.path.abspath(sys.argv[1])" $(TARGET))') + mk.definition('ABS_TARGET', str(targetdir.join('$(TARGET)'))) mk.definition('DEFAULT_TARGET', 'profopt') mk.definition('PROFOPT', profopt) @@ -397,7 +402,7 @@ ('debug_exc', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DDO_LOG_EXC" debug_target'), ('debug_mem', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DPYPY_USE_TRIVIAL_MALLOC" debug_target'), ('no_obmalloc', '', '$(MAKE) CFLAGS="-g -O2 -DRPY_ASSERT -DPYPY_NO_OBMALLOC" $(TARGET)'), - ('linuxmemchk', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DPPY_USE_LINUXMEMCHK" debug_target'), + ('linuxmemchk', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DPYPY_USE_LINUXMEMCHK" debug_target'), ('llsafer', '', '$(MAKE) CFLAGS="-O2 -DRPY_LL_ASSERT" $(TARGET)'), ('lldebug', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'), ('lldebug0','', '$(MAKE) CFLAGS="-O0 $(DEBUGFLAGS) -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'), @@ -427,8 +432,8 @@ mk.definition('ASMFILES', sfiles) mk.definition('ASMLBLFILES', lblsfiles) mk.definition('GCMAPFILES', gcmapfiles) - if sys.platform == 'win32': - mk.definition('DEBUGFLAGS', '/MD /Zi') + if self.translator.platform.name == 'msvc': + mk.definition('DEBUGFLAGS', '-MD -Zi') else: if self.config.translation.shared: mk.definition('DEBUGFLAGS', '-O2 -fomit-frame-pointer -g -fPIC') @@ -484,11 +489,11 @@ mk.rule('.PRECIOUS', '%.s', "# don't remove .s files if Ctrl-C'ed") else: - if sys.platform == 'win32': - mk.definition('DEBUGFLAGS', '/MD /Zi') + if self.translator.platform.name == 'msvc': + mk.definition('DEBUGFLAGS', '-MD -Zi') else: mk.definition('DEBUGFLAGS', '-O1 -g') - if sys.platform == 'win32': + if self.translator.platform.name == 'msvc': mk.rule('debug_target', 'debugmode_$(DEFAULT_TARGET)', 'rem') else: mk.rule('debug_target', '$(TARGET)', '#') @@ -511,6 +516,7 @@ def __init__(self, database): self.database = database self.extrafiles = [] + self.headers_to_precompile = [] self.path = None self.namespace = NameManager() @@ -539,6 +545,8 @@ filepath = self.path.join(name) if name.endswith('.c'): self.extrafiles.append(filepath) + if name.endswith('.h'): + 
self.headers_to_precompile.append(filepath) return filepath.open('w') def getextrafiles(self): @@ -686,11 +694,11 @@ print >> fc, '/***********************************************************/' print >> fc, '/*** Implementations ***/' print >> fc - print >> fc, '#define PYPY_FILE_NAME "%s"' % name print >> fc, '#include "common_header.h"' print >> fc, '#include "structdef.h"' print >> fc, '#include "forwarddecl.h"' print >> fc, '#include "preimpl.h"' + print >> fc, '#define PYPY_FILE_NAME "%s"' % name print >> fc, '#include "src/g_include.h"' print >> fc print >> fc, MARKER @@ -732,12 +740,14 @@ print >> f, "#endif" def gen_preimpl(f, database): + f.write('#ifndef _PY_PREIMPLE_H\n#define _PY_PREIMPL_H\n') if database.translator is None or database.translator.rtyper is None: return preimplementationlines = pre_include_code_lines( database, database.translator.rtyper) for line in preimplementationlines: print >> f, line + f.write('#endif /* _PY_PREIMPL_H */\n') def gen_startupcode(f, database): # generate the start-up code and put it into a function @@ -799,6 +809,7 @@ f = filename.open('w') incfilename = targetdir.join('common_header.h') fi = incfilename.open('w') + fi.write('#ifndef _PY_COMMON_HEADER_H\n#define _PY_COMMON_HEADER_H\n') # # Header @@ -811,6 +822,7 @@ eci.write_c_header(fi) print >> fi, '#include "src/g_prerequisite.h"' + fi.write('#endif /* _PY_COMMON_HEADER_H*/\n') fi.close() @@ -822,6 +834,8 @@ sg.set_strategy(targetdir, split) database.prepare_inline_helpers() sg.gen_readable_parts_of_source(f) + headers_to_precompile = sg.headers_to_precompile[:] + headers_to_precompile.insert(0, incfilename) gen_startupcode(f, database) f.close() @@ -834,5 +848,4 @@ eci = add_extra_files(eci) eci = eci.convert_sources_to_files() - files, eci = eci.get_module_files() - return eci, filename, sg.getextrafiles() + list(files) + return eci, filename, sg.getextrafiles(), headers_to_precompile diff --git a/rpython/translator/c/test/test_extfunc.py b/rpython/translator/c/test/test_extfunc.py --- a/rpython/translator/c/test/test_extfunc.py +++ b/rpython/translator/c/test/test_extfunc.py @@ -65,7 +65,8 @@ f1 = compile(does_stuff, []) f1() - assert open(filename, 'r').read() == "hello world\n" + with open(filename, 'r') as fid: + assert fid.read() == "hello world\n" os.unlink(filename) def test_big_read(): @@ -296,8 +297,10 @@ os.chdir(path) return os.getcwd() f1 = compile(does_stuff, [str]) - # different on windows please - assert f1('/tmp') == os.path.realpath('/tmp') + if os.name == 'nt': + assert f1(os.environment['TEMP']) == os.path.realpath(os.environment['TEMP']) + else: + assert f1('/tmp') == os.path.realpath('/tmp') def test_mkdir_rmdir(): def does_stuff(path, delete): diff --git a/rpython/translator/c/test/test_newgc.py b/rpython/translator/c/test/test_newgc.py --- a/rpython/translator/c/test/test_newgc.py +++ b/rpython/translator/c/test/test_newgc.py @@ -658,7 +658,8 @@ def test_open_read_write_seek_close(self): self.run('open_read_write_seek_close') - assert open(self.filename, 'r').read() == "hello world\n" + with open(self.filename, 'r') as fid: + assert fid.read() == "hello world\n" os.unlink(self.filename) def define_callback_with_collect(cls): diff --git a/rpython/translator/platform/__init__.py b/rpython/translator/platform/__init__.py --- a/rpython/translator/platform/__init__.py +++ b/rpython/translator/platform/__init__.py @@ -100,7 +100,8 @@ return ExecutionResult(returncode, stdout, stderr) def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False): + 
shared=False, headers_to_precompile=[], + no_precompile_cfiles = []): raise NotImplementedError("Pure abstract baseclass") def __repr__(self): diff --git a/rpython/translator/platform/darwin.py b/rpython/translator/platform/darwin.py --- a/rpython/translator/platform/darwin.py +++ b/rpython/translator/platform/darwin.py @@ -50,14 +50,17 @@ return ["-Wl,-exported_symbols_list,%s" % (response_file,)] def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False): + shared=False, headers_to_precompile=[], + no_precompile_cfiles = []): # ensure frameworks are passed in the Makefile fs = self._frameworks(eci.frameworks) if len(fs) > 0: # concat (-framework, FrameworkName) pairs self.extra_libs += tuple(map(" ".join, zip(fs[::2], fs[1::2]))) mk = super(Darwin, self).gen_makefile(cfiles, eci, exe_name, path, - shared) + shared=shared, + headers_to_precompile=headers_to_precompile, + no_precompile_cfiles = no_precompile_cfiles) return mk diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py --- a/rpython/translator/platform/posix.py +++ b/rpython/translator/platform/posix.py @@ -83,7 +83,8 @@ return [entry[2:] for entry in out.split()] def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False): + shared=False, headers_to_precompile=[], + no_precompile_cfiles = []): cfiles = self._all_cfiles(cfiles, eci) if path is None: diff --git a/rpython/translator/platform/test/test_distutils.py b/rpython/translator/platform/test/test_distutils.py --- a/rpython/translator/platform/test/test_distutils.py +++ b/rpython/translator/platform/test/test_distutils.py @@ -11,3 +11,7 @@ def test_900_files(self): py.test.skip('Makefiles not suppoerted') + + def test_precompiled_headers(self): + py.test.skip('Makefiles not suppoerted') + diff --git a/rpython/translator/platform/test/test_makefile.py b/rpython/translator/platform/test/test_makefile.py --- a/rpython/translator/platform/test/test_makefile.py +++ b/rpython/translator/platform/test/test_makefile.py @@ -1,7 +1,10 @@ from rpython.translator.platform.posix import GnuMakefile as Makefile +from rpython.translator.platform import host +from rpython.tool.udir import udir +from rpython.translator.tool.cbuild import ExternalCompilationInfo from StringIO import StringIO -import re +import re, sys, py def test_simple_makefile(): m = Makefile() @@ -29,3 +32,112 @@ val = s.getvalue() assert not re.search('CC += +xxx', val, re.M) assert re.search('CC += +yyy', val, re.M) + +class TestMakefile(object): + platform = host + strict_on_stderr = True + + def check_res(self, res, expected='42\n'): + assert res.out == expected + if self.strict_on_stderr: + assert res.err == '' + assert res.returncode == 0 + + def test_900_files(self): + txt = '#include \n' + for i in range(900): + txt += 'int func%03d();\n' % i + txt += 'int main() {\n int j=0;' + for i in range(900): + txt += ' j += func%03d();\n' % i + txt += ' printf("%d\\n", j);\n' + txt += ' return 0;};\n' + cfile = udir.join('test_900_files.c') + cfile.write(txt) + cfiles = [cfile] + for i in range(900): + cfile2 = udir.join('implement%03d.c' %i) + cfile2.write(''' + int func%03d() + { + return %d; + } + ''' % (i, i)) + cfiles.append(cfile2) + mk = self.platform.gen_makefile(cfiles, ExternalCompilationInfo(), path=udir) + mk.write() + self.platform.execute_makefile(mk) + res = self.platform.execute(udir.join('test_900_files')) + self.check_res(res, '%d\n' %sum(range(900))) + + def test_precompiled_headers(self): + if self.platform.cc != 'cl.exe': + 
py.test.skip("Only MSVC profits from precompiled headers") + import time + tmpdir = udir.join('precompiled_headers').ensure(dir=1) + # Create an eci that should not use precompiled headers + eci = ExternalCompilationInfo(include_dirs=[tmpdir]) + main_c = tmpdir.join('main_no_pch.c') + eci.separate_module_files = [main_c] + ncfiles = 10 + nprecompiled_headers = 20 + txt = '' + for i in range(ncfiles): + txt += "int func%03d();\n" % i + txt += "\nint main(int argc, char * argv[])\n" + txt += "{\n int i=0;\n" + for i in range(ncfiles): + txt += " i += func%03d();\n" % i + txt += ' printf("%d\\n", i);\n' + txt += " return 0;\n};\n" + main_c.write(txt) + # Create some large headers with dummy functions to be precompiled + cfiles_precompiled_headers = [] + for i in range(nprecompiled_headers): + pch_name =tmpdir.join('pcheader%03d.h' % i) + txt = '#ifndef PCHEADER%03d_H\n#define PCHEADER%03d_H\n' %(i, i) + for j in range(3000): + txt += "int pcfunc%03d_%03d();\n" %(i, j) + txt += '#endif' + pch_name.write(txt) + cfiles_precompiled_headers.append(pch_name) + # Create some cfiles with headers we want precompiled + cfiles = [] + for i in range(ncfiles): + c_name =tmpdir.join('implement%03d.c' % i) + txt = '' + for pch_name in cfiles_precompiled_headers: + txt += '#include "%s"\n' % pch_name + txt += "int func%03d(){ return %d;};\n" % (i, i) + c_name.write(txt) + cfiles.append(c_name) + if sys.platform == 'win32': + clean = ('clean', '', 'for %f in ( $(OBJECTS) $(TARGET) ) do @if exist %f del /f %f') + get_time = time.clock + else: + clean = ('clean', '', 'rm -f $(OBJECTS) $(TARGET) ') + get_time = time.time + #write a non-precompiled header makefile + mk = self.platform.gen_makefile(cfiles, eci, path=tmpdir) + mk.rule(*clean) + mk.write() + t0 = get_time() + self.platform.execute_makefile(mk) + t1 = get_time() + t_normal = t1 - t0 + self.platform.execute_makefile(mk, extra_opts=['clean']) + # Write a super-duper makefile with precompiled headers + mk = self.platform.gen_makefile(cfiles, eci, path=tmpdir, + headers_to_precompile=cfiles_precompiled_headers,) + mk.rule(*clean) + mk.write() + t0 = get_time() + self.platform.execute_makefile(mk) + t1 = get_time() + t_precompiled = t1 - t0 + res = self.platform.execute(mk.exe_name) + self.check_res(res, '%d\n' %sum(range(ncfiles))) + print "precompiled haeder 'make' time %.2f, non-precompiled header time %.2f" %(t_precompiled, t_normal) + assert t_precompiled < t_normal * 0.5 + + diff --git a/rpython/translator/platform/test/test_platform.py b/rpython/translator/platform/test/test_platform.py --- a/rpython/translator/platform/test/test_platform.py +++ b/rpython/translator/platform/test/test_platform.py @@ -59,34 +59,6 @@ res = self.platform.execute(executable) self.check_res(res) - def test_900_files(self): - txt = '#include \n' - for i in range(900): - txt += 'int func%03d();\n' % i - txt += 'int main() {\n int j=0;' - for i in range(900): - txt += ' j += func%03d();\n' % i - txt += ' printf("%d\\n", j);\n' - txt += ' return 0;};\n' - cfile = udir.join('test_900_files.c') - cfile.write(txt) - cfiles = [cfile] - for i in range(900): - cfile2 = udir.join('implement%03d.c' %i) - cfile2.write(''' - int func%03d() - { - return %d; - } - ''' % (i, i)) - cfiles.append(cfile2) - mk = self.platform.gen_makefile(cfiles, ExternalCompilationInfo(), path=udir) - mk.write() - self.platform.execute_makefile(mk) - res = self.platform.execute(udir.join('test_900_files')) - self.check_res(res, '%d\n' %sum(range(900))) - - def test_nice_errors(self): cfile = 
udir.join('test_nice_errors.c') cfile.write('') diff --git a/rpython/translator/platform/windows.py b/rpython/translator/platform/windows.py --- a/rpython/translator/platform/windows.py +++ b/rpython/translator/platform/windows.py @@ -249,7 +249,8 @@ def gen_makefile(self, cfiles, eci, exe_name=None, path=None, - shared=False): + shared=False, headers_to_precompile=[], + no_precompile_cfiles = []): cfiles = self._all_cfiles(cfiles, eci) if path is None: @@ -313,20 +314,60 @@ ('CC_LINK', self.link), ('LINKFILES', eci.link_files), ('MASM', self.masm), + ('MAKE', 'nmake.exe'), ('_WIN32', '1'), ] if self.x64: definitions.append(('_WIN64', '1')) + rules = [ + ('all', '$(DEFAULT_TARGET)', []), + ('.asm.obj', '', '$(MASM) /nologo /Fo$@ /c $< $(INCLUDEDIRS)'), + ] + + if len(headers_to_precompile)>0: + stdafx_h = path.join('stdafx.h') + txt = '#ifndef PYPY_STDAFX_H\n' + txt += '#define PYPY_STDAFX_H\n' + txt += '\n'.join(['#include "' + m.pathrel(c) + '"' for c in headers_to_precompile]) + txt += '\n#endif\n' + stdafx_h.write(txt) + stdafx_c = path.join('stdafx.c') + stdafx_c.write('#include "stdafx.h"\n') + definitions.append(('CREATE_PCH', '/Ycstdafx.h /Fpstdafx.pch /FIstdafx.h')) + definitions.append(('USE_PCH', '/Yustdafx.h /Fpstdafx.pch /FIstdafx.h')) + rules.append(('$(OBJECTS)', 'stdafx.pch', [])) + rules.append(('stdafx.pch', 'stdafx.h', + '$(CC) stdafx.c /c /nologo $(CFLAGS) $(CFLAGSEXTRA) ' + '$(CREATE_PCH) $(INCLUDEDIRS)')) + rules.append(('.c.obj', '', + '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) $(USE_PCH) ' + '/Fo$@ /c $< $(INCLUDEDIRS)')) + #Do not use precompiled headers for some files + #rules.append((r'{..\module_cache}.c{..\module_cache}.obj', '', + # '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) /Fo$@ /c $< $(INCLUDEDIRS)')) + # nmake cannot handle wildcard target specifications, so we must + # create a rule for compiling each file from eci since they cannot use + # precompiled headers :( + no_precompile = [] + for f in list(no_precompile_cfiles): + f = m.pathrel(py.path.local(f)) + if f not in no_precompile and f.endswith('.c'): + no_precompile.append(f) + target = f[:-1] + 'obj' + rules.append((target, f, + '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) ' + '/Fo%s /c %s $(INCLUDEDIRS)' %(target, f))) + + else: + rules.append(('.c.obj', '', + '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) ' + '/Fo$@ /c $< $(INCLUDEDIRS)')) + + for args in definitions: m.definition(*args) - rules = [ - ('all', '$(DEFAULT_TARGET)', []), - ('.c.obj', '', '$(CC) /nologo $(CFLAGS) $(CFLAGSEXTRA) /Fo$@ /c $< $(INCLUDEDIRS)'), - ('.asm.obj', '', '$(MASM) /nologo /Fo$@ /c $< $(INCLUDEDIRS)'), - ] - for rule in rules: m.rule(*rule) @@ -371,7 +412,7 @@ 'mt.exe -nologo -manifest $*.manifest -outputresource:$@;1', ]) m.rule('debugmode_$(DEFAULT_TARGET)', ['debugmode_$(TARGET)', 'main.obj'], - ['$(CC_LINK) /nologo /DEBUG main.obj $(SHARED_IMPORT_LIB) /out:$@' + ['$(CC_LINK) /nologo /DEBUG main.obj debugmode_$(SHARED_IMPORT_LIB) /out:$@' ]) return m @@ -392,6 +433,25 @@ self._handle_error(returncode, stdout, stderr, path.join('make')) +class WinDefinition(posix.Definition): + def write(self, f): + def write_list(prefix, lst): + lst = lst or [''] + for i, fn in enumerate(lst): + print >> f, prefix, fn, + if i < len(lst)-1: + print >> f, '\\' + else: + print >> f + prefix = ' ' * len(prefix) + name, value = self.name, self.value + if isinstance(value, str): + f.write('%s = %s\n' % (name, value)) + else: + write_list('%s =' % (name,), value) + f.write('\n') + + class NMakefile(posix.GnuMakefile): def write(self, out=None): # nmake 
expands macros when it parses rules. @@ -410,6 +470,14 @@ if out is None: f.close() + def definition(self, name, value): + defs = self.defs + defn = WinDefinition(name, value) + if name in defs: + self.lines[defs[name]] = defn + else: + defs[name] = len(self.lines) + self.lines.append(defn) class MingwPlatform(posix.BasePosix): name = 'mingw32' From noreply at buildbot.pypy.org Sat Feb 15 18:06:32 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 15 Feb 2014 18:06:32 +0100 (CET) Subject: [pypy-commit] pypy NonConstant: kill bk.immutableconstant() Message-ID: <20140215170632.194951C1178@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: NonConstant Changeset: r69147:7c11617bfff3 Date: 2014-02-15 14:20 +0000 http://bitbucket.org/pypy/pypy/changeset/7c11617bfff3/ Log: kill bk.immutableconstant() diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -236,9 +236,7 @@ else: raise elif isinstance(arg, Constant): - #if arg.value is undefined_value: # undefined local variables - # return annmodel.s_ImpossibleValue - return self.bookkeeper.immutableconstant(arg) + return self.bookkeeper.immutablevalue(arg.value) else: raise TypeError('Variable or Constant expected, got %r' % (arg,)) diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -202,9 +202,6 @@ position.""" return SomeDict(self.getdictdef()) - def immutableconstant(self, const): - return self.immutablevalue(const.value) - def immutablevalue(self, x): """The most precise SomeValue instance that contains the immutable value x.""" From noreply at buildbot.pypy.org Sat Feb 15 18:06:33 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 15 Feb 2014 18:06:33 +0100 (CET) Subject: [pypy-commit] pypy NonConstant: document branch Message-ID: <20140215170633.6789B1C1178@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: NonConstant Changeset: r69148:5a5dcb80fd86 Date: 2014-02-15 16:59 +0000 http://bitbucket.org/pypy/pypy/changeset/5a5dcb80fd86/ Log: document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -67,3 +67,6 @@ With a properly configured 256-color terminal (TERM=...-256color), the Mandelbrot set shown during translation now uses a range of 50 colours. Essential! + +.. branch: NonConstant +Simplify implementation of NonConstant. 
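A note on the NonConstant changesets above and the merge that follows: the purpose of NonConstant is to hand the annotator a value whose type is known but whose concrete value must not be treated as a compile-time constant. The snippet below is a plain-Python illustration of the idea behind not_const(); it is not the RPython implementation, and the names FakeAnnotation and strip_const are invented for this sketch. The real change, as the nonconst.py hunk in the merge changeset shows, is that EntryNonConstant.compute_result_annotation now just returns not_const(s_arg), which is why immutablevalue() no longer needs its need_const flag.

import copy

class FakeAnnotation(object):
    # Stand-in for an annotator type such as SomeInteger; the presence of
    # a .const attribute marks the annotation as a known constant.
    def __init__(self, knowntype, const=None):
        self.knowntype = knowntype
        if const is not None:
            self.const = const

    def is_constant(self):
        return hasattr(self, 'const')

def strip_const(s_arg):
    # Illustrates the not_const() idea: keep the annotation, drop the
    # constness, so later passes cannot constant-fold the value away.
    if not s_arg.is_constant():
        return s_arg
    s_new = copy.copy(s_arg)
    del s_new.const
    return s_new

s_const = FakeAnnotation(int, const=42)
s_general = strip_const(s_const)
assert s_general.knowntype is int
assert not s_general.is_constant()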
From noreply at buildbot.pypy.org Sat Feb 15 18:06:34 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 15 Feb 2014 18:06:34 +0100 (CET) Subject: [pypy-commit] pypy NonConstant: close branch before merging Message-ID: <20140215170634.8A1441C1178@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: NonConstant Changeset: r69149:a455515aab13 Date: 2014-02-15 17:05 +0000 http://bitbucket.org/pypy/pypy/changeset/a455515aab13/ Log: close branch before merging From noreply at buildbot.pypy.org Sat Feb 15 18:06:35 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sat, 15 Feb 2014 18:06:35 +0100 (CET) Subject: [pypy-commit] pypy default: hg merge NonConstant Message-ID: <20140215170635.A96771C1178@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: Changeset: r69150:d9b1937be8ca Date: 2014-02-15 17:06 +0000 http://bitbucket.org/pypy/pypy/changeset/d9b1937be8ca/ Log: hg merge NonConstant diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -67,3 +67,6 @@ With a properly configured 256-color terminal (TERM=...-256color), the Mandelbrot set shown during translation now uses a range of 50 colours. Essential! + +.. branch: NonConstant +Simplify implementation of NonConstant. diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -236,9 +236,7 @@ else: raise elif isinstance(arg, Constant): - #if arg.value is undefined_value: # undefined local variables - # return annmodel.s_ImpossibleValue - return self.bookkeeper.immutableconstant(arg) + return self.bookkeeper.immutablevalue(arg.value) else: raise TypeError('Variable or Constant expected, got %r' % (arg,)) diff --git a/rpython/annotator/bookkeeper.py b/rpython/annotator/bookkeeper.py --- a/rpython/annotator/bookkeeper.py +++ b/rpython/annotator/bookkeeper.py @@ -202,10 +202,7 @@ position.""" return SomeDict(self.getdictdef()) - def immutableconstant(self, const): - return self.immutablevalue(const.value) - - def immutablevalue(self, x, need_const=True): + def immutablevalue(self, x): """The most precise SomeValue instance that contains the immutable value x.""" # convert unbound methods to the underlying function @@ -241,73 +238,51 @@ elif tp is bytearray: result = SomeByteArray() elif tp is tuple: - result = SomeTuple(items = [self.immutablevalue(e, need_const) for e in x]) + result = SomeTuple(items = [self.immutablevalue(e) for e in x]) elif tp is float: result = SomeFloat() elif tp is list: - if need_const: - key = Constant(x) - try: - return self.immutable_cache[key] - except KeyError: - result = SomeList(ListDef(self, s_ImpossibleValue)) - self.immutable_cache[key] = result - for e in x: - result.listdef.generalize(self.immutablevalue(e)) - result.const_box = key - return result - else: - listdef = ListDef(self, s_ImpossibleValue) + key = Constant(x) + try: + return self.immutable_cache[key] + except KeyError: + result = SomeList(ListDef(self, s_ImpossibleValue)) + self.immutable_cache[key] = result for e in x: - listdef.generalize(self.immutablevalue(e, False)) - result = SomeList(listdef) + result.listdef.generalize(self.immutablevalue(e)) + result.const_box = key + return result elif tp is dict or tp is r_dict or tp is SomeOrderedDict.knowntype: if tp is SomeOrderedDict.knowntype: cls = SomeOrderedDict else: cls = SomeDict - if need_const: - key = Constant(x) - try: - return self.immutable_cache[key] - except KeyError: - result = 
cls(DictDef(self, - s_ImpossibleValue, - s_ImpossibleValue, - is_r_dict = tp is r_dict)) - self.immutable_cache[key] = result - if tp is r_dict: - s_eqfn = self.immutablevalue(x.key_eq) - s_hashfn = self.immutablevalue(x.key_hash) - result.dictdef.dictkey.update_rdict_annotations(s_eqfn, - s_hashfn) - seen_elements = 0 - while seen_elements != len(x): - items = x.items() - for ek, ev in items: - result.dictdef.generalize_key(self.immutablevalue(ek)) - result.dictdef.generalize_value(self.immutablevalue(ev)) - result.dictdef.seen_prebuilt_key(ek) - seen_elements = len(items) - # if the dictionary grew during the iteration, - # start over again - result.const_box = key - return result - else: - dictdef = DictDef(self, - s_ImpossibleValue, - s_ImpossibleValue, - is_r_dict = tp is r_dict) + key = Constant(x) + try: + return self.immutable_cache[key] + except KeyError: + result = cls(DictDef(self, + s_ImpossibleValue, + s_ImpossibleValue, + is_r_dict = tp is r_dict)) + self.immutable_cache[key] = result if tp is r_dict: s_eqfn = self.immutablevalue(x.key_eq) s_hashfn = self.immutablevalue(x.key_hash) - dictdef.dictkey.update_rdict_annotations(s_eqfn, - s_hashfn) - for ek, ev in x.iteritems(): - dictdef.generalize_key(self.immutablevalue(ek, False)) - dictdef.generalize_value(self.immutablevalue(ev, False)) - dictdef.seen_prebuilt_key(ek) - result = cls(dictdef) + result.dictdef.dictkey.update_rdict_annotations(s_eqfn, + s_hashfn) + seen_elements = 0 + while seen_elements != len(x): + items = x.items() + for ek, ev in items: + result.dictdef.generalize_key(self.immutablevalue(ek)) + result.dictdef.generalize_value(self.immutablevalue(ev)) + result.dictdef.seen_prebuilt_key(ek) + seen_elements = len(items) + # if the dictionary grew during the iteration, + # start over again + result.const_box = key + return result elif tp is weakref.ReferenceType: x1 = x() if x1 is None: @@ -332,11 +307,11 @@ if hasattr(x, 'im_self') and hasattr(x, 'im_func'): # on top of PyPy, for cases like 'l.append' where 'l' is a # global constant list, the find_method() returns non-None - s_self = self.immutablevalue(x.im_self, need_const) + s_self = self.immutablevalue(x.im_self) result = s_self.find_method(x.im_func.__name__) elif hasattr(x, '__self__') and x.__self__ is not None: # for cases like 'l.append' where 'l' is a global constant list - s_self = self.immutablevalue(x.__self__, need_const) + s_self = self.immutablevalue(x.__self__) result = s_self.find_method(x.__name__) assert result is not None else: @@ -360,8 +335,7 @@ return s_None else: raise Exception("Don't know how to represent %r" % (x,)) - if need_const: - result.const = x + result.const = x return result def getdesc(self, pyobj): diff --git a/rpython/rlib/nonconst.py b/rpython/rlib/nonconst.py --- a/rpython/rlib/nonconst.py +++ b/rpython/rlib/nonconst.py @@ -4,6 +4,7 @@ from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.flowspace.model import Constant +from rpython.annotator.model import not_const class NonConstant(object): def __init__(self, _constant): @@ -33,11 +34,8 @@ class EntryNonConstant(ExtRegistryEntry): _about_ = NonConstant - def compute_result_annotation(self, arg): - if hasattr(arg, 'const'): - return self.bookkeeper.immutablevalue(arg.const, False) - else: - return arg + def compute_result_annotation(self, s_arg): + return not_const(s_arg) def specialize_call(self, hop): hop.exception_cannot_occur() From noreply at buildbot.pypy.org Sat Feb 15 18:21:13 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Sat, 15 Feb 
2014 18:21:13 +0100 (CET) Subject: [pypy-commit] pypy array-propagate-len: Fix optimized trace debugging utility. Previously the print loop would use Message-ID: <20140215172113.C91771C01DE@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: array-propagate-len Changeset: r69151:6bace6054472 Date: 2014-02-14 12:15 +0100 http://bitbucket.org/pypy/pypy/changeset/6bace6054472/ Log: Fix optimized trace debugging utility. Previously the print loop would use zip which always truncates the output to the shorter list. This produces confusing output becuase it would later fail on trace length mismatch but printing truncated output to stdout looks like there is same number of operations. diff --git a/rpython/jit/metainterp/optimizeopt/util.py b/rpython/jit/metainterp/optimizeopt/util.py --- a/rpython/jit/metainterp/optimizeopt/util.py +++ b/rpython/jit/metainterp/optimizeopt/util.py @@ -1,3 +1,5 @@ +import itertools + import py from rpython.rlib.objectmodel import r_dict, compute_identity_hash from rpython.rlib.rarithmetic import intmask @@ -136,13 +138,16 @@ print ' Comparing lists '.center(totwidth, '-') text_right = text_right or 'expected' print '%s| %s' % ('optimized'.center(width), text_right.center(width)) - for op1, op2 in zip(oplist1, oplist2): + for op1, op2 in itertools.izip_longest(oplist1, oplist2, fillvalue=''): txt1 = str(op1) txt2 = str(op2) while txt1 or txt2: print '%s| %s' % (txt1[:width].ljust(width), txt2[:width]) txt1 = txt1[width:] txt2 = txt2[width:] + print '-' * totwidth + + for op1, op2 in zip(oplist1, oplist2): assert op1.getopnum() == op2.getopnum() assert op1.numargs() == op2.numargs() for i in range(op1.numargs()): @@ -177,6 +182,5 @@ else: assert False assert len(oplist1) == len(oplist2) - print '-'*totwidth return True From noreply at buildbot.pypy.org Sat Feb 15 18:21:15 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Sat, 15 Feb 2014 18:21:15 +0100 (CET) Subject: [pypy-commit] pypy array-propagate-len: failing test Message-ID: <20140215172115.0C61A1C01DE@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: array-propagate-len Changeset: r69152:e5565168e86f Date: 2014-02-14 14:34 +0100 http://bitbucket.org/pypy/pypy/changeset/e5565168e86f/ Log: failing test diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5200,6 +5200,20 @@ """ self.optimize_loop(ops, ops) + def test_cmp_outside_intbounds(self): + ops = """ + [p0] + i0 = getfield_gc(p0, descr=chardescr) + i1 = int_lt(i0, 256) + guard_true(i1) [] + """ + + expected = """ + [p0] + i0 = getfield_gc(p0, descr=chardescr) + """ + self.optimize_loop(ops, expected) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -92,6 +92,7 @@ NODE.become(lltype.GcStruct('NODE', ('parent', OBJECT), ('value', lltype.Signed), ('floatval', lltype.Float), + ('charval', lltype.Char), ('next', lltype.Ptr(NODE)))) NODE2 = lltype.GcStruct('NODE2', ('parent', NODE), ('other', lltype.Ptr(NODE))) @@ -108,6 +109,7 @@ nodesize2 = cpu.sizeof(NODE2) valuedescr = cpu.fielddescrof(NODE, 'value') floatdescr = cpu.fielddescrof(NODE, 'floatval') + chardescr = 
cpu.fielddescrof(NODE, 'charval') nextdescr = cpu.fielddescrof(NODE, 'next') otherdescr = cpu.fielddescrof(NODE2, 'other') From noreply at buildbot.pypy.org Sat Feb 15 18:21:16 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Sat, 15 Feb 2014 18:21:16 +0100 (CET) Subject: [pypy-commit] pypy array-propagate-len: add integer bounds on FieldDescr Message-ID: <20140215172116.36F481C01DE@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: array-propagate-len Changeset: r69153:3830704ee5eb Date: 2014-02-14 15:52 +0100 http://bitbucket.org/pypy/pypy/changeset/3830704ee5eb/ Log: add integer bounds on FieldDescr diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -103,6 +103,26 @@ def is_field_signed(self): return self.flag == FLAG_SIGNED + def is_integer_bounded(self): + return self.flag in (FLAG_SIGNED, FLAG_UNSIGNED) \ + and self.field_size < symbolic.WORD + + def get_integer_min(self): + if self.flag == FLAG_UNSIGNED: + return 0 + elif self.flag == FLAG_SIGNED: + return -(1 << ((self.field_size << 3) - 1)) + + assert False + + def get_integer_max(self): + if self.flag == FLAG_UNSIGNED: + return (1 << (self.field_size << 3)) - 1 + elif self.flag == FLAG_SIGNED: + return (1 << ((self.field_size << 3) - 1)) - 1 + + assert False + def sort_key(self): return self.offset diff --git a/rpython/jit/backend/llsupport/test/test_descr.py b/rpython/jit/backend/llsupport/test/test_descr.py --- a/rpython/jit/backend/llsupport/test/test_descr.py +++ b/rpython/jit/backend/llsupport/test/test_descr.py @@ -432,3 +432,21 @@ assert descr.basesize == struct.calcsize("PP") # hash, length assert descr.lendescr.offset == struct.calcsize("P") # hash assert not descr.is_array_of_pointers() + + +def test_descr_integer_bounded(): + descr = FieldDescr('descr', 0, 1, FLAG_SIGNED) + assert descr.is_integer_bounded() + + descr = FieldDescr('descr', 0, symbolic.WORD, FLAG_UNSIGNED) + assert not descr.is_integer_bounded() + + +def test_descr_get_integer_bounds(): + descr = FieldDescr('decr', 0, 1, FLAG_UNSIGNED) + assert descr.get_integer_min() == 0 + assert descr.get_integer_max() == 255 + + descr = FieldDescr('descr', 0, 1, FLAG_SIGNED) + assert descr.get_integer_min() == -128 + assert descr.get_integer_max() == 127 From noreply at buildbot.pypy.org Sat Feb 15 18:21:17 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Sat, 15 Feb 2014 18:21:17 +0100 (CET) Subject: [pypy-commit] pypy array-propagate-len: add optimization, move stuff around Message-ID: <20140215172117.652711C01DE@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: array-propagate-len Changeset: r69154:17003c30fe47 Date: 2014-02-14 19:23 +0100 http://bitbucket.org/pypy/pypy/changeset/17003c30fe47/ Log: add optimization, move stuff around diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -1,10 +1,12 @@ import py, weakref from rpython.jit.backend import model from rpython.jit.backend.llgraph import support +from rpython.jit.backend.llsupport import symbolic from rpython.jit.metainterp.history import AbstractDescr from rpython.jit.metainterp.history import Const, getkind from rpython.jit.metainterp.history import INT, REF, FLOAT, VOID from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.optimizeopt import intbounds from rpython.jit.codewriter import longlong, 
heaptracker from rpython.jit.codewriter.effectinfo import EffectInfo @@ -119,6 +121,24 @@ def is_field_signed(self): return _is_signed_kind(self.FIELD) + def is_integer_bounded(self): + return getkind(self.FIELD) == 'int' \ + and rffi.sizeof(self.FIELD) < symbolic.WORD + + def get_integer_min(self): + if getkind(self.FIELD) != 'int': + assert False + + return intbounds.get_integer_min( + not _is_signed_kind(self.FIELD), rffi.sizeof(self.FIELD)) + + def get_integer_max(self): + if getkind(self.FIELD) != 'int': + assert False + + return intbounds.get_integer_max( + not _is_signed_kind(self.FIELD), rffi.sizeof(self.FIELD)) + def _is_signed_kind(TYPE): return (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and rffi.cast(TYPE, -1) == -1) diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -6,6 +6,7 @@ from rpython.jit.metainterp import history from rpython.jit.codewriter import heaptracker, longlong from rpython.jit.codewriter.longlong import is_longlong +from rpython.jit.metainterp.optimizeopt import intbounds class GcCache(object): @@ -109,17 +110,17 @@ def get_integer_min(self): if self.flag == FLAG_UNSIGNED: - return 0 + return intbounds.get_integer_min(True, self.field_size) elif self.flag == FLAG_SIGNED: - return -(1 << ((self.field_size << 3) - 1)) + return intbounds.get_integer_min(False, self.field_size) assert False def get_integer_max(self): if self.flag == FLAG_UNSIGNED: - return (1 << (self.field_size << 3)) - 1 + return intbounds.get_integer_max(True, self.field_size) elif self.flag == FLAG_SIGNED: - return (1 << ((self.field_size << 3) - 1)) - 1 + return intbounds.get_integer_max(False, self.field_size) assert False diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -9,6 +9,20 @@ from rpython.jit.metainterp.resoperation import rop +def get_integer_min(is_unsigned, byte_size): + if is_unsigned: + return 0 + else: + return -(1 << ((byte_size << 3) - 1)) + + +def get_integer_max(is_unsigned, byte_size): + if is_unsigned: + return (1 << (byte_size << 3)) - 1 + else: + return (1 << ((byte_size << 3) - 1)) - 1 + + class OptIntBounds(Optimization): """Keeps track of the bounds placed on integers by guards and remove redundant guards""" @@ -322,6 +336,14 @@ v1.intbound.make_ge(IntLowerBound(0)) v1.intbound.make_lt(IntUpperBound(256)) + def optimize_GETFIELD_GC(self, op): + self.emit_operation(op) + descr = op.getdescr() + if descr.is_integer_bounded(): + v1 = self.getvalue(op.result) + v1.intbound.make_ge(IntLowerBound(descr.get_integer_min())) + v1.intbound.make_lt(IntUpperBound(descr.get_integer_max() + 1)) + def optimize_UNICODEGETITEM(self, op): self.emit_operation(op) v1 = self.getvalue(op.result) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5200,7 +5200,7 @@ """ self.optimize_loop(ops, ops) - def test_cmp_outside_intbounds(self): + def test_getfield_cmp_outside_intbounds(self): ops = """ [p0] i0 = getfield_gc(p0, descr=chardescr) From noreply at buildbot.pypy.org Sat Feb 15 18:21:18 2014 From: noreply at buildbot.pypy.org (squeaky) Date: 
Sat, 15 Feb 2014 18:21:18 +0100 (CET) Subject: [pypy-commit] pypy array-propagate-len: more tests for getfield_gc, failing test for arrays Message-ID: <20140215172118.859A41C01DE@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: array-propagate-len Changeset: r69155:14f943c2f482 Date: 2014-02-14 22:42 +0100 http://bitbucket.org/pypy/pypy/changeset/14f943c2f482/ Log: more tests for getfield_gc, failing test for arrays diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5200,7 +5200,7 @@ """ self.optimize_loop(ops, ops) - def test_getfield_cmp_outside_intbounds(self): + def test_getfield_cmp_above_bounds(self): ops = """ [p0] i0 = getfield_gc(p0, descr=chardescr) @@ -5214,6 +5214,46 @@ """ self.optimize_loop(ops, expected) + def test_getfield_cmp_below_bounds(self): + ops = """ + [p0] + i0 = getfield_gc(p0, descr=chardescr) + i1 = int_gt(i0, -1) + guard_true(i1) [] + """ + + expected = """ + [p0] + i0 = getfield_gc(p0, descr=chardescr) + """ + self.optimize_loop(ops, expected) + + def test_getfield_cmp_in_bounds(self): + ops = """ + [p0] + i0 = getfield_gc(p0, descr=chardescr) + i1 = int_gt(i0, 0) + guard_true(i1) [] + i2 = int_lt(i0, 255) + guard_true(i2) [] + """ + self.optimize_loop(ops, ops) + + + def test_rawarray_cmp_outside_intbounds(self): + ops = """ + [i0] + i1 = getarrayitem_raw(i0, 0, descr=rawarraydescr_char) + i2 = int_lt(i1, 256) + guard_true(i2) [] + """ + + expected = """ + [i0] + i1 = getarrayitem_raw(i0, 0, descr=rawarraydescr_char) + """ + self.optimize_loop(ops, expected) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass From noreply at buildbot.pypy.org Sat Feb 15 18:21:19 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Sat, 15 Feb 2014 18:21:19 +0100 (CET) Subject: [pypy-commit] pypy array-propagate-len: optimize array raw, make test pass Message-ID: <20140215172119.A5C6D1C01DE@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: array-propagate-len Changeset: r69156:6717c9190ff4 Date: 2014-02-14 23:57 +0100 http://bitbucket.org/pypy/pypy/changeset/6717c9190ff4/ Log: optimize array raw, make test pass diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -164,6 +164,25 @@ def is_array_of_structs(self): return isinstance(self.A.OF, lltype.Struct) + def is_item_integer_bounded(self): + return getkind(self.A.OF) == 'int' \ + and rffi.sizeof(self.A.OF) < symbolic.WORD + + def get_item_integer_min(self): + if getkind(self.A.OF) != 'int': + assert False + + return intbounds.get_integer_min( + not _is_signed_kind(self.A.OF), rffi.sizeof(self.A.OF)) + + def get_item_integer_max(self): + if getkind(self.A.OF) != 'int': + assert False + + return intbounds.get_integer_max( + not _is_signed_kind(self.A.OF), rffi.sizeof(self.A.OF)) + + class InteriorFieldDescr(AbstractDescr): def __init__(self, A, fieldname): self.A = A diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -203,6 +203,28 @@ def is_array_of_structs(self): return self.flag == FLAG_STRUCT + def is_item_integer_bounded(self): + return self.flag in (FLAG_SIGNED, FLAG_UNSIGNED) \ + and self.itemsize < symbolic.WORD + + 
def get_item_integer_min(self): + if self.flag == FLAG_UNSIGNED: + return intbounds.get_integer_min(True, self.itemsize) + elif self.flag == FLAG_SIGNED: + return intbounds.get_integer_min(False, self.itemsize) + + assert False + + def get_item_integer_max(self): + if self.flag == FLAG_UNSIGNED: + return intbounds.get_integer_max(True, self.itemsize) + elif self.flag == FLAG_SIGNED: + return intbounds.get_integer_max(False, self.itemsize) + + assert False + + + def repr_of_descr(self): return '' % (self.flag, self.itemsize) diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -344,6 +344,15 @@ v1.intbound.make_ge(IntLowerBound(descr.get_integer_min())) v1.intbound.make_lt(IntUpperBound(descr.get_integer_max() + 1)) + def optimize_GETARRAYITEM_RAW(self, op): + self.emit_operation(op) + descr = op.getdescr() + if descr.is_item_integer_bounded(): + v1 = self.getvalue(op.result) + v1.intbound.make_ge(IntLowerBound(descr.get_item_integer_min())) + v1.intbound.make_lt( + IntUpperBound(descr.get_item_integer_max() + 1)) + def optimize_UNICODEGETITEM(self, op): self.emit_operation(op) v1 = self.getvalue(op.result) From noreply at buildbot.pypy.org Sat Feb 15 18:21:20 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Sat, 15 Feb 2014 18:21:20 +0100 (CET) Subject: [pypy-commit] pypy array-propagate-len: failing test for gc array Message-ID: <20140215172120.C2C5E1C01DE@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: array-propagate-len Changeset: r69157:95039bc14ef6 Date: 2014-02-15 01:26 +0100 http://bitbucket.org/pypy/pypy/changeset/95039bc14ef6/ Log: failing test for gc array diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5239,7 +5239,6 @@ """ self.optimize_loop(ops, ops) - def test_rawarray_cmp_outside_intbounds(self): ops = """ [i0] @@ -5254,6 +5253,20 @@ """ self.optimize_loop(ops, expected) + def test_gcarray_outside_intbounds(self): + ops = """ + [p0] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = int_lt(i0, 256) + guard_true(i1) [] + """ + + expected = """ + [p0] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + """ + self.optimize_loop(ops, expected) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -206,6 +206,8 @@ EffectInfo.EF_CANNOT_RAISE, oopspecindex=EffectInfo.OS_RAW_FREE)) + chararray = lltype.GcArray(lltype.Char) + chararraydescr = cpu.arraydescrof(chararray) # array of structs (complex data) complexarray = lltype.GcArray( From noreply at buildbot.pypy.org Sat Feb 15 18:21:21 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Sat, 15 Feb 2014 18:21:21 +0100 (CET) Subject: [pypy-commit] pypy array-propagate-len: failing test for getfield_raw, skip bounds check if descr is None Message-ID: <20140215172121.DD85A1C01DE@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: array-propagate-len Changeset: r69158:a17311d182d4 Date: 2014-02-15 12:11 +0100 http://bitbucket.org/pypy/pypy/changeset/a17311d182d4/ Log: 
failing test for getfield_raw, skip bounds check if descr is None diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py --- a/rpython/jit/metainterp/optimizeopt/__init__.py +++ b/rpython/jit/metainterp/optimizeopt/__init__.py @@ -63,7 +63,7 @@ optimizer.propagate_all_forward() finally: debug_stop("jit-optimize") - + if __name__ == '__main__': print ALL_OPTS_NAMES diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -347,12 +347,14 @@ def optimize_GETARRAYITEM_RAW(self, op): self.emit_operation(op) descr = op.getdescr() - if descr.is_item_integer_bounded(): + if descr and descr.is_item_integer_bounded(): v1 = self.getvalue(op.result) v1.intbound.make_ge(IntLowerBound(descr.get_item_integer_min())) v1.intbound.make_lt( IntUpperBound(descr.get_item_integer_max() + 1)) + optimize_GETARRAYITEM_GC = optimize_GETARRAYITEM_RAW + def optimize_UNICODEGETITEM(self, op): self.emit_operation(op) v1 = self.getvalue(op.result) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5239,6 +5239,21 @@ """ self.optimize_loop(ops, ops) + def test_getfieldraw_cmp_outside_bounds(self): + ops = """ + [p0] + i0 = getfield_raw(p0, descr=chardescr) + i1 = int_gt(i0, -1) + guard_true(i1) [] + """ + + expected = """ + [p0] + i0 = getfield_raw(p0, descr=chardescr) + """ + self.optimize_loop(ops, expected) + + def test_rawarray_cmp_outside_intbounds(self): ops = """ [i0] From noreply at buildbot.pypy.org Sat Feb 15 18:21:23 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Sat, 15 Feb 2014 18:21:23 +0100 (CET) Subject: [pypy-commit] pypy array-propagate-len: merge default Message-ID: <20140215172123.041FC1C01DE@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: array-propagate-len Changeset: r69159:51b35f5af6cf Date: 2014-02-15 12:14 +0100 http://bitbucket.org/pypy/pypy/changeset/51b35f5af6cf/ Log: merge default diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -402,7 +402,7 @@ ('debug_exc', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DDO_LOG_EXC" debug_target'), ('debug_mem', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DPYPY_USE_TRIVIAL_MALLOC" debug_target'), ('no_obmalloc', '', '$(MAKE) CFLAGS="-g -O2 -DRPY_ASSERT -DPYPY_NO_OBMALLOC" $(TARGET)'), - ('linuxmemchk', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DPPY_USE_LINUXMEMCHK" debug_target'), + ('linuxmemchk', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DPYPY_USE_LINUXMEMCHK" debug_target'), ('llsafer', '', '$(MAKE) CFLAGS="-O2 -DRPY_LL_ASSERT" $(TARGET)'), ('lldebug', '', '$(MAKE) CFLAGS="$(DEBUGFLAGS) -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'), ('lldebug0','', '$(MAKE) CFLAGS="-O0 $(DEBUGFLAGS) -DRPY_ASSERT -DRPY_LL_ASSERT" debug_target'), From noreply at buildbot.pypy.org Sat Feb 15 18:21:24 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Sat, 15 Feb 2014 18:21:24 +0100 (CET) Subject: [pypy-commit] pypy array-propagate-len: fix getfieldraw Message-ID: <20140215172124.1E3C21C01DE@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: array-propagate-len Changeset: r69160:141205b24825 Date: 
2014-02-15 12:21 +0100 http://bitbucket.org/pypy/pypy/changeset/141205b24825/ Log: fix getfieldraw diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -336,7 +336,7 @@ v1.intbound.make_ge(IntLowerBound(0)) v1.intbound.make_lt(IntUpperBound(256)) - def optimize_GETFIELD_GC(self, op): + def optimize_GETFIELD_RAW(self, op): self.emit_operation(op) descr = op.getdescr() if descr.is_integer_bounded(): @@ -344,6 +344,8 @@ v1.intbound.make_ge(IntLowerBound(descr.get_integer_min())) v1.intbound.make_lt(IntUpperBound(descr.get_integer_max() + 1)) + optimize_GETFIELD_GC = optimize_GETFIELD_RAW + def optimize_GETARRAYITEM_RAW(self, op): self.emit_operation(op) descr = op.getdescr() From noreply at buildbot.pypy.org Sat Feb 15 18:21:25 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Sat, 15 Feb 2014 18:21:25 +0100 (CET) Subject: [pypy-commit] pypy array-propagate-len: test and implement optimization for interior fields Message-ID: <20140215172125.3BBA91C01DE@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: array-propagate-len Changeset: r69161:41e8eb67d110 Date: 2014-02-15 13:38 +0100 http://bitbucket.org/pypy/pypy/changeset/41e8eb67d110/ Log: test and implement optimization for interior fields diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -201,6 +201,24 @@ def is_float_field(self): return getkind(self.FIELD) == 'float' + def is_integer_bounded(self): + return getkind(self.FIELD) == 'int' \ + and rffi.sizeof(self.FIELD) < symbolic.WORD + + def get_integer_min(self): + if getkind(self.FIELD) != 'int': + assert False + + return intbounds.get_integer_min( + not _is_signed_kind(self.FIELD), rffi.sizeof(self.FIELD)) + + def get_integer_max(self): + if getkind(self.FIELD) != 'int': + assert False + + return intbounds.get_integer_max( + not _is_signed_kind(self.FIELD), rffi.sizeof(self.FIELD)) + _example_res = {'v': None, 'r': lltype.nullptr(llmemory.GCREF.TO), 'i': 0, diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -273,6 +273,15 @@ def is_float_field(self): return self.fielddescr.is_float_field() + def is_integer_bounded(self): + return self.fielddescr.is_integer_bounded() + + def get_integer_min(self): + return self.fielddescr.get_integer_min() + + def get_integer_max(self): + return self.fielddescr.get_integer_max() + def repr_of_descr(self): return '' % self.fielddescr.repr_of_descr() diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -346,6 +346,8 @@ optimize_GETFIELD_GC = optimize_GETFIELD_RAW + optimize_GETINTERIORFIELD_GC = optimize_GETFIELD_RAW + def optimize_GETARRAYITEM_RAW(self, op): self.emit_operation(op) descr = op.getdescr() diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5282,6 +5282,22 @@ """ self.optimize_loop(ops, expected) + def 
test_getinterior_outside_intbounds(self): + ops = """ + [p0] + f0 = getinteriorfield_gc(p0, 0, descr=fc_array_floatdescr) + i0 = getinteriorfield_gc(p0, 0, descr=fc_array_chardescr) + i1 = int_lt(i0, 256) + guard_true(i1) [] + """ + + expected = """ + [p0] + f0 = getinteriorfield_gc(p0, 0, descr=fc_array_floatdescr) + i0 = getinteriorfield_gc(p0, 0, descr=fc_array_chardescr) + """ + self.optimize_loop(ops, expected) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -225,6 +225,12 @@ rawarraydescr_char = cpu.arraydescrof(lltype.Array(lltype.Char, hints={'nolength': True})) + fc_array = lltype.GcArray( + lltype.Struct( + "floatchar", ("float", lltype.Float), ("char", lltype.Char))) + fc_array_descr = cpu.arraydescrof(fc_array) + fc_array_floatdescr = cpu.interiorfielddescrof(fc_array, "float") + fc_array_chardescr = cpu.interiorfielddescrof(fc_array, "char") for _name, _os in [ ('strconcatdescr', 'OS_STR_CONCAT'), From noreply at buildbot.pypy.org Sat Feb 15 18:21:26 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Sat, 15 Feb 2014 18:21:26 +0100 (CET) Subject: [pypy-commit] pypy array-propagate-len: improve test Message-ID: <20140215172126.58D281C01DE@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: array-propagate-len Changeset: r69162:6898ded0f2af Date: 2014-02-15 13:42 +0100 http://bitbucket.org/pypy/pypy/changeset/6898ded0f2af/ Log: improve test diff --git a/rpython/jit/backend/llsupport/test/test_descr.py b/rpython/jit/backend/llsupport/test/test_descr.py --- a/rpython/jit/backend/llsupport/test/test_descr.py +++ b/rpython/jit/backend/llsupport/test/test_descr.py @@ -435,12 +435,15 @@ def test_descr_integer_bounded(): - descr = FieldDescr('descr', 0, 1, FLAG_SIGNED) + descr = FieldDescr('descr', 0, symbolic.SIZEOF_CHAR, FLAG_SIGNED) assert descr.is_integer_bounded() descr = FieldDescr('descr', 0, symbolic.WORD, FLAG_UNSIGNED) assert not descr.is_integer_bounded() + descr = FieldDescr('descr', 0, symbolic.SIZEOF_FLOAT, FLAG_FLOAT) + assert not descr.is_integer_bounded() + def test_descr_get_integer_bounds(): descr = FieldDescr('decr', 0, 1, FLAG_UNSIGNED) From noreply at buildbot.pypy.org Sat Feb 15 18:21:27 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Sat, 15 Feb 2014 18:21:27 +0100 (CET) Subject: [pypy-commit] pypy array-propagate-len: fix FakeDescr, document branch Message-ID: <20140215172127.80D941C01DE@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: array-propagate-len Changeset: r69163:6be6ca6b8558 Date: 2014-02-15 18:07 +0100 http://bitbucket.org/pypy/pypy/changeset/6be6ca6b8558/ Log: fix FakeDescr, document branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -67,3 +67,7 @@ With a properly configured 256-color terminal (TERM=...-256color), the Mandelbrot set shown during translation now uses a range of 50 colours. Essential! + +.. branch: array-propagate-len +Kill some guards and operations in JIT traces by adding integer bounds +propagation for getfield_(raw|gc) and getarrayitem_(raw|gc). 
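
For reference, a minimal Python 2 sketch of the bound computation this branch adds as get_integer_min()/get_integer_max() in rpython/jit/metainterp/optimizeopt/intbounds.py; the expected values are the same ones checked by the new test_descr_get_integer_bounds test, so the snippet only restates what the changesets above already contain:

    def get_integer_min(is_unsigned, byte_size):
        # byte_size << 3 is the width of the field in bits
        if is_unsigned:
            return 0
        return -(1 << ((byte_size << 3) - 1))

    def get_integer_max(is_unsigned, byte_size):
        if is_unsigned:
            return (1 << (byte_size << 3)) - 1
        return (1 << ((byte_size << 3) - 1)) - 1

    # a one-byte unsigned field (e.g. chardescr) is bounded to [0, 255], so a
    # later int_lt(i0, 256) is always true and its guard can be killed
    assert get_integer_min(True, 1) == 0 and get_integer_max(True, 1) == 255
    # a one-byte signed field is bounded to [-128, 127]
    assert get_integer_min(False, 1) == -128 and get_integer_max(False, 1) == 127
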
diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -5563,6 +5563,8 @@ self.name = name def sort_key(self): return id(self) + def is_integer_bounded(self): + return False for n in ('inst_w_seq', 'inst_index', 'inst_w_list', 'inst_length', From noreply at buildbot.pypy.org Sat Feb 15 18:21:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Feb 2014 18:21:28 +0100 (CET) Subject: [pypy-commit] pypy array-propagate-len: Close branch ready to merge Message-ID: <20140215172128.92BF41C01DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: array-propagate-len Changeset: r69164:a33e24a26ee5 Date: 2014-02-15 18:15 +0100 http://bitbucket.org/pypy/pypy/changeset/a33e24a26ee5/ Log: Close branch ready to merge From noreply at buildbot.pypy.org Sat Feb 15 18:21:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Feb 2014 18:21:29 +0100 (CET) Subject: [pypy-commit] pypy default: hg merge array-propagate-len Message-ID: <20140215172129.B719F1C01DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69165:754284731a51 Date: 2014-02-15 18:20 +0100 http://bitbucket.org/pypy/pypy/changeset/754284731a51/ Log: hg merge array-propagate-len Kill some guards and operations in JIT traces by adding integer bounds propagation for getfield_(raw|gc) and getarrayitem_(raw|gc). diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -70,3 +70,7 @@ .. branch: NonConstant Simplify implementation of NonConstant. + +.. branch: array-propagate-len +Kill some guards and operations in JIT traces by adding integer bounds +propagation for getfield_(raw|gc) and getarrayitem_(raw|gc). 
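
A simplified worked illustration of the merged changes below (plain Python arithmetic, not the real OptIntBounds code): once a getfield_gc/getarrayitem_gc result carries a known [min, max] range taken from its descr, a comparison that the range already decides folds to a constant and its guard disappears, while a comparison the range does not decide is left untouched:

    # i0 = getfield_gc(p0, descr=chardescr): chardescr is one unsigned byte,
    # so the optimizer records the range [0, 255] for i0, i.e. the values of
    # get_integer_min(True, 1) and get_integer_max(True, 1).
    i0_lower, i0_upper = 0, 255

    # i1 = int_lt(i0, 256) is provably true because the whole range lies
    # below 256, so guard_true(i1) is dropped, which is what
    # test_getfield_cmp_above_bounds checks.
    assert i0_upper < 256

    # int_lt(i0, 255) is NOT decided by the range (i0 could be 255), which is
    # why test_getfield_cmp_in_bounds keeps its guards unchanged.
    assert not (i0_upper < 255)
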
diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -1,10 +1,12 @@ import py, weakref from rpython.jit.backend import model from rpython.jit.backend.llgraph import support +from rpython.jit.backend.llsupport import symbolic from rpython.jit.metainterp.history import AbstractDescr from rpython.jit.metainterp.history import Const, getkind from rpython.jit.metainterp.history import INT, REF, FLOAT, VOID from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.optimizeopt import intbounds from rpython.jit.codewriter import longlong, heaptracker from rpython.jit.codewriter.effectinfo import EffectInfo @@ -119,6 +121,24 @@ def is_field_signed(self): return _is_signed_kind(self.FIELD) + def is_integer_bounded(self): + return getkind(self.FIELD) == 'int' \ + and rffi.sizeof(self.FIELD) < symbolic.WORD + + def get_integer_min(self): + if getkind(self.FIELD) != 'int': + assert False + + return intbounds.get_integer_min( + not _is_signed_kind(self.FIELD), rffi.sizeof(self.FIELD)) + + def get_integer_max(self): + if getkind(self.FIELD) != 'int': + assert False + + return intbounds.get_integer_max( + not _is_signed_kind(self.FIELD), rffi.sizeof(self.FIELD)) + def _is_signed_kind(TYPE): return (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and rffi.cast(TYPE, -1) == -1) @@ -144,6 +164,25 @@ def is_array_of_structs(self): return isinstance(self.A.OF, lltype.Struct) + def is_item_integer_bounded(self): + return getkind(self.A.OF) == 'int' \ + and rffi.sizeof(self.A.OF) < symbolic.WORD + + def get_item_integer_min(self): + if getkind(self.A.OF) != 'int': + assert False + + return intbounds.get_integer_min( + not _is_signed_kind(self.A.OF), rffi.sizeof(self.A.OF)) + + def get_item_integer_max(self): + if getkind(self.A.OF) != 'int': + assert False + + return intbounds.get_integer_max( + not _is_signed_kind(self.A.OF), rffi.sizeof(self.A.OF)) + + class InteriorFieldDescr(AbstractDescr): def __init__(self, A, fieldname): self.A = A @@ -162,6 +201,24 @@ def is_float_field(self): return getkind(self.FIELD) == 'float' + def is_integer_bounded(self): + return getkind(self.FIELD) == 'int' \ + and rffi.sizeof(self.FIELD) < symbolic.WORD + + def get_integer_min(self): + if getkind(self.FIELD) != 'int': + assert False + + return intbounds.get_integer_min( + not _is_signed_kind(self.FIELD), rffi.sizeof(self.FIELD)) + + def get_integer_max(self): + if getkind(self.FIELD) != 'int': + assert False + + return intbounds.get_integer_max( + not _is_signed_kind(self.FIELD), rffi.sizeof(self.FIELD)) + _example_res = {'v': None, 'r': lltype.nullptr(llmemory.GCREF.TO), 'i': 0, diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -6,6 +6,7 @@ from rpython.jit.metainterp import history from rpython.jit.codewriter import heaptracker, longlong from rpython.jit.codewriter.longlong import is_longlong +from rpython.jit.metainterp.optimizeopt import intbounds class GcCache(object): @@ -103,6 +104,26 @@ def is_field_signed(self): return self.flag == FLAG_SIGNED + def is_integer_bounded(self): + return self.flag in (FLAG_SIGNED, FLAG_UNSIGNED) \ + and self.field_size < symbolic.WORD + + def get_integer_min(self): + if self.flag == FLAG_UNSIGNED: + return intbounds.get_integer_min(True, self.field_size) + elif self.flag == FLAG_SIGNED: + return 
intbounds.get_integer_min(False, self.field_size) + + assert False + + def get_integer_max(self): + if self.flag == FLAG_UNSIGNED: + return intbounds.get_integer_max(True, self.field_size) + elif self.flag == FLAG_SIGNED: + return intbounds.get_integer_max(False, self.field_size) + + assert False + def sort_key(self): return self.offset @@ -182,6 +203,28 @@ def is_array_of_structs(self): return self.flag == FLAG_STRUCT + def is_item_integer_bounded(self): + return self.flag in (FLAG_SIGNED, FLAG_UNSIGNED) \ + and self.itemsize < symbolic.WORD + + def get_item_integer_min(self): + if self.flag == FLAG_UNSIGNED: + return intbounds.get_integer_min(True, self.itemsize) + elif self.flag == FLAG_SIGNED: + return intbounds.get_integer_min(False, self.itemsize) + + assert False + + def get_item_integer_max(self): + if self.flag == FLAG_UNSIGNED: + return intbounds.get_integer_max(True, self.itemsize) + elif self.flag == FLAG_SIGNED: + return intbounds.get_integer_max(False, self.itemsize) + + assert False + + + def repr_of_descr(self): return '' % (self.flag, self.itemsize) @@ -230,6 +273,15 @@ def is_float_field(self): return self.fielddescr.is_float_field() + def is_integer_bounded(self): + return self.fielddescr.is_integer_bounded() + + def get_integer_min(self): + return self.fielddescr.get_integer_min() + + def get_integer_max(self): + return self.fielddescr.get_integer_max() + def repr_of_descr(self): return '' % self.fielddescr.repr_of_descr() diff --git a/rpython/jit/backend/llsupport/test/test_descr.py b/rpython/jit/backend/llsupport/test/test_descr.py --- a/rpython/jit/backend/llsupport/test/test_descr.py +++ b/rpython/jit/backend/llsupport/test/test_descr.py @@ -432,3 +432,24 @@ assert descr.basesize == struct.calcsize("PP") # hash, length assert descr.lendescr.offset == struct.calcsize("P") # hash assert not descr.is_array_of_pointers() + + +def test_descr_integer_bounded(): + descr = FieldDescr('descr', 0, symbolic.SIZEOF_CHAR, FLAG_SIGNED) + assert descr.is_integer_bounded() + + descr = FieldDescr('descr', 0, symbolic.WORD, FLAG_UNSIGNED) + assert not descr.is_integer_bounded() + + descr = FieldDescr('descr', 0, symbolic.SIZEOF_FLOAT, FLAG_FLOAT) + assert not descr.is_integer_bounded() + + +def test_descr_get_integer_bounds(): + descr = FieldDescr('decr', 0, 1, FLAG_UNSIGNED) + assert descr.get_integer_min() == 0 + assert descr.get_integer_max() == 255 + + descr = FieldDescr('descr', 0, 1, FLAG_SIGNED) + assert descr.get_integer_min() == -128 + assert descr.get_integer_max() == 127 diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py --- a/rpython/jit/metainterp/optimizeopt/__init__.py +++ b/rpython/jit/metainterp/optimizeopt/__init__.py @@ -63,7 +63,7 @@ optimizer.propagate_all_forward() finally: debug_stop("jit-optimize") - + if __name__ == '__main__': print ALL_OPTS_NAMES diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -9,6 +9,20 @@ from rpython.jit.metainterp.resoperation import rop +def get_integer_min(is_unsigned, byte_size): + if is_unsigned: + return 0 + else: + return -(1 << ((byte_size << 3) - 1)) + + +def get_integer_max(is_unsigned, byte_size): + if is_unsigned: + return (1 << (byte_size << 3)) - 1 + else: + return (1 << ((byte_size << 3) - 1)) - 1 + + class OptIntBounds(Optimization): """Keeps track of the bounds placed on integers by guards and 
remove redundant guards""" @@ -322,6 +336,29 @@ v1.intbound.make_ge(IntLowerBound(0)) v1.intbound.make_lt(IntUpperBound(256)) + def optimize_GETFIELD_RAW(self, op): + self.emit_operation(op) + descr = op.getdescr() + if descr.is_integer_bounded(): + v1 = self.getvalue(op.result) + v1.intbound.make_ge(IntLowerBound(descr.get_integer_min())) + v1.intbound.make_lt(IntUpperBound(descr.get_integer_max() + 1)) + + optimize_GETFIELD_GC = optimize_GETFIELD_RAW + + optimize_GETINTERIORFIELD_GC = optimize_GETFIELD_RAW + + def optimize_GETARRAYITEM_RAW(self, op): + self.emit_operation(op) + descr = op.getdescr() + if descr and descr.is_item_integer_bounded(): + v1 = self.getvalue(op.result) + v1.intbound.make_ge(IntLowerBound(descr.get_item_integer_min())) + v1.intbound.make_lt( + IntUpperBound(descr.get_item_integer_max() + 1)) + + optimize_GETARRAYITEM_GC = optimize_GETARRAYITEM_RAW + def optimize_UNICODEGETITEM(self, op): self.emit_operation(op) v1 = self.getvalue(op.result) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5200,6 +5200,104 @@ """ self.optimize_loop(ops, ops) + def test_getfield_cmp_above_bounds(self): + ops = """ + [p0] + i0 = getfield_gc(p0, descr=chardescr) + i1 = int_lt(i0, 256) + guard_true(i1) [] + """ + + expected = """ + [p0] + i0 = getfield_gc(p0, descr=chardescr) + """ + self.optimize_loop(ops, expected) + + def test_getfield_cmp_below_bounds(self): + ops = """ + [p0] + i0 = getfield_gc(p0, descr=chardescr) + i1 = int_gt(i0, -1) + guard_true(i1) [] + """ + + expected = """ + [p0] + i0 = getfield_gc(p0, descr=chardescr) + """ + self.optimize_loop(ops, expected) + + def test_getfield_cmp_in_bounds(self): + ops = """ + [p0] + i0 = getfield_gc(p0, descr=chardescr) + i1 = int_gt(i0, 0) + guard_true(i1) [] + i2 = int_lt(i0, 255) + guard_true(i2) [] + """ + self.optimize_loop(ops, ops) + + def test_getfieldraw_cmp_outside_bounds(self): + ops = """ + [p0] + i0 = getfield_raw(p0, descr=chardescr) + i1 = int_gt(i0, -1) + guard_true(i1) [] + """ + + expected = """ + [p0] + i0 = getfield_raw(p0, descr=chardescr) + """ + self.optimize_loop(ops, expected) + + + def test_rawarray_cmp_outside_intbounds(self): + ops = """ + [i0] + i1 = getarrayitem_raw(i0, 0, descr=rawarraydescr_char) + i2 = int_lt(i1, 256) + guard_true(i2) [] + """ + + expected = """ + [i0] + i1 = getarrayitem_raw(i0, 0, descr=rawarraydescr_char) + """ + self.optimize_loop(ops, expected) + + def test_gcarray_outside_intbounds(self): + ops = """ + [p0] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = int_lt(i0, 256) + guard_true(i1) [] + """ + + expected = """ + [p0] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + """ + self.optimize_loop(ops, expected) + + def test_getinterior_outside_intbounds(self): + ops = """ + [p0] + f0 = getinteriorfield_gc(p0, 0, descr=fc_array_floatdescr) + i0 = getinteriorfield_gc(p0, 0, descr=fc_array_chardescr) + i1 = int_lt(i0, 256) + guard_true(i1) [] + """ + + expected = """ + [p0] + f0 = getinteriorfield_gc(p0, 0, descr=fc_array_floatdescr) + i0 = getinteriorfield_gc(p0, 0, descr=fc_array_chardescr) + """ + self.optimize_loop(ops, expected) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- 
a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -5563,6 +5563,8 @@ self.name = name def sort_key(self): return id(self) + def is_integer_bounded(self): + return False for n in ('inst_w_seq', 'inst_index', 'inst_w_list', 'inst_length', diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -92,6 +92,7 @@ NODE.become(lltype.GcStruct('NODE', ('parent', OBJECT), ('value', lltype.Signed), ('floatval', lltype.Float), + ('charval', lltype.Char), ('next', lltype.Ptr(NODE)))) NODE2 = lltype.GcStruct('NODE2', ('parent', NODE), ('other', lltype.Ptr(NODE))) @@ -108,6 +109,7 @@ nodesize2 = cpu.sizeof(NODE2) valuedescr = cpu.fielddescrof(NODE, 'value') floatdescr = cpu.fielddescrof(NODE, 'floatval') + chardescr = cpu.fielddescrof(NODE, 'charval') nextdescr = cpu.fielddescrof(NODE, 'next') otherdescr = cpu.fielddescrof(NODE2, 'other') @@ -204,6 +206,8 @@ EffectInfo.EF_CANNOT_RAISE, oopspecindex=EffectInfo.OS_RAW_FREE)) + chararray = lltype.GcArray(lltype.Char) + chararraydescr = cpu.arraydescrof(chararray) # array of structs (complex data) complexarray = lltype.GcArray( @@ -221,6 +225,12 @@ rawarraydescr_char = cpu.arraydescrof(lltype.Array(lltype.Char, hints={'nolength': True})) + fc_array = lltype.GcArray( + lltype.Struct( + "floatchar", ("float", lltype.Float), ("char", lltype.Char))) + fc_array_descr = cpu.arraydescrof(fc_array) + fc_array_floatdescr = cpu.interiorfielddescrof(fc_array, "float") + fc_array_chardescr = cpu.interiorfielddescrof(fc_array, "char") for _name, _os in [ ('strconcatdescr', 'OS_STR_CONCAT'), diff --git a/rpython/jit/metainterp/optimizeopt/util.py b/rpython/jit/metainterp/optimizeopt/util.py --- a/rpython/jit/metainterp/optimizeopt/util.py +++ b/rpython/jit/metainterp/optimizeopt/util.py @@ -1,3 +1,5 @@ +import itertools + import py from rpython.rlib.objectmodel import r_dict, compute_identity_hash from rpython.rlib.rarithmetic import intmask @@ -136,13 +138,16 @@ print ' Comparing lists '.center(totwidth, '-') text_right = text_right or 'expected' print '%s| %s' % ('optimized'.center(width), text_right.center(width)) - for op1, op2 in zip(oplist1, oplist2): + for op1, op2 in itertools.izip_longest(oplist1, oplist2, fillvalue=''): txt1 = str(op1) txt2 = str(op2) while txt1 or txt2: print '%s| %s' % (txt1[:width].ljust(width), txt2[:width]) txt1 = txt1[width:] txt2 = txt2[width:] + print '-' * totwidth + + for op1, op2 in zip(oplist1, oplist2): assert op1.getopnum() == op2.getopnum() assert op1.numargs() == op2.numargs() for i in range(op1.numargs()): @@ -177,6 +182,5 @@ else: assert False assert len(oplist1) == len(oplist2) - print '-'*totwidth return True From noreply at buildbot.pypy.org Sat Feb 15 18:34:48 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Feb 2014 18:34:48 +0100 (CET) Subject: [pypy-commit] pypy default: Fix another rare annotation ordering issue Message-ID: <20140215173448.98A8B1C1154@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69166:625f047a5759 Date: 2014-02-15 18:34 +0100 http://bitbucket.org/pypy/pypy/changeset/625f047a5759/ Log: Fix another rare annotation ordering issue diff --git a/rpython/annotator/test/test_annrpython.py b/rpython/annotator/test/test_annrpython.py --- a/rpython/annotator/test/test_annrpython.py +++ 
b/rpython/annotator/test/test_annrpython.py @@ -4170,6 +4170,21 @@ a = self.RPythonAnnotator() assert isinstance(a.build_types(f, []), annmodel.SomeOrderedDict) + def test_enumerate_none(self): + # enumerate(None) can occur as an intermediate step during a full + # annotation, because the None will be generalized later to + # None-or-list for example + def f(flag): + if flag: + x = None + else: + x = [42] + return enumerate(x).next() + a = self.RPythonAnnotator() + s = a.build_types(f, [int]) + assert isinstance(s, annmodel.SomeTuple) + assert s.items[1].const == 42 + def g(n): return [0, 1, 2, n] diff --git a/rpython/annotator/unaryop.py b/rpython/annotator/unaryop.py --- a/rpython/annotator/unaryop.py +++ b/rpython/annotator/unaryop.py @@ -614,6 +614,8 @@ return can_throw def next(self): + if s_None.contains(self.s_container): + return s_ImpossibleValue # so far if self.variant == ("enumerate",): s_item = self.s_container.getanyitem() return SomeTuple((SomeInteger(nonneg=True), s_item)) From noreply at buildbot.pypy.org Sat Feb 15 21:10:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Feb 2014 21:10:09 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Mutex and condition variable: the winning combo for writing reasonable Message-ID: <20140215201009.432021C1504@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r740:386522880dd5 Date: 2014-02-15 21:09 +0100 http://bitbucket.org/pypy/stmgc/changeset/386522880dd5/ Log: Mutex and condition variable: the winning combo for writing reasonable code. I keep rediscovering how they are actually a good idea. Maybe later we'll figure out that we need more control to avoid spurious wake-ups, but I think that with low numbers of threads it's fine. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -28,22 +28,51 @@ other_pseg = get_priv_segment(current_lock_owner - 1); assert(other_pseg->write_lock_num == current_lock_owner); - if ((STM_PSEGMENT->approximate_start_time < - other_pseg->approximate_start_time) || is_inevitable()) { - /* we are the thread that must succeed */ - XXX /* don't go here if the other thread is inevitable! */ - ... - other_pseg->need_abort = 1; - _stm_start_safe_point(0); - /* XXX: not good, maybe should be signalled by other thread */ - usleep(1); - _stm_stop_safe_point(0); - /* done, will retry */ + /* note: other_pseg is currently running a transaction, and it cannot + commit or abort unexpectedly, because to do that it would need to + suspend us. So the reading of other_pseg->start_time and + other_pseg->transaction_state is stable, with one exception: the + 'transaction_state' can go from TS_REGULAR to TS_INEVITABLE under + our feet. */ + if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { + /* I'm inevitable, so the other is not. */ + assert(other_pseg->transaction_state != TS_INEVITABLE); + other_pseg->transaction_state = TS_MUST_ABORT; + } + else if (STM_PSEGMENT->start_time >= other_pseg->start_time) { + /* The other thread started before us, so I should abort, as I'm + the least long-running transaction. */ } else { - /* we are the thread that must abort */ + /* The other thread started strictly after us. We try to tell + it to abort, using compare_and_swap(). This fails if its + 'transaction_state' is already TS_INEVITABLE. 
*/ + __sync_bool_compare_and_swap( + &other_pseg->transaction_state, TS_REGULAR, TS_MUST_ABORT); + } + + if (other_pseg->transaction_state != TS_MUST_ABORT) { + /* if the other thread is not in aborting-soon mode, then we must + abort. */ stm_abort_transaction(); } + else { + /* otherwise, we will issue a safe point and wait: */ + mutex_lock(); + STM_PSEGMENT->safe_point = SP_SAFE_POINT; + + /* signal the other thread; it must abort */ + cond_broadcast(); + + /* then wait, hopefully until the other thread broadcasts "I'm + done aborting" (spurious wake-ups are ok) */ + cond_wait(); + + /* now we return into _stm_write_slowpath() and will try again + to acquire the write lock on our object. */ + STM_PSEGMENT->safe_point = SP_RUNNING; + mutex_unlock(); + } } @@ -127,6 +156,8 @@ /* GS invalid before this point! */ acquire_thread_segment(tl); + assert(STM_SEGMENT->activity == ACT_NOT_RUNNING); + STM_SEGMENT->activity = jmpbuf != NULL ? ACT_REGULAR : ACT_INEVITABLE; STM_SEGMENT->jmpbuf_ptr = jmpbuf; uint8_t old_rv = STM_SEGMENT->transaction_read_version; @@ -179,6 +210,8 @@ { stm_thread_local_t *tl = STM_SEGMENT->running_thread; + assert(STM_SEGMENT->activity != ACT_NOT_RUNNING); + /* cannot abort any more */ STM_SEGMENT->jmpbuf_ptr = NULL; @@ -187,6 +220,8 @@ /* copy modified object versions to other threads */ push_modified_to_other_threads(); + STM_SEGMENT->activity = ACT_NOT_RUNNING; + release_thread_segment(tl); reset_all_creation_markers(); } diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -56,9 +56,23 @@ struct list_s *old_objects_to_trace; struct list_s *modified_objects; struct list_s *creation_markers; - uint64_t approximate_start_time; + uint64_t start_time; uint8_t write_lock_num; - uint8_t need_abort; + uint8_t safe_point; /* one of the SP_xxx constants */ + uint8_t transaction_state; /* one of the TS_xxx constants */ +}; + +enum { + SP_OUTSIDE=0, + SP_RUNNING, + SP_SAFE_POINT, + SP_SAFE_POINT_CAN_COLLECT, +}; +enum { + TS_NONE=0, + TS_REGULAR, + TS_INEVITABLE, + TS_MUST_ABORT, }; static char *stm_object_pages; @@ -94,8 +108,4 @@ return ((stm_creation_marker_t *)(((uintptr_t)obj) >> 8))->cm != 0; } -static inline bool is_inevitable(void) { - return STM_SEGMENT->jmpbuf_ptr == NULL; -} - static void teardown_core(void); diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -1,35 +1,52 @@ -#include +#include #include #include #include +/* XXX Getting the most efficient locks is hard, but the following + simplification is probably good enough for small numbers of threads: + when a thread wants to check or change any global state (e.g. start + running a transaction, etc.), it acquires this single mutex. If + additionally it wants to wait until the global state is changed by + someone else, it waits on the condition variable. This should be + all we need for synchronization. + + Maybe look at https://github.com/neosmart/pevents for how they do + WaitForMultipleObjects(). 
+*/ + + static union { struct { - sem_t semaphore; + pthread_mutex_t global_mutex; + pthread_cond_t global_cond; + /* some additional pieces of global state follow */ uint8_t in_use[NB_SEGMENTS + 1]; /* 1 if running a pthread */ - uint64_t global_time; /* approximate */ + uint64_t global_time; }; - char reserved[64]; -} segments_ctl __attribute__((aligned(64))); + char reserved[128]; +} sync_ctl __attribute__((aligned(64))); static void setup_sync(void) { - memset(segments_ctl.in_use, 0, sizeof(segments_ctl.in_use)); - segments_ctl.in_use[NB_SEGMENTS] = 0xff; - if (sem_init(&segments_ctl.semaphore, 0, NB_SEGMENTS) != 0) { - perror("sem_init"); + if (pthread_mutex_init(&sync_ctl.global_mutex, NULL) != 0 || + pthread_cond_init(&sync_ctl.global_cond, NULL) != 0) { + perror("mutex/cond initialization"); abort(); } + sync_ctl.in_use[NB_SEGMENTS] = 0xff; } static void teardown_sync(void) { - if (sem_destroy(&segments_ctl.semaphore) != 0) { - perror("sem_destroy"); + if (pthread_mutex_destroy(&sync_ctl.global_mutex) != 0 || + pthread_cond_destroy(&sync_ctl.global_cond) != 0) { + perror("mutex/cond destroy"); abort(); } + memset(sync_ctl, 0, sizeof(sync_ctl.in_use)); } static void set_gs_register(char *value) @@ -40,49 +57,92 @@ } } +static inline void mutex_lock(void) +{ + if (UNLIKELY(pthread_mutex_lock(&sync_ctl.global_mutex) != 0)) { + perror("pthread_mutex_lock"); + abort(); + } +} + +static inline void mutex_unlock(void) +{ + if (UNLIKELY(pthread_mutex_unlock(&sync_ctl.global_mutex) != 0)) { + perror("pthread_mutex_unlock"); + abort(); + } +} + +static inline void assert_has_mutex(void) +{ + assert(pthread_mutex_trylock(&sync_ctl.global_mutex) == EBUSY); +} + +static inline void cond_wait(void) +{ + if (UNLIKELY(pthread_cond_wait(&sync_ctl.global_cond, + &sync_ctl.global_mutex) != 0)) { + perror("pthread_cond_wait"); + abort(); + } +} + +static inline void cond_broadcast(void) +{ + if (UNLIKELY(pthread_cond_broadcast(&sync_ctl.global_cond) != 0)) { + perror("pthread_cond_broadcast"); + abort(); + } +} + static void acquire_thread_segment(stm_thread_local_t *tl) { /* This function acquires a segment for the currently running thread, and set up the GS register if it changed. */ - while (sem_wait(&segments_ctl.semaphore) != 0) { - if (errno != EINTR) { - perror("sem_wait"); - abort(); + assert_has_mutex(); + assert(_is_tl_registered(tl)); + + retry: + int num = tl->associated_segment_num; + if (sync_ctl.in_use[num] == 0) { + /* fast-path: we can get the same segment number than the one + we had before. The value stored in GS is still valid. */ + goto got_num; + } + /* Look for the next free segment. If there is none, wait for + the condition variable. */ + int i; + for (i = 0; i < NB_SEGMENTS; i++) { + num = (num + 1) % NB_SEGMENTS; + if (sync_ctl.in_use[num] == 0) { + /* we're getting 'num', a different number. */ + tl->associated_segment_num = num; + set_gs_register(get_segment_base(num)); + goto got_num; } } - assert(_is_tl_registered(tl)); - int num = tl->associated_segment_num; - if (__sync_lock_test_and_set(&segments_ctl.in_use[num], 1) == 0) { - /* fast-path: reacquired the same segment number than the one - we had before. The value stored in GS is still valid. */ - goto exit; - } - /* Look for the next free segment. There must be one, because we - acquired the semaphore above. 
*/ - while (1) { - num = (num + 1) % NB_SEGMENTS; - if (__sync_lock_test_and_set(&segments_ctl.in_use[num], 1) == 0) - break; - } - tl->associated_segment_num = num; - set_gs_register(get_segment_base(num)); + /* Wait and retry */ + cond_wait(); + goto retry; - exit: + got_num: + sync_ctl.in_use[num] = 1; assert(STM_SEGMENT->running_thread == NULL); STM_SEGMENT->running_thread = tl; - - /* global_time is approximate -> no synchronization required */ - STM_PSEGMENT->approximate_start_time = ++segments_ctl.global_time; + STM_PSEGMENT->start_time = ++segments_ctl.global_time; } static void release_thread_segment(stm_thread_local_t *tl) { + assert_has_mutex(); + assert(STM_SEGMENT->running_thread == tl); STM_SEGMENT->running_thread = NULL; - int num = tl->associated_segment_num; - __sync_lock_release(&segments_ctl.in_use[num]); - sem_post(&segments_ctl.semaphore); + assert(sync_ctl.in_use[tl->associated_segment_num] == 1); + sync_ctl.in_use[tl->associated_segment_num] = 0; + + cond_broadcast(); } static bool _running_transaction(void) diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -3,6 +3,13 @@ static void setup_sync(void); static void teardown_sync(void); -/* acquire and release one of the segments for running the given thread */ +/* all synchronization is done via a mutex and condition variable */ +static void mutex_lock(void); +static void mutex_unlock(void); +static void cond_wait(void); +static void cond_broadcast(void); + +/* acquire and release one of the segments for running the given thread + (must have the mutex acquired!) */ static void acquire_thread_segment(stm_thread_local_t *tl); static void release_thread_segment(stm_thread_local_t *tl); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -207,17 +207,19 @@ /* Starting and ending transactions. You should only call stm_read(), stm_write() and stm_allocate() from within a transaction. Use the macro STM_START_TRANSACTION() to start a transaction that - can be restarted using the 'jmpbuf' (a pointer to a local variable - of type stm_jmpbuf_t). */ + can be restarted using the 'jmpbuf' (a local variable of type + stm_jmpbuf_t). */ #define STM_START_TRANSACTION(tl, jmpbuf) ({ \ - int _restart = __builtin_setjmp(jmpbuf); \ - _stm_start_transaction(tl, jmpbuf); \ + int _restart = __builtin_setjmp(&jmpbuf); \ + _stm_start_transaction(tl, &jmpbuf); \ _restart; \ }) /* Start an inevitable transaction, if it's going to return from the current function immediately. */ -void stm_start_inevitable_transaction(stm_thread_local_t *tl); +static inline void stm_start_inevitable_transaction(stm_thread_local_t *tl) { + _stm_start_transaction(tl, NULL); +} /* Commit a transaction. */ void stm_commit_transaction(void); From noreply at buildbot.pypy.org Sat Feb 15 21:26:39 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Feb 2014 21:26:39 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: in-progress Message-ID: <20140215202639.46F761C1154@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r741:764a83dbf7d7 Date: 2014-02-15 21:26 +0100 http://bitbucket.org/pypy/stmgc/changeset/764a83dbf7d7/ Log: in-progress diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -153,13 +153,20 @@ void _stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf) { + mutex_lock(); + /* GS invalid before this point! 
*/ acquire_thread_segment(tl); - assert(STM_SEGMENT->activity == ACT_NOT_RUNNING); - STM_SEGMENT->activity = jmpbuf != NULL ? ACT_REGULAR : ACT_INEVITABLE; + assert(STM_SEGMENT->safe_point == SP_NO_TRANSACTION); + assert(STM_SEGMENT->transaction_state == TS_NONE); + STM_SEGMENT->safe_point = SP_RUNNING; + STM_SEGMENT->transaction_state = (jmpbuf != NULL ? TS_REGULAR + : TS_INEVITABLE); STM_SEGMENT->jmpbuf_ptr = jmpbuf; + mutex_unlock(); + uint8_t old_rv = STM_SEGMENT->transaction_read_version; STM_SEGMENT->transaction_read_version = old_rv + 1; if (UNLIKELY(old_rv == 0xff)) @@ -200,38 +207,71 @@ list_clear(STM_PSEGMENT->modified_objects); if (conflicted) { - struct _thread_local1_s *remote_TL = (struct _thread_local1_s *) - REAL_ADDRESS(remote_base, _STM_TL); - remote_TL->need_abort = 1; + ...; contention management again! + get_segment(remote_num)->transaction_state = TS_MUST_ABORT; } } void stm_commit_transaction(void) { + mutex_lock(); + + assert(STM_SEGMENT->safe_point = SP_RUNNING); stm_thread_local_t *tl = STM_SEGMENT->running_thread; - assert(STM_SEGMENT->activity != ACT_NOT_RUNNING); + switch (STM_SEGMENT->transaction_state) { - /* cannot abort any more */ - STM_SEGMENT->jmpbuf_ptr = NULL; + case TS_REGULAR: + /* cannot abort any more */ + STM_SEGMENT->jmpbuf_ptr = NULL; + break; - ... + case TS_INEVITABLE: + //... + abort(); // XXX do it + break; + + case TS_MUST_ABORT: + mutex_unlock(); + stm_abort_transaction(); + + default: + assert(!"commit: bad transaction_state"); + } /* copy modified object versions to other threads */ push_modified_to_other_threads(); - STM_SEGMENT->activity = ACT_NOT_RUNNING; + release_thread_segment(tl); /* includes the cond_broadcast(); */ + STM_SEGMENT->safe_point = SP_NO_TRANSACTION; + STM_SEGMENT->transaction_state = TS_NONE; + mutex_unlock(); - release_thread_segment(tl); reset_all_creation_markers(); } void stm_abort_transaction(void) { + mutex_lock(); + stm_thread_local_t *tl = STM_SEGMENT->running_thread; stm_jmpbuf_t *jmpbuf_ptr = STM_SEGMENT->jmpbuf_ptr; - STM_SEGMENT->need_abort = 0; - release_thread_segment(tl); + + switch (STM_SEGMENT->transaction_state) { + case TS_REGULAR: + case TS_MUST_ABORT: + break; + case TS_INEVITABLE: + assert(!"abort: transaction_state == TS_INEVITABLE"); + default: + assert(!"abort: bad transaction_state"); + } + + release_thread_segment(tl); /* includes the cond_broadcast(); */ + STM_SEGMENT->safe_point = SP_NO_TRANSACTION; + STM_SEGMENT->transaction_state = TS_NONE; + mutex_unlock(); + reset_all_creation_markers(); assert(jmpbuf_ptr != NULL); diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -63,7 +63,7 @@ }; enum { - SP_OUTSIDE=0, + SP_NO_TRANSACTION=0, SP_RUNNING, SP_SAFE_POINT, SP_SAFE_POINT_CAN_COLLECT, From noreply at buildbot.pypy.org Sat Feb 15 23:14:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Feb 2014 23:14:04 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Mostly compilation fixes Message-ID: <20140215221404.B406E1C1178@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r742:d8f51cf89286 Date: 2014-02-15 23:13 +0100 http://bitbucket.org/pypy/stmgc/changeset/d8f51cf89286/ Log: Mostly compilation fixes diff --git a/c7/stm/contention.c b/c7/stm/contention.c new file mode 100644 --- /dev/null +++ b/c7/stm/contention.c @@ -0,0 +1,60 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +static void contention_management(uint8_t other_segment_num) +{ + /* A simple contention manager. 
Called when we do stm_write() + on an object, but some other thread already holds the write + lock on the same object. */ + + assert_has_mutex(); + assert(other_segment_num != STM_SEGMENT->segment_num); + + /* Who should abort here: this thread, or the other thread? */ + struct stm_priv_segment_info_s* other_pseg; + other_pseg = get_priv_segment(other_segment_num); + + /* note: other_pseg is currently running a transaction, and it cannot + commit or abort unexpectedly, because to do that it would need to + suspend us. So the reading of other_pseg->start_time and + other_pseg->transaction_state is stable, with one exception: the + 'transaction_state' can go from TS_REGULAR to TS_INEVITABLE under + our feet. */ + if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { + /* I'm inevitable, so the other is not. */ + assert(other_pseg->transaction_state != TS_INEVITABLE); + other_pseg->transaction_state = TS_MUST_ABORT; + } + else if (other_pseg->start_time < STM_PSEGMENT->start_time) { + /* The other thread started before us, so I should abort, as I'm + the least long-running transaction. */ + } + else if (other_pseg->transaction_state == TS_REGULAR) { + /* The other thread started strictly after us. We tell it + to abort if we can (e.g. if it's not TS_INEVITABLE). */ + other_pseg->transaction_state = TS_MUST_ABORT; + } + + if (other_pseg->transaction_state != TS_MUST_ABORT) { + /* if the other thread is not in aborting-soon mode, then we must + abort. */ + abort_with_mutex(); + } + else { + /* otherwise, we will issue a safe point and wait: */ + STM_PSEGMENT->safe_point = SP_SAFE_POINT; + + /* signal the other thread; it must abort */ + cond_broadcast(); + + /* then wait, hopefully until the other thread broadcasts "I'm + done aborting" (spurious wake-ups are ok) */ + cond_wait(); + + /* now we return into _stm_write_slowpath() and will try again + to acquire the write lock on our object. */ + STM_PSEGMENT->safe_point = SP_RUNNING; + } +} diff --git a/c7/stm/contention.h b/c7/stm/contention.h new file mode 100644 --- /dev/null +++ b/c7/stm/contention.h @@ -0,0 +1,2 @@ + +static void contention_management(uint8_t other_segment_num); diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -7,75 +7,14 @@ static uint8_t write_locks[READMARKER_END - READMARKER_START]; +static void abort_with_mutex(void) __attribute__((noreturn)); + static void teardown_core(void) { memset(write_locks, 0, sizeof(write_locks)); } -static void contention_management(uint8_t current_lock_owner) -{ - /* A simple contention manager. Called when we do stm_write() - on an object, but some other thread already holds the write - lock on the same object. */ - - /* By construction it should not be possible that the owner - of the object is already us */ - assert(current_lock_owner != STM_PSEGMENT->write_lock_num); - - /* Who should abort here: this thread, or the other thread? */ - struct stm_priv_segment_info_s* other_pseg; - other_pseg = get_priv_segment(current_lock_owner - 1); - assert(other_pseg->write_lock_num == current_lock_owner); - - /* note: other_pseg is currently running a transaction, and it cannot - commit or abort unexpectedly, because to do that it would need to - suspend us. So the reading of other_pseg->start_time and - other_pseg->transaction_state is stable, with one exception: the - 'transaction_state' can go from TS_REGULAR to TS_INEVITABLE under - our feet. */ - if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { - /* I'm inevitable, so the other is not. 
*/ - assert(other_pseg->transaction_state != TS_INEVITABLE); - other_pseg->transaction_state = TS_MUST_ABORT; - } - else if (STM_PSEGMENT->start_time >= other_pseg->start_time) { - /* The other thread started before us, so I should abort, as I'm - the least long-running transaction. */ - } - else { - /* The other thread started strictly after us. We try to tell - it to abort, using compare_and_swap(). This fails if its - 'transaction_state' is already TS_INEVITABLE. */ - __sync_bool_compare_and_swap( - &other_pseg->transaction_state, TS_REGULAR, TS_MUST_ABORT); - } - - if (other_pseg->transaction_state != TS_MUST_ABORT) { - /* if the other thread is not in aborting-soon mode, then we must - abort. */ - stm_abort_transaction(); - } - else { - /* otherwise, we will issue a safe point and wait: */ - mutex_lock(); - STM_PSEGMENT->safe_point = SP_SAFE_POINT; - - /* signal the other thread; it must abort */ - cond_broadcast(); - - /* then wait, hopefully until the other thread broadcasts "I'm - done aborting" (spurious wake-ups are ok) */ - cond_wait(); - - /* now we return into _stm_write_slowpath() and will try again - to acquire the write lock on our object. */ - STM_PSEGMENT->safe_point = SP_RUNNING; - mutex_unlock(); - } -} - - void _stm_write_slowpath(object_t *obj) { assert(_running_transaction()); @@ -115,8 +54,12 @@ if (LIKELY(prev_owner == 0)) break; - /* otherwise, call the contention manager, and then possibly retry */ - contention_management(prev_owner); + /* otherwise, call the contention manager, and then possibly retry. + By construction it should not be possible that the owner + of the object is already us */ + mutex_lock(); + contention_management(prev_owner - 1); + mutex_unlock(); } while (1); /* add the write-barrier-already-called flag ONLY if we succeeded in @@ -158,11 +101,11 @@ /* GS invalid before this point! */ acquire_thread_segment(tl); - assert(STM_SEGMENT->safe_point == SP_NO_TRANSACTION); - assert(STM_SEGMENT->transaction_state == TS_NONE); - STM_SEGMENT->safe_point = SP_RUNNING; - STM_SEGMENT->transaction_state = (jmpbuf != NULL ? TS_REGULAR - : TS_INEVITABLE); + assert(STM_PSEGMENT->safe_point == SP_NO_TRANSACTION); + assert(STM_PSEGMENT->transaction_state == TS_NONE); + STM_PSEGMENT->safe_point = SP_RUNNING; + STM_PSEGMENT->transaction_state = (jmpbuf != NULL ? TS_REGULAR + : TS_INEVITABLE); STM_SEGMENT->jmpbuf_ptr = jmpbuf; mutex_unlock(); @@ -172,79 +115,101 @@ if (UNLIKELY(old_rv == 0xff)) reset_transaction_read_version(); - assert(list_is_empty(STM_PSEGMENT->old_objects_to_trace)); assert(list_is_empty(STM_PSEGMENT->modified_objects)); assert(list_is_empty(STM_PSEGMENT->creation_markers)); } -static void push_modified_to_other_threads() +static bool detect_write_read_conflicts(void) { long remote_num = 1 - STM_SEGMENT->segment_num; - char *local_base = STM_SEGMENT->segment_base; char *remote_base = get_segment_base(remote_num); - bool conflicted = false; uint8_t remote_version = get_segment(remote_num)->transaction_read_version; +#if NB_SEGMENTS != 2 +# error "This logic only works with two segments" +#endif + LIST_FOREACH_R( STM_PSEGMENT->modified_objects, object_t * /*item*/, ({ - if (!conflicted) - conflicted = was_read_remote(remote_base, item, - remote_version); + if (was_read_remote(remote_base, item, remote_version)) { + /* A write-read conflict! 
*/ + contention_management(remote_num); + return true; + } + })); + return false; +} + +static void push_modified_to_other_segments(void) +{ + long remote_num = 1 - STM_SEGMENT->segment_num; + char *local_base = STM_SEGMENT->segment_base; + char *remote_base = get_segment_base(remote_num); + +#if NB_SEGMENTS != 2 +# error "This logic only works with two segments" +#endif + + LIST_FOREACH_R( + STM_PSEGMENT->modified_objects, + object_t * /*item*/, + ({ + assert(!was_read_remote(remote_base, item, + get_segment(remote_num)->transaction_read_version)); /* clear the write-lock */ uintptr_t lock_idx = (((uintptr_t)item) >> 4) - READMARKER_START; - assert(write_locks[lock_idx] == _STM_TL->thread_num + 1); + assert(write_locks[lock_idx] == STM_PSEGMENT->write_lock_num); write_locks[lock_idx] = 0; - _stm_move_object(item, - REAL_ADDRESS(local_base, item), - REAL_ADDRESS(remote_base, item)); + char *src = REAL_ADDRESS(local_base, item); + ssize_t size = stmcb_size_rounded_up((struct object_s *)src); + memcpy(REAL_ADDRESS(remote_base, item), src, size); })); list_clear(STM_PSEGMENT->modified_objects); - - if (conflicted) { - ...; contention management again! - get_segment(remote_num)->transaction_state = TS_MUST_ABORT; - } } void stm_commit_transaction(void) { mutex_lock(); - assert(STM_SEGMENT->safe_point = SP_RUNNING); - stm_thread_local_t *tl = STM_SEGMENT->running_thread; + restart: + assert(STM_PSEGMENT->safe_point = SP_RUNNING); - switch (STM_SEGMENT->transaction_state) { + switch (STM_PSEGMENT->transaction_state) { case TS_REGULAR: - /* cannot abort any more */ - STM_SEGMENT->jmpbuf_ptr = NULL; - break; - case TS_INEVITABLE: - //... - abort(); // XXX do it break; case TS_MUST_ABORT: - mutex_unlock(); - stm_abort_transaction(); + abort_with_mutex(); default: assert(!"commit: bad transaction_state"); } + /* detect conflicts */ + if (detect_write_read_conflicts()) + goto restart; + + /* cannot abort any more from here */ + assert(STM_PSEGMENT->transaction_state != TS_MUST_ABORT); + STM_SEGMENT->jmpbuf_ptr = NULL; + /* copy modified object versions to other threads */ - push_modified_to_other_threads(); + push_modified_to_other_segments(); + /* done */ + stm_thread_local_t *tl = STM_SEGMENT->running_thread; release_thread_segment(tl); /* includes the cond_broadcast(); */ - STM_SEGMENT->safe_point = SP_NO_TRANSACTION; - STM_SEGMENT->transaction_state = TS_NONE; + STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; + STM_PSEGMENT->transaction_state = TS_NONE; + mutex_unlock(); reset_all_creation_markers(); @@ -253,11 +218,15 @@ void stm_abort_transaction(void) { mutex_lock(); + abort_with_mutex(); +} +static void abort_with_mutex(void) +{ stm_thread_local_t *tl = STM_SEGMENT->running_thread; stm_jmpbuf_t *jmpbuf_ptr = STM_SEGMENT->jmpbuf_ptr; - switch (STM_SEGMENT->transaction_state) { + switch (STM_PSEGMENT->transaction_state) { case TS_REGULAR: case TS_MUST_ABORT: break; @@ -268,11 +237,12 @@ } release_thread_segment(tl); /* includes the cond_broadcast(); */ - STM_SEGMENT->safe_point = SP_NO_TRANSACTION; - STM_SEGMENT->transaction_state = TS_NONE; + STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; + STM_PSEGMENT->transaction_state = TS_NONE; mutex_unlock(); reset_all_creation_markers(); + list_clear(STM_PSEGMENT->modified_objects); assert(jmpbuf_ptr != NULL); assert(jmpbuf_ptr != (stm_jmpbuf_t *)-1); /* for tests only */ diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -50,7 +50,7 @@ static inline bool was_read_remote(char *base, object_t *obj, uint8_t 
other_transaction_read_version) { - struct read_marker_s *marker = (struct read_marker_s *) + struct stm_read_marker_s *marker = (struct stm_read_marker_s *) (base + (((uintptr_t)obj) >> 4)); return (marker->rm == other_transaction_read_version); } diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -46,7 +46,7 @@ perror("mutex/cond destroy"); abort(); } - memset(sync_ctl, 0, sizeof(sync_ctl.in_use)); + memset(&sync_ctl, 0, sizeof(sync_ctl.in_use)); } static void set_gs_register(char *value) @@ -102,7 +102,7 @@ assert_has_mutex(); assert(_is_tl_registered(tl)); - retry: + retry:; int num = tl->associated_segment_num; if (sync_ctl.in_use[num] == 0) { /* fast-path: we can get the same segment number than the one @@ -129,7 +129,7 @@ sync_ctl.in_use[num] = 1; assert(STM_SEGMENT->running_thread == NULL); STM_SEGMENT->running_thread = tl; - STM_PSEGMENT->start_time = ++segments_ctl.global_time; + STM_PSEGMENT->start_time = ++sync_ctl.global_time; } static void release_thread_segment(stm_thread_local_t *tl) diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -9,6 +9,7 @@ #include "stm/sync.h" #include "stm/largemalloc.h" #include "stm/nursery.h" +#include "stm/contention.h" #include "stm/misc.c" #include "stm/list.c" @@ -21,3 +22,4 @@ #include "stm/sync.c" #include "stm/setup.c" #include "stm/core.c" +#include "stm/contention.c" From noreply at buildbot.pypy.org Sat Feb 15 23:42:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Feb 2014 23:42:09 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Progress Message-ID: <20140215224209.C58EC1C1178@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r743:8201455ea6be Date: 2014-02-15 23:41 +0100 http://bitbucket.org/pypy/stmgc/changeset/8201455ea6be/ Log: Progress diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -3,7 +3,7 @@ #endif -static void contention_management(uint8_t other_segment_num) +static void contention_management(uint8_t other_segment_num, bool wait) { /* A simple contention manager. Called when we do stm_write() on an object, but some other thread already holds the write @@ -16,12 +16,6 @@ struct stm_priv_segment_info_s* other_pseg; other_pseg = get_priv_segment(other_segment_num); - /* note: other_pseg is currently running a transaction, and it cannot - commit or abort unexpectedly, because to do that it would need to - suspend us. So the reading of other_pseg->start_time and - other_pseg->transaction_state is stable, with one exception: the - 'transaction_state' can go from TS_REGULAR to TS_INEVITABLE under - our feet. */ if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { /* I'm inevitable, so the other is not. */ assert(other_pseg->transaction_state != TS_INEVITABLE); @@ -42,7 +36,7 @@ abort. 
*/ abort_with_mutex(); } - else { + else if (wait) { /* otherwise, we will issue a safe point and wait: */ STM_PSEGMENT->safe_point = SP_SAFE_POINT; diff --git a/c7/stm/contention.h b/c7/stm/contention.h --- a/c7/stm/contention.h +++ b/c7/stm/contention.h @@ -1,2 +1,2 @@ -static void contention_management(uint8_t other_segment_num); +static void contention_management(uint8_t other_segment_num, bool wait); diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -7,8 +7,6 @@ static uint8_t write_locks[READMARKER_END - READMARKER_START]; -static void abort_with_mutex(void) __attribute__((noreturn)); - static void teardown_core(void) { memset(write_locks, 0, sizeof(write_locks)); @@ -58,7 +56,7 @@ By construction it should not be possible that the owner of the object is already us */ mutex_lock(); - contention_management(prev_owner - 1); + contention_management(prev_owner - 1, true); mutex_unlock(); } while (1); @@ -120,15 +118,31 @@ } -static bool detect_write_read_conflicts(void) +/************************************************************/ + +#if NB_SEGMENTS != 2 +# error "The logic in the functions below only works with two segments" +#endif + +static void wait_for_other_safe_points(void) +{ + long remote_num = 1 - STM_SEGMENT->segment_num; + while (get_priv_segment(remote_num)->safe_point == SP_RUNNING) { + cond_wait(); + } +} + +static void detect_write_read_conflicts(void) { long remote_num = 1 - STM_SEGMENT->segment_num; char *remote_base = get_segment_base(remote_num); uint8_t remote_version = get_segment(remote_num)->transaction_read_version; -#if NB_SEGMENTS != 2 -# error "This logic only works with two segments" -#endif + switch (get_priv_segment(remote_num)->transaction_state) { + case TS_NONE: + case TS_MUST_ABORT: + return; /* no need to do any check */ + } LIST_FOREACH_R( STM_PSEGMENT->modified_objects, @@ -136,11 +150,13 @@ ({ if (was_read_remote(remote_base, item, remote_version)) { /* A write-read conflict! */ - contention_management(remote_num); - return true; + contention_management(remote_num, false); + + /* If we reach this point, it means we aborted the other + thread. We're done here. 
*/ + return; } })); - return false; } static void push_modified_to_other_segments(void) @@ -148,17 +164,18 @@ long remote_num = 1 - STM_SEGMENT->segment_num; char *local_base = STM_SEGMENT->segment_base; char *remote_base = get_segment_base(remote_num); - -#if NB_SEGMENTS != 2 -# error "This logic only works with two segments" -#endif + bool remote_active = + (get_priv_segment(remote_num)->transaction_state == TS_REGULAR || + get_priv_segment(remote_num)->transaction_state == TS_INEVITABLE); LIST_FOREACH_R( STM_PSEGMENT->modified_objects, object_t * /*item*/, ({ - assert(!was_read_remote(remote_base, item, + if (remote_active) { + assert(!was_read_remote(remote_base, item, get_segment(remote_num)->transaction_read_version)); + } /* clear the write-lock */ uintptr_t lock_idx = (((uintptr_t)item) >> 4) - READMARKER_START; @@ -176,8 +193,6 @@ void stm_commit_transaction(void) { mutex_lock(); - - restart: assert(STM_PSEGMENT->safe_point = SP_RUNNING); switch (STM_PSEGMENT->transaction_state) { @@ -193,9 +208,11 @@ assert(!"commit: bad transaction_state"); } + /* wait until the other thread is at a safe-point */ + wait_for_other_safe_points(); + /* detect conflicts */ - if (detect_write_read_conflicts()) - goto restart; + detect_write_read_conflicts(); /* cannot abort any more from here */ assert(STM_PSEGMENT->transaction_state != TS_MUST_ABORT); @@ -223,9 +240,6 @@ static void abort_with_mutex(void) { - stm_thread_local_t *tl = STM_SEGMENT->running_thread; - stm_jmpbuf_t *jmpbuf_ptr = STM_SEGMENT->jmpbuf_ptr; - switch (STM_PSEGMENT->transaction_state) { case TS_REGULAR: case TS_MUST_ABORT: @@ -236,6 +250,8 @@ assert(!"abort: bad transaction_state"); } + stm_jmpbuf_t *jmpbuf_ptr = STM_SEGMENT->jmpbuf_ptr; + stm_thread_local_t *tl = STM_SEGMENT->running_thread; release_thread_segment(tl); /* includes the cond_broadcast(); */ STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -109,3 +109,4 @@ } static void teardown_core(void); +static void abort_with_mutex(void) __attribute__((noreturn)); diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -80,11 +80,19 @@ static inline void cond_wait(void) { +#ifdef STM_NO_COND_WAIT + fprintf(stderr, "*** cond_wait called!"); + abort(); +#endif + if (UNLIKELY(pthread_cond_wait(&sync_ctl.global_cond, &sync_ctl.global_mutex) != 0)) { perror("pthread_cond_wait"); abort(); } + + if (STM_PSEGMENT->transaction_state == TS_MUST_ABORT) + abort_with_mutex(); } static inline void cond_broadcast(void) @@ -166,12 +174,19 @@ assert(STM_SEGMENT->running_thread == tl); } -void _stm_start_safe_point(int flags) +#if STM_TESTS +void _stm_start_safe_point(void) { - //... + assert(STM_PSEGMENT->safe_point == SP_RUNNING); + STM_PSEGMENT->safe_point = SP_SAFE_POINT_CAN_COLLECT; } -void _stm_stop_safe_point(int flags) +void _stm_stop_safe_point(void) { - //... 
+ assert(STM_PSEGMENT->safe_point == SP_SAFE_POINT_CAN_COLLECT); + STM_PSEGMENT->safe_point = SP_RUNNING; + + if (STM_PSEGMENT->transaction_state == TS_MUST_ABORT) + stm_abort_transaction(); } +#endif diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -81,8 +81,6 @@ stm_char *_stm_allocate_slowpath(ssize_t); void _stm_become_inevitable(char*); void _stm_start_transaction(stm_thread_local_t *, stm_jmpbuf_t *); -void _stm_start_safe_point(int flags); -void _stm_stop_safe_point(int flags); #ifdef STM_TESTS bool _stm_was_read(object_t *obj); @@ -94,6 +92,8 @@ void _stm_test_switch(stm_thread_local_t *tl); object_t *_stm_allocate_old(ssize_t size_rounded_up); void _stm_large_dump(void); +void _stm_start_safe_point(void); +void _stm_stop_safe_point(void); #endif #define _STM_GCFLAG_WRITE_BARRIER_CALLED 0x80 diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -66,12 +66,8 @@ void _set_type_id(object_t *obj, uint32_t h); uint32_t _get_type_id(object_t *obj); -#define LOCK_COLLECT ... -#define LOCK_EXCLUSIVE ... -#define THREAD_YIELD ... - -void _stm_start_safe_point(int); -bool _check_stop_safe_point(int); +void _stm_start_safe_point(void); +bool _check_stop_safe_point(void); """) @@ -138,12 +134,6 @@ typedef TLPREFIX struct myobj_s myobj_t; #define SIZEOF_MYOBJ sizeof(struct myobj_s) -enum { - LOCK_COLLECT = 1, - LOCK_EXCLUSIVE = 2, - THREAD_YIELD = 4, -}; - uint8_t _stm_get_flags(object_t *obj) { return obj->stm_flags; @@ -195,13 +185,13 @@ } #endif -bool _check_stop_safe_point(int flags) { +bool _check_stop_safe_point(void) { stm_jmpbuf_t here; stm_segment_info_t *segment = STM_SEGMENT; if (__builtin_setjmp(here) == 0) { // returned directly assert(segment->jmpbuf_ptr == (stm_jmpbuf_t *)-1); segment->jmpbuf_ptr = &here; - _stm_stop_safe_point(flags); + _stm_stop_safe_point(); segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; return 0; } @@ -287,7 +277,7 @@ } ''', sources=source_files, - define_macros=[('STM_TESTS', '1')], + define_macros=[('STM_TESTS', '1'), ('STM_NO_COND_WAIT', '1')], undef_macros=['NDEBUG'], include_dirs=[parent_dir], extra_compile_args=['-g', '-O0', '-Werror'], @@ -369,10 +359,10 @@ def stm_start_safe_point(): - lib._stm_start_safe_point(lib.LOCK_COLLECT) + lib._stm_start_safe_point() def stm_stop_safe_point(): - if lib._check_stop_safe_point(lib.LOCK_COLLECT): + if lib._check_stop_safe_point(): raise Conflict() def stm_become_inevitable(): diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -96,7 +96,7 @@ self.commit_transaction() # py.test.raises(Conflict, self.switch, 0) # detects rw conflict - + def test_commit_fresh_objects(self): self.start_transaction() lp = stm_allocate(16) From noreply at buildbot.pypy.org Sat Feb 15 23:53:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 15 Feb 2014 23:53:31 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Port some more tests Message-ID: <20140215225331.A4FE71C1178@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r744:27b37f2a4cb3 Date: 2014-02-15 23:53 +0100 http://bitbucket.org/pypy/stmgc/changeset/27b37f2a4cb3/ Log: Port some more tests diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -1,5 +1,5 @@ import os -import cffi +import cffi, weakref import sys assert sys.maxint == 9223372036854775807, "requires a 64-bit environment" @@ -75,9 +75,6 @@ void 
stm_start_inevitable_transaction(stm_thread_local_t *tl); void stm_become_inevitable(char* msg); -void stm_push_root(object_t *obj); -object_t *stm_pop_root(void); - void _set_ptr(object_t *obj, int n, object_t *v); object_t * _get_ptr(object_t *obj, int n); @@ -347,12 +344,6 @@ def stm_was_written(o): return lib._stm_was_written(o) -def stm_push_root(o): - return lib.stm_push_root(o) - -def stm_pop_root(): - return lib.stm_pop_root() - def stm_stop_transaction(): if lib._stm_stop_transaction(): raise Conflict() @@ -386,8 +377,15 @@ def stm_get_flags(o): return lib._stm_get_flags(o) +SHADOWSTACK_LENGTH = 100 +_keepalive = weakref.WeakKeyDictionary() + def _allocate_thread_local(): tl = ffi.new("stm_thread_local_t *") + ss = ffi.new("object_t *[]", SHADOWSTACK_LENGTH) + _keepalive[tl] = ss + tl.shadowstack = ss + tl.shadowstack_base = ss lib.stm_register_thread_local(tl) return tl @@ -446,3 +444,18 @@ if lib._stm_in_transaction(tl2): lib._stm_test_switch(tl2) stm_stop_safe_point() # can raise Conflict + + def push_root(self, o): + assert ffi.typeof(o) == ffi.typeof("object_t *") + tl = self.tls[self.current_thread] + curlength = tl.shadowstack - tl.shadowstack_base + assert 0 <= curlength < SHADOWSTACK_LENGTH + tl.shadowstack[0] = ffi.cast("object_t *", o) + tl.shadowstack += 1 + + def pop_root(self): + tl = self.tls[self.current_thread] + curlength = tl.shadowstack - tl.shadowstack_base + assert 0 < curlength <= SHADOWSTACK_LENGTH + tl.shadowstack -= 1 + return ffi.cast("object_t *", tl.shadowstack[0]) diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -102,9 +102,9 @@ lp = stm_allocate(16) stm_set_char(lp, 'u') p = stm_get_real_address(lp) - stm_push_root(lp) + self.push_root(lp) self.commit_transaction() - lp = stm_pop_root() + lp = self.pop_root() p1 = stm_get_real_address(lp) assert p != p1 @@ -131,11 +131,11 @@ stm_write(lp2) # test not crash stm_read(lp) # test not crash stm_read(lp2) # test not crash - stm_push_root(lp) - stm_push_root(lp2) + self.push_root(lp) + self.push_root(lp2) self.commit_transaction() - lp2 = stm_pop_root() - lp = stm_pop_root() + lp2 = self.pop_root() + lp = self.pop_root() self.switch(0) @@ -165,9 +165,9 @@ stm_set_char(lr, 'y') stm_set_ref(lp, 0, lq) stm_set_ref(lp, 1, lr) - stm_push_root(lp) + self.push_root(lp) self.commit_transaction() - lp = stm_pop_root() + lp = self.pop_root() self.switch(1) @@ -187,9 +187,9 @@ self.start_transaction() lp1 = stm_allocate(16) stm_set_char(lp1, 'a') - stm_push_root(lp1) + self.push_root(lp1) self.commit_transaction() - lp1 = stm_pop_root() + lp1 = self.pop_root() # self.switch(1) self.start_transaction() @@ -218,9 +218,9 @@ lp1 = stm_allocate(16) p1 = stm_get_real_address(lp1) p1[HDR] = 'a' - stm_push_root(lp1) + self.push_root(lp1) self.commit_transaction() - lp1 = stm_pop_root() + lp1 = self.pop_root() # 'a' in SHARED_PAGE self.start_transaction() @@ -247,9 +247,9 @@ self.start_transaction() lp1 = stm_allocate(16) stm_set_char(lp1, 'a') - stm_push_root(lp1) + self.push_root(lp1) self.commit_transaction() - lp1 = stm_pop_root() + lp1 = self.pop_root() self.start_transaction() stm_read(lp1) @@ -268,9 +268,9 @@ self.start_transaction() lp1 = stm_allocate(16) stm_set_char(lp1, 'a') - stm_push_root(lp1) + self.push_root(lp1) self.commit_transaction() - lp1 = stm_pop_root() + lp1 = self.pop_root() self.start_transaction() # @@ -287,9 +287,9 @@ self.start_transaction() lp1 = stm_allocate(16) stm_set_char(lp1, 'a') - stm_push_root(lp1) + self.push_root(lp1) 
self.commit_transaction() - lp1 = stm_pop_root() + lp1 = self.pop_root() self.start_transaction() stm_write(lp1) # acquire lock @@ -302,9 +302,9 @@ self.start_transaction() lp1 = stm_allocate(16) stm_set_char(lp1, 'a') - stm_push_root(lp1) + self.push_root(lp1) self.commit_transaction() - lp1 = stm_pop_root() + lp1 = self.pop_root() self.start_transaction() stm_set_char(lp1, 'x') @@ -320,12 +320,12 @@ self.start_transaction() for i in range(num): new = stm_allocate(obj_size) - stm_push_root(new) + self.push_root(new) old = [] young = [] for _ in range(num): - r = stm_pop_root() + r = self.pop_root() if is_in_nursery(r): young.append(r) else: @@ -357,9 +357,9 @@ assert len(stm_get_obj_pages(new)) == 2 assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] == [lib.PRIVATE_PAGE]*2) - stm_push_root(new) + self.push_root(new) stm_minor_collect() - new = stm_pop_root() + new = self.pop_root() assert len(stm_get_obj_pages(new)) == 2 # assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] @@ -378,9 +378,9 @@ self.start_transaction() new = stm_allocate(obj_size) assert is_in_nursery(new) - stm_push_root(new) + self.push_root(new) self.commit_transaction() - new = stm_pop_root() + new = self.pop_root() assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] == [lib.SHARED_PAGE]*2) @@ -403,9 +403,9 @@ def test_partial_alloced_pages(self): self.start_transaction() new = stm_allocate(16) - stm_push_root(new) + self.push_root(new) stm_minor_collect() - new = stm_pop_root() + new = self.pop_root() # assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == lib.UNCOMMITTED_SHARED_PAGE # assert not (stm_get_flags(new) & lib.GCFLAG_NOT_COMMITTED) @@ -415,9 +415,9 @@ self.start_transaction() newer = stm_allocate(16) - stm_push_root(newer) + self.push_root(newer) stm_minor_collect() - newer = stm_pop_root() + newer = self.pop_root() # 'new' is still in shared_page and committed assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == lib.SHARED_PAGE assert not (stm_get_flags(new) & lib.GCFLAG_NOT_COMMITTED) @@ -436,41 +436,41 @@ self.start_transaction() new = stm_allocate(16) stm_set_char(new, 'a') - stm_push_root(new) + self.push_root(new) stm_minor_collect() - new = stm_pop_root() + new = self.pop_root() stm_abort_transaction() self.start_transaction() newer = stm_allocate(16) - stm_push_root(newer) + self.push_root(newer) stm_minor_collect() - newer = stm_pop_root() + newer = self.pop_root() assert stm_get_real_address(new) == stm_get_real_address(newer) assert stm_get_char(newer) == '\0' def test_reuse_page(self): self.start_transaction() new = stm_allocate(16) - stm_push_root(new) + self.push_root(new) stm_minor_collect() - new = stm_pop_root() + new = self.pop_root() # assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == lib.UNCOMMITTED_SHARED_PAGE stm_abort_transaction() self.start_transaction() newer = stm_allocate(16) - stm_push_root(newer) + self.push_root(newer) stm_minor_collect() - newer = stm_pop_root() + newer = self.pop_root() assert new == newer def test_write_to_old_after_minor(self): self.start_transaction() new = stm_allocate(16) - stm_push_root(new) + self.push_root(new) stm_minor_collect() - old = stm_pop_root() + old = self.pop_root() self.commit_transaction() self.start_transaction() From noreply at buildbot.pypy.org Sun Feb 16 00:05:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Feb 2014 00:05:47 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Pass test_abort_cleanup. 
Message-ID: <20140215230547.5463D1C01DE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r745:a221bc217ba9 Date: 2014-02-16 00:05 +0100 http://bitbucket.org/pypy/stmgc/changeset/a221bc217ba9/ Log: Pass test_abort_cleanup. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -238,6 +238,41 @@ abort_with_mutex(); } +static void reset_modified_from_other_segments(void) +{ + /* pull the right versions from other threads in order + to reset our pages as part of an abort */ + long remote_num = 1 - STM_SEGMENT->segment_num; + char *local_base = STM_SEGMENT->segment_base; + char *remote_base = get_segment_base(remote_num); + + LIST_FOREACH_R( + STM_PSEGMENT->modified_objects, + object_t * /*item*/, + ({ + /* memcpy in the opposite direction than + push_modified_to_other_segments() */ + char *src = REAL_ADDRESS(remote_base, item); + ssize_t size = stmcb_size_rounded_up((struct object_s *)src); + memcpy(REAL_ADDRESS(local_base, item), src, size); + + /* copying from the other thread re-added the + WRITE_BARRIER flag */ + //assert(item->stm_flags & GCFLAG_WRITE_BARRIER); --- XXX + + /* write all changes to the object before we release the + write lock below */ + write_fence(); + + /* clear the write-lock */ + uintptr_t lock_idx = (((uintptr_t)item) >> 4) - READMARKER_START; + assert(write_locks[lock_idx]); + write_locks[lock_idx] = 0; + })); + + list_clear(STM_PSEGMENT->modified_objects); +} + static void abort_with_mutex(void) { switch (STM_PSEGMENT->transaction_state) { @@ -250,6 +285,9 @@ assert(!"abort: bad transaction_state"); } + /* reset all the modified objects (incl. re-adding GCFLAG_WRITE_BARRIER) */ + reset_modified_from_other_segments(); + stm_jmpbuf_t *jmpbuf_ptr = STM_SEGMENT->jmpbuf_ptr; stm_thread_local_t *tl = STM_SEGMENT->running_thread; release_thread_segment(tl); /* includes the cond_broadcast(); */ @@ -258,7 +296,6 @@ mutex_unlock(); reset_all_creation_markers(); - list_clear(STM_PSEGMENT->modified_objects); assert(jmpbuf_ptr != NULL); assert(jmpbuf_ptr != (stm_jmpbuf_t *)-1); /* for tests only */ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -60,7 +60,7 @@ void _stm_test_switch(stm_thread_local_t *tl); void _stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf); -void stm_commit_transaction(void); +bool _check_commit_transaction(void); bool _check_abort_transaction(void); void _set_type_id(object_t *obj, uint32_t h); @@ -166,22 +166,6 @@ return 1; } -#if 0 -bool _stm_stop_transaction(void) { - jmpbufptr_t here; - int tn = _STM_TL->thread_num; - if (__builtin_setjmp(here) == 0) { // returned directly - assert(_STM_TL->jmpbufptr == (jmpbufptr_t*)-1); - _STM_TL->jmpbufptr = &here; - stm_stop_transaction(); - _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t*)-1; - return 0; - } - _stm_dbg_get_tl(tn)->jmpbufptr = (jmpbufptr_t*)-1; - return 1; -} -#endif - bool _check_stop_safe_point(void) { stm_jmpbuf_t here; stm_segment_info_t *segment = STM_SEGMENT; @@ -196,7 +180,21 @@ return 1; } -int _check_abort_transaction(void) { +bool _check_commit_transaction(void) { + stm_jmpbuf_t here; + stm_segment_info_t *segment = STM_SEGMENT; + if (__builtin_setjmp(here) == 0) { // returned directly + assert(segment->jmpbuf_ptr == (stm_jmpbuf_t *)-1); + segment->jmpbuf_ptr = &here; + stm_commit_transaction(); + segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; + return 0; + } + segment->jmpbuf_ptr = (stm_jmpbuf_t *)-1; + return 1; +} + +bool _check_abort_transaction(void) { 
stm_jmpbuf_t here; stm_segment_info_t *segment = STM_SEGMENT; if (__builtin_setjmp(here) == 0) { // returned directly @@ -422,8 +420,10 @@ def commit_transaction(self): tl = self.tls[self.current_thread] assert lib._stm_in_transaction(tl) - lib.stm_commit_transaction() + res = lib._check_commit_transaction() assert not lib._stm_in_transaction(tl) + if res: + raise Conflict def abort_transaction(self): tl = self.tls[self.current_thread] diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -308,7 +308,7 @@ self.start_transaction() stm_set_char(lp1, 'x') - assert stm_abort_transaction() + self.abort_transaction() self.start_transaction() assert stm_get_char(lp1) == 'a' @@ -439,7 +439,7 @@ self.push_root(new) stm_minor_collect() new = self.pop_root() - stm_abort_transaction() + self.abort_transaction() self.start_transaction() newer = stm_allocate(16) @@ -456,7 +456,7 @@ stm_minor_collect() new = self.pop_root() # assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == lib.UNCOMMITTED_SHARED_PAGE - stm_abort_transaction() + self.abort_transaction() self.start_transaction() newer = stm_allocate(16) From noreply at buildbot.pypy.org Sun Feb 16 01:04:24 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 16 Feb 2014 01:04:24 +0100 (CET) Subject: [pypy-commit] pypy align_float_cast: add passing test for unaligned GcStruct (gcc pads the struct) Message-ID: <20140216000424.AF6D71C1178@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: align_float_cast Changeset: r69167:ea72b35e2a10 Date: 2014-02-16 02:02 +0200 http://bitbucket.org/pypy/pypy/changeset/ea72b35e2a10/ Log: add passing test for unaligned GcStruct (gcc pads the struct) diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -121,14 +121,14 @@ def bh_raw_load_f(self, struct, offset, descr): ll_p = rffi.cast(rffi.CCHARP, struct) ll_p_offset = rffi.ptradd(ll_p, offset) - if rffi.cast(lltype.Signed, ll_p_offset) & 3: + if rffi.cast(lltype.Signed, ll_p_offset) & 3: with lltype.scoped_alloc(rffi.CArray(longlong.FLOATSTORAGE), 1) as s_array: - rffi.c_memcpy(rffi.cast(rffi.VOIDP, s_array), + rffi.c_memcpy(rffi.cast(rffi.VOIDP, s_array), rffi.cast(rffi.VOIDP, ll_p_offset), rffi.sizeof(rffi.DOUBLE)) - ll_p = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), + ll_p = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), s_array) - return ll_p[0] + return ll_p[0] ll_p = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), ll_p_offset) return ll_p[0] diff --git a/rpython/rlib/rawstorage.py b/rpython/rlib/rawstorage.py --- a/rpython/rlib/rawstorage.py +++ b/rpython/rlib/rawstorage.py @@ -23,7 +23,12 @@ def raw_storage_getitem(TP, storage, index): "NOT_RPYTHON" ptr = rffi.ptradd(storage, index) - # TODO Check that pointer is aligned for TP + if TP is lltype.Float and rffi.cast(lltype.Signed, ptr) & 3: + with lltype.scoped_alloc(rffi.CArray(TP), 1) as s_array: + rffi.c_memcpy(rffi.cast(rffi.VOIDP, s_array), + rffi.cast(rffi.VOIDP, ptr), + rffi.sizeof(TP)) + return rffi.cast(rffi.CArrayPtr(TP), s_array)[0] return rffi.cast(rffi.CArrayPtr(TP), ptr)[0] def raw_storage_setitem(storage, index, item): @@ -43,12 +48,11 @@ return lltype_to_annotation(s_TP.const) def specialize_call(self, hop): - # emit code that will 'automatically' copy memory if unaligned assert hop.args_r[1].lowleveltype == RAW_STORAGE_PTR v_storage = hop.inputarg(hop.args_r[1], arg=1) v_index = 
hop.inputarg(lltype.Signed, arg=2) hop.exception_cannot_occur() - v_addr = hop.genop('cast_ptr_to_adr', [v_storage], + v_addr = hop.genop('casst_ptr_to_adr', [v_storage], resulttype=llmemory.Address) return hop.genop('raw_load', [v_addr, v_index], resulttype=hop.r_result.lowleveltype) diff --git a/rpython/rlib/test/test_rawstorage.py b/rpython/rlib/test/test_rawstorage.py --- a/rpython/rlib/test/test_rawstorage.py +++ b/rpython/rlib/test/test_rawstorage.py @@ -8,8 +8,11 @@ r = alloc_raw_storage(15) raw_storage_setitem(r, 3, 1<<30) res = raw_storage_getitem(lltype.Signed, r, 3) + assert res == 1<<30 + raw_storage_setitem(r, 3, 3.14) + res = raw_storage_getitem(lltype.Float, r, 3) + assert res == 3.14 free_raw_storage(r) - assert res == 1<<30 class TestRawStorage(BaseRtypingTest): def test_storage_int(self): @@ -21,3 +24,12 @@ return res x = self.interpret(f, [1<<30]) assert x == 1 << 30 + def test_storage_float(self): + def f(v): + r = alloc_raw_storage(24) + raw_storage_setitem(r, 3, v) + res = raw_storage_getitem(lltype.Float, r, 3) + free_raw_storage(r) + return res + x = self.interpret(f, [3.14]) + assert x == 3.14 diff --git a/rpython/translator/c/funcgen.py b/rpython/translator/c/funcgen.py --- a/rpython/translator/c/funcgen.py +++ b/rpython/translator/c/funcgen.py @@ -703,8 +703,6 @@ res = ( "%(result)s = ((%(typename)s) (((char *)%(addr)s) + %(offset)s))[0];" % locals()) - if 'float' in res or 'double' in res: - xxx return res def OP_CAST_PRIMITIVE(self, op): diff --git a/rpython/translator/c/test/test_lladdresses.py b/rpython/translator/c/test/test_lladdresses.py --- a/rpython/translator/c/test/test_lladdresses.py +++ b/rpython/translator/c/test/test_lladdresses.py @@ -51,6 +51,29 @@ res = fc(42.42) assert res == f(42.42) +def test_memory_float_unaligned(): + S = lltype.GcStruct("S", ('c0', lltype.Char), ("x", lltype.Float), ('c1', lltype.Char), ("y", lltype.Float)) + offset = FieldOffset(S, 'x') + offset_c0 = FieldOffset(S, 'c0') + offsety = FieldOffset(S, 'y') + def f(value): + s = lltype.malloc(S) + s.c0 = 'a' + s.x = 123.2 + a = cast_ptr_to_adr(s) + b = a + offset + assert s.c0 == 'a' + assert b.float[0] == 123.2 + b.float[0] += 234.1 + (a + offsety).float[0] = value + assert s.x == 234.1 + 123.2 + assert s.y == value + return s.x + value + fc = compile(f, [float]) + res = fc(42.42) + assert res == f(42.42) + + def test_offset_inside_fixed_array(): S = lltype.FixedSizeArray(lltype.Signed, 10) offset = FieldOffset(S, 'item4') From noreply at buildbot.pypy.org Sun Feb 16 04:24:16 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 16 Feb 2014 04:24:16 +0100 (CET) Subject: [pypy-commit] pypy singledispatch: Add a LICENSE file, extracted from singledispatch's setup.py Message-ID: <20140216032416.B134D1C1154@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: singledispatch Changeset: r69169:0cf4456650af Date: 2014-02-16 02:09 +0000 http://bitbucket.org/pypy/pypy/changeset/0cf4456650af/ Log: Add a LICENSE file, extracted from singledispatch's setup.py diff --git a/rpython/tool/singledispatch/LICENSE b/rpython/tool/singledispatch/LICENSE new file mode 100644 --- /dev/null +++ b/rpython/tool/singledispatch/LICENSE @@ -0,0 +1,22 @@ +This (rpython/tool/singledispatch/) is a copy of the singledispatch library +(https://bitbucket.org/ambv/singledispatch). 
The original license follows: + +Copyright (C) 2013 by Łukasz Langa + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. From noreply at buildbot.pypy.org Sun Feb 16 04:24:20 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 16 Feb 2014 04:24:20 +0100 (CET) Subject: [pypy-commit] pypy singledispatch: make singledispatch importable Message-ID: <20140216032420.CEC981C1154@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: singledispatch Changeset: r69170:85049837000b Date: 2014-02-16 02:34 +0000 http://bitbucket.org/pypy/pypy/changeset/85049837000b/ Log: make singledispatch importable diff --git a/rpython/tool/singledispatch/__init__.py b/rpython/tool/singledispatch/__init__.py new file mode 100644 diff --git a/rpython/tool/singledispatch/singledispatch.py b/rpython/tool/singledispatch/singledispatch.py --- a/rpython/tool/singledispatch/singledispatch.py +++ b/rpython/tool/singledispatch/singledispatch.py @@ -10,7 +10,7 @@ from functools import update_wrapper from weakref import WeakKeyDictionary -from singledispatch_helpers import MappingProxyType, get_cache_token +from .singledispatch_helpers import MappingProxyType, get_cache_token ################################################################################ ### singledispatch() - single-dispatch generic function decorator diff --git a/rpython/tool/singledispatch/test_singledispatch.py b/rpython/tool/singledispatch/test_singledispatch.py --- a/rpython/tool/singledispatch/test_singledispatch.py +++ b/rpython/tool/singledispatch/test_singledispatch.py @@ -9,17 +9,17 @@ import collections import decimal from itertools import permutations -import singledispatch as functools -from singledispatch_helpers import Support +from . 
import singledispatch as functools +from .singledispatch_helpers import Support try: from collections import ChainMap except ImportError: - from singledispatch_helpers import ChainMap + from .singledispatch_helpers import ChainMap collections.ChainMap = ChainMap try: from collections import OrderedDict except ImportError: - from singledispatch_helpers import OrderedDict + from .singledispatch_helpers import OrderedDict collections.OrderedDict = OrderedDict try: import unittest2 as unittest From noreply at buildbot.pypy.org Sun Feb 16 04:24:14 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 16 Feb 2014 04:24:14 +0100 (CET) Subject: [pypy-commit] pypy singledispatch: Copy singledispatch 3.4.0.2 to rpython/tool/singledispatch/ Message-ID: <20140216032414.1A0361C1154@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: singledispatch Changeset: r69168:b2ff76f3966b Date: 2014-02-16 01:59 +0000 http://bitbucket.org/pypy/pypy/changeset/b2ff76f3966b/ Log: Copy singledispatch 3.4.0.2 to rpython/tool/singledispatch/ diff --git a/rpython/tool/singledispatch/singledispatch.py b/rpython/tool/singledispatch/singledispatch.py new file mode 100644 --- /dev/null +++ b/rpython/tool/singledispatch/singledispatch.py @@ -0,0 +1,219 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +__all__ = ['singledispatch'] + +from functools import update_wrapper +from weakref import WeakKeyDictionary +from singledispatch_helpers import MappingProxyType, get_cache_token + +################################################################################ +### singledispatch() - single-dispatch generic function decorator +################################################################################ + +def _c3_merge(sequences): + """Merges MROs in *sequences* to a single MRO using the C3 algorithm. + + Adapted from http://www.python.org/download/releases/2.3/mro/. + + """ + result = [] + while True: + sequences = [s for s in sequences if s] # purge empty sequences + if not sequences: + return result + for s1 in sequences: # find merge candidates among seq heads + candidate = s1[0] + for s2 in sequences: + if candidate in s2[1:]: + candidate = None + break # reject the current head, it appears later + else: + break + if not candidate: + raise RuntimeError("Inconsistent hierarchy") + result.append(candidate) + # remove the chosen candidate + for seq in sequences: + if seq[0] == candidate: + del seq[0] + +def _c3_mro(cls, abcs=None): + """Computes the method resolution order using extended C3 linearization. + + If no *abcs* are given, the algorithm works exactly like the built-in C3 + linearization used for method resolution. + + If given, *abcs* is a list of abstract base classes that should be inserted + into the resulting MRO. Unrelated ABCs are ignored and don't end up in the + result. The algorithm inserts ABCs where their functionality is introduced, + i.e. issubclass(cls, abc) returns True for the class itself but returns + False for all its direct base classes. Implicit ABCs for a given class + (either registered or inferred from the presence of a special method like + __len__) are inserted directly after the last ABC explicitly listed in the + MRO of said class. If two implicit ABCs end up next to each other in the + resulting MRO, their ordering depends on the order of types in *abcs*. 
+ + """ + for i, base in enumerate(reversed(cls.__bases__)): + if hasattr(base, '__abstractmethods__'): + boundary = len(cls.__bases__) - i + break # Bases up to the last explicit ABC are considered first. + else: + boundary = 0 + abcs = list(abcs) if abcs else [] + explicit_bases = list(cls.__bases__[:boundary]) + abstract_bases = [] + other_bases = list(cls.__bases__[boundary:]) + for base in abcs: + if issubclass(cls, base) and not any( + issubclass(b, base) for b in cls.__bases__ + ): + # If *cls* is the class that introduces behaviour described by + # an ABC *base*, insert said ABC to its MRO. + abstract_bases.append(base) + for base in abstract_bases: + abcs.remove(base) + explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases] + abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases] + other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases] + return _c3_merge( + [[cls]] + + explicit_c3_mros + abstract_c3_mros + other_c3_mros + + [explicit_bases] + [abstract_bases] + [other_bases] + ) + +def _compose_mro(cls, types): + """Calculates the method resolution order for a given class *cls*. + + Includes relevant abstract base classes (with their respective bases) from + the *types* iterable. Uses a modified C3 linearization algorithm. + + """ + bases = set(cls.__mro__) + # Remove entries which are already present in the __mro__ or unrelated. + def is_related(typ): + return (typ not in bases and hasattr(typ, '__mro__') + and issubclass(cls, typ)) + types = [n for n in types if is_related(n)] + # Remove entries which are strict bases of other entries (they will end up + # in the MRO anyway. + def is_strict_base(typ): + for other in types: + if typ != other and typ in other.__mro__: + return True + return False + types = [n for n in types if not is_strict_base(n)] + # Subclasses of the ABCs in *types* which are also implemented by + # *cls* can be used to stabilize ABC ordering. + type_set = set(types) + mro = [] + for typ in types: + found = [] + for sub in typ.__subclasses__(): + if sub not in bases and issubclass(cls, sub): + found.append([s for s in sub.__mro__ if s in type_set]) + if not found: + mro.append(typ) + continue + # Favor subclasses with the biggest number of useful bases + found.sort(key=len, reverse=True) + for sub in found: + for subcls in sub: + if subcls not in mro: + mro.append(subcls) + return _c3_mro(cls, abcs=mro) + +def _find_impl(cls, registry): + """Returns the best matching implementation from *registry* for type *cls*. + + Where there is no registered implementation for a specific type, its method + resolution order is used to find a more generic implementation. + + Note: if *registry* does not contain an implementation for the base + *object* type, this function may return None. + + """ + mro = _compose_mro(cls, registry.keys()) + match = None + for t in mro: + if match is not None: + # If *match* is an implicit ABC but there is another unrelated, + # equally matching implicit ABC, refuse the temptation to guess. + if (t in registry and t not in cls.__mro__ + and match not in cls.__mro__ + and not issubclass(match, t)): + raise RuntimeError("Ambiguous dispatch: {0} or {1}".format( + match, t)) + break + if t in registry: + match = t + return registry.get(match) + +def singledispatch(func): + """Single-dispatch generic function decorator. + + Transforms a function into a generic function, which can have different + behaviours depending upon the type of its first argument. 
The decorated + function acts as the default implementation, and additional + implementations can be registered using the register() attribute of the + generic function. + + """ + registry = {} + dispatch_cache = WeakKeyDictionary() + def ns(): pass + ns.cache_token = None + + def dispatch(cls): + """generic_func.dispatch(cls) -> + + Runs the dispatch algorithm to return the best available implementation + for the given *cls* registered on *generic_func*. + + """ + if ns.cache_token is not None: + current_token = get_cache_token() + if ns.cache_token != current_token: + dispatch_cache.clear() + ns.cache_token = current_token + try: + impl = dispatch_cache[cls] + except KeyError: + try: + impl = registry[cls] + except KeyError: + impl = _find_impl(cls, registry) + dispatch_cache[cls] = impl + return impl + + def register(cls, func=None): + """generic_func.register(cls, func) -> func + + Registers a new implementation for the given *cls* on a *generic_func*. + + """ + if func is None: + return lambda f: register(cls, f) + registry[cls] = func + if ns.cache_token is None and hasattr(cls, '__abstractmethods__'): + ns.cache_token = get_cache_token() + dispatch_cache.clear() + return func + + def wrapper(*args, **kw): + return dispatch(args[0].__class__)(*args, **kw) + + registry[object] = func + wrapper.register = register + wrapper.dispatch = dispatch + wrapper.registry = MappingProxyType(registry) + wrapper._clear_cache = dispatch_cache.clear + update_wrapper(wrapper, func) + return wrapper + diff --git a/rpython/tool/singledispatch/singledispatch_helpers.py b/rpython/tool/singledispatch/singledispatch_helpers.py new file mode 100644 --- /dev/null +++ b/rpython/tool/singledispatch/singledispatch_helpers.py @@ -0,0 +1,170 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from abc import ABCMeta +from collections import MutableMapping +import sys +try: + from collections import UserDict +except ImportError: + from UserDict import UserDict +try: + from collections import OrderedDict +except ImportError: + from ordereddict import OrderedDict +try: + from thread import get_ident +except ImportError: + try: + from _thread import get_ident + except ImportError: + from _dummy_thread import get_ident + + +def recursive_repr(fillvalue='...'): + 'Decorator to make a repr function return fillvalue for a recursive call' + + def decorating_function(user_function): + repr_running = set() + + def wrapper(self): + key = id(self), get_ident() + if key in repr_running: + return fillvalue + repr_running.add(key) + try: + result = user_function(self) + finally: + repr_running.discard(key) + return result + + # Can't use functools.wraps() here because of bootstrap issues + wrapper.__module__ = getattr(user_function, '__module__') + wrapper.__doc__ = getattr(user_function, '__doc__') + wrapper.__name__ = getattr(user_function, '__name__') + wrapper.__annotations__ = getattr(user_function, '__annotations__', {}) + return wrapper + + return decorating_function + + +class ChainMap(MutableMapping): + ''' A ChainMap groups multiple dicts (or other mappings) together + to create a single, updateable view. + + The underlying mappings are stored in a list. That list is public and can + accessed or updated using the *maps* attribute. There is no other state. + + Lookups search the underlying mappings successively until a key is found. 
+ In contrast, writes, updates, and deletions only operate on the first + mapping. + + ''' + + def __init__(self, *maps): + '''Initialize a ChainMap by setting *maps* to the given mappings. + If no mappings are provided, a single empty dictionary is used. + + ''' + self.maps = list(maps) or [{}] # always at least one map + + def __missing__(self, key): + raise KeyError(key) + + def __getitem__(self, key): + for mapping in self.maps: + try: + return mapping[key] # can't use 'key in mapping' with defaultdict + except KeyError: + pass + return self.__missing__(key) # support subclasses that define __missing__ + + def get(self, key, default=None): + return self[key] if key in self else default + + def __len__(self): + return len(set().union(*self.maps)) # reuses stored hash values if possible + + def __iter__(self): + return iter(set().union(*self.maps)) + + def __contains__(self, key): + return any(key in m for m in self.maps) + + @recursive_repr() + def __repr__(self): + return '{0.__class__.__name__}({1})'.format( + self, ', '.join(map(repr, self.maps))) + + @classmethod + def fromkeys(cls, iterable, *args): + 'Create a ChainMap with a single dict created from the iterable.' + return cls(dict.fromkeys(iterable, *args)) + + def copy(self): + 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' + return self.__class__(self.maps[0].copy(), *self.maps[1:]) + + __copy__ = copy + + def new_child(self): # like Django's Context.push() + 'New ChainMap with a new dict followed by all previous maps.' + return self.__class__({}, *self.maps) + + @property + def parents(self): # like Django's Context.pop() + 'New ChainMap from maps[1:].' + return self.__class__(*self.maps[1:]) + + def __setitem__(self, key, value): + self.maps[0][key] = value + + def __delitem__(self, key): + try: + del self.maps[0][key] + except KeyError: + raise KeyError('Key not found in the first mapping: {!r}'.format(key)) + + def popitem(self): + 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' + try: + return self.maps[0].popitem() + except KeyError: + raise KeyError('No keys found in the first mapping.') + + def pop(self, key, *args): + 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' + try: + return self.maps[0].pop(key, *args) + except KeyError: + raise KeyError('Key not found in the first mapping: {!r}'.format(key)) + + def clear(self): + 'Clear maps[0], leaving maps[1:] intact.' 
+ self.maps[0].clear() + + +class MappingProxyType(UserDict): + def __init__(self, data): + UserDict.__init__(self) + self.data = data + + +def get_cache_token(): + return ABCMeta._abc_invalidation_counter + + + +class Support(object): + def dummy(self): + pass + + def cpython_only(self, func): + if 'PyPy' in sys.version: + return self.dummy + return func diff --git a/rpython/tool/singledispatch/test_singledispatch.py b/rpython/tool/singledispatch/test_singledispatch.py new file mode 100644 --- /dev/null +++ b/rpython/tool/singledispatch/test_singledispatch.py @@ -0,0 +1,519 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import collections +import decimal +from itertools import permutations +import singledispatch as functools +from singledispatch_helpers import Support +try: + from collections import ChainMap +except ImportError: + from singledispatch_helpers import ChainMap + collections.ChainMap = ChainMap +try: + from collections import OrderedDict +except ImportError: + from singledispatch_helpers import OrderedDict + collections.OrderedDict = OrderedDict +try: + import unittest2 as unittest +except ImportError: + import unittest + + +support = Support() +for _prefix in ('collections.abc', '_abcoll'): + if _prefix in repr(collections.Container): + abcoll_prefix = _prefix + break +else: + abcoll_prefix = '?' +del _prefix + + +class TestSingleDispatch(unittest.TestCase): + def test_simple_overloads(self): + @functools.singledispatch + def g(obj): + return "base" + def g_int(i): + return "integer" + g.register(int, g_int) + self.assertEqual(g("str"), "base") + self.assertEqual(g(1), "integer") + self.assertEqual(g([1,2,3]), "base") + + def test_mro(self): + @functools.singledispatch + def g(obj): + return "base" + class A(object): + pass + class C(A): + pass + class B(A): + pass + class D(C, B): + pass + def g_A(a): + return "A" + def g_B(b): + return "B" + g.register(A, g_A) + g.register(B, g_B) + self.assertEqual(g(A()), "A") + self.assertEqual(g(B()), "B") + self.assertEqual(g(C()), "A") + self.assertEqual(g(D()), "B") + + def test_register_decorator(self): + @functools.singledispatch + def g(obj): + return "base" + @g.register(int) + def g_int(i): + return "int %s" % (i,) + self.assertEqual(g(""), "base") + self.assertEqual(g(12), "int 12") + self.assertIs(g.dispatch(int), g_int) + self.assertIs(g.dispatch(object), g.dispatch(str)) + # Note: in the assert above this is not g. + # @singledispatch returns the wrapper. + + def test_wrapping_attributes(self): + @functools.singledispatch + def g(obj): + "Simple test" + return "Test" + self.assertEqual(g.__name__, "g") + self.assertEqual(g.__doc__, "Simple test") + + @unittest.skipUnless(decimal, 'requires _decimal') + @support.cpython_only + def test_c_classes(self): + @functools.singledispatch + def g(obj): + return "base" + @g.register(decimal.DecimalException) + def _(obj): + return obj.args + subn = decimal.Subnormal("Exponent < Emin") + rnd = decimal.Rounded("Number got rounded") + self.assertEqual(g(subn), ("Exponent < Emin",)) + self.assertEqual(g(rnd), ("Number got rounded",)) + @g.register(decimal.Subnormal) + def _(obj): + return "Too small to care." + self.assertEqual(g(subn), "Too small to care.") + self.assertEqual(g(rnd), ("Number got rounded",)) + + def test_compose_mro(self): + # None of the examples in this test depend on haystack ordering. 
+ c = collections + mro = functools._compose_mro + bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set] + for haystack in permutations(bases): + m = mro(dict, haystack) + self.assertEqual(m, [dict, c.MutableMapping, c.Mapping, c.Sized, + c.Iterable, c.Container, object]) + bases = [c.Container, c.Mapping, c.MutableMapping, c.OrderedDict] + for haystack in permutations(bases): + m = mro(c.ChainMap, haystack) + self.assertEqual(m, [c.ChainMap, c.MutableMapping, c.Mapping, + c.Sized, c.Iterable, c.Container, object]) + + # If there's a generic function with implementations registered for + # both Sized and Container, passing a defaultdict to it results in an + # ambiguous dispatch which will cause a RuntimeError (see + # test_mro_conflicts). + bases = [c.Container, c.Sized, str] + for haystack in permutations(bases): + m = mro(c.defaultdict, [c.Sized, c.Container, str]) + self.assertEqual(m, [c.defaultdict, dict, c.Sized, c.Container, + object]) + + # MutableSequence below is registered directly on D. In other words, it + # preceeds MutableMapping which means single dispatch will always + # choose MutableSequence here. + class D(c.defaultdict): + pass + c.MutableSequence.register(D) + bases = [c.MutableSequence, c.MutableMapping] + for haystack in permutations(bases): + m = mro(D, bases) + self.assertEqual(m, [D, c.MutableSequence, c.Sequence, + c.defaultdict, dict, c.MutableMapping, + c.Mapping, c.Sized, c.Iterable, c.Container, + object]) + + # Container and Callable are registered on different base classes and + # a generic function supporting both should always pick the Callable + # implementation if a C instance is passed. + class C(c.defaultdict): + def __call__(self): + pass + bases = [c.Sized, c.Callable, c.Container, c.Mapping] + for haystack in permutations(bases): + m = mro(C, haystack) + self.assertEqual(m, [C, c.Callable, c.defaultdict, dict, c.Mapping, + c.Sized, c.Iterable, c.Container, object]) + + def test_register_abc(self): + c = collections + d = {"a": "b"} + l = [1, 2, 3] + s = set([object(), None]) + f = frozenset(s) + t = (1, 2, 3) + @functools.singledispatch + def g(obj): + return "base" + self.assertEqual(g(d), "base") + self.assertEqual(g(l), "base") + self.assertEqual(g(s), "base") + self.assertEqual(g(f), "base") + self.assertEqual(g(t), "base") + g.register(c.Sized, lambda obj: "sized") + self.assertEqual(g(d), "sized") + self.assertEqual(g(l), "sized") + self.assertEqual(g(s), "sized") + self.assertEqual(g(f), "sized") + self.assertEqual(g(t), "sized") + g.register(c.MutableMapping, lambda obj: "mutablemapping") + self.assertEqual(g(d), "mutablemapping") + self.assertEqual(g(l), "sized") + self.assertEqual(g(s), "sized") + self.assertEqual(g(f), "sized") + self.assertEqual(g(t), "sized") + g.register(c.ChainMap, lambda obj: "chainmap") + self.assertEqual(g(d), "mutablemapping") # irrelevant ABCs registered + self.assertEqual(g(l), "sized") + self.assertEqual(g(s), "sized") + self.assertEqual(g(f), "sized") + self.assertEqual(g(t), "sized") + g.register(c.MutableSequence, lambda obj: "mutablesequence") + self.assertEqual(g(d), "mutablemapping") + self.assertEqual(g(l), "mutablesequence") + self.assertEqual(g(s), "sized") + self.assertEqual(g(f), "sized") + self.assertEqual(g(t), "sized") + g.register(c.MutableSet, lambda obj: "mutableset") + self.assertEqual(g(d), "mutablemapping") + self.assertEqual(g(l), "mutablesequence") + self.assertEqual(g(s), "mutableset") + self.assertEqual(g(f), "sized") + self.assertEqual(g(t), "sized") + g.register(c.Mapping, lambda 
obj: "mapping") + self.assertEqual(g(d), "mutablemapping") # not specific enough + self.assertEqual(g(l), "mutablesequence") + self.assertEqual(g(s), "mutableset") + self.assertEqual(g(f), "sized") + self.assertEqual(g(t), "sized") + g.register(c.Sequence, lambda obj: "sequence") + self.assertEqual(g(d), "mutablemapping") + self.assertEqual(g(l), "mutablesequence") + self.assertEqual(g(s), "mutableset") + self.assertEqual(g(f), "sized") + self.assertEqual(g(t), "sequence") + g.register(c.Set, lambda obj: "set") + self.assertEqual(g(d), "mutablemapping") + self.assertEqual(g(l), "mutablesequence") + self.assertEqual(g(s), "mutableset") + self.assertEqual(g(f), "set") + self.assertEqual(g(t), "sequence") + g.register(dict, lambda obj: "dict") + self.assertEqual(g(d), "dict") + self.assertEqual(g(l), "mutablesequence") + self.assertEqual(g(s), "mutableset") + self.assertEqual(g(f), "set") + self.assertEqual(g(t), "sequence") + g.register(list, lambda obj: "list") + self.assertEqual(g(d), "dict") + self.assertEqual(g(l), "list") + self.assertEqual(g(s), "mutableset") + self.assertEqual(g(f), "set") + self.assertEqual(g(t), "sequence") + g.register(set, lambda obj: "concrete-set") + self.assertEqual(g(d), "dict") + self.assertEqual(g(l), "list") + self.assertEqual(g(s), "concrete-set") + self.assertEqual(g(f), "set") + self.assertEqual(g(t), "sequence") + g.register(frozenset, lambda obj: "frozen-set") + self.assertEqual(g(d), "dict") + self.assertEqual(g(l), "list") + self.assertEqual(g(s), "concrete-set") + self.assertEqual(g(f), "frozen-set") + self.assertEqual(g(t), "sequence") + g.register(tuple, lambda obj: "tuple") + self.assertEqual(g(d), "dict") + self.assertEqual(g(l), "list") + self.assertEqual(g(s), "concrete-set") + self.assertEqual(g(f), "frozen-set") + self.assertEqual(g(t), "tuple") + + def test_c3_abc(self): + c = collections + mro = functools._c3_mro + class A(object): + pass + class B(A): + def __len__(self): + return 0 # implies Sized + #@c.Container.register + class C(object): + pass + c.Container.register(C) + class D(object): + pass # unrelated + class X(D, C, B): + def __call__(self): + pass # implies Callable + expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object] + for abcs in permutations([c.Sized, c.Callable, c.Container]): + self.assertEqual(mro(X, abcs=abcs), expected) + # unrelated ABCs don't appear in the resulting MRO + many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable] + self.assertEqual(mro(X, abcs=many_abcs), expected) + + def test_mro_conflicts(self): + c = collections + @functools.singledispatch + def g(arg): + return "base" + class O(c.Sized): + def __len__(self): + return 0 + o = O() + self.assertEqual(g(o), "base") + g.register(c.Iterable, lambda arg: "iterable") + g.register(c.Container, lambda arg: "container") + g.register(c.Sized, lambda arg: "sized") + g.register(c.Set, lambda arg: "set") + self.assertEqual(g(o), "sized") + c.Iterable.register(O) + self.assertEqual(g(o), "sized") # because it's explicitly in __mro__ + c.Container.register(O) + self.assertEqual(g(o), "sized") # see above: Sized is in __mro__ + c.Set.register(O) + self.assertEqual(g(o), "set") # because c.Set is a subclass of + # c.Sized and c.Container + class P(object): + pass + p = P() + self.assertEqual(g(p), "base") + c.Iterable.register(P) + self.assertEqual(g(p), "iterable") + c.Container.register(P) + with self.assertRaises(RuntimeError) as re_one: + g(p) + self.assertIn( + str(re_one.exception), + (("Ambiguous dispatch: " + "or 
").format(prefix=abcoll_prefix), + ("Ambiguous dispatch: " + "or ").format(prefix=abcoll_prefix)), + ) + class Q(c.Sized): + def __len__(self): + return 0 + q = Q() + self.assertEqual(g(q), "sized") + c.Iterable.register(Q) + self.assertEqual(g(q), "sized") # because it's explicitly in __mro__ + c.Set.register(Q) + self.assertEqual(g(q), "set") # because c.Set is a subclass of + # c.Sized and c.Iterable + @functools.singledispatch + def h(arg): + return "base" + @h.register(c.Sized) + def _(arg): + return "sized" + @h.register(c.Container) + def _(arg): + return "container" + # Even though Sized and Container are explicit bases of MutableMapping, + # this ABC is implicitly registered on defaultdict which makes all of + # MutableMapping's bases implicit as well from defaultdict's + # perspective. + with self.assertRaises(RuntimeError) as re_two: + h(c.defaultdict(lambda: 0)) + self.assertIn( + str(re_two.exception), + (("Ambiguous dispatch: " + "or ").format(prefix=abcoll_prefix), + ("Ambiguous dispatch: " + "or ").format(prefix=abcoll_prefix)), + ) + class R(c.defaultdict): + pass + c.MutableSequence.register(R) + @functools.singledispatch + def i(arg): + return "base" + @i.register(c.MutableMapping) + def _(arg): + return "mapping" + @i.register(c.MutableSequence) + def _(arg): + return "sequence" + r = R() + self.assertEqual(i(r), "sequence") + class S(object): + pass + class T(S, c.Sized): + def __len__(self): + return 0 + t = T() + self.assertEqual(h(t), "sized") + c.Container.register(T) + self.assertEqual(h(t), "sized") # because it's explicitly in the MRO + class U(object): + def __len__(self): + return 0 + u = U() + self.assertEqual(h(u), "sized") # implicit Sized subclass inferred + # from the existence of __len__() + c.Container.register(U) + # There is no preference for registered versus inferred ABCs. 
+ with self.assertRaises(RuntimeError) as re_three: + h(u) + self.assertIn( + str(re_three.exception), + (("Ambiguous dispatch: " + "or ").format(prefix=abcoll_prefix), + ("Ambiguous dispatch: " + "or ").format(prefix=abcoll_prefix)), + ) + class V(c.Sized, S): + def __len__(self): + return 0 + @functools.singledispatch + def j(arg): + return "base" + @j.register(S) + def _(arg): + return "s" + @j.register(c.Container) + def _(arg): + return "container" + v = V() + self.assertEqual(j(v), "s") + c.Container.register(V) + self.assertEqual(j(v), "container") # because it ends up right after + # Sized in the MRO + + def test_cache_invalidation(self): + try: + from collections import UserDict + except ImportError: + from UserDict import UserDict + class TracingDict(UserDict): + def __init__(self, *args, **kwargs): + UserDict.__init__(self, *args, **kwargs) + self.set_ops = [] + self.get_ops = [] + def __getitem__(self, key): + result = self.data[key] + self.get_ops.append(key) + return result + def __setitem__(self, key, value): + self.set_ops.append(key) + self.data[key] = value + def clear(self): + self.data.clear() + _orig_wkd = functools.WeakKeyDictionary + td = TracingDict() + functools.WeakKeyDictionary = lambda: td + c = collections + @functools.singledispatch + def g(arg): + return "base" + d = {} + l = [] + self.assertEqual(len(td), 0) + self.assertEqual(g(d), "base") + self.assertEqual(len(td), 1) + self.assertEqual(td.get_ops, []) + self.assertEqual(td.set_ops, [dict]) + self.assertEqual(td.data[dict], g.registry[object]) + self.assertEqual(g(l), "base") + self.assertEqual(len(td), 2) + self.assertEqual(td.get_ops, []) + self.assertEqual(td.set_ops, [dict, list]) + self.assertEqual(td.data[dict], g.registry[object]) + self.assertEqual(td.data[list], g.registry[object]) + self.assertEqual(td.data[dict], td.data[list]) + self.assertEqual(g(l), "base") + self.assertEqual(g(d), "base") + self.assertEqual(td.get_ops, [list, dict]) + self.assertEqual(td.set_ops, [dict, list]) + g.register(list, lambda arg: "list") + self.assertEqual(td.get_ops, [list, dict]) + self.assertEqual(len(td), 0) + self.assertEqual(g(d), "base") + self.assertEqual(len(td), 1) + self.assertEqual(td.get_ops, [list, dict]) + self.assertEqual(td.set_ops, [dict, list, dict]) + self.assertEqual(td.data[dict], + functools._find_impl(dict, g.registry)) + self.assertEqual(g(l), "list") + self.assertEqual(len(td), 2) + self.assertEqual(td.get_ops, [list, dict]) + self.assertEqual(td.set_ops, [dict, list, dict, list]) + self.assertEqual(td.data[list], + functools._find_impl(list, g.registry)) + class X(object): + pass + c.MutableMapping.register(X) # Will not invalidate the cache, + # not using ABCs yet. 
+ self.assertEqual(g(d), "base") + self.assertEqual(g(l), "list") + self.assertEqual(td.get_ops, [list, dict, dict, list]) + self.assertEqual(td.set_ops, [dict, list, dict, list]) + g.register(c.Sized, lambda arg: "sized") + self.assertEqual(len(td), 0) + self.assertEqual(g(d), "sized") + self.assertEqual(len(td), 1) + self.assertEqual(td.get_ops, [list, dict, dict, list]) + self.assertEqual(td.set_ops, [dict, list, dict, list, dict]) + self.assertEqual(g(l), "list") + self.assertEqual(len(td), 2) + self.assertEqual(td.get_ops, [list, dict, dict, list]) + self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list]) + self.assertEqual(g(l), "list") + self.assertEqual(g(d), "sized") + self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict]) + self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list]) + g.dispatch(list) + g.dispatch(dict) + self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict, + list, dict]) + self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list]) + c.MutableSet.register(X) # Will invalidate the cache. + self.assertEqual(len(td), 2) # Stale cache. + self.assertEqual(g(l), "list") + self.assertEqual(len(td), 1) + g.register(c.MutableMapping, lambda arg: "mutablemapping") + self.assertEqual(len(td), 0) + self.assertEqual(g(d), "mutablemapping") + self.assertEqual(len(td), 1) + self.assertEqual(g(l), "list") + self.assertEqual(len(td), 2) + g.register(dict, lambda arg: "dict") + self.assertEqual(g(d), "dict") + self.assertEqual(g(l), "list") + g._clear_cache() + self.assertEqual(len(td), 0) + functools.WeakKeyDictionary = _orig_wkd + + +if __name__ == '__main__': + unittest.main() From noreply at buildbot.pypy.org Sun Feb 16 04:24:24 2014 From: noreply at buildbot.pypy.org (rlamy) Date: Sun, 16 Feb 2014 04:24:24 +0100 (CET) Subject: [pypy-commit] pypy singledispatch: use singledispatch in rlib.rmarshal Message-ID: <20140216032424.377011C1154@cobra.cs.uni-duesseldorf.de> Author: Ronan Lamy Branch: singledispatch Changeset: r69171:be668feb3655 Date: 2014-02-16 03:23 +0000 http://bitbucket.org/pypy/pypy/changeset/be668feb3655/ Log: use singledispatch in rlib.rmarshal diff --git a/rpython/rlib/rmarshal.py b/rpython/rlib/rmarshal.py --- a/rpython/rlib/rmarshal.py +++ b/rpython/rlib/rmarshal.py @@ -1,11 +1,11 @@ """A way to serialize data in the same format as the 'marshal' module but accessible to RPython programs. 
""" +from rpython.tool.singledispatch.singledispatch import singledispatch from rpython.annotator import model as annmodel from rpython.annotator.signature import annotation from rpython.annotator.listdef import ListDef, TooLateForChange -from rpython.tool.pairtype import pair, pairtype from rpython.rlib.rarithmetic import r_longlong, intmask, LONG_BIT, ovfcheck from rpython.rlib.rfloat import formatd, rstring_to_float from rpython.rlib.unroll import unrolling_iterable @@ -29,7 +29,7 @@ return find_dumper(s_obj) except CannotMarshal: # ask the annotation to produce an appropriate dumper - pair(_tag, s_obj).install_marshaller() + install_marshaller(s_obj) return find_dumper(s_obj) get_marshaller._annspecialcase_ = 'specialize:memo' @@ -40,7 +40,7 @@ return find_loader(s_obj) except CannotUnmarshall: # ask the annotation to produce an appropriate loader - pair(_tag, s_obj).install_unmarshaller() + install_unmarshaller(s_obj) return find_loader(s_obj) def get_unmarshaller(type): @@ -325,11 +325,6 @@ # # Annotations => dumpers and loaders -class MTag(object): - """Tag for pairtype(), for the purpose of making the get_marshaller() - and get_unmarshaller() methods of SomeObject only locally visible.""" -_tag = MTag() - def weakly_contains(s_bigger, s_smaller): # a special version of s_bigger.contains(s_smaller). Warning, to # support ListDefs properly, this works by trying to produce a side-effect @@ -342,145 +337,156 @@ except (annmodel.UnionError, TooLateForChange): return False + at singledispatch +def install_marshaller(obj): + raise NotImplementedError -class __extend__(pairtype(MTag, annmodel.SomeObject)): - def install_marshaller((tag, s_obj)): - if not hasattr(s_obj, '_get_rmarshall_support_'): - raise CannotMarshal(s_obj) - # special support for custom annotation like SomeStatResult: - # the annotation tells us how to turn an object into something - # else that can be marshalled - def dump_with_custom_reduce(buf, x): - reduced_obj = fn_reduce(x) - reduceddumper(buf, reduced_obj) - s_reduced_obj, fn_reduce, fn_recreate = s_obj._get_rmarshall_support_() - reduceddumper = get_marshaller(s_reduced_obj) - add_dumper(s_obj, dump_with_custom_reduce) + at singledispatch +def install_unmarshaller(obj): + raise NotImplementedError - def install_unmarshaller((tag, s_obj)): - if not hasattr(s_obj, '_get_rmarshall_support_'): - raise CannotUnmarshall(s_obj) - # special support for custom annotation like SomeStatResult - def load_with_custom_recreate(loader): - reduced_obj = reducedloader(loader) - return fn_recreate(reduced_obj) - s_reduced_obj, fn_reduce, fn_recreate = s_obj._get_rmarshall_support_() - reducedloader = get_loader(s_reduced_obj) - add_loader(s_obj, load_with_custom_recreate) + at install_marshaller.register(annmodel.SomeObject) +def _(s_obj): + if not hasattr(s_obj, '_get_rmarshall_support_'): + raise CannotMarshal(s_obj) -class __extend__(pairtype(MTag, annmodel.SomeList)): + # special support for custom annotation like SomeStatResult: + # the annotation tells us how to turn an object into something + # else that can be marshalled + def dump_with_custom_reduce(buf, x): + reduced_obj = fn_reduce(x) + reduceddumper(buf, reduced_obj) + s_reduced_obj, fn_reduce, fn_recreate = s_obj._get_rmarshall_support_() + reduceddumper = get_marshaller(s_reduced_obj) + add_dumper(s_obj, dump_with_custom_reduce) - def install_marshaller((tag, s_list)): - def dump_list_or_none(buf, x): - if x is None: - dump_none(buf, x) - else: - buf.append(TYPE_LIST) - w_long(buf, len(x)) - for item in x: - 
itemdumper(buf, item) + at install_unmarshaller.register(annmodel.SomeObject) +def _(s_obj): + if not hasattr(s_obj, '_get_rmarshall_support_'): + raise CannotUnmarshall(s_obj) - itemdumper = get_marshaller(s_list.listdef.listitem.s_value) - if s_list.listdef.listitem.dont_change_any_more: - s_general_list = s_list + # special support for custom annotation like SomeStatResult + def load_with_custom_recreate(loader): + reduced_obj = reducedloader(loader) + return fn_recreate(reduced_obj) + s_reduced_obj, fn_reduce, fn_recreate = s_obj._get_rmarshall_support_() + reducedloader = get_loader(s_reduced_obj) + add_loader(s_obj, load_with_custom_recreate) + + + at install_marshaller.register(annmodel.SomeList) +def _(s_list): + def dump_list_or_none(buf, x): + if x is None: + dump_none(buf, x) else: - s_item = get_dumper_annotation(itemdumper) - s_general_list = annotation([s_item]) - add_dumper(s_general_list, dump_list_or_none) + buf.append(TYPE_LIST) + w_long(buf, len(x)) + for item in x: + itemdumper(buf, item) - def install_unmarshaller((tag, s_list)): - def load_list_or_none(loader): - t = readchr(loader) - if t == TYPE_LIST: - length = readlong(loader) - result = [] - for i in range(length): - result.append(itemloader(loader)) - return result - elif t == TYPE_NONE: - return None - else: - raise ValueError("expected a list or None") + itemdumper = get_marshaller(s_list.listdef.listitem.s_value) + if s_list.listdef.listitem.dont_change_any_more: + s_general_list = s_list + else: + s_item = get_dumper_annotation(itemdumper) + s_general_list = annotation([s_item]) + add_dumper(s_general_list, dump_list_or_none) - itemloader = get_loader(s_list.listdef.listitem.s_value) - add_loader(s_list, load_list_or_none) + at install_unmarshaller.register(annmodel.SomeList) +def _(s_list): + def load_list_or_none(loader): + t = readchr(loader) + if t == TYPE_LIST: + length = readlong(loader) + result = [] + for i in range(length): + result.append(itemloader(loader)) + return result + elif t == TYPE_NONE: + return None + else: + raise ValueError("expected a list or None") + itemloader = get_loader(s_list.listdef.listitem.s_value) + add_loader(s_list, load_list_or_none) -class __extend__(pairtype(MTag, annmodel.SomeDict)): - def install_marshaller((tag, s_dict)): - def dump_dict_or_none(buf, x): - if x is None: - dump_none(buf, x) - else: - buf.append(TYPE_DICT) - for key, value in x.items(): - keydumper(buf, key) - valuedumper(buf, value) - buf.append('0') # end of dict + at install_marshaller.register(annmodel.SomeDict) +def _(s_dict): + def dump_dict_or_none(buf, x): + if x is None: + dump_none(buf, x) + else: + buf.append(TYPE_DICT) + for key, value in x.items(): + keydumper(buf, key) + valuedumper(buf, value) + buf.append('0') # end of dict - keydumper = get_marshaller(s_dict.dictdef.dictkey.s_value) - valuedumper = get_marshaller(s_dict.dictdef.dictvalue.s_value) - if (s_dict.dictdef.dictkey.dont_change_any_more or + keydumper = get_marshaller(s_dict.dictdef.dictkey.s_value) + valuedumper = get_marshaller(s_dict.dictdef.dictvalue.s_value) + if (s_dict.dictdef.dictkey.dont_change_any_more or s_dict.dictdef.dictvalue.dont_change_any_more): - s_general_dict = s_dict + s_general_dict = s_dict + else: + s_key = get_dumper_annotation(keydumper) + s_value = get_dumper_annotation(valuedumper) + s_general_dict = annotation({s_key: s_value}) + add_dumper(s_general_dict, dump_dict_or_none) + + at install_unmarshaller.register(annmodel.SomeDict) +def _(s_dict): + def load_dict_or_none(loader): + t = 
readchr(loader) + if t == TYPE_DICT: + result = {} + while peekchr(loader) != '0': + key = keyloader(loader) + value = valueloader(loader) + result[key] = value + readchr(loader) # consume the final '0' + return result + elif t == TYPE_NONE: + return None else: - s_key = get_dumper_annotation(keydumper) - s_value = get_dumper_annotation(valuedumper) - s_general_dict = annotation({s_key: s_value}) - add_dumper(s_general_dict, dump_dict_or_none) + raise ValueError("expected a dict or None") - def install_unmarshaller((tag, s_dict)): - def load_dict_or_none(loader): - t = readchr(loader) - if t == TYPE_DICT: - result = {} - while peekchr(loader) != '0': - key = keyloader(loader) - value = valueloader(loader) - result[key] = value - readchr(loader) # consume the final '0' - return result - elif t == TYPE_NONE: - return None - else: - raise ValueError("expected a dict or None") + keyloader = get_loader(s_dict.dictdef.dictkey.s_value) + valueloader = get_loader(s_dict.dictdef.dictvalue.s_value) + add_loader(s_dict, load_dict_or_none) - keyloader = get_loader(s_dict.dictdef.dictkey.s_value) - valueloader = get_loader(s_dict.dictdef.dictvalue.s_value) - add_loader(s_dict, load_dict_or_none) + at install_marshaller.register(annmodel.SomeTuple) +def _(s_tuple): + def dump_tuple(buf, x): + buf.append(TYPE_TUPLE) + w_long(buf, len(x)) + for i, itemdumper in unroll_item_dumpers: + itemdumper(buf, x[i]) -class __extend__(pairtype(MTag, annmodel.SomeTuple)): + itemdumpers = [get_marshaller(s_item) for s_item in s_tuple.items] + unroll_item_dumpers = unrolling_iterable(enumerate(itemdumpers)) + dumper_annotations = [get_dumper_annotation(itemdumper) + for itemdumper in itemdumpers] + s_general_tuple = annmodel.SomeTuple(dumper_annotations) + add_dumper(s_general_tuple, dump_tuple) - def install_marshaller((tag, s_tuple)): - def dump_tuple(buf, x): - buf.append(TYPE_TUPLE) - w_long(buf, len(x)) - for i, itemdumper in unroll_item_dumpers: - itemdumper(buf, x[i]) + at install_unmarshaller.register(annmodel.SomeTuple) +def _(s_tuple): + def load_tuple(loader): + if readchr(loader) != TYPE_TUPLE: + raise ValueError("expected a tuple") + if readlong(loader) != expected_length: + raise ValueError("wrong tuple length") + result = () + for i, itemloader in unroll_item_loaders: + result += (itemloader(loader),) + return result - itemdumpers = [get_marshaller(s_item) for s_item in s_tuple.items] - unroll_item_dumpers = unrolling_iterable(enumerate(itemdumpers)) - dumper_annotations = [get_dumper_annotation(itemdumper) - for itemdumper in itemdumpers] - s_general_tuple = annmodel.SomeTuple(dumper_annotations) - add_dumper(s_general_tuple, dump_tuple) - - def install_unmarshaller((tag, s_tuple)): - def load_tuple(loader): - if readchr(loader) != TYPE_TUPLE: - raise ValueError("expected a tuple") - if readlong(loader) != expected_length: - raise ValueError("wrong tuple length") - result = () - for i, itemloader in unroll_item_loaders: - result += (itemloader(loader),) - return result - - itemloaders = [get_loader(s_item) for s_item in s_tuple.items] - expected_length = len(itemloaders) - unroll_item_loaders = unrolling_iterable(enumerate(itemloaders)) - add_loader(s_tuple, load_tuple) + itemloaders = [get_loader(s_item) for s_item in s_tuple.items] + expected_length = len(itemloaders) + unroll_item_loaders = unrolling_iterable(enumerate(itemloaders)) + add_loader(s_tuple, load_tuple) From noreply at buildbot.pypy.org Sun Feb 16 09:31:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Feb 2014 09:31:45 
+0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: creation_markers need to distinguish between current-transaction Message-ID: <20140216083145.BF7F81C1178@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r746:d288b4fdb72b Date: 2014-02-16 09:29 +0100 http://bitbucket.org/pypy/stmgc/changeset/d288b4fdb72b/ Log: creation_markers need to distinguish between current-transaction objects *inside* or *outside* the nursery, otherwise _stm_write_slowpath() is never going to be called for the latter diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -74,6 +74,11 @@ TS_INEVITABLE, TS_MUST_ABORT, }; +enum { /* for stm_creation_marker_t */ + CM_NOT_CURRENT_TRANSACTION = 0x00, + CM_CURRENT_TRANSACTION_OUTSIDE_NURSERY = 0x01, + CM_CURRENT_TRANSACTION_IN_NURSERY = 0xff, +}; static char *stm_object_pages; static stm_thread_local_t *stm_thread_locals = NULL; @@ -105,7 +110,8 @@ static bool _running_transaction(void); static inline bool obj_from_same_transaction(object_t *obj) { - return ((stm_creation_marker_t *)(((uintptr_t)obj) >> 8))->cm != 0; + return ((stm_creation_marker_t *)(((uintptr_t)obj) >> 8))->cm != + CM_NOT_CURRENT_TRANSACTION; } static void teardown_core(void); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -21,8 +21,7 @@ /* size in bytes of the "line". Should be equal to the line used by stm_creation_marker_t. */ -#define NURSERY_LINE_SHIFT 8 -#define NURSERY_LINE (1 << NURSERY_LINE_SHIFT) +#define NURSERY_LINE 256 /************************************************************/ @@ -37,6 +36,7 @@ static void setup_nursery(void) { + assert(NURSERY_LINE == (1 << 8)); /* from stm_creation_marker_t */ assert((NURSERY_SECTION_SIZE % NURSERY_LINE) == 0); assert(MEDIUM_OBJECT < LARGE_OBJECT); assert(LARGE_OBJECT < NURSERY_SECTION_SIZE); @@ -49,40 +49,6 @@ return (uintptr_t)obj < NURSERY_START + NURSERY_SIZE; } -static void set_creation_markers(stm_char *p, uint64_t size) -{ - /* Set the creation markers to 0xff for all lines from p to p+size. - Both p and size should be aligned to NURSERY_LINE. */ - - assert((((uintptr_t)p) & (NURSERY_LINE - 1)) == 0); - assert((size & (NURSERY_LINE - 1)) == 0); - - char *addr = REAL_ADDRESS(STM_SEGMENT->segment_base, - ((uintptr_t)p) >> NURSERY_LINE_SHIFT); - memset(addr, 0xff, size >> NURSERY_LINE_SHIFT); - - LIST_APPEND(STM_PSEGMENT->creation_markers, addr); -} - -static void reset_all_creation_markers(void) -{ - /* Note that the page 'NB_PAGES - 1' is not actually used. This - ensures that the creation markers always end with some zeroes. - We reset the markers 8 at a time, by writing null integers - until we reach a place that is already null. - */ - LIST_FOREACH_R( - STM_PSEGMENT->creation_markers, - uintptr_t /*item*/, - ({ - uint64_t *p = (uint64_t *)(item & ~7); - while (*p != 0) - *p++ = 0; - })); - - list_clear(STM_PSEGMENT->creation_markers); -} - #define NURSERY_ALIGN(bytes) \ (((bytes) + NURSERY_LINE - 1) & ~(NURSERY_LINE - 1)) @@ -112,8 +78,8 @@ STM_SEGMENT->nursery_section_end = (uintptr_t)p + NURSERY_SECTION_SIZE; /* Also fill the corresponding creation markers with 0xff. 
*/ - set_creation_markers(p, NURSERY_SECTION_SIZE); - + set_creation_markers(p, NURSERY_SECTION_SIZE, + CM_CURRENT_TRANSACTION_IN_NURSERY); return p; } abort(); diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -1,2 +1,2 @@ -static void reset_all_creation_markers(void); +/* empty */ diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -77,3 +77,36 @@ assert(flag_page_private[pagenum] == REMAPPING_PAGE); flag_page_private[pagenum] = PRIVATE_PAGE; } + +static void set_creation_markers(stm_char *p, uint64_t size, int newvalue) +{ + /* Set the creation markers to 'newvalue' for all lines from 'p' to + 'p+size'. Both p and size should be aligned to the line size: 256. */ + + assert((((uintptr_t)p) & 255) == 0); + assert((size & 255) == 0); + + char *addr = REAL_ADDRESS(STM_SEGMENT->segment_base, ((uintptr_t)p) >> 8); + memset(addr, newvalue, size >> 8); + + LIST_APPEND(STM_PSEGMENT->creation_markers, addr); +} + +static void reset_all_creation_markers(void) +{ + /* Note that the page 'NB_PAGES - 1' is not actually used. This + ensures that the creation markers always end with some zeroes. + We reset the markers 8 at a time, by writing null integers + until we reach a place that is already null. + */ + LIST_FOREACH_R( + STM_PSEGMENT->creation_markers, + uintptr_t /*item*/, + ({ + uint64_t *p = (uint64_t *)(item & ~7); + while (*p != 0) + *p++ = 0; + })); + + list_clear(STM_PSEGMENT->creation_markers); +} diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -28,3 +28,6 @@ } _pages_privatize(pagenum, count); } + +static void set_creation_markers(stm_char *p, uint64_t size, int newvalue); +static void reset_all_creation_markers(void); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -49,10 +49,11 @@ struct stm_creation_marker_s { /* In addition to read markers, every "line" of 256 bytes has one extra byte, the creation marker, located at the address divided - by 256. The creation marker is either 0xff if all objects in + by 256. The creation marker is either non-zero if all objects in this line come have been allocated by the current transaction, or 0x00 if none of them have been. Lines cannot contain a - mixture of both. */ + mixture of both. Non-zero values are 0xff if in the nursery, + and 0x01 if outside the nursery. 
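   (Worked example of the mapping, with a made-up offset: an object starting
   at segment offset 0x4a7f30 has its creation marker byte at offset
   0x4a7f30 >> 8 = 0x4a7f, and every object in the 256-byte line
   0x4a7f00..0x4a7fff shares that same byte -- which is why the allocator
   must never let a single line mix objects created by different
   transactions.)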
*/ uint8_t cm; }; @@ -152,9 +153,10 @@ static inline void stm_write(object_t *obj) { /* this is: - 'if (cm == 0 && (stm_flags & WRITE_BARRIER_CALLED) == 0)' - assuming that 'cm' is either 0 (not created in current transaction) - or 0xff (created in current transaction) */ + 'if (cm < 0x80 && (stm_flags & WRITE_BARRIER_CALLED) == 0)' + where 'cm' can be 0 (not created in current transaction) + or 0xff (created in current transaction) + or 0x01 (same, but outside the nursery) */ if (UNLIKELY(!((((stm_creation_marker_t *)(((uintptr_t)obj) >> 8))->cm | obj->stm_flags) & _STM_GCFLAG_WRITE_BARRIER_CALLED))) _stm_write_slowpath(obj); From noreply at buildbot.pypy.org Sun Feb 16 09:49:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Feb 2014 09:49:09 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Fix the GCFLAG_WRITE_BARRIER_CALLED when committing Message-ID: <20140216084909.16E611C0F12@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r747:1c1739135bd5 Date: 2014-02-16 09:47 +0100 http://bitbucket.org/pypy/stmgc/changeset/1c1739135bd5/ Log: Fix the GCFLAG_WRITE_BARRIER_CALLED when committing diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -62,6 +62,7 @@ /* add the write-barrier-already-called flag ONLY if we succeeded in getting the write-lock */ + assert(!(obj->stm_flags & GCFLAG_WRITE_BARRIER_CALLED)); obj->stm_flags |= GCFLAG_WRITE_BARRIER_CALLED; LIST_APPEND(STM_PSEGMENT->modified_objects, obj); } @@ -177,14 +178,21 @@ get_segment(remote_num)->transaction_read_version)); } - /* clear the write-lock */ + /* clear the write-lock (note that this runs with all other + threads paused, so no need to be careful about ordering) */ uintptr_t lock_idx = (((uintptr_t)item) >> 4) - READMARKER_START; assert(write_locks[lock_idx] == STM_PSEGMENT->write_lock_num); write_locks[lock_idx] = 0; + /* remove again the WRITE_BARRIER_CALLED flag */ + assert(item->stm_flags & GCFLAG_WRITE_BARRIER_CALLED); + item->stm_flags &= ~GCFLAG_WRITE_BARRIER_CALLED; + + /* copy the modified object to the other segment */ char *src = REAL_ADDRESS(local_base, item); + char *dst = REAL_ADDRESS(remote_base, item); ssize_t size = stmcb_size_rounded_up((struct object_s *)src); - memcpy(REAL_ADDRESS(remote_base, item), src, size); + memcpy(dst, src, size); })); list_clear(STM_PSEGMENT->modified_objects); @@ -250,18 +258,27 @@ STM_PSEGMENT->modified_objects, object_t * /*item*/, ({ + /* all objects in 'modified_objects' have this flag */ + assert(item->stm_flags & GCFLAG_WRITE_BARRIER_CALLED); + /* memcpy in the opposite direction than push_modified_to_other_segments() */ char *src = REAL_ADDRESS(remote_base, item); + char *dst = REAL_ADDRESS(local_base, item); ssize_t size = stmcb_size_rounded_up((struct object_s *)src); - memcpy(REAL_ADDRESS(local_base, item), src, size); + memcpy(dst, src, size); - /* copying from the other thread re-added the - WRITE_BARRIER flag */ - //assert(item->stm_flags & GCFLAG_WRITE_BARRIER); --- XXX + /* copying from the other segment removed again the + WRITE_BARRIER_CALLED flag */ + assert(!(item->stm_flags & GCFLAG_WRITE_BARRIER_CALLED)); /* write all changes to the object before we release the - write lock below */ + write lock below. This is needed because we need to + ensure that if the write lock is not set, another thread + can get it and then change 'src' in parallel. 
The + write_fence() ensures in particular that 'src' has been + fully read before we release the lock: reading it + is necessary to write 'dst'. */ write_fence(); /* clear the write-lock */ From noreply at buildbot.pypy.org Sun Feb 16 10:09:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Feb 2014 10:09:34 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Update tests Message-ID: <20140216090934.52B881C03B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r748:c5e34d7c00ca Date: 2014-02-16 10:05 +0100 http://bitbucket.org/pypy/stmgc/changeset/c5e34d7c00ca/ Log: Update tests diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -459,3 +459,7 @@ assert 0 < curlength <= SHADOWSTACK_LENGTH tl.shadowstack -= 1 return ffi.cast("object_t *", tl.shadowstack[0]) + + def push_root_no_gc(self): + "Pushes an invalid object, to crash in case the GC is called" + self.push_root(ffi.cast("object_t *", -1)) diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -98,27 +98,22 @@ py.test.raises(Conflict, self.switch, 0) # detects rw conflict def test_commit_fresh_objects(self): + self.push_root_no_gc() self.start_transaction() lp = stm_allocate(16) stm_set_char(lp, 'u') - p = stm_get_real_address(lp) - self.push_root(lp) self.commit_transaction() - lp = self.pop_root() p1 = stm_get_real_address(lp) - assert p != p1 - + self.switch(1) - + self.start_transaction() stm_write(lp) # privatize page - p_ = stm_get_real_address(lp) - assert p != p_ - assert p1 != p_ + p2 = stm_get_real_address(lp) + assert p1 != p2 assert stm_get_char(lp) == 'u' self.commit_transaction() - def test_commit_fresh_objects2(self): self.switch(1) self.start_transaction() From noreply at buildbot.pypy.org Sun Feb 16 10:09:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Feb 2014 10:09:38 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: At transaction start, align the current_nursery and set creation markers Message-ID: <20140216090938.DFF0F1C03B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r749:4bffa49c22a5 Date: 2014-02-16 10:07 +0100 http://bitbucket.org/pypy/stmgc/changeset/4bffa49c22a5/ Log: At transaction start, align the current_nursery and set creation markers diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -116,6 +116,8 @@ assert(list_is_empty(STM_PSEGMENT->modified_objects)); assert(list_is_empty(STM_PSEGMENT->creation_markers)); + + align_nursery_at_transaction_start(); } diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -84,3 +84,19 @@ } abort(); } + +static void align_nursery_at_transaction_start(void) +{ + /* When the transaction start, we must align the 'nursery_current' + and set creation markers for the part of the section the follows. 
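      (For illustration: NURSERY_ALIGN rounds up to the next 256-byte line,
      e.g. a nursery_current at offset 0x1234 becomes 0x1300; the few bytes
      skipped are simply wasted.  This guarantees that objects of the new
      transaction never share a creation-marker line with objects of the
      previous transaction, which is what test_align_nursery_to_256_bytes
      further down in this changeset checks.)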
+ */ + uintptr_t c = (uintptr_t)STM_SEGMENT->nursery_current; + c = NURSERY_ALIGN(c); + STM_SEGMENT->nursery_current = (stm_char *)c; + + uint64_t size = STM_SEGMENT->nursery_section_end - c; + if (size > 0) { + set_creation_markers((stm_char *)c, size, + CM_CURRENT_TRANSACTION_IN_NURSERY); + } +} diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -1,2 +1,2 @@ -/* empty */ +static void align_nursery_at_transaction_start(void); diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -85,6 +85,7 @@ assert((((uintptr_t)p) & 255) == 0); assert((size & 255) == 0); + assert(size > 0); char *addr = REAL_ADDRESS(STM_SEGMENT->segment_base, ((uintptr_t)p) >> 8); memset(addr, newvalue, size >> 8); diff --git a/c7/test/test_nursery.py b/c7/test/test_nursery.py new file mode 100644 --- /dev/null +++ b/c7/test/test_nursery.py @@ -0,0 +1,15 @@ +from support import * +import py + +class TestBasic(BaseTest): + + def test_align_nursery_to_256_bytes(self): + self.start_transaction() + lp1 = stm_allocate(16) + self.commit_transaction() + self.start_transaction() + lp2 = stm_allocate(16) + # + u1 = int(ffi.cast("uintptr_t", lp1)) + u2 = int(ffi.cast("uintptr_t", lp2)) + assert (u1 & ~255) != (u2 & ~255) From noreply at buildbot.pypy.org Sun Feb 16 10:15:05 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Feb 2014 10:15:05 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Add a passing test Message-ID: <20140216091505.B50F71C03B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r750:e9e218f7fa5e Date: 2014-02-16 10:12 +0100 http://bitbucket.org/pypy/stmgc/changeset/e9e218f7fa5e/ Log: Add a passing test diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -47,6 +47,11 @@ obj->stm_flags) & _STM_GCFLAG_WRITE_BARRIER_CALLED); } +uint8_t _stm_creation_marker(object_t *obj) +{ + return ((stm_creation_marker_t *)(((uintptr_t)obj) >> 8))->cm; +} + static inline bool was_read_remote(char *base, object_t *obj, uint8_t other_transaction_read_version) { diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -86,6 +86,7 @@ #ifdef STM_TESTS bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); +uint8_t _stm_creation_marker(object_t *obj); bool _stm_in_nursery(object_t *obj); bool _stm_in_transaction(stm_thread_local_t *tl); char *_stm_real_address(object_t *o); diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -53,6 +53,7 @@ bool _checked_stm_write(object_t *obj); bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); +uint8_t _stm_creation_marker(object_t *obj); bool _stm_in_nursery(object_t *obj); char *_stm_real_address(object_t *obj); object_t *_stm_segment_address(char *ptr); @@ -342,6 +343,9 @@ def stm_was_written(o): return lib._stm_was_written(o) +def stm_creation_marker(o): + return lib._stm_creation_marker(o) + def stm_stop_transaction(): if lib._stm_stop_transaction(): raise Conflict() diff --git a/c7/test/test_nursery.py b/c7/test/test_nursery.py --- a/c7/test/test_nursery.py +++ b/c7/test/test_nursery.py @@ -13,3 +13,23 @@ u1 = int(ffi.cast("uintptr_t", lp1)) u2 = int(ffi.cast("uintptr_t", lp2)) assert (u1 & ~255) != (u2 & ~255) + + def test_creation_marker_in_nursery(self): + self.start_transaction() + lp1 = stm_allocate(16) + lp2 = stm_allocate(16) + assert stm_creation_marker(lp1) == 0xff + assert stm_creation_marker(lp2) == 0xff + u1 = 
int(ffi.cast("uintptr_t", lp1)) + u2 = int(ffi.cast("uintptr_t", lp2)) + assert u2 == u1 + 16 + self.commit_transaction() + + assert stm_creation_marker(lp1) == 0 + assert stm_creation_marker(lp2) == 0 + + self.start_transaction() + lp3 = stm_allocate(16) + assert stm_creation_marker(lp1) == 0 + assert stm_creation_marker(lp2) == 0 + assert stm_creation_marker(lp3) == 0xff From noreply at buildbot.pypy.org Sun Feb 16 15:30:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Feb 2014 15:30:14 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Tweak Message-ID: <20140216143014.891F11C03B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r752:616a24c4ab3c Date: 2014-02-16 10:35 +0100 http://bitbucket.org/pypy/stmgc/changeset/616a24c4ab3c/ Log: Tweak diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -87,20 +87,21 @@ assert((size & 255) == 0); assert(size > 0); - char *addr = REAL_ADDRESS(STM_SEGMENT->segment_base, ((uintptr_t)p) >> 8); + uintptr_t cmaddr = ((uintptr_t)p) >> 8; + LIST_APPEND(STM_PSEGMENT->creation_markers, cmaddr); + + char *addr = REAL_ADDRESS(STM_SEGMENT->segment_base, cmaddr); memset(addr, newvalue, size >> 8); - - LIST_APPEND(STM_PSEGMENT->creation_markers, addr); } static void set_single_creation_marker(stm_char *p, int newvalue) { assert((((uintptr_t)p) & 255) == 0); - char *addr = REAL_ADDRESS(STM_SEGMENT->segment_base, ((uintptr_t)p) >> 8); - addr[0] = newvalue; + uintptr_t cmaddr = ((uintptr_t)p) >> 8; - LIST_APPEND(STM_PSEGMENT->creation_markers, addr); + ((stm_creation_marker_t *)cmaddr)->cm = newvalue; + LIST_APPEND(STM_PSEGMENT->creation_markers, cmaddr); } static void reset_all_creation_markers(void) @@ -114,7 +115,7 @@ STM_PSEGMENT->creation_markers, uintptr_t /*item*/, ({ - uint64_t *p = (uint64_t *)(item & ~7); + TLPREFIX uint64_t *p = (TLPREFIX uint64_t *)(item & ~7); while (*p != 0) *p++ = 0; })); From noreply at buildbot.pypy.org Sun Feb 16 15:30:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Feb 2014 15:30:15 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Oups, can't call this after releasing the mutex, as a Message-ID: <20140216143015.AC3341C03B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r753:7072d8ddaff5 Date: 2014-02-16 10:37 +0100 http://bitbucket.org/pypy/stmgc/changeset/7072d8ddaff5/ Log: Oups, can't call this after releasing the mutex, as a different thread might get hold of our segment. 
diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -236,10 +236,9 @@ release_thread_segment(tl); /* includes the cond_broadcast(); */ STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; + reset_all_creation_markers(); mutex_unlock(); - - reset_all_creation_markers(); } void stm_abort_transaction(void) @@ -312,10 +311,10 @@ release_thread_segment(tl); /* includes the cond_broadcast(); */ STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; + reset_all_creation_markers(); + mutex_unlock(); - reset_all_creation_markers(); - assert(jmpbuf_ptr != NULL); assert(jmpbuf_ptr != (stm_jmpbuf_t *)-1); /* for tests only */ __builtin_longjmp(*jmpbuf_ptr, 1); From noreply at buildbot.pypy.org Sun Feb 16 15:30:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Feb 2014 15:30:16 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Carefully synchronize the threads in order to run a minor collection Message-ID: <20140216143016.C0E1B1C03B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r754:4fa29629edfa Date: 2014-02-16 15:30 +0100 http://bitbucket.org/pypy/stmgc/changeset/4fa29629edfa/ Log: Carefully synchronize the threads in order to run a minor collection diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -38,7 +38,7 @@ } else if (wait) { /* otherwise, we will issue a safe point and wait: */ - STM_PSEGMENT->safe_point = SP_SAFE_POINT; + STM_PSEGMENT->safe_point = SP_SAFE_POINT_CANNOT_COLLECT; /* signal the other thread; it must abort */ cond_broadcast(); diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -131,6 +131,10 @@ { long remote_num = 1 - STM_SEGMENT->segment_num; while (get_priv_segment(remote_num)->safe_point == SP_RUNNING) { + + /* we have the mutex here */ + get_segment(remote_num)->nursery_section_end = NSE_SIGNAL; + cond_wait(); } } diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -60,12 +60,13 @@ uint8_t write_lock_num; uint8_t safe_point; /* one of the SP_xxx constants */ uint8_t transaction_state; /* one of the TS_xxx constants */ + uintptr_t real_nursery_section_end; }; enum { SP_NO_TRANSACTION=0, SP_RUNNING, - SP_SAFE_POINT, + SP_SAFE_POINT_CANNOT_COLLECT, SP_SAFE_POINT_CAN_COLLECT, }; enum { diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -25,6 +25,7 @@ /************************************************************/ + static union { struct { uint64_t used; /* number of bytes from the nursery used so far */ @@ -32,6 +33,10 @@ char reserved[64]; } nursery_ctl __attribute__((aligned(64))); +static uint64_t requested_minor_collections = 0; +static uint64_t completed_minor_collections = 0; + + /************************************************************/ static void setup_nursery(void) @@ -50,32 +55,142 @@ } +/************************************************************/ + + +static void minor_collection(void) +{ + fprintf(stderr, "minor_collection\n"); + abort(); //...; + + assert(requested_minor_collections == completed_minor_collections + 1); + completed_minor_collections += 1; + nursery_ctl.used = 0; +} + + +static void sync_point_for_collection(void) +{ + mutex_lock(); + + STM_PSEGMENT->safe_point = SP_SAFE_POINT_CAN_COLLECT; + + restart: + if (requested_minor_collections == completed_minor_collections) { + if (nursery_ctl.used < NURSERY_SIZE) + goto exit; 
+ + requested_minor_collections++; + } + + /* are all threads in a safe-point? */ + long i; + bool must_wait = false; + for (i = 0; i < NB_SEGMENTS; i++) { + struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i); + + if (other_pseg->safe_point != SP_NO_TRANSACTION && + other_pseg->safe_point != SP_SAFE_POINT_CAN_COLLECT) { + /* segment i is not at a safe point, or at one where + collection is not possible (SP_SAFE_POINT_CANNOT_COLLECT) */ + + /* we have the mutex here */ + other_pseg->pub.nursery_section_end = NSE_SIGNAL; + must_wait = true; + } + } + if (must_wait) { + /* wait until all threads are indeed in a safe-point that allows + collection */ + cond_wait(); + goto restart; + } + + /* now we can run minor collection */ + minor_collection(); + + exit: + /* we have the mutex here, and at this point there is no + pending requested minor collection, so we simply reset + our value of nursery_section_end and return. */ + STM_SEGMENT->nursery_section_end = + STM_PSEGMENT->real_nursery_section_end; + + STM_PSEGMENT->safe_point = SP_RUNNING; + + mutex_unlock(); +} + + +/************************************************************/ + #define NURSERY_ALIGN(bytes) \ (((bytes) + NURSERY_LINE - 1) & ~(NURSERY_LINE - 1)) static stm_char *allocate_from_nursery(uint64_t bytes) { + /* may collect! */ /* thread-safe; allocate a chunk of memory from the nursery */ bytes = NURSERY_ALIGN(bytes); - uint64_t p = __sync_fetch_and_add(&nursery_ctl.used, bytes); - if (p + bytes > NURSERY_SIZE) { - //major_collection(); - abort(); + while (1) { + uint64_t p = __sync_fetch_and_add(&nursery_ctl.used, bytes); + if (LIKELY(p + bytes <= NURSERY_SIZE)) { + return (stm_char *)(NURSERY_START + p); + } + sync_point_for_collection(); } - return (stm_char *)(NURSERY_START + p); } stm_char *_stm_allocate_slowpath(ssize_t size_rounded_up) { + /* may collect! */ + STM_SEGMENT->nursery_current -= size_rounded_up; /* restore correct val */ + + restart: + if (UNLIKELY(STM_SEGMENT->nursery_section_end == NSE_SIGNAL)) { + + /* If nursery_section_end was set to NSE_SIGNAL by another thread, + we end up here as soon as we try to call stm_allocate(). */ + sync_point_for_collection(); + + /* Once the sync point is done, retry. */ + goto restart; + } + if (size_rounded_up < MEDIUM_OBJECT) { - /* This is a small object. The current section is simply full. + /* This is a small object. We first try to check if the current + section really doesn't fit the object; maybe all we were called + for was the sync point above */ + stm_char *p1 = STM_SEGMENT->nursery_current; + stm_char *end1 = p1 + size_rounded_up; + if ((uintptr_t)end1 <= STM_PSEGMENT->real_nursery_section_end) { + /* fits */ + STM_SEGMENT->nursery_current = end1; + return p1; + } + + /* Otherwise, the current section is really full. Allocate the next section and initialize it with zeroes. */ stm_char *p = allocate_from_nursery(NURSERY_SECTION_SIZE); + STM_SEGMENT->nursery_current = p + size_rounded_up; + + /* Set nursery_section_end, but carefully: another thread may + have forced it to be equal to NSE_SIGNAL. 
*/ + uintptr_t end = (uintptr_t)p + NURSERY_SECTION_SIZE; + + if (UNLIKELY(!__sync_bool_compare_and_swap( + &STM_SEGMENT->nursery_section_end, + STM_PSEGMENT->real_nursery_section_end, + end))) { + assert(STM_SEGMENT->nursery_section_end == NSE_SIGNAL); + goto restart; + } + + STM_PSEGMENT->real_nursery_section_end = end; + memset(REAL_ADDRESS(STM_SEGMENT->segment_base, p), 0, NURSERY_SECTION_SIZE); - STM_SEGMENT->nursery_current = p + size_rounded_up; - STM_SEGMENT->nursery_section_end = (uintptr_t)p + NURSERY_SECTION_SIZE; /* Also fill the corresponding creation markers with 0xff. */ set_creation_markers(p, NURSERY_SECTION_SIZE, @@ -109,9 +224,18 @@ c = NURSERY_ALIGN(c); STM_SEGMENT->nursery_current = (stm_char *)c; - uint64_t size = STM_SEGMENT->nursery_section_end - c; + uint64_t size = STM_PSEGMENT->real_nursery_section_end - c; if (size > 0) { set_creation_markers((stm_char *)c, size, CM_CURRENT_TRANSACTION_IN_NURSERY); } } + +#ifdef STM_TESTS +void _stm_set_nursery_free_count(uint64_t free_count) +{ + assert(free_count == NURSERY_ALIGN(free_count)); + assert(nursery_ctl.used <= NURSERY_SIZE - free_count); + nursery_ctl.used = NURSERY_SIZE - free_count; +} +#endif diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -1,2 +1,4 @@ + +#define NSE_SIGNAL 1 static void align_nursery_at_transaction_start(void); diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -8,7 +8,7 @@ /* call remap_file_pages() to make all pages in the range(pagenum, pagenum+count) refer to the same physical range of pages from segment 0 */ - long i; + uintptr_t i; for (i = 1; i < NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); int res = remap_file_pages(segment_base + pagenum * 4096UL, diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -67,6 +67,9 @@ static inline void mutex_unlock(void) { + assert(STM_PSEGMENT->safe_point == SP_NO_TRANSACTION || + STM_PSEGMENT->safe_point == SP_RUNNING); + if (UNLIKELY(pthread_mutex_unlock(&sync_ctl.global_mutex) != 0)) { perror("pthread_mutex_unlock"); abort(); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -62,7 +62,8 @@ int segment_num; char *segment_base; stm_char *nursery_current; - uintptr_t nursery_section_end; + uintptr_t nursery_section_end; /* forced to 1 by + sync_all_threads_for_collection() */ struct stm_thread_local_s *running_thread; stm_jmpbuf_t *jmpbuf_ptr; }; @@ -96,6 +97,7 @@ void _stm_large_dump(void); void _stm_start_safe_point(void); void _stm_stop_safe_point(void); +void _stm_set_nursery_free_count(uint64_t free_count); #endif #define _STM_GCFLAG_WRITE_BARRIER_CALLED 0x80 diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -69,6 +69,8 @@ void _stm_start_safe_point(void); bool _check_stop_safe_point(void); + +void _stm_set_nursery_free_count(uint64_t free_count); """) diff --git a/c7/test/test_nursery.py b/c7/test/test_nursery.py --- a/c7/test/test_nursery.py +++ b/c7/test/test_nursery.py @@ -49,3 +49,15 @@ self.commit_transaction() assert stm_creation_marker(lp1) == 0 assert stm_creation_marker(lp2) == 0 + + def test_nursery_full(self): + lib._stm_set_nursery_free_count((SOME_MEDIUM_SIZE + 255) & ~255) + self.push_root_no_gc() + self.start_transaction() + lp1 = stm_allocate(SOME_MEDIUM_SIZE) + self.pop_root() + # + self.push_root(lp1) + lp2 = stm_allocate(16) + lp1b = self.pop_root() + assert lp1b != lp1 # collection occurred From noreply at 
buildbot.pypy.org Sun Feb 16 19:17:58 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Feb 2014 19:17:58 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: in-progress Message-ID: <20140216181758.BBAAF1C03B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r755:2248bbaba3b2 Date: 2014-02-16 19:17 +0100 http://bitbucket.org/pypy/stmgc/changeset/2248bbaba3b2/ Log: in-progress diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -5,9 +5,9 @@ static void contention_management(uint8_t other_segment_num, bool wait) { - /* A simple contention manager. Called when we do stm_write() - on an object, but some other thread already holds the write - lock on the same object. */ + /* A simple contention manager. Called when some other thread + holds the write lock on an object. The current thread tries + to do either a write or a read on it. */ assert_has_mutex(); assert(other_segment_num != STM_SEGMENT->segment_num); diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -127,19 +127,7 @@ # error "The logic in the functions below only works with two segments" #endif -static void wait_for_other_safe_points(void) -{ - long remote_num = 1 - STM_SEGMENT->segment_num; - while (get_priv_segment(remote_num)->safe_point == SP_RUNNING) { - - /* we have the mutex here */ - get_segment(remote_num)->nursery_section_end = NSE_SIGNAL; - - cond_wait(); - } -} - -static void detect_write_read_conflicts(void) +static bool detect_write_read_conflicts(void) { long remote_num = 1 - STM_SEGMENT->segment_num; char *remote_base = get_segment_base(remote_num); @@ -148,7 +136,7 @@ switch (get_priv_segment(remote_num)->transaction_state) { case TS_NONE: case TS_MUST_ABORT: - return; /* no need to do any check */ + return false; /* no need to do any check */ } LIST_FOREACH_R( @@ -161,9 +149,11 @@ /* If we reach this point, it means we aborted the other thread. We're done here. */ - return; + return true; } })); + + return false; } static void push_modified_to_other_segments(void) @@ -207,8 +197,11 @@ void stm_commit_transaction(void) { mutex_lock(); + assert(STM_PSEGMENT->safe_point = SP_RUNNING); + STM_PSEGMENT->safe_point = SP_SAFE_POINT_CAN_COLLECT; + restart: switch (STM_PSEGMENT->transaction_state) { case TS_REGULAR: @@ -223,10 +216,14 @@ } /* wait until the other thread is at a safe-point */ - wait_for_other_safe_points(); + wait_for_other_safe_points(SP_SAFE_POINT_CANNOT_COLLECT); + + /* the rest of this function runs either atomically without releasing + the mutex, or it needs to restart. 
*/ /* detect conflicts */ - detect_write_read_conflicts(); + if (UNLIKELY(detect_write_read_conflicts())) + goto restart; /* cannot abort any more from here */ assert(STM_PSEGMENT->transaction_state != TS_MUST_ABORT); @@ -237,7 +234,7 @@ /* done */ stm_thread_local_t *tl = STM_SEGMENT->running_thread; - release_thread_segment(tl); /* includes the cond_broadcast(); */ + release_thread_segment(tl); STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; reset_all_creation_markers(); @@ -312,11 +309,12 @@ stm_jmpbuf_t *jmpbuf_ptr = STM_SEGMENT->jmpbuf_ptr; stm_thread_local_t *tl = STM_SEGMENT->running_thread; - release_thread_segment(tl); /* includes the cond_broadcast(); */ + release_thread_segment(tl); STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; reset_all_creation_markers(); + cond_broadcast(); mutex_unlock(); assert(jmpbuf_ptr != NULL); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -33,9 +33,6 @@ char reserved[64]; } nursery_ctl __attribute__((aligned(64))); -static uint64_t requested_minor_collections = 0; -static uint64_t completed_minor_collections = 0; - /************************************************************/ @@ -58,64 +55,82 @@ /************************************************************/ -static void minor_collection(void) + +static void minor_trace_roots(void) { + stm_thread_local_t *tl = stm_thread_locals; + do { + object_t **current = tl->shadowstack; + object_t **base = tl->shadowstack_base; + while (current-- != base) { + minor_trace_if_young(current); + } + tl = tl->next; + } while (tl != stm_thread_locals); +} + +static void do_minor_collection(void) +{ + minor_trace_roots(); + + /* visit shadowstack & add to old_obj_to_trace */ + object_t **current = _STM_TL->shadow_stack; + object_t **base = _STM_TL->shadow_stack_base; + while (current-- != base) { + trace_if_young(current); + } + + + + fprintf(stderr, "minor_collection\n"); abort(); //...; + + /* reset all segments' nursery_section_end, as well as nursery_ctl.used */ + long i; + for (i = 0; i < NB_SEGMENTS; i++) { + get_segment(i)->nursery_section_end = 0; + get_priv_segment(i)->real_nursery_section_end = 0; + } + nursery_ctl.used = 0; + + /* done */ assert(requested_minor_collections == completed_minor_collections + 1); completed_minor_collections += 1; - nursery_ctl.used = 0; } -static void sync_point_for_collection(void) +static void restore_nursery_section_end(uintptr_t prev_value) { + __sync_bool_compare_and_swap(&STM_SEGMENT->v_nursery_section_end, + prev_value, + STM_PSEGMENT->real_nursery_section_end); +} + +static void stm_minor_collection(uint64_t request_size) +{ + /* Run a minor collection --- but only if we can't get 'request_size' + bytes out of the nursery; if we can, no-op. */ mutex_lock(); + assert(STM_PSEGMENT->safe_point == SP_RUNNING); STM_PSEGMENT->safe_point = SP_SAFE_POINT_CAN_COLLECT; restart: - if (requested_minor_collections == completed_minor_collections) { - if (nursery_ctl.used < NURSERY_SIZE) - goto exit; + /* We just waited here, either from mutex_lock() or from cond_wait(), + so we should check again if another thread did the minor + collection itself */ + if (nursery_ctl.used + bytes <= NURSERY_SIZE) + goto exit; - requested_minor_collections++; - } + if (!try_wait_for_other_safe_points(SP_SAFE_POINT_CAN_COLLECT)) + goto restart; - /* are all threads in a safe-point? 
*/ - long i; - bool must_wait = false; - for (i = 0; i < NB_SEGMENTS; i++) { - struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i); - - if (other_pseg->safe_point != SP_NO_TRANSACTION && - other_pseg->safe_point != SP_SAFE_POINT_CAN_COLLECT) { - /* segment i is not at a safe point, or at one where - collection is not possible (SP_SAFE_POINT_CANNOT_COLLECT) */ - - /* we have the mutex here */ - other_pseg->pub.nursery_section_end = NSE_SIGNAL; - must_wait = true; - } - } - if (must_wait) { - /* wait until all threads are indeed in a safe-point that allows - collection */ - cond_wait(); - goto restart; - } - - /* now we can run minor collection */ - minor_collection(); + /* now we can run our minor collection */ + do_minor_collection(); exit: - /* we have the mutex here, and at this point there is no - pending requested minor collection, so we simply reset - our value of nursery_section_end and return. */ - STM_SEGMENT->nursery_section_end = - STM_PSEGMENT->real_nursery_section_end; - STM_PSEGMENT->safe_point = SP_RUNNING; mutex_unlock(); @@ -137,7 +152,9 @@ if (LIKELY(p + bytes <= NURSERY_SIZE)) { return (stm_char *)(NURSERY_START + p); } - sync_point_for_collection(); + + /* nursery full! */ + stm_minor_collection(bytes); } } @@ -147,47 +164,21 @@ /* may collect! */ STM_SEGMENT->nursery_current -= size_rounded_up; /* restore correct val */ - restart: - if (UNLIKELY(STM_SEGMENT->nursery_section_end == NSE_SIGNAL)) { - - /* If nursery_section_end was set to NSE_SIGNAL by another thread, - we end up here as soon as we try to call stm_allocate(). */ - sync_point_for_collection(); - - /* Once the sync point is done, retry. */ - goto restart; - } + if (collectable_safe_point()) + return stm_allocate(size_rounded_up); if (size_rounded_up < MEDIUM_OBJECT) { - /* This is a small object. We first try to check if the current - section really doesn't fit the object; maybe all we were called - for was the sync point above */ - stm_char *p1 = STM_SEGMENT->nursery_current; - stm_char *end1 = p1 + size_rounded_up; - if ((uintptr_t)end1 <= STM_PSEGMENT->real_nursery_section_end) { - /* fits */ - STM_SEGMENT->nursery_current = end1; - return p1; - } - - /* Otherwise, the current section is really full. + /* This is a small object. The current section is really full. Allocate the next section and initialize it with zeroes. */ stm_char *p = allocate_from_nursery(NURSERY_SECTION_SIZE); STM_SEGMENT->nursery_current = p + size_rounded_up; - /* Set nursery_section_end, but carefully: another thread may + /* Set v_nursery_section_end, but carefully: another thread may have forced it to be equal to NSE_SIGNAL. 
*/ uintptr_t end = (uintptr_t)p + NURSERY_SECTION_SIZE; - - if (UNLIKELY(!__sync_bool_compare_and_swap( - &STM_SEGMENT->nursery_section_end, - STM_PSEGMENT->real_nursery_section_end, - end))) { - assert(STM_SEGMENT->nursery_section_end == NSE_SIGNAL); - goto restart; - } - + uintptr_t prev_end = STM_PSEGMENT->real_nursery_section_end; STM_PSEGMENT->real_nursery_section_end = end; + restore_nursery_section_end(prev_end); memset(REAL_ADDRESS(STM_SEGMENT->segment_base, p), 0, NURSERY_SECTION_SIZE); diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -1,4 +1,7 @@ -#define NSE_SIGNAL 1 +/* special values of 'v_nursery_section_end' */ +#define NSE_SIGNAL 1 +#define NSE_SIGNAL_DONE 2 static void align_nursery_at_transaction_start(void); +static void restore_nursery_section_end(uintptr_t prev_value); diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -152,8 +152,6 @@ assert(sync_ctl.in_use[tl->associated_segment_num] == 1); sync_ctl.in_use[tl->associated_segment_num] = 0; - - cond_broadcast(); } static bool _running_transaction(void) @@ -189,7 +187,103 @@ assert(STM_PSEGMENT->safe_point == SP_SAFE_POINT_CAN_COLLECT); STM_PSEGMENT->safe_point = SP_RUNNING; + restore_nursery_section_end(NSE_SIGNAL_DONE); if (STM_PSEGMENT->transaction_state == TS_MUST_ABORT) stm_abort_transaction(); } #endif + + +static bool try_wait_for_other_safe_points(int requested_safe_point_kind) +{ + /* Must be called with the mutex. If all other threads are in a + safe point of at least the requested kind, returns true. Otherwise, + asks them to enter a safe point, issues a cond_wait(), and returns + false; you can call repeatedly this function in this case. + + When this function returns true, the other threads are all + blocked at safe points as requested, until the next time we + unlock the mutex (with mutex_unlock() or cond_wait()). + + This function requires that the calling thread is in a safe-point + right now, so there is no deadlock if one thread calls + wait_for_other_safe_points() while another is currently blocked + in the cond_wait() in this same function. + */ + assert_has_mutex(); + assert(STM_PSEGMENT->safe_point == SP_SAFE_POINT_CAN_COLLECT); + + long i; + bool must_wait = false; + for (i = 0; i < NB_SEGMENTS; i++) { + if (i == STM_SEGMENT->segment_num) + continue; /* ignore myself */ + + struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i); + if (other_pseg->safe_point == SP_RUNNING || + (requested_safe_point_kind == SP_SAFE_POINT_CAN_COLLECT && + other_pseg->safe_point == SP_SAFE_POINT_CANNOT_COLLECT)) { + + /* we need to wait for this thread. Use NSE_SIGNAL to + ask it to enter a safe-point soon. */ + other_pseg->pub.v_nursery_section_end = NSE_SIGNAL; + must_wait = true; + } + } + if (must_wait) { + cond_wait(); + return false; + } + + /* done! All NSE_SIGNAL threads become NSE_SIGNAL_DONE now, which + mean they will actually run again the next time they grab the + mutex. 
*/ + for (i = 0; i < NB_SEGMENTS; i++) { + if (i == STM_SEGMENT->segment_num) + continue; /* ignore myself */ + + struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i); + if (other_pseg->v_nursery_section_end == NSE_SIGNAL) + other_pseg->v_nursery_section_end = NSE_SIGNAL_DONE; + } + cond_broadcast(); /* to wake up the other threads, but later, + when they get the mutex again */ + return true; +} + +static void wait_for_other_safe_points(int requested_safe_point_kind) +{ + while (!try_wait_for_other_safe_points(requested_safe_point_kind)) + /* repeat */; +} + +static bool collectable_safe_point(void) +{ + bool any_operation = false; + restart:; + switch (STM_SEGMENT->v_nursery_section_end) { + + case NSE_SIGNAL: + /* If nursery_section_end was set to NSE_SIGNAL by another thread, + we end up here as soon as we try to call stm_allocate(). + See try_wait_for_other_safe_points() for details. */ + mutex_lock(); + STM_PSEGMENT->safe_point = SP_SAFE_POINT_CAN_COLLECT; + cond_broadcast(); + cond_wait(); + STM_PSEGMENT->safe_point = SP_RUNNING; + mutex_unlock(); + + /* Once the sync point is done, retry. */ + any_operation = true; + goto restart; + + case NSE_SIGNAL_DONE: + restore_nursery_section_end(NSE_SIGNAL_DONE); + any_operation = true; + break; + + default:; + } + return any_operation; +} diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -13,3 +13,8 @@ (must have the mutex acquired!) */ static void acquire_thread_segment(stm_thread_local_t *tl); static void release_thread_segment(stm_thread_local_t *tl); + +/* see the source for an exact description */ +static void wait_for_other_safe_points(int requested_safe_point_kind); +static bool try_wait_for_other_safe_points(int requested_safe_point_kind); +static bool collectable_safe_point(void); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -62,8 +62,7 @@ int segment_num; char *segment_base; stm_char *nursery_current; - uintptr_t nursery_section_end; /* forced to 1 by - sync_all_threads_for_collection() */ + volatile uintptr_t v_nursery_section_end; /* see nursery.h */ struct stm_thread_local_s *running_thread; stm_jmpbuf_t *jmpbuf_ptr; }; From noreply at buildbot.pypy.org Sun Feb 16 19:33:22 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Sun, 16 Feb 2014 19:33:22 +0100 (CET) Subject: [pypy-commit] pypy optimize-int-and: failing and passing test Message-ID: <20140216183322.32EE71C03B3@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: optimize-int-and Changeset: r69172:bb6064d9cde5 Date: 2014-02-16 02:36 +0100 http://bitbucket.org/pypy/pypy/changeset/bb6064d9cde5/ Log: failing and passing test diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5298,6 +5298,30 @@ """ self.optimize_loop(ops, expected) + def test_intand_1mask_covering_bitrange(self): + ops = """ + [p0] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = int_and(i0, 255) + jump(i1) + """ + + expected = """ + [p0] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + jump(i0) + """ + self.optimize_loop(ops, expected) + + def test_intand_maskwith0_in_bitrange(self): + ops = """ + [p0] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = int_and(i0, 257) + jump(i1) + """ + self.optimize_loop(ops, ops) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass From 
noreply at buildbot.pypy.org Sun Feb 16 19:33:23 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Sun, 16 Feb 2014 19:33:23 +0100 (CET) Subject: [pypy-commit] pypy optimize-int-and: cosmetic tweak to get(array|field) Message-ID: <20140216183323.4D9521C03B3@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: optimize-int-and Changeset: r69173:0ccecf7a9518 Date: 2014-02-16 11:30 +0100 http://bitbucket.org/pypy/pypy/changeset/0ccecf7a9518/ Log: cosmetic tweak to get(array|field) diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -342,7 +342,7 @@ if descr.is_integer_bounded(): v1 = self.getvalue(op.result) v1.intbound.make_ge(IntLowerBound(descr.get_integer_min())) - v1.intbound.make_lt(IntUpperBound(descr.get_integer_max() + 1)) + v1.intbound.make_le(IntUpperBound(descr.get_integer_max())) optimize_GETFIELD_GC = optimize_GETFIELD_RAW @@ -354,8 +354,7 @@ if descr and descr.is_item_integer_bounded(): v1 = self.getvalue(op.result) v1.intbound.make_ge(IntLowerBound(descr.get_item_integer_min())) - v1.intbound.make_lt( - IntUpperBound(descr.get_item_integer_max() + 1)) + v1.intbound.make_le(IntUpperBound(descr.get_item_integer_max())) optimize_GETARRAYITEM_GC = optimize_GETARRAYITEM_RAW From noreply at buildbot.pypy.org Sun Feb 16 19:33:24 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Sun, 16 Feb 2014 19:33:24 +0100 (CET) Subject: [pypy-commit] pypy optimize-int-and: fix and improve test Message-ID: <20140216183324.773001C03B3@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: optimize-int-and Changeset: r69174:92dba00162ff Date: 2014-02-16 13:35 +0100 http://bitbucket.org/pypy/pypy/changeset/92dba00162ff/ Log: fix and improve test diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -86,6 +86,13 @@ v2 = self.getvalue(op.getarg(1)) if v1.is_null() or v2.is_null(): self.make_constant_int(op.result, 0) + elif v2.is_constant(): + val = v2.box.getint() + if val == -1 or v1.intbound.lower >= 0 and \ + v1.intbound.upper <= val & ~(val + 1): + self.make_equal_to(op.result, v1) + else: + self.emit_operation(op) else: self.emit_operation(op) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5303,7 +5303,8 @@ [p0] i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) i1 = int_and(i0, 255) - jump(i1) + i2 = int_and(i1, -1) + jump(i2) """ expected = """ From noreply at buildbot.pypy.org Sun Feb 16 19:33:25 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Sun, 16 Feb 2014 19:33:25 +0100 (CET) Subject: [pypy-commit] pypy optimize-int-and: fix another test, make optimization symetric Message-ID: <20140216183325.903581C03B3@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: optimize-int-and Changeset: r69175:caba2738c3ab Date: 2014-02-16 15:56 +0100 http://bitbucket.org/pypy/pypy/changeset/caba2738c3ab/ Log: fix another test, make optimization symetric diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ 
b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -86,15 +86,21 @@ v2 = self.getvalue(op.getarg(1)) if v1.is_null() or v2.is_null(): self.make_constant_int(op.result, 0) + return elif v2.is_constant(): val = v2.box.getint() - if val == -1 or v1.intbound.lower >= 0 and \ - v1.intbound.upper <= val & ~(val + 1): + if val == -1 or v1.intbound.lower >= 0 \ + and v1.intbound.upper <= val & ~(val + 1): self.make_equal_to(op.result, v1) - else: - self.emit_operation(op) - else: - self.emit_operation(op) + return + elif v1.is_constant(): + val = v1.box.getint() + if val == -1 or v2.intbound.lower >= 0 \ + and v2.intbound.upper <= val & ~(val + 1): + self.make_equal_to(op.result, v2) + return + + self.emit_operation(op) def optimize_INT_OR(self, op): v1 = self.getvalue(op.getarg(0)) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -4753,7 +4753,8 @@ def test_bound_and(self): ops = """ - [i0] + [] + i0 = escape() i1 = int_and(i0, 255) i2 = int_lt(i1, 500) guard_true(i2) [] @@ -4779,10 +4780,11 @@ guard_true(i14) [] i15 = int_ge(i1, 20) guard_true(i15) [] - jump(i1) - """ - expected = """ - [i0] + jump() + """ + expected = """ + [] + i0 = escape() i1 = int_and(i0, 255) i12 = int_lt(i1, 100) guard_true(i12) [] @@ -4792,7 +4794,7 @@ guard_true(i14) [] i15 = int_ge(i1, 20) guard_true(i15) [] - jump(i1) + jump() """ self.optimize_loop(ops, expected) From noreply at buildbot.pypy.org Sun Feb 16 19:33:26 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Sun, 16 Feb 2014 19:33:26 +0100 (CET) Subject: [pypy-commit] pypy optimize-int-and: improve test Message-ID: <20140216183326.B98981C03B3@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: optimize-int-and Changeset: r69176:242fd9b72daa Date: 2014-02-16 16:23 +0100 http://bitbucket.org/pypy/pypy/changeset/242fd9b72daa/ Log: improve test diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5304,7 +5304,8 @@ i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) i1 = int_and(i0, 255) i2 = int_and(i1, -1) - jump(i2) + i3 = int_and(511, i2) + jump(i3) """ expected = """ @@ -5319,7 +5320,9 @@ [p0] i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) i1 = int_and(i0, 257) - jump(i1) + i2 = getarrayitem_gc(p0, 1, descr=chararraydescr)a + i3 = int_and(259, i2) + jump(i1, i3) """ self.optimize_loop(ops, ops) From noreply at buildbot.pypy.org Sun Feb 16 19:33:27 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Sun, 16 Feb 2014 19:33:27 +0100 (CET) Subject: [pypy-commit] pypy optimize-int-and: update whatsnew Message-ID: <20140216183327.C8F271C03B3@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: optimize-int-and Changeset: r69177:92fee8613f8c Date: 2014-02-16 19:15 +0100 http://bitbucket.org/pypy/pypy/changeset/92fee8613f8c/ Log: update whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -74,3 +74,7 @@ .. branch: array-propagate-len Kill some guards and operations in JIT traces by adding integer bounds propagation for getfield_(raw|gc) and getarrayitem_(raw|gc). + +.. 
branch: optimize-int-and: +Optimize away INT_AND with constant mask of 1s that fully cover the bitrange +of other operand. From noreply at buildbot.pypy.org Sun Feb 16 19:33:28 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Sun, 16 Feb 2014 19:33:28 +0100 (CET) Subject: [pypy-commit] pypy optimize-int-and: oups, typo Message-ID: <20140216183328.E574A1C03B3@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: optimize-int-and Changeset: r69178:111057c44543 Date: 2014-02-16 19:30 +0100 http://bitbucket.org/pypy/pypy/changeset/111057c44543/ Log: oups, typo diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5320,7 +5320,7 @@ [p0] i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) i1 = int_and(i0, 257) - i2 = getarrayitem_gc(p0, 1, descr=chararraydescr)a + i2 = getarrayitem_gc(p0, 1, descr=chararraydescr) i3 = int_and(259, i2) jump(i1, i3) """ From noreply at buildbot.pypy.org Sun Feb 16 19:33:30 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Feb 2014 19:33:30 +0100 (CET) Subject: [pypy-commit] pypy optimize-int-and: Close branch, ready for merge Message-ID: <20140216183330.061421C03B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: optimize-int-and Changeset: r69179:53ccdc361307 Date: 2014-02-16 19:31 +0100 http://bitbucket.org/pypy/pypy/changeset/53ccdc361307/ Log: Close branch, ready for merge From noreply at buildbot.pypy.org Sun Feb 16 19:33:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 16 Feb 2014 19:33:31 +0100 (CET) Subject: [pypy-commit] pypy default: hg merge optimize-int-and (by squeaky_pl) Message-ID: <20140216183331.23F501C03B3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69180:4346099d1d4f Date: 2014-02-16 19:32 +0100 http://bitbucket.org/pypy/pypy/changeset/4346099d1d4f/ Log: hg merge optimize-int-and (by squeaky_pl) Optimize away INT_AND with constant mask of 1s that fully cover the bitrange of other operand. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -74,3 +74,7 @@ .. branch: array-propagate-len Kill some guards and operations in JIT traces by adding integer bounds propagation for getfield_(raw|gc) and getarrayitem_(raw|gc). + +.. branch: optimize-int-and: +Optimize away INT_AND with constant mask of 1s that fully cover the bitrange +of other operand. 
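In concrete terms, the check merged below (see optimize_INT_AND in the rewrite.py hunk further down) removes an INT_AND when one argument is a constant val and the other argument's known integer bounds already fit inside the low run of 1 bits of val: the expression val & ~(val + 1) isolates that low run, and val == -1 trivially covers everything. The short sketch below only replays that condition on the same values used by the tests in this merge; the helper name mask_covers_bounds and the standalone framing are illustrative, not part of the merged code.

    def mask_covers_bounds(val, lower, upper):
        # True when `x & val == x` for every x with lower <= x <= upper.
        # val & ~(val + 1) keeps only the low-order run of contiguous 1
        # bits of val; a non-negative x no larger than that run cannot
        # lose any bit in the AND, so the operation is a no-op.
        return val == -1 or (lower >= 0 and upper <= (val & ~(val + 1)))

    # A char read with getarrayitem_gc from a chararray is known to fit
    # in [0, 255] thanks to the bounds propagation in intbounds.py below.
    assert mask_covers_bounds(255, 0, 255)      # int_and(i0, 255) is removed
    assert mask_covers_bounds(-1, 0, 255)       # int_and(i1, -1) is removed
    assert mask_covers_bounds(511, 0, 255)      # int_and(511, i2) is removed
    assert not mask_covers_bounds(257, 0, 255)  # 257 = 0b100000001: low run is 1
    assert not mask_covers_bounds(259, 0, 255)  # 259 = 0b100000011: low run is 3
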
diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -342,7 +342,7 @@ if descr.is_integer_bounded(): v1 = self.getvalue(op.result) v1.intbound.make_ge(IntLowerBound(descr.get_integer_min())) - v1.intbound.make_lt(IntUpperBound(descr.get_integer_max() + 1)) + v1.intbound.make_le(IntUpperBound(descr.get_integer_max())) optimize_GETFIELD_GC = optimize_GETFIELD_RAW @@ -354,8 +354,7 @@ if descr and descr.is_item_integer_bounded(): v1 = self.getvalue(op.result) v1.intbound.make_ge(IntLowerBound(descr.get_item_integer_min())) - v1.intbound.make_lt( - IntUpperBound(descr.get_item_integer_max() + 1)) + v1.intbound.make_le(IntUpperBound(descr.get_item_integer_max())) optimize_GETARRAYITEM_GC = optimize_GETARRAYITEM_RAW diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -86,8 +86,21 @@ v2 = self.getvalue(op.getarg(1)) if v1.is_null() or v2.is_null(): self.make_constant_int(op.result, 0) - else: - self.emit_operation(op) + return + elif v2.is_constant(): + val = v2.box.getint() + if val == -1 or v1.intbound.lower >= 0 \ + and v1.intbound.upper <= val & ~(val + 1): + self.make_equal_to(op.result, v1) + return + elif v1.is_constant(): + val = v1.box.getint() + if val == -1 or v2.intbound.lower >= 0 \ + and v2.intbound.upper <= val & ~(val + 1): + self.make_equal_to(op.result, v2) + return + + self.emit_operation(op) def optimize_INT_OR(self, op): v1 = self.getvalue(op.getarg(0)) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5298,6 +5298,34 @@ """ self.optimize_loop(ops, expected) + def test_intand_1mask_covering_bitrange(self): + ops = """ + [p0] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = int_and(i0, 255) + i2 = int_and(i1, -1) + i3 = int_and(511, i2) + jump(i3) + """ + + expected = """ + [p0] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + jump(i0) + """ + self.optimize_loop(ops, expected) + + def test_intand_maskwith0_in_bitrange(self): + ops = """ + [p0] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = int_and(i0, 257) + i2 = getarrayitem_gc(p0, 1, descr=chararraydescr) + i3 = int_and(259, i2) + jump(i1, i3) + """ + self.optimize_loop(ops, ops) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -4753,7 +4753,8 @@ def test_bound_and(self): ops = """ - [i0] + [] + i0 = escape() i1 = int_and(i0, 255) i2 = int_lt(i1, 500) guard_true(i2) [] @@ -4779,10 +4780,11 @@ guard_true(i14) [] i15 = int_ge(i1, 20) guard_true(i15) [] - jump(i1) - """ - expected = """ - [i0] + jump() + """ + expected = """ + [] + i0 = escape() i1 = int_and(i0, 255) i12 = int_lt(i1, 100) guard_true(i12) [] @@ -4792,7 +4794,7 @@ guard_true(i14) [] i15 = int_ge(i1, 20) guard_true(i15) [] - jump(i1) + jump() """ self.optimize_loop(ops, expected) From noreply at buildbot.pypy.org 
Sun Feb 16 21:22:19 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Sun, 16 Feb 2014 21:22:19 +0100 (CET) Subject: [pypy-commit] pypy default: fix array test that was failing after int bounds propagation from array operations Message-ID: <20140216202219.761511C03B3@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: Changeset: r69181:f9b8b76b4c41 Date: 2014-02-16 21:10 +0100 http://bitbucket.org/pypy/pypy/changeset/f9b8b76b4c41/ Log: fix array test that was failing after int bounds propagation from array operations diff --git a/pypy/module/pypyjit/test_pypy_c/test_array.py b/pypy/module/pypyjit/test_pypy_c/test_array.py --- a/pypy/module/pypyjit/test_pypy_c/test_array.py +++ b/pypy/module/pypyjit/test_pypy_c/test_array.py @@ -67,26 +67,46 @@ log = self.run(main, []) assert log.result == 73574560 loop, = log.loops_by_filename(self.filepath) - assert loop.match(""" - i13 = int_lt(i8, 307200) - guard_true(i13, descr=...) - guard_not_invalidated(descr=...) - # the bound check guard on img has been killed (thanks to the asserts) - i14 = getarrayitem_raw(i10, i8, descr=) - i15 = int_add_ovf(i9, i14) - guard_no_overflow(descr=...) - i17 = int_sub(i8, 640) - # the bound check guard on intimg has been killed (thanks to the asserts) - i18 = getarrayitem_raw(i11, i17, descr=) - i19 = int_add_ovf(i18, i15) - guard_no_overflow(descr=...) - # on 64bit, there is a guard checking that i19 actually fits into 32bit - ... - setarrayitem_raw(i11, i8, _, descr=) - i28 = int_add(i8, 1) - --TICK-- - jump(..., descr=...) - """) + + if sys.maxint == 2 ** 31 - 1: + assert loop.match(""" + i13 = int_lt(i8, 307200) + guard_true(i13, descr=...) + guard_not_invalidated(descr=...) + # the bound check guard on img has been killed (thanks to the asserts) + i14 = getarrayitem_raw(i10, i8, descr=) + i15 = int_add_ovf(i9, i14) + guard_no_overflow(descr=...) + i17 = int_sub(i8, 640) + # the bound check guard on intimg has been killed (thanks to the asserts) + i18 = getarrayitem_raw(i11, i17, descr=) + i19 = int_add_ovf(i18, i15) + guard_no_overflow(descr=...) + setarrayitem_raw(i11, i8, _, descr=) + i28 = int_add(i8, 1) + --TICK-- + jump(..., descr=...) + """) + elif sys.maxint == 2 ** 63 - 1: + assert loop.match(""" + i13 = int_lt(i8, 307200) + guard_true(i13, descr=...) + guard_not_invalidated(descr=...) + # the bound check guard on img has been killed (thanks to the asserts) + i14 = getarrayitem_raw(i10, i8, descr=) + i15 = int_add(i9, i14) + i17 = int_sub(i8, 640) + # the bound check guard on intimg has been killed (thanks to the asserts) + i18 = getarrayitem_raw(i11, i17, descr=) + i19 = int_add(i18, i15) + # on 64bit, there is a guard checking that i19 actually fits into 32bit + ... + setarrayitem_raw(i11, i8, _, descr=) + i28 = int_add(i8, 1) + --TICK-- + jump(..., descr=...) 
+ """) + def test_array_of_doubles(self): def main(): From noreply at buildbot.pypy.org Mon Feb 17 02:05:13 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Mon, 17 Feb 2014 02:05:13 +0100 (CET) Subject: [pypy-commit] pypy default: typo angered test_whatsnew Message-ID: <20140217010513.993151C03B3@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: Changeset: r69182:00f906eb59a1 Date: 2014-02-17 02:03 +0100 http://bitbucket.org/pypy/pypy/changeset/00f906eb59a1/ Log: typo angered test_whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -75,6 +75,6 @@ Kill some guards and operations in JIT traces by adding integer bounds propagation for getfield_(raw|gc) and getarrayitem_(raw|gc). -.. branch: optimize-int-and: +.. branch: optimize-int-and Optimize away INT_AND with constant mask of 1s that fully cover the bitrange of other operand. From noreply at buildbot.pypy.org Mon Feb 17 11:37:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 17 Feb 2014 11:37:14 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Some more comments and figuring out that a particular point is not Message-ID: <20140217103714.204A51C01F0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r756:2d3e642f4330 Date: 2014-02-17 11:36 +0100 http://bitbucket.org/pypy/stmgc/changeset/2d3e642f4330/ Log: Some more comments and figuring out that a particular point is not problematic after all diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -216,7 +216,8 @@ } /* wait until the other thread is at a safe-point */ - wait_for_other_safe_points(SP_SAFE_POINT_CANNOT_COLLECT); + if (!try_wait_for_other_safe_points(SP_SAFE_POINT_CANNOT_COLLECT)) + goto restart; /* the rest of this function runs either atomically without releasing the mutex, or it needs to restart. */ diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -69,35 +69,33 @@ } while (tl != stm_thread_locals); } +static void reset_all_nursery_section_ends(void) +{ + long i; + for (i = 0; i < NB_SEGMENTS; i++) { + struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i); + /* no race condition here, because all other threads are paused + in safe points, so cannot be e.g. in _stm_allocate_slowpath() */ + other_pseg->real_nursery_section_end = 0; + other_pseg->pub.v_nursery_section_end = 0; + } +} + static void do_minor_collection(void) { + /* all other threads are paused in safe points during the whole + minor collection */ + assert_has_mutex(); + minor_trace_roots(); - /* visit shadowstack & add to old_obj_to_trace */ - object_t **current = _STM_TL->shadow_stack; - object_t **base = _STM_TL->shadow_stack_base; - while (current-- != base) { - trace_if_young(current); - } - - - fprintf(stderr, "minor_collection\n"); abort(); //...; - /* reset all segments' nursery_section_end, as well as nursery_ctl.used */ - long i; - for (i = 0; i < NB_SEGMENTS; i++) { - get_segment(i)->nursery_section_end = 0; - get_priv_segment(i)->real_nursery_section_end = 0; - } nursery_ctl.used = 0; - - /* done */ - assert(requested_minor_collections == completed_minor_collections + 1); - completed_minor_collections += 1; + reset_all_nursery_section_ends(); } diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -202,12 +202,17 @@ false; you can call repeatedly this function in this case. 
When this function returns true, the other threads are all - blocked at safe points as requested, until the next time we - unlock the mutex (with mutex_unlock() or cond_wait()). + blocked at safe points as requested. They may be either in their + own cond_wait(), or running at SP_NO_TRANSACTION, in which case + they should not do anything related to stm until the next time + they call mutex_lock(). + + The next time we unlock the mutex (with mutex_unlock() or + cond_wait()), they will proceed. This function requires that the calling thread is in a safe-point right now, so there is no deadlock if one thread calls - wait_for_other_safe_points() while another is currently blocked + try_wait_for_other_safe_points() while another is currently blocked in the cond_wait() in this same function. */ assert_has_mutex(); @@ -251,12 +256,6 @@ return true; } -static void wait_for_other_safe_points(int requested_safe_point_kind) -{ - while (!try_wait_for_other_safe_points(requested_safe_point_kind)) - /* repeat */; -} - static bool collectable_safe_point(void) { bool any_operation = false; @@ -268,6 +267,7 @@ we end up here as soon as we try to call stm_allocate(). See try_wait_for_other_safe_points() for details. */ mutex_lock(); + assert(STM_PSEGMENT->safe_point == SP_RUNNING); STM_PSEGMENT->safe_point = SP_SAFE_POINT_CAN_COLLECT; cond_broadcast(); cond_wait(); diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -15,6 +15,5 @@ static void release_thread_segment(stm_thread_local_t *tl); /* see the source for an exact description */ -static void wait_for_other_safe_points(int requested_safe_point_kind); static bool try_wait_for_other_safe_points(int requested_safe_point_kind); static bool collectable_safe_point(void); From noreply at buildbot.pypy.org Mon Feb 17 14:11:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 17 Feb 2014 14:11:07 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Compilation fixes Message-ID: <20140217131107.7114E1C033D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r757:815b5b0f11f1 Date: 2014-02-17 14:10 +0100 http://bitbucket.org/pypy/stmgc/changeset/815b5b0f11f1/ Log: Compilation fixes diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -9,7 +9,7 @@ holds the write lock on an object. The current thread tries to do either a write or a read on it. */ - assert_has_mutex(); + assert(_has_mutex()); assert(other_segment_num != STM_SEGMENT->segment_num); /* Who should abort here: this thread, or the other thread? */ diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -55,6 +55,11 @@ /************************************************************/ +static void minor_trace_if_young(object_t **pobj) +{ + //... 
+ abort(); +} static void minor_trace_roots(void) { @@ -85,7 +90,7 @@ { /* all other threads are paused in safe points during the whole minor collection */ - assert_has_mutex(); + assert(_has_mutex()); minor_trace_roots(); @@ -119,7 +124,7 @@ /* We just waited here, either from mutex_lock() or from cond_wait(), so we should check again if another thread did the minor collection itself */ - if (nursery_ctl.used + bytes <= NURSERY_SIZE) + if (nursery_ctl.used + request_size <= NURSERY_SIZE) goto exit; if (!try_wait_for_other_safe_points(SP_SAFE_POINT_CAN_COLLECT)) @@ -163,7 +168,7 @@ STM_SEGMENT->nursery_current -= size_rounded_up; /* restore correct val */ if (collectable_safe_point()) - return stm_allocate(size_rounded_up); + return (stm_char *)stm_allocate(size_rounded_up); if (size_rounded_up < MEDIUM_OBJECT) { /* This is a small object. The current section is really full. diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -76,9 +76,9 @@ } } -static inline void assert_has_mutex(void) +static inline bool _has_mutex(void) { - assert(pthread_mutex_trylock(&sync_ctl.global_mutex) == EBUSY); + return pthread_mutex_trylock(&sync_ctl.global_mutex) == EBUSY; } static inline void cond_wait(void) @@ -110,7 +110,7 @@ { /* This function acquires a segment for the currently running thread, and set up the GS register if it changed. */ - assert_has_mutex(); + assert(_has_mutex()); assert(_is_tl_registered(tl)); retry:; @@ -145,7 +145,7 @@ static void release_thread_segment(stm_thread_local_t *tl) { - assert_has_mutex(); + assert(_has_mutex()); assert(STM_SEGMENT->running_thread == tl); STM_SEGMENT->running_thread = NULL; @@ -215,7 +215,7 @@ try_wait_for_other_safe_points() while another is currently blocked in the cond_wait() in this same function. */ - assert_has_mutex(); + assert(_has_mutex()); assert(STM_PSEGMENT->safe_point == SP_SAFE_POINT_CAN_COLLECT); long i; @@ -247,9 +247,9 @@ if (i == STM_SEGMENT->segment_num) continue; /* ignore myself */ - struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i); - if (other_pseg->v_nursery_section_end == NSE_SIGNAL) - other_pseg->v_nursery_section_end = NSE_SIGNAL_DONE; + struct stm_segment_info_s *other_seg = get_segment(i); + if (other_seg->v_nursery_section_end == NSE_SIGNAL) + other_seg->v_nursery_section_end = NSE_SIGNAL_DONE; } cond_broadcast(); /* to wake up the other threads, but later, when they get the mutex again */ diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -8,6 +8,7 @@ static void mutex_unlock(void); static void cond_wait(void); static void cond_broadcast(void); +static bool _has_mutex(void); /* acquire and release one of the segments for running the given thread (must have the mutex acquired!) 
*/ diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -185,7 +185,7 @@ stm_char *p = STM_SEGMENT->nursery_current; stm_char *end = p + size_rounded_up; STM_SEGMENT->nursery_current = end; - if (UNLIKELY((uintptr_t)end > STM_SEGMENT->nursery_section_end)) + if (UNLIKELY((uintptr_t)end > STM_SEGMENT->v_nursery_section_end)) p = _stm_allocate_slowpath(size_rounded_up); return (object_t *)p; } From noreply at buildbot.pypy.org Mon Feb 17 16:09:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 17 Feb 2014 16:09:34 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: in-progress Message-ID: <20140217150934.AB4241C01F0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r758:4ab38af77c28 Date: 2014-02-17 16:09 +0100 http://bitbucket.org/pypy/stmgc/changeset/4ab38af77c28/ Log: in-progress diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -38,6 +38,9 @@ /* objects that are allocated crossing a page boundary have this flag set */ GCFLAG_CROSS_PAGE = 0x02, + /* only used during collections to mark an obj as moved out of the + generation it was in */ + GCFLAG_MOVED = 0x04, }; #define CROSS_PAGE_BOUNDARY(start, stop) \ diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -3,6 +3,18 @@ #endif +/* Outside the nursery, we are taking from the highest addresses + complete pages, one at a time, which uniformly contain objects + of size "8 * N" for any "2 <= N < GC_N_SMALL_REQUESTS". We are + taking from the lowest addresses large objects, which are + guaranteed to be at least 256 bytes long (actually 288), + allocated by largemalloc.c. +*/ + +#define GC_N_SMALL_REQUESTS 36 +#define GC_MEDIUM_REQUEST (GC_N_SMALL_REQUESTS * 8) + + static void setup_gcpage(void) { /* NB. the very last page is not used, which allows a speed-up in @@ -11,21 +23,65 @@ uintptr_t length = (NB_PAGES - END_NURSERY_PAGE - 1) * 4096UL; largemalloc_init_arena(base, length); - uninitialized_page_start = (stm_char *)(END_NURSERY_PAGE * 4096UL); - uninitialized_page_stop = (stm_char *)((NB_PAGES - 1) * 4096UL); + uninitialized_page_start = stm_object_pages + END_NURSERY_PAGE * 4096UL; + uninitialized_page_stop = stm_object_pages + (NB_PAGES - 1) * 4096UL; + + assert(GC_MEDIUM_REQUEST >= (1 << 8)); +} + +static char *allocate_outside_nursery(uint64_t size) +{ + /* not thread-safe! Use only when holding the mutex */ + assert(_has_mutex()); + + OPT_ASSERT(size >= 16); + OPT_ASSERT((size & 7) == 0); + + uint64_t index = size / 8; + if (index < GC_N_SMALL_REQUESTS) { + assert(index >= 2); + // XXX! TEMPORARY! + return allocate_outside_nursery(GC_MEDIUM_REQUEST); + } + else { + /* The object is too large to fit inside the uniform pages. + Allocate it with largemalloc.c from the lower addresses */ + char *addr = large_malloc(size); + + if (addr + size > uninitialized_page_start) { + uintptr_t pagenum = + (uninitialized_page_start - stm_object_pages) / 4096UL; + uintptr_t pagecount = + (addr + size - uninitialized_page_start) / 4096UL + 20; + uintptr_t pagemax = + (uninitialized_page_stop - uninitialized_page_start) / 4096UL; + if (pagecount > pagemax) + pagecount = pagemax; + pages_initialize_shared(pagenum, pagecount); + + uninitialized_page_start += pagecount * 4096UL; + } + + assert(get_single_creation_marker( + (stm_char *)(addr - stm_object_pages)) == 0); + return addr; + } } object_t *_stm_allocate_old(ssize_t size_rounded_up) { - /* XXX not thread-safe! */ + /* XXX not thread-safe! 
and only for tests, don't use when a + transaction might be running! */ + assert(size_rounded_up >= 16); + assert((size_rounded_up & 7) == 0); + char *addr = large_malloc(size_rounded_up); - stm_char* o = (stm_char *)(addr - stm_object_pages); - if (o + size_rounded_up > uninitialized_page_start) { + if (addr + size_rounded_up > uninitialized_page_start) { uintptr_t pagenum = - ((uint64_t)uninitialized_page_start) / 4096UL; + (uninitialized_page_start - stm_object_pages) / 4096UL; uintptr_t pagecount = - (o + size_rounded_up - uninitialized_page_start) / 4096UL + 20; + (addr + size_rounded_up - uninitialized_page_start) / 4096UL + 20; uintptr_t pagemax = (uninitialized_page_stop - uninitialized_page_start) / 4096UL; if (pagecount > pagemax) @@ -37,6 +93,7 @@ memset(addr, 0, size_rounded_up); + stm_char* o = (stm_char *)(addr - stm_object_pages); if (CROSS_PAGE_BOUNDARY(o, o + size_rounded_up)) ((object_t *)o)->stm_flags = GCFLAG_CROSS_PAGE; diff --git a/c7/stm/gcpage.h b/c7/stm/gcpage.h --- a/c7/stm/gcpage.h +++ b/c7/stm/gcpage.h @@ -1,3 +1,5 @@ -static stm_char *uninitialized_page_start; -static stm_char *uninitialized_page_stop; +static char *uninitialized_page_start; /* within segment 0 */ +static char *uninitialized_page_stop; + +static char *allocate_outside_nursery(uint64_t size); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -51,14 +51,46 @@ return (uintptr_t)obj < NURSERY_START + NURSERY_SIZE; } +static bool _is_young(object_t *obj) +{ + return _stm_in_nursery(obj); /* for now */ +} + /************************************************************/ static void minor_trace_if_young(object_t **pobj) { - //... + /* takes a normal pointer to a thread-local pointer to an object */ + object_t *obj = *pobj; + if (obj == NULL) + return; + if (!_is_young(obj)) + return; + + /* the location the object moved to is the second word in 'obj' */ + object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)obj; + + if (UNLIKELY(obj->stm_flags & GCFLAG_MOVED)) { + *pobj = pforwarded_array[1]; /* already moved */ + return; + } + +#if 0 + /* move obj to somewhere else */ + size_t size = stmcb_size_rounded_up(stm_object_pages + (uintptr_t)*pobj); + bool is_small; + object_t *moved = stm_big_small_alloc_old(size, &is_small); + + memcpy((void*)real_address(moved), + (void*)real_address(*pobj), + size); +#endif + abort(); + + allocate_outside_nursery(-1); } static void minor_trace_roots(void) diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -94,12 +94,15 @@ memset(addr, newvalue, size >> 8); } +static uint8_t get_single_creation_marker(stm_char *p) +{ + uintptr_t cmaddr = ((uintptr_t)p) >> 8; + return ((stm_creation_marker_t *)cmaddr)->cm; +} + static void set_single_creation_marker(stm_char *p, int newvalue) { - assert((((uintptr_t)p) & 255) == 0); - uintptr_t cmaddr = ((uintptr_t)p) >> 8; - ((stm_creation_marker_t *)cmaddr)->cm = newvalue; LIST_APPEND(STM_PSEGMENT->creation_markers, cmaddr); } diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -30,5 +30,6 @@ } static void set_creation_markers(stm_char *p, uint64_t size, int newvalue); +static uint8_t get_single_creation_marker(stm_char *p); static void set_single_creation_marker(stm_char *p, int newvalue); static void reset_all_creation_markers(void); From noreply at buildbot.pypy.org Mon Feb 17 16:19:40 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 17 Feb 2014 16:19:40 +0100 (CET) Subject: [pypy-commit] stmgc 
c7-refactor: Add a passing test Message-ID: <20140217151940.684251C01F0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r759:c7605a116863 Date: 2014-02-17 16:19 +0100 http://bitbucket.org/pypy/stmgc/changeset/c7605a116863/ Log: Add a passing test diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -110,7 +110,7 @@ self.start_transaction() stm_write(lp) # privatize page p2 = stm_get_real_address(lp) - assert p1 != p2 + assert p1 == p2 # no collection occurred assert stm_get_char(lp) == 'u' self.commit_transaction() @@ -131,9 +131,9 @@ self.commit_transaction() lp2 = self.pop_root() lp = self.pop_root() - + self.switch(0) - + self.start_transaction() stm_write(lp) # privatize page assert stm_get_char(lp) == 'u' @@ -151,6 +151,29 @@ assert stm_get_char(lp2) == 'y' self.commit_transaction() + def test_commit_fresh_object3(self): + # make objects lpx; then privatize the page by committing changes + # to it; then create lpy in the same page. Check that lpy is + # visible from the other thread. + self.start_transaction() + lpx = stm_allocate(16) + stm_set_char(lpx, '.') + self.commit_transaction() + + self.start_transaction() + stm_set_char(lpx, 'X') + self.commit_transaction() + + self.start_transaction() + lpy = stm_allocate(16) + stm_set_char(lpy, 'y') + self.commit_transaction() + + self.switch(1) + self.start_transaction() + assert stm_get_char(lpx) == 'X' + assert stm_get_char(lpy) == 'y' + def test_simple_refs(self): self.start_transaction() lp = stm_allocate_refs(3) From noreply at buildbot.pypy.org Mon Feb 17 16:30:03 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 17 Feb 2014 16:30:03 +0100 (CET) Subject: [pypy-commit] pypy default: Kill these details from here, and mention "--jit help". Message-ID: <20140217153003.169181C033D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69183:32bd1e855f65 Date: 2014-02-17 16:29 +0100 http://bitbucket.org/pypy/pypy/changeset/32bd1e855f65/ Log: Kill these details from here, and mention "--jit help". diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst --- a/pypy/doc/man/pypy.1.rst +++ b/pypy/doc/man/pypy.1.rst @@ -55,43 +55,8 @@ Print translation information about this PyPy executable. --jit *arg* - Low level JIT parameters. Format is - *arg*\ ``=``\ *value*\ [``,``\ *arg*\ ``=``\ *value*\ ...] - - ``off`` - Disable the JIT. - - ``threshold=``\ *value* - Number of times a loop has to run for it to become hot. - - ``function_threshold=``\ *value* - Number of times a function must run for it to become traced from - start. - - ``inlining=``\ *value* - Inline python functions or not (``1``/``0``). - - ``loop_longevity=``\ *value* - A parameter controlling how long loops will be kept before being - freed, an estimate. - - ``max_retrace_guards=``\ *value* - Number of extra guards a retrace can cause. - - ``retrace_limit=``\ *value* - How many times we can try retracing before giving up. - - ``trace_eagerness=``\ *value* - Number of times a guard has to fail before we start compiling a - bridge. - - ``trace_limit=``\ *value* - Number of recorded operations before we abort tracing with - ``ABORT_TRACE_TOO_LONG``. - - ``enable_opts=``\ *value* - Optimizations to enabled or ``all``. - Warning, this option is dangerous, and should be avoided. + Low level JIT parameters. Mostly internal. Run ``--jit help`` + for more information. ENVIRONMENT =========== @@ -144,7 +109,7 @@ Multiple prefixes can be specified, comma-separated. 
Only sections whose name match the prefix will be logged. - ``PYPYLOG``\ =\ ``jit-log-opt,jit-backend:``\ *logfile* will + ``PYPYLOG=jit-log-opt,jit-backend:logfile`` will generate a log suitable for *jitviewer*, a tool for debugging performance issues under PyPy. From noreply at buildbot.pypy.org Mon Feb 17 16:50:50 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 17 Feb 2014 16:50:50 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Fix things until test_commit_fresh_objects3 fails, as it was supposed to. Message-ID: <20140217155050.8A42C1C3293@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r760:2e2461812c80 Date: 2014-02-17 16:50 +0100 http://bitbucket.org/pypy/stmgc/changeset/2e2461812c80/ Log: Fix things until test_commit_fresh_objects3 fails, as it was supposed to. diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -110,16 +110,25 @@ void stm_register_thread_local(stm_thread_local_t *tl) { + int num; if (stm_thread_locals == NULL) { stm_thread_locals = tl->next = tl->prev = tl; + num = 0; } else { tl->next = stm_thread_locals; tl->prev = stm_thread_locals->prev; stm_thread_locals->prev->next = tl; stm_thread_locals->prev = tl; + num = tl->prev->associated_segment_num + 1; } - tl->associated_segment_num = NB_SEGMENTS; + + /* assign numbers consecutively, but that's for tests; we could also + assign the same number to all of them and they would get their own + numbers automatically. */ + num = num % NB_SEGMENTS; + tl->associated_segment_num = num; + set_gs_register(get_segment_base(num)); } void stm_unregister_thread_local(stm_thread_local_t *tl) diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -22,7 +22,7 @@ pthread_mutex_t global_mutex; pthread_cond_t global_cond; /* some additional pieces of global state follow */ - uint8_t in_use[NB_SEGMENTS + 1]; /* 1 if running a pthread */ + uint8_t in_use[NB_SEGMENTS]; /* 1 if running a pthread */ uint64_t global_time; }; char reserved[128]; @@ -36,7 +36,6 @@ perror("mutex/cond initialization"); abort(); } - sync_ctl.in_use[NB_SEGMENTS] = 0xff; } static void teardown_sync(void) @@ -118,6 +117,11 @@ if (sync_ctl.in_use[num] == 0) { /* fast-path: we can get the same segment number than the one we had before. The value stored in GS is still valid. */ +#ifdef STM_TESTS + /* that can be optimized away, except during tests, because + they use only one thread */ + set_gs_register(get_segment_base(num)); +#endif goto got_num; } /* Look for the next free segment. 
If there is none, wait for @@ -138,6 +142,7 @@ got_num: sync_ctl.in_use[num] = 1; + assert(STM_SEGMENT->segment_num == num); assert(STM_SEGMENT->running_thread == NULL); STM_SEGMENT->running_thread = tl; STM_PSEGMENT->start_time = ++sync_ctl.global_time; @@ -162,10 +167,8 @@ bool _stm_in_transaction(stm_thread_local_t *tl) { int num = tl->associated_segment_num; - if (num < NB_SEGMENTS) - return get_segment(num)->running_thread == tl; - else - return false; + assert(num < NB_SEGMENTS); + return get_segment(num)->running_thread == tl; } void _stm_test_switch(stm_thread_local_t *tl) diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -110,7 +110,8 @@ self.start_transaction() stm_write(lp) # privatize page p2 = stm_get_real_address(lp) - assert p1 == p2 # no collection occurred + assert p1 != p2 # we see the other segment, but same object + assert (p2 - p1) % 4096 == 0 assert stm_get_char(lp) == 'u' self.commit_transaction() @@ -151,12 +152,13 @@ assert stm_get_char(lp2) == 'y' self.commit_transaction() - def test_commit_fresh_object3(self): + def test_commit_fresh_objects3(self): # make objects lpx; then privatize the page by committing changes # to it; then create lpy in the same page. Check that lpy is # visible from the other thread. self.start_transaction() lpx = stm_allocate(16) + print lpx stm_set_char(lpx, '.') self.commit_transaction() @@ -166,7 +168,9 @@ self.start_transaction() lpy = stm_allocate(16) + print lpy stm_set_char(lpy, 'y') + print "LAST COMMIT" self.commit_transaction() self.switch(1) From noreply at buildbot.pypy.org Mon Feb 17 17:21:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 17 Feb 2014 17:21:19 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Introduce GCFLAG_SMALL_UNIFORM to replace GCFLAG_CROSS_PAGE, which Message-ID: <20140217162119.BF3971C01F0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r761:1f0ca1e7fe06 Date: 2014-02-17 17:21 +0100 http://bitbucket.org/pypy/stmgc/changeset/1f0ca1e7fe06/ Log: Introduce GCFLAG_SMALL_UNIFORM to replace GCFLAG_CROSS_PAGE, which cannot work because even small nursery objects may cross two pages. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -28,13 +28,25 @@ /* otherwise, we need to privatize the pages containing the object, if they are still SHARED_PAGE. The common case is that there is only one page in total. */ - if (UNLIKELY((obj->stm_flags & GCFLAG_CROSS_PAGE) != 0)) { - abort(); - //... + size_t size = 0; + uintptr_t first_page = ((uintptr_t)obj) / 4096UL; + uintptr_t page_count = 1; + + /* If the object is in the uniform pages of small objects (outside the + nursery), then it fits into one page. Otherwise, we need to compute + it based on its location and size. 
*/ + if ((obj->stm_flags & GCFLAG_SMALL_UNIFORM) == 0) { + + /* get the size of the object */ + size = stmcb_size_rounded_up( + (struct object_s *)REAL_ADDRESS(STM_SEGMENT->segment_base, obj)); + + /* that's the page *following* the last page with the object */ + uintptr_t end_page = (((uintptr_t)obj) + size + 4095) / 4096UL; + + page_count = end_page - first_page; } - else { - pages_privatize(((uintptr_t)obj) / 4096UL, 1); - } + pages_privatize(first_page, page_count); /* do a read-barrier *before* the safepoints that may be issued in contention_management() */ diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -35,9 +35,8 @@ _stm_write_slowpath() is called, and then the flag is set to say "called once already, no need to call again". */ GCFLAG_WRITE_BARRIER_CALLED = _STM_GCFLAG_WRITE_BARRIER_CALLED, - /* objects that are allocated crossing a page boundary have this - flag set */ - GCFLAG_CROSS_PAGE = 0x02, + /* allocated by gcpage.c in uniformly-sized pages of small objects */ + GCFLAG_SMALL_UNIFORM = 0x02, /* only used during collections to mark an obj as moved out of the generation it was in */ GCFLAG_MOVED = 0x04, diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -94,8 +94,5 @@ memset(addr, 0, size_rounded_up); stm_char* o = (stm_char *)(addr - stm_object_pages); - if (CROSS_PAGE_BOUNDARY(o, o + size_rounded_up)) - ((object_t *)o)->stm_flags = GCFLAG_CROSS_PAGE; - return (object_t *)o; } From noreply at buildbot.pypy.org Mon Feb 17 17:26:33 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 17 Feb 2014 17:26:33 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Clarifications Message-ID: <20140217162633.021281C033D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r762:8d5e8b47d322 Date: 2014-02-17 17:26 +0100 http://bitbucket.org/pypy/stmgc/changeset/8d5e8b47d322/ Log: Clarifications diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -25,10 +25,11 @@ return; } + /* otherwise, we need to privatize the pages containing the object, if they are still SHARED_PAGE. The common case is that there is only one page in total. 
*/ - size_t size = 0; + size_t obj_size = 0; uintptr_t first_page = ((uintptr_t)obj) / 4096UL; uintptr_t page_count = 1; @@ -38,39 +39,39 @@ if ((obj->stm_flags & GCFLAG_SMALL_UNIFORM) == 0) { /* get the size of the object */ - size = stmcb_size_rounded_up( + obj_size = stmcb_size_rounded_up( (struct object_s *)REAL_ADDRESS(STM_SEGMENT->segment_base, obj)); /* that's the page *following* the last page with the object */ - uintptr_t end_page = (((uintptr_t)obj) + size + 4095) / 4096UL; + uintptr_t end_page = (((uintptr_t)obj) + obj_size + 4095) / 4096UL; page_count = end_page - first_page; } pages_privatize(first_page, page_count); + /* do a read-barrier *before* the safepoints that may be issued in contention_management() */ stm_read(obj); /* claim the write-lock for this object */ - do { - uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - READMARKER_START; - uint8_t lock_num = STM_PSEGMENT->write_lock_num; - uint8_t prev_owner; - prev_owner = __sync_val_compare_and_swap(&write_locks[lock_idx], - 0, lock_num); + retry:; + uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - READMARKER_START; + uint8_t lock_num = STM_PSEGMENT->write_lock_num; + uint8_t prev_owner; + prev_owner = __sync_val_compare_and_swap(&write_locks[lock_idx], + 0, lock_num); - /* if there was no lock-holder, we are done */ - if (LIKELY(prev_owner == 0)) - break; - + /* if there was no lock-holder, we are done; otherwise... */ + if (UNLIKELY(prev_owner != 0)) { /* otherwise, call the contention manager, and then possibly retry. By construction it should not be possible that the owner of the object is already us */ mutex_lock(); contention_management(prev_owner - 1, true); mutex_unlock(); - } while (1); + goto retry; + } /* add the write-barrier-already-called flag ONLY if we succeeded in getting the write-lock */ From noreply at buildbot.pypy.org Mon Feb 17 17:36:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 17 Feb 2014 17:36:14 +0100 (CET) Subject: [pypy-commit] pypy default: Try to improve formatting Message-ID: <20140217163614.4F2D21C10B8@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69184:9e295f159914 Date: 2014-02-17 17:35 +0100 http://bitbucket.org/pypy/pypy/changeset/9e295f159914/ Log: Try to improve formatting diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst --- a/pypy/doc/man/pypy.1.rst +++ b/pypy/doc/man/pypy.1.rst @@ -19,10 +19,10 @@ Skip assert statements. -OO - Remove docstrings when importing modules in addition to -O. + Remove docstrings when importing modules in addition to ``-O``. --c *cmd* - Program passed in as CMD (terminates option list). +-c CMD + Program passed in as ``CMD`` (terminates option list). -S Do not ``import site`` on initialization. @@ -36,10 +36,10 @@ -h, --help Show a help message and exit. --m *mod* +-m MOD Library module to be run as a script (terminates option list). --W *arg* +-W ARG Warning control (*arg* is *action*:*message*:*category*:*module*:*lineno*). -E @@ -54,7 +54,7 @@ --info Print translation information about this PyPy executable. ---jit *arg* +--jit ARG Low level JIT parameters. Mostly internal. Run ``--jit help`` for more information. 
From noreply at buildbot.pypy.org Mon Feb 17 19:19:58 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 17 Feb 2014 19:19:58 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Fix the bug pointed out by test_commit_fresh_objects3 Message-ID: <20140217181958.904571C0906@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r763:e72de902728f Date: 2014-02-17 19:19 +0100 http://bitbucket.org/pypy/stmgc/changeset/e72de902728f/ Log: Fix the bug pointed out by test_commit_fresh_objects3 diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -31,13 +31,14 @@ only one page in total. */ size_t obj_size = 0; uintptr_t first_page = ((uintptr_t)obj) / 4096UL; - uintptr_t page_count = 1; /* If the object is in the uniform pages of small objects (outside the nursery), then it fits into one page. Otherwise, we need to compute it based on its location and size. */ - if ((obj->stm_flags & GCFLAG_SMALL_UNIFORM) == 0) { - + if ((obj->stm_flags & GCFLAG_SMALL_UNIFORM) != 0) { + pages_privatize(first_page, 1); + } + else { /* get the size of the object */ obj_size = stmcb_size_rounded_up( (struct object_s *)REAL_ADDRESS(STM_SEGMENT->segment_base, obj)); @@ -45,9 +46,8 @@ /* that's the page *following* the last page with the object */ uintptr_t end_page = (((uintptr_t)obj) + obj_size + 4095) / 4096UL; - page_count = end_page - first_page; + pages_privatize(first_page, end_page - first_page); } - pages_privatize(first_page, page_count); /* do a read-barrier *before* the safepoints that may be issued in @@ -246,12 +246,16 @@ /* copy modified object versions to other threads */ push_modified_to_other_segments(); + /* reset the creation markers, and if necessary (i.e. if the page the + data is on is not SHARED) copy the data to other threads. The + hope is that it's rarely necessary. 
*/ + reset_all_creation_markers_and_push_created_data(); + /* done */ stm_thread_local_t *tl = STM_SEGMENT->running_thread; release_thread_segment(tl); STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; - reset_all_creation_markers(); mutex_unlock(); } diff --git a/c7/stm/pagecopy.c b/c7/stm/pagecopy.c --- a/c7/stm/pagecopy.c +++ b/c7/stm/pagecopy.c @@ -1,30 +1,39 @@ + +#define PAGECOPY_128(dest, src) \ + asm volatile("movdqa (%0), %%xmm0\n" \ + "movdqa 16(%0), %%xmm1\n" \ + "movdqa 32(%0), %%xmm2\n" \ + "movdqa 48(%0), %%xmm3\n" \ + "movdqa %%xmm0, (%1)\n" \ + "movdqa %%xmm1, 16(%1)\n" \ + "movdqa %%xmm2, 32(%1)\n" \ + "movdqa %%xmm3, 48(%1)\n" \ + "movdqa 64(%0), %%xmm0\n" \ + "movdqa 80(%0), %%xmm1\n" \ + "movdqa 96(%0), %%xmm2\n" \ + "movdqa 112(%0), %%xmm3\n" \ + "movdqa %%xmm0, 64(%1)\n" \ + "movdqa %%xmm1, 80(%1)\n" \ + "movdqa %%xmm2, 96(%1)\n" \ + "movdqa %%xmm3, 112(%1)\n" \ + : \ + : "r"(src), "r"(dest) \ + : "xmm0", "xmm1", "xmm2", "xmm3", "memory") static void pagecopy(void *dest, const void *src) { unsigned long i; - for (i=0; i<4096/128; i++) { - asm volatile("movdqa (%0), %%xmm0\n" - "movdqa 16(%0), %%xmm1\n" - "movdqa 32(%0), %%xmm2\n" - "movdqa 48(%0), %%xmm3\n" - "movdqa %%xmm0, (%1)\n" - "movdqa %%xmm1, 16(%1)\n" - "movdqa %%xmm2, 32(%1)\n" - "movdqa %%xmm3, 48(%1)\n" - "movdqa 64(%0), %%xmm0\n" - "movdqa 80(%0), %%xmm1\n" - "movdqa 96(%0), %%xmm2\n" - "movdqa 112(%0), %%xmm3\n" - "movdqa %%xmm0, 64(%1)\n" - "movdqa %%xmm1, 80(%1)\n" - "movdqa %%xmm2, 96(%1)\n" - "movdqa %%xmm3, 112(%1)\n" - : - : "r"(src + 128*i), "r"(dest + 128*i) - : "xmm0", "xmm1", "xmm2", "xmm3", "memory"); + for (i = 0; i < 4096 / 128; i++) { + PAGECOPY_128(dest + 128*i, src + 128*i); } } +static void pagecopy_256(void *dest, const void *src) +{ + PAGECOPY_128(dest, src ); + PAGECOPY_128(dest + 128, src + 128); +} + #if 0 /* XXX enable if detected on the cpu */ static void pagecopy_ymm8(void *dest, const void *src) { diff --git a/c7/stm/pagecopy.h b/c7/stm/pagecopy.h --- a/c7/stm/pagecopy.h +++ b/c7/stm/pagecopy.h @@ -1,2 +1,3 @@ -static void pagecopy(void *dest, const void *src); +static void pagecopy(void *dest, const void *src); // 4096 bytes +static void pagecopy_256(void *dest, const void *src); // 256 bytes diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -125,3 +125,51 @@ list_clear(STM_PSEGMENT->creation_markers); } + +static void reset_all_creation_markers_and_push_created_data(void) +{ + /* This is like reset_all_creation_markers(), but additionally + it looks for markers in non-SHARED pages, and pushes the + corresponding data (in 256-bytes blocks) to other threads. + */ +#if NB_SEGMENTS != 2 +# error "The logic in this function only works with two segments" +#endif + + char *local_base = STM_SEGMENT->segment_base; + long remote_num = 1 - STM_SEGMENT->segment_num; + char *remote_base = get_segment_base(remote_num); + + /* this logic assumes that creation markers are in 256-bytes blocks, + and pages are 4096 bytes, so creation markers are handled by groups + of 16 --- which is two 8-bytes uint64_t. 
*/ + + LIST_FOREACH_R( + STM_PSEGMENT->creation_markers, + uintptr_t /*item*/, + ({ + TLPREFIX uint64_t *p = (TLPREFIX uint64_t *)(item & ~15); + while (p[0] != 0 || p[1] != 0) { + + uint64_t pagenum = ((uint64_t)p) >> 4; + if (flag_page_private[pagenum] != SHARED_PAGE) { + /* copying needed */ + uint64_t dataofs = ((uint64_t)p) << 8; + stm_char *start = (stm_char *)p; + stm_char *stop = start + 16; + while (start < stop) { + if (*start++ != 0) { + pagecopy_256(remote_base + dataofs, + local_base + dataofs); + } + dataofs += 256; + } + } + p[0] = 0; + p[1] = 0; + p += 2; + } + })); + + list_clear(STM_PSEGMENT->creation_markers); +} diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -33,3 +33,4 @@ static uint8_t get_single_creation_marker(stm_char *p); static void set_single_creation_marker(stm_char *p, int newvalue); static void reset_all_creation_markers(void); +static void reset_all_creation_markers_and_push_created_data(void); From noreply at buildbot.pypy.org Mon Feb 17 23:15:50 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Mon, 17 Feb 2014 23:15:50 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: try disabling builtinshortcut, which likely slows down multimethod'd types Message-ID: <20140217221550.683B61C0906@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r69185:f2dd9a6e6a15 Date: 2014-02-17 14:14 -0800 http://bitbucket.org/pypy/pypy/changeset/f2dd9a6e6a15/ Log: try disabling builtinshortcut, which likely slows down multimethod'd types (mostly just float/complex and maybe marshal_w) but could help everything else. in particular the is_true builtinshortcut, which is no longer a shortcut now that bool doesn't use multimethods diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -301,7 +301,7 @@ config.objspace.std.suggest(withrangelist=True) config.objspace.std.suggest(withmethodcache=True) config.objspace.std.suggest(withprebuiltchar=True) - config.objspace.std.suggest(builtinshortcut=True) + #config.objspace.std.suggest(builtinshortcut=True) config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) #config.objspace.std.suggest(newshortcut=True) From noreply at buildbot.pypy.org Mon Feb 17 23:58:54 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Mon, 17 Feb 2014 23:58:54 +0100 (CET) Subject: [pypy-commit] pypy bounds-int-add-or: create branch Message-ID: <20140217225854.5C9FC1C10B8@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: bounds-int-add-or Changeset: r69186:6835ecfc3a5b Date: 2014-02-17 15:49 +0100 http://bitbucket.org/pypy/pypy/changeset/6835ecfc3a5b/ Log: create branch From noreply at buildbot.pypy.org Mon Feb 17 23:58:55 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Mon, 17 Feb 2014 23:58:55 +0100 (CET) Subject: [pypy-commit] pypy bounds-int-add-or: failing and passing test Message-ID: <20140217225855.9B9251C10B8@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: bounds-int-add-or Changeset: r69187:17814d1be480 Date: 2014-02-17 13:03 +0100 http://bitbucket.org/pypy/pypy/changeset/17814d1be480/ Log: failing and passing test diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5326,6 +5326,38 @@ """ 
self.optimize_loop(ops, ops) + def test_int_and_cmp_above_bounds(self): + ops = """ + [p0] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p0, 1, descr=chararraydescr) + i2 = int_and(i0, i1) + i3 = int_le(i2, 255) + guard_true(i3) [] + jump(i2) + """ + + expected = """ + [p0] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p0, 1, descr=chararraydescr) + i2 = int_and(i0, i1) + jump(i2) + """ + self.optimize_loop(ops, expected) + + def test_int_and_cmp_below_bounds(self): + ops = """ + [p0] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p0, 1, descr=chararraydescr) + i2 = int_and(i0, i1) + i3 = int_le(i2, 80) + guard_true(i3) [] + jump(i2) + """ + self.optimize_loop(ops, ops) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass From noreply at buildbot.pypy.org Mon Feb 17 23:58:56 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Mon, 17 Feb 2014 23:58:56 +0100 (CET) Subject: [pypy-commit] pypy bounds-int-add-or: add and test next_power2 utility Message-ID: <20140217225856.C93431C10B8@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: bounds-int-add-or Changeset: r69188:5eca705b5884 Date: 2014-02-17 14:51 +0100 http://bitbucket.org/pypy/pypy/changeset/5eca705b5884/ Log: add and test next_power2 utility diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -23,6 +23,17 @@ return (1 << ((byte_size << 3) - 1)) - 1 +def next_power2(val): + """Calculate next power of 2 greater than val. + + Danger: this can overflow, use only when val is sufficiently + lower than symbolic.WORD""" + power = 1 + while power < val + 1: + power <<= 1 + return power + + class OptIntBounds(Optimization): """Keeps track of the bounds placed on integers by guards and remove redundant guards""" @@ -82,6 +93,8 @@ val = v1.box.getint() if val >= 0: r.intbound.intersect(IntBound(0, val)) + elif v1.intbound.lower >= 0 and v2.intbound.lower >= 0: + pass def optimize_INT_SUB(self, op): v1 = self.getvalue(op.getarg(0)) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py b/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py @@ -0,0 +1,10 @@ +from rpython.jit.metainterp.optimizeopt.intbounds import next_power2 + + +def test_next_power2(): + assert next_power2(0) == 1 + assert next_power2(1) == 2 + assert next_power2(7) == 8 + assert next_power2(256) == 512 + assert next_power2(255) == 256 + assert next_power2(80) == 128 From noreply at buildbot.pypy.org Mon Feb 17 23:58:58 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Mon, 17 Feb 2014 23:58:58 +0100 (CET) Subject: [pypy-commit] pypy bounds-int-add-or: tweak test to bounding value Message-ID: <20140217225858.06E771C10B8@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: bounds-int-add-or Changeset: r69189:fc047124598c Date: 2014-02-17 15:31 +0100 http://bitbucket.org/pypy/pypy/changeset/fc047124598c/ Log: tweak test to bounding value diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5352,7 +5352,7 @@ i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) i1 = 
getarrayitem_gc(p0, 1, descr=chararraydescr) i2 = int_and(i0, i1) - i3 = int_le(i2, 80) + i3 = int_lt(i2, 255) guard_true(i3) [] jump(i2) """ From noreply at buildbot.pypy.org Mon Feb 17 23:58:59 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Mon, 17 Feb 2014 23:58:59 +0100 (CET) Subject: [pypy-commit] pypy bounds-int-add-or: add bounds propagation from INT_AND Message-ID: <20140217225859.283B81C10B8@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: bounds-int-add-or Changeset: r69190:41581d8d194b Date: 2014-02-17 15:24 +0100 http://bitbucket.org/pypy/pypy/changeset/41581d8d194b/ Log: add bounds propagation from INT_AND diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -7,6 +7,7 @@ CONST_0, MODE_ARRAY, MODE_STR, MODE_UNICODE) from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method from rpython.jit.metainterp.resoperation import rop +from rpython.jit.backend.llsupport import symbolic def get_integer_min(is_unsigned, byte_size): @@ -94,7 +95,10 @@ if val >= 0: r.intbound.intersect(IntBound(0, val)) elif v1.intbound.lower >= 0 and v2.intbound.lower >= 0: - pass + lesser = min(v1.intbound.upper, v2.intbound.upper) + # check if next_power2 won't overflow + if lesser < (1 << ((symbolic.WORD - 1) << 3)): + r.intbound.intersect(IntBound(0, next_power2(lesser) - 1)) def optimize_INT_SUB(self, op): v1 = self.getvalue(op.getarg(0)) From noreply at buildbot.pypy.org Mon Feb 17 23:59:00 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Mon, 17 Feb 2014 23:59:00 +0100 (CET) Subject: [pypy-commit] pypy bounds-int-add-or: failing and passing test for INT_OR Message-ID: <20140217225900.5B8FA1C10B8@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: bounds-int-add-or Changeset: r69191:45cad34a2d76 Date: 2014-02-17 16:30 +0100 http://bitbucket.org/pypy/pypy/changeset/45cad34a2d76/ Log: failing and passing test for INT_OR diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5358,6 +5358,39 @@ """ self.optimize_loop(ops, ops) + def test_int_or_cmp_above_bounds(self): + ops = """ + [p0] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p0, 1, descr=chararraydescr) + i2 = int_or(i0, i1) + i3 = int_le(i2, 255) + guard_true(i3) [] + jump(i2) + """ + + expected = """ + [p0] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p0, 1, descr=chararraydescr) + i2 = int_or(i0, i1) + jump(i2) + """ + self.optimize_loop(ops, expected) + + def test_int_or_cmp_below_bounds(self): + ops = """ + [p0] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p0, 1, descr=chararraydescr) + i2 = int_or(i0, i1) + i3 = int_lt(i2, 255) + guard_true(i3) [] + jump(i2) + """ + self.optimize_loop(ops, ops) + + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass From noreply at buildbot.pypy.org Mon Feb 17 23:59:01 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Mon, 17 Feb 2014 23:59:01 +0100 (CET) Subject: [pypy-commit] pypy bounds-int-add-or: implement optimization and fix the test Message-ID: <20140217225901.718AA1C10B8@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: bounds-int-add-or Changeset: r69192:0e8fc6825162 Date: 
2014-02-17 16:55 +0100 http://bitbucket.org/pypy/pypy/changeset/0e8fc6825162/ Log: implement optimization and fix the test diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -68,6 +68,19 @@ optimize_GUARD_FALSE = optimize_GUARD_TRUE optimize_GUARD_VALUE = optimize_GUARD_TRUE + def optimize_INT_OR(self, op): + v1 = self.getvalue(op.getarg(0)) + v2 = self.getvalue(op.getarg(1)) + self.emit_operation(op) + r = self.getvalue(op.result) + + if v1.intbound.lower >= 0 and v2.intbound.lower >= 0: + mostsignificant = v1.intbound.upper | v2.intbound.upper + # check if next_power2 won't overflow + if mostsignificant < (1 << ((symbolic.WORD - 1) << 3)): + r.intbound.intersect( + IntBound(0, next_power2(mostsignificant) - 1)) + def optimize_INT_XOR(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) From noreply at buildbot.pypy.org Mon Feb 17 23:59:02 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Mon, 17 Feb 2014 23:59:02 +0100 (CET) Subject: [pypy-commit] pypy bounds-int-add-or: test for int_xor and fix Message-ID: <20140217225902.946FB1C10B8@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: bounds-int-add-or Changeset: r69193:531ef2c5d656 Date: 2014-02-17 17:34 +0100 http://bitbucket.org/pypy/pypy/changeset/531ef2c5d656/ Log: test for int_xor and fix diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -92,6 +92,12 @@ v2.intbound.known_ge(IntBound(0, 0)): r = self.getvalue(op.result) r.intbound.make_ge(IntLowerBound(0)) + if v1.intbound.lower >= 0 and v2.intbound.lower >= 0: + lesser = min(v1.intbound.upper, v2.intbound.upper) + # check if next_power2 won't overflow + if lesser < (1 << ((symbolic.WORD - 1) << 3)): + r.intbound.intersect(IntBound(0, next_power2(lesser) - 1)) + def optimize_INT_AND(self, op): v1 = self.getvalue(op.getarg(0)) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5390,6 +5390,38 @@ """ self.optimize_loop(ops, ops) + def test_int_xor_cmp_above_bounds(self): + ops = """ + [p0] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p0, 1, descr=chararraydescr) + i2 = int_xor(i0, i1) + i3 = int_le(i2, 255) + guard_true(i3) [] + jump(i2) + """ + + expected = """ + [p0] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p0, 1, descr=chararraydescr) + i2 = int_xor(i0, i1) + jump(i2) + """ + self.optimize_loop(ops, expected) + + def test_int_xor_cmp_below_bounds(self): + ops = """ + [p0] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p0, 1, descr=chararraydescr) + i2 = int_xor(i0, i1) + i3 = int_lt(i2, 255) + guard_true(i3) [] + jump(i2) + """ + self.optimize_loop(ops, ops) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): From noreply at buildbot.pypy.org Mon Feb 17 23:59:03 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Mon, 17 Feb 2014 23:59:03 +0100 (CET) Subject: [pypy-commit] pypy bounds-int-add-or: stylistic tweaks Message-ID: <20140217225903.B01BD1C10B8@cobra.cs.uni-duesseldorf.de> Author: 
Squeaky Branch: bounds-int-add-or Changeset: r69194:f4da31595578 Date: 2014-02-17 18:01 +0100 http://bitbucket.org/pypy/pypy/changeset/f4da31595578/ Log: stylistic tweaks diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -74,7 +74,8 @@ self.emit_operation(op) r = self.getvalue(op.result) - if v1.intbound.lower >= 0 and v2.intbound.lower >= 0: + if v1.intbound.known_ge(IntBound(0, 0)) and \ + v2.intbound.known_ge(IntBound(0, 0)): mostsignificant = v1.intbound.upper | v2.intbound.upper # check if next_power2 won't overflow if mostsignificant < (1 << ((symbolic.WORD - 1) << 3)): @@ -92,11 +93,11 @@ v2.intbound.known_ge(IntBound(0, 0)): r = self.getvalue(op.result) r.intbound.make_ge(IntLowerBound(0)) - if v1.intbound.lower >= 0 and v2.intbound.lower >= 0: - lesser = min(v1.intbound.upper, v2.intbound.upper) + + mostsignificant = v1.intbound.upper | v2.intbound.upper # check if next_power2 won't overflow - if lesser < (1 << ((symbolic.WORD - 1) << 3)): - r.intbound.intersect(IntBound(0, next_power2(lesser) - 1)) + if mostsignificant < (1 << ((symbolic.WORD - 1) << 3)): + r.intbound.make_lt(IntUpperBound(next_power2(mostsignificant))) def optimize_INT_AND(self, op): @@ -113,7 +114,8 @@ val = v1.box.getint() if val >= 0: r.intbound.intersect(IntBound(0, val)) - elif v1.intbound.lower >= 0 and v2.intbound.lower >= 0: + elif v1.intbound.known_ge(IntBound(0, 0)) and \ + v2.intbound.known_ge(IntBound(0, 0)): lesser = min(v1.intbound.upper, v2.intbound.upper) # check if next_power2 won't overflow if lesser < (1 << ((symbolic.WORD - 1) << 3)): From noreply at buildbot.pypy.org Mon Feb 17 23:59:04 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Mon, 17 Feb 2014 23:59:04 +0100 (CET) Subject: [pypy-commit] pypy bounds-int-add-or: improve tests Message-ID: <20140217225904.CC8AD1C10B8@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: bounds-int-add-or Changeset: r69195:cd8ee0a2d7af Date: 2014-02-17 18:50 +0100 http://bitbucket.org/pypy/pypy/changeset/cd8ee0a2d7af/ Log: improve tests diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5328,9 +5328,9 @@ def test_int_and_cmp_above_bounds(self): ops = """ - [p0] + [p0,p1] i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) - i1 = getarrayitem_gc(p0, 1, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) i2 = int_and(i0, i1) i3 = int_le(i2, 255) guard_true(i3) [] @@ -5338,9 +5338,9 @@ """ expected = """ - [p0] + [p0,p1] i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) - i1 = getarrayitem_gc(p0, 1, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) i2 = int_and(i0, i1) jump(i2) """ @@ -5348,9 +5348,9 @@ def test_int_and_cmp_below_bounds(self): ops = """ - [p0] + [p0,p1] i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) - i1 = getarrayitem_gc(p0, 1, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) i2 = int_and(i0, i1) i3 = int_lt(i2, 255) guard_true(i3) [] @@ -5360,19 +5360,19 @@ def test_int_or_cmp_above_bounds(self): ops = """ - [p0] + [p0,p1] i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) - i1 = getarrayitem_gc(p0, 1, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) i2 = 
int_or(i0, i1) - i3 = int_le(i2, 255) + i3 = int_le(i2, 65535) guard_true(i3) [] jump(i2) """ expected = """ - [p0] + [p0,p1] i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) - i1 = getarrayitem_gc(p0, 1, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) i2 = int_or(i0, i1) jump(i2) """ @@ -5380,11 +5380,11 @@ def test_int_or_cmp_below_bounds(self): ops = """ - [p0] + [p0,p1] i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) - i1 = getarrayitem_gc(p0, 1, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) i2 = int_or(i0, i1) - i3 = int_lt(i2, 255) + i3 = int_lt(i2, 65535) guard_true(i3) [] jump(i2) """ @@ -5392,19 +5392,19 @@ def test_int_xor_cmp_above_bounds(self): ops = """ - [p0] + [p0,p1] i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) - i1 = getarrayitem_gc(p0, 1, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) i2 = int_xor(i0, i1) - i3 = int_le(i2, 255) + i3 = int_le(i2, 65535) guard_true(i3) [] jump(i2) """ expected = """ - [p0] + [p0,p1] i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) - i1 = getarrayitem_gc(p0, 1, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) i2 = int_xor(i0, i1) jump(i2) """ @@ -5412,17 +5412,16 @@ def test_int_xor_cmp_below_bounds(self): ops = """ - [p0] + [p0,p1] i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) - i1 = getarrayitem_gc(p0, 1, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) i2 = int_xor(i0, i1) - i3 = int_lt(i2, 255) + i3 = int_lt(i2, 65535) guard_true(i3) [] jump(i2) """ self.optimize_loop(ops, ops) - class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -1,6 +1,6 @@ import py, random -from rpython.rtyper.lltypesystem import lltype, llmemory, rclass, rstr +from rpython.rtyper.lltypesystem import lltype, llmemory, rclass, rstr, rffi from rpython.rtyper.lltypesystem.rclass import OBJECT, OBJECT_VTABLE from rpython.rtyper.rclass import FieldListAccessor, IR_QUASIIMMUTABLE @@ -208,6 +208,8 @@ chararray = lltype.GcArray(lltype.Char) chararraydescr = cpu.arraydescrof(chararray) + u2array = lltype.GcArray(rffi.USHORT) + u2arraydescr = cpu.arraydescrof(u2array) # array of structs (complex data) complexarray = lltype.GcArray( From noreply at buildbot.pypy.org Mon Feb 17 23:59:05 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Mon, 17 Feb 2014 23:59:05 +0100 (CET) Subject: [pypy-commit] pypy bounds-int-add-or: update whatsnew Message-ID: <20140217225905.E774A1C10B8@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: bounds-int-add-or Changeset: r69196:cc09acf5f43e Date: 2014-02-17 19:17 +0100 http://bitbucket.org/pypy/pypy/changeset/cc09acf5f43e/ Log: update whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -78,3 +78,7 @@ .. branch: optimize-int-and: Optimize away INT_AND with constant mask of 1s that fully cover the bitrange of other operand. + +.. 
branch: bounds-int-add-or +Propagate appropriate bounds through INT_(OR|XOR|AND) operations if the +operands are positive to kill some guards From noreply at buildbot.pypy.org Mon Feb 17 23:59:07 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Mon, 17 Feb 2014 23:59:07 +0100 (CET) Subject: [pypy-commit] pypy bounds-int-add-or: merge OR and XOR optimization into one Message-ID: <20140217225907.1B8D91C10B8@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: bounds-int-add-or Changeset: r69197:d091b8c6f8e8 Date: 2014-02-17 20:26 +0100 http://bitbucket.org/pypy/pypy/changeset/d091b8c6f8e8/ Log: merge OR and XOR optimization into one diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -68,25 +68,14 @@ optimize_GUARD_FALSE = optimize_GUARD_TRUE optimize_GUARD_VALUE = optimize_GUARD_TRUE - def optimize_INT_OR(self, op): - v1 = self.getvalue(op.getarg(0)) - v2 = self.getvalue(op.getarg(1)) - self.emit_operation(op) - r = self.getvalue(op.result) - - if v1.intbound.known_ge(IntBound(0, 0)) and \ - v2.intbound.known_ge(IntBound(0, 0)): - mostsignificant = v1.intbound.upper | v2.intbound.upper - # check if next_power2 won't overflow - if mostsignificant < (1 << ((symbolic.WORD - 1) << 3)): - r.intbound.intersect( - IntBound(0, next_power2(mostsignificant) - 1)) - - def optimize_INT_XOR(self, op): + def optimize_INT_OR_or_XOR(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) if v1 is v2: - self.make_constant_int(op.result, 0) + if op.getopnum() == rop.INT_OR: + self.make_equal_to(op.result, v1) + else: + self.make_constant_int(op.result, 0) return self.emit_operation(op) if v1.intbound.known_ge(IntBound(0, 0)) and \ @@ -99,6 +88,8 @@ if mostsignificant < (1 << ((symbolic.WORD - 1) << 3)): r.intbound.make_lt(IntUpperBound(next_power2(mostsignificant))) + optimize_INT_OR = optimize_INT_OR_or_XOR + optimize_INT_XOR = optimize_INT_OR_or_XOR def optimize_INT_AND(self, op): v1 = self.getvalue(op.getarg(0)) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5422,6 +5422,18 @@ """ self.optimize_loop(ops, ops) + def test_or_same_arg(self): + ops = """ + [i0] + i1 = int_or(i0, i0) + jump(i1) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, expected) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass From noreply at buildbot.pypy.org Mon Feb 17 23:59:08 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Mon, 17 Feb 2014 23:59:08 +0100 (CET) Subject: [pypy-commit] pypy bounds-int-add-or: substitute pow2 with loop-less non-overflowing pow2 - 1 Message-ID: <20140217225908.3F3591C10B8@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: bounds-int-add-or Changeset: r69198:c57dc327e758 Date: 2014-02-17 20:52 +0100 http://bitbucket.org/pypy/pypy/changeset/c57dc327e758/ Log: substitute pow2 with loop-less non-overflowing pow2 - 1 diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -24,15 +24,15 @@ return (1 << ((byte_size << 3) - 1)) - 1 -def next_power2(val): - """Calculate next power of 
2 greater than val. - - Danger: this can overflow, use only when val is sufficiently - lower than symbolic.WORD""" - power = 1 - while power < val + 1: - power <<= 1 - return power +def next_pow2_m1(n): + """Calculate next power of 2 greater than n minus one.""" + n |= n >> 1 + n |= n >> 2 + n |= n >> 4 + n |= n >> 8 + n |= n >> 16 + n |= n >> 32 + return n class OptIntBounds(Optimization): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py b/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py --- a/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py @@ -1,10 +1,12 @@ -from rpython.jit.metainterp.optimizeopt.intbounds import next_power2 +from rpython.jit.metainterp.optimizeopt.intbounds import next_pow2_m1 -def test_next_power2(): - assert next_power2(0) == 1 - assert next_power2(1) == 2 - assert next_power2(7) == 8 - assert next_power2(256) == 512 - assert next_power2(255) == 256 - assert next_power2(80) == 128 +def test_next_pow2_m1(): + assert next_pow2_m1(0) == 0 + assert next_pow2_m1(1) == 1 + assert next_pow2_m1(7) == 7 + assert next_pow2_m1(256) == 511 + assert next_pow2_m1(255) == 255 + assert next_pow2_m1(80) == 127 + assert next_pow2_m1((1 << 32) - 5) == (1 << 32) - 1 + assert next_pow2_m1((1 << 64) - 1) == (1 << 64) - 1 From noreply at buildbot.pypy.org Mon Feb 17 23:59:09 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Mon, 17 Feb 2014 23:59:09 +0100 (CET) Subject: [pypy-commit] pypy bounds-int-add-or: rewrite to use power of two minus 2, no more overflow Message-ID: <20140217225909.61EF31C10B8@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: bounds-int-add-or Changeset: r69199:e19230cb6257 Date: 2014-02-17 21:14 +0100 http://bitbucket.org/pypy/pypy/changeset/e19230cb6257/ Log: rewrite to use power of two minus 2, no more overflow diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -81,12 +81,8 @@ if v1.intbound.known_ge(IntBound(0, 0)) and \ v2.intbound.known_ge(IntBound(0, 0)): r = self.getvalue(op.result) - r.intbound.make_ge(IntLowerBound(0)) - mostsignificant = v1.intbound.upper | v2.intbound.upper - # check if next_power2 won't overflow - if mostsignificant < (1 << ((symbolic.WORD - 1) << 3)): - r.intbound.make_lt(IntUpperBound(next_power2(mostsignificant))) + r.intbound.intersect(IntBound(0, next_pow2_m1(mostsignificant))) optimize_INT_OR = optimize_INT_OR_or_XOR optimize_INT_XOR = optimize_INT_OR_or_XOR @@ -108,9 +104,7 @@ elif v1.intbound.known_ge(IntBound(0, 0)) and \ v2.intbound.known_ge(IntBound(0, 0)): lesser = min(v1.intbound.upper, v2.intbound.upper) - # check if next_power2 won't overflow - if lesser < (1 << ((symbolic.WORD - 1) << 3)): - r.intbound.intersect(IntBound(0, next_power2(lesser) - 1)) + r.intbound.intersect(IntBound(0, next_pow2_m1(lesser))) def optimize_INT_SUB(self, op): v1 = self.getvalue(op.getarg(0)) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5422,7 +5422,7 @@ """ self.optimize_loop(ops, ops) - def test_or_same_arg(self): + def test_int_or_same_arg(self): ops = """ [i0] i1 = int_or(i0, i0) From noreply at buildbot.pypy.org Mon Feb 17 
23:59:10 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Mon, 17 Feb 2014 23:59:10 +0100 (CET) Subject: [pypy-commit] pypy bounds-int-add-or: merge default Message-ID: <20140217225910.8B5E01C10B8@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: bounds-int-add-or Changeset: r69200:ae0283f76a9c Date: 2014-02-17 21:38 +0100 http://bitbucket.org/pypy/pypy/changeset/ae0283f76a9c/ Log: merge default diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst --- a/pypy/doc/man/pypy.1.rst +++ b/pypy/doc/man/pypy.1.rst @@ -19,10 +19,10 @@ Skip assert statements. -OO - Remove docstrings when importing modules in addition to -O. + Remove docstrings when importing modules in addition to ``-O``. --c *cmd* - Program passed in as CMD (terminates option list). +-c CMD + Program passed in as ``CMD`` (terminates option list). -S Do not ``import site`` on initialization. @@ -36,10 +36,10 @@ -h, --help Show a help message and exit. --m *mod* +-m MOD Library module to be run as a script (terminates option list). --W *arg* +-W ARG Warning control (*arg* is *action*:*message*:*category*:*module*:*lineno*). -E @@ -54,44 +54,9 @@ --info Print translation information about this PyPy executable. ---jit *arg* - Low level JIT parameters. Format is - *arg*\ ``=``\ *value*\ [``,``\ *arg*\ ``=``\ *value*\ ...] - - ``off`` - Disable the JIT. - - ``threshold=``\ *value* - Number of times a loop has to run for it to become hot. - - ``function_threshold=``\ *value* - Number of times a function must run for it to become traced from - start. - - ``inlining=``\ *value* - Inline python functions or not (``1``/``0``). - - ``loop_longevity=``\ *value* - A parameter controlling how long loops will be kept before being - freed, an estimate. - - ``max_retrace_guards=``\ *value* - Number of extra guards a retrace can cause. - - ``retrace_limit=``\ *value* - How many times we can try retracing before giving up. - - ``trace_eagerness=``\ *value* - Number of times a guard has to fail before we start compiling a - bridge. - - ``trace_limit=``\ *value* - Number of recorded operations before we abort tracing with - ``ABORT_TRACE_TOO_LONG``. - - ``enable_opts=``\ *value* - Optimizations to enabled or ``all``. - Warning, this option is dangerous, and should be avoided. +--jit ARG + Low level JIT parameters. Mostly internal. Run ``--jit help`` + for more information. ENVIRONMENT =========== @@ -144,7 +109,7 @@ Multiple prefixes can be specified, comma-separated. Only sections whose name match the prefix will be logged. - ``PYPYLOG``\ =\ ``jit-log-opt,jit-backend:``\ *logfile* will + ``PYPYLOG=jit-log-opt,jit-backend:logfile`` will generate a log suitable for *jitviewer*, a tool for debugging performance issues under PyPy. diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -75,7 +75,7 @@ Kill some guards and operations in JIT traces by adding integer bounds propagation for getfield_(raw|gc) and getarrayitem_(raw|gc). -.. branch: optimize-int-and: +.. branch: optimize-int-and Optimize away INT_AND with constant mask of 1s that fully cover the bitrange of other operand. 
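The changesets above replace the overflow-prone next_power2() helper with the loop-less next_pow2_m1() and use it to bound the result of int_and/int_or/int_xor whenever both operands are known to be non-negative. As a quick illustration of why that bound is safe, here is a small self-contained Python 2 sketch; only the body of next_pow2_m1() is copied from the intbounds.py diff above, while check() and the sample ranges are purely illustrative and not part of any changeset:

def next_pow2_m1(n):
    """Smallest value of the form 2**k - 1 that is >= n (same helper as
    added to rpython/jit/metainterp/optimizeopt/intbounds.py above)."""
    n |= n >> 1
    n |= n >> 2
    n |= n >> 4
    n |= n >> 8
    n |= n >> 16
    n |= n >> 32
    return n

def check(upper1, upper2):
    # Illustrative brute-force check, not part of the changesets.
    # For non-negative a <= upper1 and b <= upper2:
    #   - a|b and a^b cannot set a bit above the top bit of upper1|upper2
    #   - a&b cannot exceed min(upper1, upper2)
    # so the bounds chosen by optimize_INT_OR_or_XOR / optimize_INT_AND hold.
    bound_or_xor = next_pow2_m1(upper1 | upper2)
    bound_and = next_pow2_m1(min(upper1, upper2))
    for a in range(upper1 + 1):
        for b in range(upper2 + 1):
            assert 0 <= (a | b) <= bound_or_xor
            assert 0 <= (a ^ b) <= bound_or_xor
            assert 0 <= (a & b) <= bound_and

check(255, 255)    # e.g. two char-sized operands, as in the new tests
check(7, 300)
print('bounds hold')

Because next_pow2_m1() only fills in bits below its argument's highest set bit, its result never exceeds the machine word for any valid input, which is presumably why the explicit "1 << ((symbolic.WORD - 1) << 3)" overflow guard needed by the earlier next_power2()-based version could be dropped.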
From noreply at buildbot.pypy.org Mon Feb 17 23:59:11 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Mon, 17 Feb 2014 23:59:11 +0100 (CET) Subject: [pypy-commit] pypy default: Merge branch bounds-int-add-or Message-ID: <20140217225911.B7B2D1C10B8@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: Changeset: r69201:2b0a11075c0e Date: 2014-02-17 23:54 +0100 http://bitbucket.org/pypy/pypy/changeset/2b0a11075c0e/ Log: Merge branch bounds-int-add-or Propagate appropriate bounds through INT_(OR|XOR|AND) operations if the operands are positive to kill some guards diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -78,3 +78,7 @@ .. branch: optimize-int-and Optimize away INT_AND with constant mask of 1s that fully cover the bitrange of other operand. + +.. branch: bounds-int-add-or +Propagate appropriate bounds through INT_(OR|XOR|AND) operations if the +operands are positive to kill some guards diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -7,6 +7,7 @@ CONST_0, MODE_ARRAY, MODE_STR, MODE_UNICODE) from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method from rpython.jit.metainterp.resoperation import rop +from rpython.jit.backend.llsupport import symbolic def get_integer_min(is_unsigned, byte_size): @@ -23,6 +24,17 @@ return (1 << ((byte_size << 3) - 1)) - 1 +def next_pow2_m1(n): + """Calculate next power of 2 greater than n minus one.""" + n |= n >> 1 + n |= n >> 2 + n |= n >> 4 + n |= n >> 8 + n |= n >> 16 + n |= n >> 32 + return n + + class OptIntBounds(Optimization): """Keeps track of the bounds placed on integers by guards and remove redundant guards""" @@ -56,17 +68,24 @@ optimize_GUARD_FALSE = optimize_GUARD_TRUE optimize_GUARD_VALUE = optimize_GUARD_TRUE - def optimize_INT_XOR(self, op): + def optimize_INT_OR_or_XOR(self, op): v1 = self.getvalue(op.getarg(0)) v2 = self.getvalue(op.getarg(1)) if v1 is v2: - self.make_constant_int(op.result, 0) + if op.getopnum() == rop.INT_OR: + self.make_equal_to(op.result, v1) + else: + self.make_constant_int(op.result, 0) return self.emit_operation(op) if v1.intbound.known_ge(IntBound(0, 0)) and \ v2.intbound.known_ge(IntBound(0, 0)): r = self.getvalue(op.result) - r.intbound.make_ge(IntLowerBound(0)) + mostsignificant = v1.intbound.upper | v2.intbound.upper + r.intbound.intersect(IntBound(0, next_pow2_m1(mostsignificant))) + + optimize_INT_OR = optimize_INT_OR_or_XOR + optimize_INT_XOR = optimize_INT_OR_or_XOR def optimize_INT_AND(self, op): v1 = self.getvalue(op.getarg(0)) @@ -82,6 +101,10 @@ val = v1.box.getint() if val >= 0: r.intbound.intersect(IntBound(0, val)) + elif v1.intbound.known_ge(IntBound(0, 0)) and \ + v2.intbound.known_ge(IntBound(0, 0)): + lesser = min(v1.intbound.upper, v2.intbound.upper) + r.intbound.intersect(IntBound(0, next_pow2_m1(lesser))) def optimize_INT_SUB(self, op): v1 = self.getvalue(op.getarg(0)) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py b/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py @@ -0,0 +1,12 @@ +from rpython.jit.metainterp.optimizeopt.intbounds import next_pow2_m1 + + +def test_next_pow2_m1(): + assert next_pow2_m1(0) == 0 + assert next_pow2_m1(1) == 1 + assert next_pow2_m1(7) == 7 + assert 
next_pow2_m1(256) == 511 + assert next_pow2_m1(255) == 255 + assert next_pow2_m1(80) == 127 + assert next_pow2_m1((1 << 32) - 5) == (1 << 32) - 1 + assert next_pow2_m1((1 << 64) - 1) == (1 << 64) - 1 diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5326,6 +5326,114 @@ """ self.optimize_loop(ops, ops) + def test_int_and_cmp_above_bounds(self): + ops = """ + [p0,p1] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) + i2 = int_and(i0, i1) + i3 = int_le(i2, 255) + guard_true(i3) [] + jump(i2) + """ + + expected = """ + [p0,p1] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) + i2 = int_and(i0, i1) + jump(i2) + """ + self.optimize_loop(ops, expected) + + def test_int_and_cmp_below_bounds(self): + ops = """ + [p0,p1] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) + i2 = int_and(i0, i1) + i3 = int_lt(i2, 255) + guard_true(i3) [] + jump(i2) + """ + self.optimize_loop(ops, ops) + + def test_int_or_cmp_above_bounds(self): + ops = """ + [p0,p1] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) + i2 = int_or(i0, i1) + i3 = int_le(i2, 65535) + guard_true(i3) [] + jump(i2) + """ + + expected = """ + [p0,p1] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) + i2 = int_or(i0, i1) + jump(i2) + """ + self.optimize_loop(ops, expected) + + def test_int_or_cmp_below_bounds(self): + ops = """ + [p0,p1] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) + i2 = int_or(i0, i1) + i3 = int_lt(i2, 65535) + guard_true(i3) [] + jump(i2) + """ + self.optimize_loop(ops, ops) + + def test_int_xor_cmp_above_bounds(self): + ops = """ + [p0,p1] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) + i2 = int_xor(i0, i1) + i3 = int_le(i2, 65535) + guard_true(i3) [] + jump(i2) + """ + + expected = """ + [p0,p1] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) + i2 = int_xor(i0, i1) + jump(i2) + """ + self.optimize_loop(ops, expected) + + def test_int_xor_cmp_below_bounds(self): + ops = """ + [p0,p1] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) + i2 = int_xor(i0, i1) + i3 = int_lt(i2, 65535) + guard_true(i3) [] + jump(i2) + """ + self.optimize_loop(ops, ops) + + def test_int_or_same_arg(self): + ops = """ + [i0] + i1 = int_or(i0, i0) + jump(i1) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, expected) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -1,6 +1,6 @@ import py, random -from rpython.rtyper.lltypesystem import lltype, llmemory, rclass, rstr +from rpython.rtyper.lltypesystem import lltype, llmemory, rclass, rstr, rffi from rpython.rtyper.lltypesystem.rclass import OBJECT, OBJECT_VTABLE from rpython.rtyper.rclass import FieldListAccessor, IR_QUASIIMMUTABLE 
@@ -208,6 +208,8 @@ chararray = lltype.GcArray(lltype.Char) chararraydescr = cpu.arraydescrof(chararray) + u2array = lltype.GcArray(rffi.USHORT) + u2arraydescr = cpu.arraydescrof(u2array) # array of structs (complex data) complexarray = lltype.GcArray( From noreply at buildbot.pypy.org Mon Feb 17 23:59:12 2014 From: noreply at buildbot.pypy.org (squeaky) Date: Mon, 17 Feb 2014 23:59:12 +0100 (CET) Subject: [pypy-commit] pypy bounds-int-add-or: Closing merged branch Message-ID: <20140217225912.C44C11C10B8@cobra.cs.uni-duesseldorf.de> Author: Squeaky Branch: bounds-int-add-or Changeset: r69202:a1c20073e6f9 Date: 2014-02-17 23:56 +0100 http://bitbucket.org/pypy/pypy/changeset/a1c20073e6f9/ Log: Closing merged branch From noreply at buildbot.pypy.org Tue Feb 18 15:19:17 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 18 Feb 2014 15:19:17 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: new test-random Message-ID: <20140218141917.A23991C15BA@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r766:5707dbd201ed Date: 2014-02-18 15:19 +0100 http://bitbucket.org/pypy/stmgc/changeset/5707dbd201ed/ Log: new test-random diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -12,7 +12,252 @@ print >> sys.stderr, cmd exec cmd in globals(), self.content +_root_numbering = 0 +def get_new_root_name(): + global _root_numbering + _root_numbering += 1 + return "lp%d" % _root_numbering +_global_time = 0 +def contention_management(first_trs, second_trs): + if first_trs.must_abort or second_trs.must_abort: + return + if first_trs.start_time < second_trs.start_time: + second_trs.must_abort = True + else: + first_trs.must_abort = True + +class TransactionState(object): + """maintains read/write sets""" + def __init__(self, start_time): + self.read_set = set() + self.write_set = set() + self.values = {} + self.must_abort = False + self.start_time = start_time + + def has_conflict_with(self, committed): + return bool(self.read_set & committed.write_set) + + def update_from_committed(self, committed, only_new=False): + """returns True if conflict""" + if only_new: + for w in committed.write_set: + self.values[w] = committed.values[w] + else: + self.values.update(committed.values) + + if self.has_conflict_with(committed): + contention_management(self, committed) + return self.must_abort + + def read_root(self, r): + self.read_set.add(r) + return self.values[r] + + def write_root(self, r, v): + self.read_set.add(r) + self.write_set.add(r) + old = self.values.get(r, None) + self.values[r] = v + return old + + +class ThreadState(object): + """maintains state for one thread """ + + def __init__(self, num, global_state): + self.num = num + self.saved_roots = [] + self.roots_on_stack = 0 + self.roots_on_transaction_start = 0 + self.transaction_state = None + self.global_state = global_state + + def register_root(self, r): + self.saved_roots.append(r) + + def forget_random_root(self): + # # forget some non-pushed root for now + # if self.roots_on_stack < len(self.saved_roots): + # idx = self.global_state.rnd.randrange(self.roots_on_stack, len(self.saved_roots)) + # r = self.saved_roots[idx] + # del self.saved_roots[idx] + # return r + + # forget all non-pushed roots for now + res = str(self.saved_roots[self.roots_on_stack:]) + del self.saved_roots[self.roots_on_stack:] + return res + + def get_random_root(self): + rnd = self.global_state.rnd + if self.saved_roots: + return 
rnd.choice([rnd.choice(self.global_state.shared_roots), + rnd.choice(self.saved_roots)]) + return rnd.choice(self.global_state.shared_roots) + + def push_roots(self, ex): + for r in self.saved_roots[self.roots_on_transaction_start:]: + ex.do('self.push_root(%s)' % r) + self.roots_on_stack += 1 + + def pop_roots(self, ex): + for r in reversed(self.saved_roots[self.roots_on_transaction_start:]): + ex.do('%s = self.pop_root()' % r) + self.roots_on_stack -= 1 + + def update_roots(self, ex): + assert self.roots_on_stack == self.roots_on_transaction_start + for r in self.saved_roots[::-1]: + ex.do('%s = self.pop_root()' % r) + self.roots_on_stack -= 1 + assert self.roots_on_stack == 0 + for r in self.saved_roots: + ex.do('self.push_root(%s)' % r) + self.roots_on_stack += 1 + + def start_transaction(self): + assert self.transaction_state is None + global _global_time + _global_time += 1 + start_time = _global_time + trs = TransactionState(start_time) + trs.update_from_committed( + self.global_state.committed_transaction_state) + self.transaction_state = trs + self.roots_on_transaction_start = self.roots_on_stack + + def commit_transaction(self): + trs = self.transaction_state + gtrs = self.global_state.committed_transaction_state + self.global_state.check_for_write_read_conflicts(trs) + conflicts = trs.must_abort + if not conflicts: + # update global committed state w/o conflict + assert not gtrs.update_from_committed(trs) + self.global_state.push_state_to_other_threads(trs) + self.transaction_state = None + return conflicts + + def abort_transaction(self): + assert self.transaction_state.must_abort + self.roots_on_stack = self.roots_on_transaction_start + del self.saved_roots[self.roots_on_stack:] + self.transaction_state = None + + +class GlobalState(object): + def __init__(self, rnd): + self.rnd = rnd + self.thread_states = [] + self.shared_roots = [] + self.committed_transaction_state = TransactionState(0) + + def push_state_to_other_threads(self, tr_state): + for ts in self.thread_states: + other_trs = ts.transaction_state + if other_trs is None or other_trs is tr_state: + continue + other_trs.update_from_committed(tr_state, only_new=True) + + def check_for_write_write_conflicts(self, tr_state): + for ts in self.thread_states: + other_trs = ts.transaction_state + if other_trs is None or other_trs is tr_state: + continue + + if other_trs.write_set & tr_state.write_set: + contention_management(tr_state, other_trs) + + def check_for_write_read_conflicts(self, tr_state): + for ts in self.thread_states: + other_trs = ts.transaction_state + if other_trs is None or other_trs is tr_state: + continue + + if other_trs.read_set & tr_state.write_set: + contention_management(tr_state, other_trs) + + +# ========== STM OPERATIONS ========== + +class Operation(object): + def do(self, ex, global_state, thread_state): + raise NotImplemented + +class OpStartTransaction(Operation): + def do(self, ex, global_state, thread_state): + thread_state.start_transaction() + # + ex.do('self.start_transaction()') + thread_state.update_roots(ex) + + +class OpCommitTransaction(Operation): + def do(self, ex, global_state, thread_state): + # + # push all new roots + thread_state.push_roots(ex) + aborts = thread_state.commit_transaction() + # + if aborts: + thread_state.abort_transaction() + ex.do('py.test.raises(Conflict, self.commit_transaction)') + else: + ex.do('self.commit_transaction()') + +class OpAllocate(Operation): + def do(self, ex, global_state, thread_state): + r = get_new_root_name() + thread_state.push_roots(ex) 
+ ex.do('%s = stm_allocate(16)' % r) + assert thread_state.transaction_state.write_root(r, 0) is None + + thread_state.pop_roots(ex) + thread_state.register_root(r) + +class OpForgetRoot(Operation): + def do(self, ex, global_state, thread_state): + r = thread_state.forget_random_root() + ex.do('# forget %s' % r) + +class OpSetChar(Operation): + def do(self, ex, global_state, thread_state): + r = thread_state.get_random_root() + v = ord(global_state.rnd.choice("abcdefghijklmnop")) + trs = thread_state.transaction_state + trs.write_root(r, v) + + global_state.check_for_write_write_conflicts(trs) + if trs.must_abort: + thread_state.abort_transaction() + ex.do("py.test.raises(Conflict, stm_set_char, %s, %s)" % (r, repr(chr(v)))) + else: + ex.do("stm_set_char(%s, %s)" % (r, repr(chr(v)))) + +class OpGetChar(Operation): + def do(self, ex, global_state, thread_state): + r = thread_state.get_random_root() + trs = thread_state.transaction_state + v = trs.read_root(r) + # + ex.do("assert stm_get_char(%s) == %s" % (r, repr(chr(v)))) + +class OpSwitchThread(Operation): + def do(self, ex, global_state, thread_state): + trs = thread_state.transaction_state + conflicts = trs is not None and trs.must_abort + # + if conflicts: + thread_state.abort_transaction() + ex.do('py.test.raises(Conflict, self.switch, %s)' % thread_state.num) + else: + ex.do('self.switch(%s)' % thread_state.num) + + +# ========== TEST GENERATION ========== + class TestRandom(BaseTest): def test_fixed_16_bytes_objects(self, seed=1010): @@ -22,92 +267,46 @@ N_THREADS = 2 ex = Exec(self) ex.do('# initialization') - ex.do('stm_start_transaction()') - head_state = [] + + global_state = GlobalState(rnd) + for i in range(N_THREADS): + global_state.thread_states.append( + ThreadState(i, global_state)) + curr_thread = global_state.thread_states[0] + for i in range(N_OBJECTS): - ex.do('lp%d = stm_allocate(16)' % i) - ex.do('stm_set_char(lp%d, %r)' % (i, chr(i))) - head_state.append(chr(i)) - ex.do('stm_push_root(lp%d)' % i) - read_sets = [set() for i in range(N_THREADS)] - write_sets = [{} for i in range(N_THREADS)] - active_transactions = set() - need_abort = set() + r = get_new_root_name() + ex.do('%s = stm_allocate_old(16)' % r) + global_state.committed_transaction_state.write_root(r, 0) + global_state.shared_roots.append(r) + global_state.committed_transaction_state.write_set = set() + global_state.committed_transaction_state.read_set = set() - ex.do('stm_stop_transaction()') - for i in range(N_OBJECTS-1, -1, -1): - ex.do('lp%d = stm_pop_root()' % i) + # random steps: + remaining_steps = 200 + while remaining_steps > 0: + remaining_steps -= 1 - stop_count = 1 - current_thread = 0 + n_thread = rnd.randrange(0, N_THREADS) + if n_thread != curr_thread.num: + ex.do('#') + curr_thread = global_state.thread_states[n_thread] + OpSwitchThread().do(ex, global_state, curr_thread) + if curr_thread.transaction_state is None: + OpStartTransaction().do(ex, global_state, curr_thread) - def aborted(): - active_transactions.remove(n_thread) - write_sets[n_thread].clear() - read_sets[n_thread].clear() - need_abort.discard(n_thread) + action = rnd.choice([ + OpAllocate, + OpSetChar, + OpSetChar, + OpGetChar, + OpGetChar, + OpCommitTransaction, + OpForgetRoot, + ]) + action().do(ex, global_state, curr_thread) + - remaining_steps = 200 - while remaining_steps > 0 or active_transactions: - remaining_steps -= 1 - n_thread = rnd.randrange(0, N_THREADS) - if n_thread != current_thread: - ex.do('#') - current_thread = n_thread - if n_thread in need_abort: - 
ex.do('py.test.raises(Conflict, self.switch, %d)' % n_thread) - aborted() - continue - ex.do('self.switch(%d)' % n_thread) - if n_thread not in active_transactions: - if remaining_steps <= 0: - continue - ex.do('stm_start_transaction()') - active_transactions.add(n_thread) - - action = rnd.randrange(0, 7) - if action < 6 and remaining_steps > 0: - is_write = action >= 4 - i = rnd.randrange(0, N_OBJECTS) - if i in write_sets[n_thread]: - expected = write_sets[n_thread][i] - else: - expected = head_state[i] - ex.do("assert stm_get_char(lp%d) == %r" % (i, expected)) - read_sets[n_thread].add(i) - # - if is_write: - newval = chr(rnd.randrange(0, 256)) - write_write_conflict = False - for t in range(N_THREADS): - if t != n_thread: - write_write_conflict |= i in write_sets[t] - if write_write_conflict: - ex.do('py.test.raises(Conflict, stm_set_char, lp%d, %r)' - % (i, newval)) - aborted() - continue - else: - ex.do('stm_set_char(lp%d, %r)' % (i, newval)) - write_sets[n_thread][i] = newval - else: - active_transactions.remove(n_thread) - changes = [] - modified = sorted(write_sets[n_thread]) - for i in modified: - nval = write_sets[n_thread][i] - changes.append('lp%d=%r' % (i, nval)) - head_state[i] = nval - write_sets[n_thread].clear() - read_sets[n_thread].clear() - ex.do('stm_stop_transaction() #%d %s' % (stop_count, ' '.join(changes))) - stop_count += 1 - - for t in range(N_THREADS): - if t != n_thread: - for i in modified: - if i in read_sets[t]: - need_abort.add(t) def _make_fun(seed): def test_fun(self): From noreply at buildbot.pypy.org Tue Feb 18 15:42:57 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 18 Feb 2014 15:42:57 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Comments about v_nursery_section_end Message-ID: <20140218144257.30FD71C04FF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r767:94ed263ba9d2 Date: 2014-02-18 15:42 +0100 http://bitbucket.org/pypy/stmgc/changeset/94ed263ba9d2/ Log: Comments about v_nursery_section_end diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -3,5 +3,30 @@ #define NSE_SIGNAL 1 #define NSE_SIGNAL_DONE 2 +/* Rules for 'v_nursery_section_end': + + - Its main purpose is to be read by the owning thread in stm_allocate(). + + - The owning thread can change its value without acquiring the mutex, + but it must do so carefully, with a compare_and_swap. + + - If a different thread has the mutex, it can force the field to the + value NSE_SIGNAL or NSE_SIGNAL_DONE with a regular write. This should + not be hidden by the compare_and_swap done by the owning thread: + even if it occurs just before or just after a compare_and_swap, + the end result is that the special value NSE_SIGNAL(_DONE) is still + in the field. + + - When the owning thread sees NSE_SIGNAL, it must signal and wait until + the other thread restores the value to NSE_SIGNAL_DONE. When the + owning thread sees NSE_SIGNAL_DONE, it can replace it, again with + compare_and_swap, with the real value. + + - This should in theory be a volatile field, because it can be read + from stm_allocate() while at the same time being changed to the value + NSE_SIGNAL by another thread. In practice, making it volatile has + probably just a small negative impact on performance for no good reason. 
+*/ + static void align_nursery_at_transaction_start(void); static void restore_nursery_section_end(uintptr_t prev_value); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -62,7 +62,7 @@ int segment_num; char *segment_base; stm_char *nursery_current; - volatile uintptr_t v_nursery_section_end; /* see nursery.h */ + uintptr_t v_nursery_section_end; /* see nursery.h */ struct stm_thread_local_s *running_thread; stm_jmpbuf_t *jmpbuf_ptr; }; From noreply at buildbot.pypy.org Tue Feb 18 15:53:10 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 18 Feb 2014 15:53:10 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: fix for contention_management calling cond_wait in tests Message-ID: <20140218145310.2449E1C04FF@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r768:775ad832acb8 Date: 2014-02-18 15:53 +0100 http://bitbucket.org/pypy/stmgc/changeset/775ad832acb8/ Log: fix for contention_management calling cond_wait in tests diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -37,6 +37,10 @@ abort_with_mutex(); } else if (wait) { +#ifdef STM_TESTS + /* abort anyway for tests. We mustn't call cond_wait() */ + abort_with_mutex(); +#endif /* otherwise, we will issue a safe point and wait: */ STM_PSEGMENT->safe_point = SP_SAFE_POINT_CANNOT_COLLECT; diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -19,13 +19,18 @@ return "lp%d" % _root_numbering _global_time = 0 -def contention_management(first_trs, second_trs): - if first_trs.must_abort or second_trs.must_abort: - return - if first_trs.start_time < second_trs.start_time: - second_trs.must_abort = True +def contention_management(our_trs, other_trs, wait=False): + if other_trs.start_time < our_trs.start_time: + pass else: - first_trs.must_abort = True + other_trs.must_abort = True + + if not other_trs.must_abort: + our_trs.must_abort = True + elif wait: + # abort anyway: + our_trs.must_abort = True + class TransactionState(object): """maintains read/write sets""" @@ -168,7 +173,7 @@ continue if other_trs.write_set & tr_state.write_set: - contention_management(tr_state, other_trs) + contention_management(tr_state, other_trs, True) def check_for_write_read_conflicts(self, tr_state): for ts in self.thread_states: From noreply at buildbot.pypy.org Tue Feb 18 16:24:26 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 18 Feb 2014 16:24:26 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: more info in output Message-ID: <20140218152426.CB6DE1C15BA@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r769:c50e88e2b09f Date: 2014-02-18 16:23 +0100 http://bitbucket.org/pypy/stmgc/changeset/c50e88e2b09f/ Log: more info in output diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -153,20 +153,26 @@ class GlobalState(object): - def __init__(self, rnd): + def __init__(self, ex, rnd): + self.ex = ex self.rnd = rnd self.thread_states = [] self.shared_roots = [] self.committed_transaction_state = TransactionState(0) def push_state_to_other_threads(self, tr_state): + assert not tr_state.must_abort for ts in self.thread_states: other_trs = ts.transaction_state if other_trs is None or other_trs is tr_state: continue other_trs.update_from_committed(tr_state, only_new=True) + if tr_state.must_abort: + self.ex.do('# conflict while pushing to other threads') + def 
check_for_write_write_conflicts(self, tr_state): + assert not tr_state.must_abort for ts in self.thread_states: other_trs = ts.transaction_state if other_trs is None or other_trs is tr_state: @@ -174,8 +180,12 @@ if other_trs.write_set & tr_state.write_set: contention_management(tr_state, other_trs, True) + + if tr_state.must_abort: + self.ex.do('# write-write conflict') def check_for_write_read_conflicts(self, tr_state): + assert not tr_state.must_abort for ts in self.thread_states: other_trs = ts.transaction_state if other_trs is None or other_trs is tr_state: @@ -183,6 +193,9 @@ if other_trs.read_set & tr_state.write_set: contention_management(tr_state, other_trs) + + if tr_state.must_abort: + self.ex.do('# write-read conflict') # ========== STM OPERATIONS ========== @@ -271,9 +284,15 @@ N_OBJECTS = 5 N_THREADS = 2 ex = Exec(self) + ex.do(""" +################################################################ +################################################################ +################################################################ +################################################################ + """) ex.do('# initialization') - global_state = GlobalState(rnd) + global_state = GlobalState(ex, rnd) for i in range(N_THREADS): global_state.thread_states.append( ThreadState(i, global_state)) From noreply at buildbot.pypy.org Tue Feb 18 19:34:26 2014 From: noreply at buildbot.pypy.org (jiaaro) Date: Tue, 18 Feb 2014 19:34:26 +0100 (CET) Subject: [pypy-commit] pypy default: add implementations of the most commonly used audioop functions (from pydub) Message-ID: <20140218183426.5594C1C04FF@cobra.cs.uni-duesseldorf.de> Author: jiaaro Branch: Changeset: r69203:56afb92fbea1 Date: 2014-01-23 20:09 -0500 http://bitbucket.org/pypy/pypy/changeset/56afb92fbea1/ Log: add implementations of the most commonly used audioop functions (from pydub) diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py --- a/lib_pypy/audioop.py +++ b/lib_pypy/audioop.py @@ -1,14 +1,20 @@ - import struct +import math +from fractions import gcd +from ctypes import create_string_buffer class error(Exception): pass +# this module redefines the names of some builtins that are used +max_ = max + + def _check_size(size): if size != 1 and size != 2 and size != 4: - raise error("Size should be 1, 2 or 4") + raise error("Size should be 1, 2 or 4") def _check_params(length, size): @@ -17,13 +23,517 @@ raise error("not a whole number of frames") +def _sample_count(cp, size): + return len(cp) / size + + +def _get_samples(cp, size, signed=True): + for i in range(_sample_count(cp, size)): + yield _get_sample(cp, size, i, signed) + + +def _struct_format(size, signed): + if size == 1: + return "b" if signed else "B" + elif size == 2: + return "h" if signed else "H" + elif size == 4: + return "i" if signed else "I" + + +def _get_sample(cp, size, i, signed=True): + fmt = _struct_format(size, signed) + start = i * size + end = start + size + return struct.unpack_from(fmt, buffer(cp)[start:end])[0] + + +def _put_sample(cp, size, i, val, signed=True): + fmt = _struct_format(size, signed) + struct.pack_into(fmt, cp, i * size, val) + + +def _get_maxval(size, signed=True): + if signed and size == 1: return 0x7f + elif size == 1: return 0xff + elif signed and size == 2: return 0x7fff + elif size == 2: return 0xffff + elif signed and size == 4: return 0x7fffffff + elif size == 4: return 0xffffffff + + +def _get_minval(size, signed=True): + if not signed: return 0 + elif size == 1: return -0x80 + elif size == 2: return -0x8000 + elif size == 4: 
return -0x80000000 + + +def _get_clipfn(size, signed=True): + maxval = _get_maxval(size, signed) + minval = _get_minval(size, signed) + return lambda val: max_(min(val, maxval), minval) + + +def _overflow(val, size, signed=True): + minval = _get_minval(size, signed) + maxval = _get_maxval(size, signed) + if minval <= val <= maxval: + return val + + bits = size * 8 + if signed: + offset = 2**(bits-1) + return ((val + offset) % (2**bits)) - offset + else: + return val % (2**bits) + + def getsample(cp, size, i): _check_params(len(cp), size) if not (0 <= i < len(cp) / size): raise error("Index out of range") - if size == 1: - return struct.unpack_from("B", buffer(cp)[i:])[0] - elif size == 2: - return struct.unpack_from("H", buffer(cp)[i * 2:])[0] - elif size == 4: - return struct.unpack_from("I", buffer(cp)[i * 4:])[0] + return _get_sample(cp, size, i) + + +def max(cp, size): + _check_params(len(cp), size) + + if len(cp) == 0: + return 0 + + return max_(abs(sample) for sample in _get_samples(cp, size)) + + +def minmax(cp, size): + _check_params(len(cp), size) + + max_sample, min_sample = 0, 0 + for sample in _get_samples(cp, size): + if sample > max_sample: + max_sample = sample + if sample < min_sample: + min_sample = sample + + return min_sample, max_sample + + +def avg(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + if sample_count == 0: + return 0 + return sum(_get_samples(cp, size)) / sample_count + + +def rms(cp, size): + _check_params(len(cp), size) + + sample_count = _sample_count(cp, size) + if sample_count == 0: + return 0 + + sum_squares = sum(sample**2 for sample in _get_samples(cp, size)) + return int(math.sqrt(sum_squares / sample_count)) + + +def _sum2(cp1, cp2, length): + size = 2 + total = 0 + for i in range(length): + total += getsample(cp1, size, i) * getsample(cp2, size, i) + return total + + +def findfit(cp1, cp2): + size = 2 + + if len(cp1) % 2 != 0 or len(cp2) % 2 != 0: + raise error("Strings should be even-sized") + + if len(cp1) < len(cp2): + raise error("First sample should be longer") + + len1 = _sample_count(cp1, size) + len2 = _sample_count(cp2, size) + + sum_ri_2 = _sum2(cp2, cp2, len2) + sum_aij_2 = _sum2(cp1, cp1, len2) + sum_aij_ri = _sum2(cp1, cp2, len2) + + result = (sum_ri_2 * sum_aij_2 - sum_aij_ri * sum_aij_ri) / sum_aij_2 + + best_result = result + best_i = 0 + + for i in range(1, len1 - len2 + 1): + aj_m1 = _get_sample(cp1, size, i - 1) + aj_lm1 = _get_sample(cp1, size, i + len2 - 1) + + sum_aij_2 += aj_lm1**2 - aj_m1**2 + sum_aij_ri = _sum2(buffer(cp1)[i*size:], cp2, len2) + + result = (sum_ri_2 * sum_aij_2 - sum_aij_ri * sum_aij_ri) / sum_aij_2 + + if result < best_result: + best_result = result + best_i = i + + factor = _sum2(buffer(cp1)[best_i*size:], cp2, len2) / sum_ri_2 + + return best_i, factor + + +def findfactor(cp1, cp2): + size = 2 + + if len(cp1) % 2 != 0: + raise error("Strings should be even-sized") + + if len(cp1) != len(cp2): + raise error("Samples should be same size") + + sample_count = _sample_count(cp1, size) + + sum_ri_2 = _sum2(cp2, cp2, sample_count) + sum_aij_ri = _sum2(cp1, cp2, sample_count) + + return sum_aij_ri / sum_ri_2 + + +def findmax(cp, len2): + size = 2 + sample_count = _sample_count(cp, size) + + if len(cp) % 2 != 0: + raise error("Strings should be even-sized") + + if len2 < 0 or sample_count < len2: + raise error("Input sample should be longer") + + if sample_count == 0: + return 0 + + result = _sum2(cp, cp, len2) + best_result = result + best_i = 0 + + for i in range(1, 
sample_count - len2 + 1): + sample_leaving_window = getsample(cp, size, i - 1) + sample_entering_window = getsample(cp, size, i + len2 - 1) + + result -= sample_leaving_window**2 + result += sample_entering_window**2 + + if result > best_result: + best_result = result + best_i = i + + return best_i + + +def avgpp(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + + prevextremevalid = False + avg = 0 + nextreme = 0 + + prevval = getsample(cp, size, 0) + val = getsample(cp, size, 1) + + prevdiff = val - prevval + + for i in range(1, sample_count): + val = getsample(cp, size, i) + diff = val - prevval + + if diff * prevdiff < 0: + if prevextremevalid: + avg += abs(prevval - prevextreme) + nextreme += 1 + + prevextremevalid = True + prevextreme = prevval + + prevval = val + if diff != 0: + prevdiff = diff + + if nextreme == 0: + return 0 + + return avg / nextreme + + +def maxpp(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + + prevextremevalid = False + max = 0 + + prevval = getsample(cp, size, 0) + val = getsample(cp, size, 1) + + prevdiff = val - prevval + + for i in range(1, sample_count): + val = getsample(cp, size, i) + diff = val - prevval + + if diff * prevdiff < 0: + if prevextremevalid: + extremediff = abs(prevval - prevextreme) + if extremediff > max: + max = extremediff + prevextremevalid = True + prevextreme = prevval + + prevval = val + if diff != 0: + prevdiff = diff + + return max + + +def cross(cp, size): + _check_params(len(cp), size) + + crossings = 0 + last_sample = 0 + for sample in _get_samples(cp, size): + if sample <= 0 < last_sample or sample >= 0 > last_sample: + crossings += 1 + last_sample = sample + + return crossings + + +def mul(cp, size, factor): + _check_params(len(cp), size) + clip = _get_clipfn(size) + + result = create_string_buffer(len(cp)) + + for i, sample in enumerate(_get_samples(cp, size)): + sample = clip(int(sample * factor)) + _put_sample(result, size, i, sample) + + return result.raw + + +def tomono(cp, size, fac1, fac2): + _check_params(len(cp), size) + clip = _get_clipfn(size) + + sample_count = _sample_count(cp, size) + + result = create_string_buffer(len(cp) / 2) + + for i in range(0, sample_count, 2): + l_sample = getsample(cp, size, i) + r_sample = getsample(cp, size, i + 1) + + sample = (l_sample * fac1) + (r_sample * fac2) + sample = clip(sample) + + _put_sample(result, size, i / 2, sample) + + return result.raw + + +def tostereo(cp, size, fac1, fac2): + _check_params(len(cp), size) + + sample_count = _sample_count(cp, size) + + result = create_string_buffer(len(cp) * 2) + clip = _get_clipfn(size) + + for i in range(sample_count): + sample = _get_sample(cp, size, i) + + l_sample = clip(sample * fac1) + r_sample = clip(sample * fac2) + + _put_sample(result, size, i * 2, l_sample) + _put_sample(result, size, i * 2 + 1, r_sample) + + return result.raw + + +def add(cp1, cp2, size): + _check_params(len(cp1), size) + maxval = _get_maxval(size) + + if len(cp1) != len(cp2): + raise error("Lengths should be the same") + + clip = _get_clipfn(size) + sample_count = _sample_count(cp1, size) + result = create_string_buffer(len(cp1)) + + for i in range(sample_count): + sample1 = getsample(cp1, size, i) + sample2 = getsample(cp2, size, i) + + sample = clip(sample1 + sample2) + + _put_sample(result, size, i, sample) + + return result.raw + + +def bias(cp, size, bias): + _check_params(len(cp), size) + + result = create_string_buffer(len(cp)) + + for i, sample in enumerate(_get_samples(cp, 
size)): + sample = _overflow(sample + bias, size) + _put_sample(result, size, i, sample) + + return result.raw + + +def reverse(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + + result = create_string_buffer(len(cp)) + for i, sample in enumerate(_get_samples(cp, size)): + _put_sample(result, size, sample_count - i - 1, sample) + + return result.raw + + +def lin2lin(cp, size, size2): + _check_params(len(cp), size) + _check_size(size2) + + if size == size2: + return cp + + new_len = (len(cp) / size) * size2 + + result = create_string_buffer(new_len) + + for i in range(_sample_count(cp, size)): + sample = _get_sample(cp, size, i) + if size < size2: + sample = sample << (4 * size2 / size) + elif size > size2: + sample = sample >> (4 * size / size2) + + sample = _overflow(sample, size2) + + _put_sample(result, size2, i, sample) + + return result.raw + + +def ratecv(cp, size, nchannels, inrate, outrate, state, weightA=1, weightB=0): + _check_params(len(cp), size) + if nchannels < 1: + raise error("# of channels should be >= 1") + + bytes_per_frame = size * nchannels + frame_count = len(cp) / bytes_per_frame + + if bytes_per_frame / nchannels != size: + raise OverflowError("width * nchannels too big for a C int") + + if weightA < 1 or weightB < 0: + raise error("weightA should be >= 1, weightB should be >= 0") + + if len(cp) % bytes_per_frame != 0: + raise error("not a whole number of frames") + + if inrate <= 0 or outrate <= 0: + raise error("sampling rate not > 0") + + d = gcd(inrate, outrate) + inrate /= d + outrate /= d + + prev_i = [0] * nchannels + cur_i = [0] * nchannels + + if state is None: + d = -outrate + else: + d, samps = state + + if len(samps) != nchannels: + raise error("illegal state argument") + + prev_i, cur_i = zip(*samps) + prev_i, cur_i = list(prev_i), list(cur_i) + + q = frame_count / inrate + ceiling = (q + 1) * outrate + nbytes = ceiling * bytes_per_frame + + result = create_string_buffer(nbytes) + + samples = _get_samples(cp, size) + out_i = 0 + while True: + while d < 0: + if frame_count == 0: + samps = zip(prev_i, cur_i) + retval = result.raw + + # slice off extra bytes + trim_index = (out_i * bytes_per_frame) - len(retval) + retval = buffer(retval)[:trim_index] + + return (retval, (d, tuple(samps))) + + for chan in range(nchannels): + prev_i[chan] = cur_i[chan] + cur_i[chan] = samples.next() + + cur_i[chan] = ( + (weightA * cur_i[chan] + weightB * prev_i[chan]) + / (weightA + weightB) + ) + + frame_count -= 1 + d += outrate + + while d >= 0: + for chan in range(nchannels): + cur_o = ( + (prev_i[chan] * d + cur_i[chan] * (outrate - d)) + / outrate + ) + _put_sample(result, size, out_i, _overflow(cur_o, size)) + out_i += 1 + d -= inrate + + +def lin2ulaw(cp, size): + raise NotImplementedError() + + +def ulaw2lin(cp, size): + raise NotImplementedError() + + +def lin2alaw(cp, size): + raise NotImplementedError() + + +def alaw2lin(cp, size): + raise NotImplementedError() + + +def lin2adpcm(cp, size, state): + raise NotImplementedError() + + +def adpcm2lin(cp, size, state): + raise NotImplementedError() From noreply at buildbot.pypy.org Tue Feb 18 19:34:27 2014 From: noreply at buildbot.pypy.org (jiaaro) Date: Tue, 18 Feb 2014 19:34:27 +0100 (CET) Subject: [pypy-commit] pypy default: Merged pypy/pypy into default Message-ID: <20140218183427.C82D01C04FF@cobra.cs.uni-duesseldorf.de> Author: James Robert Branch: Changeset: r69204:98186a76751e Date: 2014-01-23 20:11 -0500 http://bitbucket.org/pypy/pypy/changeset/98186a76751e/ Log: Merged 
pypy/pypy into default diff --git a/pypy/interpreter/pyparser/test/test_parsestring.py b/pypy/interpreter/pyparser/test/test_parsestring.py --- a/pypy/interpreter/pyparser/test/test_parsestring.py +++ b/pypy/interpreter/pyparser/test/test_parsestring.py @@ -2,9 +2,9 @@ import py, sys class TestParsetring: - def parse_and_compare(self, literal, value): + def parse_and_compare(self, literal, value, encoding=None): space = self.space - w_ret = parsestring.parsestr(space, None, literal) + w_ret = parsestring.parsestr(space, encoding, literal) if isinstance(value, str): assert space.type(w_ret) == space.w_str assert space.str_w(w_ret) == value diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -400,16 +400,16 @@ '_PyObject_CallFunction_SizeT', '_PyObject_CallMethod_SizeT', 'PyBuffer_FromMemory', 'PyBuffer_FromReadWriteMemory', 'PyBuffer_FromObject', - 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', '_Py_init_bufferobject', + 'PyBuffer_FromReadWriteObject', 'PyBuffer_New', 'PyBuffer_Type', '_Py_get_buffer_type', 'PyCObject_FromVoidPtr', 'PyCObject_FromVoidPtrAndDesc', 'PyCObject_AsVoidPtr', 'PyCObject_GetDesc', 'PyCObject_Import', 'PyCObject_SetVoidPtr', - 'PyCObject_Type', '_Py_init_pycobject', + 'PyCObject_Type', '_Py_get_cobject_type', 'PyCapsule_New', 'PyCapsule_IsValid', 'PyCapsule_GetPointer', 'PyCapsule_GetName', 'PyCapsule_GetDestructor', 'PyCapsule_GetContext', 'PyCapsule_SetPointer', 'PyCapsule_SetName', 'PyCapsule_SetDestructor', - 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', '_Py_init_capsule', + 'PyCapsule_SetContext', 'PyCapsule_Import', 'PyCapsule_Type', '_Py_get_capsule_type', 'PyObject_AsReadBuffer', 'PyObject_AsWriteBuffer', 'PyObject_CheckReadBuffer', @@ -691,17 +691,25 @@ prefix = 'PyPy' else: prefix = 'cpyexttest' - init_buffer = rffi.llexternal('_%s_init_bufferobject' % prefix, [], lltype.Void, - compilation_info=eci, releasegil=False) - init_pycobject = rffi.llexternal('_%s_init_pycobject' % prefix, [], lltype.Void, - compilation_info=eci, releasegil=False) - init_capsule = rffi.llexternal('_%s_init_capsule' % prefix, [], lltype.Void, - compilation_info=eci, releasegil=False) - INIT_FUNCTIONS.extend([ - lambda space: init_buffer(), - lambda space: init_pycobject(), - lambda space: init_capsule(), - ]) + # jump through hoops to avoid releasing the GIL during initialization + # of the cpyext module. The C functions are called with no wrapper, + # but must not do anything like calling back PyType_Ready(). We + # use them just to get a pointer to the PyTypeObjects defined in C. 
+ get_buffer_type = rffi.llexternal('_%s_get_buffer_type' % prefix, + [], PyTypeObjectPtr, + compilation_info=eci, _nowrapper=True) + get_cobject_type = rffi.llexternal('_%s_get_cobject_type' % prefix, + [], PyTypeObjectPtr, + compilation_info=eci, _nowrapper=True) + get_capsule_type = rffi.llexternal('_%s_get_capsule_type' % prefix, + [], PyTypeObjectPtr, + compilation_info=eci, _nowrapper=True) + def init_types(space): + from pypy.module.cpyext.typeobject import py_type_ready + py_type_ready(space, get_buffer_type()) + py_type_ready(space, get_cobject_type()) + py_type_ready(space, get_capsule_type()) + INIT_FUNCTIONS.append(init_types) from pypy.module.posix.interp_posix import add_fork_hook reinit_tls = rffi.llexternal('%sThread_ReInitTLS' % prefix, [], lltype.Void, compilation_info=eci) diff --git a/pypy/module/cpyext/src/bufferobject.c b/pypy/module/cpyext/src/bufferobject.c --- a/pypy/module/cpyext/src/bufferobject.c +++ b/pypy/module/cpyext/src/bufferobject.c @@ -783,9 +783,9 @@ return size; } -void _Py_init_bufferobject(void) +PyTypeObject *_Py_get_buffer_type(void) { - PyType_Ready(&PyBuffer_Type); + return &PyBuffer_Type; } static PySequenceMethods buffer_as_sequence = { diff --git a/pypy/module/cpyext/src/capsule.c b/pypy/module/cpyext/src/capsule.c --- a/pypy/module/cpyext/src/capsule.c +++ b/pypy/module/cpyext/src/capsule.c @@ -321,8 +321,7 @@ PyCapsule_Type__doc__ /*tp_doc*/ }; -void _Py_init_capsule() +PyTypeObject *_Py_get_capsule_type(void) { - PyType_Ready(&PyCapsule_Type); + return &PyCapsule_Type; } - diff --git a/pypy/module/cpyext/src/cobject.c b/pypy/module/cpyext/src/cobject.c --- a/pypy/module/cpyext/src/cobject.c +++ b/pypy/module/cpyext/src/cobject.c @@ -156,7 +156,7 @@ PyCObject_Type__doc__ /*tp_doc*/ }; -void _Py_init_pycobject() +PyTypeObject *_Py_get_cobject_type(void) { - PyType_Ready(&PyCObject_Type); + return &PyCObject_Type; } diff --git a/pypy/module/cpyext/test/test_ztranslation.py b/pypy/module/cpyext/test/test_ztranslation.py --- a/pypy/module/cpyext/test/test_ztranslation.py +++ b/pypy/module/cpyext/test/test_ztranslation.py @@ -1,4 +1,4 @@ from pypy.objspace.fake.checkmodule import checkmodule def test_cpyext_translates(): - checkmodule('cpyext') + checkmodule('cpyext', '_rawffi') diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -549,11 +549,14 @@ pto.c_tp_flags |= Py_TPFLAGS_READY return pto +def py_type_ready(space, pto): + if pto.c_tp_flags & Py_TPFLAGS_READY: + return + type_realize(space, rffi.cast(PyObject, pto)) + @cpython_api([PyTypeObjectPtr], rffi.INT_real, error=-1) def PyType_Ready(space, pto): - if pto.c_tp_flags & Py_TPFLAGS_READY: - return 0 - type_realize(space, rffi.cast(PyObject, pto)) + py_type_ready(space, pto) return 0 def type_realize(space, py_obj): diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py --- a/pypy/module/pypyjit/policy.py +++ b/pypy/module/pypyjit/policy.py @@ -98,7 +98,8 @@ modname == '__builtin__.functional' or modname == '__builtin__.descriptor' or modname == 'thread.os_local' or - modname == 'thread.os_thread'): + modname == 'thread.os_thread' or + modname.startswith('_rawffi.alt')): return True if '.' 
in modname: modname, rest = modname.split('.', 1) diff --git a/pypy/module/pypyjit/test_pypy_c/test_weakref.py b/pypy/module/pypyjit/test_pypy_c/test_weakref.py --- a/pypy/module/pypyjit/test_pypy_c/test_weakref.py +++ b/pypy/module/pypyjit/test_pypy_c/test_weakref.py @@ -30,13 +30,13 @@ guard_not_invalidated(descr=...) p64 = getfield_gc(ConstPtr(ptr40), descr=) guard_value(p64, ConstPtr(ptr42), descr=...) - p65 = getfield_gc(p14, descr=) + p65 = getfield_gc(p14, descr=) guard_value(p65, ConstPtr(ptr45), descr=...) p66 = getfield_gc(p14, descr=) guard_nonnull_class(p66, ..., descr=...) p67 = force_token() setfield_gc(p0, p67, descr=) - p68 = call_may_force(ConstClass(WeakrefLifelineWithCallbacks.make_weakref_with_callback), p66, ConstPtr(ptr50), p14, ConstPtr(ptr51), descr=) + p68 = call_may_force(ConstClass(WeakrefLifelineWithCallbacks.make_weakref_with_callback), p66, ConstPtr(ptr50), p14, ConstPtr(ptr51), descr=) guard_not_forced(descr=...) guard_no_exception(descr=...) guard_nonnull_class(p68, ..., descr=...) From noreply at buildbot.pypy.org Tue Feb 18 19:34:29 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 18 Feb 2014 19:34:29 +0100 (CET) Subject: [pypy-commit] pypy default: Merged in jiaaro/pypy (pull request #204) Message-ID: <20140218183429.19DCF1C04FF@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r69205:57010784af18 Date: 2014-02-18 10:33 -0800 http://bitbucket.org/pypy/pypy/changeset/57010784af18/ Log: Merged in jiaaro/pypy (pull request #204) Implement most audioop functions diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py --- a/lib_pypy/audioop.py +++ b/lib_pypy/audioop.py @@ -1,14 +1,20 @@ - import struct +import math +from fractions import gcd +from ctypes import create_string_buffer class error(Exception): pass +# this module redefines the names of some builtins that are used +max_ = max + + def _check_size(size): if size != 1 and size != 2 and size != 4: - raise error("Size should be 1, 2 or 4") + raise error("Size should be 1, 2 or 4") def _check_params(length, size): @@ -17,13 +23,517 @@ raise error("not a whole number of frames") +def _sample_count(cp, size): + return len(cp) / size + + +def _get_samples(cp, size, signed=True): + for i in range(_sample_count(cp, size)): + yield _get_sample(cp, size, i, signed) + + +def _struct_format(size, signed): + if size == 1: + return "b" if signed else "B" + elif size == 2: + return "h" if signed else "H" + elif size == 4: + return "i" if signed else "I" + + +def _get_sample(cp, size, i, signed=True): + fmt = _struct_format(size, signed) + start = i * size + end = start + size + return struct.unpack_from(fmt, buffer(cp)[start:end])[0] + + +def _put_sample(cp, size, i, val, signed=True): + fmt = _struct_format(size, signed) + struct.pack_into(fmt, cp, i * size, val) + + +def _get_maxval(size, signed=True): + if signed and size == 1: return 0x7f + elif size == 1: return 0xff + elif signed and size == 2: return 0x7fff + elif size == 2: return 0xffff + elif signed and size == 4: return 0x7fffffff + elif size == 4: return 0xffffffff + + +def _get_minval(size, signed=True): + if not signed: return 0 + elif size == 1: return -0x80 + elif size == 2: return -0x8000 + elif size == 4: return -0x80000000 + + +def _get_clipfn(size, signed=True): + maxval = _get_maxval(size, signed) + minval = _get_minval(size, signed) + return lambda val: max_(min(val, maxval), minval) + + +def _overflow(val, size, signed=True): + minval = _get_minval(size, signed) + maxval = _get_maxval(size, signed) + if minval 
<= val <= maxval: + return val + + bits = size * 8 + if signed: + offset = 2**(bits-1) + return ((val + offset) % (2**bits)) - offset + else: + return val % (2**bits) + + def getsample(cp, size, i): _check_params(len(cp), size) if not (0 <= i < len(cp) / size): raise error("Index out of range") - if size == 1: - return struct.unpack_from("B", buffer(cp)[i:])[0] - elif size == 2: - return struct.unpack_from("H", buffer(cp)[i * 2:])[0] - elif size == 4: - return struct.unpack_from("I", buffer(cp)[i * 4:])[0] + return _get_sample(cp, size, i) + + +def max(cp, size): + _check_params(len(cp), size) + + if len(cp) == 0: + return 0 + + return max_(abs(sample) for sample in _get_samples(cp, size)) + + +def minmax(cp, size): + _check_params(len(cp), size) + + max_sample, min_sample = 0, 0 + for sample in _get_samples(cp, size): + if sample > max_sample: + max_sample = sample + if sample < min_sample: + min_sample = sample + + return min_sample, max_sample + + +def avg(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + if sample_count == 0: + return 0 + return sum(_get_samples(cp, size)) / sample_count + + +def rms(cp, size): + _check_params(len(cp), size) + + sample_count = _sample_count(cp, size) + if sample_count == 0: + return 0 + + sum_squares = sum(sample**2 for sample in _get_samples(cp, size)) + return int(math.sqrt(sum_squares / sample_count)) + + +def _sum2(cp1, cp2, length): + size = 2 + total = 0 + for i in range(length): + total += getsample(cp1, size, i) * getsample(cp2, size, i) + return total + + +def findfit(cp1, cp2): + size = 2 + + if len(cp1) % 2 != 0 or len(cp2) % 2 != 0: + raise error("Strings should be even-sized") + + if len(cp1) < len(cp2): + raise error("First sample should be longer") + + len1 = _sample_count(cp1, size) + len2 = _sample_count(cp2, size) + + sum_ri_2 = _sum2(cp2, cp2, len2) + sum_aij_2 = _sum2(cp1, cp1, len2) + sum_aij_ri = _sum2(cp1, cp2, len2) + + result = (sum_ri_2 * sum_aij_2 - sum_aij_ri * sum_aij_ri) / sum_aij_2 + + best_result = result + best_i = 0 + + for i in range(1, len1 - len2 + 1): + aj_m1 = _get_sample(cp1, size, i - 1) + aj_lm1 = _get_sample(cp1, size, i + len2 - 1) + + sum_aij_2 += aj_lm1**2 - aj_m1**2 + sum_aij_ri = _sum2(buffer(cp1)[i*size:], cp2, len2) + + result = (sum_ri_2 * sum_aij_2 - sum_aij_ri * sum_aij_ri) / sum_aij_2 + + if result < best_result: + best_result = result + best_i = i + + factor = _sum2(buffer(cp1)[best_i*size:], cp2, len2) / sum_ri_2 + + return best_i, factor + + +def findfactor(cp1, cp2): + size = 2 + + if len(cp1) % 2 != 0: + raise error("Strings should be even-sized") + + if len(cp1) != len(cp2): + raise error("Samples should be same size") + + sample_count = _sample_count(cp1, size) + + sum_ri_2 = _sum2(cp2, cp2, sample_count) + sum_aij_ri = _sum2(cp1, cp2, sample_count) + + return sum_aij_ri / sum_ri_2 + + +def findmax(cp, len2): + size = 2 + sample_count = _sample_count(cp, size) + + if len(cp) % 2 != 0: + raise error("Strings should be even-sized") + + if len2 < 0 or sample_count < len2: + raise error("Input sample should be longer") + + if sample_count == 0: + return 0 + + result = _sum2(cp, cp, len2) + best_result = result + best_i = 0 + + for i in range(1, sample_count - len2 + 1): + sample_leaving_window = getsample(cp, size, i - 1) + sample_entering_window = getsample(cp, size, i + len2 - 1) + + result -= sample_leaving_window**2 + result += sample_entering_window**2 + + if result > best_result: + best_result = result + best_i = i + + return best_i + + +def avgpp(cp, 
size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + + prevextremevalid = False + avg = 0 + nextreme = 0 + + prevval = getsample(cp, size, 0) + val = getsample(cp, size, 1) + + prevdiff = val - prevval + + for i in range(1, sample_count): + val = getsample(cp, size, i) + diff = val - prevval + + if diff * prevdiff < 0: + if prevextremevalid: + avg += abs(prevval - prevextreme) + nextreme += 1 + + prevextremevalid = True + prevextreme = prevval + + prevval = val + if diff != 0: + prevdiff = diff + + if nextreme == 0: + return 0 + + return avg / nextreme + + +def maxpp(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + + prevextremevalid = False + max = 0 + + prevval = getsample(cp, size, 0) + val = getsample(cp, size, 1) + + prevdiff = val - prevval + + for i in range(1, sample_count): + val = getsample(cp, size, i) + diff = val - prevval + + if diff * prevdiff < 0: + if prevextremevalid: + extremediff = abs(prevval - prevextreme) + if extremediff > max: + max = extremediff + prevextremevalid = True + prevextreme = prevval + + prevval = val + if diff != 0: + prevdiff = diff + + return max + + +def cross(cp, size): + _check_params(len(cp), size) + + crossings = 0 + last_sample = 0 + for sample in _get_samples(cp, size): + if sample <= 0 < last_sample or sample >= 0 > last_sample: + crossings += 1 + last_sample = sample + + return crossings + + +def mul(cp, size, factor): + _check_params(len(cp), size) + clip = _get_clipfn(size) + + result = create_string_buffer(len(cp)) + + for i, sample in enumerate(_get_samples(cp, size)): + sample = clip(int(sample * factor)) + _put_sample(result, size, i, sample) + + return result.raw + + +def tomono(cp, size, fac1, fac2): + _check_params(len(cp), size) + clip = _get_clipfn(size) + + sample_count = _sample_count(cp, size) + + result = create_string_buffer(len(cp) / 2) + + for i in range(0, sample_count, 2): + l_sample = getsample(cp, size, i) + r_sample = getsample(cp, size, i + 1) + + sample = (l_sample * fac1) + (r_sample * fac2) + sample = clip(sample) + + _put_sample(result, size, i / 2, sample) + + return result.raw + + +def tostereo(cp, size, fac1, fac2): + _check_params(len(cp), size) + + sample_count = _sample_count(cp, size) + + result = create_string_buffer(len(cp) * 2) + clip = _get_clipfn(size) + + for i in range(sample_count): + sample = _get_sample(cp, size, i) + + l_sample = clip(sample * fac1) + r_sample = clip(sample * fac2) + + _put_sample(result, size, i * 2, l_sample) + _put_sample(result, size, i * 2 + 1, r_sample) + + return result.raw + + +def add(cp1, cp2, size): + _check_params(len(cp1), size) + maxval = _get_maxval(size) + + if len(cp1) != len(cp2): + raise error("Lengths should be the same") + + clip = _get_clipfn(size) + sample_count = _sample_count(cp1, size) + result = create_string_buffer(len(cp1)) + + for i in range(sample_count): + sample1 = getsample(cp1, size, i) + sample2 = getsample(cp2, size, i) + + sample = clip(sample1 + sample2) + + _put_sample(result, size, i, sample) + + return result.raw + + +def bias(cp, size, bias): + _check_params(len(cp), size) + + result = create_string_buffer(len(cp)) + + for i, sample in enumerate(_get_samples(cp, size)): + sample = _overflow(sample + bias, size) + _put_sample(result, size, i, sample) + + return result.raw + + +def reverse(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + + result = create_string_buffer(len(cp)) + for i, sample in enumerate(_get_samples(cp, size)): + 
_put_sample(result, size, sample_count - i - 1, sample) + + return result.raw + + +def lin2lin(cp, size, size2): + _check_params(len(cp), size) + _check_size(size2) + + if size == size2: + return cp + + new_len = (len(cp) / size) * size2 + + result = create_string_buffer(new_len) + + for i in range(_sample_count(cp, size)): + sample = _get_sample(cp, size, i) + if size < size2: + sample = sample << (4 * size2 / size) + elif size > size2: + sample = sample >> (4 * size / size2) + + sample = _overflow(sample, size2) + + _put_sample(result, size2, i, sample) + + return result.raw + + +def ratecv(cp, size, nchannels, inrate, outrate, state, weightA=1, weightB=0): + _check_params(len(cp), size) + if nchannels < 1: + raise error("# of channels should be >= 1") + + bytes_per_frame = size * nchannels + frame_count = len(cp) / bytes_per_frame + + if bytes_per_frame / nchannels != size: + raise OverflowError("width * nchannels too big for a C int") + + if weightA < 1 or weightB < 0: + raise error("weightA should be >= 1, weightB should be >= 0") + + if len(cp) % bytes_per_frame != 0: + raise error("not a whole number of frames") + + if inrate <= 0 or outrate <= 0: + raise error("sampling rate not > 0") + + d = gcd(inrate, outrate) + inrate /= d + outrate /= d + + prev_i = [0] * nchannels + cur_i = [0] * nchannels + + if state is None: + d = -outrate + else: + d, samps = state + + if len(samps) != nchannels: + raise error("illegal state argument") + + prev_i, cur_i = zip(*samps) + prev_i, cur_i = list(prev_i), list(cur_i) + + q = frame_count / inrate + ceiling = (q + 1) * outrate + nbytes = ceiling * bytes_per_frame + + result = create_string_buffer(nbytes) + + samples = _get_samples(cp, size) + out_i = 0 + while True: + while d < 0: + if frame_count == 0: + samps = zip(prev_i, cur_i) + retval = result.raw + + # slice off extra bytes + trim_index = (out_i * bytes_per_frame) - len(retval) + retval = buffer(retval)[:trim_index] + + return (retval, (d, tuple(samps))) + + for chan in range(nchannels): + prev_i[chan] = cur_i[chan] + cur_i[chan] = samples.next() + + cur_i[chan] = ( + (weightA * cur_i[chan] + weightB * prev_i[chan]) + / (weightA + weightB) + ) + + frame_count -= 1 + d += outrate + + while d >= 0: + for chan in range(nchannels): + cur_o = ( + (prev_i[chan] * d + cur_i[chan] * (outrate - d)) + / outrate + ) + _put_sample(result, size, out_i, _overflow(cur_o, size)) + out_i += 1 + d -= inrate + + +def lin2ulaw(cp, size): + raise NotImplementedError() + + +def ulaw2lin(cp, size): + raise NotImplementedError() + + +def lin2alaw(cp, size): + raise NotImplementedError() + + +def alaw2lin(cp, size): + raise NotImplementedError() + + +def lin2adpcm(cp, size, state): + raise NotImplementedError() + + +def adpcm2lin(cp, size, state): + raise NotImplementedError() From noreply at buildbot.pypy.org Tue Feb 18 19:38:42 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Tue, 18 Feb 2014 19:38:42 +0100 (CET) Subject: [pypy-commit] pypy default: Some cleanup to audioop Message-ID: <20140218183842.E685E1C04FF@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r69206:84efb3ba05f1 Date: 2014-02-18 10:38 -0800 http://bitbucket.org/pypy/pypy/changeset/84efb3ba05f1/ Log: Some cleanup to audioop diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py --- a/lib_pypy/audioop.py +++ b/lib_pypy/audioop.py @@ -1,5 +1,6 @@ +import __builtin__ +import math import struct -import math from fractions import gcd from ctypes import create_string_buffer @@ -8,10 +9,6 @@ pass -# this module redefines 
the names of some builtins that are used -max_ = max - - def _check_size(size): if size != 1 and size != 2 and size != 4: raise error("Size should be 1, 2 or 4") @@ -54,25 +51,35 @@ def _get_maxval(size, signed=True): - if signed and size == 1: return 0x7f - elif size == 1: return 0xff - elif signed and size == 2: return 0x7fff - elif size == 2: return 0xffff - elif signed and size == 4: return 0x7fffffff - elif size == 4: return 0xffffffff + if signed and size == 1: + return 0x7f + elif size == 1: + return 0xff + elif signed and size == 2: + return 0x7fff + elif size == 2: + return 0xffff + elif signed and size == 4: + return 0x7fffffff + elif size == 4: + return 0xffffffff def _get_minval(size, signed=True): - if not signed: return 0 - elif size == 1: return -0x80 - elif size == 2: return -0x8000 - elif size == 4: return -0x80000000 + if not signed: + return 0 + elif size == 1: + return -0x80 + elif size == 2: + return -0x8000 + elif size == 4: + return -0x80000000 def _get_clipfn(size, signed=True): maxval = _get_maxval(size, signed) minval = _get_minval(size, signed) - return lambda val: max_(min(val, maxval), minval) + return lambda val: __builtin__.max(min(val, maxval), minval) def _overflow(val, size, signed=True): @@ -102,7 +109,7 @@ if len(cp) == 0: return 0 - return max_(abs(sample) for sample in _get_samples(cp, size)) + return __builtin__.max(abs(sample) for sample in _get_samples(cp, size)) def minmax(cp, size): @@ -110,10 +117,8 @@ max_sample, min_sample = 0, 0 for sample in _get_samples(cp, size): - if sample > max_sample: - max_sample = sample - if sample < min_sample: - min_sample = sample + max_sample = __builtin__.max(sample, max_sample) + min_sample = __builtin__.min(sample, min_sample) return min_sample, max_sample @@ -237,6 +242,7 @@ sample_count = _sample_count(cp, size) prevextremevalid = False + prevextreme = None avg = 0 nextreme = 0 @@ -272,6 +278,7 @@ sample_count = _sample_count(cp, size) prevextremevalid = False + prevextreme = None max = 0 prevval = getsample(cp, size, 0) @@ -366,7 +373,6 @@ def add(cp1, cp2, size): _check_params(len(cp1), size) - maxval = _get_maxval(size) if len(cp1) != len(cp2): raise error("Lengths should be the same") From noreply at buildbot.pypy.org Tue Feb 18 20:00:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 18 Feb 2014 20:00:08 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Comments Message-ID: <20140218190008.0AF801C15BA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r770:a55008e93bb1 Date: 2014-02-18 19:59 +0100 http://bitbucket.org/pypy/stmgc/changeset/a55008e93bb1/ Log: Comments diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -257,6 +257,9 @@ STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; + /* we did cond_broadcast() above already, in + try_wait_for_other_safe_points(). It may wake up + other threads in cond_wait() for a free segment. */ mutex_unlock(); } diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -243,7 +243,7 @@ static void align_nursery_at_transaction_start(void) { - /* When the transaction start, we must align the 'nursery_current' + /* When the transaction starts, we must align the 'nursery_current' and set creation markers for the part of the section the follows. 
*/ uintptr_t c = (uintptr_t)STM_SEGMENT->nursery_current; diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -227,6 +227,13 @@ if (i == STM_SEGMENT->segment_num) continue; /* ignore myself */ + /* If the other thread is SP_NO_TRANSACTION, then it can be + ignored here: as long as we have the mutex, it will remain + SP_NO_TRANSACTION. If it is already at a suitable safe point, + it must be in a cond_wait(), so it will not resume as long + as we hold the mutex. Thus the only cases is if it is + SP_RUNNING, or at the wrong kind of safe point. + */ struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i); if (other_pseg->safe_point == SP_RUNNING || (requested_safe_point_kind == SP_SAFE_POINT_CAN_COLLECT && From noreply at buildbot.pypy.org Wed Feb 19 06:43:33 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Feb 2014 06:43:33 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Adapt demo2.c. Message-ID: <20140219054333.E3C6A1C3360@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r771:c07b26b32fb1 Date: 2014-02-19 06:43 +0100 http://bitbucket.org/pypy/stmgc/changeset/c07b26b32fb1/ Log: Adapt demo2.c. diff --git a/c7/Makefile b/c7/Makefile --- a/c7/Makefile +++ b/c7/Makefile @@ -14,16 +14,14 @@ rm -f $(BUILD_EXE) $(DEBUG_EXE) $(RELEASE_EXE) -H_FILES = core.h list.h pagecopy.h reader_writer_lock.h stmsync.h pages.h nursery.h largemalloc.h +H_FILES = stmgc.h stm/*.h +C_FILES = stmgc.c stm/*.c -C_FILES = core.c list.c pagecopy.c reader_writer_lock.c stmsync.c pages.c nursery.c largemalloc.c -DEBUG = -g - - -# note that we don't say -DNDEBUG, so that asserts should still be compiled in -# also, all debug code with extra checks but not the debugprints -build-%: %.c ${H_FILES} ${C_FILES} - clang -pthread -g -O1 $< -o build-$* -Wall ${C_FILES} -release-%: %.c ${H_FILES} ${C_FILES} - clang -pthread -g -DNDEBUG -O2 $< -o release-$* -Wall ${C_FILES} +# note that 'build' is optimized but still contains all asserts +debug-%: %.c ${H_FILES} ${C_FILES} + clang -pthread -g $< -o debug-$* -Wall -Werror stmgc.c +build-%: %.c ${H_FILES} ${C_FILES} + clang -pthread -g -O1 $< -o build-$* -Wall stmgc.c +release-%: %.c ${H_FILES} ${C_FILES} + clang -pthread -g -DNDEBUG -O2 $< -o release-$* -Wall stmgc.c diff --git a/c7/demo2.c b/c7/demo2.c --- a/c7/demo2.c +++ b/c7/demo2.c @@ -4,8 +4,7 @@ #include #include -#include "core.h" -#include "stmsync.h" +#include "stmgc.h" #define LIST_LENGTH 6000 #define BUNCH 400 @@ -20,8 +19,13 @@ nodeptr_t next; }; +__thread stm_thread_local_t stm_thread_local; -size_t stmcb_size(struct object_s *ob) +#define PUSH_ROOT(p) (void)0 // XXX... +#define POP_ROOT(p) (void)0 // XXX... 
+ + +ssize_t stmcb_size_rounded_up(struct object_s *ob) { return sizeof(struct node_s); } @@ -34,97 +38,88 @@ } -nodeptr_t global_chained_list = NULL; +nodeptr_t global_chained_list; -long check_sorted() +long check_sorted(void) { nodeptr_t r_n; long prev, sum; - jmpbufptr_t here; + stm_jmpbuf_t here; - back: - if (__builtin_setjmp(here) == 0) { - stm_start_transaction(&here); - - stm_read((objptr_t)global_chained_list); - r_n = global_chained_list; - assert(r_n->value == -1); - - prev = -1; - sum = 0; - while (r_n->next) { - r_n = r_n->next; - stm_read((objptr_t)r_n); - sum += r_n->value; + STM_START_TRANSACTION(&stm_thread_local, here); - _stm_start_safe_point(0); - _stm_stop_safe_point(0); - if (prev >= r_n->value) { - stm_stop_transaction(); - return -1; - } - - prev = r_n->value; + stm_read((objptr_t)global_chained_list); + r_n = global_chained_list; + assert(r_n->value == -1); + + prev = -1; + sum = 0; + while (r_n->next) { + r_n = r_n->next; + stm_read((objptr_t)r_n); + sum += r_n->value; + + stm_safe_point(); + if (prev >= r_n->value) { + stm_commit_transaction(); + return -1; } - stm_stop_transaction(); - return sum; + prev = r_n->value; } - goto back; + + stm_commit_transaction(); + return sum; } nodeptr_t swap_nodes(nodeptr_t initial) { - jmpbufptr_t here; + stm_jmpbuf_t here; assert(initial != NULL); - back: - if (__builtin_setjmp(here) == 0) { - stm_start_transaction(&here); - nodeptr_t prev = initial; - stm_read((objptr_t)prev); - - int i; - for (i=0; inext; - if (current == NULL) { - stm_stop_transaction(); - return NULL; - } - stm_read((objptr_t)current); - nodeptr_t next = current->next; - if (next == NULL) { - stm_stop_transaction(); - return NULL; - } - stm_read((objptr_t)next); - - if (next->value < current->value) { - stm_write((objptr_t)prev); - stm_write((objptr_t)current); - stm_write((objptr_t)next); - - prev->next = next; - current->next = next->next; - next->next = current; - _stm_start_safe_point(0); - _stm_stop_safe_point(0); - } - prev = current; + STM_START_TRANSACTION(&stm_thread_local, here); + + nodeptr_t prev = initial; + stm_read((objptr_t)prev); + + int i; + for (i=0; inext; + if (current == NULL) { + stm_commit_transaction(); + return NULL; } + stm_read((objptr_t)current); + nodeptr_t next = current->next; + if (next == NULL) { + stm_commit_transaction(); + return NULL; + } + stm_read((objptr_t)next); - stm_stop_transaction(); - return prev; + if (next->value < current->value) { + stm_write((objptr_t)prev); + stm_write((objptr_t)current); + stm_write((objptr_t)next); + + prev->next = next; + current->next = next->next; + next->next = current; + + stm_safe_point(); + } + prev = current; } - goto back; + + stm_commit_transaction(); + return prev; } - -void bubble_run() +void bubble_run(void) { nodeptr_t r_current; @@ -136,25 +131,25 @@ /* initialize list with values in decreasing order */ -void setup_list() +void setup_list(void) { int i; nodeptr_t w_newnode, w_prev; - stm_start_transaction(NULL); + stm_start_inevitable_transaction(&stm_thread_local); global_chained_list = (nodeptr_t)stm_allocate(sizeof(struct node_s)); global_chained_list->value = -1; global_chained_list->next = NULL; - - stm_push_root((objptr_t)global_chained_list); - + + PUSH_ROOT(global_chained_list); + w_prev = global_chained_list; for (i = 0; i < LIST_LENGTH; i++) { - stm_push_root((objptr_t)w_prev); + PUSH_ROOT(w_prev); w_newnode = (nodeptr_t)stm_allocate(sizeof(struct node_s)); - - w_prev = (nodeptr_t)stm_pop_root(); + + POP_ROOT(w_prev); w_newnode->value = LIST_LENGTH - i; 
w_newnode->next = NULL; @@ -163,44 +158,31 @@ w_prev = w_newnode; } - _stm_minor_collect(); /* hack.. */ - global_chained_list = (nodeptr_t)stm_pop_root(); - - stm_stop_transaction(); + //_stm_minor_collect(); /* hack.. */ + //POP_ROOT(global_chained_list); --- remains in the shadowstack - - + stm_commit_transaction(); + + printf("setup ok\n"); } static sem_t done; -static sem_t go; -static sem_t initialized; void *demo2(void *arg) { - int status; - if (arg != NULL) { - /* we still need to initialize */ - stm_setup_pthread(); - sem_post(&initialized); - status = sem_wait(&go); - assert(status == 0); - } - + stm_register_thread_local(&stm_thread_local); + while (check_sorted() == -1) { bubble_run(); } - if (arg != NULL) { - status = sem_post(&done); - assert(status == 0); - stm_teardown_pthread(); - } - + assert(stm_thread_local.shadowstack == stm_thread_local.shadowstack_base); + stm_unregister_thread_local(&stm_thread_local); + status = sem_post(&done); assert(status == 0); return NULL; } @@ -236,32 +218,22 @@ { int status; - status = sem_init(&initialized, 0, 0); - assert(status == 0); - status = sem_init(&go, 0, 0); - assert(status == 0); - + status = sem_init(&done, 0, 0); assert(status == 0); + stm_setup(); - stm_setup_pthread(); - - newthread(demo2, (void*)1); - - status = sem_wait(&initialized); - assert(status == 0); + stm_register_thread_local(&stm_thread_local); setup_list(); - status = sem_post(&go); - assert(status == 0); - - demo2(NULL); - - status = sem_wait(&done); - assert(status == 0); - + newthread(demo2, (void*)1); + newthread(demo2, (void*)2); + + status = sem_wait(&done); assert(status == 0); + status = sem_wait(&done); assert(status == 0); + final_check(); - stm_teardown_pthread(); + stm_unregister_thread_local(&stm_thread_local); stm_teardown(); return 0; diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -199,7 +199,7 @@ /* may collect! */ STM_SEGMENT->nursery_current -= size_rounded_up; /* restore correct val */ - if (collectable_safe_point()) + if (_stm_collectable_safe_point()) return (stm_char *)stm_allocate(size_rounded_up); if (size_rounded_up < MEDIUM_OBJECT) { diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -3,6 +3,10 @@ #define NSE_SIGNAL 1 #define NSE_SIGNAL_DONE 2 +#if _STM_NSE_SIGNAL != NSE_SIGNAL +# error "adapt _STM_NSE_SIGNAL" +#endif + /* Rules for 'v_nursery_section_end': - Its main purpose is to be read by the owning thread in stm_allocate(). 
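(Aside, not part of the patch: the changeset above ports demo2.c to the current stmgc API. As a condensed sketch of that calling pattern, using only names that appear in the diffs of this changeset, and leaving out object creation, the PUSH_ROOT/POP_ROOT placeholders and error handling, a thread roughly does:

    #include "stmgc.h"

    __thread stm_thread_local_t stm_thread_local;

    void worker(objptr_t obj)
    {
        stm_jmpbuf_t here;

        stm_register_thread_local(&stm_thread_local);

        /* STM_START_TRANSACTION() contains the setjmp(); an abort
           longjmp()s back here and the transaction body runs again. */
        STM_START_TRANSACTION(&stm_thread_local, here);

        stm_read(obj);     /* before reading fields of 'obj' */
        stm_write(obj);    /* before modifying fields of 'obj' */

        stm_commit_transaction();

        stm_unregister_thread_local(&stm_thread_local);
    }

This is only a sketch of the API as exercised by demo2.c above, not a complete program; see the full diff for stm_setup(), stm_allocate() and the actual thread handling.)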
diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -266,7 +266,7 @@ return true; } -static bool collectable_safe_point(void) +bool _stm_collectable_safe_point(void) { bool any_operation = false; restart:; diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -17,4 +17,3 @@ /* see the source for an exact description */ static bool try_wait_for_other_safe_points(int requested_safe_point_kind); -static bool collectable_safe_point(void); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -82,6 +82,7 @@ stm_char *_stm_allocate_slowpath(ssize_t); void _stm_become_inevitable(char*); void _stm_start_transaction(stm_thread_local_t *, stm_jmpbuf_t *); +bool _stm_collectable_safe_point(void); #ifdef STM_TESTS bool _stm_was_read(object_t *obj); @@ -100,6 +101,7 @@ #endif #define _STM_GCFLAG_WRITE_BARRIER_CALLED 0x80 +#define _STM_NSE_SIGNAL 1 #define STM_FLAGS_PREBUILT 0 @@ -208,13 +210,13 @@ void stm_register_thread_local(stm_thread_local_t *tl); void stm_unregister_thread_local(stm_thread_local_t *tl); -/* Starting and ending transactions. You should only call stm_read(), - stm_write() and stm_allocate() from within a transaction. Use - the macro STM_START_TRANSACTION() to start a transaction that +/* Starting and ending transactions. stm_read(), stm_write() and + stm_allocate() should only be called from within a transaction. + Use the macro STM_START_TRANSACTION() to start a transaction that can be restarted using the 'jmpbuf' (a local variable of type stm_jmpbuf_t). */ #define STM_START_TRANSACTION(tl, jmpbuf) ({ \ - int _restart = __builtin_setjmp(&jmpbuf); \ + int _restart = __builtin_setjmp(jmpbuf); \ _stm_start_transaction(tl, &jmpbuf); \ _restart; \ }) @@ -239,6 +241,13 @@ _stm_become_inevitable(msg); } +/* Forces a safe-point if needed. Normally not needed: this is + automatic if you call stm_allocate(). */ +static inline void stm_safe_point(void) { + if (STM_SEGMENT->v_nursery_section_end == _STM_NSE_SIGNAL) + _stm_collectable_safe_point(); +} + /* ==================== END ==================== */ From noreply at buildbot.pypy.org Wed Feb 19 07:12:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Feb 2014 07:12:15 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: _pages_privatize() with count > 1 Message-ID: <20140219061215.B8B261C0282@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r772:902061e5b47e Date: 2014-02-19 07:12 +0100 http://bitbucket.org/pypy/stmgc/changeset/902061e5b47e/ Log: _pages_privatize() with count > 1 diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -25,37 +25,8 @@ } } -static void _pages_privatize(uintptr_t pagenum, uintptr_t count) +static void privatize_range_and_unlock(uintptr_t pagenum, uintptr_t count) { - assert(count == 1); /* XXX */ - -#ifdef HAVE_FULL_EXCHANGE_INSN - /* use __sync_lock_test_and_set() as a cheaper alternative to - __sync_bool_compare_and_swap(). 
*/ - int previous = __sync_lock_test_and_set(&flag_page_private[pagenum], - REMAPPING_PAGE); - assert(previous != FREE_PAGE); - if (previous == PRIVATE_PAGE) { - flag_page_private[pagenum] = PRIVATE_PAGE; - return; - } - bool was_shared = (previous == SHARED_PAGE); -#else - bool was_shared = __sync_bool_compare_and_swap(&flag_page_private[pagenum], - SHARED_PAGE, REMAPPING_PAGE); -#endif - if (!was_shared) { - while (1) { - uint8_t state = ((uint8_t volatile *)flag_page_private)[pagenum]; - if (state != REMAPPING_PAGE) { - assert(state == PRIVATE_PAGE); - break; - } - spin_loop(); - } - return; - } - ssize_t pgoff1 = pagenum; ssize_t pgoff2 = pagenum + NB_PAGES; ssize_t localpgoff = pgoff1 + NB_PAGES * STM_SEGMENT->segment_num; @@ -64,18 +35,71 @@ void *localpg = stm_object_pages + localpgoff * 4096UL; void *otherpg = stm_object_pages + otherpgoff * 4096UL; - // XXX should not use pgoff2, but instead the next unused page in - // thread 2, so that after major GCs the next dirty pages are the - // same as the old ones - int res = remap_file_pages(localpg, 4096, 0, pgoff2, 0); + int res = remap_file_pages(localpg, count * 4096, 0, pgoff2, 0); if (res < 0) { perror("remap_file_pages"); abort(); } - pagecopy(localpg, otherpg); + uintptr_t i; + for (i = 0; i < count; i++) { + pagecopy(localpg + 4096 * i, otherpg + 4096 * i); + } write_fence(); - assert(flag_page_private[pagenum] == REMAPPING_PAGE); - flag_page_private[pagenum] = PRIVATE_PAGE; + for (i = 0; i < count; i++) { + assert(flag_page_private[pagenum + i] == REMAPPING_PAGE); + flag_page_private[pagenum + i] = PRIVATE_PAGE; + } +} + +static void _pages_privatize(uintptr_t pagenum, uintptr_t count) +{ + uintptr_t page_start_range = pagenum; + uintptr_t pagestop = pagenum + count; + + while (flag_page_private[pagenum + count - 1] == PRIVATE_PAGE) { + if (!--count) + return; + } + + for (; pagenum < pagestop; pagenum++) { +#ifdef HAVE_FULL_EXCHANGE_INSN + /* use __sync_lock_test_and_set() as a cheaper alternative to + __sync_bool_compare_and_swap(). 
*/ + int prev = __sync_lock_test_and_set(&flag_page_private[pagenum], + REMAPPING_PAGE); + assert(prev != FREE_PAGE); + if (prev == PRIVATE_PAGE) { + flag_page_private[pagenum] = PRIVATE_PAGE; + } + bool was_shared = (prev == SHARED_PAGE); +#else + bool was_shared = __sync_bool_compare_and_swap( + &flag_page_private[pagenum + cnt1], + SHARED_PAGE, REMAPPING_PAGE); +#endif + if (!was_shared) { + if (pagenum > page_start_range) { + privatize_range_and_unlock(page_start_range, + pagenum - page_start_range); + } + page_start_range = pagenum + 1; + + while (1) { + uint8_t state; + state = ((uint8_t volatile *)flag_page_private)[pagenum]; + if (state != REMAPPING_PAGE) { + assert(state == PRIVATE_PAGE); + break; + } + spin_loop(); + } + } + } + + if (pagenum > page_start_range) { + privatize_range_and_unlock(page_start_range, + pagenum - page_start_range); + } } static void set_creation_markers(stm_char *p, uint64_t size, int newvalue) diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -22,9 +22,10 @@ static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); inline static void pages_privatize(uintptr_t pagenum, uintptr_t count) { - while (flag_page_private[pagenum + count - 1] == PRIVATE_PAGE) { + while (flag_page_private[pagenum] == PRIVATE_PAGE) { if (!--count) return; + pagenum++; } _pages_privatize(pagenum, count); } diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -320,13 +320,13 @@ stm_read(obj) return lib._get_ptr(obj, idx) -def stm_set_char(obj, c): +def stm_set_char(obj, c, offset=HDR): stm_write(obj) - stm_get_real_address(obj)[HDR] = c + stm_get_real_address(obj)[offset] = c -def stm_get_char(obj): +def stm_get_char(obj, offset=HDR): stm_read(obj) - return stm_get_real_address(obj)[HDR] + return stm_get_real_address(obj)[offset] def stm_get_real_address(obj): return lib._stm_real_address(ffi.cast('object_t*', obj)) diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -158,7 +158,6 @@ # visible from the other thread. self.start_transaction() lpx = stm_allocate(16) - print lpx stm_set_char(lpx, '.') self.commit_transaction() @@ -168,9 +167,7 @@ self.start_transaction() lpy = stm_allocate(16) - print lpy stm_set_char(lpy, 'y') - print "LAST COMMIT" self.commit_transaction() self.switch(1) @@ -379,6 +376,32 @@ self.start_transaction() assert stm_get_char(lp1) == 'b' + def test_object_on_two_pages(self): + self.start_transaction() + lp1 = stm_allocate(4104) + stm_set_char(lp1, '0') + stm_set_char(lp1, '1', offset=4103) + self.commit_transaction() + # + self.start_transaction() + stm_set_char(lp1, 'a') + stm_set_char(lp1, 'b', offset=4103) + # + self.switch(1) + self.start_transaction() + assert stm_get_char(lp1) == '0' + assert stm_get_char(lp1, offset=4103) == '1' + self.commit_transaction() + # + self.switch(0) + self.commit_transaction() + # + self.switch(1) + self.start_transaction() + assert stm_get_char(lp1) == 'a' + assert stm_get_char(lp1, offset=4103) == 'b' + self.commit_transaction() + # def test_resolve_write_write_no_conflict(self): # self.start_transaction() # p1 = stm_allocate(16) From noreply at buildbot.pypy.org Wed Feb 19 07:19:32 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Feb 2014 07:19:32 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Move the demo2.c in the new "demo" subdirectory. 
It works :-) Message-ID: <20140219061932.E71F91C0282@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r773:5f576a000c84 Date: 2014-02-19 07:18 +0100 http://bitbucket.org/pypy/stmgc/changeset/5f576a000c84/ Log: Move the demo2.c in the new "demo" subdirectory. It works :-) diff --git a/c7/Makefile b/c7/demo/Makefile rename from c7/Makefile rename to c7/demo/Makefile --- a/c7/Makefile +++ b/c7/demo/Makefile @@ -14,14 +14,14 @@ rm -f $(BUILD_EXE) $(DEBUG_EXE) $(RELEASE_EXE) -H_FILES = stmgc.h stm/*.h -C_FILES = stmgc.c stm/*.c +H_FILES = ../stmgc.h ../stm/*.h +C_FILES = ../stmgc.c ../stm/*.c # note that 'build' is optimized but still contains all asserts debug-%: %.c ${H_FILES} ${C_FILES} - clang -pthread -g $< -o debug-$* -Wall -Werror stmgc.c + clang -I.. -pthread -g $< -o debug-$* -Wall -Werror ../stmgc.c build-%: %.c ${H_FILES} ${C_FILES} - clang -pthread -g -O1 $< -o build-$* -Wall stmgc.c + clang -I.. -pthread -g -O1 $< -o build-$* -Wall ../stmgc.c release-%: %.c ${H_FILES} ${C_FILES} - clang -pthread -g -DNDEBUG -O2 $< -o release-$* -Wall stmgc.c + clang -I.. -pthread -g -DNDEBUG -O2 $< -o release-$* -Wall ../stmgc.c diff --git a/c7/demo2.c b/c7/demo/demo2.c rename from c7/demo2.c rename to c7/demo/demo2.c From noreply at buildbot.pypy.org Wed Feb 19 07:36:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Feb 2014 07:36:15 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Workaround for llvm bug. :-/ Message-ID: <20140219063615.EC24E1C0282@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r774:8837b8f858b1 Date: 2014-02-19 07:36 +0100 http://bitbucket.org/pypy/stmgc/changeset/8837b8f858b1/ Log: Workaround for llvm bug. :-/ diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -119,3 +119,7 @@ static void teardown_core(void); static void abort_with_mutex(void) __attribute__((noreturn)); + +static inline void _duck(void) { + asm("/* workaround for llvm bug */"); +} diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -189,7 +189,7 @@ dataofs += 256; } } - p[0] = 0; + p[0] = 0; _duck(); p[1] = 0; p += 2; } From noreply at buildbot.pypy.org Wed Feb 19 07:43:03 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Feb 2014 07:43:03 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Comment Message-ID: <20140219064303.4F5A01C0282@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r775:e39e716e910a Date: 2014-02-19 07:42 +0100 http://bitbucket.org/pypy/stmgc/changeset/e39e716e910a/ Log: Comment diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -121,5 +121,8 @@ static void abort_with_mutex(void) __attribute__((noreturn)); static inline void _duck(void) { + /* put a call to _duck() between two instructions that set 0 into + a %gs-prefixed address and that may otherwise be replaced with + llvm.memset --- it fails later because of the prefix... 
*/ asm("/* workaround for llvm bug */"); } From noreply at buildbot.pypy.org Wed Feb 19 09:05:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Feb 2014 09:05:10 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Silence a specific warning in release builds: 'unused function xyz' Message-ID: <20140219080510.AB21E1C0282@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r776:f5c7556d9bbd Date: 2014-02-19 09:04 +0100 http://bitbucket.org/pypy/stmgc/changeset/f5c7556d9bbd/ Log: Silence a specific warning in release builds: 'unused function xyz' diff --git a/c7/demo/Makefile b/c7/demo/Makefile --- a/c7/demo/Makefile +++ b/c7/demo/Makefile @@ -24,4 +24,5 @@ build-%: %.c ${H_FILES} ${C_FILES} clang -I.. -pthread -g -O1 $< -o build-$* -Wall ../stmgc.c release-%: %.c ${H_FILES} ${C_FILES} - clang -I.. -pthread -g -DNDEBUG -O2 $< -o release-$* -Wall ../stmgc.c + clang -I.. -pthread -g -DNDEBUG -O2 $< -o release-$* \ + -Wall -Wno-unused-function ../stmgc.c From noreply at buildbot.pypy.org Wed Feb 19 09:45:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Feb 2014 09:45:04 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Add an LLVM patch. It might not be needed on simple examples, but I Message-ID: <20140219084504.ED2461C358C@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r777:b5c61f587a33 Date: 2014-02-19 09:44 +0100 http://bitbucket.org/pypy/stmgc/changeset/b5c61f587a33/ Log: Add an LLVM patch. It might not be needed on simple examples, but I fear that it will be needed to compile large programs. diff --git a/c7/llvmfix/no-memset-creation-with-addrspace.diff b/c7/llvmfix/no-memset-creation-with-addrspace.diff new file mode 100644 --- /dev/null +++ b/c7/llvmfix/no-memset-creation-with-addrspace.diff @@ -0,0 +1,16 @@ +Index: lib/Transforms/Scalar/MemCpyOptimizer.cpp +=================================================================== +--- lib/Transforms/Scalar/MemCpyOptimizer.cpp (revision 201645) ++++ lib/Transforms/Scalar/MemCpyOptimizer.cpp (working copy) +@@ -368,6 +368,11 @@ + Value *StartPtr, Value *ByteVal) { + if (TD == 0) return 0; + ++ // We have to check for address space < 256, since llvm.memset only supports ++ // user defined address spaces. ++ if (cast(StartPtr->getType())->getAddressSpace() >= 256) ++ return 0; ++ + // Okay, so we now have a single store that can be splatable. Scan to find + // all subsequent stores of the same value to offset from the same pointer. + // Join these together into ranges, so we can decide whether contiguous blocks diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -123,6 +123,8 @@ static inline void _duck(void) { /* put a call to _duck() between two instructions that set 0 into a %gs-prefixed address and that may otherwise be replaced with - llvm.memset --- it fails later because of the prefix... */ + llvm.memset --- it fails later because of the prefix... + This is not needed any more after applying the patch + llvmfix/no-memset-creation-with-addrspace.diff. */ asm("/* workaround for llvm bug */"); } From noreply at buildbot.pypy.org Wed Feb 19 10:41:31 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Feb 2014 10:41:31 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: In case of abort, restore explicitly the position of the shadowstack. 
Message-ID: <20140219094131.7B0A21C1041@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r778:44b81083d7cf Date: 2014-02-19 10:41 +0100 http://bitbucket.org/pypy/stmgc/changeset/44b81083d7cf/ Log: In case of abort, restore explicitly the position of the shadowstack. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -119,6 +119,7 @@ STM_PSEGMENT->transaction_state = (jmpbuf != NULL ? TS_REGULAR : TS_INEVITABLE); STM_SEGMENT->jmpbuf_ptr = jmpbuf; + STM_PSEGMENT->shadowstack_at_start_of_transaction = tl->shadowstack; mutex_unlock(); @@ -330,6 +331,7 @@ stm_jmpbuf_t *jmpbuf_ptr = STM_SEGMENT->jmpbuf_ptr; stm_thread_local_t *tl = STM_SEGMENT->running_thread; + tl->shadowstack = STM_PSEGMENT->shadowstack_at_start_of_transaction; release_thread_segment(tl); STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -63,6 +63,7 @@ uint8_t safe_point; /* one of the SP_xxx constants */ uint8_t transaction_state; /* one of the TS_xxx constants */ uintptr_t real_nursery_section_end; + object_t **shadowstack_at_start_of_transaction; }; enum { From noreply at buildbot.pypy.org Wed Feb 19 10:44:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Feb 2014 10:44:42 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: A test for 44b81083d7cf. Message-ID: <20140219094442.91DBD1C033D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r779:77f7aa36c2c7 Date: 2014-02-19 10:44 +0100 http://bitbucket.org/pypy/stmgc/changeset/77f7aa36c2c7/ Log: A test for 44b81083d7cf. diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -291,6 +291,9 @@ class Conflict(Exception): pass +class EmptyStack(Exception): + pass + def is_in_nursery(o): return lib._stm_in_nursery(o) @@ -464,6 +467,8 @@ def pop_root(self): tl = self.tls[self.current_thread] curlength = tl.shadowstack - tl.shadowstack_base + if curlength == 0: + raise EmptyStack assert 0 < curlength <= SHADOWSTACK_LENGTH tl.shadowstack -= 1 return ffi.cast("object_t *", tl.shadowstack[0]) diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -402,6 +402,12 @@ assert stm_get_char(lp1, offset=4103) == 'b' self.commit_transaction() + def test_abort_restores_shadowstack(self): + self.start_transaction() + self.push_root(ffi.cast("object_t *", 0)) + self.abort_transaction() + py.test.raises(EmptyStack, self.pop_root) + # def test_resolve_write_write_no_conflict(self): # self.start_transaction() # p1 = stm_allocate(16) From noreply at buildbot.pypy.org Wed Feb 19 11:23:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Feb 2014 11:23:10 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Make and run "demo2" in a test. Message-ID: <20140219102310.1EA971D257B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r780:dde898021541 Date: 2014-02-19 11:22 +0100 http://bitbucket.org/pypy/stmgc/changeset/dde898021541/ Log: Make and run "demo2" in a test. 
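
    (The test added below simply shells out to make and fails if either the build
    or the run exits with a nonzero status. As a rough, hypothetical sketch, not
    part of the commit, the same build-and-run cycle for the three demo2 targets
    can be reproduced from the c7/test directory with plain subprocess calls,
    relying only on the debug-/build-/release- pattern rules of the c7/demo
    Makefile shown earlier in this digest:

        import subprocess

        # Build and run the three demo2 variants, assuming the current working
        # directory is c7/test and the c7/demo Makefile sits one level up.
        for target in ["debug-demo2", "build-demo2", "release-demo2"]:
            subprocess.check_call(["make", "-C", "../demo", target])
            subprocess.check_call(["../demo/" + target])

    check_call() raises CalledProcessError on any nonzero exit status, which is
    roughly how the committed test turns a failing command into a test failure;
    the actual test_demo.py follows in the diff below.)
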
diff --git a/c7/test/test_demo.py b/c7/test/test_demo.py new file mode 100644 --- /dev/null +++ b/c7/test/test_demo.py @@ -0,0 +1,18 @@ +import py +import os + + +class TestDemo: + + def _do(self, cmd): + print cmd + err = os.system(cmd) + if err: py.test.fail("'%s' failed (result %r)" % (cmd, err)) + + def make_and_run(self, target): + self._do("make -C ../demo %s" % target) + self._do("../demo/%s" % target) + + def test_demo2_debug(self): self.make_and_run("debug-demo2") + def test_demo2_build(self): self.make_and_run("build-demo2") + def test_demo2_release(self): self.make_and_run("release-demo2") From noreply at buildbot.pypy.org Wed Feb 19 11:28:15 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 19 Feb 2014 11:28:15 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: more info in test_random output Message-ID: <20140219102816.00DC81D2547@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r781:0fa13ec9be0d Date: 2014-02-19 11:28 +0100 http://bitbucket.org/pypy/stmgc/changeset/0fa13ec9be0d/ Log: more info in test_random output diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -19,17 +19,17 @@ return "lp%d" % _root_numbering _global_time = 0 -def contention_management(our_trs, other_trs, wait=False): +def contention_management(our_trs, other_trs, wait=False, objs_in_conflict=None): if other_trs.start_time < our_trs.start_time: pass else: - other_trs.must_abort = True + other_trs.set_must_abort(objs_in_conflict) - if not other_trs.must_abort: - our_trs.must_abort = True + if not other_trs.check_must_abort(): + our_trs.set_must_abort(objs_in_conflict) elif wait: # abort anyway: - our_trs.must_abort = True + our_trs.set_must_abort(objs_in_conflict) class TransactionState(object): @@ -38,9 +38,18 @@ self.read_set = set() self.write_set = set() self.values = {} - self.must_abort = False + self._must_abort = False self.start_time = start_time + self.objs_in_conflict = set() + def set_must_abort(self, objs_in_conflict=None): + if objs_in_conflict is not None: + self.objs_in_conflict |= objs_in_conflict + self._must_abort = True + + def check_must_abort(self): + return self._must_abort + def has_conflict_with(self, committed): return bool(self.read_set & committed.write_set) @@ -53,8 +62,9 @@ self.values.update(committed.values) if self.has_conflict_with(committed): - contention_management(self, committed) - return self.must_abort + contention_management(self, committed, + objs_in_conflict=self.read_set & committed.write_set) + return self.check_must_abort() def read_root(self, r): self.read_set.add(r) @@ -137,7 +147,7 @@ trs = self.transaction_state gtrs = self.global_state.committed_transaction_state self.global_state.check_for_write_read_conflicts(trs) - conflicts = trs.must_abort + conflicts = trs.check_must_abort() if not conflicts: # update global committed state w/o conflict assert not gtrs.update_from_committed(trs) @@ -146,7 +156,7 @@ return conflicts def abort_transaction(self): - assert self.transaction_state.must_abort + assert self.transaction_state.check_must_abort() self.roots_on_stack = self.roots_on_transaction_start del self.saved_roots[self.roots_on_stack:] self.transaction_state = None @@ -161,41 +171,48 @@ self.committed_transaction_state = TransactionState(0) def push_state_to_other_threads(self, tr_state): - assert not tr_state.must_abort + assert not tr_state.check_must_abort() for ts in self.thread_states: other_trs = ts.transaction_state if other_trs is 
None or other_trs is tr_state: continue other_trs.update_from_committed(tr_state, only_new=True) - if tr_state.must_abort: - self.ex.do('# conflict while pushing to other threads') + if tr_state.check_must_abort(): + self.ex.do('# conflict while pushing to other threads: %s' % + tr_state.objs_in_conflict) def check_for_write_write_conflicts(self, tr_state): - assert not tr_state.must_abort + assert not tr_state.check_must_abort() + for ts in self.thread_states: + other_trs = ts.transaction_state + if other_trs is None or other_trs is tr_state: + continue + + confl_set = other_trs.write_set & tr_state.write_set + if confl_set: + contention_management(tr_state, other_trs, True, + objs_in_conflict=confl_set) + + if tr_state.check_must_abort(): + self.ex.do('# write-write conflict: %s' % + tr_state.objs_in_conflict) + + def check_for_write_read_conflicts(self, tr_state): + assert not tr_state.check_must_abort() for ts in self.thread_states: other_trs = ts.transaction_state if other_trs is None or other_trs is tr_state: continue - if other_trs.write_set & tr_state.write_set: - contention_management(tr_state, other_trs, True) + confl_set = other_trs.read_set & tr_state.write_set + if confl_set: + contention_management(tr_state, other_trs, + objs_in_conflict=confl_set) - if tr_state.must_abort: - self.ex.do('# write-write conflict') - - def check_for_write_read_conflicts(self, tr_state): - assert not tr_state.must_abort - for ts in self.thread_states: - other_trs = ts.transaction_state - if other_trs is None or other_trs is tr_state: - continue - - if other_trs.read_set & tr_state.write_set: - contention_management(tr_state, other_trs) - - if tr_state.must_abort: - self.ex.do('# write-read conflict') + if tr_state.check_must_abort(): + self.ex.do('# write-read conflict: %s' % + tr_state.objs_in_conflict) # ========== STM OPERATIONS ========== @@ -248,7 +265,7 @@ trs.write_root(r, v) global_state.check_for_write_write_conflicts(trs) - if trs.must_abort: + if trs.check_must_abort(): thread_state.abort_transaction() ex.do("py.test.raises(Conflict, stm_set_char, %s, %s)" % (r, repr(chr(v)))) else: @@ -265,7 +282,7 @@ class OpSwitchThread(Operation): def do(self, ex, global_state, thread_state): trs = thread_state.transaction_state - conflicts = trs is not None and trs.must_abort + conflicts = trs is not None and trs.check_must_abort() # if conflicts: thread_state.abort_transaction() From noreply at buildbot.pypy.org Wed Feb 19 12:16:20 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Feb 2014 12:16:20 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Write down what a irc user needed to do in order to get numpy running Message-ID: <20140219111620.D2C771C04FF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5156:69b48a914860 Date: 2014-02-19 12:16 +0100 http://bitbucket.org/pypy/extradoc/changeset/69b48a914860/ Log: Write down what a irc user needed to do in order to get numpy running diff --git a/blog/draft/running-numpy.rst b/blog/draft/running-numpy.rst new file mode 100644 --- /dev/null +++ b/blog/draft/running-numpy.rst @@ -0,0 +1,9 @@ +How to install NumPy on PyPy +============================ + +* apt-get install pypy-dev + +* git clone https://bitbucket.org/pypy/numpy.git; cd numpy; + sudo pypy setup.py install + +* sudo pypy -c 'import numpy' # only once From noreply at buildbot.pypy.org Wed Feb 19 15:43:13 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 19 Feb 2014 15:43:13 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: add 
objects with refs to test_random Message-ID: <20140219144313.59B831C1041@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r782:4142761d9da0 Date: 2014-02-19 15:42 +0100 http://bitbucket.org/pypy/stmgc/changeset/4142761d9da0/ Log: add objects with refs to test_random diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -303,6 +303,12 @@ lib._set_type_id(o, tid) return o +def stm_allocate_old_refs(n): + o = lib._stm_allocate_old(HDR + n * WORD) + tid = 421420 + n + lib._set_type_id(o, tid) + return o + def stm_allocate(size): o = lib.stm_allocate(size) tid = 42 + size diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -13,10 +13,14 @@ exec cmd in globals(), self.content _root_numbering = 0 -def get_new_root_name(): +is_ref_type_map = {} +def get_new_root_name(is_ref_type): global _root_numbering _root_numbering += 1 - return "lp%d" % _root_numbering + r = "lp%d" % _root_numbering + is_ref_type_map[r] = is_ref_type + return r + _global_time = 0 def contention_management(our_trs, other_trs, wait=False, objs_in_conflict=None): @@ -124,7 +128,7 @@ def update_roots(self, ex): assert self.roots_on_stack == self.roots_on_transaction_start - for r in self.saved_roots[::-1]: + for r in reversed(self.saved_roots): ex.do('%s = self.pop_root()' % r) self.roots_on_stack -= 1 assert self.roots_on_stack == 0 @@ -244,7 +248,7 @@ class OpAllocate(Operation): def do(self, ex, global_state, thread_state): - r = get_new_root_name() + r = get_new_root_name(False) thread_state.push_roots(ex) ex.do('%s = stm_allocate(16)' % r) assert thread_state.transaction_state.write_root(r, 0) is None @@ -252,32 +256,59 @@ thread_state.pop_roots(ex) thread_state.register_root(r) +class OpAllocateRef(Operation): + def do(self, ex, global_state, thread_state): + r = get_new_root_name(True) + thread_state.push_roots(ex) + ex.do('%s = stm_allocate_refs(1)' % r) + assert thread_state.transaction_state.write_root(r, "ffi.NULL") is None + + thread_state.pop_roots(ex) + thread_state.register_root(r) + + class OpForgetRoot(Operation): def do(self, ex, global_state, thread_state): r = thread_state.forget_random_root() ex.do('# forget %s' % r) -class OpSetChar(Operation): +class OpWrite(Operation): def do(self, ex, global_state, thread_state): r = thread_state.get_random_root() - v = ord(global_state.rnd.choice("abcdefghijklmnop")) + if is_ref_type_map[r]: + v = thread_state.get_random_root() + else: + v = ord(global_state.rnd.choice("abcdefghijklmnop")) trs = thread_state.transaction_state trs.write_root(r, v) global_state.check_for_write_write_conflicts(trs) if trs.check_must_abort(): thread_state.abort_transaction() - ex.do("py.test.raises(Conflict, stm_set_char, %s, %s)" % (r, repr(chr(v)))) + if is_ref_type_map[r]: + ex.do("py.test.raises(Conflict, stm_set_ref, %s, 0, %s)" % (r, v)) + else: + ex.do("py.test.raises(Conflict, stm_set_char, %s, %s)" % (r, repr(chr(v)))) else: - ex.do("stm_set_char(%s, %s)" % (r, repr(chr(v)))) + if is_ref_type_map[r]: + ex.do("stm_set_ref(%s, 0, %s)" % (r, v)) + else: + ex.do("stm_set_char(%s, %s)" % (r, repr(chr(v)))) -class OpGetChar(Operation): +class OpRead(Operation): def do(self, ex, global_state, thread_state): r = thread_state.get_random_root() trs = thread_state.transaction_state v = trs.read_root(r) # - ex.do("assert stm_get_char(%s) == %s" % (r, repr(chr(v)))) + if is_ref_type_map[r]: + if v in thread_state.saved_roots or v in 
global_state.shared_roots: + ex.do("assert stm_get_ref(%s, 0) == %s" % (r, v)) + else: + # we still need to read it (as it is in the read-set): + ex.do("stm_get_ref(%s, 0)" % r) + else: + ex.do("assert stm_get_char(%s) == %s" % (r, repr(chr(v)))) class OpSwitchThread(Operation): def do(self, ex, global_state, thread_state): @@ -298,7 +329,7 @@ def test_fixed_16_bytes_objects(self, seed=1010): rnd = random.Random(seed) - N_OBJECTS = 5 + N_OBJECTS = 3 N_THREADS = 2 ex = Exec(self) ex.do(""" @@ -316,10 +347,15 @@ curr_thread = global_state.thread_states[0] for i in range(N_OBJECTS): - r = get_new_root_name() + r = get_new_root_name(False) ex.do('%s = stm_allocate_old(16)' % r) global_state.committed_transaction_state.write_root(r, 0) global_state.shared_roots.append(r) + + r = get_new_root_name(True) + ex.do('%s = stm_allocate_old_refs(1)' % r) + global_state.committed_transaction_state.write_root(r, "ffi.NULL") + global_state.shared_roots.append(r) global_state.committed_transaction_state.write_set = set() global_state.committed_transaction_state.read_set = set() @@ -338,14 +374,28 @@ action = rnd.choice([ OpAllocate, - OpSetChar, - OpSetChar, - OpGetChar, - OpGetChar, + OpAllocateRef, + OpWrite, + OpWrite, + OpWrite, + OpRead, + OpRead, + OpRead, + OpRead, OpCommitTransaction, OpForgetRoot, ]) action().do(ex, global_state, curr_thread) + + for ts in global_state.thread_states: + if ts.transaction_state is not None: + if curr_thread != ts: + ex.do('#') + curr_thread = ts + OpSwitchThread().do(ex, global_state, curr_thread) + if curr_thread.transaction_state: + # could have aborted in the switch() above + OpCommitTransaction().do(ex, global_state, curr_thread) From noreply at buildbot.pypy.org Wed Feb 19 15:53:43 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 19 Feb 2014 15:53:43 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: use SOME_MEDIUM_SIZE and SOME_LARGE_SIZE constants to allocate objects -> fails currently Message-ID: <20140219145343.286D81C1041@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r783:961ea6611d93 Date: 2014-02-19 15:46 +0100 http://bitbucket.org/pypy/stmgc/changeset/961ea6611d93/ Log: use SOME_MEDIUM_SIZE and SOME_LARGE_SIZE constants to allocate objects -> fails currently diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -250,7 +250,12 @@ def do(self, ex, global_state, thread_state): r = get_new_root_name(False) thread_state.push_roots(ex) - ex.do('%s = stm_allocate(16)' % r) + size = global_state.rnd.choice([ + 16, + "SOME_MEDIUM_SIZE+16", + "SOME_LARGE_SIZE+16", + ]) + ex.do('%s = stm_allocate(%s)' % (r, size)) assert thread_state.transaction_state.write_root(r, 0) is None thread_state.pop_roots(ex) From noreply at buildbot.pypy.org Wed Feb 19 15:53:44 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 19 Feb 2014 15:53:44 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: also randomly allocate number of refs per object (no effects so far) Message-ID: <20140219145344.468621C1041@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r784:35224c1ca388 Date: 2014-02-19 15:49 +0100 http://bitbucket.org/pypy/stmgc/changeset/35224c1ca388/ Log: also randomly allocate number of refs per object (no effects so far) diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -265,7 +265,8 @@ def do(self, ex, global_state, thread_state): r = 
get_new_root_name(True) thread_state.push_roots(ex) - ex.do('%s = stm_allocate_refs(1)' % r) + num = global_state.rnd.randrange(1, 10) + ex.do('%s = stm_allocate_refs(%s)' % (r, num)) assert thread_state.transaction_state.write_root(r, "ffi.NULL") is None thread_state.pop_roots(ex) From noreply at buildbot.pypy.org Wed Feb 19 15:53:45 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 19 Feb 2014 15:53:45 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: add explicit aborts to test_random Message-ID: <20140219145345.50EE71C1041@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r785:39016b9afc2a Date: 2014-02-19 15:53 +0100 http://bitbucket.org/pypy/stmgc/changeset/39016b9afc2a/ Log: add explicit aborts to test_random diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -245,6 +245,14 @@ ex.do('py.test.raises(Conflict, self.commit_transaction)') else: ex.do('self.commit_transaction()') + +class OpAbortTransaction(Operation): + def do(self, ex, global_state, thread_state): + thread_state.transaction_state.set_must_abort() + thread_state.abort_transaction() + ex.do('self.abort_transaction()') + + class OpAllocate(Operation): def do(self, ex, global_state, thread_state): @@ -384,11 +392,15 @@ OpWrite, OpWrite, OpWrite, + OpWrite, + OpRead, + OpRead, OpRead, OpRead, OpRead, OpRead, OpCommitTransaction, + OpAbortTransaction, OpForgetRoot, ]) action().do(ex, global_state, curr_thread) From noreply at buildbot.pypy.org Wed Feb 19 16:05:13 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 19 Feb 2014 16:05:13 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: comments for test_random Message-ID: <20140219150513.D84881C358C@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r786:8d4439fd0ad6 Date: 2014-02-19 16:05 +0100 http://bitbucket.org/pypy/stmgc/changeset/8d4439fd0ad6/ Log: comments for test_random diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -12,6 +12,8 @@ print >> sys.stderr, cmd exec cmd in globals(), self.content + + _root_numbering = 0 is_ref_type_map = {} def get_new_root_name(is_ref_type): @@ -24,6 +26,8 @@ _global_time = 0 def contention_management(our_trs, other_trs, wait=False, objs_in_conflict=None): + """exact copy of logic in contention.c""" + if other_trs.start_time < our_trs.start_time: pass else: @@ -37,7 +41,10 @@ class TransactionState(object): - """maintains read/write sets""" + """State of a transaction running in a thread, + e.g. maintains read/write sets. The state will be + discarded on abort or pushed to other threads""" + def __init__(self, start_time): self.read_set = set() self.write_set = set() @@ -83,7 +90,9 @@ class ThreadState(object): - """maintains state for one thread """ + """Maintains state for one thread. Mostly manages things + to be kept between transactions (e.g. saved roots) and + handles discarding/reseting states on transaction abort""" def __init__(self, num, global_state): self.num = num @@ -167,6 +176,10 @@ class GlobalState(object): + """Maintains the global view (in a TransactionState) on + objects and threads. 
It also handles checking for conflicts + between threads and pushing state to other threads""" + def __init__(self, ex, rnd): self.ex = ex self.rnd = rnd @@ -374,10 +387,28 @@ global_state.committed_transaction_state.read_set = set() # random steps: + possible_actions = [ + OpAllocate, + OpAllocateRef, + OpWrite, + OpWrite, + OpWrite, + OpWrite, + OpRead, + OpRead, + OpRead, + OpRead, + OpRead, + OpRead, + OpCommitTransaction, + OpAbortTransaction, + OpForgetRoot, + ] remaining_steps = 200 while remaining_steps > 0: remaining_steps -= 1 + # make sure we are in a transaction: n_thread = rnd.randrange(0, N_THREADS) if n_thread != curr_thread.num: ex.do('#') @@ -386,25 +417,12 @@ if curr_thread.transaction_state is None: OpStartTransaction().do(ex, global_state, curr_thread) - action = rnd.choice([ - OpAllocate, - OpAllocateRef, - OpWrite, - OpWrite, - OpWrite, - OpWrite, - OpRead, - OpRead, - OpRead, - OpRead, - OpRead, - OpRead, - OpCommitTransaction, - OpAbortTransaction, - OpForgetRoot, - ]) + # do something random + action = rnd.choice(possible_actions) action().do(ex, global_state, curr_thread) + # to make sure we don't have aborts in the test's teardown method, + # we will simply stop all running transactions for ts in global_state.thread_states: if ts.transaction_state is not None: if curr_thread != ts: @@ -420,7 +438,7 @@ def _make_fun(seed): def test_fun(self): self.test_fixed_16_bytes_objects(seed) - test_fun.__name__ = 'test_fixed_16_bytes_objects_%d' % seed + test_fun.__name__ = 'test_random_%d' % seed return test_fun for _seed in range(5000, 5100): From noreply at buildbot.pypy.org Wed Feb 19 17:50:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Feb 2014 17:50:17 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Getting started on duhton (nothing done so far) Message-ID: <20140219165017.7FE7E1C033D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r788:81514bb0cb7a Date: 2014-02-19 17:49 +0100 http://bitbucket.org/pypy/stmgc/changeset/81514bb0cb7a/ Log: Getting started on duhton (nothing done so far) diff --git a/duhton/Makefile b/duhton/Makefile --- a/duhton/Makefile +++ b/duhton/Makefile @@ -1,27 +1,20 @@ -C7SOURCES = ../c7/core.c \ - ../c7/pagecopy.c \ - ../c7/list.c \ - ../c7/pages.c \ - ../c7/nursery.c \ - ../c7/stmsync.c \ - ../c7/largemalloc.c \ - ../c7/reader_writer_lock.c +C7SOURCES = ../c7/stmgc.c ../c7/stm/*.c -C7HEADERS = ../c7/*.h +C7HEADERS = ../c7/stmgc.h ../c7/stm/*.h all: duhton_debug duhton duhton: *.c *.h $(C7SOURCES) $(C7HEADERS) - clang -pthread -g -O2 -o duhton *.c $(C7SOURCES) -Wall + clang -pthread -g -O2 -o duhton *.c ../c7/stmgc.c -Wall duhton_release: *.c *.h $(C7SOURCES) $(C7HEADERS) - clang -pthread -g -DNDEBUG -O2 -o duhton_release *.c $(C7SOURCES) -Wall + clang -pthread -g -DNDEBUG -O2 -o duhton_release *.c ../c7/stmgc.c -Wall duhton_debug: *.c *.h $(C7SOURCES) $(C7HEADERS) - clang -pthread -g -DDu_DEBUG -o duhton_debug *.c $(C7SOURCES) -Wall + clang -pthread -g -DDu_DEBUG -o duhton_debug *.c ../c7/stmgc.c -Wall clean: rm -f duhton duhton_debug diff --git a/duhton/duhton.h b/duhton/duhton.h --- a/duhton/duhton.h +++ b/duhton/duhton.h @@ -4,8 +4,7 @@ #include #include #include -#include "../c7/core.h" -#include "../c7/list.h" +#include "../c7/stmgc.h" #define STM 1 /* hackish removal of all read/write barriers. 
synchronization is up to From noreply at buildbot.pypy.org Wed Feb 19 17:50:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Feb 2014 17:50:16 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Add dprintf from stmgc/c4. Message-ID: <20140219165016.5DAA81C033D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r787:c924c44d3852 Date: 2014-02-19 14:32 +0100 http://bitbucket.org/pypy/stmgc/changeset/c924c44d3852/ Log: Add dprintf from stmgc/c4. diff --git a/c7/demo/Makefile b/c7/demo/Makefile --- a/c7/demo/Makefile +++ b/c7/demo/Makefile @@ -20,7 +20,8 @@ # note that 'build' is optimized but still contains all asserts debug-%: %.c ${H_FILES} ${C_FILES} - clang -I.. -pthread -g $< -o debug-$* -Wall -Werror ../stmgc.c + clang -I.. -pthread -DSTM_DEBUGPRINT -g $< -o debug-$* \ + -Wall -Werror ../stmgc.c build-%: %.c ${H_FILES} ${C_FILES} clang -I.. -pthread -g -O1 $< -o build-$* -Wall ../stmgc.c release-%: %.c ${H_FILES} ${C_FILES} diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -2,8 +2,6 @@ # error "must be compiled via stmgc.c" #endif -#include - static uint8_t write_locks[READMARKER_END - READMARKER_START]; @@ -121,6 +119,8 @@ STM_SEGMENT->jmpbuf_ptr = jmpbuf; STM_PSEGMENT->shadowstack_at_start_of_transaction = tl->shadowstack; + dprintf(("start_transaction\n")); + mutex_unlock(); uint8_t old_rv = STM_SEGMENT->transaction_read_version; @@ -241,6 +241,8 @@ goto restart; /* cannot abort any more from here */ + dprintf(("commit_transaction\n")); + assert(STM_PSEGMENT->transaction_state != TS_MUST_ABORT); STM_SEGMENT->jmpbuf_ptr = NULL; @@ -316,6 +318,8 @@ static void abort_with_mutex(void) { + dprintf(("~~~ ABORT\n")); + switch (STM_PSEGMENT->transaction_state) { case TS_REGULAR: case TS_MUST_ABORT: diff --git a/c7/stm/fprintcolor.c b/c7/stm/fprintcolor.c new file mode 100644 --- /dev/null +++ b/c7/stm/fprintcolor.c @@ -0,0 +1,33 @@ +/* ------------------------------------------------------------ */ +#ifdef STM_DEBUGPRINT +/* ------------------------------------------------------------ */ + + +int dprintfcolor(void) +{ + return 31 + STM_SEGMENT->segment_num % 6; +} + +int threadcolor_printf(const char *format, ...) +{ + char buffer[2048]; + va_list ap; + int result; + int size = (int)sprintf(buffer, "\033[%dm", dprintfcolor()); + assert(size >= 0); + + va_start(ap, format); + result = vsnprintf(buffer + size, 2000, format, ap); + assert(result >= 0); + va_end(ap); + + strcpy(buffer + size + result, "\033[0m"); + fputs(buffer, stderr); + + return result; +} + + +/* ------------------------------------------------------------ */ +#endif +/* ------------------------------------------------------------ */ diff --git a/c7/stm/fprintcolor.h b/c7/stm/fprintcolor.h new file mode 100644 --- /dev/null +++ b/c7/stm/fprintcolor.h @@ -0,0 +1,27 @@ +/* ------------------------------------------------------------ */ +#ifdef STM_DEBUGPRINT +/* ------------------------------------------------------------ */ + + +#include + + +#define dprintf(args) threadcolor_printf args +int dprintfcolor(void); + +int threadcolor_printf(const char *format, ...) 
+ __attribute__((format (printf, 1, 2))); + + +/* ------------------------------------------------------------ */ +#else +/* ------------------------------------------------------------ */ + + +#define dprintf(args) do { } while(0) +#define dprintfcolor() 0 + + +/* ------------------------------------------------------------ */ +#endif +/* ------------------------------------------------------------ */ diff --git a/c7/stmgc.c b/c7/stmgc.c --- a/c7/stmgc.c +++ b/c7/stmgc.c @@ -10,6 +10,7 @@ #include "stm/largemalloc.h" #include "stm/nursery.h" #include "stm/contention.h" +#include "stm/fprintcolor.h" #include "stm/misc.c" #include "stm/list.c" @@ -23,3 +24,4 @@ #include "stm/setup.c" #include "stm/core.c" #include "stm/contention.c" +#include "stm/fprintcolor.c" diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -271,7 +271,9 @@ } ''', sources=source_files, - define_macros=[('STM_TESTS', '1'), ('STM_NO_COND_WAIT', '1')], + define_macros=[('STM_TESTS', '1'), + ('STM_NO_COND_WAIT', '1'), + ('STM_DEBUGPRINT', '1')], undef_macros=['NDEBUG'], include_dirs=[parent_dir], extra_compile_args=['-g', '-O0', '-Werror'], From noreply at buildbot.pypy.org Wed Feb 19 17:50:18 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Feb 2014 17:50:18 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: merge heads Message-ID: <20140219165018.938CF1C033D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r789:fa6db780b4a1 Date: 2014-02-19 17:50 +0100 http://bitbucket.org/pypy/stmgc/changeset/fa6db780b4a1/ Log: merge heads diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -305,6 +305,12 @@ lib._set_type_id(o, tid) return o +def stm_allocate_old_refs(n): + o = lib._stm_allocate_old(HDR + n * WORD) + tid = 421420 + n + lib._set_type_id(o, tid) + return o + def stm_allocate(size): o = lib.stm_allocate(size) tid = 42 + size diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -12,35 +12,55 @@ print >> sys.stderr, cmd exec cmd in globals(), self.content + + _root_numbering = 0 -def get_new_root_name(): +is_ref_type_map = {} +def get_new_root_name(is_ref_type): global _root_numbering _root_numbering += 1 - return "lp%d" % _root_numbering + r = "lp%d" % _root_numbering + is_ref_type_map[r] = is_ref_type + return r + _global_time = 0 -def contention_management(our_trs, other_trs, wait=False): +def contention_management(our_trs, other_trs, wait=False, objs_in_conflict=None): + """exact copy of logic in contention.c""" + if other_trs.start_time < our_trs.start_time: pass else: - other_trs.must_abort = True + other_trs.set_must_abort(objs_in_conflict) - if not other_trs.must_abort: - our_trs.must_abort = True + if not other_trs.check_must_abort(): + our_trs.set_must_abort(objs_in_conflict) elif wait: # abort anyway: - our_trs.must_abort = True + our_trs.set_must_abort(objs_in_conflict) class TransactionState(object): - """maintains read/write sets""" + """State of a transaction running in a thread, + e.g. maintains read/write sets. 
The state will be + discarded on abort or pushed to other threads""" + def __init__(self, start_time): self.read_set = set() self.write_set = set() self.values = {} - self.must_abort = False + self._must_abort = False self.start_time = start_time + self.objs_in_conflict = set() + def set_must_abort(self, objs_in_conflict=None): + if objs_in_conflict is not None: + self.objs_in_conflict |= objs_in_conflict + self._must_abort = True + + def check_must_abort(self): + return self._must_abort + def has_conflict_with(self, committed): return bool(self.read_set & committed.write_set) @@ -53,8 +73,9 @@ self.values.update(committed.values) if self.has_conflict_with(committed): - contention_management(self, committed) - return self.must_abort + contention_management(self, committed, + objs_in_conflict=self.read_set & committed.write_set) + return self.check_must_abort() def read_root(self, r): self.read_set.add(r) @@ -69,7 +90,9 @@ class ThreadState(object): - """maintains state for one thread """ + """Maintains state for one thread. Mostly manages things + to be kept between transactions (e.g. saved roots) and + handles discarding/reseting states on transaction abort""" def __init__(self, num, global_state): self.num = num @@ -114,7 +137,7 @@ def update_roots(self, ex): assert self.roots_on_stack == self.roots_on_transaction_start - for r in self.saved_roots[::-1]: + for r in reversed(self.saved_roots): ex.do('%s = self.pop_root()' % r) self.roots_on_stack -= 1 assert self.roots_on_stack == 0 @@ -137,7 +160,7 @@ trs = self.transaction_state gtrs = self.global_state.committed_transaction_state self.global_state.check_for_write_read_conflicts(trs) - conflicts = trs.must_abort + conflicts = trs.check_must_abort() if not conflicts: # update global committed state w/o conflict assert not gtrs.update_from_committed(trs) @@ -146,13 +169,17 @@ return conflicts def abort_transaction(self): - assert self.transaction_state.must_abort + assert self.transaction_state.check_must_abort() self.roots_on_stack = self.roots_on_transaction_start del self.saved_roots[self.roots_on_stack:] self.transaction_state = None class GlobalState(object): + """Maintains the global view (in a TransactionState) on + objects and threads. 
It also handles checking for conflicts + between threads and pushing state to other threads""" + def __init__(self, ex, rnd): self.ex = ex self.rnd = rnd @@ -161,41 +188,48 @@ self.committed_transaction_state = TransactionState(0) def push_state_to_other_threads(self, tr_state): - assert not tr_state.must_abort + assert not tr_state.check_must_abort() for ts in self.thread_states: other_trs = ts.transaction_state if other_trs is None or other_trs is tr_state: continue other_trs.update_from_committed(tr_state, only_new=True) - if tr_state.must_abort: - self.ex.do('# conflict while pushing to other threads') + if tr_state.check_must_abort(): + self.ex.do('# conflict while pushing to other threads: %s' % + tr_state.objs_in_conflict) def check_for_write_write_conflicts(self, tr_state): - assert not tr_state.must_abort + assert not tr_state.check_must_abort() + for ts in self.thread_states: + other_trs = ts.transaction_state + if other_trs is None or other_trs is tr_state: + continue + + confl_set = other_trs.write_set & tr_state.write_set + if confl_set: + contention_management(tr_state, other_trs, True, + objs_in_conflict=confl_set) + + if tr_state.check_must_abort(): + self.ex.do('# write-write conflict: %s' % + tr_state.objs_in_conflict) + + def check_for_write_read_conflicts(self, tr_state): + assert not tr_state.check_must_abort() for ts in self.thread_states: other_trs = ts.transaction_state if other_trs is None or other_trs is tr_state: continue - if other_trs.write_set & tr_state.write_set: - contention_management(tr_state, other_trs, True) + confl_set = other_trs.read_set & tr_state.write_set + if confl_set: + contention_management(tr_state, other_trs, + objs_in_conflict=confl_set) - if tr_state.must_abort: - self.ex.do('# write-write conflict') - - def check_for_write_read_conflicts(self, tr_state): - assert not tr_state.must_abort - for ts in self.thread_states: - other_trs = ts.transaction_state - if other_trs is None or other_trs is tr_state: - continue - - if other_trs.read_set & tr_state.write_set: - contention_management(tr_state, other_trs) - - if tr_state.must_abort: - self.ex.do('# write-read conflict') + if tr_state.check_must_abort(): + self.ex.do('# write-read conflict: %s' % + tr_state.objs_in_conflict) # ========== STM OPERATIONS ========== @@ -224,48 +258,89 @@ ex.do('py.test.raises(Conflict, self.commit_transaction)') else: ex.do('self.commit_transaction()') + +class OpAbortTransaction(Operation): + def do(self, ex, global_state, thread_state): + thread_state.transaction_state.set_must_abort() + thread_state.abort_transaction() + ex.do('self.abort_transaction()') + + class OpAllocate(Operation): def do(self, ex, global_state, thread_state): - r = get_new_root_name() + r = get_new_root_name(False) thread_state.push_roots(ex) - ex.do('%s = stm_allocate(16)' % r) + size = global_state.rnd.choice([ + 16, + "SOME_MEDIUM_SIZE+16", + "SOME_LARGE_SIZE+16", + ]) + ex.do('%s = stm_allocate(%s)' % (r, size)) assert thread_state.transaction_state.write_root(r, 0) is None thread_state.pop_roots(ex) thread_state.register_root(r) +class OpAllocateRef(Operation): + def do(self, ex, global_state, thread_state): + r = get_new_root_name(True) + thread_state.push_roots(ex) + num = global_state.rnd.randrange(1, 10) + ex.do('%s = stm_allocate_refs(%s)' % (r, num)) + assert thread_state.transaction_state.write_root(r, "ffi.NULL") is None + + thread_state.pop_roots(ex) + thread_state.register_root(r) + + class OpForgetRoot(Operation): def do(self, ex, global_state, thread_state): r = 
thread_state.forget_random_root() ex.do('# forget %s' % r) -class OpSetChar(Operation): +class OpWrite(Operation): def do(self, ex, global_state, thread_state): r = thread_state.get_random_root() - v = ord(global_state.rnd.choice("abcdefghijklmnop")) + if is_ref_type_map[r]: + v = thread_state.get_random_root() + else: + v = ord(global_state.rnd.choice("abcdefghijklmnop")) trs = thread_state.transaction_state trs.write_root(r, v) global_state.check_for_write_write_conflicts(trs) - if trs.must_abort: + if trs.check_must_abort(): thread_state.abort_transaction() - ex.do("py.test.raises(Conflict, stm_set_char, %s, %s)" % (r, repr(chr(v)))) + if is_ref_type_map[r]: + ex.do("py.test.raises(Conflict, stm_set_ref, %s, 0, %s)" % (r, v)) + else: + ex.do("py.test.raises(Conflict, stm_set_char, %s, %s)" % (r, repr(chr(v)))) else: - ex.do("stm_set_char(%s, %s)" % (r, repr(chr(v)))) + if is_ref_type_map[r]: + ex.do("stm_set_ref(%s, 0, %s)" % (r, v)) + else: + ex.do("stm_set_char(%s, %s)" % (r, repr(chr(v)))) -class OpGetChar(Operation): +class OpRead(Operation): def do(self, ex, global_state, thread_state): r = thread_state.get_random_root() trs = thread_state.transaction_state v = trs.read_root(r) # - ex.do("assert stm_get_char(%s) == %s" % (r, repr(chr(v)))) + if is_ref_type_map[r]: + if v in thread_state.saved_roots or v in global_state.shared_roots: + ex.do("assert stm_get_ref(%s, 0) == %s" % (r, v)) + else: + # we still need to read it (as it is in the read-set): + ex.do("stm_get_ref(%s, 0)" % r) + else: + ex.do("assert stm_get_char(%s) == %s" % (r, repr(chr(v)))) class OpSwitchThread(Operation): def do(self, ex, global_state, thread_state): trs = thread_state.transaction_state - conflicts = trs is not None and trs.must_abort + conflicts = trs is not None and trs.check_must_abort() # if conflicts: thread_state.abort_transaction() @@ -281,7 +356,7 @@ def test_fixed_16_bytes_objects(self, seed=1010): rnd = random.Random(seed) - N_OBJECTS = 5 + N_OBJECTS = 3 N_THREADS = 2 ex = Exec(self) ex.do(""" @@ -299,18 +374,41 @@ curr_thread = global_state.thread_states[0] for i in range(N_OBJECTS): - r = get_new_root_name() + r = get_new_root_name(False) ex.do('%s = stm_allocate_old(16)' % r) global_state.committed_transaction_state.write_root(r, 0) global_state.shared_roots.append(r) + + r = get_new_root_name(True) + ex.do('%s = stm_allocate_old_refs(1)' % r) + global_state.committed_transaction_state.write_root(r, "ffi.NULL") + global_state.shared_roots.append(r) global_state.committed_transaction_state.write_set = set() global_state.committed_transaction_state.read_set = set() # random steps: + possible_actions = [ + OpAllocate, + OpAllocateRef, + OpWrite, + OpWrite, + OpWrite, + OpWrite, + OpRead, + OpRead, + OpRead, + OpRead, + OpRead, + OpRead, + OpCommitTransaction, + OpAbortTransaction, + OpForgetRoot, + ] remaining_steps = 200 while remaining_steps > 0: remaining_steps -= 1 + # make sure we are in a transaction: n_thread = rnd.randrange(0, N_THREADS) if n_thread != curr_thread.num: ex.do('#') @@ -319,23 +417,28 @@ if curr_thread.transaction_state is None: OpStartTransaction().do(ex, global_state, curr_thread) - action = rnd.choice([ - OpAllocate, - OpSetChar, - OpSetChar, - OpGetChar, - OpGetChar, - OpCommitTransaction, - OpForgetRoot, - ]) + # do something random + action = rnd.choice(possible_actions) action().do(ex, global_state, curr_thread) + + # to make sure we don't have aborts in the test's teardown method, + # we will simply stop all running transactions + for ts in 
global_state.thread_states: + if ts.transaction_state is not None: + if curr_thread != ts: + ex.do('#') + curr_thread = ts + OpSwitchThread().do(ex, global_state, curr_thread) + if curr_thread.transaction_state: + # could have aborted in the switch() above + OpCommitTransaction().do(ex, global_state, curr_thread) def _make_fun(seed): def test_fun(self): self.test_fixed_16_bytes_objects(seed) - test_fun.__name__ = 'test_fixed_16_bytes_objects_%d' % seed + test_fun.__name__ = 'test_random_%d' % seed return test_fun for _seed in range(5000, 5100): From noreply at buildbot.pypy.org Wed Feb 19 18:17:32 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 19 Feb 2014 18:17:32 +0100 (CET) Subject: [pypy-commit] pypy default: enable passing audioop tests Message-ID: <20140219171732.B1B541C0282@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69207:4f635ff9b6f7 Date: 2014-02-19 12:16 -0500 http://bitbucket.org/pypy/pypy/changeset/4f635ff9b6f7/ Log: enable passing audioop tests diff --git a/lib-python/2.7/test/test_audioop.py b/lib-python/2.7/test/test_audioop.py --- a/lib-python/2.7/test/test_audioop.py +++ b/lib-python/2.7/test/test_audioop.py @@ -1,6 +1,6 @@ import audioop import unittest -from test.test_support import run_unittest +from test.test_support import run_unittest, impl_detail endian = 'big' if audioop.getsample('\0\1', 2, 0) == 1 else 'little' @@ -93,21 +93,25 @@ wtd = len(d2)//3 self.assertEqual(len(audioop.lin2lin(d1, got, wtd)), len(d2)) + @impl_detail(pypy=False) def test_adpcm2lin(self): # Very cursory test self.assertEqual(audioop.adpcm2lin(b'\0\0', 1, None), (b'\0' * 4, (0,0))) self.assertEqual(audioop.adpcm2lin(b'\0\0', 2, None), (b'\0' * 8, (0,0))) self.assertEqual(audioop.adpcm2lin(b'\0\0', 4, None), (b'\0' * 16, (0,0))) + @impl_detail(pypy=False) def test_lin2adpcm(self): # Very cursory test self.assertEqual(audioop.lin2adpcm('\0\0\0\0', 1, None), ('\0\0', (0,0))) + @impl_detail(pypy=False) def test_lin2alaw(self): self.assertEqual(audioop.lin2alaw(data[0], 1), '\xd5\xc5\xf5') self.assertEqual(audioop.lin2alaw(data[1], 2), '\xd5\xd5\xd5') self.assertEqual(audioop.lin2alaw(data[2], 4), '\xd5\xd5\xd5') + @impl_detail(pypy=False) def test_alaw2lin(self): # Cursory d = audioop.lin2alaw(data[0], 1) @@ -123,11 +127,13 @@ self.assertEqual(audioop.alaw2lin(d, 4), b'\x00\x00\x08\x00\x00\x00\x08\x01\x00\x00\x10\x02') + @impl_detail(pypy=False) def test_lin2ulaw(self): self.assertEqual(audioop.lin2ulaw(data[0], 1), '\xff\xe7\xdb') self.assertEqual(audioop.lin2ulaw(data[1], 2), '\xff\xff\xff') self.assertEqual(audioop.lin2ulaw(data[2], 4), '\xff\xff\xff') + @impl_detail(pypy=False) def test_ulaw2lin(self): # Cursory d = audioop.lin2ulaw(data[0], 1) @@ -195,6 +201,7 @@ self.assertRaises(audioop.error, audioop.findmax, ''.join( chr(x) for x in xrange(256)), -2392392) + @impl_detail(pypy=False) def test_issue7673(self): state = None for data, size in INVALID_DATA: @@ -219,6 +226,7 @@ self.assertRaises(audioop.error, audioop.lin2alaw, data, size) self.assertRaises(audioop.error, audioop.lin2adpcm, data, size, state) + @impl_detail(pypy=False) def test_wrongsize(self): data = b'abc' state = None diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -109,7 +109,7 @@ RegrTest('test_asynchat.py', usemodules='select fcntl'), RegrTest('test_asyncore.py', usemodules='select fcntl'), RegrTest('test_atexit.py', core=True), - RegrTest('test_audioop.py', skip="incomplete module"), + 
RegrTest('test_audioop.py'), RegrTest('test_augassign.py', core=True), RegrTest('test_base64.py', usemodules='struct'), RegrTest('test_bastion.py'), From noreply at buildbot.pypy.org Wed Feb 19 18:54:17 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 19 Feb 2014 18:54:17 +0100 (CET) Subject: [pypy-commit] pypy default: fix promote_to_largest in ufuncs (issue1663) Message-ID: <20140219175417.BF7D51C35DA@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69208:53eef9736a3b Date: 2014-02-19 12:41 -0500 http://bitbucket.org/pypy/pypy/changeset/53eef9736a3b/ Log: fix promote_to_largest in ufuncs (issue1663) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -964,8 +964,7 @@ # ----------------------- reduce ------------------------------- - def _reduce_ufunc_impl(ufunc_name, promote_to_largest=False, - cumulative=False): + def _reduce_ufunc_impl(ufunc_name, cumulative=False): @unwrap_spec(keepdims=bool) def impl(self, space, w_axis=None, w_dtype=None, w_out=None, keepdims=False): if space.is_none(w_out): @@ -976,13 +975,11 @@ else: out = w_out return getattr(interp_ufuncs.get(space), ufunc_name).reduce( - space, self, promote_to_largest, w_axis, - keepdims, out, w_dtype, cumulative=cumulative) - return func_with_new_name(impl, "reduce_%s_impl_%d_%d" % (ufunc_name, - promote_to_largest, cumulative)) + space, self, w_axis, keepdims, out, w_dtype, cumulative=cumulative) + return func_with_new_name(impl, "reduce_%s_impl_%d" % (ufunc_name, cumulative)) - descr_sum = _reduce_ufunc_impl("add", True) - descr_prod = _reduce_ufunc_impl("multiply", True) + descr_sum = _reduce_ufunc_impl("add") + descr_prod = _reduce_ufunc_impl("multiply") descr_max = _reduce_ufunc_impl("maximum") descr_min = _reduce_ufunc_impl("minimum") descr_all = _reduce_ufunc_impl('logical_and') diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -19,12 +19,15 @@ class W_Ufunc(W_Root): - _immutable_fields_ = ["name", "promote_to_float", "promote_bools", "identity", - "int_only", "allow_bool", "allow_complex", "complex_to_float"] + _immutable_fields_ = [ + "name", "promote_to_largest", "promote_to_float", "promote_bools", + "identity", "int_only", "allow_bool", "allow_complex", "complex_to_float" + ] - def __init__(self, name, promote_to_float, promote_bools, identity, - int_only, allow_bool, allow_complex, complex_to_float): + def __init__(self, name, promote_to_largest, promote_to_float, promote_bools, + identity, int_only, allow_bool, allow_complex, complex_to_float): self.name = name + self.promote_to_largest = promote_to_largest self.promote_to_float = promote_to_float self.promote_bools = promote_bools self.identity = identity @@ -88,9 +91,8 @@ 'output must be an array')) else: out = w_out - return self.reduce(space, w_obj, False, #do not promote_to_largest - w_axis, True, #keepdims must be true - out, w_dtype, cumulative=True) + return self.reduce(space, w_obj, w_axis, True, #keepdims must be true + out, w_dtype, cumulative=True) @unwrap_spec(skipna=bool, keepdims=bool) def descr_reduce(self, space, w_obj, w_axis=None, w_dtype=None, @@ -154,15 +156,13 @@ out = None elif not isinstance(w_out, W_NDimArray): raise OperationError(space.w_TypeError, space.wrap( - 'output must be an array')) + 'output must be an array')) 
else: out = w_out - promote_to_largest = False - return self.reduce(space, w_obj, promote_to_largest, w_axis, keepdims, out, - w_dtype) + return self.reduce(space, w_obj, w_axis, keepdims, out, w_dtype) - def reduce(self, space, w_obj, promote_to_largest, w_axis, - keepdims=False, out=None, dtype=None, cumulative=False): + def reduce(self, space, w_obj, w_axis, keepdims=False, out=None, dtype=None, + cumulative=False): if self.argcount != 2: raise OperationError(space.w_ValueError, space.wrap("reduce only " "supported for binary functions")) @@ -185,7 +185,7 @@ dtype = find_unaryop_result_dtype( space, obj.get_dtype(), promote_to_float=self.promote_to_float, - promote_to_largest=promote_to_largest, + promote_to_largest=self.promote_to_largest, promote_bools=True ) if self.identity is None: @@ -263,18 +263,18 @@ return self._outer(space, __args__) def _outer(self, space, __args__): - raise OperationError(space.w_ValueError, - space.wrap("outer product only supported for binary functions")) + raise OperationError(space.w_ValueError, space.wrap( + "outer product only supported for binary functions")) class W_Ufunc1(W_Ufunc): _immutable_fields_ = ["func", "bool_result"] argcount = 1 - def __init__(self, func, name, promote_to_float=False, promote_bools=False, - identity=None, bool_result=False, int_only=False, + def __init__(self, func, name, promote_to_largest=False, promote_to_float=False, + promote_bools=False, identity=None, bool_result=False, int_only=False, allow_bool=True, allow_complex=True, complex_to_float=False): - W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity, - int_only, allow_bool, allow_complex, complex_to_float) + W_Ufunc.__init__(self, name, promote_to_largest, promote_to_float, promote_bools, + identity, int_only, allow_bool, allow_complex, complex_to_float) self.func = func self.bool_result = bool_result @@ -336,11 +336,11 @@ _immutable_fields_ = ["func", "comparison_func", "done_func"] argcount = 2 - def __init__(self, func, name, promote_to_float=False, promote_bools=False, - identity=None, comparison_func=False, int_only=False, + def __init__(self, func, name, promote_to_largest=False, promote_to_float=False, + promote_bools=False, identity=None, comparison_func=False, int_only=False, allow_bool=True, allow_complex=True, complex_to_float=False): - W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity, - int_only, allow_bool, allow_complex, complex_to_float) + W_Ufunc.__init__(self, name, promote_to_largest, promote_to_float, promote_bools, + identity, int_only, allow_bool, allow_complex, complex_to_float) self.func = func self.comparison_func = comparison_func if name == 'logical_and': @@ -606,9 +606,9 @@ def __init__(self, space): "NOT_RPYTHON" for ufunc_def in [ - ("add", "add", 2, {"identity": 0}), + ("add", "add", 2, {"identity": 0, "promote_to_largest": True}), ("subtract", "sub", 2), - ("multiply", "mul", 2, {"identity": 1}), + ("multiply", "mul", 2, {"identity": 1, "promote_to_largest": True}), ("bitwise_and", "bitwise_and", 2, {"identity": 1, "int_only": True}), ("bitwise_or", "bitwise_or", 2, {"identity": 0, diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -756,7 +756,7 @@ raises(ValueError, maximum.reduce, zeros((2, 0)), axis=1) def test_reduce_1d(self): - from numpypy import add, maximum, less + from numpypy import array, add, maximum, less, float16, complex64 assert 
less.reduce([5, 4, 3, 2, 1]) assert add.reduce([1, 2, 3]) == 6 @@ -764,6 +764,12 @@ assert maximum.reduce([1, 2, 3]) == 3 raises(ValueError, maximum.reduce, []) + assert add.reduce(array([True, False] * 200)) == 200 + assert add.reduce(array([True, False] * 200, dtype='int8')) == 200 + assert add.reduce(array([True, False] * 200), dtype='int8') == -56 + assert type(add.reduce(array([True, False] * 200, dtype='float16'))) is float16 + assert type(add.reduce(array([True, False] * 200, dtype='complex64'))) is complex64 + def test_reduceND(self): from numpypy import add, arange a = arange(12).reshape(3, 4) @@ -1025,7 +1031,7 @@ assert logaddexp2(float('inf'), float('inf')) == float('inf') def test_accumulate(self): - from numpypy import add, multiply, arange + from numpypy import add, multiply, arange, dtype assert (add.accumulate([2, 3, 5]) == [2, 5, 10]).all() assert (multiply.accumulate([2, 3, 5]) == [2, 6, 30]).all() a = arange(4).reshape(2,2) @@ -1041,6 +1047,8 @@ print b assert (b == [[0, 0, 1], [1, 3, 5]]).all() assert b.dtype == int + assert add.accumulate([True]*200)[-1] == 200 + assert add.accumulate([True]*200).dtype == dtype('int') def test_noncommutative_reduce_accumulate(self): import numpypy as np From noreply at buildbot.pypy.org Wed Feb 19 19:01:02 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 19 Feb 2014 19:01:02 +0100 (CET) Subject: [pypy-commit] pypy default: fix more ufunc result dtypes Message-ID: <20140219180102.6CE621C361B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69209:3907f379b172 Date: 2014-02-19 12:59 -0500 http://bitbucket.org/pypy/pypy/changeset/3907f379b172/ Log: fix more ufunc result dtypes diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -186,7 +186,7 @@ space, obj.get_dtype(), promote_to_float=self.promote_to_float, promote_to_largest=self.promote_to_largest, - promote_bools=True + promote_bools=self.promote_bools, ) if self.identity is None: for i in range(shapelen): diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -1031,7 +1031,7 @@ assert logaddexp2(float('inf'), float('inf')) == float('inf') def test_accumulate(self): - from numpypy import add, multiply, arange, dtype + from numpypy import add, subtract, multiply, divide, arange, dtype assert (add.accumulate([2, 3, 5]) == [2, 5, 10]).all() assert (multiply.accumulate([2, 3, 5]) == [2, 6, 30]).all() a = arange(4).reshape(2,2) @@ -1049,6 +1049,8 @@ assert b.dtype == int assert add.accumulate([True]*200)[-1] == 200 assert add.accumulate([True]*200).dtype == dtype('int') + assert subtract.accumulate([True]*200).dtype == dtype('bool') + assert divide.accumulate([True]*200).dtype == dtype('int8') def test_noncommutative_reduce_accumulate(self): import numpypy as np From noreply at buildbot.pypy.org Wed Feb 19 19:38:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Feb 2014 19:38:54 +0100 (CET) Subject: [pypy-commit] pypy default: Windows: need this include, otherwise the type 'off_t' is not defined, Message-ID: <20140219183854.ECF3A1C033D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69210:87752dbcfa4c Date: 2014-02-19 19:09 +0100 http://bitbucket.org/pypy/pypy/changeset/87752dbcfa4c/ Log: Windows: need this include, otherwise the type 'off_t' 
is not defined, which confuses some extension modules diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h --- a/pypy/module/cpyext/include/Python.h +++ b/pypy/module/cpyext/include/Python.h @@ -24,6 +24,7 @@ # include # endif # include +# include /* for 'off_t' */ # define Py_DEPRECATED(VERSION_UNUSED) # ifdef Py_BUILD_CORE # define PyAPI_FUNC(RTYPE) __declspec(dllexport) RTYPE From noreply at buildbot.pypy.org Wed Feb 19 19:38:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Feb 2014 19:38:56 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140219183856.852BF1C033D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69211:6a81e7b47559 Date: 2014-02-19 19:38 +0100 http://bitbucket.org/pypy/pypy/changeset/6a81e7b47559/ Log: merge heads diff --git a/lib-python/2.7/test/test_audioop.py b/lib-python/2.7/test/test_audioop.py --- a/lib-python/2.7/test/test_audioop.py +++ b/lib-python/2.7/test/test_audioop.py @@ -1,6 +1,6 @@ import audioop import unittest -from test.test_support import run_unittest +from test.test_support import run_unittest, impl_detail endian = 'big' if audioop.getsample('\0\1', 2, 0) == 1 else 'little' @@ -93,21 +93,25 @@ wtd = len(d2)//3 self.assertEqual(len(audioop.lin2lin(d1, got, wtd)), len(d2)) + @impl_detail(pypy=False) def test_adpcm2lin(self): # Very cursory test self.assertEqual(audioop.adpcm2lin(b'\0\0', 1, None), (b'\0' * 4, (0,0))) self.assertEqual(audioop.adpcm2lin(b'\0\0', 2, None), (b'\0' * 8, (0,0))) self.assertEqual(audioop.adpcm2lin(b'\0\0', 4, None), (b'\0' * 16, (0,0))) + @impl_detail(pypy=False) def test_lin2adpcm(self): # Very cursory test self.assertEqual(audioop.lin2adpcm('\0\0\0\0', 1, None), ('\0\0', (0,0))) + @impl_detail(pypy=False) def test_lin2alaw(self): self.assertEqual(audioop.lin2alaw(data[0], 1), '\xd5\xc5\xf5') self.assertEqual(audioop.lin2alaw(data[1], 2), '\xd5\xd5\xd5') self.assertEqual(audioop.lin2alaw(data[2], 4), '\xd5\xd5\xd5') + @impl_detail(pypy=False) def test_alaw2lin(self): # Cursory d = audioop.lin2alaw(data[0], 1) @@ -123,11 +127,13 @@ self.assertEqual(audioop.alaw2lin(d, 4), b'\x00\x00\x08\x00\x00\x00\x08\x01\x00\x00\x10\x02') + @impl_detail(pypy=False) def test_lin2ulaw(self): self.assertEqual(audioop.lin2ulaw(data[0], 1), '\xff\xe7\xdb') self.assertEqual(audioop.lin2ulaw(data[1], 2), '\xff\xff\xff') self.assertEqual(audioop.lin2ulaw(data[2], 4), '\xff\xff\xff') + @impl_detail(pypy=False) def test_ulaw2lin(self): # Cursory d = audioop.lin2ulaw(data[0], 1) @@ -195,6 +201,7 @@ self.assertRaises(audioop.error, audioop.findmax, ''.join( chr(x) for x in xrange(256)), -2392392) + @impl_detail(pypy=False) def test_issue7673(self): state = None for data, size in INVALID_DATA: @@ -219,6 +226,7 @@ self.assertRaises(audioop.error, audioop.lin2alaw, data, size) self.assertRaises(audioop.error, audioop.lin2adpcm, data, size, state) + @impl_detail(pypy=False) def test_wrongsize(self): data = b'abc' state = None diff --git a/lib-python/conftest.py b/lib-python/conftest.py --- a/lib-python/conftest.py +++ b/lib-python/conftest.py @@ -109,7 +109,7 @@ RegrTest('test_asynchat.py', usemodules='select fcntl'), RegrTest('test_asyncore.py', usemodules='select fcntl'), RegrTest('test_atexit.py', core=True), - RegrTest('test_audioop.py', skip="incomplete module"), + RegrTest('test_audioop.py'), RegrTest('test_augassign.py', core=True), RegrTest('test_base64.py', usemodules='struct'), RegrTest('test_bastion.py'), diff --git a/lib_pypy/audioop.py 
b/lib_pypy/audioop.py --- a/lib_pypy/audioop.py +++ b/lib_pypy/audioop.py @@ -1,5 +1,8 @@ - +import __builtin__ +import math import struct +from fractions import gcd +from ctypes import create_string_buffer class error(Exception): @@ -8,7 +11,7 @@ def _check_size(size): if size != 1 and size != 2 and size != 4: - raise error("Size should be 1, 2 or 4") + raise error("Size should be 1, 2 or 4") def _check_params(length, size): @@ -17,13 +20,526 @@ raise error("not a whole number of frames") +def _sample_count(cp, size): + return len(cp) / size + + +def _get_samples(cp, size, signed=True): + for i in range(_sample_count(cp, size)): + yield _get_sample(cp, size, i, signed) + + +def _struct_format(size, signed): + if size == 1: + return "b" if signed else "B" + elif size == 2: + return "h" if signed else "H" + elif size == 4: + return "i" if signed else "I" + + +def _get_sample(cp, size, i, signed=True): + fmt = _struct_format(size, signed) + start = i * size + end = start + size + return struct.unpack_from(fmt, buffer(cp)[start:end])[0] + + +def _put_sample(cp, size, i, val, signed=True): + fmt = _struct_format(size, signed) + struct.pack_into(fmt, cp, i * size, val) + + +def _get_maxval(size, signed=True): + if signed and size == 1: + return 0x7f + elif size == 1: + return 0xff + elif signed and size == 2: + return 0x7fff + elif size == 2: + return 0xffff + elif signed and size == 4: + return 0x7fffffff + elif size == 4: + return 0xffffffff + + +def _get_minval(size, signed=True): + if not signed: + return 0 + elif size == 1: + return -0x80 + elif size == 2: + return -0x8000 + elif size == 4: + return -0x80000000 + + +def _get_clipfn(size, signed=True): + maxval = _get_maxval(size, signed) + minval = _get_minval(size, signed) + return lambda val: __builtin__.max(min(val, maxval), minval) + + +def _overflow(val, size, signed=True): + minval = _get_minval(size, signed) + maxval = _get_maxval(size, signed) + if minval <= val <= maxval: + return val + + bits = size * 8 + if signed: + offset = 2**(bits-1) + return ((val + offset) % (2**bits)) - offset + else: + return val % (2**bits) + + def getsample(cp, size, i): _check_params(len(cp), size) if not (0 <= i < len(cp) / size): raise error("Index out of range") - if size == 1: - return struct.unpack_from("B", buffer(cp)[i:])[0] - elif size == 2: - return struct.unpack_from("H", buffer(cp)[i * 2:])[0] - elif size == 4: - return struct.unpack_from("I", buffer(cp)[i * 4:])[0] + return _get_sample(cp, size, i) + + +def max(cp, size): + _check_params(len(cp), size) + + if len(cp) == 0: + return 0 + + return __builtin__.max(abs(sample) for sample in _get_samples(cp, size)) + + +def minmax(cp, size): + _check_params(len(cp), size) + + max_sample, min_sample = 0, 0 + for sample in _get_samples(cp, size): + max_sample = __builtin__.max(sample, max_sample) + min_sample = __builtin__.min(sample, min_sample) + + return min_sample, max_sample + + +def avg(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + if sample_count == 0: + return 0 + return sum(_get_samples(cp, size)) / sample_count + + +def rms(cp, size): + _check_params(len(cp), size) + + sample_count = _sample_count(cp, size) + if sample_count == 0: + return 0 + + sum_squares = sum(sample**2 for sample in _get_samples(cp, size)) + return int(math.sqrt(sum_squares / sample_count)) + + +def _sum2(cp1, cp2, length): + size = 2 + total = 0 + for i in range(length): + total += getsample(cp1, size, i) * getsample(cp2, size, i) + return total + + +def findfit(cp1, cp2): + 
size = 2 + + if len(cp1) % 2 != 0 or len(cp2) % 2 != 0: + raise error("Strings should be even-sized") + + if len(cp1) < len(cp2): + raise error("First sample should be longer") + + len1 = _sample_count(cp1, size) + len2 = _sample_count(cp2, size) + + sum_ri_2 = _sum2(cp2, cp2, len2) + sum_aij_2 = _sum2(cp1, cp1, len2) + sum_aij_ri = _sum2(cp1, cp2, len2) + + result = (sum_ri_2 * sum_aij_2 - sum_aij_ri * sum_aij_ri) / sum_aij_2 + + best_result = result + best_i = 0 + + for i in range(1, len1 - len2 + 1): + aj_m1 = _get_sample(cp1, size, i - 1) + aj_lm1 = _get_sample(cp1, size, i + len2 - 1) + + sum_aij_2 += aj_lm1**2 - aj_m1**2 + sum_aij_ri = _sum2(buffer(cp1)[i*size:], cp2, len2) + + result = (sum_ri_2 * sum_aij_2 - sum_aij_ri * sum_aij_ri) / sum_aij_2 + + if result < best_result: + best_result = result + best_i = i + + factor = _sum2(buffer(cp1)[best_i*size:], cp2, len2) / sum_ri_2 + + return best_i, factor + + +def findfactor(cp1, cp2): + size = 2 + + if len(cp1) % 2 != 0: + raise error("Strings should be even-sized") + + if len(cp1) != len(cp2): + raise error("Samples should be same size") + + sample_count = _sample_count(cp1, size) + + sum_ri_2 = _sum2(cp2, cp2, sample_count) + sum_aij_ri = _sum2(cp1, cp2, sample_count) + + return sum_aij_ri / sum_ri_2 + + +def findmax(cp, len2): + size = 2 + sample_count = _sample_count(cp, size) + + if len(cp) % 2 != 0: + raise error("Strings should be even-sized") + + if len2 < 0 or sample_count < len2: + raise error("Input sample should be longer") + + if sample_count == 0: + return 0 + + result = _sum2(cp, cp, len2) + best_result = result + best_i = 0 + + for i in range(1, sample_count - len2 + 1): + sample_leaving_window = getsample(cp, size, i - 1) + sample_entering_window = getsample(cp, size, i + len2 - 1) + + result -= sample_leaving_window**2 + result += sample_entering_window**2 + + if result > best_result: + best_result = result + best_i = i + + return best_i + + +def avgpp(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + + prevextremevalid = False + prevextreme = None + avg = 0 + nextreme = 0 + + prevval = getsample(cp, size, 0) + val = getsample(cp, size, 1) + + prevdiff = val - prevval + + for i in range(1, sample_count): + val = getsample(cp, size, i) + diff = val - prevval + + if diff * prevdiff < 0: + if prevextremevalid: + avg += abs(prevval - prevextreme) + nextreme += 1 + + prevextremevalid = True + prevextreme = prevval + + prevval = val + if diff != 0: + prevdiff = diff + + if nextreme == 0: + return 0 + + return avg / nextreme + + +def maxpp(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + + prevextremevalid = False + prevextreme = None + max = 0 + + prevval = getsample(cp, size, 0) + val = getsample(cp, size, 1) + + prevdiff = val - prevval + + for i in range(1, sample_count): + val = getsample(cp, size, i) + diff = val - prevval + + if diff * prevdiff < 0: + if prevextremevalid: + extremediff = abs(prevval - prevextreme) + if extremediff > max: + max = extremediff + prevextremevalid = True + prevextreme = prevval + + prevval = val + if diff != 0: + prevdiff = diff + + return max + + +def cross(cp, size): + _check_params(len(cp), size) + + crossings = 0 + last_sample = 0 + for sample in _get_samples(cp, size): + if sample <= 0 < last_sample or sample >= 0 > last_sample: + crossings += 1 + last_sample = sample + + return crossings + + +def mul(cp, size, factor): + _check_params(len(cp), size) + clip = _get_clipfn(size) + + result = 
create_string_buffer(len(cp)) + + for i, sample in enumerate(_get_samples(cp, size)): + sample = clip(int(sample * factor)) + _put_sample(result, size, i, sample) + + return result.raw + + +def tomono(cp, size, fac1, fac2): + _check_params(len(cp), size) + clip = _get_clipfn(size) + + sample_count = _sample_count(cp, size) + + result = create_string_buffer(len(cp) / 2) + + for i in range(0, sample_count, 2): + l_sample = getsample(cp, size, i) + r_sample = getsample(cp, size, i + 1) + + sample = (l_sample * fac1) + (r_sample * fac2) + sample = clip(sample) + + _put_sample(result, size, i / 2, sample) + + return result.raw + + +def tostereo(cp, size, fac1, fac2): + _check_params(len(cp), size) + + sample_count = _sample_count(cp, size) + + result = create_string_buffer(len(cp) * 2) + clip = _get_clipfn(size) + + for i in range(sample_count): + sample = _get_sample(cp, size, i) + + l_sample = clip(sample * fac1) + r_sample = clip(sample * fac2) + + _put_sample(result, size, i * 2, l_sample) + _put_sample(result, size, i * 2 + 1, r_sample) + + return result.raw + + +def add(cp1, cp2, size): + _check_params(len(cp1), size) + + if len(cp1) != len(cp2): + raise error("Lengths should be the same") + + clip = _get_clipfn(size) + sample_count = _sample_count(cp1, size) + result = create_string_buffer(len(cp1)) + + for i in range(sample_count): + sample1 = getsample(cp1, size, i) + sample2 = getsample(cp2, size, i) + + sample = clip(sample1 + sample2) + + _put_sample(result, size, i, sample) + + return result.raw + + +def bias(cp, size, bias): + _check_params(len(cp), size) + + result = create_string_buffer(len(cp)) + + for i, sample in enumerate(_get_samples(cp, size)): + sample = _overflow(sample + bias, size) + _put_sample(result, size, i, sample) + + return result.raw + + +def reverse(cp, size): + _check_params(len(cp), size) + sample_count = _sample_count(cp, size) + + result = create_string_buffer(len(cp)) + for i, sample in enumerate(_get_samples(cp, size)): + _put_sample(result, size, sample_count - i - 1, sample) + + return result.raw + + +def lin2lin(cp, size, size2): + _check_params(len(cp), size) + _check_size(size2) + + if size == size2: + return cp + + new_len = (len(cp) / size) * size2 + + result = create_string_buffer(new_len) + + for i in range(_sample_count(cp, size)): + sample = _get_sample(cp, size, i) + if size < size2: + sample = sample << (4 * size2 / size) + elif size > size2: + sample = sample >> (4 * size / size2) + + sample = _overflow(sample, size2) + + _put_sample(result, size2, i, sample) + + return result.raw + + +def ratecv(cp, size, nchannels, inrate, outrate, state, weightA=1, weightB=0): + _check_params(len(cp), size) + if nchannels < 1: + raise error("# of channels should be >= 1") + + bytes_per_frame = size * nchannels + frame_count = len(cp) / bytes_per_frame + + if bytes_per_frame / nchannels != size: + raise OverflowError("width * nchannels too big for a C int") + + if weightA < 1 or weightB < 0: + raise error("weightA should be >= 1, weightB should be >= 0") + + if len(cp) % bytes_per_frame != 0: + raise error("not a whole number of frames") + + if inrate <= 0 or outrate <= 0: + raise error("sampling rate not > 0") + + d = gcd(inrate, outrate) + inrate /= d + outrate /= d + + prev_i = [0] * nchannels + cur_i = [0] * nchannels + + if state is None: + d = -outrate + else: + d, samps = state + + if len(samps) != nchannels: + raise error("illegal state argument") + + prev_i, cur_i = zip(*samps) + prev_i, cur_i = list(prev_i), list(cur_i) + + q = frame_count / 
inrate + ceiling = (q + 1) * outrate + nbytes = ceiling * bytes_per_frame + + result = create_string_buffer(nbytes) + + samples = _get_samples(cp, size) + out_i = 0 + while True: + while d < 0: + if frame_count == 0: + samps = zip(prev_i, cur_i) + retval = result.raw + + # slice off extra bytes + trim_index = (out_i * bytes_per_frame) - len(retval) + retval = buffer(retval)[:trim_index] + + return (retval, (d, tuple(samps))) + + for chan in range(nchannels): + prev_i[chan] = cur_i[chan] + cur_i[chan] = samples.next() + + cur_i[chan] = ( + (weightA * cur_i[chan] + weightB * prev_i[chan]) + / (weightA + weightB) + ) + + frame_count -= 1 + d += outrate + + while d >= 0: + for chan in range(nchannels): + cur_o = ( + (prev_i[chan] * d + cur_i[chan] * (outrate - d)) + / outrate + ) + _put_sample(result, size, out_i, _overflow(cur_o, size)) + out_i += 1 + d -= inrate + + +def lin2ulaw(cp, size): + raise NotImplementedError() + + +def ulaw2lin(cp, size): + raise NotImplementedError() + + +def lin2alaw(cp, size): + raise NotImplementedError() + + +def alaw2lin(cp, size): + raise NotImplementedError() + + +def lin2adpcm(cp, size, state): + raise NotImplementedError() + + +def adpcm2lin(cp, size, state): + raise NotImplementedError() diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -78,3 +78,7 @@ .. branch: optimize-int-and Optimize away INT_AND with constant mask of 1s that fully cover the bitrange of other operand. + +.. branch: bounds-int-add-or +Propagate appropriate bounds through INT_(OR|XOR|AND) operations if the +operands are positive to kill some guards diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -964,8 +964,7 @@ # ----------------------- reduce ------------------------------- - def _reduce_ufunc_impl(ufunc_name, promote_to_largest=False, - cumulative=False): + def _reduce_ufunc_impl(ufunc_name, cumulative=False): @unwrap_spec(keepdims=bool) def impl(self, space, w_axis=None, w_dtype=None, w_out=None, keepdims=False): if space.is_none(w_out): @@ -976,13 +975,11 @@ else: out = w_out return getattr(interp_ufuncs.get(space), ufunc_name).reduce( - space, self, promote_to_largest, w_axis, - keepdims, out, w_dtype, cumulative=cumulative) - return func_with_new_name(impl, "reduce_%s_impl_%d_%d" % (ufunc_name, - promote_to_largest, cumulative)) + space, self, w_axis, keepdims, out, w_dtype, cumulative=cumulative) + return func_with_new_name(impl, "reduce_%s_impl_%d" % (ufunc_name, cumulative)) - descr_sum = _reduce_ufunc_impl("add", True) - descr_prod = _reduce_ufunc_impl("multiply", True) + descr_sum = _reduce_ufunc_impl("add") + descr_prod = _reduce_ufunc_impl("multiply") descr_max = _reduce_ufunc_impl("maximum") descr_min = _reduce_ufunc_impl("minimum") descr_all = _reduce_ufunc_impl('logical_and') diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -19,12 +19,15 @@ class W_Ufunc(W_Root): - _immutable_fields_ = ["name", "promote_to_float", "promote_bools", "identity", - "int_only", "allow_bool", "allow_complex", "complex_to_float"] + _immutable_fields_ = [ + "name", "promote_to_largest", "promote_to_float", "promote_bools", + "identity", "int_only", "allow_bool", "allow_complex", "complex_to_float" + ] - def __init__(self, 
name, promote_to_float, promote_bools, identity, - int_only, allow_bool, allow_complex, complex_to_float): + def __init__(self, name, promote_to_largest, promote_to_float, promote_bools, + identity, int_only, allow_bool, allow_complex, complex_to_float): self.name = name + self.promote_to_largest = promote_to_largest self.promote_to_float = promote_to_float self.promote_bools = promote_bools self.identity = identity @@ -88,9 +91,8 @@ 'output must be an array')) else: out = w_out - return self.reduce(space, w_obj, False, #do not promote_to_largest - w_axis, True, #keepdims must be true - out, w_dtype, cumulative=True) + return self.reduce(space, w_obj, w_axis, True, #keepdims must be true + out, w_dtype, cumulative=True) @unwrap_spec(skipna=bool, keepdims=bool) def descr_reduce(self, space, w_obj, w_axis=None, w_dtype=None, @@ -154,15 +156,13 @@ out = None elif not isinstance(w_out, W_NDimArray): raise OperationError(space.w_TypeError, space.wrap( - 'output must be an array')) + 'output must be an array')) else: out = w_out - promote_to_largest = False - return self.reduce(space, w_obj, promote_to_largest, w_axis, keepdims, out, - w_dtype) + return self.reduce(space, w_obj, w_axis, keepdims, out, w_dtype) - def reduce(self, space, w_obj, promote_to_largest, w_axis, - keepdims=False, out=None, dtype=None, cumulative=False): + def reduce(self, space, w_obj, w_axis, keepdims=False, out=None, dtype=None, + cumulative=False): if self.argcount != 2: raise OperationError(space.w_ValueError, space.wrap("reduce only " "supported for binary functions")) @@ -185,8 +185,8 @@ dtype = find_unaryop_result_dtype( space, obj.get_dtype(), promote_to_float=self.promote_to_float, - promote_to_largest=promote_to_largest, - promote_bools=True + promote_to_largest=self.promote_to_largest, + promote_bools=self.promote_bools, ) if self.identity is None: for i in range(shapelen): @@ -263,18 +263,18 @@ return self._outer(space, __args__) def _outer(self, space, __args__): - raise OperationError(space.w_ValueError, - space.wrap("outer product only supported for binary functions")) + raise OperationError(space.w_ValueError, space.wrap( + "outer product only supported for binary functions")) class W_Ufunc1(W_Ufunc): _immutable_fields_ = ["func", "bool_result"] argcount = 1 - def __init__(self, func, name, promote_to_float=False, promote_bools=False, - identity=None, bool_result=False, int_only=False, + def __init__(self, func, name, promote_to_largest=False, promote_to_float=False, + promote_bools=False, identity=None, bool_result=False, int_only=False, allow_bool=True, allow_complex=True, complex_to_float=False): - W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity, - int_only, allow_bool, allow_complex, complex_to_float) + W_Ufunc.__init__(self, name, promote_to_largest, promote_to_float, promote_bools, + identity, int_only, allow_bool, allow_complex, complex_to_float) self.func = func self.bool_result = bool_result @@ -336,11 +336,11 @@ _immutable_fields_ = ["func", "comparison_func", "done_func"] argcount = 2 - def __init__(self, func, name, promote_to_float=False, promote_bools=False, - identity=None, comparison_func=False, int_only=False, + def __init__(self, func, name, promote_to_largest=False, promote_to_float=False, + promote_bools=False, identity=None, comparison_func=False, int_only=False, allow_bool=True, allow_complex=True, complex_to_float=False): - W_Ufunc.__init__(self, name, promote_to_float, promote_bools, identity, - int_only, allow_bool, allow_complex, complex_to_float) + 
W_Ufunc.__init__(self, name, promote_to_largest, promote_to_float, promote_bools, + identity, int_only, allow_bool, allow_complex, complex_to_float) self.func = func self.comparison_func = comparison_func if name == 'logical_and': @@ -606,9 +606,9 @@ def __init__(self, space): "NOT_RPYTHON" for ufunc_def in [ - ("add", "add", 2, {"identity": 0}), + ("add", "add", 2, {"identity": 0, "promote_to_largest": True}), ("subtract", "sub", 2), - ("multiply", "mul", 2, {"identity": 1}), + ("multiply", "mul", 2, {"identity": 1, "promote_to_largest": True}), ("bitwise_and", "bitwise_and", 2, {"identity": 1, "int_only": True}), ("bitwise_or", "bitwise_or", 2, {"identity": 0, diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -756,7 +756,7 @@ raises(ValueError, maximum.reduce, zeros((2, 0)), axis=1) def test_reduce_1d(self): - from numpypy import add, maximum, less + from numpypy import array, add, maximum, less, float16, complex64 assert less.reduce([5, 4, 3, 2, 1]) assert add.reduce([1, 2, 3]) == 6 @@ -764,6 +764,12 @@ assert maximum.reduce([1, 2, 3]) == 3 raises(ValueError, maximum.reduce, []) + assert add.reduce(array([True, False] * 200)) == 200 + assert add.reduce(array([True, False] * 200, dtype='int8')) == 200 + assert add.reduce(array([True, False] * 200), dtype='int8') == -56 + assert type(add.reduce(array([True, False] * 200, dtype='float16'))) is float16 + assert type(add.reduce(array([True, False] * 200, dtype='complex64'))) is complex64 + def test_reduceND(self): from numpypy import add, arange a = arange(12).reshape(3, 4) @@ -1025,7 +1031,7 @@ assert logaddexp2(float('inf'), float('inf')) == float('inf') def test_accumulate(self): - from numpypy import add, multiply, arange + from numpypy import add, subtract, multiply, divide, arange, dtype assert (add.accumulate([2, 3, 5]) == [2, 5, 10]).all() assert (multiply.accumulate([2, 3, 5]) == [2, 6, 30]).all() a = arange(4).reshape(2,2) @@ -1041,6 +1047,10 @@ print b assert (b == [[0, 0, 1], [1, 3, 5]]).all() assert b.dtype == int + assert add.accumulate([True]*200)[-1] == 200 + assert add.accumulate([True]*200).dtype == dtype('int') + assert subtract.accumulate([True]*200).dtype == dtype('bool') + assert divide.accumulate([True]*200).dtype == dtype('int8') def test_noncommutative_reduce_accumulate(self): import numpypy as np diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py --- a/rpython/jit/metainterp/optimizeopt/intbounds.py +++ b/rpython/jit/metainterp/optimizeopt/intbounds.py @@ -7,6 +7,7 @@ CONST_0, MODE_ARRAY, MODE_STR, MODE_UNICODE) from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method from rpython.jit.metainterp.resoperation import rop +from rpython.jit.backend.llsupport import symbolic def get_integer_min(is_unsigned, byte_size): @@ -23,6 +24,17 @@ return (1 << ((byte_size << 3) - 1)) - 1 +def next_pow2_m1(n): + """Calculate next power of 2 greater than n minus one.""" + n |= n >> 1 + n |= n >> 2 + n |= n >> 4 + n |= n >> 8 + n |= n >> 16 + n |= n >> 32 + return n + + class OptIntBounds(Optimization): """Keeps track of the bounds placed on integers by guards and remove redundant guards""" @@ -56,17 +68,24 @@ optimize_GUARD_FALSE = optimize_GUARD_TRUE optimize_GUARD_VALUE = optimize_GUARD_TRUE - def optimize_INT_XOR(self, op): + def optimize_INT_OR_or_XOR(self, op): v1 = self.getvalue(op.getarg(0)) v2 = 
self.getvalue(op.getarg(1)) if v1 is v2: - self.make_constant_int(op.result, 0) + if op.getopnum() == rop.INT_OR: + self.make_equal_to(op.result, v1) + else: + self.make_constant_int(op.result, 0) return self.emit_operation(op) if v1.intbound.known_ge(IntBound(0, 0)) and \ v2.intbound.known_ge(IntBound(0, 0)): r = self.getvalue(op.result) - r.intbound.make_ge(IntLowerBound(0)) + mostsignificant = v1.intbound.upper | v2.intbound.upper + r.intbound.intersect(IntBound(0, next_pow2_m1(mostsignificant))) + + optimize_INT_OR = optimize_INT_OR_or_XOR + optimize_INT_XOR = optimize_INT_OR_or_XOR def optimize_INT_AND(self, op): v1 = self.getvalue(op.getarg(0)) @@ -82,6 +101,10 @@ val = v1.box.getint() if val >= 0: r.intbound.intersect(IntBound(0, val)) + elif v1.intbound.known_ge(IntBound(0, 0)) and \ + v2.intbound.known_ge(IntBound(0, 0)): + lesser = min(v1.intbound.upper, v2.intbound.upper) + r.intbound.intersect(IntBound(0, next_pow2_m1(lesser))) def optimize_INT_SUB(self, op): v1 = self.getvalue(op.getarg(0)) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py b/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/optimizeopt/test/test_intbounds.py @@ -0,0 +1,12 @@ +from rpython.jit.metainterp.optimizeopt.intbounds import next_pow2_m1 + + +def test_next_pow2_m1(): + assert next_pow2_m1(0) == 0 + assert next_pow2_m1(1) == 1 + assert next_pow2_m1(7) == 7 + assert next_pow2_m1(256) == 511 + assert next_pow2_m1(255) == 255 + assert next_pow2_m1(80) == 127 + assert next_pow2_m1((1 << 32) - 5) == (1 << 32) - 1 + assert next_pow2_m1((1 << 64) - 1) == (1 << 64) - 1 diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -5326,6 +5326,114 @@ """ self.optimize_loop(ops, ops) + def test_int_and_cmp_above_bounds(self): + ops = """ + [p0,p1] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) + i2 = int_and(i0, i1) + i3 = int_le(i2, 255) + guard_true(i3) [] + jump(i2) + """ + + expected = """ + [p0,p1] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) + i2 = int_and(i0, i1) + jump(i2) + """ + self.optimize_loop(ops, expected) + + def test_int_and_cmp_below_bounds(self): + ops = """ + [p0,p1] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) + i2 = int_and(i0, i1) + i3 = int_lt(i2, 255) + guard_true(i3) [] + jump(i2) + """ + self.optimize_loop(ops, ops) + + def test_int_or_cmp_above_bounds(self): + ops = """ + [p0,p1] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) + i2 = int_or(i0, i1) + i3 = int_le(i2, 65535) + guard_true(i3) [] + jump(i2) + """ + + expected = """ + [p0,p1] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) + i2 = int_or(i0, i1) + jump(i2) + """ + self.optimize_loop(ops, expected) + + def test_int_or_cmp_below_bounds(self): + ops = """ + [p0,p1] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) + i2 = int_or(i0, i1) + i3 = int_lt(i2, 65535) + guard_true(i3) [] + jump(i2) + """ + self.optimize_loop(ops, ops) + + def test_int_xor_cmp_above_bounds(self): + ops = """ + [p0,p1] + i0 = 
getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) + i2 = int_xor(i0, i1) + i3 = int_le(i2, 65535) + guard_true(i3) [] + jump(i2) + """ + + expected = """ + [p0,p1] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) + i2 = int_xor(i0, i1) + jump(i2) + """ + self.optimize_loop(ops, expected) + + def test_int_xor_cmp_below_bounds(self): + ops = """ + [p0,p1] + i0 = getarrayitem_gc(p0, 0, descr=chararraydescr) + i1 = getarrayitem_gc(p1, 0, descr=u2arraydescr) + i2 = int_xor(i0, i1) + i3 = int_lt(i2, 65535) + guard_true(i3) [] + jump(i2) + """ + self.optimize_loop(ops, ops) + + def test_int_or_same_arg(self): + ops = """ + [i0] + i1 = int_or(i0, i0) + jump(i1) + """ + expected = """ + [i0] + jump(i0) + """ + self.optimize_loop(ops, expected) + class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin): pass diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -1,6 +1,6 @@ import py, random -from rpython.rtyper.lltypesystem import lltype, llmemory, rclass, rstr +from rpython.rtyper.lltypesystem import lltype, llmemory, rclass, rstr, rffi from rpython.rtyper.lltypesystem.rclass import OBJECT, OBJECT_VTABLE from rpython.rtyper.rclass import FieldListAccessor, IR_QUASIIMMUTABLE @@ -208,6 +208,8 @@ chararray = lltype.GcArray(lltype.Char) chararraydescr = cpu.arraydescrof(chararray) + u2array = lltype.GcArray(rffi.USHORT) + u2arraydescr = cpu.arraydescrof(u2array) # array of structs (complex data) complexarray = lltype.GcArray( From noreply at buildbot.pypy.org Wed Feb 19 19:39:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Feb 2014 19:39:06 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Update for Windows Message-ID: <20140219183906.05CE01C033D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5157:db73d6fca0d2 Date: 2014-02-19 19:08 +0100 http://bitbucket.org/pypy/extradoc/changeset/db73d6fca0d2/ Log: Update for Windows diff --git a/blog/draft/running-numpy.rst b/blog/draft/running-numpy.rst --- a/blog/draft/running-numpy.rst +++ b/blog/draft/running-numpy.rst @@ -3,6 +3,9 @@ * apt-get install pypy-dev +* Windows: you need to edit the Python.h from PyPy to add at the end: + ``#include `` + * git clone https://bitbucket.org/pypy/numpy.git; cd numpy; sudo pypy setup.py install From noreply at buildbot.pypy.org Wed Feb 19 19:50:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Feb 2014 19:50:17 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Backed out changeset db73d6fca0d2 Message-ID: <20140219185017.7C61C1C033D@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5158:8fe1c70ff8a0 Date: 2014-02-19 19:50 +0100 http://bitbucket.org/pypy/extradoc/changeset/8fe1c70ff8a0/ Log: Backed out changeset db73d6fca0d2 It turns out not to be needed, it was a problem of trying the wrong branch. 
diff --git a/blog/draft/running-numpy.rst b/blog/draft/running-numpy.rst --- a/blog/draft/running-numpy.rst +++ b/blog/draft/running-numpy.rst @@ -3,9 +3,6 @@ * apt-get install pypy-dev -* Windows: you need to edit the Python.h from PyPy to add at the end: - ``#include `` - * git clone https://bitbucket.org/pypy/numpy.git; cd numpy; sudo pypy setup.py install From noreply at buildbot.pypy.org Wed Feb 19 20:33:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Feb 2014 20:33:44 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Expand Message-ID: <20140219193344.17D5A1D267B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5159:9281c0996c69 Date: 2014-02-19 20:33 +0100 http://bitbucket.org/pypy/extradoc/changeset/9281c0996c69/ Log: Expand diff --git a/blog/draft/running-numpy.rst b/blog/draft/running-numpy.rst --- a/blog/draft/running-numpy.rst +++ b/blog/draft/running-numpy.rst @@ -1,9 +1,23 @@ How to install NumPy on PyPy ============================ -* apt-get install pypy-dev +* Debian, Ubuntu and maybe others: make sure to run + ``apt-get install pypy-dev``. The Debian package is, as usual, split + into more pieces than we'd like. You get it all in one go if you + installed PyPy any other way. -* git clone https://bitbucket.org/pypy/numpy.git; cd numpy; - sudo pypy setup.py install +* Windows: you may or may not need to edit the file ``include/Python.h`` + from your PyPy installation to add this line at the end: + ``#include `` -* sudo pypy -c 'import numpy' # only once +* Run:: + + git clone https://bitbucket.org/pypy/numpy.git + cd numpy + sudo pypy setup.py install + +* If you get a permission error when importing NumPy, you need to + import NumPy once as root:: + + cd somewhere_else_unrelated + sudo pypy -c 'import numpy' From noreply at buildbot.pypy.org Wed Feb 19 20:38:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 19 Feb 2014 20:38:42 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Can also download directly Message-ID: <20140219193842.D25E51D26BD@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5160:c54b5cc4534d Date: 2014-02-19 20:38 +0100 http://bitbucket.org/pypy/extradoc/changeset/c54b5cc4534d/ Log: Can also download directly diff --git a/blog/draft/running-numpy.rst b/blog/draft/running-numpy.rst --- a/blog/draft/running-numpy.rst +++ b/blog/draft/running-numpy.rst @@ -16,6 +16,9 @@ cd numpy sudo pypy setup.py install + or download https://bitbucket.org/pypy/numpy/get/pypy-compat.zip, + extract it, and run the last line above. 
+ * If you get a permission error when importing NumPy, you need to import NumPy once as root:: From noreply at buildbot.pypy.org Thu Feb 20 17:17:37 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Thu, 20 Feb 2014 17:17:37 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: add colors to test_random Message-ID: <20140220161737.9A35F1C0F86@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r801:115aea22a81d Date: 2014-02-20 17:17 +0100 http://bitbucket.org/pypy/stmgc/changeset/115aea22a81d/ Log: add colors to test_random diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -4,12 +4,16 @@ from cStringIO import StringIO + + class Exec(object): def __init__(self, test): self.content = {'self': test} + self.thread_num = 0 def do(self, cmd): - print >> sys.stderr, cmd + color = "\033[%dm" % (31 + self.thread_num % 6) + print >> sys.stderr, color + cmd + "\033[0m" exec cmd in globals(), self.content @@ -192,49 +196,49 @@ self.prebuilt_roots = [] self.committed_transaction_state = TransactionState(0) - def push_state_to_other_threads(self, tr_state): - assert not tr_state.check_must_abort() + def push_state_to_other_threads(self, trs): + assert not trs.check_must_abort() for ts in self.thread_states: other_trs = ts.transaction_state - if other_trs is None or other_trs is tr_state: + if other_trs is None or other_trs is trs: continue - other_trs.update_from_committed(tr_state, only_new=True) + other_trs.update_from_committed(trs, only_new=True) - if tr_state.check_must_abort(): + if trs.check_must_abort(): self.ex.do('# conflict while pushing to other threads: %s' % - tr_state.objs_in_conflict) + trs.objs_in_conflict) - def check_for_write_write_conflicts(self, tr_state): - assert not tr_state.check_must_abort() + def check_for_write_write_conflicts(self, trs): + assert not trs.check_must_abort() for ts in self.thread_states: other_trs = ts.transaction_state - if other_trs is None or other_trs is tr_state: + if other_trs is None or other_trs is trs: continue - confl_set = other_trs.write_set & tr_state.write_set + confl_set = other_trs.write_set & trs.write_set if confl_set: - contention_management(tr_state, other_trs, True, + contention_management(trs, other_trs, True, objs_in_conflict=confl_set) - if tr_state.check_must_abort(): + if trs.check_must_abort(): self.ex.do('# write-write conflict: %s' % - tr_state.objs_in_conflict) + trs.objs_in_conflict) - def check_for_write_read_conflicts(self, tr_state): - assert not tr_state.check_must_abort() + def check_for_write_read_conflicts(self, trs): + assert not trs.check_must_abort() for ts in self.thread_states: other_trs = ts.transaction_state - if other_trs is None or other_trs is tr_state: + if other_trs is None or other_trs is trs: continue - confl_set = other_trs.read_set & tr_state.write_set + confl_set = other_trs.read_set & trs.write_set if confl_set: - contention_management(tr_state, other_trs, + contention_management(trs, other_trs, objs_in_conflict=confl_set) - if tr_state.check_must_abort(): + if trs.check_must_abort(): self.ex.do('# write-read conflict: %s' % - tr_state.objs_in_conflict) + trs.objs_in_conflict) # ========== STM OPERATIONS ========== @@ -381,6 +385,7 @@ def do(self, ex, global_state, thread_state): trs = thread_state.transaction_state conflicts = trs is not None and trs.check_must_abort() + ex.thread_num = thread_state.num # if conflicts: thread_state.abort_transaction() From noreply at buildbot.pypy.org Thu Feb 20 
18:30:16 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 20 Feb 2014 18:30:16 +0100 (CET) Subject: [pypy-commit] pypy default: fix test_tostring on 32bit Message-ID: <20140220173016.BE41B1D2801@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69224:ad0c48672b08 Date: 2014-02-20 12:19 -0500 http://bitbucket.org/pypy/pypy/changeset/ad0c48672b08/ Log: fix test_tostring on 32bit diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -210,8 +210,8 @@ def test_tostring(self): import numpy as np - assert np.int64(123).tostring() == np.array(123, dtype=int).tostring() - assert np.int64(123).tostring('C') == np.array(123, dtype=int).tostring() + assert np.int64(123).tostring() == np.array(123, dtype='i8').tostring() + assert np.int64(123).tostring('C') == np.array(123, dtype='i8').tostring() assert np.float64(1.5).tostring() == np.array(1.5, dtype=float).tostring() exc = raises(TypeError, 'np.int64(123).tostring("Z")') assert exc.value[0] == 'order not understood' From noreply at buildbot.pypy.org Thu Feb 20 20:29:10 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 20 Feb 2014 20:29:10 +0100 (CET) Subject: [pypy-commit] pypy default: fix numpy int/uint conversions Message-ID: <20140220192910.5DFE71D26C7@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69225:5dae92869ac0 Date: 2014-02-20 14:24 -0500 http://bitbucket.org/pypy/pypy/changeset/5dae92869ac0/ Log: fix numpy int/uint conversions diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -44,7 +44,7 @@ w_arr = array(space, w_value, dtype, copy=False) if len(w_arr.get_shape()) != 0: return w_arr - w_value = w_arr.get_scalar_value() + w_value = w_arr.get_scalar_value().item(space) return dtype.itemtype.coerce_subtype(space, w_subtype, w_value) def descr_reduce(self, space): @@ -155,18 +155,22 @@ return space.index(self.item(space)) def descr_int(self, space): - box = self.convert_to(space, W_LongBox._get_dtype(space)) - assert isinstance(box, W_LongBox) - return space.wrap(box.value) + if isinstance(self, W_UnsignedIntegerBox): + box = self.convert_to(space, W_UInt64Box._get_dtype(space)) + else: + box = self.convert_to(space, W_Int64Box._get_dtype(space)) + return space.int(box.item(space)) def descr_long(self, space): - box = self.convert_to(space, W_Int64Box._get_dtype(space)) - assert isinstance(box, W_Int64Box) - return space.wrap(box.value) + if isinstance(self, W_UnsignedIntegerBox): + box = self.convert_to(space, W_UInt64Box._get_dtype(space)) + else: + box = self.convert_to(space, W_Int64Box._get_dtype(space)) + return space.long(box.item(space)) def descr_float(self, space): box = self.convert_to(space, W_Float64Box._get_dtype(space)) - assert isinstance(box, W_Float64Box) + assert isinstance(box, PrimitiveBox) return space.wrap(box.value) def descr_oct(self, space): diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -572,6 +572,7 @@ raises(OverflowError, numpy.int64, 9223372036854775808) raises(OverflowError, numpy.int64, 18446744073709551615) raises(OverflowError, numpy.uint64, 18446744073709551616) + assert numpy.uint64((2<<63) - 1) == (2<<63) - 
1 def test_float16(self): import numpy diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -35,6 +35,7 @@ assert int(np.str_('12')) == 12 exc = raises(ValueError, "int(np.str_('abc'))") assert exc.value.message.startswith('invalid literal for int()') + assert int(np.uint64((2<<63) - 1)) == (2<<63) - 1 assert oct(np.int32(11)) == '013' assert oct(np.float32(11.6)) == '013' assert oct(np.complex64(11-12j)) == '013' From noreply at buildbot.pypy.org Thu Feb 20 22:31:25 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 20 Feb 2014 22:31:25 +0100 (CET) Subject: [pypy-commit] pypy default: fix numpy scalar descr_float Message-ID: <20140220213125.6CCD51C1504@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69226:5c0b56c4704a Date: 2014-02-20 16:18 -0500 http://bitbucket.org/pypy/pypy/changeset/5c0b56c4704a/ Log: fix numpy scalar descr_float diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -170,8 +170,7 @@ def descr_float(self, space): box = self.convert_to(space, W_Float64Box._get_dtype(space)) - assert isinstance(box, PrimitiveBox) - return space.wrap(box.value) + return space.float(box.item(space)) def descr_oct(self, space): return space.oct(self.descr_int(space)) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2204,6 +2204,12 @@ assert exc.value.message == "don't know how to convert " \ "scalar number to %s" % op + def test__float__(self): + import numpy as np + assert float(np.array(1.5)) == 1.5 + exc = raises(TypeError, "float(np.array([1.5, 2.5]))") + assert exc.value[0] == 'only length-1 arrays can be converted to Python scalars' + def test__reduce__(self): from numpypy import array, dtype from cPickle import loads, dumps From noreply at buildbot.pypy.org Fri Feb 21 01:20:09 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 21 Feb 2014 01:20:09 +0100 (CET) Subject: [pypy-commit] pypy default: fix numpy complex types with non-native byteorder Message-ID: <20140221002009.DB46D1C136D@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69227:fb089d6960ac Date: 2014-02-20 18:42 -0500 http://bitbucket.org/pypy/pypy/changeset/fb089d6960ac/ Log: fix numpy complex types with non-native byteorder diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -86,25 +86,25 @@ return SliceArray(self.start, strides, backstrides, new_shape, self, orig_array, dtype=dtype) - def get_real(self, orig_array): + def get_real(self, space, orig_array): strides = self.get_strides() backstrides = self.get_backstrides() if self.dtype.is_complex_type(): - dtype = self.dtype.float_type + dtype = self.dtype.get_float_dtype(space) return SliceArray(self.start, strides, backstrides, self.get_shape(), self, orig_array, dtype=dtype) return SliceArray(self.start, strides, backstrides, self.get_shape(), self, orig_array) def set_real(self, space, orig_array, w_value): - tmp = self.get_real(orig_array) + tmp = self.get_real(space, orig_array) tmp.setslice(space, 
convert_to_array(space, w_value)) def get_imag(self, space, orig_array): strides = self.get_strides() backstrides = self.get_backstrides() if self.dtype.is_complex_type(): - dtype = self.dtype.float_type + dtype = self.dtype.get_float_dtype(space) return SliceArray(self.start + dtype.get_size(), strides, backstrides, self.get_shape(), self, orig_array, dtype=dtype) impl = NonWritableArray(self.get_shape(), self.dtype, self.order, strides, diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -78,22 +78,22 @@ scalar.value = dtype.itemtype.runpack_str(space, self.value.raw_str()) return scalar - def get_real(self, orig_array): + def get_real(self, space, orig_array): if self.dtype.is_complex_type(): - scalar = Scalar(self.dtype.float_type) + scalar = Scalar(self.dtype.get_float_dtype(space)) scalar.value = self.value.convert_real_to(scalar.dtype) return scalar return self def set_real(self, space, orig_array, w_val): w_arr = convert_to_array(space, w_val) - dtype = self.dtype.float_type or self.dtype if len(w_arr.get_shape()) > 0: raise OperationError(space.w_ValueError, space.wrap( "could not broadcast input array from shape " + "(%s) into shape ()" % ( ','.join([str(x) for x in w_arr.get_shape()],)))) if self.dtype.is_complex_type(): + dtype = self.dtype.get_float_dtype(space) self.value = self.dtype.itemtype.composite( w_arr.get_scalar_value().convert_to(space, dtype), self.value.convert_imag_to(dtype)) @@ -102,7 +102,7 @@ def get_imag(self, space, orig_array): if self.dtype.is_complex_type(): - scalar = Scalar(self.dtype.float_type) + scalar = Scalar(self.dtype.get_float_dtype(space)) scalar.value = self.value.convert_imag_to(scalar.dtype) return scalar scalar = Scalar(self.dtype) @@ -116,16 +116,15 @@ #Only called on complex dtype assert self.dtype.is_complex_type() w_arr = convert_to_array(space, w_val) - dtype = self.dtype.float_type if len(w_arr.get_shape()) > 0: raise OperationError(space.w_ValueError, space.wrap( "could not broadcast input array from shape " + "(%s) into shape ()" % ( ','.join([str(x) for x in w_arr.get_shape()],)))) + dtype = self.dtype.get_float_dtype(space) self.value = self.dtype.itemtype.composite( self.value.convert_real_to(dtype), - w_arr.get_scalar_value().convert_to(space, dtype), - ) + w_arr.get_scalar_value().convert_to(space, dtype)) def descr_getitem(self, space, _, w_idx): if space.isinstance_w(w_idx, space.w_tuple): diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -123,6 +123,11 @@ return '|S' + str(self.get_size()) return self.name + def get_float_dtype(self, space): + assert self.kind == NPY_COMPLEXLTR + assert self.float_type is not None + return get_dtype_cache(space).dtypes_by_name[self.byteorder + self.float_type] + def descr_str(self, space): return space.wrap(self.get_name()) @@ -697,7 +702,7 @@ char=NPY_CFLOATLTR, w_box_type = space.gettypefor(interp_boxes.W_Complex64Box), aliases=['csingle'], - float_type = self.w_float32dtype, + float_type=NPY_FLOATLTR, ) self.w_complex128dtype = W_Dtype( types.Complex128(), @@ -709,7 +714,7 @@ alternate_constructors=[space.w_complex, space.gettypefor(interp_boxes.W_ComplexFloatingBox)], aliases=["complex", 'cfloat', 'cdouble'], - float_type = self.w_float64dtype, + float_type=NPY_DOUBLELTR, ) self.w_complexlongdtype = 
W_Dtype( types.ComplexLong(), @@ -719,7 +724,7 @@ char=NPY_CLONGDOUBLELTR, w_box_type = space.gettypefor(interp_boxes.W_ComplexLongBox), aliases=["clongdouble", "clongfloat"], - float_type = self.w_floatlongdtype, + float_type=NPY_LONGDOUBLELTR, ) self.w_stringdtype = W_Dtype( types.StringType(), diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -323,8 +323,8 @@ return wrap_impl(space, w_subtype, self, copy) def descr_get_real(self, space): - return wrap_impl(space, space.type(self), self, - self.implementation.get_real(self)) + ret = self.implementation.get_real(space, self) + return wrap_impl(space, space.type(self), self, ret) def descr_get_imag(self, space): ret = self.implementation.get_imag(space, self) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3028,6 +3028,20 @@ v = fromstring("abcd", dtype="|S2") assert v[0] == "ab" assert v[1] == "cd" + v = fromstring('@\x01\x99\x99\x99\x99\x99\x9a\xbf\xf1\x99\x99\x99\x99\x99\x9a', + dtype=dtype('>c16')) + assert v.tostring() == \ + '@\x01\x99\x99\x99\x99\x99\x9a\xbf\xf1\x99\x99\x99\x99\x99\x9a' + assert v[0] == 2.2-1.1j + assert v.real == 2.2 + assert v.imag == -1.1 + v = fromstring('\x9a\x99\x99\x99\x99\x99\x01@\x9a\x99\x99\x99\x99\x99\xf1\xbf', + dtype=dtype(' Author: Brian Kearns Branch: Changeset: r69228:b2ec9d79e30c Date: 2014-02-20 19:11 -0500 http://bitbucket.org/pypy/pypy/changeset/b2ec9d79e30c/ Log: fix fromstring for non-native types diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3006,6 +3006,10 @@ assert (m == [1.0, -1.0, 2.0, 3.0]).all() n = fromstring("3.4 2.0 3.8 2.2", dtype='int32', sep=" ") assert (n == [3]).all() + n = fromstring('\x00\x00\x00{', dtype='>i4') + assert n == 123 + n = fromstring('W\xb0', dtype='>f2') + assert n == 123. 
o = fromstring("1.0 2f.0f 3.8 2.2", dtype='float32', sep=" ") assert len(o) == 2 assert o[0] == 1.0 diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -198,7 +198,9 @@ self._write(storage, i, offset, value) def runpack_str(self, space, s): - v = runpack(self.format_code, s) + v = rffi.cast(self.T, runpack(self.format_code, s)) + if not self.native: + v = byteswap(v) return self.box(v) @simple_binary_op @@ -972,8 +974,10 @@ def runpack_str(self, space, s): assert len(s) == 2 - fval = unpack_float(s, native_is_bigendian) - return self.box(fval) + fval = self.box(unpack_float(s, native_is_bigendian)) + if not self.native: + fval = self.byteswap(fval) + return fval def default_fromstring(self, space): return self.box(-1.0) @@ -1599,8 +1603,10 @@ def runpack_str(self, space, s): assert len(s) == interp_boxes.long_double_size - fval = unpack_float80(s, native_is_bigendian) - return self.box(fval) + fval = self.box(unpack_float80(s, native_is_bigendian)) + if not self.native: + fval = self.byteswap(fval) + return fval def byteswap(self, w_v): value = self.unbox(w_v) From noreply at buildbot.pypy.org Fri Feb 21 02:24:30 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 21 Feb 2014 02:24:30 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: restore the barely more efficient overrides Message-ID: <20140221012430.012341C31C7@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r69229:d788869dbd75 Date: 2014-02-20 17:12 -0800 http://bitbucket.org/pypy/pypy/changeset/d788869dbd75/ Log: restore the barely more efficient overrides diff --git a/pypy/objspace/std/boolobject.py b/pypy/objspace/std/boolobject.py --- a/pypy/objspace/std/boolobject.py +++ b/pypy/objspace/std/boolobject.py @@ -2,6 +2,7 @@ import operator +from rpython.rlib.rarithmetic import r_uint from rpython.tool.sourcetools import func_renamer, func_with_new_name from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec @@ -24,6 +25,12 @@ def unwrap(self, space): return bool(self.intval) + def uint_w(self, space): + return r_uint(self.intval) + + def int(self, space): + return space.newint(self.intval) + @staticmethod @unwrap_spec(w_obj=WrappedDefault(False)) def descr_new(space, w_booltype, w_obj): From noreply at buildbot.pypy.org Fri Feb 21 02:24:31 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 21 Feb 2014 02:24:31 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: try to generate less code, cleanup Message-ID: <20140221012431.3F0751C31C7@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r69230:8a62ef0780d3 Date: 2014-02-20 17:18 -0800 http://bitbucket.org/pypy/pypy/changeset/8a62ef0780d3/ Log: try to generate less code, cleanup diff --git a/pypy/objspace/std/boolobject.py b/pypy/objspace/std/boolobject.py --- a/pypy/objspace/std/boolobject.py +++ b/pypy/objspace/std/boolobject.py @@ -50,6 +50,7 @@ int_op = getattr(W_IntObject, descr_name) op = getattr(operator, opname + '_' if opname in ('and', 'or') else opname) + @func_renamer(descr_name) def descr_binop(self, space, w_other): if not isinstance(w_other, W_BoolObject): @@ -57,7 +58,12 @@ a = bool(self.intval) b = bool(w_other.intval) return space.newbool(op(a, b)) - return descr_binop, func_with_new_name(descr_binop, 'descr_r' + opname) + + @func_renamer('descr_r' + opname) + def descr_rbinop(self, space, w_other): + return descr_binop(self, 
space, w_other) + + return descr_binop, descr_rbinop descr_and, descr_rand = _make_bitwise_binop('and') descr_or, descr_ror = _make_bitwise_binop('or') diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -21,8 +21,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.buffer import Buffer from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.gateway import ( - WrappedDefault, interp2app, interpindirect2app, unwrap_spec) +from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from pypy.objspace.std import newformat from pypy.objspace.std.model import ( BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_INT) @@ -284,14 +283,23 @@ """T.__new__(S, ...) -> a new object with type S, a subtype of T""" return _new_int(space, w_inttype, w_x, w_base) - descr_pos = func_with_new_name(int, 'descr_pos') - descr_index = func_with_new_name(int, 'descr_index') - descr_trunc = func_with_new_name(int, 'descr_trunc') - descr_conjugate = func_with_new_name(int, 'descr_conjugate') + def descr_hash(self, space): + # unlike CPython, we don't special-case the value -1 in most of + # our hash functions, so there is not much sense special-casing + # it here either. Make sure this is consistent with the hash of + # floats and longs. + return self.int(space) - def descr_get_numerator(self, space): + def _int(self, space): return self.int(space) - descr_get_real = descr_get_numerator + + descr_pos = func_with_new_name(_int, 'descr_pos') + descr_index = func_with_new_name(_int, 'descr_index') + descr_trunc = func_with_new_name(_int, 'descr_trunc') + descr_conjugate = func_with_new_name(_int, 'descr_conjugate') + + descr_get_numerator = func_with_new_name(_int, 'descr_get_numerator') + descr_get_real = func_with_new_name(_int, 'descr_get_real') def descr_get_denominator(self, space): return wrapint(space, 1) @@ -305,16 +313,10 @@ return space.newtuple([self, w_other]) def descr_long(self, space): + # XXX: should try smalllong from pypy.objspace.std.longobject import W_LongObject return W_LongObject.fromint(space, self.intval) - def descr_hash(self, space): - # unlike CPython, we don't special-case the value -1 in most of - # our hash functions, so there is not much sense special-casing - # it here either. Make sure this is consistent with the hash of - # floats and longs. 
- return self.int(space) - def descr_nonzero(self, space): return space.newbool(self.intval != 0) @@ -430,6 +432,7 @@ def _make_generic_descr_binop(opname, ovf=True): op = getattr(operator, opname + '_' if opname in ('and', 'or') else opname) + descr_rname = 'descr_r' + opname @func_renamer('descr_' + opname) def descr_binop(self, space, w_other): @@ -448,10 +451,12 @@ return wrapint(space, z) if opname in COMMUTATIVE_OPS: - return descr_binop, func_with_new_name(descr_binop, - 'descr_r' + opname) + @func_renamer(descr_rname) + def descr_rbinop(self, space, w_other): + return descr_binop(self, space, w_other) + return descr_binop, descr_rbinop - @func_renamer('descr_r' + opname) + @func_renamer(descr_rname) def descr_rbinop(self, space, w_other): if not isinstance(w_other, W_IntObject): return space.w_NotImplemented @@ -509,15 +514,17 @@ return _ovf2long(space, opname, w_other, self) else: return func(space, y, x) + return descr_binop, descr_rbinop + descr_lshift, descr_rlshift = _make_descr_binop(_lshift) + descr_rshift, descr_rrshift = _make_descr_binop(_rshift, ovf=False) + descr_floordiv, descr_rfloordiv = _make_descr_binop(_floordiv) descr_div, descr_rdiv = _make_descr_binop(_div) descr_truediv, descr_rtruediv = _make_descr_binop(_truediv, ovf=False) descr_mod, descr_rmod = _make_descr_binop(_mod) descr_divmod, descr_rdivmod = _make_descr_binop(_divmod) - descr_lshift, descr_rlshift = _make_descr_binop(_lshift) - descr_rshift, descr_rrshift = _make_descr_binop(_rshift, ovf=False) def wrapint(space, x): @@ -600,8 +607,8 @@ # check for easy cases if type(w_value) is W_IntObject: value = w_value.intval - elif space.lookup(w_value, '__int__') is not None or \ - space.lookup(w_value, '__trunc__') is not None: + elif (space.lookup(w_value, '__int__') is not None or + space.lookup(w_value, '__trunc__') is not None): # otherwise, use the __int__() or the __trunc__() methods w_obj = w_value if space.lookup(w_obj, '__int__') is None: diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -361,6 +361,7 @@ def _make_generic_descr_binop(opname): methname = opname + '_' if opname in ('and', 'or') else opname + descr_rname = 'descr_r' + opname op = getattr(rbigint, methname) @func_renamer('descr_' + opname) @@ -369,9 +370,11 @@ return W_LongObject(op(self.num, w_other.asbigint())) if opname in COMMUTATIVE_OPS: - descr_rbinop = func_with_new_name(descr_binop, 'descr_r' + opname) + @func_renamer(descr_rname) + def descr_rbinop(self, space, w_other): + return descr_binop(self, space, w_other) else: - @func_renamer('descr_r' + opname) + @func_renamer(descr_rname) @delegate_other def descr_rbinop(self, space, w_other): # XXX: delegate, for --objspace-std-withsmalllong diff --git a/pypy/objspace/std/marshal_impl.py b/pypy/objspace/std/marshal_impl.py --- a/pypy/objspace/std/marshal_impl.py +++ b/pypy/objspace/std/marshal_impl.py @@ -28,7 +28,7 @@ from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.typeobject import W_TypeObject from pypy.objspace.std.longobject import W_LongObject, newlong -from pypy.objspace.std.smalllongobject import W_SmallLongObject +from pypy.objspace.std.smalllongobject import W_SmallLongObject from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.unicodeobject import W_UnicodeObject diff --git a/pypy/objspace/std/smalllongobject.py b/pypy/objspace/std/smalllongobject.py --- a/pypy/objspace/std/smalllongobject.py +++ 
b/pypy/objspace/std/smalllongobject.py @@ -200,28 +200,31 @@ return func(self, space, w_other) if opname in COMMUTATIVE_OPS: - descr_rbinop = func_with_new_name(descr_binop, descr_rname) - else: - long_rop = getattr(W_LongObject, descr_rname) @func_renamer(descr_rname) def descr_rbinop(self, space, w_other): - if isinstance(w_other, W_AbstractIntObject): - w_other = _int2small(space, w_other) - elif not isinstance(w_other, W_AbstractLongObject): - return space.w_NotImplemented - elif not isinstance(w_other, W_SmallLongObject): + return descr_binop(self, space, w_other) + return descr_binop, descr_rbinop + + long_rop = getattr(W_LongObject, descr_rname) + @func_renamer(descr_rname) + def descr_rbinop(self, space, w_other): + if isinstance(w_other, W_AbstractIntObject): + w_other = _int2small(space, w_other) + elif not isinstance(w_other, W_AbstractLongObject): + return space.w_NotImplemented + elif not isinstance(w_other, W_SmallLongObject): + self = _small2long(space, self) + return long_rop(self, space, w_other) + + if ovf: + try: + return func(w_other, space, self) + except OverflowError: self = _small2long(space, self) + w_other = _small2long(space, w_other) return long_rop(self, space, w_other) - - if ovf: - try: - return func(w_other, space, self) - except OverflowError: - self = _small2long(space, self) - w_other = _small2long(space, w_other) - return long_rop(self, space, w_other) - else: - return func(w_other, space, self) + else: + return func(w_other, space, self) return descr_binop, descr_rbinop From noreply at buildbot.pypy.org Fri Feb 21 02:24:32 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 21 Feb 2014 02:24:32 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: apply optimized_int_add to INPLACE_ADD and add a similar opt. for Message-ID: <20140221012432.722941C31C7@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r69231:bb83564f9dbd Date: 2014-02-20 17:20 -0800 http://bitbucket.org/pypy/pypy/changeset/bb83564f9dbd/ Log: apply optimized_int_add to INPLACE_ADD and add a similar opt. for subtraction. 
these may prove useful for interpreted mode now that we lack mulitmethod shortcuts diff --git a/pypy/objspace/std/frame.py b/pypy/objspace/std/frame.py --- a/pypy/objspace/std/frame.py +++ b/pypy/objspace/std/frame.py @@ -36,6 +36,22 @@ self.pushvalue(w_result) +def int_BINARY_SUBTRACT(self, oparg, next_instr): + space = self.space + w_2 = self.popvalue() + w_1 = self.popvalue() + if type(w_1) is W_IntObject and type(w_2) is W_IntObject: + try: + z = ovfcheck(w_1.intval - w_2.intval) + except OverflowError: + w_result = w_1.descr_sub(space, w_2) + else: + w_result = space.newint(z) + else: + w_result = space.sub(w_1, w_2) + self.pushvalue(w_result) + + def list_BINARY_SUBSCR(self, oparg, next_instr): space = self.space w_2 = self.popvalue() @@ -56,6 +72,9 @@ pass if space.config.objspace.std.optimized_int_add: StdObjSpaceFrame.BINARY_ADD = int_BINARY_ADD + StdObjSpaceFrame.INPLACE_ADD = int_BINARY_ADD + StdObjSpaceFrame.BINARY_SUB = int_BINARY_SUBTRACT + StdObjSpaceFrame.INPLACE_SUBTRACT = int_BINARY_SUBTRACT if space.config.objspace.std.optimized_list_getitem: StdObjSpaceFrame.BINARY_SUBSCR = list_BINARY_SUBSCR from pypy.objspace.std.callmethod import LOOKUP_METHOD, CALL_METHOD diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -310,6 +310,13 @@ return 42 assert I(1).conjugate() == 1 + def test_inplace(self): + a = 1 + a += 1 + assert a == 2 + a -= 1 + assert a == 1 + def test_trunc(self): import math assert math.trunc(1) == 1 From noreply at buildbot.pypy.org Fri Feb 21 02:24:33 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Fri, 21 Feb 2014 02:24:33 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: try out optimized_int_add which might be useful now Message-ID: <20140221012433.ABE3E1C31C7@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r69232:48d4ccfd24b1 Date: 2014-02-20 17:23 -0800 http://bitbucket.org/pypy/pypy/changeset/48d4ccfd24b1/ Log: try out optimized_int_add which might be useful now diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -302,6 +302,7 @@ config.objspace.std.suggest(withrangelist=True) config.objspace.std.suggest(withmethodcache=True) config.objspace.std.suggest(withprebuiltchar=True) + config.objspace.std.suggest(optimized_int_add=True) config.objspace.std.suggest(optimized_list_getitem=True) config.objspace.std.suggest(getattributeshortcut=True) #config.objspace.std.suggest(newshortcut=True) From noreply at buildbot.pypy.org Fri Feb 21 06:37:47 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 21 Feb 2014 06:37:47 +0100 (CET) Subject: [pypy-commit] pypy default: fix complex log behavior to match numpy Message-ID: <20140221053747.23E741C1504@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69233:fc1fb890aaa6 Date: 2014-02-20 20:30 -0500 http://bitbucket.org/pypy/pypy/changeset/fc1fb890aaa6/ Log: fix complex log behavior to match numpy diff --git a/pypy/module/micronumpy/test/complex_testcases.txt b/pypy/module/micronumpy/test/complex_testcases.txt --- a/pypy/module/micronumpy/test/complex_testcases.txt +++ b/pypy/module/micronumpy/test/complex_testcases.txt @@ -1346,7 +1346,7 @@ log0201 log 0.79999999999999993 0.60000000000000009 -> 6.1629758220391547e-33 0.64350110879328448 -- special values -log1000 log -0.0 0.0 -> -inf 0.0 divide-by-zero +log1000 
log -0.0 0.0 -> -inf 3.1415926535897931 divide-by-zero log1001 log 0.0 0.0 -> -inf 0.0 divide-by-zero log1002 log 0.0 inf -> inf 1.5707963267948966 log1003 log 2.3 inf -> inf 1.5707963267948966 @@ -1368,8 +1368,8 @@ log1019 log nan 2.3 -> nan nan log1020 log nan inf -> inf nan log1021 log nan nan -> nan nan -log1022 log -0.0 -0.0 -> -inf 0.0 divide-by-zero -log1023 log 0.0 -0.0 -> -inf 0.0 divide-by-zero +log1022 log -0.0 -0.0 -> -inf -3.1415926535897931 divide-by-zero +log1023 log 0.0 -0.0 -> -inf -0.0 divide-by-zero log1024 log 0.0 -inf -> inf -1.5707963267948966 log1025 log 2.3 -inf -> inf -1.5707963267948966 log1026 log -0.0 -inf -> inf -1.5707963267948966 @@ -1514,7 +1514,7 @@ logt0201 log10 0.79999999999999993 0.60000000000000009 -> 2.6765463916147622e-33 0.2794689806475476 -- special values -logt1000 log10 -0.0 0.0 -> -inf 0.0 divide-by-zero +logt1000 log10 -0.0 0.0 -> -inf 1.3643763538418412 divide-by-zero logt1001 log10 0.0 0.0 -> -inf 0.0 divide-by-zero logt1002 log10 0.0 inf -> inf 0.68218817692092071 logt1003 log10 2.3 inf -> inf 0.68218817692092071 @@ -1536,8 +1536,8 @@ logt1019 log10 nan 2.3 -> nan nan logt1020 log10 nan inf -> inf nan logt1021 log10 nan nan -> nan nan -logt1022 log10 -0.0 -0.0 -> -inf 0.0 divide-by-zero -logt1023 log10 0.0 -0.0 -> -inf 0.0 divide-by-zero +logt1022 log10 -0.0 -0.0 -> -inf -1.3643763538418412 divide-by-zero +logt1023 log10 0.0 -0.0 -> -inf -0.0 divide-by-zero logt1024 log10 0.0 -inf -> inf -0.68218817692092071 logt1025 log10 2.3 -inf -> inf -0.68218817692092071 logt1026 log10 -0.0 -inf -> inf -0.68218817692092071 diff --git a/pypy/module/micronumpy/test/test_complex.py b/pypy/module/micronumpy/test/test_complex.py --- a/pypy/module/micronumpy/test/test_complex.py +++ b/pypy/module/micronumpy/test/test_complex.py @@ -412,7 +412,7 @@ except OverflowError: res = cmpl(inf, nan) except ValueError: - res = cmpl(ninf, 0) + res = cmpl(ninf, math.atan2(a[i].imag, a[i].real) / log_2) msg = 'result of log2(%r(%r)) got %r expected %r\n ' % \ (c,a[i], b[i], res) # cast untranslated boxed results to float, diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -25,6 +25,7 @@ degToRad = math.pi / 180.0 log2 = math.log(2) log2e = 1. 
/ log2 +log10 = math.log(10) def simple_unary_op(func): specialize.argtype(1)(func) @@ -1549,22 +1550,25 @@ @complex_unary_op def log(self, v): - if v[0] == 0 and v[1] == 0: - return -rfloat.INFINITY, 0 - return rcomplex.c_log(*v) + try: + return rcomplex.c_log(*v) + except ValueError: + return -rfloat.INFINITY, math.atan2(v[1], v[0]) @complex_unary_op def log2(self, v): - if v[0] == 0 and v[1] == 0: - return -rfloat.INFINITY, 0 - r = rcomplex.c_log(*v) + try: + r = rcomplex.c_log(*v) + except ValueError: + r = -rfloat.INFINITY, math.atan2(v[1], v[0]) return r[0] / log2, r[1] / log2 @complex_unary_op def log10(self, v): - if v[0] == 0 and v[1] == 0: - return -rfloat.INFINITY, 0 - return rcomplex.c_log10(*v) + try: + return rcomplex.c_log10(*v) + except ValueError: + return -rfloat.INFINITY, math.atan2(v[1], v[0]) / log10 @complex_unary_op def log1p(self, v): From noreply at buildbot.pypy.org Fri Feb 21 06:37:48 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 21 Feb 2014 06:37:48 +0100 (CET) Subject: [pypy-commit] pypy default: fix some numpy str/repr cases Message-ID: <20140221053748.8E5911C1504@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69234:3e6219275db7 Date: 2014-02-21 00:17 -0500 http://bitbucket.org/pypy/pypy/changeset/3e6219275db7/ Log: fix some numpy str/repr cases diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -106,10 +106,7 @@ scalar.value = self.value.convert_imag_to(scalar.dtype) return scalar scalar = Scalar(self.dtype) - if self.dtype.is_flexible_type(): - scalar.value = self.value - else: - scalar.value = scalar.dtype.itemtype.box(0) + scalar.value = scalar.dtype.coerce(space, None) return scalar def set_imag(self, space, orig_array, w_val): diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -262,24 +262,30 @@ def descr_str(self, space): cache = get_appbridge_cache(space) if cache.w_array_str is None: - return space.wrap(self.dump_data()) + return space.wrap(self.dump_data(prefix='', separator='', suffix='')) return space.call_function(cache.w_array_str, self) - def dump_data(self, prefix='array(', suffix=')'): + def dump_data(self, prefix='array(', separator=',', suffix=')'): i = self.create_iter() first = True dtype = self.get_dtype() s = StringBuilder() s.append(prefix) - s.append('[') + if not self.is_scalar(): + s.append('[') while not i.done(): if first: first = False else: - s.append(', ') - s.append(dtype.itemtype.str_format(i.getitem())) + s.append(separator) + s.append(' ') + if self.is_scalar() and dtype.is_str_type(): + s.append(dtype.itemtype.to_str(i.getitem())) + else: + s.append(dtype.itemtype.str_format(i.getitem())) i.next() - s.append(']') + if not self.is_scalar(): + s.append(']') s.append(suffix) return s.build() diff --git a/pypy/module/micronumpy/test/test_complex.py b/pypy/module/micronumpy/test/test_complex.py --- a/pypy/module/micronumpy/test/test_complex.py +++ b/pypy/module/micronumpy/test/test_complex.py @@ -477,6 +477,7 @@ assert c[i] == max(a[i], b[i]) def test_basic(self): + import sys from numpypy import (dtype, add, array, dtype, subtract as sub, multiply, divide, negative, absolute as abs, floor_divide, real, imag, sign) @@ -507,9 +508,8 @@ assert str(exc.value) == \ "could not broadcast input array from 
shape (2) into shape ()" a = array('abc') - assert str(a.real) == str(a) - # numpy imag for flexible types returns self - assert str(a.imag) == str(a) + assert str(a.real) == 'abc' + assert str(a.imag) == '' for t in 'complex64', 'complex128', 'clongdouble': complex_ = dtype(t).type O = complex(0, 0) @@ -578,10 +578,14 @@ assert repr(abs(complex(float('nan'), float('nan')))) == 'nan' # numpy actually raises an AttributeError, # but numpypy raises a TypeError - exc = raises((TypeError, AttributeError), 'c2.real = 10.') - assert str(exc.value) == "readonly attribute" - exc = raises((TypeError, AttributeError), 'c2.imag = 10.') - assert str(exc.value) == "readonly attribute" + if '__pypy__' in sys.builtin_module_names: + exct, excm = TypeError, 'readonly attribute' + else: + exct, excm = AttributeError, 'is not writable' + exc = raises(exct, 'c2.real = 10.') + assert excm in exc.value[0] + exc = raises(exct, 'c2.imag = 10.') + assert excm in exc.value[0] assert(real(c2) == 3.0) assert(imag(c2) == 4.0) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3115,6 +3115,12 @@ assert array(2.2-1.1j, dtype=' Author: Armin Rigo Branch: c7-refactor Changeset: r802:31fcb790a36e Date: 2014-02-21 09:24 +0100 http://bitbucket.org/pypy/stmgc/changeset/31fcb790a36e/ Log: Fix: found out how we are really supposed to get fresh zero-mapped pages in an mmap. The answer is simply to call mmap(MAP_FIXED) again. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -93,10 +93,16 @@ */ char *readmarkers = REAL_ADDRESS(STM_SEGMENT->segment_base, FIRST_READMARKER_PAGE * 4096UL); - if (madvise(readmarkers, NB_READMARKER_PAGES * 4096UL, - MADV_DONTNEED) < 0) { - perror("madvise"); + dprintf(("reset_transaction_read_version: %p %ld\n", readmarkers, + (long)(NB_READMARKER_PAGES * 4096UL))); + if (mmap(readmarkers, NB_READMARKER_PAGES * 4096UL, + PROT_READ | PROT_WRITE, + MAP_FIXED | MAP_PAGES_FLAGS, -1, 0) != readmarkers) { + /* fall-back */ +#if STM_TESTS abort(); +#endif + memset(readmarkers, 0, NB_READMARKER_PAGES * 4096UL); } reset_transaction_read_version_prebuilt(); STM_SEGMENT->transaction_read_version = 1; diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -144,6 +144,10 @@ */ static inline void stm_read(object_t *obj) { +#if 0 /* very costly check */ + assert(((stm_read_marker_t *)(((uintptr_t)obj) >> 4))->rm + <= STM_SEGMENT->transaction_read_version); +#endif ((stm_read_marker_t *)(((uintptr_t)obj) >> 4))->rm = STM_SEGMENT->transaction_read_version; } From noreply at buildbot.pypy.org Fri Feb 21 09:25:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 21 Feb 2014 09:25:04 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Kill old tests, fix test_demo. Message-ID: <20140221082504.ECEE61D23D4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r803:fd44c4ddab50 Date: 2014-02-21 09:24 +0100 http://bitbucket.org/pypy/stmgc/changeset/fd44c4ddab50/ Log: Kill old tests, fix test_demo. 
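The read-marker check added to stm_read() in changeset r802 above (the stmgc.h hunk) is easier to follow with a small model of the layout: one marker byte covers every 16 bytes of object space, it lives at the object's address shifted right by 4, and it must never be ahead of the segment's current transaction_read_version. The following rough Python model is purely illustrative and is not part of either changeset; the helper names (stm_read_model, costly_check, READ_MARKER_SHIFT) are made up here, only the shift-by-4 addressing and the inequality come from the C code in stmgc.h.

    READ_MARKER_SHIFT = 4     # one marker byte per 16 bytes of object space

    def stm_read_model(read_markers, obj_addr, transaction_read_version):
        # fast path of stm_read(): record that this transaction read the object
        read_markers[obj_addr >> READ_MARKER_SHIFT] = transaction_read_version

    def costly_check(read_markers, obj_addr, transaction_read_version):
        # the '#if 0' assertion: a marker may lag behind, but it can never be
        # newer than the version the segment is currently running with
        assert read_markers[obj_addr >> READ_MARKER_SHIFT] <= transaction_read_version

    markers = [0] * 1024      # stand-in for the per-segment read-marker pages
    costly_check(markers, 0x1230, 1)        # holds right after a reset to zero
    stm_read_model(markers, 0x1230, 1)
    assert markers[0x1230 >> READ_MARKER_SHIFT] == 1

Remapping the marker pages with mmap(MAP_FIXED) in reset_transaction_read_version(), as done in the same changeset, resets every marker to zero, which is what makes the assertion hold at the start of a fresh transaction.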
diff --git a/c7/test/test_bug.py b/c7/test/test_bug.py deleted file mode 100644 --- a/c7/test/test_bug.py +++ /dev/null @@ -1,224 +0,0 @@ -from support import * -import py - - -class TestBug(BaseTest): - - def test_write_marker_no_conflict(self): - # initialization - stm_start_transaction() - lp0 = stm_allocate(16) - stm_set_char(lp0, '\x00') - stm_push_root(lp0) - lp1 = stm_allocate(16) - stm_set_char(lp1, '\x01') - stm_push_root(lp1) - lp2 = stm_allocate(16) - stm_set_char(lp2, '\x02') - stm_push_root(lp2) - lp3 = stm_allocate(16) - stm_set_char(lp3, '\x03') - stm_push_root(lp3) - lp4 = stm_allocate(16) - stm_set_char(lp4, '\x04') - stm_push_root(lp4) - stm_stop_transaction() - lp4 = stm_pop_root() - lp3 = stm_pop_root() - lp2 = stm_pop_root() - lp1 = stm_pop_root() - lp0 = stm_pop_root() - # - self.switch(1) - stm_start_transaction() - assert stm_get_char(lp1) == '\x01' - stm_set_char(lp1, '\x15') - # - self.switch(0) - stm_start_transaction() - assert stm_get_char(lp2) == '\x02' - # - self.switch(1) - assert stm_get_char(lp4) == '\x04' - assert stm_get_char(lp4) == '\x04' - assert stm_get_char(lp2) == '\x02' - assert stm_get_char(lp0) == '\x00' - assert stm_get_char(lp1) == '\x15' - assert stm_get_char(lp2) == '\x02' - stm_stop_transaction() #1 lp1='\x15' - stm_start_transaction() - stm_stop_transaction() #2 - stm_start_transaction() - assert stm_get_char(lp2) == '\x02' - assert stm_get_char(lp4) == '\x04' - assert stm_get_char(lp2) == '\x02' - assert stm_get_char(lp0) == '\x00' - assert stm_get_char(lp0) == '\x00' - assert stm_get_char(lp1) == '\x15' - assert stm_get_char(lp0) == '\x00' - assert stm_get_char(lp2) == '\x02' - assert stm_get_char(lp0) == '\x00' - assert stm_get_char(lp4) == '\x04' - stm_set_char(lp4, '\xdf') - # - self.switch(0) - assert stm_get_char(lp3) == '\x03' - stm_stop_transaction() #3 - # - self.switch(1) - assert stm_get_char(lp4) == '\xdf' - stm_set_char(lp4, '\x0c') - assert stm_get_char(lp2) == '\x02' - assert stm_get_char(lp3) == '\x03' - # - self.switch(0) - stm_start_transaction() - assert stm_get_char(lp3) == '\x03' - stm_stop_transaction() #4 - # - self.switch(1) - assert stm_get_char(lp0) == '\x00' - stm_set_char(lp0, 's') - # - self.switch(0) - stm_start_transaction() - assert stm_get_char(lp1) == '\x15' - # - self.switch(1) - assert stm_get_char(lp4) == '\x0c' - stm_set_char(lp4, 'Q') - assert stm_get_char(lp2) == '\x02' - # - self.switch(0) - assert stm_get_char(lp3) == '\x03' - # - self.switch(1) - assert stm_get_char(lp0) == 's' - # - self.switch(0) - assert stm_get_char(lp4) == '\x04' - assert stm_get_char(lp1) == '\x15' - stm_set_char(lp1, '\xd1') - stm_stop_transaction() #5 lp1='\xd1' - stm_start_transaction() - assert stm_get_char(lp2) == '\x02' - stm_set_char(lp2, 'j') - # - py.test.raises(Conflict, self.switch, 1) - stm_start_transaction() - assert stm_get_char(lp3) == '\x03' - # - self.switch(0) - assert stm_get_char(lp4) == '\x04' - # - self.switch(1) - assert stm_get_char(lp0) == '\x00' - assert stm_get_char(lp3) == '\x03' - # - self.switch(0) - assert stm_get_char(lp4) == '\x04' - assert stm_get_char(lp4) == '\x04' - # - self.switch(1) - assert stm_get_char(lp0) == '\x00' - # - self.switch(0) - assert stm_get_char(lp0) == '\x00' - assert stm_get_char(lp1) == '\xd1' - # - self.switch(1) - assert stm_get_char(lp3) == '\x03' - # - self.switch(0) - assert stm_get_char(lp1) == '\xd1' - assert stm_get_char(lp0) == '\x00' - # - self.switch(1) - assert stm_get_char(lp2) == '\x02' - assert stm_get_char(lp2) == '\x02' - assert stm_get_char(lp0) == 
'\x00' - stm_set_char(lp0, '\xdf') - # - self.switch(0) - assert stm_get_char(lp2) == 'j' - stm_set_char(lp2, '\xed') - assert stm_get_char(lp1) == '\xd1' - # - self.switch(1) - assert stm_get_char(lp1) == '\xd1' - assert stm_get_char(lp3) == '\x03' - # - self.switch(0) - assert stm_get_char(lp2) == '\xed' - stm_set_char(lp2, '\x02') - assert stm_get_char(lp2) == '\x02' - stm_set_char(lp2, 'Q') - # - self.switch(1) - assert stm_get_char(lp0) == '\xdf' - stm_set_char(lp0, '#') - # - self.switch(0) - assert stm_get_char(lp1) == '\xd1' - stm_stop_transaction() #6 lp2='Q' - # - py.test.raises(Conflict, self.switch, 1) - stm_start_transaction() - assert stm_get_char(lp0) == '\x00' - assert stm_get_char(lp3) == '\x03' - stm_set_char(lp3, '\xf9') - # - self.switch(0) - stm_start_transaction() - assert stm_get_char(lp0) == '\x00' - assert stm_get_char(lp1) == '\xd1' - # - self.switch(1) - stm_stop_transaction() #7 lp3='\xf9' - # - self.switch(0) - stm_stop_transaction() #8 - stm_start_transaction() - assert stm_get_char(lp4) == '\x04' - assert stm_get_char(lp3) == '\xf9' - # - self.switch(1) - stm_start_transaction() - assert stm_get_char(lp0) == '\x00' - stm_set_char(lp0, 'N') - # - self.switch(0) - assert stm_get_char(lp4) == '\x04' - stm_set_char(lp4, 'K') - # - self.switch(1) - assert stm_get_char(lp4) == '\x04' - assert stm_get_char(lp3) == '\xf9' - # - self.switch(0) - assert stm_get_char(lp3) == '\xf9' - assert stm_get_char(lp4) == 'K' - stm_set_char(lp4, '\xce') - # - self.switch(1) - stm_stop_transaction() #9 lp0='N' - stm_start_transaction() - assert stm_get_char(lp2) == 'Q' - assert stm_get_char(lp4) == '\x04' - assert stm_get_char(lp1) == '\xd1' - stm_set_char(lp1, '\xdb') - stm_stop_transaction() #10 lp1='\xdb' - # - self.switch(0) - stm_stop_transaction() #11 lp4='\xce' - stm_start_transaction() - assert stm_get_char(lp2) == 'Q' - assert stm_get_char(lp0) == 'N' - # - self.switch(1) - stm_start_transaction() - assert stm_get_char(lp0) == 'N' - stm_set_char(lp0, '\x80') - # - stm_stop_transaction() - py.test.raises(Conflict, self.switch, 0) diff --git a/c7/test/test_demo.py b/c7/test/test_demo.py --- a/c7/test/test_demo.py +++ b/c7/test/test_demo.py @@ -11,7 +11,7 @@ def make_and_run(self, target): self._do("make -C ../demo %s" % target) - self._do("../demo/%s" % target) + self._do("../demo/%s 2> /dev/null" % target) def test_demo2_debug(self): self.make_and_run("debug-demo2") def test_demo2_build(self): self.make_and_run("build-demo2") From noreply at buildbot.pypy.org Fri Feb 21 09:26:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 21 Feb 2014 09:26:28 +0100 (CET) Subject: [pypy-commit] pypy default: Found out how, in general on POSIX, we're supposed to get fresh Message-ID: <20140221082628.B6E841D23D4@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69235:992e29624c5f Date: 2014-02-21 09:25 +0100 http://bitbucket.org/pypy/pypy/changeset/992e29624c5f/ Log: Found out how, in general on POSIX, we're supposed to get fresh zero-mapped pages inside an mmap. Replaces the hacks with madvise() or reading /dev/zero. The answer is simply to call mmap(MAP_FIXED) again. 
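The trick described in this log message can be seen in isolation: calling mmap() again over an address range that is already part of an anonymous mapping, with MAP_FIXED, discards the old pages and replaces them with fresh zero-filled ones. Below is a small standalone ctypes sketch of that behaviour, separate from the diff that follows; it assumes a 64-bit Linux libc, the PROT_*/MAP_* constant values are the usual Linux ones rather than anything read from this changeset, and clear_pages() is just an illustrative name.

    import ctypes, ctypes.util

    libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)
    libc.mmap.restype = ctypes.c_void_p
    libc.mmap.argtypes = [ctypes.c_void_p, ctypes.c_size_t, ctypes.c_int,
                          ctypes.c_int, ctypes.c_int, ctypes.c_long]

    PROT_READ, PROT_WRITE = 0x1, 0x2                    # assumed Linux values
    MAP_PRIVATE, MAP_FIXED, MAP_ANONYMOUS = 0x02, 0x10, 0x20
    PAGESIZE = 4096

    def clear_pages(addr, length):
        # map fresh anonymous pages over the page-aligned range [addr, addr+length);
        # True means the kernel placed them exactly where we asked
        res = libc.mmap(ctypes.c_void_p(addr), length,
                        PROT_READ | PROT_WRITE,
                        MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
        return res == addr

    # allocate two anonymous pages and dirty them
    base = libc.mmap(None, 2 * PAGESIZE, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
    ctypes.memset(ctypes.c_void_p(base), 0xAA, 2 * PAGESIZE)

    # remap: the dirty pages are thrown away and read back as zeroes
    assert clear_pages(base, 2 * PAGESIZE)
    assert ctypes.string_at(base, 16) == b"\x00" * 16

This is the behaviour the new clear_large_memory_chunk_aligned() helper in rmmap.py relies on, with llarena.py still using llmemory.raw_memclear() for the misaligned head and tail of the range.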
diff --git a/rpython/rlib/rmmap.py b/rpython/rlib/rmmap.py --- a/rpython/rlib/rmmap.py +++ b/rpython/rlib/rmmap.py @@ -55,7 +55,7 @@ # constants, look in sys/mman.h and platform docs for the meaning # some constants are linux only so they will be correctly exposed outside # depending on the OS - constant_names = ['MAP_SHARED', 'MAP_PRIVATE', + constant_names = ['MAP_SHARED', 'MAP_PRIVATE', 'MAP_FIXED', 'PROT_READ', 'PROT_WRITE', 'MS_SYNC'] opt_constant_names = ['MAP_ANON', 'MAP_ANONYMOUS', 'MAP_NORESERVE', @@ -675,10 +675,17 @@ return m def alloc_hinted(hintp, map_size): - flags = MAP_PRIVATE | MAP_ANONYMOUS - prot = PROT_EXEC | PROT_READ | PROT_WRITE + flags = NonConstant(MAP_PRIVATE | MAP_ANONYMOUS) + prot = NonConstant(PROT_EXEC | PROT_READ | PROT_WRITE) return c_mmap_safe(hintp, map_size, prot, flags, -1, 0) + def clear_large_memory_chunk_aligned(addr, map_size): + addr = rffi.cast(PTR, addr) + flags = NonConstant(MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS) + prot = NonConstant(PROT_READ | PROT_WRITE) + res = c_mmap_safe(addr, map_size, prot, flags, -1, 0) + return res == addr + # XXX is this really necessary? class Hint: pos = -0x4fff0000 # for reproducible results diff --git a/rpython/rtyper/lltypesystem/llarena.py b/rpython/rtyper/lltypesystem/llarena.py --- a/rpython/rtyper/lltypesystem/llarena.py +++ b/rpython/rtyper/lltypesystem/llarena.py @@ -409,41 +409,28 @@ MEMORY_ALIGNMENT = memory_alignment() -if sys.platform.startswith('linux'): - # This only works with linux's madvise(), which is really not a memory - # usage hint but a real command. It guarantees that after MADV_DONTNEED - # the pages are cleared again. +if os.name == 'posix': + # The general Posix solution to clear a large range of memory that + # was obtained with mmap() is to call mmap() again with MAP_FIXED. - # Note that the trick of the general 'posix' section below, i.e. - # reading /dev/zero, does not seem to have the correct effect of - # lazily-allocating pages on all Linux systems. 
+ legacy_getpagesize = rffi.llexternal('getpagesize', [], rffi.INT, + sandboxsafe=True, _nowrapper=True) - from rpython.rtyper.tool import rffi_platform - from rpython.translator.tool.cbuild import ExternalCompilationInfo - _eci = ExternalCompilationInfo(includes=['sys/mman.h']) - MADV_DONTNEED = rffi_platform.getconstantinteger('MADV_DONTNEED', - '#include ') - linux_madvise = rffi.llexternal('madvise', - [llmemory.Address, rffi.SIZE_T, rffi.INT], - rffi.INT, - sandboxsafe=True, _nowrapper=True, - compilation_info=_eci) - linux_getpagesize = rffi.llexternal('getpagesize', [], rffi.INT, - sandboxsafe=True, _nowrapper=True, - compilation_info=_eci) - - class LinuxPageSize: + class PosixPageSize: def __init__(self): self.pagesize = 0 def _cleanup_(self): self.pagesize = 0 - linuxpagesize = LinuxPageSize() + posixpagesize = PosixPageSize() def clear_large_memory_chunk(baseaddr, size): - pagesize = linuxpagesize.pagesize + from rpython.rlib import rmmap + + pagesize = posixpagesize.pagesize if pagesize == 0: - pagesize = rffi.cast(lltype.Signed, linux_getpagesize()) - linuxpagesize.pagesize = pagesize + pagesize = rffi.cast(lltype.Signed, legacy_getpagesize()) + posixpagesize.pagesize = pagesize + if size > 2 * pagesize: lowbits = rffi.cast(lltype.Signed, baseaddr) & (pagesize - 1) if lowbits: # clear the initial misaligned part, if any @@ -452,56 +439,13 @@ baseaddr += partpage size -= partpage length = size & -pagesize - madv_length = rffi.cast(rffi.SIZE_T, length) - madv_flags = rffi.cast(rffi.INT, MADV_DONTNEED) - err = linux_madvise(baseaddr, madv_length, madv_flags) - if rffi.cast(lltype.Signed, err) == 0: - baseaddr += length # madvise() worked + if rmmap.clear_large_memory_chunk_aligned(baseaddr, length): + baseaddr += length # clearing worked size -= length + if size > 0: # clear the final misaligned part, if any llmemory.raw_memclear(baseaddr, size) -elif os.name == 'posix': - READ_MAX = (sys.maxint//4) + 1 # upper bound on reads to avoid surprises - raw_os_open = rffi.llexternal('open', - [rffi.CCHARP, rffi.INT, rffi.MODE_T], - rffi.INT, - sandboxsafe=True, _nowrapper=True) - raw_os_read = rffi.llexternal('read', - [rffi.INT, llmemory.Address, rffi.SIZE_T], - rffi.SIZE_T, - sandboxsafe=True, _nowrapper=True) - raw_os_close = rffi.llexternal('close', - [rffi.INT], - rffi.INT, - sandboxsafe=True, _nowrapper=True) - _dev_zero = rffi.str2charp('/dev/zero') # prebuilt - lltype.render_immortal(_dev_zero) - - def clear_large_memory_chunk(baseaddr, size): - # on some Unixy platforms, reading from /dev/zero is the fastest way - # to clear arenas, because the kernel knows that it doesn't - # need to even allocate the pages before they are used. - - # NB.: careful, don't do anything that could malloc here! - # this code is called during GC initialization. - fd = raw_os_open(_dev_zero, - rffi.cast(rffi.INT, os.O_RDONLY), - rffi.cast(rffi.MODE_T, 0644)) - if rffi.cast(lltype.Signed, fd) != -1: - while size > 0: - size1 = rffi.cast(rffi.SIZE_T, min(READ_MAX, size)) - count = raw_os_read(fd, baseaddr, size1) - count = rffi.cast(lltype.Signed, count) - if count <= 0: - break - size -= count - baseaddr += count - raw_os_close(fd) - - if size > 0: # reading from /dev/zero failed, fallback - llmemory.raw_memclear(baseaddr, size) - else: # XXX any better implementation on Windows? 
# Should use VirtualAlloc() to reserve the range of pages, From noreply at buildbot.pypy.org Fri Feb 21 09:41:31 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 21 Feb 2014 09:41:31 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: add stm_become_inevitable to random tests Message-ID: <20140221084132.009CF1C0132@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r804:24e260eabf78 Date: 2014-02-21 09:41 +0100 http://bitbucket.org/pypy/stmgc/changeset/24e260eabf78/ Log: add stm_become_inevitable to random tests diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -28,18 +28,32 @@ return r +class WriteWriteConflictNotTestable(Exception): + # How can I test a write-write conflict between + # an inevitable and a normal transaction? The + # inevitable transaction would have to wait, + # but now for tests we simply abort. Of course + # aborting the inevitable transaction is not possible.. + pass + _global_time = 0 def contention_management(our_trs, other_trs, wait=False, objs_in_conflict=None): """exact copy of logic in contention.c""" + if our_trs.inevitable and wait: + # we win but cannot wait in tests... + raise WriteWriteConflictNotTestable - if other_trs.start_time < our_trs.start_time: + if our_trs.inevitable: + other_trs.set_must_abort(objs_in_conflict) + elif other_trs.start_time < our_trs.start_time: pass - else: + elif not other_trs.inevitable: other_trs.set_must_abort(objs_in_conflict) if not other_trs.check_must_abort(): our_trs.set_must_abort(objs_in_conflict) elif wait: + assert not our_trs.inevitable # abort anyway: our_trs.set_must_abort(objs_in_conflict) @@ -56,8 +70,10 @@ self._must_abort = False self.start_time = start_time self.objs_in_conflict = set() + self.inevitable = False def set_must_abort(self, objs_in_conflict=None): + assert not self.inevitable if objs_in_conflict is not None: self.objs_in_conflict |= objs_in_conflict self._must_abort = True @@ -271,10 +287,32 @@ class OpAbortTransaction(Operation): def do(self, ex, global_state, thread_state): - thread_state.transaction_state.set_must_abort() + trs = thread_state.transaction_state + if trs.inevitable: + return + trs.set_must_abort() thread_state.abort_transaction() ex.do('self.abort_transaction()') +class OpBecomeInevitable(Operation): + def do(self, ex, global_state, thread_state): + trs = thread_state.transaction_state + for ts in global_state.thread_states: + other_trs = ts.transaction_state + if (other_trs and trs is not other_trs + and other_trs.inevitable): + trs.set_must_abort() + break + + thread_state.push_roots(ex) + if trs.check_must_abort(): + thread_state.abort_transaction() + ex.do('py.test.raises(Conflict, stm_become_inevitable)') + else: + trs.inevitable = True + ex.do('stm_become_inevitable()') + thread_state.pop_roots(ex) + thread_state.reload_roots(ex) class OpAllocate(Operation): @@ -326,14 +364,27 @@ else: v = ord(global_state.rnd.choice("abcdefghijklmnop")) trs = thread_state.transaction_state - trs.write_root(r, v) # if is_ref_type_map[r]: ex.do("idx = (stm_get_obj_size(%s) - HDR) / WORD - 1" % r) else: ex.do("offset = stm_get_obj_size(%s) - 1" % r) # - global_state.check_for_write_write_conflicts(trs) + was_written = False + try: + # HACK to avoid calling write_root() just yet + was_written = r in trs.write_set + trs.write_set.add(r) + global_state.check_for_write_write_conflicts(trs) + except WriteWriteConflictNotTestable: + if not was_written: + trs.write_set.remove(r) + 
ex.do("# this is an untestable write-write conflict between an") + ex.do("# inevitable and a normal transaction :(") + return + # + trs.write_root(r, v) + # if trs.check_must_abort(): thread_state.abort_transaction() if is_ref_type_map[r]: @@ -445,6 +496,7 @@ OpCommitTransaction, OpAbortTransaction, OpForgetRoot, + OpBecomeInevitable, # OpMinorCollect, ] for _ in range(200): From noreply at buildbot.pypy.org Fri Feb 21 09:45:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 21 Feb 2014 09:45:34 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Simplify code here: this call to contention_management() cannot lead Message-ID: <20140221084534.769C01C15AC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r805:de70a62f229a Date: 2014-02-21 09:45 +0100 http://bitbucket.org/pypy/stmgc/changeset/de70a62f229a/ Log: Simplify code here: this call to contention_management() cannot lead to a safe-point, after all. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -148,7 +148,7 @@ # error "The logic in the functions below only works with two segments" #endif -static bool detect_write_read_conflicts(void) +static void detect_write_read_conflicts(void) { long remote_num = 1 - STM_SEGMENT->segment_num; char *remote_base = get_segment_base(remote_num); @@ -159,7 +159,8 @@ switch (get_priv_segment(remote_num)->transaction_state) { case TS_NONE: case TS_MUST_ABORT: - return false; /* no need to do any check */ + return; /* no need to do any check */ + default:; } LIST_FOREACH_R( @@ -173,11 +174,11 @@ /* If we reach this point, it means we aborted the other thread. We're done here. */ - return true; + assert(get_priv_segment(remote_num)->transaction_state == + TS_MUST_ABORT); + return; } })); - - return false; } static void push_modified_to_other_segments(void) @@ -249,8 +250,7 @@ the mutex, or it needs to restart. */ /* detect conflicts */ - if (UNLIKELY(detect_write_read_conflicts())) - goto restart; + detect_write_read_conflicts(); /* cannot abort any more from here */ dprintf(("commit_transaction\n")); From noreply at buildbot.pypy.org Fri Feb 21 09:48:36 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 21 Feb 2014 09:48:36 +0100 (CET) Subject: [pypy-commit] pypy default: fix numpy record initialization case Message-ID: <20140221084836.EF74E1C15AC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69236:be5e3955ea8e Date: 2014-02-21 03:44 -0500 http://bitbucket.org/pypy/pypy/changeset/be5e3955ea8e/ Log: fix numpy record initialization case diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3161,7 +3161,8 @@ raises(IndexError, 'a[0]["xyz"]') assert a[0]['x'] == 0 assert a[0]['y'] == 0 - raises(ValueError, "a[0] = (1, 2, 3)") + exc = raises(ValueError, "a[0] = (1, 2, 3)") + assert exc.value[0] == 'size of tuple must match number of fields.' 
a[0]['x'] = 13 assert a[0]['x'] == 13 a[1] = (1, 2) @@ -3487,6 +3488,15 @@ a = np.array([1,2,3], dtype='int16') assert (a * 2).dtype == np.dtype('int16') + def test_coerce_record(self): + import numpy as np + dt = np.dtype([('a', '?'), ('b', '?')]) + b = np.array([True, True]) + a = np.array([b, b, b], dtype=dt) + assert a.shape == (3, 2) + for i in a.flat: + assert tuple(i) == (True, False) + class AppTestPyPy(BaseNumpyAppTest): def setup_class(cls): diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1873,22 +1873,24 @@ if isinstance(w_item, interp_boxes.W_VoidBox): return w_item if w_item is not None: - # we treat every sequence as sequence, no special support - # for arrays - if not space.issequence_w(w_item): - raise OperationError(space.w_TypeError, space.wrap( - "expected sequence")) - if len(dtype.fields) != space.len_w(w_item): - raise OperationError(space.w_ValueError, space.wrap( - "wrong length")) - items_w = space.fixedview(w_item) + if space.isinstance_w(w_item, space.w_tuple): + if len(dtype.fields) != space.len_w(w_item): + raise OperationError(space.w_ValueError, space.wrap( + "size of tuple must match number of fields.")) + items_w = space.fixedview(w_item) + else: + # XXX support initializing from readable buffers + items_w = [w_item] else: items_w = [None] * len(dtype.fields) arr = VoidBoxStorage(dtype.get_size(), dtype) - for i in range(len(items_w)): + for i in range(len(dtype.fields)): ofs, subdtype = dtype.fields[dtype.fieldnames[i]] itemtype = subdtype.itemtype - w_box = itemtype.coerce(space, subdtype, items_w[i]) + try: + w_box = itemtype.coerce(space, subdtype, items_w[i]) + except IndexError: + w_box = itemtype.coerce(space, subdtype, None) itemtype.store(arr, 0, ofs, w_box) return interp_boxes.W_VoidBox(arr, 0, dtype) From noreply at buildbot.pypy.org Fri Feb 21 10:15:31 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 21 Feb 2014 10:15:31 +0100 (CET) Subject: [pypy-commit] pypy default: handle another record coerce case Message-ID: <20140221091531.407521C0132@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69237:fa985109fcb9 Date: 2014-02-21 04:08 -0500 http://bitbucket.org/pypy/pypy/changeset/fa985109fcb9/ Log: handle another record coerce case diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3497,6 +3497,11 @@ for i in a.flat: assert tuple(i) == (True, False) + dt = np.dtype([('A', ' Author: Armin Rigo Branch: Changeset: r69238:d323244b7eec Date: 2014-02-21 12:16 +0100 http://bitbucket.org/pypy/pypy/changeset/d323244b7eec/ Log: Try to give as much info as possible here diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -192,8 +192,15 @@ # check the number and type of arguments FUNC = op.args[0].concretetype.TO ARGS = FUNC.ARGS - assert NON_VOID_ARGS == [T for T in ARGS if T is not lltype.Void] - assert RESULT == FUNC.RESULT + if NON_VOID_ARGS != [T for T in ARGS if T is not lltype.Void]: + raise Exception( + "in operation %r: caling a function with signature %r, " + "but passing actual arguments (ignoring voids) of types %r" + % (op, FUNC, NON_VOID_ARGS)) + if RESULT != FUNC.RESULT: + raise Exception( + "in operation %r: caling a function with signature %r, 
" + "but the actual return type is %r" % (op, FUNC, RESULT)) # ok # get the 'elidable' and 'loopinvariant' flags from the function object elidable = False From noreply at buildbot.pypy.org Fri Feb 21 13:22:54 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 21 Feb 2014 13:22:54 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: cleanups Message-ID: <20140221122254.087C01C0104@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r806:e7349745e08f Date: 2014-02-21 13:22 +0100 http://bitbucket.org/pypy/stmgc/changeset/e7349745e08f/ Log: cleanups diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -18,16 +18,6 @@ -_root_numbering = 0 -is_ref_type_map = {} -def get_new_root_name(is_ref_type): - global _root_numbering - _root_numbering += 1 - r = "lp%d" % _root_numbering - is_ref_type_map[r] = is_ref_type - return r - - class WriteWriteConflictNotTestable(Exception): # How can I test a write-write conflict between # an inevitable and a normal transaction? The @@ -36,7 +26,6 @@ # aborting the inevitable transaction is not possible.. pass -_global_time = 0 def contention_management(our_trs, other_trs, wait=False, objs_in_conflict=None): """exact copy of logic in contention.c""" if our_trs.inevitable and wait: @@ -172,9 +161,7 @@ def start_transaction(self): assert self.transaction_state is None - global _global_time - _global_time += 1 - start_time = _global_time + start_time = self.global_state.inc_and_get_global_time() trs = TransactionState(start_time) trs.update_from_committed( self.global_state.committed_transaction_state) @@ -211,6 +198,22 @@ self.thread_states = [] self.prebuilt_roots = [] self.committed_transaction_state = TransactionState(0) + self.global_time = 0 + self.root_numbering = 0 + self.ref_type_map = {} + + def get_new_root_name(self, is_ref_type): + self.root_numbering += 1 + r = "lp_%s_%d" % ("ref" if is_ref_type else "char", self.root_numbering) + self.ref_type_map[r] = is_ref_type + return r + + def has_ref_type(self, r): + return self.ref_type_map[r] + + def inc_and_get_global_time(self): + self.global_time += 1 + return self.global_time def push_state_to_other_threads(self, trs): assert not trs.check_must_abort() @@ -317,7 +320,7 @@ class OpAllocate(Operation): def do(self, ex, global_state, thread_state): - r = get_new_root_name(False) + r = global_state.get_new_root_name(False) thread_state.push_roots(ex) size = global_state.rnd.choice([ 16, @@ -333,7 +336,7 @@ class OpAllocateRef(Operation): def do(self, ex, global_state, thread_state): - r = get_new_root_name(True) + r = global_state.get_new_root_name(True) thread_state.push_roots(ex) num = global_state.rnd.randrange(1, 100) ex.do('%s = stm_allocate_refs(%s)' % (r, num)) @@ -359,20 +362,22 @@ class OpWrite(Operation): def do(self, ex, global_state, thread_state): r = thread_state.get_random_root() - if is_ref_type_map[r]: + is_ref = global_state.has_ref_type(r) + if is_ref: v = thread_state.get_random_root() else: v = ord(global_state.rnd.choice("abcdefghijklmnop")) trs = thread_state.transaction_state # - if is_ref_type_map[r]: + if is_ref: ex.do("idx = (stm_get_obj_size(%s) - HDR) / WORD - 1" % r) else: ex.do("offset = stm_get_obj_size(%s) - 1" % r) # was_written = False try: - # HACK to avoid calling write_root() just yet + # HACK to avoid calling write_root() just yet because we have to + # undo it in case of the exception :( was_written = r in trs.write_set trs.write_set.add(r) 
global_state.check_for_write_write_conflicts(trs) @@ -387,12 +392,12 @@ # if trs.check_must_abort(): thread_state.abort_transaction() - if is_ref_type_map[r]: + if is_ref: ex.do("py.test.raises(Conflict, stm_set_ref, %s, idx, %s)" % (r, v)) else: ex.do("py.test.raises(Conflict, stm_set_char, %s, %s, offset)" % (r, repr(chr(v)))) else: - if is_ref_type_map[r]: + if is_ref: ex.do("stm_set_ref(%s, idx, %s)" % (r, v)) else: ex.do("stm_set_char(%s, %s, offset)" % (r, repr(chr(v)))) @@ -403,7 +408,7 @@ trs = thread_state.transaction_state v = trs.read_root(r) # - if is_ref_type_map[r]: + if global_state.has_ref_type(r): ex.do("idx = (stm_get_obj_size(%s) - HDR) / WORD - 1" % r) if v in thread_state.saved_roots or v in global_state.prebuilt_roots: # v = root known to this transaction; or prebuilt @@ -450,11 +455,6 @@ class TestRandom(BaseTest): def test_fixed_16_bytes_objects(self, seed=1010): - global _root_numbering - _root_numbering = 0 - global is_ref_type_map - is_ref_type_map = {} - rnd = random.Random(seed) N_OBJECTS = 3 @@ -475,12 +475,12 @@ curr_thread = global_state.thread_states[0] for i in range(N_OBJECTS): - r = get_new_root_name(False) + r = global_state.get_new_root_name(False) ex.do('%s = stm_allocate_old(16)' % r) global_state.committed_transaction_state.write_root(r, 0) global_state.prebuilt_roots.append(r) - r = get_new_root_name(True) + r = global_state.get_new_root_name(True) ex.do('%s = stm_allocate_old_refs(1)' % r) global_state.committed_transaction_state.write_root(r, "ffi.NULL") global_state.prebuilt_roots.append(r) @@ -506,6 +506,7 @@ ex.do('#') curr_thread = global_state.thread_states[n_thread] OpSwitchThread().do(ex, global_state, curr_thread) + if curr_thread.transaction_state is None: OpStartTransaction().do(ex, global_state, curr_thread) From noreply at buildbot.pypy.org Fri Feb 21 13:54:59 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 21 Feb 2014 13:54:59 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: more cleanups in test_random Message-ID: <20140221125459.A64CF1C0104@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r807:656a920108f2 Date: 2014-02-21 13:55 +0100 http://bitbucket.org/pypy/stmgc/changeset/656a920108f2/ Log: more cleanups in test_random diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -17,6 +17,12 @@ exec cmd in globals(), self.content +def raising_call(conflict, func, *args): + arguments = ", ".join(map(str, args)) + if conflict: + return "py.test.raises(Conflict, %s, %s)" % (func, arguments) + return "%s(%s)" % (func, arguments) + class WriteWriteConflictNotTestable(Exception): # How can I test a write-write conflict between @@ -227,6 +233,16 @@ self.ex.do('# conflict while pushing to other threads: %s' % trs.objs_in_conflict) + def check_if_can_become_inevitable(self, trs): + assert not trs.check_must_abort() + for ts in self.thread_states: + other_trs = ts.transaction_state + if (other_trs and trs is not other_trs + and other_trs.inevitable): + self.ex.do("# there is another inevitable transaction:") + trs.set_must_abort() + break + def check_for_write_write_conflicts(self, trs): assert not trs.check_must_abort() for ts in self.thread_states: @@ -284,9 +300,7 @@ # if aborts: thread_state.abort_transaction() - ex.do('py.test.raises(Conflict, self.commit_transaction)') - else: - ex.do('self.commit_transaction()') + ex.do(raising_call(aborts, "self.commit_transaction")) class OpAbortTransaction(Operation): def do(self, ex, 
global_state, thread_state): @@ -300,20 +314,15 @@ class OpBecomeInevitable(Operation): def do(self, ex, global_state, thread_state): trs = thread_state.transaction_state - for ts in global_state.thread_states: - other_trs = ts.transaction_state - if (other_trs and trs is not other_trs - and other_trs.inevitable): - trs.set_must_abort() - break + global_state.check_if_can_become_inevitable(trs) thread_state.push_roots(ex) + ex.do(raising_call(trs.check_must_abort(), + "stm_become_inevitable")) if trs.check_must_abort(): thread_state.abort_transaction() - ex.do('py.test.raises(Conflict, stm_become_inevitable)') else: trs.inevitable = True - ex.do('stm_become_inevitable()') thread_state.pop_roots(ex) thread_state.reload_roots(ex) @@ -362,18 +371,10 @@ class OpWrite(Operation): def do(self, ex, global_state, thread_state): r = thread_state.get_random_root() + trs = thread_state.transaction_state is_ref = global_state.has_ref_type(r) - if is_ref: - v = thread_state.get_random_root() - else: - v = ord(global_state.rnd.choice("abcdefghijklmnop")) - trs = thread_state.transaction_state # - if is_ref: - ex.do("idx = (stm_get_obj_size(%s) - HDR) / WORD - 1" % r) - else: - ex.do("offset = stm_get_obj_size(%s) - 1" % r) - # + # check for possible write-write conflict: was_written = False try: # HACK to avoid calling write_root() just yet because we have to @@ -384,23 +385,27 @@ except WriteWriteConflictNotTestable: if not was_written: trs.write_set.remove(r) - ex.do("# this is an untestable write-write conflict between an") - ex.do("# inevitable and a normal transaction :(") + ex.do("# writing to %s produces an untestable write-write" % r) + ex.do("# conflict between an inevitable and a normal transaction :(") return # + # decide on a value to write + if is_ref: + v = thread_state.get_random_root() + ex.do("idx = (stm_get_obj_size(%s) - HDR) / WORD - 1" % r) + else: + v = ord(global_state.rnd.choice("abcdefghijklmnop")) + ex.do("offset = stm_get_obj_size(%s) - 1" % r) trs.write_root(r, v) # if trs.check_must_abort(): thread_state.abort_transaction() - if is_ref: - ex.do("py.test.raises(Conflict, stm_set_ref, %s, idx, %s)" % (r, v)) - else: - ex.do("py.test.raises(Conflict, stm_set_char, %s, %s, offset)" % (r, repr(chr(v)))) + if is_ref: + ex.do(raising_call(trs.check_must_abort(), + "stm_set_ref", r, "idx", v)) else: - if is_ref: - ex.do("stm_set_ref(%s, idx, %s)" % (r, v)) - else: - ex.do("stm_set_char(%s, %s, offset)" % (r, repr(chr(v)))) + ex.do(raising_call(trs.check_must_abort(), + "stm_set_char", r, repr(chr(v)), "offset")) class OpRead(Operation): def do(self, ex, global_state, thread_state): @@ -445,9 +450,10 @@ # if conflicts: thread_state.abort_transaction() - ex.do('py.test.raises(Conflict, self.switch, %s)' % thread_state.num) - else: - ex.do('self.switch(%s)' % thread_state.num) + + ex.do(raising_call(conflicts, + "self.switch", thread_state.num)) + # ========== TEST GENERATION ========== From noreply at buildbot.pypy.org Fri Feb 21 15:06:48 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 21 Feb 2014 15:06:48 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: keep track of object sizes and assert them sometimes Message-ID: <20140221140648.EA9D31C15AC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r808:620c1f07da76 Date: 2014-02-21 15:06 +0100 http://bitbucket.org/pypy/stmgc/changeset/620c1f07da76/ Log: keep track of object sizes and assert them sometimes diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- 
a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -207,16 +207,21 @@ self.global_time = 0 self.root_numbering = 0 self.ref_type_map = {} + self.root_sizes = {} - def get_new_root_name(self, is_ref_type): + def get_new_root_name(self, is_ref_type, size): self.root_numbering += 1 r = "lp_%s_%d" % ("ref" if is_ref_type else "char", self.root_numbering) self.ref_type_map[r] = is_ref_type + self.root_sizes[r] = size return r def has_ref_type(self, r): return self.ref_type_map[r] + def get_root_size(self, r): + return self.root_sizes[r] + def inc_and_get_global_time(self): self.global_time += 1 return self.global_time @@ -329,13 +334,14 @@ class OpAllocate(Operation): def do(self, ex, global_state, thread_state): - r = global_state.get_new_root_name(False) - thread_state.push_roots(ex) size = global_state.rnd.choice([ - 16, + "16", "SOME_MEDIUM_SIZE+16", #"SOME_LARGE_SIZE+16", ]) + r = global_state.get_new_root_name(False, size) + thread_state.push_roots(ex) + ex.do('%s = stm_allocate(%s)' % (r, size)) thread_state.transaction_state.add_root(r, 0) @@ -345,9 +351,9 @@ class OpAllocateRef(Operation): def do(self, ex, global_state, thread_state): - r = global_state.get_new_root_name(True) + num = str(global_state.rnd.randrange(1, 100)) + r = global_state.get_new_root_name(True, num) thread_state.push_roots(ex) - num = global_state.rnd.randrange(1, 100) ex.do('%s = stm_allocate_refs(%s)' % (r, num)) thread_state.transaction_state.add_root(r, "ffi.NULL") @@ -392,20 +398,22 @@ # decide on a value to write if is_ref: v = thread_state.get_random_root() - ex.do("idx = (stm_get_obj_size(%s) - HDR) / WORD - 1" % r) else: v = ord(global_state.rnd.choice("abcdefghijklmnop")) - ex.do("offset = stm_get_obj_size(%s) - 1" % r) trs.write_root(r, v) # - if trs.check_must_abort(): + aborts = trs.check_must_abort() + if aborts: thread_state.abort_transaction() + offset = global_state.get_root_size(r) + " - 1" if is_ref: - ex.do(raising_call(trs.check_must_abort(), - "stm_set_ref", r, "idx", v)) + ex.do(raising_call(aborts, "stm_set_ref", r, offset, v)) + if not aborts: + ex.do(raising_call(False, "stm_set_ref", r, "0", v)) else: - ex.do(raising_call(trs.check_must_abort(), - "stm_set_char", r, repr(chr(v)), "offset")) + ex.do(raising_call(aborts, "stm_set_char", r, repr(chr(v)), offset)) + if not aborts: + ex.do(raising_call(False, "stm_set_char", r, repr(chr(v)), "HDR")) class OpRead(Operation): def do(self, ex, global_state, thread_state): @@ -413,11 +421,12 @@ trs = thread_state.transaction_state v = trs.read_root(r) # + offset = global_state.get_root_size(r) + " - 1" if global_state.has_ref_type(r): - ex.do("idx = (stm_get_obj_size(%s) - HDR) / WORD - 1" % r) if v in thread_state.saved_roots or v in global_state.prebuilt_roots: # v = root known to this transaction; or prebuilt - ex.do("assert stm_get_ref(%s, idx) == %s" % (r, v)) + ex.do("assert stm_get_ref(%s, %s) == %s" % (r, offset, v)) + ex.do("assert stm_get_ref(%s, 0) == %s" % (r, v)) elif v != "ffi.NULL": # if v came from this transaction: re-add it to saved_roots because # it survived by being referenced by another saved root @@ -433,14 +442,25 @@ else: ex.do("# register %r in this thread" % v) # - ex.do("%s = stm_get_ref(%s, idx)" % (v, r)) + ex.do("%s = stm_get_ref(%s, %s)" % (v, r, offset)) + ex.do("%s = stm_get_ref(%s, 0)" % (v, r)) thread_state.register_root(v) else: # v is NULL; we still need to read it (as it should be in the read-set): - ex.do("assert stm_get_ref(%s, idx) == %s" % (r,v)) + ex.do("assert stm_get_ref(%s, %s) == %s" % 
(r,offset,v)) + ex.do("assert stm_get_ref(%s, 0) == %s" % (r,v)) else: - ex.do("offset = stm_get_obj_size(%s) - 1" % r) - ex.do("assert stm_get_char(%s, offset) == %s" % (r, repr(chr(v)))) + ex.do("assert stm_get_char(%s, %s) == %s" % (r, offset, repr(chr(v)))) + ex.do("assert stm_get_char(%s, HDR) == %s" % (r, repr(chr(v)))) + +class OpAssertSize(Operation): + def do(self, ex, global_state, thread_state): + r = thread_state.get_random_root() + size = global_state.get_root_size(r) + if global_state.has_ref_type(r): + ex.do("assert stm_get_obj_size(%s) == %s" % (r, size + " * WORD + HDR")) + else: + ex.do("assert stm_get_obj_size(%s) == %s" % (r, size)) class OpSwitchThread(Operation): def do(self, ex, global_state, thread_state): @@ -466,12 +486,7 @@ N_OBJECTS = 3 N_THREADS = 2 ex = Exec(self) - ex.do(""" -################################################################ -################################################################ -################################################################ -################################################################ - """) + ex.do("################################################################\n"*10) ex.do('# initialization') global_state = GlobalState(ex, rnd) @@ -481,13 +496,13 @@ curr_thread = global_state.thread_states[0] for i in range(N_OBJECTS): - r = global_state.get_new_root_name(False) - ex.do('%s = stm_allocate_old(16)' % r) + r = global_state.get_new_root_name(False, "384") + ex.do('%s = stm_allocate_old(384)' % r) global_state.committed_transaction_state.write_root(r, 0) global_state.prebuilt_roots.append(r) - r = global_state.get_new_root_name(True) - ex.do('%s = stm_allocate_old_refs(1)' % r) + r = global_state.get_new_root_name(True, "50") + ex.do('%s = stm_allocate_old_refs(50)' % r) global_state.committed_transaction_state.write_root(r, "ffi.NULL") global_state.prebuilt_roots.append(r) global_state.committed_transaction_state.write_set = set() @@ -503,6 +518,7 @@ OpAbortTransaction, OpForgetRoot, OpBecomeInevitable, + OpAssertSize, # OpMinorCollect, ] for _ in range(200): From noreply at buildbot.pypy.org Fri Feb 21 15:34:25 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Fri, 21 Feb 2014 15:34:25 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: save roots around self.switch() too Message-ID: <20140221143425.22A041D23D4@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r809:0f6f9932b271 Date: 2014-02-21 15:34 +0100 http://bitbucket.org/pypy/stmgc/changeset/0f6f9932b271/ Log: save roots around self.switch() too diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -123,6 +123,7 @@ def register_root(self, r): self.saved_roots.append(r) + assert len(self.saved_roots) < SHADOWSTACK_LENGTH def forget_random_root(self): # # forget some non-pushed root for now @@ -463,16 +464,28 @@ ex.do("assert stm_get_obj_size(%s) == %s" % (r, size)) class OpSwitchThread(Operation): - def do(self, ex, global_state, thread_state): - trs = thread_state.transaction_state - conflicts = trs is not None and trs.check_must_abort() - ex.thread_num = thread_state.num - # - if conflicts: - thread_state.abort_transaction() + def do(self, ex, global_state, thread_state, new_thread_state=None): + if new_thread_state is None: + new_thread_state = global_state.rnd.choice(global_state.thread_states) - ex.do(raising_call(conflicts, - "self.switch", thread_state.num)) + if new_thread_state != thread_state: + if thread_state.transaction_state: + 
thread_state.push_roots(ex) + ex.do('#') + # + trs = new_thread_state.transaction_state + conflicts = trs is not None and trs.check_must_abort() + ex.thread_num = new_thread_state.num + # + ex.do(raising_call(conflicts, + "self.switch", new_thread_state.num)) + if conflicts: + new_thread_state.abort_transaction() + else: + new_thread_state.pop_roots(ex) + new_thread_state.reload_roots(ex) + + return new_thread_state @@ -523,11 +536,7 @@ ] for _ in range(200): # make sure we are in a transaction: - n_thread = rnd.randrange(0, N_THREADS) - if n_thread != curr_thread.num: - ex.do('#') - curr_thread = global_state.thread_states[n_thread] - OpSwitchThread().do(ex, global_state, curr_thread) + curr_thread = OpSwitchThread().do(ex, global_state, curr_thread) if curr_thread.transaction_state is None: OpStartTransaction().do(ex, global_state, curr_thread) @@ -542,8 +551,8 @@ if ts.transaction_state is not None: if curr_thread != ts: ex.do('#') - curr_thread = ts - OpSwitchThread().do(ex, global_state, curr_thread) + curr_thread = OpSwitchThread().do(ex, global_state, curr_thread, + new_thread_state=ts) # could have aborted in the switch() above: if curr_thread.transaction_state: From noreply at buildbot.pypy.org Fri Feb 21 18:52:29 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 21 Feb 2014 18:52:29 +0100 (CET) Subject: [pypy-commit] pypy default: fix segfault of fill called on record types Message-ID: <20140221175229.C9EC11D24CF@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69239:4e601676010a Date: 2014-02-21 12:40 -0500 http://bitbucket.org/pypy/pypy/changeset/4e601676010a/ Log: fix segfault of fill called on record types diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2672,7 +2672,7 @@ assert arange(3)[array(1)] == 1 def test_fill(self): - from numpypy import array + from numpypy import array, empty a = array([1, 2, 3]) a.fill(10) assert (a == [10, 10, 10]).all() @@ -2695,6 +2695,20 @@ e.fill(1.5-3j) assert e == 1.5-3j + a = empty(5, dtype='S3') + a.fill('abc') + for i in a: + assert i == 'abc' + + a = empty(10, dtype=[(_, int) for _ in 'abcde']) + a.fill(123) + for i in a: + import sys + if '__pypy__' in sys.builtin_module_names: + assert tuple(i) == (123,) + (0,) * 4 + else: + assert tuple(i) == (123,) * 5 + def test_array_indexing_bool(self): from numpypy import arange a = arange(10) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1897,11 +1897,20 @@ itemtype.store(arr, 0, ofs, w_box) return interp_boxes.W_VoidBox(arr, 0, dtype) - @jit.unroll_safe def store(self, arr, i, ofs, box): assert isinstance(box, interp_boxes.W_VoidBox) - for k in range(box.dtype.get_size()): - arr.storage[k + i + ofs] = box.arr.storage[k + box.ofs] + self._store(arr.storage, i, ofs, box, box.dtype.get_size()) + + @jit.unroll_safe + def _store(self, storage, i, ofs, box, size): + for k in range(size): + storage[k + i + ofs] = box.arr.storage[k + box.ofs] + + def fill(self, storage, width, box, start, stop, offset): + assert isinstance(box, interp_boxes.W_VoidBox) + assert width == box.dtype.get_size() + for i in xrange(start, stop, width): + self._store(storage, i, offset, box, width) def byteswap(self, w_v): # XXX implement From noreply at buildbot.pypy.org Fri Feb 21 19:13:23 2014 From: noreply at 
buildbot.pypy.org (pjenvey) Date: Fri, 21 Feb 2014 19:13:23 +0100 (CET) Subject: [pypy-commit] pypy remove-intlong-smm: fix the inplace ops Message-ID: <20140221181323.57E0F1C15AC@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-intlong-smm Changeset: r69240:3aff7d060076 Date: 2014-02-21 10:04 -0800 http://bitbucket.org/pypy/pypy/changeset/3aff7d060076/ Log: fix the inplace ops diff --git a/pypy/objspace/std/frame.py b/pypy/objspace/std/frame.py --- a/pypy/objspace/std/frame.py +++ b/pypy/objspace/std/frame.py @@ -1,6 +1,9 @@ """StdObjSpace custom opcode implementations""" +import operator + from rpython.rlib.rarithmetic import ovfcheck +from rpython.tool.sourcetools import func_renamer from pypy.interpreter.pyframe import PyFrame from pypy.interpreter.error import oefmt @@ -20,36 +23,41 @@ raise AssertionError -def int_BINARY_ADD(self, oparg, next_instr): - space = self.space - w_2 = self.popvalue() - w_1 = self.popvalue() - if type(w_1) is W_IntObject and type(w_2) is W_IntObject: - try: - z = ovfcheck(w_1.intval + w_2.intval) - except OverflowError: - w_result = w_1.descr_add(space, w_2) +def _intshortcut(spaceopname): + if spaceopname.startswith('inplace_'): + opname = spaceopname[len('inplace_'):] + funcprefix = 'int_' + else: + opname = spaceopname + funcprefix = 'int_BINARY_' + op = getattr(operator, opname) + int_op = getattr(W_IntObject, 'descr_' + opname) + + @func_renamer(funcprefix + spaceopname.upper()) + def opimpl(self, oparg, next_instr): + space = self.space + space_op = getattr(space, spaceopname) + + w_2 = self.popvalue() + w_1 = self.popvalue() + if type(w_1) is W_IntObject and type(w_2) is W_IntObject: + try: + z = ovfcheck(op(w_1.intval, w_2.intval)) + except OverflowError: + w_result = int_op(w_1, space, w_2) + else: + w_result = space.newint(z) else: - w_result = space.newint(z) - else: - w_result = space.add(w_1, w_2) - self.pushvalue(w_result) + w_result = space_op(w_1, w_2) + self.pushvalue(w_result) + return opimpl -def int_BINARY_SUBTRACT(self, oparg, next_instr): - space = self.space - w_2 = self.popvalue() - w_1 = self.popvalue() - if type(w_1) is W_IntObject and type(w_2) is W_IntObject: - try: - z = ovfcheck(w_1.intval - w_2.intval) - except OverflowError: - w_result = w_1.descr_sub(space, w_2) - else: - w_result = space.newint(z) - else: - w_result = space.sub(w_1, w_2) - self.pushvalue(w_result) + +int_BINARY_ADD = _intshortcut('add') +int_INPLACE_ADD = _intshortcut('inplace_add') +int_BINARY_SUBTRACT = _intshortcut('sub') +int_INPLACE_SUBTRACT = _intshortcut('inplace_sub') def list_BINARY_SUBSCR(self, oparg, next_instr): @@ -72,9 +80,9 @@ pass if space.config.objspace.std.optimized_int_add: StdObjSpaceFrame.BINARY_ADD = int_BINARY_ADD - StdObjSpaceFrame.INPLACE_ADD = int_BINARY_ADD - StdObjSpaceFrame.BINARY_SUB = int_BINARY_SUBTRACT - StdObjSpaceFrame.INPLACE_SUBTRACT = int_BINARY_SUBTRACT + StdObjSpaceFrame.INPLACE_ADD = int_INPLACE_ADD + StdObjSpaceFrame.BINARY_SUBTRACT = int_BINARY_SUBTRACT + StdObjSpaceFrame.INPLACE_SUBTRACT = int_INPLACE_SUBTRACT if space.config.objspace.std.optimized_list_getitem: StdObjSpaceFrame.BINARY_SUBSCR = list_BINARY_SUBSCR from pypy.objspace.std.callmethod import LOOKUP_METHOD, CALL_METHOD diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -556,3 +556,12 @@ class AppTestIntOptimizedAdd(AppTestInt): spaceconfig = {"objspace.std.optimized_int_add": True} + + def 
test_inplace(self): + # ensure other inplace ops still work + l = [] + l += xrange(5) + assert l == list(range(5)) + a = 8.5 + a -= .5 + assert a == 8 From noreply at buildbot.pypy.org Fri Feb 21 19:19:34 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 21 Feb 2014 19:19:34 +0100 (CET) Subject: [pypy-commit] pypy default: fix network tests for new python.org Message-ID: <20140221181934.263791C15AC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69241:05178cffe8df Date: 2014-02-21 13:18 -0500 http://bitbucket.org/pypy/pypy/changeset/05178cffe8df/ Log: fix network tests for new python.org diff --git a/rpython/rlib/test/test_rsocket.py b/rpython/rlib/test/test_rsocket.py --- a/rpython/rlib/test/test_rsocket.py +++ b/rpython/rlib/test/test_rsocket.py @@ -311,7 +311,7 @@ assert isinstance(lst, list) found = False for family, socktype, protocol, canonname, addr in lst: - if addr.get_host() == '82.94.164.162': + if addr.get_host() == '140.211.10.69': found = True assert found, lst diff --git a/rpython/translator/sandbox/test/test_sandlib.py b/rpython/translator/sandbox/test/test_sandlib.py --- a/rpython/translator/sandbox/test/test_sandlib.py +++ b/rpython/translator/sandbox/test/test_sandlib.py @@ -104,17 +104,17 @@ class SocketProc(VirtualizedSocketProc, SimpleIOSandboxedProc): def build_virtual_root(self): pass - + def entry_point(argv): fd = os.open("tcp://python.org:80", os.O_RDONLY, 0777) os.write(fd, 'GET /\n') - print os.read(fd, 30) + print os.read(fd, 50) return 0 exe = compile(entry_point) proc = SocketProc([exe]) output, error = proc.communicate("") - assert output.startswith(' Author: Armin Rigo Branch: Changeset: r906:ab49565447d2 Date: 2014-02-22 10:46 +0100 http://bitbucket.org/pypy/buildbot/changeset/ab49565447d2/ Log: missing import diff --git a/bbhook/hook.py b/bbhook/hook.py --- a/bbhook/hook.py +++ b/bbhook/hook.py @@ -5,6 +5,7 @@ import time import thread, Queue import traceback +import pprint from .main import app from . import scm From noreply at buildbot.pypy.org Sat Feb 22 10:59:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Feb 2014 10:59:06 +0100 (CET) Subject: [pypy-commit] pypy default: Use the RPython-level detection of out-of-bound indices. This should Message-ID: <20140222095906.AA4981C11A9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69269:311bb87a8956 Date: 2014-02-22 10:58 +0100 http://bitbucket.org/pypy/pypy/changeset/311bb87a8956/ Log: Use the RPython-level detection of out-of-bound indices. This should give better code when jitted (lower number of guards). 
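    A minimal sketch of the pattern this changeset applies, ahead of the diff below. The function names are illustrative stand-ins, not the actual W_BytearrayObject / string-methods code, and negative-index handling is left out of the sketch: the point is that indexing the RPython-level string directly and catching the IndexError that RPython itself raises replaces the hand-written range check, so the JIT can use its cheaper built-in bounds guard.

        # Illustrative stand-ins only (hypothetical names, simplified semantics).

        def getitem_manual_check(data, index):
            # old shape: explicit range check -> extra guards when jitted
            if index < 0 or index >= len(data):
                raise IndexError("string index out of range")
            return data[index]

        def getitem_rpython_check(data, index):
            # new shape: let the indexing operation itself raise IndexError
            try:
                return data[index]
            except IndexError:
                raise IndexError("string index out of range")

        assert getitem_manual_check("abc", 1) == getitem_rpython_check("abc", 1) == "b"

    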
diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -40,7 +40,11 @@ return len(self.data) def _getitem_result(self, space, index): - return space.wrap(ord(self.data[index])) + try: + character = self.data[index] + except IndexError: + raise oefmt(space.w_IndexError, "bytearray index out of range") + return space.wrap(ord(character)) def _val(self, space): return space.bufferstr_w(self) diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -78,15 +78,15 @@ return self._new_from_list(ret) index = space.getindex_w(w_index, space.w_IndexError, "string index") - selflen = self._len() - if index < 0: - index += selflen - if index < 0 or index >= selflen: - raise oefmt(space.w_IndexError, "string index out of range") return self._getitem_result(space, index) def _getitem_result(self, space, index): - return self._new(self._val(space)[index]) + selfvalue = self._val(space) + try: + character = selfvalue[index] + except IndexError: + raise oefmt(space.w_IndexError, "string index out of range") + return self._new(character) def descr_getslice(self, space, w_start, w_stop): selfvalue = self._val(space) From noreply at buildbot.pypy.org Sat Feb 22 11:11:06 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Feb 2014 11:11:06 +0100 (CET) Subject: [pypy-commit] pypy.org extradoc: The Python web site change broke the URL redirection to Message-ID: <20140222101106.6922A1C3599@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r476:5f7e6ebc089c Date: 2014-02-22 11:10 +0100 http://bitbucket.org/pypy/pypy.org/changeset/5f7e6ebc089c/ Log: The Python web site change broke the URL redirection to mail.python.org. Fix. diff --git a/contact.html b/contact.html --- a/contact.html +++ b/contact.html @@ -47,7 +47,7 @@

    Contact

      • irc: #pypy on irc.freenode.net
    -  • mailing list: pypy-dev at python.org
    +  • mailing list: pypy-dev at python.org
         (the anchor markup of these two lines was lost when the message was
          archived; the change is the link target, which moves from
          http://python.org/mailman/listinfo/pypy-dev to
          http://mail.python.org/mailman/listinfo/pypy-dev, matching the
          source/contact.txt hunk below)
      • for security related issues, non-public funding enquiries etc. please contact pypy@sfconservancy.org
      • the bug tracker
      • more on our dev site.
    
  • diff --git a/source/contact.txt b/source/contact.txt --- a/source/contact.txt +++ b/source/contact.txt @@ -18,7 +18,7 @@ * code on `bitbucket`_. -.. __: http://python.org/mailman/listinfo/pypy-dev +.. __: http://mail.python.org/mailman/listinfo/pypy-dev .. _`bug tracker`: https://bugs.pypy.org .. _`dev site`: http://doc.pypy.org .. _`bitbucket`: https://bitbucket.org/pypy/pypy/overview From noreply at buildbot.pypy.org Sat Feb 22 11:11:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Feb 2014 11:11:14 +0100 (CET) Subject: [pypy-commit] pypy default: The Python web site change broke the URL redirection to mail.python.org. Message-ID: <20140222101114.995CC1C3599@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69270:1144d1950bde Date: 2014-02-22 11:10 +0100 http://bitbucket.org/pypy/pypy/changeset/1144d1950bde/ Log: The Python web site change broke the URL redirection to mail.python.org. Fix. diff --git a/pypy/doc/eventhistory.rst b/pypy/doc/eventhistory.rst --- a/pypy/doc/eventhistory.rst +++ b/pypy/doc/eventhistory.rst @@ -267,7 +267,7 @@ .. _`day 1`: http://codespeak.net/pipermail/pypy-dev/2005q2/002169.html .. _`day 2`: http://codespeak.net/pipermail/pypy-dev/2005q2/002171.html .. _`day 3`: http://codespeak.net/pipermail/pypy-dev/2005q2/002172.html -.. _`pypy-dev`: http://python.org/mailman/listinfo/pypy-dev +.. _`pypy-dev`: http://mail.python.org/mailman/listinfo/pypy-dev .. _EuroPython: http://europython.org .. _`translation`: translation.html diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst --- a/pypy/doc/faq.rst +++ b/pypy/doc/faq.rst @@ -243,7 +243,7 @@ discussions. .. _`contact us`: index.html -.. _`mailing list`: http://python.org/mailman/listinfo/pypy-dev +.. _`mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev ------------------------------------------------------------- OSError: ... cannot restore segment prot after reloc... Help? diff --git a/pypy/doc/getting-started-dev.rst b/pypy/doc/getting-started-dev.rst --- a/pypy/doc/getting-started-dev.rst +++ b/pypy/doc/getting-started-dev.rst @@ -386,7 +386,7 @@ .. _`full Python interpreter`: getting-started-python.html .. _`the blog`: http://morepypy.blogspot.com -.. _`pypy-dev mailing list`: http://python.org/mailman/listinfo/pypy-dev +.. _`pypy-dev mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev .. _`contact possibilities`: index.html .. _`py library`: http://pylib.org diff --git a/pypy/doc/index-report.rst b/pypy/doc/index-report.rst --- a/pypy/doc/index-report.rst +++ b/pypy/doc/index-report.rst @@ -99,7 +99,7 @@ .. _`py-lib`: http://pylib.org/ .. _`py.test`: http://pytest.org/ .. _codespeak: http://codespeak.net/ -.. _`pypy-dev`: http://python.org/mailman/listinfo/pypy-dev +.. _`pypy-dev`: http://mail.python.org/mailman/listinfo/pypy-dev Reports of 2006 diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst --- a/pypy/doc/index.rst +++ b/pypy/doc/index.rst @@ -104,8 +104,8 @@ .. _`PyPy blog`: http://morepypy.blogspot.com/ .. _`development bug/feature tracker`: https://bugs.pypy.org .. _here: http://tismerysoft.de/pypy/irc-logs/pypy -.. _`Mercurial commit mailing list`: http://python.org/mailman/listinfo/pypy-commit -.. _`development mailing list`: http://python.org/mailman/listinfo/pypy-dev +.. _`Mercurial commit mailing list`: http://mail.python.org/mailman/listinfo/pypy-commit +.. _`development mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev .. _`FAQ`: faq.html .. _`Getting Started`: getting-started.html .. 
_`Papers`: extradoc.html From noreply at buildbot.pypy.org Sat Feb 22 18:15:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Feb 2014 18:15:55 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Progress on copying the logic from minimark.py. Message-ID: <20140222171555.84AD31C03FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r810:3993d902abc7 Date: 2014-02-22 18:15 +0100 http://bitbucket.org/pypy/stmgc/changeset/3993d902abc7/ Log: Progress on copying the logic from minimark.py. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -13,11 +13,10 @@ { assert(_running_transaction()); - LIST_APPEND(STM_PSEGMENT->old_objects_to_trace, obj); - /* for old objects from the same transaction, we are done now */ if (obj_from_same_transaction(obj)) { obj->stm_flags |= GCFLAG_WRITE_BARRIER_CALLED; + LIST_APPEND(STM_PSEGMENT->old_objects_pointing_to_young, obj); return; } @@ -221,6 +220,15 @@ list_clear(STM_PSEGMENT->modified_objects); } +static void _finish_transaction(void) +{ + stm_thread_local_t *tl = STM_SEGMENT->running_thread; + release_thread_segment(tl); + STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; + STM_PSEGMENT->transaction_state = TS_NONE; + list_clear(STM_PSEGMENT->old_objects_pointing_to_young); +} + void stm_commit_transaction(void) { mutex_lock(); @@ -267,10 +275,7 @@ reset_all_creation_markers_and_push_created_data(); /* done */ - stm_thread_local_t *tl = STM_SEGMENT->running_thread; - release_thread_segment(tl); - STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; - STM_PSEGMENT->transaction_state = TS_NONE; + _finish_transaction(); /* we did cond_broadcast() above already, in try_wait_for_other_safe_points(). It may wake up @@ -345,13 +350,13 @@ /* reset all the modified objects (incl. re-adding GCFLAG_WRITE_BARRIER) */ reset_modified_from_other_segments(); + reset_all_creation_markers(); + stm_jmpbuf_t *jmpbuf_ptr = STM_SEGMENT->jmpbuf_ptr; stm_thread_local_t *tl = STM_SEGMENT->running_thread; tl->shadowstack = STM_PSEGMENT->shadowstack_at_start_of_transaction; - release_thread_segment(tl); - STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; - STM_PSEGMENT->transaction_state = TS_NONE; - reset_all_creation_markers(); + + _finish_transaction(); cond_broadcast(); mutex_unlock(); diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -52,7 +52,7 @@ struct stm_priv_segment_info_s { struct stm_segment_info_s pub; - struct list_s *old_objects_to_trace; + struct list_s *old_objects_pointing_to_young; struct list_s *modified_objects; struct list_s *creation_markers; uint64_t start_time; diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -21,13 +21,13 @@ { memset(small_alloc_shared, 0, sizeof(small_alloc_shared)); memset(small_alloc_privtz, 0, sizeof(small_alloc_privtz)); - free_pages = NULL; + free_uniform_pages = NULL; } -static void check_gcpage_still_shared(void) -{ - //...; -} +//static void check_gcpage_still_shared(void) +//{ +// //...; +//} #define GCPAGE_NUM_PAGES 20 @@ -54,8 +54,8 @@ char *p = uninitialized_page_start; long i; for (i = 0; i < 16; i++) { - *(char **)p = free_pages; - free_pages = p; + *(char **)p = free_uniform_pages; + free_uniform_pages = p; } return; @@ -69,7 +69,7 @@ /* not thread-safe! Use only when holding the mutex */ assert(_has_mutex()); - if (free_pages == NULL) + if (free_uniform_pages == NULL) grab_more_free_pages_for_small_allocations(); abort();//... 
diff --git a/c7/stm/gcpage.h b/c7/stm/gcpage.h --- a/c7/stm/gcpage.h +++ b/c7/stm/gcpage.h @@ -19,15 +19,31 @@ char *next_object; /* the next address we will return, or NULL */ char *range_last; /* if equal to next_object: next_object starts with a next pointer; if greater: last item of a - contigous range of unallocated objs */ + contiguous range of unallocated objs */ }; + +/* For each small request size, we have three independent chained lists + of address ranges: + + - 'small_alloc_shared': ranges are within pages that are likely to be + shared. We don't know for sure, because pages can be privatized + by normal run of stm_write(). + + - 'small_alloc_sh_old': moved from 'small_alloc_shared' when we're + looking for a range with the creation_marker set; this collects + the unsuitable ranges, i.e. the ones with already at least one + object and no creation marker. + + - 'small_alloc_privtz': ranges are within pages that are privatized. +*/ static struct small_alloc_s small_alloc_shared[GC_N_SMALL_REQUESTS]; +static struct small_alloc_s small_alloc_sh_old[GC_N_SMALL_REQUESTS]; static struct small_alloc_s small_alloc_privtz[GC_N_SMALL_REQUESTS]; -static char *free_pages; +static char *free_uniform_pages; static void setup_gcpage(void); static void teardown_gcpage(void); -static void check_gcpage_still_shared(void); +//static void check_gcpage_still_shared(void); static char *allocate_outside_nursery_large(uint64_t size); diff --git a/c7/stm/list.h b/c7/stm/list.h --- a/c7/stm/list.h +++ b/c7/stm/list.h @@ -45,6 +45,7 @@ static inline uintptr_t list_pop_item(struct list_s *lst) { + assert(lst->count > 0); return lst->items[--lst->count]; } diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -33,6 +33,8 @@ char reserved[64]; } nursery_ctl __attribute__((aligned(64))); +static struct list_s *old_objects_pointing_to_young; + /************************************************************/ @@ -43,6 +45,12 @@ assert(MEDIUM_OBJECT < LARGE_OBJECT); assert(LARGE_OBJECT < NURSERY_SECTION_SIZE); nursery_ctl.used = 0; + old_objects_pointing_to_young = list_create(); +} + +static void teardown_nursery(void) +{ + list_free(old_objects_pointing_to_young); } static inline bool _is_in_nursery(object_t *obj) @@ -100,7 +108,6 @@ } } - static void minor_trace_if_young(object_t **pobj) { /* takes a normal pointer to a thread-local pointer to an object */ @@ -110,7 +117,9 @@ if (!_is_young(obj)) return; - /* the location the object moved to is the second word in 'obj' */ + /* If the object was already seen here, its first word was set + to GCWORD_MOVED. In that case, the forwarding location, i.e. + where the object moved to, is stored in the second word in 'obj'. */ char *realobj = (char *)REAL_ADDRESS(stm_object_pages, obj); object_t **pforwarded_array = (object_t **)realobj; @@ -129,7 +138,9 @@ The pages S or W above are both pages of uniform sizes obtained from the end of the address space. The difference is that page S - can be shared, but page W needs to be privatized. + can be shared, but page W needs to be privatized. Moreover, + cases 2 and 4 differ in the creation_marker they need to put, + which has a granularity of 256 bytes. 
*/ size_t size = stmcb_size_rounded_up((struct object_s *)realobj); uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - READMARKER_START; @@ -214,9 +225,12 @@ pforwarded_array[0] = GCWORD_MOVED; pforwarded_array[1] = nobj; *pobj = nobj; + + /* Must trace the object later */ + LIST_APPEND(old_objects_pointing_to_young, nobj); } -static void minor_trace_roots(void) +static void collect_roots_in_nursery(void) { stm_thread_local_t *tl = stm_thread_locals; do { @@ -229,6 +243,48 @@ } while (tl != stm_thread_locals); } +static void trace_and_drag_out_of_nursery(object_t *obj) +{ + if (is_in_shared_pages(obj)) { + /* the object needs fixing only in one copy, because all copies + are shared and identical. */ + char *realobj = (char *)REAL_ADDRESS(stm_object_pages, obj); + stmcb_trace((struct object_s *)realobj, &minor_trace_if_young); + } + else { + /* every segment needs fixing */ + long i; + for (i = 0; i < NB_SEGMENTS; i++) { + char *realobj = (char *)REAL_ADDRESS(get_segment_base(i), obj); + stmcb_trace((struct object_s *)realobj, &minor_trace_if_young); + } + } +} + +static void collect_oldrefs_to_nursery(struct list_s *lst) +{ + while (!list_is_empty(lst)) { + object_t *obj = (object_t *)list_pop_item(lst); + assert(!_is_in_nursery(obj)); + + /* We must have GCFLAG_WRITE_BARRIER_CALLED so far. If we + don't, it's because the same object was stored in several + segment's old_objects_pointing_to_young. It's fine to + ignore duplicates. */ + if ((obj->stm_flags & GCFLAG_WRITE_BARRIER_CALLED) == 0) + continue; + + /* Remove the flag GCFLAG_WRITE_BARRIER_CALLED. No live object + should have this flag set after a nursery collection. */ + obj->stm_flags &= ~GCFLAG_WRITE_BARRIER_CALLED; + + /* Trace the 'obj' to replace pointers to nursery with pointers + outside the nursery, possibly forcing nursery objects out + and adding them to 'old_objects_pointing_to_young' as well. */ + trace_and_drag_out_of_nursery(obj); + } +} + static void reset_nursery(void) { /* reset the global amount-of-nursery-used-so-far */ @@ -243,6 +299,7 @@ struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i); /* no race condition here, because all other threads are paused in safe points, so cannot be e.g. in _stm_allocate_slowpath() */ + uintptr_t old_end = other_pseg->real_nursery_section_end; other_pseg->real_nursery_section_end = 0; other_pseg->pub.v_nursery_section_end = 0; @@ -252,7 +309,10 @@ 'transaction_read_version' without changing 'min_read_version_outside_nursery'. 
*/ - if (other_pseg->pub.transaction_read_version < 0xff) { + if (other_pseg->transaction_state == TS_NONE) { + /* no transaction running now, nothing to do */ + } + else if (other_pseg->pub.transaction_read_version < 0xff) { other_pseg->pub.transaction_read_version++; assert(0 < other_pseg->min_read_version_outside_nursery && other_pseg->min_read_version_outside_nursery @@ -268,9 +328,15 @@ } /* reset the creation markers */ - char *creation_markers = REAL_ADDRESS(other_pseg->pub.segment_base, - NURSERY_START >> 8); - memset(creation_markers, 0, NURSERY_SIZE >> 8); + if (old_end > NURSERY_START) { + char *creation_markers = REAL_ADDRESS(other_pseg->pub.segment_base, + NURSERY_START >> 8); + assert(old_end < NURSERY_START + NURSERY_SIZE); + memset(creation_markers, 0, (old_end - NURSERY_START) >> 8); + } + else { + assert(old_end == 0 || old_end == NURSERY_START); + } } } @@ -278,18 +344,43 @@ { /* all other threads are paused in safe points during the whole minor collection */ + dprintf(("minor_collection\n")); assert(_has_mutex()); + assert(list_is_empty(old_objects_pointing_to_young)); - check_gcpage_still_shared(); + /* List of what we need to do and invariants we need to preserve + ------------------------------------------------------------- - minor_trace_roots(); + We must move out of the nursery any object found within the + nursery. This requires either one or NB_SEGMENTS copies, + depending on the current write-state of the object. - // copy modified_objects + We need to move the mark stored in the write_locks, read_markers + and creation_markers arrays. The creation_markers need some care + because they work at a coarser granularity of 256 bytes, so + objects with an "on" mark should not be moved too close to + objects with an "off" mark and vice-versa. + Then we must trace (= look inside) some objects outside the + nursery, and fix any pointer found that goes to a nursery object. + This tracing itself needs to be done either once or NB_SEGMENTS + times, depending on whether the object is fully in shared pages + or not. We assume that 'stmcb_size_rounded_up' produce the same + results on all copies (i.e. don't depend on modifiable + information). 
+ */ - fprintf(stderr, "minor_collection\n"); - abort(); //...; + //check_gcpage_still_shared(); + collect_roots_in_nursery(); + + long i; + for (i = 0; i < NB_SEGMENTS; i++) { + struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i); + collect_oldrefs_to_nursery(other_pseg->old_objects_pointing_to_young); + } + + collect_oldrefs_to_nursery(old_objects_pointing_to_young); reset_nursery(); } diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -205,3 +205,22 @@ list_clear(STM_PSEGMENT->creation_markers); } + +static bool is_in_shared_pages(object_t *obj) +{ + uintptr_t first_page = ((uintptr_t)obj) / 4096UL; + + if ((obj->stm_flags & GCFLAG_SMALL_UNIFORM) != 0) + return (flag_page_private[first_page] == SHARED_PAGE); + + ssize_t obj_size = stmcb_size_rounded_up( + (struct object_s *)REAL_ADDRESS(stm_object_pages, obj)); + + uintptr_t end_page = (((uintptr_t)obj) + obj_size + 4095) / 4096UL; + /* that's the page *following* the last page with the object */ + + while (first_page < end_page) + if (flag_page_private[first_page++] != SHARED_PAGE) + return false; + return true; +} diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -36,3 +36,4 @@ static void set_single_creation_marker(stm_char *p, int newvalue); static void reset_all_creation_markers(void); static void reset_all_creation_markers_and_push_created_data(void); +static bool is_in_shared_pages(object_t *obj); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -64,7 +64,7 @@ pr->write_lock_num = i + 1; pr->pub.segment_num = i; pr->pub.segment_base = segment_base; - pr->old_objects_to_trace = list_create(); + pr->old_objects_pointing_to_young = list_create(); pr->modified_objects = list_create(); pr->creation_markers = list_create(); } @@ -96,7 +96,9 @@ long i; for (i = 0; i < NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pr = get_priv_segment(i); - list_free(pr->old_objects_to_trace); + list_free(pr->old_objects_pointing_to_young); + list_free(pr->modified_objects); + list_free(pr->creation_markers); } munmap(stm_object_pages, TOTAL_MEMORY); @@ -107,6 +109,7 @@ teardown_core(); teardown_sync(); teardown_gcpage(); + teardown_nursery(); } void stm_register_thread_local(stm_thread_local_t *tl) From noreply at buildbot.pypy.org Sat Feb 22 18:27:53 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sat, 22 Feb 2014 18:27:53 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Re-share the nursery pages after a minor collection Message-ID: <20140222172753.5EF551C03FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r811:a8f773df8a63 Date: 2014-02-22 18:27 +0100 http://bitbucket.org/pypy/stmgc/changeset/a8f773df8a63/ Log: Re-share the nursery pages after a minor collection diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -383,6 +383,8 @@ collect_oldrefs_to_nursery(old_objects_pointing_to_young); reset_nursery(); + + pages_make_shared_again(FIRST_NURSERY_PAGE, NB_NURSERY_PAGES); } diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -5,9 +5,9 @@ static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count) { - /* call remap_file_pages() to make all pages in the - range(pagenum, pagenum+count) refer to the same - physical range of pages from segment 0 */ + /* call remap_file_pages() to make all pages in the range(pagenum, + pagenum+count) refer to the same physical range of pages from + 
segment 0. */ uintptr_t i; for (i = 1; i < NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); @@ -19,9 +19,27 @@ abort(); } } - for (i = 0; i < count; i++) { - assert(flag_page_private[pagenum + i] == FREE_PAGE); + for (i = 0; i < count; i++) flag_page_private[pagenum + i] = SHARED_PAGE; +} + +static void pages_make_shared_again(uintptr_t pagenum, uintptr_t count) +{ + /* Same as pages_initialize_shared(), but tries hard to minimize the + total number of pages that remap_file_pages() must handle, by + fragmenting calls as much as possible (the overhead of one system + call appears smaller as the overhead per page). */ + uintptr_t start, i = 0; + while (i < count) { + if (flag_page_private[pagenum + (i++)] == SHARED_PAGE) + continue; + start = i; /* first index of a private page */ + while (1) { + i++; + if (i == count || flag_page_private[pagenum + i] == SHARED_PAGE) + break; + } + pages_initialize_shared(pagenum + start, i - start); } } diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -20,6 +20,7 @@ static void _pages_privatize(uintptr_t pagenum, uintptr_t count, bool full); static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); +static void pages_make_shared_again(uintptr_t pagenum, uintptr_t count); inline static void pages_privatize(uintptr_t pagenum, uintptr_t count, bool full) { From noreply at buildbot.pypy.org Sat Feb 22 19:54:07 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 22 Feb 2014 19:54:07 +0100 (CET) Subject: [pypy-commit] pypy default: improve dtype creation error messages Message-ID: <20140222185407.352211D22DE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69271:4a4a4fae4a49 Date: 2014-02-22 12:52 -0500 http://bitbucket.org/pypy/pypy/changeset/4a4a4fae4a49/ Log: improve dtype creation error messages diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -429,8 +429,7 @@ pass if name[0] in 'VSUc' or name[0] in '<>=|' and name[1] in 'VSUc': return variable_dtype(space, name) - raise OperationError(space.w_TypeError, space.wrap( - "data type %s not understood" % name)) + raise oefmt(space.w_TypeError, 'data type "%s" not understood', name) elif space.isinstance_w(w_dtype, space.w_list): return dtype_from_list(space, w_dtype) elif space.isinstance_w(w_dtype, space.w_tuple): @@ -449,9 +448,9 @@ return dtype if w_dtype is dtype.w_box_type: return dtype - raise oefmt(space.w_TypeError, - "data type not understood (value of type %T not expected " - "here)", w_dtype) + if space.isinstance_w(w_dtype, space.w_type): + raise oefmt(space.w_NotImplementedError, "object dtype not implemented") + raise oefmt(space.w_TypeError, "data type not understood") W_Dtype.typedef = TypeDef("dtype", __module__ = "numpy", diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -68,7 +68,11 @@ assert exc.value.message == "There are no fields in dtype int8." 
exc = raises(TypeError, dtype, (1, 2)) - assert 'data type not understood' in str(exc.value) + assert exc.value[0] == 'data type not understood' + exc = raises(TypeError, dtype, lambda: 42) + assert exc.value[0] == 'data type not understood' + exc = raises(TypeError, dtype, 'oooo') + assert exc.value[0] == 'data type "oooo" not understood' raises(KeyError, 'dtype(int)["asdasd"]') def test_dtype_from_tuple(self): @@ -365,6 +369,18 @@ s2 = np.array(123, dtype=dt2).byteswap().tostring() assert s1 == s2 + def test_object(self): + import numpy as np + import sys + class O(object): + pass + for o in [object, O]: + if '__pypy__' not in sys.builtin_module_names: + assert np.dtype(o).str == '|O8' + else: + exc = raises(NotImplementedError, "np.dtype(o)") + assert exc.value[0] == 'object dtype not implemented' + class AppTestTypes(BaseAppTestDtypes): def test_abstract_types(self): import numpypy as numpy From noreply at buildbot.pypy.org Sat Feb 22 19:54:08 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 22 Feb 2014 19:54:08 +0100 (CET) Subject: [pypy-commit] pypy default: fix numpy.concatenate(axis=None) Message-ID: <20140222185408.9604C1D22DE@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69272:a00ad3c03145 Date: 2014-02-22 13:52 -0500 http://bitbucket.org/pypy/pypy/changeset/a00ad3c03145/ Log: fix numpy.concatenate(axis=None) diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -98,15 +98,25 @@ return w_arr.descr_dot(space, w_obj2, w_out) - at unwrap_spec(axis=int) -def concatenate(space, w_args, axis=0): +def concatenate(space, w_args, w_axis=None): args_w = space.listview(w_args) if len(args_w) == 0: - raise OperationError(space.w_ValueError, space.wrap("need at least one array to concatenate")) + raise oefmt(space.w_ValueError, "need at least one array to concatenate") args_w = [convert_to_array(space, w_arg) for w_arg in args_w] + if w_axis is None: + w_axis = space.wrap(0) + if space.is_none(w_axis): + args_w = [w_arg.reshape(space, + space.newlist([w_arg.descr_get_size(space)])) + for w_arg in args_w] + w_axis = space.wrap(0) dtype = args_w[0].get_dtype() shape = args_w[0].get_shape()[:] ndim = len(shape) + if ndim == 0: + raise oefmt(space.w_ValueError, + "zero-dimensional arrays cannot be concatenated") + axis = space.int_w(w_axis) orig_axis = axis if axis < 0: axis = ndim + axis diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -91,7 +91,7 @@ axis = maxint else: axis = space.int_w(w_axis) - if axis < -shapelen or axis>= shapelen: + if axis < -shapelen or axis >= shapelen: raise oefmt(space.w_ValueError, "axis entry %d is out of bounds [%d, %d)", axis, -shapelen, shapelen) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1773,6 +1773,18 @@ def test_concatenate(self): from numpypy import array, concatenate, dtype + exc = raises(ValueError, concatenate, (array(1.5), array(2.5))) + assert exc.value[0] == 'zero-dimensional arrays cannot be concatenated' + a = concatenate((array(1.5), array(2.5)), axis=None) + assert (a == [1.5, 2.5]).all() + assert exc.value[0] == 'zero-dimensional arrays cannot be concatenated' + 
exc = raises(ValueError, concatenate, (array([1.5]), array(2.5))) + assert exc.value[0] == 'all the input arrays must have same number ' \ + 'of dimensions' + exc = raises(ValueError, concatenate, (array(1.5), array([2.5]))) + assert exc.value[0] == 'zero-dimensional arrays cannot be concatenated' + a = concatenate((array([1.5]), array([2.5]))) + assert (a == [1.5, 2.5]).all() a1 = array([0,1,2]) a2 = array([3,4,5]) a = concatenate((a1, a2)) @@ -1783,6 +1795,8 @@ assert (a == [0,1,2,3,4,5]).all() a = concatenate((a1, a2), axis=-1) assert (a == [0,1,2,3,4,5]).all() + a = concatenate((a1, a2), axis=None) + assert (a == [0,1,2,3,4,5]).all() b1 = array([[1, 2], [3, 4]]) b2 = array([[5, 6]]) @@ -1790,6 +1804,8 @@ assert (b == [[1, 2],[3, 4],[5, 6]]).all() c = concatenate((b1, b2.T), axis=1) assert (c == [[1, 2, 5],[3, 4, 6]]).all() + c1 = concatenate((b1, b2), axis=None) + assert (c1 == [1, 2, 3, 4, 5, 6]).all() d = concatenate(([0],[1])) assert (d == [0,1]).all() e1 = array([[0,1],[2,3]]) From noreply at buildbot.pypy.org Sat Feb 22 22:15:04 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 22 Feb 2014 22:15:04 +0100 (CET) Subject: [pypy-commit] pypy default: implement comparison funcs for record types Message-ID: <20140222211504.2C1FB1C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69273:9ba1d3bb478e Date: 2014-02-22 15:52 -0500 http://bitbucket.org/pypy/pypy/changeset/9ba1d3bb478e/ Log: implement comparison funcs for record types diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -5,6 +5,7 @@ import re +from pypy.interpreter import special from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root from pypy.interpreter.error import OperationError from pypy.module.micronumpy import interp_boxes @@ -74,6 +75,7 @@ def __init__(self): """NOT_RPYTHON""" self.fromcache = InternalSpaceCache(self).getorbuild + self.w_NotImplemented = special.NotImplemented(self) def _freeze_(self): return True @@ -194,6 +196,9 @@ def is_w(self, w_obj, w_what): return w_obj is w_what + def eq_w(self, w_obj, w_what): + return w_obj == w_what + def issubtype(self, w_type1, w_type2): return BoolObject(True) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -371,17 +371,23 @@ w_ldtype = w_lhs.get_dtype() w_rdtype = w_rhs.get_dtype() if w_ldtype.is_str_type() and w_rdtype.is_str_type() and \ - self.comparison_func: + self.comparison_func: pass elif (w_ldtype.is_str_type() or w_rdtype.is_str_type()) and \ - self.comparison_func and w_out is None: + self.comparison_func and w_out is None: return space.wrap(False) - elif (w_ldtype.is_flexible_type() or \ - w_rdtype.is_flexible_type()): - raise OperationError(space.w_TypeError, space.wrap( - 'unsupported operand dtypes %s and %s for "%s"' % \ - (w_rdtype.get_name(), w_ldtype.get_name(), - self.name))) + elif w_ldtype.is_flexible_type() or w_rdtype.is_flexible_type(): + if self.comparison_func: + if self.name == 'equal' or self.name == 'not_equal': + res = w_ldtype.eq(space, w_rdtype) + if not res: + return space.wrap(self.name == 'not_equal') + else: + return space.w_NotImplemented + else: + raise oefmt(space.w_TypeError, + 'unsupported operand dtypes %s and %s for "%s"', + w_rdtype.name, w_ldtype.name, self.name) if self.are_common_types(w_ldtype, w_rdtype): if not 
w_lhs.is_scalar() and w_rhs.is_scalar(): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3573,6 +3573,28 @@ exc = raises(ValueError, "a.view(('float32', 2))") assert exc.value[0] == 'new type not compatible with array.' + def test_record_ufuncs(self): + import numpy as np + a = np.zeros(3, dtype=[('a', 'i8'), ('b', 'i8')]) + b = np.zeros(3, dtype=[('a', 'i8'), ('b', 'i8')]) + c = np.zeros(3, dtype=[('a', 'f8'), ('b', 'f8')]) + d = np.ones(3, dtype=[('a', 'i8'), ('b', 'i8')]) + e = np.ones(3, dtype=[('a', 'i8'), ('b', 'i8'), ('c', 'i8')]) + exc = raises(TypeError, abs, a) + assert exc.value[0] == 'Not implemented for this type' + assert (a == a).all() + assert not (a != a).any() + assert (a == b).all() + assert not (a != b).any() + assert a != c + assert not a == c + assert (a != d).all() + assert not (a == d).any() + assert a != e + assert not a == e + assert np.greater(a, a) is NotImplemented + assert np.less_equal(a, a) is NotImplemented + class AppTestPyPy(BaseNumpyAppTest): def setup_class(cls): if option.runappdirect and '__pypy__' not in sys.builtin_module_names: diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1944,6 +1944,20 @@ pieces.append(")") return "".join(pieces) + def eq(self, v1, v2): + assert isinstance(v1, interp_boxes.W_VoidBox) + assert isinstance(v2, interp_boxes.W_VoidBox) + s1 = v1.dtype.get_size() + s2 = v2.dtype.get_size() + assert s1 == s2 + for i in range(s1): + if v1.arr.storage[v1.ofs + i] != v2.arr.storage[v2.ofs + i]: + return False + return True + + def ne(self, v1, v2): + return not self.eq(v1, v2) + for tp in [Int32, Int64]: if tp.T == lltype.Signed: IntP = tp From noreply at buildbot.pypy.org Sat Feb 22 23:46:50 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sat, 22 Feb 2014 23:46:50 +0100 (CET) Subject: [pypy-commit] pypy default: support auto naming record dtype fields Message-ID: <20140222224650.3C12C1C11A9@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69274:f437d5038346 Date: 2014-02-22 17:24 -0500 http://bitbucket.org/pypy/pypy/changeset/f437d5038346/ Log: support auto naming record dtype fields diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -357,8 +357,8 @@ fields = {} offset = 0 fieldnames = [] - for w_elem in lst_w: - size = 1 + for i in range(len(lst_w)): + w_elem = lst_w[i] w_shape = space.newtuple([]) if space.len_w(w_elem) == 3: w_fldname, w_flddesc, w_shape = space.fixedview(w_elem) @@ -368,11 +368,13 @@ w_fldname, w_flddesc = space.fixedview(w_elem, 2) subdtype = descr__new__(space, space.gettypefor(W_Dtype), w_flddesc, w_shape=w_shape) fldname = space.str_w(w_fldname) + if fldname == '': + fldname = 'f%d' % i if fldname in fields: - raise OperationError(space.w_ValueError, space.wrap("two fields with the same name")) + raise oefmt(space.w_ValueError, "two fields with the same name") assert isinstance(subdtype, W_Dtype) fields[fldname] = (offset, subdtype) - offset += subdtype.get_size() * size + offset += subdtype.get_size() fieldnames.append(fldname) itemtype = types.RecordType() return W_Dtype(itemtype, NPY_VOID, NPY_VOIDLTR, diff --git a/pypy/module/micronumpy/test/test_dtypes.py 
b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -991,6 +991,8 @@ assert d.type is void assert d.char == 'V' assert d.names == ("x", "y", "z", "value") + d.names = ('a', '', 'c', 'd') + assert d.names == ('a', '', 'c', 'd') d.names = ('a', 'b', 'c', 'd') assert d.names == ('a', 'b', 'c', 'd') exc = raises(ValueError, "d.names = ('a', 'b', 'c', 'c')") @@ -1000,6 +1002,14 @@ assert d.names == ('a', 'b', 'c', 'd') raises(KeyError, 'd["xyz"]') raises(KeyError, 'd.fields["xyz"]') + d = dtype([('', ' Author: Brian Kearns Branch: Changeset: r69275:acb4d3bbfda9 Date: 2014-02-22 17:39 -0500 http://bitbucket.org/pypy/pypy/changeset/acb4d3bbfda9/ Log: fix hash(np.dtype) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -289,6 +289,9 @@ return space.wrap(0) return space.wrap(len(self.fields)) + def descr_hash(self, space): + return space.hash(self.descr_reduce(space)) + def descr_reduce(self, space): w_class = space.type(self) @@ -465,6 +468,7 @@ __getitem__ = interp2app(W_Dtype.descr_getitem), __len__ = interp2app(W_Dtype.descr_len), + __hash__ = interp2app(W_Dtype.descr_hash), __reduce__ = interp2app(W_Dtype.descr_reduce), __setstate__ = interp2app(W_Dtype.descr_setstate), newbyteorder = interp2app(W_Dtype.descr_newbyteorder), diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -351,13 +351,13 @@ dt2 = dt.newbyteorder("<") dt3 = dt.newbyteorder(">") assert dt.byteorder != dt1.byteorder - #assert hash(dt) == hash(dt1) + assert hash(dt) == hash(dt1) if dt == dt2: assert dt.byteorder != dt2.byteorder - #assert hash(dt) == hash(dt2) + assert hash(dt) == hash(dt2) else: assert dt.byteorder != dt3.byteorder - #assert hash(dt) == hash(dt3) + assert hash(dt) == hash(dt3) exc = raises(ValueError, dt.newbyteorder, 'XX') assert exc.value[0] == 'XX is an unrecognized byteorder' From noreply at buildbot.pypy.org Sun Feb 23 08:34:57 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Feb 2014 08:34:57 +0100 (CET) Subject: [pypy-commit] pypy py3k: Fix bogus skipif which always skips these tests Message-ID: <20140223073457.11B451C303A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: py3k Changeset: r69276:4b0188d1eb55 Date: 2014-02-23 08:34 +0100 http://bitbucket.org/pypy/pypy/changeset/4b0188d1eb55/ Log: Fix bogus skipif which always skips these tests diff --git a/pypy/module/thread/test/test_lock.py b/pypy/module/thread/test/test_lock.py --- a/pypy/module/thread/test/test_lock.py +++ b/pypy/module/thread/test/test_lock.py @@ -1,6 +1,6 @@ from __future__ import with_statement import py -import sys +import sys, os from pypy.module.thread.test.support import GenericTestThread from rpython.translator.c.test.test_genc import compile @@ -150,7 +150,7 @@ class AppTestLockSignals(GenericTestThread): - pytestmark = py.test.mark.skipif("sys.platform != 'posix'") + pytestmark = py.test.mark.skipif("os.name != 'posix'") def setup_class(cls): cls.w_using_pthread_cond = cls.space.wrap(sys.platform == 'freebsd6') From noreply at buildbot.pypy.org Sun Feb 23 08:43:59 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Feb 2014 08:43:59 +0100 (CET) Subject: [pypy-commit] pypy default: Backport from the py3k branch: add 
lock._py3k_acquire() with the Message-ID: <20140223074359.4442B1C02EA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69277:9b4e9b797ba1 Date: 2014-02-23 08:43 +0100 http://bitbucket.org/pypy/pypy/changeset/9b4e9b797ba1/ Log: Backport from the py3k branch: add lock._py3k_acquire() with the same semantics as in Python 3. diff --git a/pypy/module/thread/os_lock.py b/pypy/module/thread/os_lock.py --- a/pypy/module/thread/os_lock.py +++ b/pypy/module/thread/os_lock.py @@ -2,11 +2,57 @@ Python locks, based on true threading locks provided by the OS. """ +import time from rpython.rlib import rthread from pypy.module.thread.error import wrap_thread_error from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef +from rpython.rlib.rarithmetic import r_longlong + + +LONGLONG_MAX = r_longlong(2 ** (r_longlong.BITS-1) - 1) +TIMEOUT_MAX = LONGLONG_MAX + +RPY_LOCK_FAILURE, RPY_LOCK_ACQUIRED, RPY_LOCK_INTR = range(3) + +def parse_acquire_args(space, blocking, timeout): + if not blocking and timeout != -1.0: + raise OperationError(space.w_ValueError, space.wrap( + "can't specify a timeout for a non-blocking call")) + if timeout < 0.0 and timeout != -1.0: + raise OperationError(space.w_ValueError, space.wrap( + "timeout value must be strictly positive")) + if not blocking: + microseconds = 0 + elif timeout == -1.0: + microseconds = -1 + else: + timeout *= 1e6 + if timeout > float(TIMEOUT_MAX): + raise OperationError(space.w_OverflowError, space.wrap( + "timeout value is too large")) + microseconds = r_longlong(timeout) + return microseconds + + +def acquire_timed(space, lock, microseconds): + """Helper to acquire an interruptible lock with a timeout.""" + endtime = (time.time() * 1e6) + microseconds + while True: + result = lock.acquire_timed(microseconds) + if result == RPY_LOCK_INTR: + # Run signal handlers if we were interrupted + space.getexecutioncontext().checksignals() + if microseconds >= 0: + microseconds = r_longlong(endtime - (time.time() * 1e6)) + # Check for negative values, since those mean block + # forever + if microseconds <= 0: + result = RPY_LOCK_FAILURE + if result != RPY_LOCK_INTR: + break + return result class Lock(W_Root): @@ -21,8 +67,8 @@ except rthread.error: raise wrap_thread_error(space, "out of resources") - @unwrap_spec(waitflag=int) - def descr_lock_acquire(self, space, waitflag=1): + @unwrap_spec(blocking=int) + def descr_lock_acquire(self, space, blocking=1): """Lock the lock. With the default argument of True, this blocks if the lock is already locked (even by the same thread), waiting for another thread to release the lock, and returns True once the lock is @@ -30,9 +76,24 @@ and the return value reflects whether the lock is acquired. The blocking operation is not interruptible.""" mylock = self.lock - result = mylock.acquire(bool(waitflag)) + result = mylock.acquire(bool(blocking)) return space.newbool(result) + @unwrap_spec(blocking=int, timeout=float) + def descr_lock_py3k_acquire(self, space, blocking=1, timeout=-1.0): + """(Backport of a Python 3 API for PyPy. This version takes +a timeout argument and handles signals, like Ctrl-C.) + +Lock the lock. Without argument, this blocks if the lock is already +locked (even by the same thread), waiting for another thread to release +the lock, and return None once the lock is acquired. +With an argument, this will only block if the argument is true, +and the return value reflects whether the lock is acquired. 
+The blocking operation is interruptible.""" + microseconds = parse_acquire_args(space, blocking, timeout) + result = acquire_timed(space, self.lock, microseconds) + return space.newbool(result == RPY_LOCK_ACQUIRED) + def descr_lock_release(self, space): """Release the lock, allowing another thread that is blocked waiting for the lock to acquire the lock. The lock must be in the locked state, @@ -69,6 +130,7 @@ descr_locked = interp2app(Lock.descr_lock_locked) descr__enter__ = interp2app(Lock.descr__enter__) descr__exit__ = interp2app(Lock.descr__exit__) +descr_py3k_acquire = interp2app(Lock.descr_lock_py3k_acquire) Lock.typedef = TypeDef("thread.lock", @@ -84,6 +146,7 @@ unlock it. A thread attempting to lock a lock that it has already locked will block until another thread unlocks it. Deadlocks may ensue.""", acquire = descr_acquire, + _py3k_acquire = descr_py3k_acquire, release = descr_release, locked = descr_locked, __enter__ = descr__enter__, diff --git a/pypy/module/thread/test/test_lock.py b/pypy/module/thread/test/test_lock.py --- a/pypy/module/thread/test/test_lock.py +++ b/pypy/module/thread/test/test_lock.py @@ -1,4 +1,6 @@ from __future__ import with_statement +import py +import sys, os from pypy.module.thread.test.support import GenericTestThread from rpython.translator.c.test.test_genc import compile @@ -49,6 +51,16 @@ assert feedback == [42] assert lock.locked() is False + def test_timeout(self): + import thread + lock = thread.allocate_lock() + assert lock.acquire() is True + assert lock.acquire(False) is False + raises(TypeError, lock.acquire, True, timeout=.1) + lock._py3k_acquire(True, timeout=.01) + lock._py3k_acquire(True, .01) + + def test_compile_lock(): from rpython.rlib import rgc from rpython.rlib.rthread import allocate_lock @@ -73,3 +85,78 @@ class AppTestLockAgain(GenericTestThread): # test it at app-level again to detect strange interactions test_lock_again = AppTestLock.test_lock.im_func + + +class AppTestLockSignals(GenericTestThread): + pytestmark = py.test.mark.skipif("os.name != 'posix'") + + def setup_class(cls): + cls.w_using_pthread_cond = cls.space.wrap(sys.platform == 'freebsd6') + + def w_acquire_retries_on_intr(self, lock): + import thread, os, signal, time + self.sig_recvd = False + def my_handler(signal, frame): + self.sig_recvd = True + old_handler = signal.signal(signal.SIGUSR1, my_handler) + try: + def other_thread(): + # Acquire the lock in a non-main thread, so this test works for + # RLocks. + lock.acquire() + # Wait until the main thread is blocked in the lock acquire, and + # then wake it up with this. + time.sleep(0.5) + os.kill(os.getpid(), signal.SIGUSR1) + # Let the main thread take the interrupt, handle it, and retry + # the lock acquisition. Then we'll let it run. + time.sleep(0.5) + lock.release() + thread.start_new_thread(other_thread, ()) + # Wait until we can't acquire it without blocking... + while lock.acquire(blocking=False): + lock.release() + time.sleep(0.01) + result = lock.acquire() # Block while we receive a signal. 
+ assert self.sig_recvd + assert result + finally: + signal.signal(signal.SIGUSR1, old_handler) + + def test_lock_acquire_retries_on_intr(self): + import thread + self.acquire_retries_on_intr(thread.allocate_lock()) + + def w_alarm_interrupt(self, sig, frame): + raise KeyboardInterrupt + + def test_lock_acquire_interruption(self): + if self.using_pthread_cond: + skip('POSIX condition variables cannot be interrupted') + import thread, signal, time + # Mimic receiving a SIGINT (KeyboardInterrupt) with SIGALRM while stuck + # in a deadlock. + # XXX this test can fail when the legacy (non-semaphore) implementation + # of locks is used in thread_pthread.h, see issue #11223. + oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt) + try: + lock = thread.allocate_lock() + lock.acquire() + signal.alarm(1) + t1 = time.time() + # XXX: raises doesn't work here? + #raises(KeyboardInterrupt, lock.acquire, timeout=5) + try: + lock._py3k_acquire(timeout=10) + except KeyboardInterrupt: + pass + else: + assert False, 'Expected KeyboardInterrupt' + dt = time.time() - t1 + # Checking that KeyboardInterrupt was raised is not sufficient. + # We want to assert that lock.acquire() was interrupted because + # of the signal, not that the signal handler was called immediately + # after timeout return of lock.acquire() (which can fool assertRaises). + assert dt < 8.0 + finally: + signal.signal(signal.SIGALRM, oldalrm) From noreply at buildbot.pypy.org Sun Feb 23 08:58:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Feb 2014 08:58:08 +0100 (CET) Subject: [pypy-commit] pypy default: Translation fix Message-ID: <20140223075808.E0E871C303A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69278:2def17020fa9 Date: 2014-02-23 08:57 +0100 http://bitbucket.org/pypy/pypy/changeset/2def17020fa9/ Log: Translation fix diff --git a/pypy/module/thread/os_lock.py b/pypy/module/thread/os_lock.py --- a/pypy/module/thread/os_lock.py +++ b/pypy/module/thread/os_lock.py @@ -8,6 +8,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.error import OperationError from rpython.rlib.rarithmetic import r_longlong From noreply at buildbot.pypy.org Sun Feb 23 09:09:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Feb 2014 09:09:43 +0100 (CET) Subject: [pypy-commit] pypy default: Unsure if "instance_ptr_iszero" and "instance_ptr_nonzero" are meant to Message-ID: <20140223080943.4C0861C03FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69279:c9f9e30aac58 Date: 2014-02-23 09:08 +0100 http://bitbucket.org/pypy/pypy/changeset/c9f9e30aac58/ Log: Unsure if "instance_ptr_iszero" and "instance_ptr_nonzero" are meant to appear or not during codewriting. Right now they do, and then metainterp crashes because they are not actually implemented. Fix? 
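    Before the diff below, a small hypothetical model of the rewrite rule being changed (plain flags instead of the real Transformer/SpaceOperation API): a pointer equality test whose other operand is the NULL constant is now reduced to the generic one-operand 'ptr_iszero' / 'ptr_nonzero' operations even when the operands are RPython instance pointers, rather than the 'instance_'-prefixed forms that the metainterp never implemented.

        # Hypothetical, simplified model -- not the real jtransform code.

        def rewritten_opname(args_are_instances, other_is_null_constant, negated):
            if other_is_null_constant:
                # reduced one-operand form, now used for instances as well
                return 'ptr_nonzero' if negated else 'ptr_iszero'
            if args_are_instances:
                return 'instance_ptr_ne' if negated else 'instance_ptr_eq'
            return 'ptr_ne' if negated else 'ptr_eq'

        # an instance pointer compared against NULL no longer becomes
        # 'instance_ptr_iszero', which had no metainterp implementation
        assert rewritten_opname(True, True, negated=False) == 'ptr_iszero'
        assert rewritten_opname(True, False, negated=True) == 'instance_ptr_ne'

    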
diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -967,8 +967,7 @@ if self._is_rclass_instance(op.args[0]): assert self._is_rclass_instance(op.args[1]) op = SpaceOperation('instance_ptr_eq', op.args, op.result) - prefix = 'instance_' - op1 = self._rewrite_equality(op, prefix + 'ptr_iszero') + op1 = self._rewrite_equality(op, 'ptr_iszero') return self._rewrite_cmp_ptrs(op1) def rewrite_op_ptr_ne(self, op): @@ -976,8 +975,7 @@ if self._is_rclass_instance(op.args[0]): assert self._is_rclass_instance(op.args[1]) op = SpaceOperation('instance_ptr_ne', op.args, op.result) - prefix = 'instance_' - op1 = self._rewrite_equality(op, prefix + 'ptr_nonzero') + op1 = self._rewrite_equality(op, 'ptr_nonzero') return self._rewrite_cmp_ptrs(op1) rewrite_op_ptr_iszero = _rewrite_cmp_ptrs diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -705,8 +705,8 @@ c0 = const(lltype.nullptr(rclass.OBJECT)) for opname, newopname, reducedname in [ - ('ptr_eq', 'instance_ptr_eq', 'instance_ptr_iszero'), - ('ptr_ne', 'instance_ptr_ne', 'instance_ptr_nonzero') + ('ptr_eq', 'instance_ptr_eq', 'ptr_iszero'), + ('ptr_ne', 'instance_ptr_ne', 'ptr_nonzero') ]: op = SpaceOperation(opname, [v1, v2], v3) op1 = Transformer().rewrite_operation(op) From noreply at buildbot.pypy.org Sun Feb 23 09:09:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Feb 2014 09:09:44 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140223080944.5E4211C03FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69280:584db7c5330f Date: 2014-02-23 09:09 +0100 http://bitbucket.org/pypy/pypy/changeset/584db7c5330f/ Log: merge heads diff --git a/pypy/module/thread/os_lock.py b/pypy/module/thread/os_lock.py --- a/pypy/module/thread/os_lock.py +++ b/pypy/module/thread/os_lock.py @@ -8,6 +8,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.error import OperationError from rpython.rlib.rarithmetic import r_longlong From noreply at buildbot.pypy.org Sun Feb 23 09:18:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Feb 2014 09:18:56 +0100 (CET) Subject: [pypy-commit] pypy default: Remove dead code Message-ID: <20140223081856.B7F531C03FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69281:5dc7a91fe1e3 Date: 2014-02-23 09:18 +0100 http://bitbucket.org/pypy/pypy/changeset/5dc7a91fe1e3/ Log: Remove dead code diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -963,7 +963,6 @@ return self._rewrite_equality(op, 'int_is_true') def rewrite_op_ptr_eq(self, op): - prefix = '' if self._is_rclass_instance(op.args[0]): assert self._is_rclass_instance(op.args[1]) op = SpaceOperation('instance_ptr_eq', op.args, op.result) @@ -971,7 +970,6 @@ return self._rewrite_cmp_ptrs(op1) def rewrite_op_ptr_ne(self, op): - prefix = '' if self._is_rclass_instance(op.args[0]): assert self._is_rclass_instance(op.args[1]) op = SpaceOperation('instance_ptr_ne', op.args, op.result) From noreply at buildbot.pypy.org Sun Feb 23 09:59:53 2014 From: noreply at 
buildbot.pypy.org (arigo) Date: Sun, 23 Feb 2014 09:59:53 +0100 (CET) Subject: [pypy-commit] pypy default: Copy the py3k logic in Condition.wait() Message-ID: <20140223085953.627511D26F0@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69282:8d88f18cc867 Date: 2014-02-23 09:59 +0100 http://bitbucket.org/pypy/pypy/changeset/8d88f18cc867/ Log: Copy the py3k logic in Condition.wait() diff --git a/lib-python/2.7/threading.py b/lib-python/2.7/threading.py --- a/lib-python/2.7/threading.py +++ b/lib-python/2.7/threading.py @@ -244,22 +244,11 @@ if __debug__: self._note("%s.wait(): got it", self) else: - # Balancing act: We can't afford a pure busy loop, so we - # have to sleep; but if we sleep the whole timeout time, - # we'll be unresponsive. The scheme here sleeps very - # little at first, longer as time goes on, but never longer - # than 20 times per second (or the timeout time remaining). - endtime = _time() + timeout - delay = 0.0005 # 500 us -> initial delay of 1 ms - while True: - gotit = waiter.acquire(0) - if gotit: - break - remaining = endtime - _time() - if remaining <= 0: - break - delay = min(delay * 2, remaining, .05) - _sleep(delay) + # PyPy patch: use _py3k_acquire() + if timeout > 0: + gotit = waiter._py3k_acquire(True, timeout) + else: + gotit = waiter.acquire(False) if not gotit: if __debug__: self._note("%s.wait(%s): timed out", self, timeout) From noreply at buildbot.pypy.org Sun Feb 23 10:36:27 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Feb 2014 10:36:27 +0100 (CET) Subject: [pypy-commit] pypy default: Support possibly-misaligned raw-storage getitems and setitems. Message-ID: <20140223093627.5D4E01C03FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69283:6b8aaf94225a Date: 2014-02-23 10:35 +0100 http://bitbucket.org/pypy/pypy/changeset/6b8aaf94225a/ Log: Support possibly-misaligned raw-storage getitems and setitems. diff --git a/rpython/rlib/rawstorage.py b/rpython/rlib/rawstorage.py --- a/rpython/rlib/rawstorage.py +++ b/rpython/rlib/rawstorage.py @@ -18,17 +18,87 @@ def raw_storage_getitem(TP, storage, index): "NOT_RPYTHON" + _check_alignment(TP, index) return rffi.cast(rffi.CArrayPtr(TP), rffi.ptradd(storage, index))[0] def raw_storage_setitem(storage, index, item): "NOT_RPYTHON" - TP = rffi.CArrayPtr(lltype.typeOf(item)) - rffi.cast(TP, rffi.ptradd(storage, index))[0] = item + TP = lltype.typeOf(item) + _check_alignment(TP, index) + rffi.cast(rffi.CArrayPtr(TP), rffi.ptradd(storage, index))[0] = item @specialize.arg(1) def free_raw_storage(storage, track_allocation=True): lltype.free(storage, flavor='raw', track_allocation=track_allocation) +# ____________________________________________________________ +# +# Support for possibly-unaligned accesses + +from rpython.jit.backend import detect_cpu +try: + misaligned_is_fine = detect_cpu.autodetect().startswith('x86') +except detect_cpu.ProcessorAutodetectError: + misaligned_is_fine = False + + +class AlignmentError(NotImplementedError): + "Means that raw_storage_{get,set}item was used on unaligned memory" + +# Tweak? It seems a reasonable value for any system out there: requiring +# an aligned access to be up to 8-bytes-aligned, even for 64-bit data +# types on 32-bit systems. 
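    A brief usage sketch of the helpers added to rawstorage.py above; the wrapper function here is hypothetical and the calls only mirror what the new test file below exercises. The *_unaligned variants accept any byte offset and, when the offset is not naturally aligned and the CPU was not detected as x86 (misaligned_is_fine), fall back to a memcpy through a small scratch buffer instead of a direct load/store.

        # Usage sketch only; mirrors the new tests in test_rawstorage.py below.
        from rpython.rtyper.lltypesystem import lltype
        from rpython.rlib.rawstorage import (alloc_raw_storage, free_raw_storage,
            raw_storage_setitem_unaligned, raw_storage_getitem_unaligned)

        def roundtrip_float_at_odd_offset():
            buf = alloc_raw_storage(32)
            try:
                # offset 3 is not a multiple of sizeof(Float): handled directly
                # on x86, or via the memcpy fallback on other CPUs
                raw_storage_setitem_unaligned(buf, 3, 2.5)
                return raw_storage_getitem_unaligned(lltype.Float, buf, 3)
            finally:
                free_raw_storage(buf)

    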
+MAXIMUM_ALIGNMENT = 8 + + at specialize.memo() +def _get_alignment_mask(TP): + size = rffi.sizeof(TP) + alignment = 1 + while (size & alignment) == 0 and alignment < MAXIMUM_ALIGNMENT: + alignment *= 2 + return alignment - 1 + +def _check_alignment(TP, index): + """Check that the 'index' does indeed have the maximum alignment + for the given type.""" + mask = _get_alignment_mask(TP) + if (index & mask) != 0: + raise AlignmentError + + at specialize.ll() +def raw_storage_getitem_unaligned(TP, storage, index): + if misaligned_is_fine: + return raw_storage_getitem(TP, storage, index) + mask = _get_alignment_mask(TP) + if (index & mask) == 0: + return raw_storage_getitem(TP, storage, index) + ptr = rffi.ptradd(storage, index) + with lltype.scoped_alloc(rffi.CArray(TP), 1) as s_array: + rffi.c_memcpy(rffi.cast(rffi.VOIDP, s_array), + rffi.cast(rffi.VOIDP, ptr), + rffi.sizeof(TP)) + return rffi.cast(rffi.CArrayPtr(TP), s_array)[0] + + at specialize.ll() +def raw_storage_setitem_unaligned(storage, index, item): + if misaligned_is_fine: + raw_storage_setitem(storage, index, item) + return + TP = lltype.typeOf(item) + mask = _get_alignment_mask(TP) + if (index & mask) == 0: + raw_storage_setitem(storage, index, item) + return + ptr = rffi.ptradd(storage, index) + with lltype.scoped_alloc(rffi.CArray(TP), 1) as s_array: + rffi.cast(rffi.CArrayPtr(TP), s_array)[0] = item + rffi.c_memcpy(rffi.cast(rffi.VOIDP, ptr), + rffi.cast(rffi.VOIDP, s_array), + rffi.sizeof(TP)) + +# ____________________________________________________________ + + class RawStorageGetitemEntry(ExtRegistryEntry): _about_ = raw_storage_getitem diff --git a/rpython/rlib/test/test_rawstorage.py b/rpython/rlib/test/test_rawstorage.py --- a/rpython/rlib/test/test_rawstorage.py +++ b/rpython/rlib/test/test_rawstorage.py @@ -1,23 +1,91 @@ +import py +import sys +from rpython.rtyper.lltypesystem import lltype +from rpython.rlib import rawstorage +from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage,\ + raw_storage_setitem, raw_storage_getitem, AlignmentError,\ + raw_storage_setitem_unaligned, raw_storage_getitem_unaligned +from rpython.rtyper.test.tool import BaseRtypingTest +from rpython.translator.c.test.test_genc import compile -from rpython.rtyper.lltypesystem import lltype -from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage,\ - raw_storage_setitem, raw_storage_getitem -from rpython.rtyper.test.tool import BaseRtypingTest def test_untranslated_storage(): + r = alloc_raw_storage(37) + raw_storage_setitem(r, 8, 1<<30) + res = raw_storage_getitem(lltype.Signed, r, 8) + assert res == 1<<30 + raw_storage_setitem(r, 8, 3.14) + res = raw_storage_getitem(lltype.Float, r, 8) + assert res == 3.14 + py.test.raises(AlignmentError, raw_storage_getitem, lltype.Signed, r, 3) + py.test.raises(AlignmentError, raw_storage_setitem, r, 3, 42.5) + free_raw_storage(r) + +def test_untranslated_storage_unaligned(monkeypatch): + monkeypatch.setattr(rawstorage, 'misaligned_is_fine', False) r = alloc_raw_storage(15) - raw_storage_setitem(r, 3, 1<<30) - res = raw_storage_getitem(lltype.Signed, r, 3) + raw_storage_setitem_unaligned(r, 3, 1<<30) + res = raw_storage_getitem_unaligned(lltype.Signed, r, 3) + assert res == 1<<30 + raw_storage_setitem_unaligned(r, 3, 3.14) + res = raw_storage_getitem_unaligned(lltype.Float, r, 3) + assert res == 3.14 free_raw_storage(r) - assert res == 1<<30 + class TestRawStorage(BaseRtypingTest): + def test_storage_int(self): def f(i): r = alloc_raw_storage(24) - raw_storage_setitem(r, 3, i) - 
res = raw_storage_getitem(lltype.Signed, r, 3) + raw_storage_setitem(r, 8, i) + res = raw_storage_getitem(lltype.Signed, r, 8) free_raw_storage(r) return res + x = self.interpret(f, [1<<30]) assert x == 1 << 30 + + def test_storage_float_unaligned(self, monkeypatch): + def f(v): + r = alloc_raw_storage(24) + raw_storage_setitem_unaligned(r, 3, v) + res = raw_storage_getitem_unaligned(lltype.Float, r, 3) + free_raw_storage(r) + return res + + monkeypatch.setattr(rawstorage, 'misaligned_is_fine', False) + x = self.interpret(f, [3.14]) + assert x == 3.14 + + +class TestCBackend(object): + + def test_backend_int(self): + def f(i): + r = alloc_raw_storage(24) + raw_storage_setitem(r, 8, i) + res = raw_storage_getitem(lltype.Signed, r, 8) + free_raw_storage(r) + return res != i + + fc = compile(f, [int]) + x = fc(-sys.maxint // 3) + assert x == 0 + + def test_backend_float_unaligned(self, monkeypatch): + def f(v): + r = alloc_raw_storage(24) + raw_storage_setitem_unaligned(r, 3, v) + res = raw_storage_getitem_unaligned(lltype.Float, r, 3) + free_raw_storage(r) + return res != v + + if monkeypatch is not None: + monkeypatch.setattr(rawstorage, 'misaligned_is_fine', False) + fc = compile(f, [float]) + x = fc(-3.14) + assert x == 0 + + def test_backend_float_unaligned_allow_misalign(self): + self.test_backend_float_unaligned(monkeypatch=None) From noreply at buildbot.pypy.org Sun Feb 23 14:25:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Feb 2014 14:25:16 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Tweaks Message-ID: <20140223132516.0B9571C303A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r812:027470d9d12b Date: 2014-02-23 14:25 +0100 http://bitbucket.org/pypy/stmgc/changeset/027470d9d12b/ Log: Tweaks diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -128,8 +128,14 @@ uint8_t old_rv = STM_SEGMENT->transaction_read_version; STM_SEGMENT->transaction_read_version = old_rv + 1; - if (UNLIKELY(old_rv == 0xff)) + if (UNLIKELY(old_rv >= 0xfe)) { + /* reset if transaction_read_version was 0xfe or 0xff. If it's + 0xff, then we need it because the new value would overflow to + 0. But resetting it already from 0xfe is better for short + or medium transactions: at the next minor collection we'll + still have one free number to increase to. */ reset_transaction_read_version(); + } STM_PSEGMENT->min_read_version_outside_nursery = STM_SEGMENT->transaction_read_version; diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -245,18 +245,19 @@ static void trace_and_drag_out_of_nursery(object_t *obj) { - if (is_in_shared_pages(obj)) { - /* the object needs fixing only in one copy, because all copies - are shared and identical. */ - char *realobj = (char *)REAL_ADDRESS(stm_object_pages, obj); + long i; + for (i = 0; i < NB_SEGMENTS; i++) { + struct object_s *realobj = + (struct object_s *)REAL_ADDRESS(get_segment_base(i), obj); + + realobj->stm_flags &= ~GCFLAG_WRITE_BARRIER_CALLED; + stmcb_trace((struct object_s *)realobj, &minor_trace_if_young); - } - else { - /* every segment needs fixing */ - long i; - for (i = 0; i < NB_SEGMENTS; i++) { - char *realobj = (char *)REAL_ADDRESS(get_segment_base(i), obj); - stmcb_trace((struct object_s *)realobj, &minor_trace_if_young); + + if (i == 0 && is_in_shared_pages(obj)) { + /* the object needs fixing only in one copy, because all copies + are shared and identical. 
*/ + break; } } } @@ -274,9 +275,9 @@ if ((obj->stm_flags & GCFLAG_WRITE_BARRIER_CALLED) == 0) continue; - /* Remove the flag GCFLAG_WRITE_BARRIER_CALLED. No live object - should have this flag set after a nursery collection. */ - obj->stm_flags &= ~GCFLAG_WRITE_BARRIER_CALLED; + /* The flag GCFLAG_WRITE_BARRIER_CALLED is going to be removed: + no live object should have this flag set after a nursery + collection. It is done in either one or NB_SEGMENTS copies. */ /* Trace the 'obj' to replace pointers to nursery with pointers outside the nursery, possibly forcing nursery objects out From noreply at buildbot.pypy.org Sun Feb 23 15:29:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Feb 2014 15:29:55 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: A first real test for nursery collection. Fails Message-ID: <20140223142955.1BFE01C03FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r813:07aa2f23e825 Date: 2014-02-23 14:40 +0100 http://bitbucket.org/pypy/stmgc/changeset/07aa2f23e825/ Log: A first real test for nursery collection. Fails diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -24,10 +24,6 @@ free_uniform_pages = NULL; } -//static void check_gcpage_still_shared(void) -//{ -// //...; -//} #define GCPAGE_NUM_PAGES 20 diff --git a/c7/stm/gcpage.h b/c7/stm/gcpage.h --- a/c7/stm/gcpage.h +++ b/c7/stm/gcpage.h @@ -43,7 +43,6 @@ static void setup_gcpage(void); static void teardown_gcpage(void); -//static void check_gcpage_still_shared(void); static char *allocate_outside_nursery_large(uint64_t size); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -29,6 +29,7 @@ static union { struct { uint64_t used; /* number of bytes from the nursery used so far */ + uint64_t initial_value_of_used; }; char reserved[64]; } nursery_ctl __attribute__((aligned(64))); @@ -51,6 +52,7 @@ static void teardown_nursery(void) { list_free(old_objects_pointing_to_young); + nursery_ctl.initial_value_of_used = 0; } static inline bool _is_in_nursery(object_t *obj) @@ -289,7 +291,7 @@ static void reset_nursery(void) { /* reset the global amount-of-nursery-used-so-far */ - nursery_ctl.used = 0; + nursery_ctl.used = nursery_ctl.initial_value_of_used; /* reset the write locks */ memset(write_locks + ((NURSERY_START >> 4) - READMARKER_START), @@ -332,7 +334,7 @@ if (old_end > NURSERY_START) { char *creation_markers = REAL_ADDRESS(other_pseg->pub.segment_base, NURSERY_START >> 8); - assert(old_end < NURSERY_START + NURSERY_SIZE); + assert(old_end <= NURSERY_START + NURSERY_SIZE); memset(creation_markers, 0, (old_end - NURSERY_START) >> 8); } else { @@ -371,8 +373,6 @@ information). 
*/ - //check_gcpage_still_shared(); - collect_roots_in_nursery(); long i; @@ -516,5 +516,6 @@ assert(free_count == NURSERY_ALIGN(free_count)); assert(nursery_ctl.used <= NURSERY_SIZE - free_count); nursery_ctl.used = NURSERY_SIZE - free_count; + nursery_ctl.initial_value_of_used = nursery_ctl.used; } #endif diff --git a/c7/test/test_nursery.py b/c7/test/test_nursery.py --- a/c7/test/test_nursery.py +++ b/c7/test/test_nursery.py @@ -58,10 +58,31 @@ self.pop_root() # self.push_root(lp1) - lp2 = stm_allocate(16) + lp2 = stm_allocate(SOME_MEDIUM_SIZE) lp1b = self.pop_root() assert lp1b != lp1 # collection occurred + def test_several_minor_collections(self): + # make a long, ever-growing linked list of objects, in one transaction + lib._stm_set_nursery_free_count(NURSERY_SECTION_SIZE * 2) + self.start_transaction() + lp1 = stm_allocate(16) + self.push_root(lp1) + lp2 = lp1 + N = (NURSERY_SECTION_SIZE * 5) / 16 + for i in range(N): + self.push_root(lp2) + lp3 = stm_allocate(16) + lp2 = self.pop_root() + stm_set_ref(lp2, 0, lp3) + lp2 = lp3 + lp1 = self.pop_root() + lp2 = lp1 + for i in range(N): + assert lp2 + lp2 = stm_get_ref(lp2, 0) + assert lp2 == lp3 + def test_many_allocs(self): obj_size = 1024 num = (lib.NB_NURSERY_PAGES * 4096) / obj_size + 100 # more than what fits in the nursery From noreply at buildbot.pypy.org Sun Feb 23 15:29:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Feb 2014 15:29:56 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Fixes for the test. Message-ID: <20140223142956.208651C03FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r814:9a45fb18c2da Date: 2014-02-23 15:29 +0100 http://bitbucket.org/pypy/stmgc/changeset/9a45fb18c2da/ Log: Fixes for the test. diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -52,3 +52,24 @@ { return ((stm_creation_marker_t *)(((uintptr_t)obj) >> 8))->cm; } + +#ifdef STM_TESTS +object_t *_stm_enum_old_objects_pointing_to_young(void) +{ + static long index = 0; + struct list_s *lst = STM_PSEGMENT->old_objects_pointing_to_young; + if (index < list_count(lst)) + return (object_t *)list_item(lst, index++); + index = 0; + return (object_t *)-1; +} +object_t *_stm_enum_modified_objects(void) +{ + static long index = 0; + struct list_s *lst = STM_PSEGMENT->modified_objects; + if (index < list_count(lst)) + return (object_t *)list_item(lst, index++); + index = 0; + return (object_t *)-1; +} +#endif diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -159,6 +159,7 @@ /* Copy the object to segment 0 (as a first step) */ memcpy(copyobj, realobj, size); + ((struct object_s *)copyobj)->stm_flags |= GCFLAG_WRITE_BARRIER_CALLED; nobj = (object_t *)(copyobj - stm_object_pages); @@ -193,6 +194,8 @@ for (i = 1; i < NB_SEGMENTS; i++) { uintptr_t diff = get_segment_base(i) - stm_object_pages; memcpy(copyobj + diff, realobj + diff, size); + ((struct object_s *)(copyobj + diff))->stm_flags |= + GCFLAG_WRITE_BARRIER_CALLED; } } @@ -224,6 +227,7 @@ } /* Done copying the object. 
*/ + //dprintf(("%p -> %p\n", obj, nobj)); pforwarded_array[0] = GCWORD_MOVED; pforwarded_array[1] = nobj; *pobj = nobj; diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -98,6 +98,8 @@ void _stm_start_safe_point(void); void _stm_stop_safe_point(void); void _stm_set_nursery_free_count(uint64_t free_count); +object_t *_stm_enum_old_objects_pointing_to_young(void); +object_t *_stm_enum_modified_objects(void); #endif #define _STM_GCFLAG_WRITE_BARRIER_CALLED 0x80 diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -76,50 +76,12 @@ void _stm_set_nursery_free_count(uint64_t free_count); ssize_t stmcb_size_rounded_up(struct object_s *obj); + +object_t *_stm_enum_old_objects_pointing_to_young(void); +object_t *_stm_enum_modified_objects(void); """) -TEMPORARILY_DISABLED = """ -void stm_start_inevitable_transaction(stm_thread_local_t *tl); - -void _stm_minor_collect(); - -void *memset(void *s, int c, size_t n); -extern size_t stmcb_size(struct object_s *); -extern void stmcb_trace(struct object_s *, void (object_t **)); - -uint8_t _stm_get_flags(object_t *obj); -uint8_t stm_get_page_flag(int pagenum); -enum { - SHARED_PAGE=0, - REMAPPING_PAGE, - PRIVATE_PAGE, -}; /* flag_page_private */ - -enum { - GCFLAG_WRITE_BARRIER = 1, - GCFLAG_NOT_COMMITTED = 2, - GCFLAG_MOVED = 4, -}; - -void stm_largemalloc_init(char *data_start, size_t data_size); -int stm_largemalloc_resize_arena(size_t new_size); - -object_t *stm_large_malloc(size_t request_size); -void stm_large_free(object_t *data); - -void _stm_large_dump(void); -char *_stm_largemalloc_data_start(void); - -void _stm_move_object(object_t* obj, char *src, char *dst); -size_t _stm_data_size(struct object_s *data); -void _stm_chunk_pages(struct object_s *data, uintptr_t *start, uintptr_t *num); - -void stm_become_inevitable(char* msg); -void stm_start_inevitable_transaction(); -""" - - lib = ffi.verify(''' #include #include @@ -222,6 +184,9 @@ void _set_ptr(object_t *obj, int n, object_t *v) { + int nrefs = ((myobj_t*)obj)->type_id - 421420; + assert(n < nrefs); + stm_char *field_addr = ((stm_char*)obj); field_addr += SIZEOF_MYOBJ; /* header */ field_addr += n * sizeof(void*); /* field */ @@ -231,6 +196,9 @@ object_t * _get_ptr(object_t *obj, int n) { + int nrefs = ((myobj_t*)obj)->type_id - 421420; + assert(n < nrefs); + stm_char *field_addr = ((stm_char*)obj); field_addr += SIZEOF_MYOBJ; /* header */ field_addr += n * sizeof(void*); /* field */ @@ -396,6 +364,15 @@ def stm_get_flags(o): return lib._stm_get_flags(o) +def old_objects_pointing_to_young(): + return list(iter(lib._stm_enum_old_objects_pointing_to_young, + ffi.cast("object_t *", -1))) + +def modified_objects(): + return list(iter(lib._stm_enum_modified_objects, + ffi.cast("object_t *", -1))) + + SHADOWSTACK_LENGTH = 100 _keepalive = weakref.WeakKeyDictionary() diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -50,6 +50,7 @@ assert stm_was_written(lp1) stm_write(lp1) assert stm_was_written(lp1) + assert modified_objects() == [] # because same transaction self.commit_transaction() def test_allocate_old(self): @@ -83,7 +84,9 @@ # self.switch(1) self.start_transaction() + assert modified_objects() == [] stm_write(lp1) + assert modified_objects() == [lp1] assert stm_get_char(lp1) == 'a' stm_set_char(lp1, 'b') # diff --git a/c7/test/test_nursery.py b/c7/test/test_nursery.py --- a/c7/test/test_nursery.py +++ b/c7/test/test_nursery.py @@ -66,20 +66,41 @@ # 
make a long, ever-growing linked list of objects, in one transaction lib._stm_set_nursery_free_count(NURSERY_SECTION_SIZE * 2) self.start_transaction() - lp1 = stm_allocate(16) + lp1 = stm_allocate_refs(1) self.push_root(lp1) - lp2 = lp1 - N = (NURSERY_SECTION_SIZE * 5) / 16 + prev = lp1 + prevprev = None + FIT = (NURSERY_SECTION_SIZE * 2) / 16 - 1 # without 'lp1' above + N = (NURSERY_SECTION_SIZE * 4) / 16 + 41 for i in range(N): - self.push_root(lp2) - lp3 = stm_allocate(16) - lp2 = self.pop_root() - stm_set_ref(lp2, 0, lp3) - lp2 = lp3 + if prevprev: + assert stm_get_ref(prevprev, 0) == prev + self.push_root(prevprev) + self.push_root(prev) + lp3 = stm_allocate_refs(1) + prev = self.pop_root() + if prevprev: + prevprev = self.pop_root() + assert prevprev != prev + stm_set_ref(prev, 0, lp3) + prevprev = prev + prev = lp3 + + seeme = old_objects_pointing_to_young() + if i < FIT: + assert len(seeme) == 0 # no minor collection so far + else: + assert len(seeme) == 1 # the one from the prev minor coll + lp1 = self.pop_root() + assert modified_objects() == [] + lp2 = lp1 for i in range(N): assert lp2 + assert stm_creation_marker(lp2) == (0xff if is_in_nursery(lp2) + else 0x01) + prev = lp2 lp2 = stm_get_ref(lp2, 0) assert lp2 == lp3 From noreply at buildbot.pypy.org Sun Feb 23 16:25:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Feb 2014 16:25:47 +0100 (CET) Subject: [pypy-commit] pypy default: Fix a warning in the C code Message-ID: <20140223152547.BB9C01C03FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69284:9535a395a13f Date: 2014-02-23 16:25 +0100 http://bitbucket.org/pypy/pypy/changeset/9535a395a13f/ Log: Fix a warning in the C code diff --git a/rpython/rlib/_stacklet_asmgcc.py b/rpython/rlib/_stacklet_asmgcc.py --- a/rpython/rlib/_stacklet_asmgcc.py +++ b/rpython/rlib/_stacklet_asmgcc.py @@ -189,7 +189,7 @@ pypy_asm_stackwalk2 = rffi.llexternal('pypy_asm_stackwalk', [FUNCNOARG_P, ASM_FRAMEDATA_HEAD_PTR], - _c.handle, sandboxsafe=True, + lltype.Signed, sandboxsafe=True, _nowrapper=True) @@ -273,6 +273,7 @@ # h = pypy_asm_stackwalk2(llhelper(FUNCNOARG_P, _new_callback), alternateanchor) + h = rffi.cast(_c.handle, h) # llop.gc_reattach_callback_pieces(lltype.Void, callback_pieces) return self.get_result_suspstack(h) @@ -292,6 +293,7 @@ # h = pypy_asm_stackwalk2(llhelper(FUNCNOARG_P, _switch_callback), alternateanchor) + h = rffi.cast(_c.handle, h) # llop.gc_reattach_callback_pieces(lltype.Void, callback_pieces) if not h: From noreply at buildbot.pypy.org Sun Feb 23 16:32:46 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Feb 2014 16:32:46 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Next test passes Message-ID: <20140223153246.8C2A21C303A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r815:8a68e946b423 Date: 2014-02-23 15:34 +0100 http://bitbucket.org/pypy/stmgc/changeset/8a68e946b423/ Log: Next test passes diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -373,7 +373,7 @@ ffi.cast("object_t *", -1))) -SHADOWSTACK_LENGTH = 100 +SHADOWSTACK_LENGTH = 1000 _keepalive = weakref.WeakKeyDictionary() def _allocate_thread_local(): diff --git a/c7/test/test_nursery.py b/c7/test/test_nursery.py --- a/c7/test/test_nursery.py +++ b/c7/test/test_nursery.py @@ -105,18 +105,21 @@ assert lp2 == lp3 def test_many_allocs(self): + lib._stm_set_nursery_free_count(NURSERY_SECTION_SIZE * 2) obj_size = 1024 - num = (lib.NB_NURSERY_PAGES * 4096) / obj_size + 100 
# more than what fits in the nursery + num = (NURSERY_SECTION_SIZE * 4) / obj_size + 41 self.start_transaction() for i in range(num): new = stm_allocate(obj_size) + stm_set_char(new, chr(i % 255)) self.push_root(new) old = [] young = [] - for _ in range(num): + for i in reversed(range(num)): r = self.pop_root() + assert stm_get_char(r) == chr(i % 255) if is_in_nursery(r): young.append(r) else: From noreply at buildbot.pypy.org Sun Feb 23 16:32:47 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Feb 2014 16:32:47 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Failing test Message-ID: <20140223153247.930471C303A@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r816:8cbb49ebda1c Date: 2014-02-23 16:32 +0100 http://bitbucket.org/pypy/stmgc/changeset/8cbb49ebda1c/ Log: Failing test diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -413,7 +413,7 @@ /* We just waited here, either from mutex_lock() or from cond_wait(), so we should check again if another thread did the minor collection itself */ - if (nursery_ctl.used + request_size <= NURSERY_SIZE) + if (request_size <= NURSERY_SIZE - nursery_ctl.used) goto exit; if (!try_wait_for_other_safe_points(SP_SAFE_POINT_CAN_COLLECT)) @@ -428,6 +428,12 @@ mutex_unlock(); } +void stm_collect(long level) +{ + assert(level == 0); + stm_minor_collection(-1); +} + /************************************************************/ diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -254,6 +254,9 @@ _stm_collectable_safe_point(); } +/* Forces a collection. */ +void stm_collect(long level); + /* ==================== END ==================== */ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -79,6 +79,8 @@ object_t *_stm_enum_old_objects_pointing_to_young(void); object_t *_stm_enum_modified_objects(void); + +void stm_collect(long level); """) @@ -348,7 +350,7 @@ raise Conflict() def stm_minor_collect(): - lib._stm_minor_collect() + lib.stm_collect(0) def stm_get_page_flag(pagenum): return lib.stm_get_page_flag(pagenum) diff --git a/c7/test/test_nursery.py b/c7/test/test_nursery.py --- a/c7/test/test_nursery.py +++ b/c7/test/test_nursery.py @@ -129,11 +129,16 @@ assert young def test_larger_than_section(self): - obj_size = lib.NURSERY_SECTION + 16 + obj_size = NURSERY_SECTION_SIZE + 16 self.start_transaction() - new = stm_allocate(obj_size) - assert not is_in_nursery(new) + seen = set() + for i in range(10): + stm_minor_collect() + new = stm_allocate(obj_size) + assert not is_in_nursery(new) + seen.add(new) + assert len(seen) < 5 # addresses are reused def test_reset_partial_alloc_pages(self): self.start_transaction() From noreply at buildbot.pypy.org Sun Feb 23 20:02:33 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 23 Feb 2014 20:02:33 +0100 (CET) Subject: [pypy-commit] pypy default: fix segfault on np.fromstring of record type Message-ID: <20140223190233.741C61C303A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69288:cd632d18a772 Date: 2014-02-23 13:50 -0500 http://bitbucket.org/pypy/pypy/changeset/cd632d18a772/ Log: fix segfault on np.fromstring of record type diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3058,6 +3058,7 @@ v = fromstring("abcd", dtype="|S2") assert v[0] == "ab" assert v[1] 
== "cd" + v = fromstring('@\x01\x99\x99\x99\x99\x99\x9a\xbf\xf1\x99\x99\x99\x99\x99\x9a', dtype=dtype('>c16')) assert v.tostring() == \ @@ -3073,6 +3074,18 @@ assert v.real == 2.2 assert v.imag == -1.1 + d = [('f0', 'i4'), ('f1', 'u2', (2, 3))] + if '__pypy__' not in sys.builtin_module_names: + r = fromstring('abcdefghijklmnop'*4*3, dtype=d) + assert (r[0:3:2]['f1'] == r['f1'][0:3:2]).all() + assert (r[0:3:2]['f1'][0] == r[0:3:2][0]['f1']).all() + assert (r[0:3:2]['f1'][0][()] == r[0:3:2][0]['f1'][()]).all() + assert r[0:3:2]['f1'][0].strides == r[0:3:2][0]['f1'].strides + else: + exc = raises(NotImplementedError, fromstring, + 'abcdefghijklmnop'*4*3, dtype=d) + assert exc.value[0] == "fromstring not implemented for record types" + def test_fromstring_types(self): from numpypy import fromstring, array, dtype a = fromstring('\xFF', dtype='int8') diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1,7 +1,7 @@ import functools import math -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.module.micronumpy import interp_boxes from pypy.module.micronumpy import support from pypy.module.micronumpy.arrayimpl.voidbox import VoidBoxStorage @@ -1897,6 +1897,10 @@ itemtype.store(arr, 0, ofs, w_box) return interp_boxes.W_VoidBox(arr, 0, dtype) + def runpack_str(self, space, s): + raise oefmt(space.w_NotImplementedError, + "fromstring not implemented for record types") + def store(self, arr, i, ofs, box): assert isinstance(box, interp_boxes.W_VoidBox) self._store(arr.storage, i, ofs, box, box.dtype.get_size()) From noreply at buildbot.pypy.org Sun Feb 23 20:02:32 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 23 Feb 2014 20:02:32 +0100 (CET) Subject: [pypy-commit] pypy default: fix dtype from commastring Message-ID: <20140223190232.005621C303A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69287:cf3918b33b35 Date: 2014-02-23 03:26 -0500 http://bitbucket.org/pypy/pypy/changeset/cf3918b33b35/ Log: fix dtype from commastring diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -357,26 +357,31 @@ self.w_box_type, endian, size=self.size) -def dtype_from_list(space, w_lst): + at specialize.arg(2) +def dtype_from_list(space, w_lst, simple): lst_w = space.listview(w_lst) fields = {} offset = 0 fieldnames = [] for i in range(len(lst_w)): w_elem = lst_w[i] - w_shape = space.newtuple([]) - if space.len_w(w_elem) == 3: - w_fldname, w_flddesc, w_shape = space.fixedview(w_elem) - if not base.issequence_w(space, w_shape): - w_shape = space.newtuple([w_shape]) + if simple: + subdtype = descr__new__(space, space.gettypefor(W_Dtype), w_elem) + fldname = 'f%d' % i else: - w_fldname, w_flddesc = space.fixedview(w_elem, 2) - subdtype = descr__new__(space, space.gettypefor(W_Dtype), w_flddesc, w_shape=w_shape) - fldname = space.str_w(w_fldname) - if fldname == '': - fldname = 'f%d' % i - if fldname in fields: - raise oefmt(space.w_ValueError, "two fields with the same name") + w_shape = space.newtuple([]) + if space.len_w(w_elem) == 3: + w_fldname, w_flddesc, w_shape = space.fixedview(w_elem) + if not base.issequence_w(space, w_shape): + w_shape = space.newtuple([w_shape]) + else: + w_fldname, w_flddesc = space.fixedview(w_elem, 2) + subdtype = descr__new__(space, 
space.gettypefor(W_Dtype), w_flddesc, w_shape=w_shape) + fldname = space.str_w(w_fldname) + if fldname == '': + fldname = 'f%d' % i + if fldname in fields: + raise oefmt(space.w_ValueError, "two fields with the same name") assert isinstance(subdtype, W_Dtype) fields[fldname] = (offset, subdtype) offset += subdtype.get_size() @@ -403,7 +408,7 @@ return descr__new__(space, space.gettypefor(W_Dtype), space.getitem(w_lst, space.wrap(0))) else: - return dtype_from_list(space, w_lst) + return dtype_from_list(space, w_lst, True) def descr__new__(space, w_subtype, w_dtype, w_align=None, w_copy=None, w_shape=None): @@ -446,7 +451,7 @@ return variable_dtype(space, name) raise oefmt(space.w_TypeError, 'data type "%s" not understood', name) elif space.isinstance_w(w_dtype, space.w_list): - return dtype_from_list(space, w_dtype) + return dtype_from_list(space, w_dtype, False) elif space.isinstance_w(w_dtype, space.w_tuple): w_dtype0 = space.getitem(w_dtype, space.wrap(0)) w_dtype1 = space.getitem(w_dtype, space.wrap(1)) From noreply at buildbot.pypy.org Sun Feb 23 20:02:30 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 23 Feb 2014 20:02:30 +0100 (CET) Subject: [pypy-commit] pypy default: support dtype from commastring Message-ID: <20140223190230.A04801C303A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69286:b744291da355 Date: 2014-02-23 02:39 -0500 http://bitbucket.org/pypy/pypy/changeset/b744291da355/ Log: support dtype from commastring diff --git a/pypy/module/micronumpy/appbridge.py b/pypy/module/micronumpy/appbridge.py --- a/pypy/module/micronumpy/appbridge.py +++ b/pypy/module/micronumpy/appbridge.py @@ -4,6 +4,7 @@ w__mean = None w__var = None w__std = None + w__commastring = None w_array_repr = None w_array_str = None diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -1,3 +1,4 @@ +from pypy.interpreter.argument import Arguments from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -8,6 +9,7 @@ from rpython.rlib.rarithmetic import LONG_BIT, r_longlong, r_ulonglong from rpython.rtyper.lltypesystem import rffi from rpython.rlib import jit +from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy.conversion_utils import byteorder_converter from pypy.module.micronumpy.constants import * @@ -391,9 +393,17 @@ "dtype from dict")) -def dtype_from_spec(space, name): - raise OperationError(space.w_NotImplementedError, space.wrap( - "dtype from spec")) +def dtype_from_spec(space, w_spec): + w_lst = get_appbridge_cache(space).call_method(space, + 'numpy.core._internal', '_commastring', Arguments(space, [w_spec])) + if not space.isinstance_w(w_lst, space.w_list) or space.len_w(w_lst) < 1: + raise oefmt(space.w_RuntimeError, + "_commastring is not returning a list with len >= 1") + if space.len_w(w_lst) == 1: + return descr__new__(space, space.gettypefor(W_Dtype), + space.getitem(w_lst, space.wrap(0))) + else: + return dtype_from_list(space, w_lst) def descr__new__(space, w_subtype, w_dtype, w_align=None, w_copy=None, w_shape=None): @@ -427,7 +437,7 @@ elif space.isinstance_w(w_dtype, space.w_str): name = space.str_w(w_dtype) if ',' in name: - return dtype_from_spec(space, name) + return dtype_from_spec(space, w_dtype) try: return cache.dtypes_by_name[name] except KeyError: diff --git 
a/pypy/module/micronumpy/test/test_appbridge.py b/pypy/module/micronumpy/test/test_appbridge.py --- a/pypy/module/micronumpy/test/test_appbridge.py +++ b/pypy/module/micronumpy/test/test_appbridge.py @@ -9,3 +9,10 @@ op() except ImportError as e: assert str(e) == 'No module named numpy.core' + + def test_dtype_commastring(self): + import numpy as np + try: + d = np.dtype('u4,u4,u4') + except ImportError as e: + assert str(e) == 'No module named numpy.core' From noreply at buildbot.pypy.org Sun Feb 23 20:02:29 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 23 Feb 2014 20:02:29 +0100 (CET) Subject: [pypy-commit] pypy default: generalize numpy appbridge Message-ID: <20140223190229.5D08D1C303A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69285:67f86a9b9918 Date: 2014-02-23 03:42 -0500 http://bitbucket.org/pypy/pypy/changeset/67f86a9b9918/ Log: generalize numpy appbridge diff --git a/pypy/module/micronumpy/appbridge.py b/pypy/module/micronumpy/appbridge.py --- a/pypy/module/micronumpy/appbridge.py +++ b/pypy/module/micronumpy/appbridge.py @@ -4,28 +4,20 @@ w__mean = None w__var = None w__std = None - w_module = None w_array_repr = None w_array_str = None def __init__(self, space): - self.w_import = space.appexec([], """(): - def f(): - import sys - __import__('numpy.core._methods') - return sys.modules['numpy.core._methods'] - return f - """) + pass - @specialize.arg(2) - def call_method(self, space, name, w_obj, args): - w_meth = getattr(self, 'w_' + name) - if w_meth is None: - if self.w_module is None: - self.w_module = space.call_function(self.w_import) - w_meth = space.getattr(self.w_module, space.wrap(name)) - setattr(self, 'w_' + name, w_meth) - return space.call_args(w_meth, args.prepend(w_obj)) + @specialize.arg(3) + def call_method(self, space, path, name, args): + w_method = getattr(self, 'w_' + name) + if w_method is None: + w_method = space.appexec([space.wrap(path), space.wrap(name)], + "(path, name): return getattr(__import__(path, fromlist=[name]), name)") + setattr(self, 'w_' + name, w_method) + return space.call_args(w_method, args) def set_string_function(space, w_f, w_repr): cache = get_appbridge_cache(space) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -969,13 +969,16 @@ other_critical_dim) def descr_mean(self, space, __args__): - return get_appbridge_cache(space).call_method(space, '_mean', self, __args__) + return get_appbridge_cache(space).call_method(space, + 'numpy.core._methods', '_mean', __args__.prepend(self)) def descr_var(self, space, __args__): - return get_appbridge_cache(space).call_method(space, '_var', self, __args__) + return get_appbridge_cache(space).call_method(space, + 'numpy.core._methods', '_var', __args__.prepend(self)) def descr_std(self, space, __args__): - return get_appbridge_cache(space).call_method(space, '_std', self, __args__) + return get_appbridge_cache(space).call_method(space, + 'numpy.core._methods', '_std', __args__.prepend(self)) # ----------------------- reduce ------------------------------- diff --git a/pypy/module/micronumpy/test/test_appbridge.py b/pypy/module/micronumpy/test/test_appbridge.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_appbridge.py @@ -0,0 +1,11 @@ +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + +class AppTestAppBridge(BaseNumpyAppTest): + def test_array_methods(self): + 
import numpy as np + a = np.array(1.5) + for op in [a.mean, a.var, a.std]: + try: + op() + except ImportError as e: + assert str(e) == 'No module named numpy.core' From noreply at buildbot.pypy.org Sun Feb 23 20:02:34 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 23 Feb 2014 20:02:34 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140223190234.A73CB1C303A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69289:d674245526d9 Date: 2014-02-23 14:01 -0500 http://bitbucket.org/pypy/pypy/changeset/d674245526d9/ Log: merge heads diff --git a/lib-python/2.7/threading.py b/lib-python/2.7/threading.py --- a/lib-python/2.7/threading.py +++ b/lib-python/2.7/threading.py @@ -244,22 +244,11 @@ if __debug__: self._note("%s.wait(): got it", self) else: - # Balancing act: We can't afford a pure busy loop, so we - # have to sleep; but if we sleep the whole timeout time, - # we'll be unresponsive. The scheme here sleeps very - # little at first, longer as time goes on, but never longer - # than 20 times per second (or the timeout time remaining). - endtime = _time() + timeout - delay = 0.0005 # 500 us -> initial delay of 1 ms - while True: - gotit = waiter.acquire(0) - if gotit: - break - remaining = endtime - _time() - if remaining <= 0: - break - delay = min(delay * 2, remaining, .05) - _sleep(delay) + # PyPy patch: use _py3k_acquire() + if timeout > 0: + gotit = waiter._py3k_acquire(True, timeout) + else: + gotit = waiter.acquire(False) if not gotit: if __debug__: self._note("%s.wait(%s): timed out", self, timeout) diff --git a/rpython/rlib/_stacklet_asmgcc.py b/rpython/rlib/_stacklet_asmgcc.py --- a/rpython/rlib/_stacklet_asmgcc.py +++ b/rpython/rlib/_stacklet_asmgcc.py @@ -189,7 +189,7 @@ pypy_asm_stackwalk2 = rffi.llexternal('pypy_asm_stackwalk', [FUNCNOARG_P, ASM_FRAMEDATA_HEAD_PTR], - _c.handle, sandboxsafe=True, + lltype.Signed, sandboxsafe=True, _nowrapper=True) @@ -273,6 +273,7 @@ # h = pypy_asm_stackwalk2(llhelper(FUNCNOARG_P, _new_callback), alternateanchor) + h = rffi.cast(_c.handle, h) # llop.gc_reattach_callback_pieces(lltype.Void, callback_pieces) return self.get_result_suspstack(h) @@ -292,6 +293,7 @@ # h = pypy_asm_stackwalk2(llhelper(FUNCNOARG_P, _switch_callback), alternateanchor) + h = rffi.cast(_c.handle, h) # llop.gc_reattach_callback_pieces(lltype.Void, callback_pieces) if not h: diff --git a/rpython/rlib/rawstorage.py b/rpython/rlib/rawstorage.py --- a/rpython/rlib/rawstorage.py +++ b/rpython/rlib/rawstorage.py @@ -18,17 +18,87 @@ def raw_storage_getitem(TP, storage, index): "NOT_RPYTHON" + _check_alignment(TP, index) return rffi.cast(rffi.CArrayPtr(TP), rffi.ptradd(storage, index))[0] def raw_storage_setitem(storage, index, item): "NOT_RPYTHON" - TP = rffi.CArrayPtr(lltype.typeOf(item)) - rffi.cast(TP, rffi.ptradd(storage, index))[0] = item + TP = lltype.typeOf(item) + _check_alignment(TP, index) + rffi.cast(rffi.CArrayPtr(TP), rffi.ptradd(storage, index))[0] = item @specialize.arg(1) def free_raw_storage(storage, track_allocation=True): lltype.free(storage, flavor='raw', track_allocation=track_allocation) +# ____________________________________________________________ +# +# Support for possibly-unaligned accesses + +from rpython.jit.backend import detect_cpu +try: + misaligned_is_fine = detect_cpu.autodetect().startswith('x86') +except detect_cpu.ProcessorAutodetectError: + misaligned_is_fine = False + + +class AlignmentError(NotImplementedError): + "Means that raw_storage_{get,set}item was used on unaligned memory" + 
+# Tweak? It seems a reasonable value for any system out there: requiring +# an aligned access to be up to 8-bytes-aligned, even for 64-bit data +# types on 32-bit systems. +MAXIMUM_ALIGNMENT = 8 + + at specialize.memo() +def _get_alignment_mask(TP): + size = rffi.sizeof(TP) + alignment = 1 + while (size & alignment) == 0 and alignment < MAXIMUM_ALIGNMENT: + alignment *= 2 + return alignment - 1 + +def _check_alignment(TP, index): + """Check that the 'index' does indeed have the maximum alignment + for the given type.""" + mask = _get_alignment_mask(TP) + if (index & mask) != 0: + raise AlignmentError + + at specialize.ll() +def raw_storage_getitem_unaligned(TP, storage, index): + if misaligned_is_fine: + return raw_storage_getitem(TP, storage, index) + mask = _get_alignment_mask(TP) + if (index & mask) == 0: + return raw_storage_getitem(TP, storage, index) + ptr = rffi.ptradd(storage, index) + with lltype.scoped_alloc(rffi.CArray(TP), 1) as s_array: + rffi.c_memcpy(rffi.cast(rffi.VOIDP, s_array), + rffi.cast(rffi.VOIDP, ptr), + rffi.sizeof(TP)) + return rffi.cast(rffi.CArrayPtr(TP), s_array)[0] + + at specialize.ll() +def raw_storage_setitem_unaligned(storage, index, item): + if misaligned_is_fine: + raw_storage_setitem(storage, index, item) + return + TP = lltype.typeOf(item) + mask = _get_alignment_mask(TP) + if (index & mask) == 0: + raw_storage_setitem(storage, index, item) + return + ptr = rffi.ptradd(storage, index) + with lltype.scoped_alloc(rffi.CArray(TP), 1) as s_array: + rffi.cast(rffi.CArrayPtr(TP), s_array)[0] = item + rffi.c_memcpy(rffi.cast(rffi.VOIDP, ptr), + rffi.cast(rffi.VOIDP, s_array), + rffi.sizeof(TP)) + +# ____________________________________________________________ + + class RawStorageGetitemEntry(ExtRegistryEntry): _about_ = raw_storage_getitem diff --git a/rpython/rlib/test/test_rawstorage.py b/rpython/rlib/test/test_rawstorage.py --- a/rpython/rlib/test/test_rawstorage.py +++ b/rpython/rlib/test/test_rawstorage.py @@ -1,23 +1,91 @@ +import py +import sys +from rpython.rtyper.lltypesystem import lltype +from rpython.rlib import rawstorage +from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage,\ + raw_storage_setitem, raw_storage_getitem, AlignmentError,\ + raw_storage_setitem_unaligned, raw_storage_getitem_unaligned +from rpython.rtyper.test.tool import BaseRtypingTest +from rpython.translator.c.test.test_genc import compile -from rpython.rtyper.lltypesystem import lltype -from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage,\ - raw_storage_setitem, raw_storage_getitem -from rpython.rtyper.test.tool import BaseRtypingTest def test_untranslated_storage(): + r = alloc_raw_storage(37) + raw_storage_setitem(r, 8, 1<<30) + res = raw_storage_getitem(lltype.Signed, r, 8) + assert res == 1<<30 + raw_storage_setitem(r, 8, 3.14) + res = raw_storage_getitem(lltype.Float, r, 8) + assert res == 3.14 + py.test.raises(AlignmentError, raw_storage_getitem, lltype.Signed, r, 3) + py.test.raises(AlignmentError, raw_storage_setitem, r, 3, 42.5) + free_raw_storage(r) + +def test_untranslated_storage_unaligned(monkeypatch): + monkeypatch.setattr(rawstorage, 'misaligned_is_fine', False) r = alloc_raw_storage(15) - raw_storage_setitem(r, 3, 1<<30) - res = raw_storage_getitem(lltype.Signed, r, 3) + raw_storage_setitem_unaligned(r, 3, 1<<30) + res = raw_storage_getitem_unaligned(lltype.Signed, r, 3) + assert res == 1<<30 + raw_storage_setitem_unaligned(r, 3, 3.14) + res = raw_storage_getitem_unaligned(lltype.Float, r, 3) + assert res == 3.14 
free_raw_storage(r) - assert res == 1<<30 + class TestRawStorage(BaseRtypingTest): + def test_storage_int(self): def f(i): r = alloc_raw_storage(24) - raw_storage_setitem(r, 3, i) - res = raw_storage_getitem(lltype.Signed, r, 3) + raw_storage_setitem(r, 8, i) + res = raw_storage_getitem(lltype.Signed, r, 8) free_raw_storage(r) return res + x = self.interpret(f, [1<<30]) assert x == 1 << 30 + + def test_storage_float_unaligned(self, monkeypatch): + def f(v): + r = alloc_raw_storage(24) + raw_storage_setitem_unaligned(r, 3, v) + res = raw_storage_getitem_unaligned(lltype.Float, r, 3) + free_raw_storage(r) + return res + + monkeypatch.setattr(rawstorage, 'misaligned_is_fine', False) + x = self.interpret(f, [3.14]) + assert x == 3.14 + + +class TestCBackend(object): + + def test_backend_int(self): + def f(i): + r = alloc_raw_storage(24) + raw_storage_setitem(r, 8, i) + res = raw_storage_getitem(lltype.Signed, r, 8) + free_raw_storage(r) + return res != i + + fc = compile(f, [int]) + x = fc(-sys.maxint // 3) + assert x == 0 + + def test_backend_float_unaligned(self, monkeypatch): + def f(v): + r = alloc_raw_storage(24) + raw_storage_setitem_unaligned(r, 3, v) + res = raw_storage_getitem_unaligned(lltype.Float, r, 3) + free_raw_storage(r) + return res != v + + if monkeypatch is not None: + monkeypatch.setattr(rawstorage, 'misaligned_is_fine', False) + fc = compile(f, [float]) + x = fc(-3.14) + assert x == 0 + + def test_backend_float_unaligned_allow_misalign(self): + self.test_backend_float_unaligned(monkeypatch=None) From noreply at buildbot.pypy.org Sun Feb 23 21:06:50 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 23 Feb 2014 21:06:50 +0100 (CET) Subject: [pypy-commit] pypy default: fix fill for complex with non-native byteorder Message-ID: <20140223200650.EA1421C3599@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69290:793aeb9704c0 Date: 2014-02-23 15:05 -0500 http://bitbucket.org/pypy/pypy/changeset/793aeb9704c0/ Log: fix fill for complex with non-native byteorder diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2684,7 +2684,7 @@ assert arange(3)[array(1)] == 1 def test_fill(self): - from numpypy import array, empty + from numpypy import array, empty, dtype, zeros a = array([1, 2, 3]) a.fill(10) assert (a == [10, 10, 10]).all() @@ -2721,6 +2721,11 @@ else: assert tuple(i) == (123,) * 5 + a = zeros(3, dtype=dtype(complex).newbyteorder()) + a.fill(1.5+2.5j) + for i in a: + assert i == 1.5+2.5j + def test_array_indexing_bool(self): from numpypy import arange a = arange(10) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1054,13 +1054,6 @@ op = '+' if imag >= 0 or rfloat.isnan(imag) else '' return ''.join(['(', real_str, op, imag_str, ')']) - def fill(self, storage, width, box, start, stop, offset): - real, imag = self.unbox(box) - for i in xrange(start, stop, width): - raw_storage_setitem(storage, i+offset, real) - raw_storage_setitem(storage, - i+offset+rffi.sizeof(self.T), imag) - def runpack_str(self, space, s): comp = self.ComponentBoxType._get_dtype(space).itemtype l = len(s) // 2 @@ -1149,6 +1142,11 @@ def store(self, arr, i, offset, box): self._write(arr.storage, i, offset, self.unbox(box)) + def fill(self, storage, width, box, start, stop, offset): + value = self.unbox(box) + for 
i in xrange(start, stop, width): + self._write(storage, i, offset, value) + @complex_binary_op def add(self, v1, v2): return rcomplex.c_add(v1, v2) From noreply at buildbot.pypy.org Sun Feb 23 21:29:54 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 23 Feb 2014 21:29:54 +0100 (CET) Subject: [pypy-commit] pypy default: make _get_dtype elidable Message-ID: <20140223202954.51FC61C02EA@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69291:cd6a5bc9740c Date: 2014-02-23 22:29 +0200 http://bitbucket.org/pypy/pypy/changeset/cd6a5bc9740c/ Log: make _get_dtype elidable diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -33,6 +33,7 @@ def new_dtype_getter(name): + @jit.elidable def _get_dtype(space): from pypy.module.micronumpy.interp_dtype import get_dtype_cache return get_dtype_cache(space).dtypes_by_name[name] From noreply at buildbot.pypy.org Sun Feb 23 21:37:45 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 23 Feb 2014 21:37:45 +0100 (CET) Subject: [pypy-commit] pypy default: I hate import * Message-ID: <20140223203745.0303F1C03FC@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69292:15169d58e5d5 Date: 2014-02-23 22:37 +0200 http://bitbucket.org/pypy/pypy/changeset/15169d58e5d5/ Log: I hate import * diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -16,6 +16,7 @@ from pypy.interpreter.mixedmodule import MixedModule from rpython.rtyper.lltypesystem import lltype from rpython.rlib.rstring import StringBuilder +from rpython.rlib import jit from pypy.module.micronumpy.constants import * From noreply at buildbot.pypy.org Sun Feb 23 21:38:37 2014 From: noreply at buildbot.pypy.org (fijal) Date: Sun, 23 Feb 2014 21:38:37 +0100 (CET) Subject: [pypy-commit] pypy default: kill them then Message-ID: <20140223203837.1ABA31C03FC@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69293:b712696ed9bb Date: 2014-02-23 22:37 +0200 http://bitbucket.org/pypy/pypy/changeset/b712696ed9bb/ Log: kill them then diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -17,7 +17,7 @@ from rpython.rtyper.lltypesystem import lltype from rpython.rlib.rstring import StringBuilder from rpython.rlib import jit -from pypy.module.micronumpy.constants import * +from pypy.module.micronumpy.constants import NPY_LONGDOUBLELTR, NPY_CLONGDOUBLELTR MIXIN_32 = (W_IntObject.typedef,) if LONG_BIT == 32 else () From noreply at buildbot.pypy.org Sun Feb 23 21:45:10 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 23 Feb 2014 21:45:10 +0100 (CET) Subject: [pypy-commit] pypy align_float_cast: solved differently Message-ID: <20140223204510.6DC751C03FC@cobra.cs.uni-duesseldorf.de> Author: Matti Picus Branch: align_float_cast Changeset: r69294:ad533b895e2c Date: 2014-02-23 22:37 +0200 http://bitbucket.org/pypy/pypy/changeset/ad533b895e2c/ Log: solved differently From noreply at buildbot.pypy.org Sun Feb 23 21:45:11 2014 From: noreply at buildbot.pypy.org (mattip) Date: Sun, 23 Feb 2014 21:45:11 +0100 (CET) Subject: [pypy-commit] pypy closed-branches: merge heads Message-ID: <20140223204511.81DF61C03FC@cobra.cs.uni-duesseldorf.de> Author: 
Matti Picus Branch: closed-branches Changeset: r69295:25ae739a8d5b Date: 2014-02-23 22:43 +0200 http://bitbucket.org/pypy/pypy/changeset/25ae739a8d5b/ Log: merge heads From noreply at buildbot.pypy.org Sun Feb 23 21:47:07 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 23 Feb 2014 21:47:07 +0100 (CET) Subject: [pypy-commit] pypy default: specify unaligned accesses in micronumpy Message-ID: <20140223204707.E161E1C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69296:b9b994201abe Date: 2014-02-23 15:45 -0500 http://bitbucket.org/pypy/pypy/changeset/b9b994201abe/ Log: specify unaligned accesses in micronumpy diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -9,8 +9,8 @@ from pypy.objspace.std.floatobject import float2string from pypy.objspace.std.complexobject import str_format from rpython.rlib import rfloat, clibffi, rcomplex -from rpython.rlib.rawstorage import (alloc_raw_storage, raw_storage_setitem, - raw_storage_getitem) +from rpython.rlib.rawstorage import (alloc_raw_storage, + raw_storage_getitem_unaligned, raw_storage_setitem_unaligned) from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import widen, byteswap, r_ulonglong, most_neg_value_of, LONG_BIT from rpython.rtyper.lltypesystem import lltype, rffi @@ -174,7 +174,7 @@ raise NotImplementedError def _read(self, storage, i, offset): - res = raw_storage_getitem(self.T, storage, i + offset) + res = raw_storage_getitem_unaligned(self.T, storage, i + offset) if not self.native: res = byteswap(res) return res @@ -182,7 +182,7 @@ def _write(self, storage, i, offset, value): if not self.native: value = byteswap(value) - raw_storage_setitem(storage, i + offset, value) + raw_storage_setitem_unaligned(storage, i + offset, value) def read(self, arr, i, offset, dtype=None): return self.box(self._read(arr.storage, i, offset)) @@ -990,7 +990,7 @@ return self.box(float_unpack(r_ulonglong(swapped), 2)) def _read(self, storage, i, offset): - hbits = raw_storage_getitem(self._STORAGE_T, storage, i + offset) + hbits = raw_storage_getitem_unaligned(self._STORAGE_T, storage, i + offset) if not self.native: hbits = byteswap(hbits) return float_unpack(r_ulonglong(hbits), 2) @@ -1003,7 +1003,7 @@ hbits = rffi.cast(self._STORAGE_T, hbits) if not self.native: hbits = byteswap(hbits) - raw_storage_setitem(storage, i + offset, hbits) + raw_storage_setitem_unaligned(storage, i + offset, hbits) class Float32(BaseType, Float): T = rffi.FLOAT @@ -1120,8 +1120,8 @@ return real, imag def _read(self, storage, i, offset): - real = raw_storage_getitem(self.T, storage, i + offset) - imag = raw_storage_getitem(self.T, storage, i + offset + rffi.sizeof(self.T)) + real = raw_storage_getitem_unaligned(self.T, storage, i + offset) + imag = raw_storage_getitem_unaligned(self.T, storage, i + offset + rffi.sizeof(self.T)) if not self.native: real = byteswap(real) imag = byteswap(imag) @@ -1136,8 +1136,8 @@ if not self.native: real = byteswap(real) imag = byteswap(imag) - raw_storage_setitem(storage, i + offset, real) - raw_storage_setitem(storage, i + offset + rffi.sizeof(self.T), imag) + raw_storage_setitem_unaligned(storage, i + offset, real) + raw_storage_setitem_unaligned(storage, i + offset + rffi.sizeof(self.T), imag) def store(self, arr, i, offset, box): self._write(arr.storage, i, offset, self.unbox(box)) diff --git a/rpython/rlib/rawstorage.py b/rpython/rlib/rawstorage.py --- 
a/rpython/rlib/rawstorage.py +++ b/rpython/rlib/rawstorage.py @@ -1,4 +1,4 @@ - +from rpython.rlib.objectmodel import we_are_translated from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.lltypesystem import lltype, rffi, llmemory from rpython.annotator import model as annmodel @@ -19,12 +19,21 @@ def raw_storage_getitem(TP, storage, index): "NOT_RPYTHON" _check_alignment(TP, index) + return raw_storage_getitem_unchecked(TP, storage, index) + +def raw_storage_getitem_unchecked(TP, storage, index): + "NOT_RPYTHON" return rffi.cast(rffi.CArrayPtr(TP), rffi.ptradd(storage, index))[0] def raw_storage_setitem(storage, index, item): "NOT_RPYTHON" TP = lltype.typeOf(item) _check_alignment(TP, index) + raw_storage_setitem_unchecked(storage, index, item) + +def raw_storage_setitem_unchecked(storage, index, item): + "NOT_RPYTHON" + TP = lltype.typeOf(item) rffi.cast(rffi.CArrayPtr(TP), rffi.ptradd(storage, index))[0] = item @specialize.arg(1) @@ -68,10 +77,16 @@ @specialize.ll() def raw_storage_getitem_unaligned(TP, storage, index): if misaligned_is_fine: - return raw_storage_getitem(TP, storage, index) + if we_are_translated(): + return raw_storage_getitem(TP, storage, index) + else: + return raw_storage_getitem_unchecked(TP, storage, index) mask = _get_alignment_mask(TP) if (index & mask) == 0: - return raw_storage_getitem(TP, storage, index) + if we_are_translated(): + return raw_storage_getitem(TP, storage, index) + else: + return raw_storage_getitem_unchecked(TP, storage, index) ptr = rffi.ptradd(storage, index) with lltype.scoped_alloc(rffi.CArray(TP), 1) as s_array: rffi.c_memcpy(rffi.cast(rffi.VOIDP, s_array), @@ -82,12 +97,18 @@ @specialize.ll() def raw_storage_setitem_unaligned(storage, index, item): if misaligned_is_fine: - raw_storage_setitem(storage, index, item) + if we_are_translated(): + raw_storage_setitem(storage, index, item) + else: + raw_storage_setitem_unchecked(storage, index, item) return TP = lltype.typeOf(item) mask = _get_alignment_mask(TP) if (index & mask) == 0: - raw_storage_setitem(storage, index, item) + if we_are_translated(): + raw_storage_setitem(storage, index, item) + else: + raw_storage_setitem_unchecked(storage, index, item) return ptr = rffi.ptradd(storage, index) with lltype.scoped_alloc(rffi.CArray(TP), 1) as s_array: From noreply at buildbot.pypy.org Sun Feb 23 21:47:09 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 23 Feb 2014 21:47:09 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140223204709.234031C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69297:510c3afd0ff0 Date: 2014-02-23 15:46 -0500 http://bitbucket.org/pypy/pypy/changeset/510c3afd0ff0/ Log: merge heads diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -16,7 +16,8 @@ from pypy.interpreter.mixedmodule import MixedModule from rpython.rtyper.lltypesystem import lltype from rpython.rlib.rstring import StringBuilder -from pypy.module.micronumpy.constants import * +from rpython.rlib import jit +from pypy.module.micronumpy.constants import NPY_LONGDOUBLELTR, NPY_CLONGDOUBLELTR MIXIN_32 = (W_IntObject.typedef,) if LONG_BIT == 32 else () @@ -33,6 +34,7 @@ def new_dtype_getter(name): + @jit.elidable def _get_dtype(space): from pypy.module.micronumpy.interp_dtype import get_dtype_cache return get_dtype_cache(space).dtypes_by_name[name] From noreply at buildbot.pypy.org Sun Feb 
23 22:14:46 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 23 Feb 2014 22:14:46 +0100 (CET) Subject: [pypy-commit] pypy default: different pattern for micronumpy constants Message-ID: <20140223211446.B7D411C02EA@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69298:cd03cd6fcd23 Date: 2014-02-23 16:12 -0500 http://bitbucket.org/pypy/pypy/changeset/cd03cd6fcd23/ Log: different pattern for micronumpy constants diff --git a/pypy/module/micronumpy/constants.py b/pypy/module/micronumpy/constants.py --- a/pypy/module/micronumpy/constants.py +++ b/pypy/module/micronumpy/constants.py @@ -1,88 +1,88 @@ -NPY_BOOL = 0 -NPY_BYTE = 1 -NPY_UBYTE = 2 -NPY_SHORT = 3 -NPY_USHORT = 4 -NPY_INT = 5 -NPY_UINT = 6 -NPY_LONG = 7 -NPY_ULONG = 8 -NPY_LONGLONG = 9 -NPY_ULONGLONG = 10 -NPY_FLOAT = 11 -NPY_DOUBLE = 12 -NPY_LONGDOUBLE = 13 -NPY_CFLOAT = 14 -NPY_CDOUBLE = 15 -NPY_CLONGDOUBLE = 16 -NPY_OBJECT = 17 -NPY_STRING = 18 -NPY_UNICODE = 19 -NPY_VOID = 20 -NPY_DATETIME = 21 -NPY_TIMEDELTA = 22 -NPY_HALF = 23 -NPY_NTYPES = 24 -NPY_NOTYPE = 25 -NPY_CHAR = 26 -NPY_USERDEF = 256 +BOOL = 0 +BYTE = 1 +UBYTE = 2 +SHORT = 3 +USHORT = 4 +INT = 5 +UINT = 6 +LONG = 7 +ULONG = 8 +LONGLONG = 9 +ULONGLONG = 10 +FLOAT = 11 +DOUBLE = 12 +LONGDOUBLE = 13 +CFLOAT = 14 +CDOUBLE = 15 +CLONGDOUBLE = 16 +OBJECT = 17 +STRING = 18 +UNICODE = 19 +VOID = 20 +DATETIME = 21 +TIMEDELTA = 22 +HALF = 23 +NTYPES = 24 +NOTYPE = 25 +CHAR = 26 +USERDEF = 256 -NPY_BOOLLTR = '?' -NPY_BYTELTR = 'b' -NPY_UBYTELTR = 'B' -NPY_SHORTLTR = 'h' -NPY_USHORTLTR = 'H' -NPY_INTLTR = 'i' -NPY_UINTLTR = 'I' -NPY_LONGLTR = 'l' -NPY_ULONGLTR = 'L' -NPY_LONGLONGLTR = 'q' -NPY_ULONGLONGLTR = 'Q' -NPY_HALFLTR = 'e' -NPY_FLOATLTR = 'f' -NPY_DOUBLELTR = 'd' -NPY_LONGDOUBLELTR = 'g' -NPY_CFLOATLTR = 'F' -NPY_CDOUBLELTR = 'D' -NPY_CLONGDOUBLELTR = 'G' -NPY_OBJECTLTR = 'O' -NPY_STRINGLTR = 'S' -NPY_STRINGLTR2 = 'a' -NPY_UNICODELTR = 'U' -NPY_VOIDLTR = 'V' -NPY_DATETIMELTR = 'M' -NPY_TIMEDELTALTR = 'm' -NPY_CHARLTR = 'c' +BOOLLTR = '?' 
+BYTELTR = 'b' +UBYTELTR = 'B' +SHORTLTR = 'h' +USHORTLTR = 'H' +INTLTR = 'i' +UINTLTR = 'I' +LONGLTR = 'l' +ULONGLTR = 'L' +LONGLONGLTR = 'q' +ULONGLONGLTR = 'Q' +HALFLTR = 'e' +FLOATLTR = 'f' +DOUBLELTR = 'd' +LONGDOUBLELTR = 'g' +CFLOATLTR = 'F' +CDOUBLELTR = 'D' +CLONGDOUBLELTR = 'G' +OBJECTLTR = 'O' +STRINGLTR = 'S' +STRINGLTR2 = 'a' +UNICODELTR = 'U' +VOIDLTR = 'V' +DATETIMELTR = 'M' +TIMEDELTALTR = 'm' +CHARLTR = 'c' -NPY_INTPLTR = 'p' -NPY_UINTPLTR = 'P' +INTPLTR = 'p' +UINTPLTR = 'P' -NPY_GENBOOLLTR ='b' -NPY_SIGNEDLTR = 'i' -NPY_UNSIGNEDLTR = 'u' -NPY_FLOATINGLTR = 'f' -NPY_COMPLEXLTR = 'c' +GENBOOLLTR ='b' +SIGNEDLTR = 'i' +UNSIGNEDLTR = 'u' +FLOATINGLTR = 'f' +COMPLEXLTR = 'c' -NPY_ANYORDER = -1 -NPY_CORDER = 0 -NPY_FORTRANORDER = 1 -NPY_KEEPORDER = 2 +ANYORDER = -1 +CORDER = 0 +FORTRANORDER = 1 +KEEPORDER = 2 -NPY_CLIP = 0 -NPY_WRAP = 1 -NPY_RAISE = 2 +CLIP = 0 +WRAP = 1 +RAISE = 2 -NPY_LITTLE = '<' -NPY_BIG = '>' -NPY_NATIVE = '=' -NPY_SWAP = 's' -NPY_IGNORE = '|' +LITTLE = '<' +BIG = '>' +NATIVE = '=' +SWAP = 's' +IGNORE = '|' import sys if sys.byteorder == 'big': - NPY_NATBYTE = NPY_BIG - NPY_OPPBYTE = NPY_LITTLE + NATBYTE = BIG + OPPBYTE = LITTLE else: - NPY_NATBYTE = NPY_LITTLE - NPY_OPPBYTE = NPY_BIG + NATBYTE = LITTLE + OPPBYTE = BIG del sys diff --git a/pypy/module/micronumpy/conversion_utils.py b/pypy/module/micronumpy/conversion_utils.py --- a/pypy/module/micronumpy/conversion_utils.py +++ b/pypy/module/micronumpy/conversion_utils.py @@ -1,21 +1,21 @@ from pypy.interpreter.error import OperationError -from pypy.module.micronumpy.constants import * +from pypy.module.micronumpy import constants as NPY def byteorder_converter(space, new_order): endian = new_order[0] - if endian not in (NPY_BIG, NPY_LITTLE, NPY_NATIVE, NPY_IGNORE, NPY_SWAP): + if endian not in (NPY.BIG, NPY.LITTLE, NPY.NATIVE, NPY.IGNORE, NPY.SWAP): ch = endian if ch in ('b', 'B'): - endian = NPY_BIG + endian = NPY.BIG elif ch in ('l', 'L'): - endian = NPY_LITTLE + endian = NPY.LITTLE elif ch in ('n', 'N'): - endian = NPY_NATIVE + endian = NPY.NATIVE elif ch in ('i', 'I'): - endian = NPY_IGNORE + endian = NPY.IGNORE elif ch in ('s', 'S'): - endian = NPY_SWAP + endian = NPY.SWAP else: raise OperationError(space.w_ValueError, space.wrap( "%s is an unrecognized byteorder" % new_order)) @@ -24,18 +24,18 @@ def clipmode_converter(space, w_mode): if space.is_none(w_mode): - return NPY_RAISE + return NPY.RAISE if space.isinstance_w(w_mode, space.w_str): mode = space.str_w(w_mode) if mode.startswith('C') or mode.startswith('c'): - return NPY_CLIP + return NPY.CLIP if mode.startswith('W') or mode.startswith('w'): - return NPY_WRAP + return NPY.WRAP if mode.startswith('R') or mode.startswith('r'): - return NPY_RAISE + return NPY.RAISE elif space.isinstance_w(w_mode, space.w_int): mode = space.int_w(w_mode) - if NPY_CLIP <= mode <= NPY_RAISE: + if NPY.CLIP <= mode <= NPY.RAISE: return mode raise OperationError(space.w_TypeError, space.wrap("clipmode not understood")) @@ -46,19 +46,19 @@ return default if not space.isinstance_w(w_order, space.w_str): if space.is_true(w_order): - return NPY_FORTRANORDER + return NPY.FORTRANORDER else: - return NPY_CORDER + return NPY.CORDER else: order = space.str_w(w_order) if order.startswith('C') or order.startswith('c'): - return NPY_CORDER + return NPY.CORDER elif order.startswith('F') or order.startswith('f'): - return NPY_FORTRANORDER + return NPY.FORTRANORDER elif order.startswith('A') or order.startswith('a'): - return NPY_ANYORDER + return NPY.ANYORDER elif 
order.startswith('K') or order.startswith('k'): - return NPY_KEEPORDER + return NPY.KEEPORDER else: raise OperationError(space.w_TypeError, space.wrap( "order not understood")) diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -6,7 +6,7 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec from pypy.module.micronumpy.conversion_utils import clipmode_converter -from pypy.module.micronumpy.constants import * +from pypy.module.micronumpy import constants as NPY def where(space, w_arr, w_x=None, w_y=None): """where(condition, [x, y]) @@ -238,12 +238,12 @@ index = index_w(space, idx) if index < 0 or index >= arr.get_size(): - if mode == NPY_RAISE: + if mode == NPY.RAISE: raise OperationError(space.w_IndexError, space.wrap( "index %d is out of bounds for axis 0 with size %d" % (index, arr.get_size()))) - elif mode == NPY_WRAP: + elif mode == NPY.WRAP: index = index % arr.get_size() - elif mode == NPY_CLIP: + elif mode == NPY.CLIP: if index < 0: index = 0 else: diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -17,7 +17,7 @@ from rpython.rtyper.lltypesystem import lltype from rpython.rlib.rstring import StringBuilder from rpython.rlib import jit -from pypy.module.micronumpy.constants import NPY_LONGDOUBLELTR, NPY_CLONGDOUBLELTR +from pypy.module.micronumpy import constants as NPY MIXIN_32 = (W_IntObject.typedef,) if LONG_BIT == 32 else () @@ -445,10 +445,10 @@ if long_double_size in (8, 12, 16): class W_FloatLongBox(W_FloatingBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY_LONGDOUBLELTR) + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.LONGDOUBLELTR) class W_ComplexLongBox(ComplexBox, W_ComplexFloatingBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY_CLONGDOUBLELTR) + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.CLONGDOUBLELTR) _COMPONENTS_BOX = W_FloatLongBox class W_FlexibleBox(W_GenericBox): diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -7,11 +7,10 @@ from pypy.module.micronumpy import types, interp_boxes, base from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import LONG_BIT, r_longlong, r_ulonglong -from rpython.rtyper.lltypesystem import rffi from rpython.rlib import jit from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy.conversion_utils import byteorder_converter -from pypy.module.micronumpy.constants import * +from pypy.module.micronumpy import constants as NPY def decode_w_dtype(space, w_dtype): @@ -42,7 +41,7 @@ "w_box_type", "byteorder", "size?", "float_type", "fields?", "fieldnames?", "shape", "subdtype", "base"] - def __init__(self, itemtype, num, kind, name, char, w_box_type, byteorder=NPY_NATIVE, + def __init__(self, itemtype, num, kind, name, char, w_box_type, byteorder=NPY.NATIVE, size=1, alternate_constructors=[], aliases=[], float_type=None, fields=None, fieldnames=None, shape=[], subdtype=None): self.itemtype = itemtype @@ -87,35 +86,35 @@ return self.itemtype.coerce(space, self, w_item) def is_int_type(self): - return (self.kind == NPY_SIGNEDLTR or 
self.kind == NPY_UNSIGNEDLTR or - self.kind == NPY_GENBOOLLTR) + return (self.kind == NPY.SIGNEDLTR or self.kind == NPY.UNSIGNEDLTR or + self.kind == NPY.GENBOOLLTR) def is_signed(self): - return self.kind == NPY_SIGNEDLTR + return self.kind == NPY.SIGNEDLTR def is_complex_type(self): - return self.kind == NPY_COMPLEXLTR + return self.kind == NPY.COMPLEXLTR def is_float_type(self): - return self.kind == NPY_FLOATINGLTR or self.kind == NPY_COMPLEXLTR + return self.kind == NPY.FLOATINGLTR or self.kind == NPY.COMPLEXLTR def is_bool_type(self): - return self.kind == NPY_GENBOOLLTR + return self.kind == NPY.GENBOOLLTR def is_record_type(self): return self.fields is not None def is_str_type(self): - return self.num == NPY_STRING + return self.num == NPY.STRING def is_str_or_unicode(self): - return (self.num == NPY_STRING or self.num == NPY_UNICODE) + return (self.num == NPY.STRING or self.num == NPY.UNICODE) def is_flexible_type(self): return (self.is_str_or_unicode() or self.is_record_type()) def is_native(self): - return self.byteorder in (NPY_NATIVE, NPY_NATBYTE) + return self.byteorder in (NPY.NATIVE, NPY.NATBYTE) def get_size(self): return self.size * self.itemtype.get_element_size() @@ -126,7 +125,7 @@ return self.name def get_float_dtype(self, space): - assert self.kind == NPY_COMPLEXLTR + assert self.kind == NPY.COMPLEXLTR assert self.float_type is not None return get_dtype_cache(space).dtypes_by_name[self.byteorder + self.float_type] @@ -150,15 +149,15 @@ def descr_get_str(self, space): size = self.get_size() basic = self.kind - if basic == NPY_UNICODELTR: + if basic == NPY.UNICODELTR: size >>= 2 - endian = NPY_NATBYTE + endian = NPY.NATBYTE elif size // (self.size or 1) <= 1: - endian = NPY_IGNORE + endian = NPY.IGNORE else: endian = self.byteorder - if endian == NPY_NATIVE: - endian = NPY_NATBYTE + if endian == NPY.NATIVE: + endian = NPY.NATBYTE return space.wrap("%s%s%s" % (endian, basic, size)) def descr_get_descr(self, space): @@ -305,7 +304,7 @@ names = self.descr_get_names(space) values = self.descr_get_fields(space) if self.fields: - endian = NPY_IGNORE + endian = NPY.IGNORE #TODO: Implement this when subarrays are implemented subdescr = space.w_None size = 0 @@ -318,8 +317,8 @@ alignment = space.wrap(1) else: endian = self.byteorder - if endian == NPY_NATIVE: - endian = NPY_NATBYTE + if endian == NPY.NATIVE: + endian = NPY.NATBYTE subdescr = space.w_None w_size = space.wrap(-1) alignment = space.wrap(-1) @@ -333,8 +332,8 @@ raise OperationError(space.w_NotImplementedError, space.wrap("Pickling protocol version not supported")) endian = space.str_w(space.getitem(w_data, space.wrap(1))) - if endian == NPY_NATBYTE: - endian = NPY_NATIVE + if endian == NPY.NATBYTE: + endian = NPY.NATIVE self.byteorder = endian fieldnames = space.getitem(w_data, space.wrap(3)) @@ -344,15 +343,15 @@ self.descr_set_fields(space, fields) @unwrap_spec(new_order=str) - def descr_newbyteorder(self, space, new_order=NPY_SWAP): + def descr_newbyteorder(self, space, new_order=NPY.SWAP): newendian = byteorder_converter(space, new_order) endian = self.byteorder - if endian != NPY_IGNORE: - if newendian == NPY_SWAP: - endian = NPY_OPPBYTE if self.is_native() else NPY_NATBYTE - elif newendian != NPY_IGNORE: + if endian != NPY.IGNORE: + if newendian == NPY.SWAP: + endian = NPY.OPPBYTE if self.is_native() else NPY.NATBYTE + elif newendian != NPY.IGNORE: endian = newendian - itemtype = self.itemtype.__class__(endian in (NPY_NATIVE, NPY_NATBYTE)) + itemtype = self.itemtype.__class__(endian in (NPY.NATIVE, 
NPY.NATBYTE)) return W_Dtype(itemtype, self.num, self.kind, self.name, self.char, self.w_box_type, endian, size=self.size) @@ -387,9 +386,9 @@ offset += subdtype.get_size() fieldnames.append(fldname) itemtype = types.RecordType() - return W_Dtype(itemtype, NPY_VOID, NPY_VOIDLTR, + return W_Dtype(itemtype, NPY.VOID, NPY.VOIDLTR, "void" + str(8 * offset * itemtype.get_element_size()), - NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), + NPY.VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), fields=fields, fieldnames=fieldnames, size=offset) @@ -429,9 +428,9 @@ size *= dim if size == 1: return subdtype - return W_Dtype(types.VoidType(), NPY_VOID, NPY_VOIDLTR, + return W_Dtype(types.VoidType(), NPY.VOID, NPY.VOIDLTR, "void" + str(8 * subdtype.get_size() * size), - NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), + NPY.VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), shape=shape, subdtype=subdtype, size=subdtype.get_size() * size) @@ -523,24 +522,24 @@ size = int(name[1:]) except ValueError: raise OperationError(space.w_TypeError, space.wrap("data type not understood")) - if char == NPY_CHARLTR: - char = NPY_STRINGLTR + if char == NPY.CHARLTR: + char = NPY.STRINGLTR size = 1 - if char == NPY_STRINGLTR: + if char == NPY.STRINGLTR: itemtype = types.StringType() basename = 'string' - num = NPY_STRING + num = NPY.STRING w_box_type = space.gettypefor(interp_boxes.W_StringBox) - elif char == NPY_VOIDLTR: + elif char == NPY.VOIDLTR: itemtype = types.VoidType() basename = 'void' - num = NPY_VOID + num = NPY.VOID w_box_type = space.gettypefor(interp_boxes.W_VoidBox) - elif char == NPY_UNICODELTR: + elif char == NPY.UNICODELTR: itemtype = types.UnicodeType() basename = 'unicode' - num = NPY_UNICODE + num = NPY.UNICODE w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox) else: assert False @@ -555,10 +554,10 @@ return W_Dtype( itemtype, size=size, - num=NPY_STRING, - kind=NPY_STRINGLTR, + num=NPY.STRING, + kind=NPY.STRINGLTR, name='string' + str(8 * size * itemtype.get_element_size()), - char=NPY_STRINGLTR, + char=NPY.STRINGLTR, w_box_type = space.gettypefor(interp_boxes.W_StringBox), ) @@ -568,10 +567,10 @@ return W_Dtype( itemtype, size=size, - num=NPY_UNICODE, - kind=NPY_UNICODELTR, + num=NPY.UNICODE, + kind=NPY.UNICODELTR, name='unicode' + str(8 * size * itemtype.get_element_size()), - char=NPY_UNICODELTR, + char=NPY.UNICODELTR, w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox), ) @@ -580,72 +579,72 @@ def __init__(self, space): self.w_booldtype = W_Dtype( types.Bool(), - num=NPY_BOOL, - kind=NPY_GENBOOLLTR, + num=NPY.BOOL, + kind=NPY.GENBOOLLTR, name="bool", - char=NPY_BOOLLTR, + char=NPY.BOOLLTR, w_box_type=space.gettypefor(interp_boxes.W_BoolBox), alternate_constructors=[space.w_bool], aliases=['bool8'], ) self.w_int8dtype = W_Dtype( types.Int8(), - num=NPY_BYTE, - kind=NPY_SIGNEDLTR, + num=NPY.BYTE, + kind=NPY.SIGNEDLTR, name="int8", - char=NPY_BYTELTR, + char=NPY.BYTELTR, w_box_type=space.gettypefor(interp_boxes.W_Int8Box), aliases=['byte'], ) self.w_uint8dtype = W_Dtype( types.UInt8(), - num=NPY_UBYTE, - kind=NPY_UNSIGNEDLTR, + num=NPY.UBYTE, + kind=NPY.UNSIGNEDLTR, name="uint8", - char=NPY_UBYTELTR, + char=NPY.UBYTELTR, w_box_type=space.gettypefor(interp_boxes.W_UInt8Box), aliases=['ubyte'], ) self.w_int16dtype = W_Dtype( types.Int16(), - num=NPY_SHORT, - kind=NPY_SIGNEDLTR, + num=NPY.SHORT, + kind=NPY.SIGNEDLTR, name="int16", - char=NPY_SHORTLTR, + char=NPY.SHORTLTR, w_box_type=space.gettypefor(interp_boxes.W_Int16Box), aliases=['short'], ) self.w_uint16dtype = 
W_Dtype( types.UInt16(), - num=NPY_USHORT, - kind=NPY_UNSIGNEDLTR, + num=NPY.USHORT, + kind=NPY.UNSIGNEDLTR, name="uint16", - char=NPY_USHORTLTR, + char=NPY.USHORTLTR, w_box_type=space.gettypefor(interp_boxes.W_UInt16Box), aliases=['ushort'], ) self.w_int32dtype = W_Dtype( types.Int32(), - num=NPY_INT, - kind=NPY_SIGNEDLTR, + num=NPY.INT, + kind=NPY.SIGNEDLTR, name="int32", - char=NPY_INTLTR, + char=NPY.INTLTR, w_box_type=space.gettypefor(interp_boxes.W_Int32Box), ) self.w_uint32dtype = W_Dtype( types.UInt32(), - num=NPY_UINT, - kind=NPY_UNSIGNEDLTR, + num=NPY.UINT, + kind=NPY.UNSIGNEDLTR, name="uint32", - char=NPY_UINTLTR, + char=NPY.UINTLTR, w_box_type=space.gettypefor(interp_boxes.W_UInt32Box), ) self.w_longdtype = W_Dtype( types.Long(), - num=NPY_LONG, - kind=NPY_SIGNEDLTR, + num=NPY.LONG, + kind=NPY.SIGNEDLTR, name="int%d" % LONG_BIT, - char=NPY_LONGLTR, + char=NPY.LONGLTR, w_box_type=space.gettypefor(interp_boxes.W_LongBox), alternate_constructors=[space.w_int, space.gettypefor(interp_boxes.W_IntegerBox), @@ -655,10 +654,10 @@ ) self.w_ulongdtype = W_Dtype( types.ULong(), - num=NPY_ULONG, - kind=NPY_UNSIGNEDLTR, + num=NPY.ULONG, + kind=NPY.UNSIGNEDLTR, name="uint%d" % LONG_BIT, - char=NPY_ULONGLTR, + char=NPY.ULONGLTR, w_box_type=space.gettypefor(interp_boxes.W_ULongBox), alternate_constructors=[space.gettypefor(interp_boxes.W_UnsignedIntegerBox), ], @@ -666,38 +665,38 @@ ) self.w_int64dtype = W_Dtype( types.Int64(), - num=NPY_LONGLONG, - kind=NPY_SIGNEDLTR, + num=NPY.LONGLONG, + kind=NPY.SIGNEDLTR, name="int64", - char=NPY_LONGLONGLTR, + char=NPY.LONGLONGLTR, w_box_type=space.gettypefor(interp_boxes.W_Int64Box), alternate_constructors=[space.w_long], aliases=['longlong'], ) self.w_uint64dtype = W_Dtype( types.UInt64(), - num=NPY_ULONGLONG, - kind=NPY_UNSIGNEDLTR, + num=NPY.ULONGLONG, + kind=NPY.UNSIGNEDLTR, name="uint64", - char=NPY_ULONGLONGLTR, + char=NPY.ULONGLONGLTR, w_box_type=space.gettypefor(interp_boxes.W_UInt64Box), aliases=['ulonglong'], ) self.w_float32dtype = W_Dtype( types.Float32(), - num=NPY_FLOAT, - kind=NPY_FLOATINGLTR, + num=NPY.FLOAT, + kind=NPY.FLOATINGLTR, name="float32", - char=NPY_FLOATLTR, + char=NPY.FLOATLTR, w_box_type=space.gettypefor(interp_boxes.W_Float32Box), aliases=['single'] ) self.w_float64dtype = W_Dtype( types.Float64(), - num=NPY_DOUBLE, - kind=NPY_FLOATINGLTR, + num=NPY.DOUBLE, + kind=NPY.FLOATINGLTR, name="float64", - char=NPY_DOUBLELTR, + char=NPY.DOUBLELTR, w_box_type = space.gettypefor(interp_boxes.W_Float64Box), alternate_constructors=[space.w_float, space.gettypefor(interp_boxes.W_NumberBox), @@ -707,52 +706,52 @@ ) self.w_floatlongdtype = W_Dtype( types.FloatLong(), - num=NPY_LONGDOUBLE, - kind=NPY_FLOATINGLTR, + num=NPY.LONGDOUBLE, + kind=NPY.FLOATINGLTR, name="float%d" % (interp_boxes.long_double_size * 8), - char=NPY_LONGDOUBLELTR, + char=NPY.LONGDOUBLELTR, w_box_type=space.gettypefor(interp_boxes.W_FloatLongBox), aliases=["longdouble", "longfloat"], ) self.w_complex64dtype = W_Dtype( types.Complex64(), - num=NPY_CFLOAT, - kind=NPY_COMPLEXLTR, + num=NPY.CFLOAT, + kind=NPY.COMPLEXLTR, name="complex64", - char=NPY_CFLOATLTR, + char=NPY.CFLOATLTR, w_box_type = space.gettypefor(interp_boxes.W_Complex64Box), aliases=['csingle'], - float_type=NPY_FLOATLTR, + float_type=NPY.FLOATLTR, ) self.w_complex128dtype = W_Dtype( types.Complex128(), - num=NPY_CDOUBLE, - kind=NPY_COMPLEXLTR, + num=NPY.CDOUBLE, + kind=NPY.COMPLEXLTR, name="complex128", - char=NPY_CDOUBLELTR, + char=NPY.CDOUBLELTR, w_box_type = 
space.gettypefor(interp_boxes.W_Complex128Box), alternate_constructors=[space.w_complex, space.gettypefor(interp_boxes.W_ComplexFloatingBox)], aliases=["complex", 'cfloat', 'cdouble'], - float_type=NPY_DOUBLELTR, + float_type=NPY.DOUBLELTR, ) self.w_complexlongdtype = W_Dtype( types.ComplexLong(), - num=NPY_CLONGDOUBLE, - kind=NPY_COMPLEXLTR, + num=NPY.CLONGDOUBLE, + kind=NPY.COMPLEXLTR, name="complex%d" % (interp_boxes.long_double_size * 16), - char=NPY_CLONGDOUBLELTR, + char=NPY.CLONGDOUBLELTR, w_box_type = space.gettypefor(interp_boxes.W_ComplexLongBox), aliases=["clongdouble", "clongfloat"], - float_type=NPY_LONGDOUBLELTR, + float_type=NPY.LONGDOUBLELTR, ) self.w_stringdtype = W_Dtype( types.StringType(), size=0, - num=NPY_STRING, - kind=NPY_STRINGLTR, + num=NPY.STRING, + kind=NPY.STRINGLTR, name='string', - char=NPY_STRINGLTR, + char=NPY.STRINGLTR, w_box_type = space.gettypefor(interp_boxes.W_StringBox), alternate_constructors=[space.w_str, space.gettypefor(interp_boxes.W_CharacterBox)], @@ -761,20 +760,20 @@ self.w_unicodedtype = W_Dtype( types.UnicodeType(), size=0, - num=NPY_UNICODE, - kind=NPY_UNICODELTR, + num=NPY.UNICODE, + kind=NPY.UNICODELTR, name='unicode', - char=NPY_UNICODELTR, + char=NPY.UNICODELTR, w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox), alternate_constructors=[space.w_unicode], ) self.w_voiddtype = W_Dtype( types.VoidType(), size=0, - num=NPY_VOID, - kind=NPY_VOIDLTR, + num=NPY.VOID, + kind=NPY.VOIDLTR, name='void', - char=NPY_VOIDLTR, + char=NPY.VOIDLTR, w_box_type = space.gettypefor(interp_boxes.W_VoidBox), #alternate_constructors=[space.w_buffer], # XXX no buffer in space @@ -783,26 +782,26 @@ ) self.w_float16dtype = W_Dtype( types.Float16(), - num=NPY_HALF, - kind=NPY_FLOATINGLTR, + num=NPY.HALF, + kind=NPY.FLOATINGLTR, name="float16", - char=NPY_HALFLTR, + char=NPY.HALFLTR, w_box_type=space.gettypefor(interp_boxes.W_Float16Box), ) self.w_intpdtype = W_Dtype( types.Long(), - num=NPY_LONG, - kind=NPY_SIGNEDLTR, + num=NPY.LONG, + kind=NPY.SIGNEDLTR, name='intp', - char=NPY_INTPLTR, + char=NPY.INTPLTR, w_box_type = space.gettypefor(interp_boxes.W_LongBox), ) self.w_uintpdtype = W_Dtype( types.ULong(), - num=NPY_ULONG, - kind=NPY_UNSIGNEDLTR, + num=NPY.ULONG, + kind=NPY.UNSIGNEDLTR, name='uintp', - char=NPY_UINTPLTR, + char=NPY.UINTPLTR, w_box_type = space.gettypefor(interp_boxes.W_ULongBox), ) float_dtypes = [self.w_float16dtype, self.w_float32dtype, @@ -834,14 +833,14 @@ for can_name in [dtype.kind + str(dtype.get_size()), dtype.char]: self.dtypes_by_name[can_name] = dtype - self.dtypes_by_name[NPY_NATBYTE + can_name] = dtype - self.dtypes_by_name[NPY_NATIVE + can_name] = dtype - self.dtypes_by_name[NPY_IGNORE + can_name] = dtype - new_name = NPY_OPPBYTE + can_name + self.dtypes_by_name[NPY.NATBYTE + can_name] = dtype + self.dtypes_by_name[NPY.NATIVE + can_name] = dtype + self.dtypes_by_name[NPY.IGNORE + can_name] = dtype + new_name = NPY.OPPBYTE + can_name itemtype = type(dtype.itemtype)(False) self.dtypes_by_name[new_name] = W_Dtype( itemtype, dtype.num, dtype.kind, new_name, dtype.char, - dtype.w_box_type, byteorder=NPY_OPPBYTE, + dtype.w_box_type, byteorder=NPY.OPPBYTE, float_type=dtype.float_type) for alias in dtype.aliases: self.dtypes_by_name[alias] = dtype @@ -898,7 +897,7 @@ space.wrap(itembits), space.wrap(dtype.itemtype.get_element_size())] if dtype.is_int_type(): - if dtype.kind == NPY_GENBOOLLTR: + if dtype.kind == NPY.GENBOOLLTR: w_maxobj = space.wrap(1) w_minobj = space.wrap(0) elif dtype.is_signed(): diff --git 
a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -23,7 +23,7 @@ from pypy.module.micronumpy.arrayimpl.base import BaseArrayImplementation from pypy.module.micronumpy.conversion_utils import order_converter, multi_axis_converter from pypy.module.micronumpy import support -from pypy.module.micronumpy.constants import * +from pypy.module.micronumpy import constants as NPY def _find_shape(space, w_size, dtype): if space.is_none(w_size): @@ -110,8 +110,8 @@ self.fill(space, self.get_dtype().coerce(space, w_value)) def descr_tostring(self, space, w_order=None): - order = order_converter(space, w_order, NPY_CORDER) - if order == NPY_FORTRANORDER: + order = order_converter(space, w_order, NPY.CORDER) + if order == NPY.FORTRANORDER: raise OperationError(space.w_NotImplementedError, space.wrap( "unsupported value for order")) return space.wrap(loop.tostring(space, self)) @@ -320,8 +320,8 @@ return self.implementation.get_scalar_value() def descr_copy(self, space, w_order=None): - order = order_converter(space, w_order, NPY_KEEPORDER) - if order == NPY_FORTRANORDER: + order = order_converter(space, w_order, NPY.KEEPORDER) + if order == NPY.FORTRANORDER: raise OperationError(space.w_NotImplementedError, space.wrap( "unsupported value for order")) copy = self.implementation.copy(space) @@ -375,7 +375,7 @@ numpy.reshape : equivalent function """ args_w, kw_w = __args__.unpack() - order = NPY_CORDER + order = NPY.CORDER if kw_w: if "order" in kw_w: order = order_converter(space, kw_w["order"], order) @@ -383,10 +383,10 @@ if kw_w: raise OperationError(space.w_TypeError, space.wrap( "reshape() got unexpected keyword argument(s)")) - if order == NPY_KEEPORDER: + if order == NPY.KEEPORDER: raise OperationError(space.w_ValueError, space.wrap( "order 'K' is not permitted for reshaping")) - if order != NPY_CORDER and order != NPY_ANYORDER: + if order != NPY.CORDER and order != NPY.ANYORDER: raise OperationError(space.w_NotImplementedError, space.wrap( "unsupported value for order")) if len(args_w) == 1: @@ -561,7 +561,7 @@ # by converting nonnative byte order. 
if self.is_scalar(): return space.wrap(0) - dtype = self.get_dtype().descr_newbyteorder(space, NPY_NATIVE) + dtype = self.get_dtype().descr_newbyteorder(space, NPY.NATIVE) contig = self.implementation.astype(space, dtype) return contig.argsort(space, w_axis) @@ -662,7 +662,7 @@ "getfield not implemented yet")) @unwrap_spec(new_order=str) - def descr_newbyteorder(self, space, new_order=NPY_SWAP): + def descr_newbyteorder(self, space, new_order=NPY.SWAP): return self.descr_view(space, self.get_dtype().descr_newbyteorder(space, new_order)) @@ -1138,7 +1138,7 @@ "__setstate__ called with len(args[1])==%d, not 5 or 4" % lens)) shape = space.getitem(w_state, space.wrap(base_index)) dtype = space.getitem(w_state, space.wrap(base_index+1)) - isfortran = space.getitem(w_state, space.wrap(base_index+2)) + #isfortran = space.getitem(w_state, space.wrap(base_index+2)) storage = space.getitem(w_state, space.wrap(base_index+3)) if not isinstance(dtype, interp_dtype.W_Dtype): raise OperationError(space.w_ValueError, space.wrap( @@ -1192,8 +1192,8 @@ w_base=w_buffer, writable=buf.is_writable()) - order = order_converter(space, w_order, NPY_CORDER) - if order == NPY_CORDER: + order = order_converter(space, w_order, NPY.CORDER) + if order == NPY.CORDER: order = 'C' else: order = 'F' diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -9,7 +9,7 @@ from pypy.module.micronumpy.interp_support import unwrap_axis_arg from pypy.module.micronumpy.strides import shape_agreement from pypy.module.micronumpy.base import convert_to_array, W_NDimArray -from pypy.module.micronumpy.constants import * +from pypy.module.micronumpy import constants as NPY def done_if_true(dtype, val): return dtype.itemtype.bool(val) @@ -462,24 +462,24 @@ if dt1.num > dt2.num: dt1, dt2 = dt2, dt1 # Some operations promote op(bool, bool) to return int8, rather than bool - if promote_bools and (dt1.kind == dt2.kind == NPY_GENBOOLLTR): + if promote_bools and (dt1.kind == dt2.kind == NPY.GENBOOLLTR): return interp_dtype.get_dtype_cache(space).w_int8dtype # Everything numeric promotes to complex if dt2.is_complex_type() or dt1.is_complex_type(): - if dt2.num == NPY_HALF: + if dt2.num == NPY.HALF: dt1, dt2 = dt2, dt1 - if dt2.num == NPY_CFLOAT: - if dt1.num == NPY_DOUBLE: + if dt2.num == NPY.CFLOAT: + if dt1.num == NPY.DOUBLE: return interp_dtype.get_dtype_cache(space).w_complex128dtype - elif dt1.num == NPY_LONGDOUBLE: + elif dt1.num == NPY.LONGDOUBLE: return interp_dtype.get_dtype_cache(space).w_complexlongdtype return interp_dtype.get_dtype_cache(space).w_complex64dtype - elif dt2.num == NPY_CDOUBLE: - if dt1.num == NPY_LONGDOUBLE: + elif dt2.num == NPY.CDOUBLE: + if dt1.num == NPY.LONGDOUBLE: return interp_dtype.get_dtype_cache(space).w_complexlongdtype return interp_dtype.get_dtype_cache(space).w_complex128dtype - elif dt2.num == NPY_CLONGDOUBLE: + elif dt2.num == NPY.CLONGDOUBLE: return interp_dtype.get_dtype_cache(space).w_complexlongdtype else: raise OperationError(space.w_TypeError, space.wrap("Unsupported types")) @@ -488,30 +488,30 @@ return find_unaryop_result_dtype(space, dt2, promote_to_float=True) # If they're the same kind, choose the greater one. if dt1.kind == dt2.kind and not dt2.is_flexible_type(): - if dt2.num == NPY_HALF: + if dt2.num == NPY.HALF: return dt1 return dt2 # Everything promotes to float, and bool promotes to everything. 
- if dt2.kind == NPY_FLOATINGLTR or dt1.kind == NPY_GENBOOLLTR: - if dt2.num == NPY_HALF and dt1.itemtype.get_element_size() == 2: + if dt2.kind == NPY.FLOATINGLTR or dt1.kind == NPY.GENBOOLLTR: + if dt2.num == NPY.HALF and dt1.itemtype.get_element_size() == 2: return interp_dtype.get_dtype_cache(space).w_float32dtype - if dt2.num == NPY_HALF and dt1.itemtype.get_element_size() >= 4: + if dt2.num == NPY.HALF and dt1.itemtype.get_element_size() >= 4: return interp_dtype.get_dtype_cache(space).w_float64dtype - if dt2.num == NPY_FLOAT and dt1.itemtype.get_element_size() >= 4: + if dt2.num == NPY.FLOAT and dt1.itemtype.get_element_size() >= 4: return interp_dtype.get_dtype_cache(space).w_float64dtype return dt2 # for now this means mixing signed and unsigned - if dt2.kind == NPY_SIGNEDLTR: + if dt2.kind == NPY.SIGNEDLTR: # if dt2 has a greater number of bytes, then just go with it if dt1.itemtype.get_element_size() < dt2.itemtype.get_element_size(): return dt2 # we need to promote both dtypes dtypenum = dt2.num + 2 - elif dt2.num == NPY_ULONGLONG or (LONG_BIT == 64 and dt2.num == NPY_ULONG): + elif dt2.num == NPY.ULONGLONG or (LONG_BIT == 64 and dt2.num == NPY.ULONG): # UInt64 + signed = Float64 - dtypenum = NPY_DOUBLE + dtypenum = NPY.DOUBLE elif dt2.is_flexible_type(): # For those operations that get here (concatenate, stack), # flexible types take precedence over numeric type @@ -528,7 +528,7 @@ newdtype = interp_dtype.get_dtype_cache(space).dtypes_by_num[dtypenum] if (newdtype.itemtype.get_element_size() > dt2.itemtype.get_element_size() or - newdtype.kind == NPY_FLOATINGLTR): + newdtype.kind == NPY.FLOATINGLTR): return newdtype else: # we only promoted to long on 32-bit or to longlong on 64-bit @@ -540,24 +540,24 @@ def find_unaryop_result_dtype(space, dt, promote_to_float=False, promote_bools=False, promote_to_largest=False): if promote_to_largest: - if dt.kind == NPY_GENBOOLLTR or dt.kind == NPY_SIGNEDLTR: + if dt.kind == NPY.GENBOOLLTR or dt.kind == NPY.SIGNEDLTR: if dt.get_size() * 8 < LONG_BIT: return interp_dtype.get_dtype_cache(space).w_longdtype - elif dt.kind == NPY_UNSIGNEDLTR: + elif dt.kind == NPY.UNSIGNEDLTR: if dt.get_size() * 8 < LONG_BIT: return interp_dtype.get_dtype_cache(space).w_ulongdtype else: - assert dt.kind == NPY_FLOATINGLTR or dt.kind == NPY_COMPLEXLTR + assert dt.kind == NPY.FLOATINGLTR or dt.kind == NPY.COMPLEXLTR return dt - if promote_bools and (dt.kind == NPY_GENBOOLLTR): + if promote_bools and (dt.kind == NPY.GENBOOLLTR): return interp_dtype.get_dtype_cache(space).w_int8dtype if promote_to_float: - if dt.kind == NPY_FLOATINGLTR or dt.kind == NPY_COMPLEXLTR: + if dt.kind == NPY.FLOATINGLTR or dt.kind == NPY.COMPLEXLTR: return dt - if dt.num >= NPY_INT: + if dt.num >= NPY.INT: return interp_dtype.get_dtype_cache(space).w_float64dtype for bytes, dtype in interp_dtype.get_dtype_cache(space).float_dtypes_by_num_bytes: - if (dtype.kind == NPY_FLOATINGLTR and + if (dtype.kind == NPY.FLOATINGLTR and dtype.itemtype.get_element_size() > dt.itemtype.get_element_size()): return dtype return dt @@ -594,7 +594,7 @@ if current_guess is None: return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) - elif current_guess.num == NPY_STRING: + elif current_guess.num == NPY.STRING: if current_guess.get_size() < space.len_w(w_obj): return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -11,7 +11,7 
@@ from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy.iter import PureShapeIterator from pypy.module.micronumpy.support import index_w -from pypy.module.micronumpy.constants import * +from pypy.module.micronumpy import constants as NPY call2_driver = jit.JitDriver(name='numpy_call2', greens = ['shapelen', 'func', 'calc_dtype', @@ -597,13 +597,13 @@ mode=mode) index = index_w(space, arr_iter.getitem()) if index < 0 or index >= len(iterators): - if mode == NPY_RAISE: + if mode == NPY.RAISE: raise OperationError(space.w_ValueError, space.wrap( "invalid entry in choice array")) - elif mode == NPY_WRAP: + elif mode == NPY.WRAP: index = index % (len(iterators)) else: - assert mode == NPY_CLIP + assert mode == NPY.CLIP if index < 0: index = 0 else: diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -1,4 +1,4 @@ -from pypy.module.micronumpy.interp_dtype import NPY_NATBYTE, NPY_OPPBYTE +from pypy.module.micronumpy import constants as NPY from pypy.conftest import option class BaseNumpyAppTest(object): @@ -23,5 +23,5 @@ import sys sys.modules['numpypy'] = numpy """) - cls.w_non_native_prefix = cls.space.wrap(NPY_OPPBYTE) - cls.w_native_prefix = cls.space.wrap(NPY_NATBYTE) + cls.w_non_native_prefix = cls.space.wrap(NPY.OPPBYTE) + cls.w_native_prefix = cls.space.wrap(NPY.NATBYTE) From noreply at buildbot.pypy.org Sun Feb 23 22:16:38 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sun, 23 Feb 2014 22:16:38 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Kill complextype.py, kill conjugate SMM. Message-ID: <20140223211638.7125D1C02EA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69299:4286faa95835 Date: 2014-02-23 21:36 +0100 http://bitbucket.org/pypy/pypy/changeset/4286faa95835/ Log: Kill complextype.py, kill conjugate SMM. 
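In outline, the change below moves the complex type's definition from the SMM-based complextype.py into complexobject.py: the 'conjugate' multimethod becomes an ordinary descr_ method on W_ComplexObject, wired into the class's StdTypeDef via interp2app. A minimal sketch of that pattern, using only names that appear in the diff (the trimmed class body is hypothetical, and the imports assume a PyPy source checkout):

    # Heavily trimmed sketch, not the actual PyPy sources.
    from pypy.interpreter.gateway import interp2app
    from pypy.objspace.std.stdtypedef import StdTypeDef

    class W_ComplexObject(object):      # the real class derives from W_Object
        def __init__(self, realval, imagval):
            self.realval = realval
            self.imagval = imagval

        def descr_conjugate(self, space):
            """(A+Bj).conjugate() -> A-Bj"""
            # same computation as the old complex_conjugate__Complex SMM,
            # now dispatched as a plain method on the wrapped class
            return space.newcomplex(self.realval, -self.imagval)

    W_ComplexObject.typedef = StdTypeDef("complex",
        # replaces the StdObjSpaceMultiMethod('conjugate', 1) registration
        conjugate = interp2app(W_ComplexObject.descr_conjugate),
    )

With the method attached to the type directly, nothing in complextype.py is left to register, which is what lets the whole module be deleted.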
diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -6,7 +6,7 @@ from pypy.objspace.std.floattype import float_typedef from pypy.objspace.std.unicodeobject import W_UnicodeObject from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.complextype import complex_typedef +from pypy.objspace.std.complexobject import W_ComplexObject from rpython.rlib.rarithmetic import LONG_BIT from rpython.rtyper.lltypesystem import rffi from rpython.tool.sourcetools import func_with_new_name @@ -770,7 +770,7 @@ imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), ) -W_Complex128Box.typedef = TypeDef("complex128", (W_ComplexFloatingBox.typedef, complex_typedef), +W_Complex128Box.typedef = TypeDef("complex128", (W_ComplexFloatingBox.typedef, W_ComplexObject.typedef), __module__ = "numpy", __new__ = interp2app(W_Complex128Box.descr__new__.im_func), __reduce__ = interp2app(W_Complex128Box.descr_reduce), @@ -785,7 +785,7 @@ __reduce__ = interp2app(W_FloatLongBox.descr_reduce), ) - W_ComplexLongBox.typedef = TypeDef("complex%d" % (long_double_size * 16), (W_ComplexFloatingBox.typedef, complex_typedef), + W_ComplexLongBox.typedef = TypeDef("complex%d" % (long_double_size * 16), (W_ComplexFloatingBox.typedef, W_ComplexObject.typedef), __module__ = "numpy", __new__ = interp2app(W_ComplexLongBox.descr__new__.im_func), __reduce__ = interp2app(W_ComplexLongBox.descr_reduce), diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -1,18 +1,27 @@ -from pypy.interpreter import gateway -from pypy.interpreter.error import OperationError +import math + +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.objspace.std import newformat +from pypy.objspace.std.floatobject import W_FloatObject, _hash_float from pypy.objspace.std.intobject import W_IntObject +from pypy.objspace.std.longobject import W_LongObject from pypy.objspace.std.model import registerimplementation, W_Object +from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.floatobject import W_FloatObject, _hash_float -from pypy.objspace.std.longobject import W_LongObject +from pypy.objspace.std.stdtypedef import GetSetProperty, StdTypeDef +from rpython.rlib import jit, rcomplex +from rpython.rlib.rarithmetic import intmask, r_ulonglong from rpython.rlib.rbigint import rbigint from rpython.rlib.rfloat import ( - formatd, DTSF_STR_PRECISION, isinf, isnan, copysign) -from rpython.rlib import jit, rcomplex -from rpython.rlib.rarithmetic import intmask, r_ulonglong + formatd, DTSF_STR_PRECISION, isinf, isnan, copysign, string_to_float) +from rpython.rlib.rstring import ParseStringError -import math + +# ERRORCODES + +ERR_WRONG_SECOND = "complex() can't take second arg if first is a string" +ERR_MALFORMED = "complex() arg is a malformed string" class W_AbstractComplexObject(W_Object): @@ -47,10 +56,168 @@ return space.newlong_from_rbigint(val) +def _split_complex(s): + slen = len(s) + if slen == 0: + raise ValueError + realstart = 0 + realstop = 0 + imagstart = 0 + imagstop = 0 + imagsign = ' ' + i = 0 + # ignore whitespace at beginning and end + while i < slen and s[i] == ' ': + i += 1 + while slen > 0 and s[slen-1] == ' ': + slen -= 
1 + + if s[i] == '(' and s[slen-1] == ')': + i += 1 + slen -= 1 + # ignore whitespace after bracket + while i < slen and s[i] == ' ': + i += 1 + + # extract first number + realstart = i + pc = s[i] + while i < slen and s[i] != ' ': + if s[i] in ('+','-') and pc not in ('e','E') and i != realstart: + break + pc = s[i] + i += 1 + + realstop = i + + # ignore whitespace + while i < slen and s[i] == ' ': + i += 1 + + # return appropriate strings is only one number is there + if i >= slen: + newstop = realstop - 1 + if newstop < 0: + raise ValueError + if s[newstop] in ('j', 'J'): + if realstart == newstop: + imagpart = '1.0' + elif realstart == newstop-1 and s[realstart] == '+': + imagpart = '1.0' + elif realstart == newstop-1 and s[realstart] == '-': + imagpart = '-1.0' + else: + imagpart = s[realstart:newstop] + return '0.0', imagpart + else: + return s[realstart:realstop], '0.0' + + # find sign for imaginary part + if s[i] == '-' or s[i] == '+': + imagsign = s[i] + if imagsign == ' ': + raise ValueError + + i+=1 + # whitespace + while i < slen and s[i] == ' ': + i += 1 + if i >= slen: + raise ValueError + + imagstart = i + pc = s[i] + while i < slen and s[i] != ' ': + if s[i] in ('+','-') and pc not in ('e','E'): + break + pc = s[i] + i += 1 + + imagstop = i - 1 + if imagstop < 0: + raise ValueError + if s[imagstop] not in ('j','J'): + raise ValueError + if imagstop < imagstart: + raise ValueError + + while i A-Bj""" + return space.newcomplex(self.realval, -self.imagval) + registerimplementation(W_ComplexObject) w_one = W_ComplexObject(1, 0) @@ -250,11 +478,6 @@ def float__Complex(space, w_complex): raise OperationError(space.w_TypeError, space.wrap("can't convert complex to float; use abs(z)")) -def complex_conjugate__Complex(space, w_self): - #w_real = space.call_function(space.w_float,space.wrap(w_self.realval)) - #w_imag = space.call_function(space.w_float,space.wrap(-w_self.imagval)) - return space.newcomplex(w_self.realval,-w_self.imagval) - def format_float(x, code, precision): # like float2string, except that the ".0" is not necessary if isinf(x): @@ -291,5 +514,26 @@ def format__Complex_ANY(space, w_complex, w_format_spec): return newformat.run_formatter(space, w_format_spec, "format_complex", w_complex) -from pypy.objspace.std import complextype -register_all(vars(), complextype) +def complexwprop(name): + def fget(space, w_obj): + from pypy.objspace.std.complexobject import W_ComplexObject + if not isinstance(w_obj, W_ComplexObject): + raise OperationError(space.w_TypeError, + space.wrap("descriptor is for 'complex'")) + return space.newfloat(getattr(w_obj, name)) + return GetSetProperty(fget) + +W_ComplexObject.typedef = StdTypeDef("complex", + __doc__ = """complex(real[, imag]) -> complex number + +Create a complex number from a real part and an optional imaginary part. 
+This is equivalent to (real + imag*1j) where imag defaults to 0.""", + __new__ = interp2app(W_ComplexObject.descr__new__), + __getnewargs__ = interp2app(W_ComplexObject.descr___getnewargs__), + real = complexwprop('realval'), + imag = complexwprop('imagval'), + conjugate = interp2app(W_ComplexObject.descr_conjugate) + ) + +W_ComplexObject.typedef.registermethods(globals()) +register_all(vars(), globals()) diff --git a/pypy/objspace/std/complextype.py b/pypy/objspace/std/complextype.py deleted file mode 100644 --- a/pypy/objspace/std/complextype.py +++ /dev/null @@ -1,257 +0,0 @@ -from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.noneobject import W_NoneObject -from pypy.objspace.std.stdtypedef import GetSetProperty, StdTypeDef -from pypy.objspace.std.stdtypedef import StdObjSpaceMultiMethod -from rpython.rlib.rfloat import string_to_float -from rpython.rlib.rstring import ParseStringError - -# ERRORCODES - -ERR_WRONG_SECOND = "complex() can't take second arg if first is a string" -ERR_MALFORMED = "complex() arg is a malformed string" - -complex_conjugate = StdObjSpaceMultiMethod('conjugate', 1, - doc="(A+Bj).conjugate() -> A-Bj") - -register_all(vars(),globals()) - -def _split_complex(s): - slen = len(s) - if slen == 0: - raise ValueError - realstart = 0 - realstop = 0 - imagstart = 0 - imagstop = 0 - imagsign = ' ' - i = 0 - # ignore whitespace at beginning and end - while i < slen and s[i] == ' ': - i += 1 - while slen > 0 and s[slen-1] == ' ': - slen -= 1 - - if s[i] == '(' and s[slen-1] == ')': - i += 1 - slen -= 1 - # ignore whitespace after bracket - while i < slen and s[i] == ' ': - i += 1 - - # extract first number - realstart = i - pc = s[i] - while i < slen and s[i] != ' ': - if s[i] in ('+','-') and pc not in ('e','E') and i != realstart: - break - pc = s[i] - i += 1 - - realstop = i - - # ignore whitespace - while i < slen and s[i] == ' ': - i += 1 - - # return appropriate strings is only one number is there - if i >= slen: - newstop = realstop - 1 - if newstop < 0: - raise ValueError - if s[newstop] in ('j', 'J'): - if realstart == newstop: - imagpart = '1.0' - elif realstart == newstop-1 and s[realstart] == '+': - imagpart = '1.0' - elif realstart == newstop-1 and s[realstart] == '-': - imagpart = '-1.0' - else: - imagpart = s[realstart:newstop] - return '0.0', imagpart - else: - return s[realstart:realstop], '0.0' - - # find sign for imaginary part - if s[i] == '-' or s[i] == '+': - imagsign = s[i] - if imagsign == ' ': - raise ValueError - - i+=1 - # whitespace - while i < slen and s[i] == ' ': - i += 1 - if i >= slen: - raise ValueError - - imagstart = i - pc = s[i] - while i < slen and s[i] != ' ': - if s[i] in ('+','-') and pc not in ('e','E'): - break - pc = s[i] - i += 1 - - imagstop = i - 1 - if imagstop < 0: - raise ValueError - if s[imagstop] not in ('j','J'): - raise ValueError - if imagstop < imagstart: - raise ValueError - - while i complex number - -Create a complex number from a real part and an optional imaginary part. 
-This is equivalent to (real + imag*1j) where imag defaults to 0.""", - __new__ = interp2app(descr__new__), - __getnewargs__ = interp2app(descr___getnewargs__), - real = complexwprop('realval'), - imag = complexwprop('imagval'), - ) - -complex_typedef.registermethods(globals()) diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -33,7 +33,6 @@ class result: from pypy.objspace.std.objecttype import object_typedef from pypy.objspace.std.floattype import float_typedef - from pypy.objspace.std.complextype import complex_typedef from pypy.objspace.std.typeobject import type_typedef from pypy.objspace.std.slicetype import slice_typedef from pypy.objspace.std.nonetype import none_typedef @@ -58,6 +57,7 @@ from pypy.objspace.std import typeobject from pypy.objspace.std import sliceobject from pypy.objspace.std import longobject + from pypy.objspace.std import complexobject from pypy.objspace.std import noneobject from pypy.objspace.std import iterobject from pypy.objspace.std import unicodeobject @@ -82,6 +82,7 @@ self.pythontypes.append(intobject.W_IntObject.typedef) self.pythontypes.append(boolobject.W_BoolObject.typedef) self.pythontypes.append(longobject.W_LongObject.typedef) + self.pythontypes.append(complexobject.W_ComplexObject.typedef) # the set of implementation types self.typeorder = { diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -270,7 +270,7 @@ return W_ComplexObject(realval, imagval) def unpackcomplex(self, w_complex): - from pypy.objspace.std.complextype import unpackcomplex + from pypy.objspace.std.complexobject import unpackcomplex return unpackcomplex(self, w_complex) def newlong(self, val): # val is an int diff --git a/pypy/objspace/std/test/test_complexobject.py b/pypy/objspace/std/test/test_complexobject.py --- a/pypy/objspace/std/test/test_complexobject.py +++ b/pypy/objspace/std/test/test_complexobject.py @@ -1,7 +1,6 @@ import py from pypy.objspace.std.complexobject import W_ComplexObject, \ - pow__Complex_Complex_ANY -from pypy.objspace.std import complextype as cobjtype + pow__Complex_Complex_ANY, _split_complex from pypy.objspace.std.multimethod import FailedToImplement from pypy.objspace.std import StdObjSpace @@ -21,7 +20,7 @@ _t_complex(r,i) def test_parse_complex(self): - f = cobjtype._split_complex + f = _split_complex def test_cparse(cnum, realnum, imagnum): result = f(cnum) assert len(result) == 2 From noreply at buildbot.pypy.org Sun Feb 23 22:16:39 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Sun, 23 Feb 2014 22:16:39 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: IN-PROGRESS: Kill binary SMMs of complex. Message-ID: <20140223211639.B4D0D1C02EA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69300:e8346b7b8d72 Date: 2014-02-23 22:15 +0100 http://bitbucket.org/pypy/pypy/changeset/e8346b7b8d72/ Log: IN-PROGRESS: Kill binary SMMs of complex.
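The same migration is applied here to the binary operators: each add__Complex_Complex-style multimethod becomes a descr_ method on W_ComplexObject, and the implicit delegation rules (delegate_Bool2Complex and friends, removed from model.py further down) are replaced by an explicit to_complex() coercion helper. A condensed, hypothetical sketch of that shape, written as a pure-Python stand-in that borrows the names from the diff and omits most cases and methods:

    # Trimmed illustration only; the real code lives on W_ComplexObject
    # in pypy/objspace/std/complexobject.py.
    def to_complex(space, w_obj):
        # explicit coercion instead of the removed delegate_*2Complex entries
        if isinstance(w_obj, W_ComplexObject):
            return w_obj
        if space.isinstance_w(w_obj, space.w_float):
            return W_ComplexObject(w_obj.floatval, 0.0)
        # bool, int and long are handled the same way in the diff below
        raise TypeError("case omitted from this sketch")

    class W_ComplexObject(object):      # stand-in for the real wrapped class
        def __init__(self, realval, imagval):
            self.realval = realval
            self.imagval = imagval

        def descr_add(self, space, w_rhs):
            # was add__Complex_Complex(space, w_complex1, w_complex2)
            w_rhs = to_complex(space, w_rhs)
            return W_ComplexObject(self.realval + w_rhs.realval,
                                   self.imagval + w_rhs.imagval)

        def descr_radd(self, space, w_lhs):
            w_lhs = to_complex(space, w_lhs)
            return W_ComplexObject(w_lhs.realval + self.realval,
                                   w_lhs.imagval + self.imagval)

    # In the typedef these are exposed as
    #   __add__  = interp2app(W_ComplexObject.descr_add),
    #   __radd__ = interp2app(W_ComplexObject.descr_radd),
    # and the corresponding pairtype/SMM entries disappear.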
diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -343,23 +343,95 @@ """(A+Bj).conjugate() -> A-Bj""" return space.newcomplex(self.realval, -self.imagval) + def descr_add(self, space, w_rhs): + w_rhs = to_complex(space, w_rhs) + return W_ComplexObject(self.realval + w_rhs.realval, + self.imagval + w_rhs.imagval) + + def descr_radd(self, space, w_lhs): + w_lhs = to_complex(space, w_lhs) + return W_ComplexObject(w_lhs.realval + self.realval, + w_lhs.imagval + self.imagval) + + def descr_sub(self, space, w_rhs): + w_rhs = to_complex(space, w_rhs) + return W_ComplexObject(self.realval - w_rhs.realval, + self.imagval - w_rhs.imagval) + + def descr_rsub(self, space, w_lhs): + w_lhs = to_complex(space, w_lhs) + return W_ComplexObject(w_lhs.realval - self.realval, + w_lhs.imagval - self.imagval) + + def descr_mul(self, space, w_rhs): + w_rhs = to_complex(space, w_rhs) + return self.mul(w_rhs) + + def descr_truediv(self, space, w_rhs): + w_rhs = to_complex(space, w_rhs) + try: + return self.div(w_rhs) + except ZeroDivisionError, e: + raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) + + def descr_floordiv(self, space, w_rhs): + w_rhs = to_complex(space, w_rhs) + w_rhs = to_complex(space, w_rhs) + # don't care about the slight slowdown you get from using divmod + try: + return self.divmod(space, w_rhs)[0] + except ZeroDivisionError, e: + raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) + + def descr_mod(self, space, w_rhs): + w_rhs = to_complex(space, w_rhs) + try: + return self.divmod(space, w_rhs)[1] + except ZeroDivisionError, e: + raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) + + def descr_divmod(self, space, w_rhs): + w_rhs = to_complex(space, w_rhs) + try: + div, mod = self.divmod(space, w_rhs) + except ZeroDivisionError, e: + raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) + return space.newtuple([div, mod]) + + @unwrap_spec(w_third_arg=WrappedDefault(None)) + def descr_pow(self, space, w_exponent, w_third_arg): + w_exponent = to_complex(space, w_exponent) + if not space.is_w(w_third_arg, space.w_None): + raise OperationError(space.w_ValueError, space.wrap('complex modulo')) + try: + r = w_exponent.realval + if w_exponent.imagval == 0.0 and -100.0 <= r <= 100.0 and r == int(r): + w_p = self.pow_small_int(int(r)) + else: + w_p = self.pow(w_exponent) + except ZeroDivisionError: + raise OperationError(space.w_ZeroDivisionError, space.wrap("0.0 to a negative or complex power")) + except OverflowError: + raise OperationError(space.w_OverflowError, space.wrap("complex exponentiation")) + return w_p + registerimplementation(W_ComplexObject) w_one = W_ComplexObject(1, 0) -def delegate_Bool2Complex(space, w_bool): - return W_ComplexObject(w_bool.intval, 0.0) - -def delegate_Int2Complex(space, w_int): - return W_ComplexObject(w_int.intval, 0.0) - -def delegate_Long2Complex(space, w_long): - dval = w_long.tofloat(space) - return W_ComplexObject(dval, 0.0) - -def delegate_Float2Complex(space, w_float): - return W_ComplexObject(w_float.floatval, 0.0) +def to_complex(space, w_obj): + if isinstance(w_obj, W_ComplexObject): + return w_obj + if space.isinstance_w(w_obj, space.w_bool): + return W_ComplexObject(w_obj.intval, 0.0) + if space.isinstance_w(w_obj, space.w_int): + return W_ComplexObject(w_obj.intval, 0.0) + if space.isinstance_w(w_obj, space.w_long): + dval = w_obj.tofloat(space) + return 
W_ComplexObject(dval, 0.0) + if space.isinstance_w(w_obj, space.w_float): + return W_ComplexObject(w_obj.floatval, 0.0) def hash__Complex(space, w_value): hashreal = _hash_float(space, w_value.realval) @@ -367,60 +439,6 @@ combined = intmask(hashreal + 1000003 * hashimg) return space.newint(combined) -def add__Complex_Complex(space, w_complex1, w_complex2): - return W_ComplexObject(w_complex1.realval + w_complex2.realval, - w_complex1.imagval + w_complex2.imagval) - -def sub__Complex_Complex(space, w_complex1, w_complex2): - return W_ComplexObject(w_complex1.realval - w_complex2.realval, - w_complex1.imagval - w_complex2.imagval) - -def mul__Complex_Complex(space, w_complex1, w_complex2): - return w_complex1.mul(w_complex2) - -def div__Complex_Complex(space, w_complex1, w_complex2): - try: - return w_complex1.div(w_complex2) - except ZeroDivisionError, e: - raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) - -truediv__Complex_Complex = div__Complex_Complex - -def mod__Complex_Complex(space, w_complex1, w_complex2): - try: - return w_complex1.divmod(space, w_complex2)[1] - except ZeroDivisionError, e: - raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) - -def divmod__Complex_Complex(space, w_complex1, w_complex2): - try: - div, mod = w_complex1.divmod(space, w_complex2) - except ZeroDivisionError, e: - raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) - return space.newtuple([div, mod]) - -def floordiv__Complex_Complex(space, w_complex1, w_complex2): - # don't care about the slight slowdown you get from using divmod - try: - return w_complex1.divmod(space, w_complex2)[0] - except ZeroDivisionError, e: - raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) - -def pow__Complex_Complex_ANY(space, w_complex, w_exponent, thirdArg): - if not space.is_w(thirdArg, space.w_None): - raise OperationError(space.w_ValueError, space.wrap('complex modulo')) - try: - r = w_exponent.realval - if w_exponent.imagval == 0.0 and -100.0 <= r <= 100.0 and r == int(r): - w_p = w_complex.pow_small_int(int(r)) - else: - w_p = w_complex.pow(w_exponent) - except ZeroDivisionError: - raise OperationError(space.w_ZeroDivisionError, space.wrap("0.0 to a negative or complex power")) - except OverflowError: - raise OperationError(space.w_OverflowError, space.wrap("complex exponentiation")) - return w_p - def neg__Complex(space, w_complex): return W_ComplexObject(-w_complex.realval, -w_complex.imagval) @@ -473,6 +491,7 @@ (w_complex.imagval != 0.0)) def coerce__Complex_Complex(space, w_complex1, w_complex2): + #w_complex2 = to_complex(w_complex2) return space.newtuple([w_complex1, w_complex2]) def float__Complex(space, w_complex): @@ -532,7 +551,19 @@ __getnewargs__ = interp2app(W_ComplexObject.descr___getnewargs__), real = complexwprop('realval'), imag = complexwprop('imagval'), - conjugate = interp2app(W_ComplexObject.descr_conjugate) + conjugate = interp2app(W_ComplexObject.descr_conjugate), + + __add__ = interp2app(W_ComplexObject.descr_add), + __radd__ = interp2app(W_ComplexObject.descr_radd), + __sub__ = interp2app(W_ComplexObject.descr_sub), + __rsub__ = interp2app(W_ComplexObject.descr_rsub), + __mul__ = interp2app(W_ComplexObject.descr_mul), + __div__ = interp2app(W_ComplexObject.descr_truediv), + __truediv__ = interp2app(W_ComplexObject.descr_truediv), + __floordiv__ = interp2app(W_ComplexObject.descr_floordiv), + __mod__ = interp2app(W_ComplexObject.descr_mod), + __divmod__ = interp2app(W_ComplexObject.descr_divmod), + __pow__ = 
interp2app(W_ComplexObject.descr_pow), ) W_ComplexObject.typedef.registermethods(globals()) diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -136,11 +136,9 @@ self.typeorder[boolobject.W_BoolObject] += [ (floatobject.W_FloatObject, floatobject.delegate_Bool2Float), - (complexobject.W_ComplexObject, complexobject.delegate_Bool2Complex), ] self.typeorder[intobject.W_IntObject] += [ (floatobject.W_FloatObject, floatobject.delegate_Int2Float), - (complexobject.W_ComplexObject, complexobject.delegate_Int2Complex), ] if config.objspace.std.withsmalllong: from pypy.objspace.std import smalllongobject @@ -150,12 +148,6 @@ ] self.typeorder[longobject.W_LongObject] += [ (floatobject.W_FloatObject, floatobject.delegate_Long2Float), - (complexobject.W_ComplexObject, - complexobject.delegate_Long2Complex), - ] - self.typeorder[floatobject.W_FloatObject] += [ - (complexobject.W_ComplexObject, - complexobject.delegate_Float2Complex), ] if config.objspace.std.withstrbuf: diff --git a/pypy/objspace/std/test/test_complexobject.py b/pypy/objspace/std/test/test_complexobject.py --- a/pypy/objspace/std/test/test_complexobject.py +++ b/pypy/objspace/std/test/test_complexobject.py @@ -1,6 +1,5 @@ import py -from pypy.objspace.std.complexobject import W_ComplexObject, \ - pow__Complex_Complex_ANY, _split_complex +from pypy.objspace.std.complexobject import W_ComplexObject, _split_complex from pypy.objspace.std.multimethod import FailedToImplement from pypy.objspace.std import StdObjSpace @@ -75,7 +74,7 @@ assert _powi((0.0,1.0),2) == (-1.0,0.0) c = W_ComplexObject(0.0,1.0) p = W_ComplexObject(2.0,0.0) - r = pow__Complex_Complex_ANY(self.space,c,p,self.space.wrap(None)) + r = c.descr_pow(self.space, p, self.space.wrap(None)) assert r.realval == -1.0 assert r.imagval == 0.0 From noreply at buildbot.pypy.org Sun Feb 23 22:21:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Sun, 23 Feb 2014 22:21:14 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: In-progress: another refactoring simplifying various things, after discovering that we Message-ID: <20140223212114.91B8E1C02EA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r817:8ee070f3575c Date: 2014-02-23 22:21 +0100 http://bitbucket.org/pypy/stmgc/changeset/8ee070f3575c/ Log: In-progress: another refactoring simplifying various things, after discovering that we can after all run a minor collection after each transaction... At least running about 30'000 opcodes in PyPy consumes typically 400 KB of nursery, and lowering the nursery size from 4 MB down to 400 KB has only a 1.5% performance impact. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -12,6 +12,9 @@ void _stm_write_slowpath(object_t *obj) { assert(_running_transaction()); + assert(!_is_in_nursery(obj)); + abort();//... 
+#if 0 /* for old objects from the same transaction, we are done now */ if (obj_from_same_transaction(obj)) { @@ -51,9 +54,10 @@ /* claim the write-lock for this object */ retry:; - uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - READMARKER_START; + uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - WRITELOCK_START; uint8_t lock_num = STM_PSEGMENT->write_lock_num; uint8_t prev_owner; + assert((intptr_t)lock_idx >= 0); prev_owner = __sync_val_compare_and_swap(&write_locks[lock_idx], 0, lock_num); @@ -73,6 +77,7 @@ assert(!(obj->stm_flags & GCFLAG_WRITE_BARRIER_CALLED)); obj->stm_flags |= GCFLAG_WRITE_BARRIER_CALLED; LIST_APPEND(STM_PSEGMENT->modified_objects, obj); +#endif } static void reset_transaction_read_version(void) @@ -137,13 +142,11 @@ reset_transaction_read_version(); } - STM_PSEGMENT->min_read_version_outside_nursery = - STM_SEGMENT->transaction_read_version; + assert(list_is_empty(STM_PSEGMENT->modified_old_objects)); - assert(list_is_empty(STM_PSEGMENT->modified_objects)); - assert(list_is_empty(STM_PSEGMENT->creation_markers)); - - align_nursery_at_transaction_start(); +#ifdef STM_TESTS + check_nursery_at_transaction_start(); +#endif } @@ -158,8 +161,6 @@ long remote_num = 1 - STM_SEGMENT->segment_num; char *remote_base = get_segment_base(remote_num); uint8_t remote_version = get_segment(remote_num)->transaction_read_version; - uint8_t remote_min_outside_nursery = - get_priv_segment(remote_num)->min_read_version_outside_nursery; switch (get_priv_segment(remote_num)->transaction_state) { case TS_NONE: @@ -169,11 +170,10 @@ } LIST_FOREACH_R( - STM_PSEGMENT->modified_objects, + STM_PSEGMENT->modified_old_objects, object_t * /*item*/, ({ - if (was_read_remote(remote_base, item, remote_version, - remote_min_outside_nursery)) { + if (was_read_remote(remote_base, item, remote_version)) { /* A write-read conflict! 
*/ contention_management(remote_num, false); @@ -196,25 +196,24 @@ get_priv_segment(remote_num)->transaction_state == TS_INEVITABLE); LIST_FOREACH_R( - STM_PSEGMENT->modified_objects, + STM_PSEGMENT->modified_old_objects, object_t * /*item*/, ({ if (remote_active) { assert(!was_read_remote(remote_base, item, - get_segment(remote_num)->transaction_read_version, - get_priv_segment(remote_num)-> - min_read_version_outside_nursery)); + get_segment(remote_num)->transaction_read_version)); } /* clear the write-lock (note that this runs with all other threads paused, so no need to be careful about ordering) */ - uintptr_t lock_idx = (((uintptr_t)item) >> 4) - READMARKER_START; + uintptr_t lock_idx = (((uintptr_t)item) >> 4) - WRITELOCK_START; + assert((intptr_t)lock_idx >= 0); assert(write_locks[lock_idx] == STM_PSEGMENT->write_lock_num); write_locks[lock_idx] = 0; - /* remove again the WRITE_BARRIER_CALLED flag */ - assert(item->stm_flags & GCFLAG_WRITE_BARRIER_CALLED); - item->stm_flags &= ~GCFLAG_WRITE_BARRIER_CALLED; + /* set again the WRITE_BARRIER flag */ + assert((item->stm_flags & GCFLAG_WRITE_BARRIER) == 0); + item->stm_flags |= GCFLAG_WRITE_BARRIER; /* copy the modified object to the other segment */ char *src = REAL_ADDRESS(local_base, item); @@ -223,7 +222,7 @@ memcpy(dst, src, size); })); - list_clear(STM_PSEGMENT->modified_objects); + list_clear(STM_PSEGMENT->modified_old_objects); } static void _finish_transaction(void) @@ -232,29 +231,23 @@ release_thread_segment(tl); STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; - list_clear(STM_PSEGMENT->old_objects_pointing_to_young); + if (STM_PSEGMENT->overflow_objects_pointing_to_nursery != NULL) { + list_free(STM_PSEGMENT->overflow_objects_pointing_to_nursery); + STM_PSEGMENT->overflow_objects_pointing_to_nursery = NULL; + } } void stm_commit_transaction(void) { + minor_collection(); + mutex_lock(); assert(STM_PSEGMENT->safe_point = SP_RUNNING); STM_PSEGMENT->safe_point = SP_SAFE_POINT_CAN_COLLECT; restart: - switch (STM_PSEGMENT->transaction_state) { - - case TS_REGULAR: - case TS_INEVITABLE: - break; - - case TS_MUST_ABORT: - abort_with_mutex(); - - default: - assert(!"commit: bad transaction_state"); - } + abort_if_needed(); /* wait until the other thread is at a safe-point */ if (!try_wait_for_other_safe_points(SP_SAFE_POINT_CANNOT_COLLECT)) @@ -275,10 +268,11 @@ /* copy modified object versions to other threads */ push_modified_to_other_segments(); - /* reset the creation markers, and if necessary (i.e. if the page the - data is on is not SHARED) copy the data to other threads. The - hope is that it's rarely necessary. */ - reset_all_creation_markers_and_push_created_data(); + /* update 'overflow_number' if needed */ + if (STM_PSEGMENT->overflow_objects_pointing_to_nursery != NULL) { + highest_overflow_number += GCFLAG_OVERFLOW_NUMBER_bit0; + STM_PSEGMENT->overflow_number = highest_overflow_number; + } /* done */ _finish_transaction(); @@ -297,6 +291,8 @@ static void reset_modified_from_other_segments(void) { + abort();//... 
+#if 0 /* pull the right versions from other threads in order to reset our pages as part of an abort */ long remote_num = 1 - STM_SEGMENT->segment_num; @@ -304,7 +300,7 @@ char *remote_base = get_segment_base(remote_num); LIST_FOREACH_R( - STM_PSEGMENT->modified_objects, + STM_PSEGMENT->modified_old_objects, object_t * /*item*/, ({ /* all objects in 'modified_objects' have this flag */ @@ -331,12 +327,14 @@ write_fence(); /* clear the write-lock */ - uintptr_t lock_idx = (((uintptr_t)item) >> 4) - READMARKER_START; + uintptr_t lock_idx = (((uintptr_t)item) >> 4) - WRITELOCK_START; + assert((intptr_t)lock_idx >= 0); assert(write_locks[lock_idx]); write_locks[lock_idx] = 0; })); list_clear(STM_PSEGMENT->modified_objects); +#endif } static void abort_with_mutex(void) @@ -356,8 +354,6 @@ /* reset all the modified objects (incl. re-adding GCFLAG_WRITE_BARRIER) */ reset_modified_from_other_segments(); - reset_all_creation_markers(); - stm_jmpbuf_t *jmpbuf_ptr = STM_SEGMENT->jmpbuf_ptr; stm_thread_local_t *tl = STM_SEGMENT->running_thread; tl->shadowstack = STM_PSEGMENT->shadowstack_at_start_of_transaction; diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -11,8 +11,8 @@ #define NB_PAGES (1500*256) // 1500MB #define NB_SEGMENTS 2 +#define NB_SEGMENTS_MAX 240 /* don't increase NB_SEGMENTS past this */ #define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) -#define LARGE_OBJECT_WORDS 36 #define NB_NURSERY_PAGES 1024 // 4MB #define TOTAL_MEMORY (NB_PAGES * 4096UL * NB_SEGMENTS) @@ -25,23 +25,31 @@ #define FIRST_READMARKER_PAGE (READMARKER_START / 4096UL) #define NB_READMARKER_PAGES (FIRST_OBJECT_PAGE - FIRST_READMARKER_PAGE) -#define CREATMARKER_START ((FIRST_OBJECT_PAGE * 4096UL) >> 8) -#define FIRST_CREATMARKER_PAGE (CREATMARKER_START / 4096UL) +#define WRITELOCK_START ((END_NURSERY_PAGE * 4096UL) >> 4) +#define WRITELOCK_END READMARKER_END -enum { - /* this flag is not set on most objects. when stm_write() is called - on an object that is not from the current transaction, then - _stm_write_slowpath() is called, and then the flag is set to - say "called once already, no need to call again". */ - GCFLAG_WRITE_BARRIER_CALLED = _STM_GCFLAG_WRITE_BARRIER_CALLED, - /* allocated by gcpage.c in uniformly-sized pages of small objects */ +enum /* stm_flags */ { + /* This flag is set on non-nursery objects. It forces stm_write() + to call _stm_write_slowpath(). + */ + GCFLAG_WRITE_BARRIER = _STM_GCFLAG_WRITE_BARRIER, + + /* This flag is set by gcpage.c for all objects living in + uniformly-sized pages of small objects. + */ GCFLAG_SMALL_UNIFORM = 0x02, + + /* All remaining bits of the 32-bit 'stm_flags' field are taken by + the "overflow number". This is a number that identifies the + "overflow objects" from the current transaction among all old + objects. More precisely, overflow objects are objects from the + current transaction that have been flushed out of the nursery, + which occurs if the same transaction allocates too many objects. 
+ */ + GCFLAG_OVERFLOW_NUMBER_bit0 = 0x04 /* must be last */ }; -#define CROSS_PAGE_BOUNDARY(start, stop) \ - (((uintptr_t)(start)) / 4096UL != ((uintptr_t)(stop)) / 4096UL) - /************************************************************/ @@ -52,44 +60,61 @@ struct stm_priv_segment_info_s { struct stm_segment_info_s pub; - struct list_s *old_objects_pointing_to_young; - struct list_s *modified_objects; - struct list_s *creation_markers; + + /* List of overflowed objects (from the same transaction but outside + the nursery) on which the write-barrier was triggered, so that + they likely contain a pointer to a nursery object */ + struct list_s *overflow_objects_pointing_to_nursery; + + /* List of old objects (older than the current transaction) that the + current transaction attempts to modify */ + struct list_s *modified_old_objects; + + /* Start time: to know approximately for how long a transaction has + been running, in contention management */ uint64_t start_time; + + /* This is the number stored in the overflowed objects (a multiple of + GCFLAG_OVERFLOW_NUMBER_bit0). It is incremented when the + transaction is done, but only if we actually overflowed any + object; otherwise, no object has got this number. */ + uint32_t overflow_number; + + /* The marker stored in the global 'write_locks' array to mean + "this segment has modified this old object". */ uint8_t write_lock_num; - uint8_t safe_point; /* one of the SP_xxx constants */ - uint8_t transaction_state; /* one of the TS_xxx constants */ - uint8_t min_read_version_outside_nursery; /* see was_read_remote() */ - uintptr_t real_nursery_section_end; + + /* The thread's safe-point state, one of the SP_xxx constants */ + uint8_t safe_point; + + /* The transaction status, one of the TS_xxx constants */ + uint8_t transaction_state; + + /* In case of abort, we restore the 'shadowstack' field. 
*/ object_t **shadowstack_at_start_of_transaction; }; -enum { +enum /* safe_point */ { SP_NO_TRANSACTION=0, SP_RUNNING, SP_SAFE_POINT_CANNOT_COLLECT, SP_SAFE_POINT_CAN_COLLECT, }; -enum { +enum /* transaction_state */ { TS_NONE=0, TS_REGULAR, TS_INEVITABLE, TS_MUST_ABORT, }; -enum { /* for stm_creation_marker_t */ - CM_NOT_CURRENT_TRANSACTION = 0x00, - CM_CURRENT_TRANSACTION_OUTSIDE_NURSERY = 0x01, - CM_CURRENT_TRANSACTION_IN_NURSERY = 0xff, -}; static char *stm_object_pages; -static stm_thread_local_t *stm_thread_locals = NULL; +static stm_thread_local_t *stm_all_thread_locals = NULL; #ifdef STM_TESTS static char *stm_other_pages; #endif -static uint8_t write_locks[READMARKER_END - READMARKER_START]; +static uint8_t write_locks[WRITELOCK_END - WRITELOCK_START]; #define REAL_ADDRESS(segment_base, src) ((segment_base) + (uintptr_t)(src)) @@ -113,14 +138,18 @@ static bool _is_tl_registered(stm_thread_local_t *tl); static bool _running_transaction(void); -static inline bool obj_from_same_transaction(object_t *obj) { - return ((stm_creation_marker_t *)(((uintptr_t)obj) >> 8))->cm != - CM_NOT_CURRENT_TRANSACTION; -} - static void teardown_core(void); static void abort_with_mutex(void) __attribute__((noreturn)); +static inline bool was_read_remote(char *base, object_t *obj, + uint8_t other_transaction_read_version) +{ + uint8_t rm = ((struct stm_read_marker_s *) + (base + (((uintptr_t)obj) >> 4)))->rm; + assert(rm <= other_transaction_read_version); + return rm == other_transaction_read_version; +} + static inline void _duck(void) { /* put a call to _duck() between two instructions that set 0 into a %gs-prefixed address and that may otherwise be replaced with @@ -129,3 +158,17 @@ llvmfix/no-memset-creation-with-addrspace.diff. */ asm("/* workaround for llvm bug */"); } + +static inline void abort_if_needed(void) { + switch (STM_PSEGMENT->transaction_state) { + case TS_REGULAR: + case TS_INEVITABLE: + break; + + case TS_MUST_ABORT: + abort_with_mutex(); + + default: + assert(!"commit: bad transaction_state"); + } +} diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -19,8 +19,7 @@ static void teardown_gcpage(void) { - memset(small_alloc_shared, 0, sizeof(small_alloc_shared)); - memset(small_alloc_privtz, 0, sizeof(small_alloc_privtz)); + memset(small_alloc, 0, sizeof(small_alloc)); free_uniform_pages = NULL; } @@ -56,11 +55,10 @@ return; out_of_memory: - stm_fatalerror("out of memory!\n"); + stm_fatalerror("out of memory!\n"); /* XXX */ } -static char *_allocate_small_slowpath( - struct small_alloc_s small_alloc[], uint64_t size) +static char *_allocate_small_slowpath(uint64_t size) { /* not thread-safe! Use only when holding the mutex */ assert(_has_mutex()); @@ -72,8 +70,10 @@ } +#if 0 static char *allocate_outside_nursery_large(uint64_t size) { + abort(); //XXX review /* not thread-safe! Use only when holding the mutex */ assert(_has_mutex()); @@ -92,11 +92,9 @@ setup_N_pages(uninitialized_page_start, npages); uninitialized_page_start += npages * 4096UL; } - - assert(get_single_creation_marker((stm_char *)(addr - stm_object_pages)) - == 0); return addr; } +#endif object_t *_stm_allocate_old(ssize_t size_rounded_up) { diff --git a/c7/stm/gcpage.h b/c7/stm/gcpage.h --- a/c7/stm/gcpage.h +++ b/c7/stm/gcpage.h @@ -22,35 +22,17 @@ contiguous range of unallocated objs */ }; -/* For each small request size, we have three independent chained lists - of address ranges: - - - 'small_alloc_shared': ranges are within pages that are likely to be - shared. 
We don't know for sure, because pages can be privatized - by normal run of stm_write(). - - - 'small_alloc_sh_old': moved from 'small_alloc_shared' when we're - looking for a range with the creation_marker set; this collects - the unsuitable ranges, i.e. the ones with already at least one - object and no creation marker. - - - 'small_alloc_privtz': ranges are within pages that are privatized. -*/ -static struct small_alloc_s small_alloc_shared[GC_N_SMALL_REQUESTS]; -static struct small_alloc_s small_alloc_sh_old[GC_N_SMALL_REQUESTS]; -static struct small_alloc_s small_alloc_privtz[GC_N_SMALL_REQUESTS]; +static struct small_alloc_s small_alloc[GC_N_SMALL_REQUESTS]; static char *free_uniform_pages; static void setup_gcpage(void); static void teardown_gcpage(void); -static char *allocate_outside_nursery_large(uint64_t size); +//static char *allocate_outside_nursery_large(uint64_t size); -static char *_allocate_small_slowpath( - struct small_alloc_s small_alloc[], uint64_t size); +static char *_allocate_small_slowpath(uint64_t size); -static inline char *allocate_outside_nursery_small( - struct small_alloc_s small_alloc[], uint64_t size) +static inline char *allocate_outside_nursery_small(uint64_t size) { uint64_t index = size / 8; OPT_ASSERT(2 <= index); @@ -58,7 +40,7 @@ char *result = small_alloc[index].next_object; if (result == NULL) - return _allocate_small_slowpath(small_alloc, size); + return _allocate_small_slowpath(size); char *following; if (small_alloc[index].range_last == result) { diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -38,35 +38,28 @@ bool _stm_was_read(object_t *obj) { return was_read_remote(STM_SEGMENT->segment_base, obj, - STM_SEGMENT->transaction_read_version, - STM_PSEGMENT->min_read_version_outside_nursery); + STM_SEGMENT->transaction_read_version); } bool _stm_was_written(object_t *obj) { - return !!((((stm_creation_marker_t *)(((uintptr_t)obj) >> 8))->cm | - obj->stm_flags) & _STM_GCFLAG_WRITE_BARRIER_CALLED); -} - -uint8_t _stm_creation_marker(object_t *obj) -{ - return ((stm_creation_marker_t *)(((uintptr_t)obj) >> 8))->cm; + return (obj->stm_flags & _STM_GCFLAG_WRITE_BARRIER) == 0; } #ifdef STM_TESTS -object_t *_stm_enum_old_objects_pointing_to_young(void) +object_t *_stm_enum_overflow_objects_pointing_to_nursery(void) { static long index = 0; - struct list_s *lst = STM_PSEGMENT->old_objects_pointing_to_young; + struct list_s *lst = STM_PSEGMENT->overflow_objects_pointing_to_nursery; if (index < list_count(lst)) return (object_t *)list_item(lst, index++); index = 0; return (object_t *)-1; } -object_t *_stm_enum_modified_objects(void) +object_t *_stm_enum_modified_old_objects(void) { static long index = 0; - struct list_s *lst = STM_PSEGMENT->modified_objects; + struct list_s *lst = STM_PSEGMENT->modified_old_objects; if (index < list_count(lst)) return (object_t *)list_item(lst, index++); index = 0; diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -4,61 +4,38 @@ /************************************************************/ +/* xxx later: divide the nursery into sections, and zero them + incrementally. For now we avoid the mess of maintaining a + description of which parts of the nursery are already zeroed + and which ones are not (caused by the fact that each + transaction fills up a different amount). 
+*/ + #define NURSERY_START (FIRST_NURSERY_PAGE * 4096UL) #define NURSERY_SIZE (NB_NURSERY_PAGES * 4096UL) +#define NURSERY_END (NURSERY_START + NURSERY_SIZE) -/* an object larger than LARGE_OBJECT will never be allocated in - the nursery. */ -#define LARGE_OBJECT (65*1024) - -/* the nursery is divided in "sections" this big. Each section is - allocated to a single running thread. */ -#define NURSERY_SECTION_SIZE (128*1024) - -/* if objects are larger than this limit but smaller than LARGE_OBJECT, - then they might be allocted outside sections but still in the nursery. */ -#define MEDIUM_OBJECT (6*1024) - -/* size in bytes of the "line". Should be equal to the line used by - stm_creation_marker_t. */ -#define NURSERY_LINE 256 - -/************************************************************/ - - -static union { - struct { - uint64_t used; /* number of bytes from the nursery used so far */ - uint64_t initial_value_of_used; - }; - char reserved[64]; -} nursery_ctl __attribute__((aligned(64))); - -static struct list_s *old_objects_pointing_to_young; +static uintptr_t _stm_nursery_start; +uintptr_t _stm_nursery_end; /************************************************************/ static void setup_nursery(void) { - assert(NURSERY_LINE == (1 << 8)); /* from stm_creation_marker_t */ - assert((NURSERY_SECTION_SIZE % NURSERY_LINE) == 0); - assert(MEDIUM_OBJECT < LARGE_OBJECT); - assert(LARGE_OBJECT < NURSERY_SECTION_SIZE); - nursery_ctl.used = 0; - old_objects_pointing_to_young = list_create(); + assert(_STM_FAST_ALLOC <= NURSERY_SIZE); + _stm_nursery_start = NURSERY_START; + _stm_nursery_end = NURSERY_END; } static void teardown_nursery(void) { - list_free(old_objects_pointing_to_young); - nursery_ctl.initial_value_of_used = 0; } static inline bool _is_in_nursery(object_t *obj) { assert((uintptr_t)obj >= NURSERY_START); - return (uintptr_t)obj < NURSERY_START + NURSERY_SIZE; + return (uintptr_t)obj < NURSERY_END; } bool _stm_in_nursery(object_t *obj) @@ -66,29 +43,12 @@ return _is_in_nursery(obj); } +#if 0 static bool _is_young(object_t *obj) { return _is_in_nursery(obj); /* for now */ } - -static inline bool was_read_remote(char *base, object_t *obj, - uint8_t other_transaction_read_version, - uint8_t min_read_version_outside_nursery) -{ - uint8_t rm = ((struct stm_read_marker_s *) - (base + (((uintptr_t)obj) >> 4)))->rm; - - assert(min_read_version_outside_nursery <= - other_transaction_read_version); - assert(rm <= other_transaction_read_version); - - if (_is_in_nursery(obj)) { - return rm == other_transaction_read_version; - } - else { - return rm >= min_read_version_outside_nursery; - } -} +#endif /************************************************************/ @@ -110,8 +70,10 @@ } } +#if 0 static void minor_trace_if_young(object_t **pobj) { + abort(); //... /* takes a normal pointer to a thread-local pointer to an object */ object_t *obj = *pobj; if (obj == NULL) @@ -145,7 +107,7 @@ which has a granularity of 256 bytes. 
*/ size_t size = stmcb_size_rounded_up((struct object_s *)realobj); - uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - READMARKER_START; + uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - WRITELOCK_START; uint8_t write_lock = write_locks[lock_idx]; object_t *nobj; long i; @@ -183,7 +145,7 @@ uintptr_t lastpage= (dataofs + size - 1) / 4096UL; pages_privatize(pagenum, lastpage - pagenum + 1, false); - lock_idx = (dataofs >> 4) - READMARKER_START; + lock_idx = (dataofs >> 4) - WRITELOCK_START; assert(write_locks[lock_idx] == 0); write_locks[lock_idx] = write_lock; @@ -238,7 +200,7 @@ static void collect_roots_in_nursery(void) { - stm_thread_local_t *tl = stm_thread_locals; + stm_thread_local_t *tl = stm_all_thread_locals; do { object_t **current = tl->shadowstack; object_t **base = tl->shadowstack_base; @@ -246,7 +208,7 @@ minor_trace_if_young(current); } tl = tl->next; - } while (tl != stm_thread_locals); + } while (tl != stm_all_thread_locals); } static void trace_and_drag_out_of_nursery(object_t *obj) @@ -256,7 +218,7 @@ struct object_s *realobj = (struct object_s *)REAL_ADDRESS(get_segment_base(i), obj); - realobj->stm_flags &= ~GCFLAG_WRITE_BARRIER_CALLED; + realobj->stm_flags |= GCFLAG_WRITE_BARRIER; stmcb_trace((struct object_s *)realobj, &minor_trace_if_young); @@ -278,8 +240,9 @@ don't, it's because the same object was stored in several segment's old_objects_pointing_to_young. It's fine to ignore duplicates. */ - if ((obj->stm_flags & GCFLAG_WRITE_BARRIER_CALLED) == 0) - continue; + abort();//... + //if ((obj->stm_flags & GCFLAG_WRITE_BARRIER_CALLED) == 0) + // continue; /* The flag GCFLAG_WRITE_BARRIER_CALLED is going to be removed: no live object should have this flag set after a nursery @@ -294,13 +257,10 @@ static void reset_nursery(void) { + abort();//... /* reset the global amount-of-nursery-used-so-far */ nursery_ctl.used = nursery_ctl.initial_value_of_used; - /* reset the write locks */ - memset(write_locks + ((NURSERY_START >> 4) - READMARKER_START), - 0, NURSERY_SIZE >> 4); - long i; for (i = 0; i < NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i); @@ -321,9 +281,10 @@ } else if (other_pseg->pub.transaction_read_version < 0xff) { other_pseg->pub.transaction_read_version++; - assert(0 < other_pseg->min_read_version_outside_nursery && + abort();//... + /*assert(0 < other_pseg->min_read_version_outside_nursery && other_pseg->min_read_version_outside_nursery - < other_pseg->pub.transaction_read_version); + < other_pseg->pub.transaction_read_version);*/ } else { /* however, when the value 0xff is reached, we are stuck @@ -338,7 +299,7 @@ if (old_end > NURSERY_START) { char *creation_markers = REAL_ADDRESS(other_pseg->pub.segment_base, NURSERY_START >> 8); - assert(old_end <= NURSERY_START + NURSERY_SIZE); + assert(old_end <= NURSERY_END); memset(creation_markers, 0, (old_end - NURSERY_START) >> 8); } else { @@ -346,14 +307,17 @@ } } } +#endif -static void do_minor_collection(void) +static void minor_collection(void) { - /* all other threads are paused in safe points during the whole - minor collection */ + assert(!_has_mutex()); + abort_if_needed(); + dprintf(("minor_collection\n")); - assert(_has_mutex()); - assert(list_is_empty(old_objects_pointing_to_young)); + + abort();//... 
+#if 0 /* List of what we need to do and invariants we need to preserve ------------------------------------------------------------- @@ -390,142 +354,66 @@ reset_nursery(); pages_make_shared_again(FIRST_NURSERY_PAGE, NB_NURSERY_PAGES); +#endif } -static void restore_nursery_section_end(uintptr_t prev_value) -{ - __sync_bool_compare_and_swap(&STM_SEGMENT->v_nursery_section_end, - prev_value, - STM_PSEGMENT->real_nursery_section_end); -} - -static void stm_minor_collection(uint64_t request_size) -{ - /* Run a minor collection --- but only if we can't get 'request_size' - bytes out of the nursery; if we can, no-op. */ - mutex_lock(); - - assert(STM_PSEGMENT->safe_point == SP_RUNNING); - STM_PSEGMENT->safe_point = SP_SAFE_POINT_CAN_COLLECT; - - restart: - /* We just waited here, either from mutex_lock() or from cond_wait(), - so we should check again if another thread did the minor - collection itself */ - if (request_size <= NURSERY_SIZE - nursery_ctl.used) - goto exit; - - if (!try_wait_for_other_safe_points(SP_SAFE_POINT_CAN_COLLECT)) - goto restart; - - /* now we can run our minor collection */ - do_minor_collection(); - - exit: - STM_PSEGMENT->safe_point = SP_RUNNING; - - mutex_unlock(); -} - void stm_collect(long level) { assert(level == 0); - stm_minor_collection(-1); + minor_collection(); } /************************************************************/ -#define NURSERY_ALIGN(bytes) \ - (((bytes) + NURSERY_LINE - 1) & ~(NURSERY_LINE - 1)) -static stm_char *allocate_from_nursery(uint64_t bytes) -{ - /* may collect! */ - /* thread-safe; allocate a chunk of memory from the nursery */ - bytes = NURSERY_ALIGN(bytes); - while (1) { - uint64_t p = __sync_fetch_and_add(&nursery_ctl.used, bytes); - if (LIKELY(p + bytes <= NURSERY_SIZE)) { - return (stm_char *)(NURSERY_START + p); - } - - /* nursery full! */ - stm_minor_collection(bytes); - } -} - - -stm_char *_stm_allocate_slowpath(ssize_t size_rounded_up) +object_t *_stm_allocate_slowpath(ssize_t size_rounded_up) { /* may collect! */ STM_SEGMENT->nursery_current -= size_rounded_up; /* restore correct val */ - if (_stm_collectable_safe_point()) - return (stm_char *)stm_allocate(size_rounded_up); + restart: + stm_safe_point(); - if (size_rounded_up < MEDIUM_OBJECT) { - /* This is a small object. The current section is really full. - Allocate the next section and initialize it with zeroes. */ - stm_char *p = allocate_from_nursery(NURSERY_SECTION_SIZE); - STM_SEGMENT->nursery_current = p + size_rounded_up; + OPT_ASSERT(size_rounded_up >= 16); + OPT_ASSERT((size_rounded_up & 7) == 0); + OPT_ASSERT(size_rounded_up < _STM_FAST_ALLOC); - /* Set v_nursery_section_end, but carefully: another thread may - have forced it to be equal to NSE_SIGNAL. */ - uintptr_t end = (uintptr_t)p + NURSERY_SECTION_SIZE; - uintptr_t prev_end = STM_PSEGMENT->real_nursery_section_end; - STM_PSEGMENT->real_nursery_section_end = end; - restore_nursery_section_end(prev_end); - - memset(REAL_ADDRESS(STM_SEGMENT->segment_base, p), 0, - NURSERY_SECTION_SIZE); - - /* Also fill the corresponding creation markers with 0xff. */ - set_creation_markers(p, NURSERY_SECTION_SIZE, - CM_CURRENT_TRANSACTION_IN_NURSERY); - return p; + stm_char *p = STM_SEGMENT->nursery_current; + stm_char *end = p + size_rounded_up; + if ((uintptr_t)end <= NURSERY_END) { + STM_SEGMENT->nursery_current = end; + return (object_t *)p; } - if (size_rounded_up < LARGE_OBJECT) { - /* A medium-sized object that doesn't fit into the current - nursery section. 
Note that if by chance it does fit, then - _stm_allocate_slowpath() is not even called. This case here - is to prevent too much of the nursery to remain not used - just because we tried to allocate a medium-sized object: - doing so doesn't end the current section. */ - stm_char *p = allocate_from_nursery(size_rounded_up); - memset(REAL_ADDRESS(STM_SEGMENT->segment_base, p), 0, - size_rounded_up); - set_single_creation_marker(p, CM_CURRENT_TRANSACTION_IN_NURSERY); - return p; - } - - abort(); + minor_collection(); + goto restart; } -static void align_nursery_at_transaction_start(void) +object_t *_stm_allocate_external(ssize_t size_rounded_up) { - /* When the transaction starts, we must align the 'nursery_current' - and set creation markers for the part of the section the follows. - */ - uintptr_t c = (uintptr_t)STM_SEGMENT->nursery_current; - c = NURSERY_ALIGN(c); - STM_SEGMENT->nursery_current = (stm_char *)c; - - uint64_t size = STM_PSEGMENT->real_nursery_section_end - c; - if (size > 0) { - set_creation_markers((stm_char *)c, size, - CM_CURRENT_TRANSACTION_IN_NURSERY); - } + abort();//... } #ifdef STM_TESTS void _stm_set_nursery_free_count(uint64_t free_count) { - assert(free_count == NURSERY_ALIGN(free_count)); - assert(nursery_ctl.used <= NURSERY_SIZE - free_count); - nursery_ctl.used = NURSERY_SIZE - free_count; - nursery_ctl.initial_value_of_used = nursery_ctl.used; + assert(free_count <= NURSERY_SIZE); + _stm_nursery_start = NURSERY_END - free_count; + + long i; + for (i = 0; i < NB_SEGMENTS; i++) { + if ((uintptr_t)get_segment(i)->nursery_current < _stm_nursery_start) + get_segment(i)->nursery_current = (stm_char *)_stm_nursery_start; + } } #endif + +static void check_nursery_at_transaction_start(void) +{ + assert((uintptr_t)STM_SEGMENT->nursery_current == _stm_nursery_start); + uintptr_t i; + for (i = 0; i < _stm_nursery_end - _stm_nursery_start; i++) + assert(STM_SEGMENT->nursery_current[i] == 0); +} diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -1,40 +1,8 @@ -/* special values of 'v_nursery_section_end' */ -#define NSE_SIGNAL 1 -#define NSE_SIGNAL_DONE 2 +/* '_stm_nursery_section_end' is either NURSERY_END or NSE_SIGNAL */ +#define NSE_SIGNAL _STM_NSE_SIGNAL -#if _STM_NSE_SIGNAL != NSE_SIGNAL -# error "adapt _STM_NSE_SIGNAL" -#endif -/* Rules for 'v_nursery_section_end': +static uint32_t highest_overflow_number; - - Its main purpose is to be read by the owning thread in stm_allocate(). - - - The owning thread can change its value without acquiring the mutex, - but it must do so carefully, with a compare_and_swap. - - - If a different thread has the mutex, it can force the field to the - value NSE_SIGNAL or NSE_SIGNAL_DONE with a regular write. This should - not be hidden by the compare_and_swap done by the owning thread: - even if it occurs just before or just after a compare_and_swap, - the end result is that the special value NSE_SIGNAL(_DONE) is still - in the field. - - - When the owning thread sees NSE_SIGNAL, it must signal and wait until - the other thread restores the value to NSE_SIGNAL_DONE. When the - owning thread sees NSE_SIGNAL_DONE, it can replace it, again with - compare_and_swap, with the real value. - - - This should in theory be a volatile field, because it can be read - from stm_allocate() while at the same time being changed to the value - NSE_SIGNAL by another thread. In practice, making it volatile has - probably just a small negative impact on performance for no good reason. 
-*/ - -static void align_nursery_at_transaction_start(void); -static void restore_nursery_section_end(uintptr_t prev_value); - -static inline bool was_read_remote(char *base, object_t *obj, - uint8_t other_transaction_read_version, - uint8_t min_read_version_outside_nursery); +static void check_nursery_at_transaction_start(void) __attribute__((unused)); diff --git a/c7/stm/pagecopy.c b/c7/stm/pagecopy.c --- a/c7/stm/pagecopy.c +++ b/c7/stm/pagecopy.c @@ -28,11 +28,13 @@ } } +#if 0 static void pagecopy_256(void *dest, const void *src) { PAGECOPY_128(dest, src ); PAGECOPY_128(dest + 128, src + 128); } +#endif #if 0 /* XXX enable if detected on the cpu */ static void pagecopy_ymm8(void *dest, const void *src) diff --git a/c7/stm/pagecopy.h b/c7/stm/pagecopy.h --- a/c7/stm/pagecopy.h +++ b/c7/stm/pagecopy.h @@ -1,3 +1,2 @@ static void pagecopy(void *dest, const void *src); // 4096 bytes -static void pagecopy_256(void *dest, const void *src); // 256 bytes diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -23,6 +23,7 @@ flag_page_private[pagenum + i] = SHARED_PAGE; } +#if 0 static void pages_make_shared_again(uintptr_t pagenum, uintptr_t count) { /* Same as pages_initialize_shared(), but tries hard to minimize the @@ -42,6 +43,7 @@ pages_initialize_shared(pagenum + start, i - start); } } +#endif static void privatize_range_and_unlock(uintptr_t pagenum, uintptr_t count, bool full) @@ -128,102 +130,7 @@ } } -static void set_creation_markers(stm_char *p, uint64_t size, int newvalue) -{ - /* Set the creation markers to 'newvalue' for all lines from 'p' to - 'p+size'. Both p and size should be aligned to the line size: 256. */ - - assert((((uintptr_t)p) & 255) == 0); - assert((size & 255) == 0); - assert(size > 0); - - uintptr_t cmaddr = ((uintptr_t)p) >> 8; - LIST_APPEND(STM_PSEGMENT->creation_markers, cmaddr); - - char *addr = REAL_ADDRESS(STM_SEGMENT->segment_base, cmaddr); - memset(addr, newvalue, size >> 8); -} - -static uint8_t get_single_creation_marker(stm_char *p) -{ - uintptr_t cmaddr = ((uintptr_t)p) >> 8; - return ((stm_creation_marker_t *)cmaddr)->cm; -} - -static void set_single_creation_marker(stm_char *p, int newvalue) -{ - uintptr_t cmaddr = ((uintptr_t)p) >> 8; - ((stm_creation_marker_t *)cmaddr)->cm = newvalue; - LIST_APPEND(STM_PSEGMENT->creation_markers, cmaddr); -} - -static void reset_all_creation_markers(void) -{ - /* Note that the page 'NB_PAGES - 1' is not actually used. This - ensures that the creation markers always end with some zeroes. - We reset the markers 8 at a time, by writing null integers - until we reach a place that is already null. - */ - LIST_FOREACH_R( - STM_PSEGMENT->creation_markers, - uintptr_t /*item*/, - ({ - TLPREFIX uint64_t *p = (TLPREFIX uint64_t *)(item & ~7); - while (*p != 0) - *p++ = 0; - })); - - list_clear(STM_PSEGMENT->creation_markers); -} - -static void reset_all_creation_markers_and_push_created_data(void) -{ - /* This is like reset_all_creation_markers(), but additionally - it looks for markers in non-SHARED pages, and pushes the - corresponding data (in 256-bytes blocks) to other threads. 
- */ -#if NB_SEGMENTS != 2 -# error "The logic in this function only works with two segments" -#endif - - char *local_base = STM_SEGMENT->segment_base; - long remote_num = 1 - STM_SEGMENT->segment_num; - char *remote_base = get_segment_base(remote_num); - - /* this logic assumes that creation markers are in 256-bytes blocks, - and pages are 4096 bytes, so creation markers are handled by groups - of 16 --- which is two 8-bytes uint64_t. */ - - LIST_FOREACH_R( - STM_PSEGMENT->creation_markers, - uintptr_t /*item*/, - ({ - TLPREFIX uint64_t *p = (TLPREFIX uint64_t *)(item & ~15); - while (p[0] != 0 || p[1] != 0) { - - uint64_t pagenum = ((uint64_t)p) >> 4; - if (flag_page_private[pagenum] != SHARED_PAGE) { - /* copying needed */ - uint64_t dataofs = ((uint64_t)p) << 8; - stm_char *start = (stm_char *)p; - stm_char *stop = start + 16; - while (start < stop) { - if (*start++ != 0) { - pagecopy_256(remote_base + dataofs, - local_base + dataofs); - } - dataofs += 256; - } - } - p[0] = 0; _duck(); - p[1] = 0; - p += 2; - } - })); - - list_clear(STM_PSEGMENT->creation_markers); -} - +#if 0 static bool is_in_shared_pages(object_t *obj) { uintptr_t first_page = ((uintptr_t)obj) / 4096UL; @@ -234,11 +141,11 @@ ssize_t obj_size = stmcb_size_rounded_up( (struct object_s *)REAL_ADDRESS(stm_object_pages, obj)); - uintptr_t end_page = (((uintptr_t)obj) + obj_size + 4095) / 4096UL; - /* that's the page *following* the last page with the object */ + uintptr_t last_page = (((uintptr_t)obj) + obj_size - 1) / 4096UL; - while (first_page < end_page) + while (first_page <= last_page) if (flag_page_private[first_page++] != SHARED_PAGE) return false; return true; } +#endif diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -1,5 +1,5 @@ -enum { +enum /* flag_page_private */ { /* The page is not in use. Assume that each segment sees its own copy. */ FREE_PAGE=0, @@ -12,15 +12,13 @@ /* Page is private for each segment. 
*/ PRIVATE_PAGE, - -}; /* used for flag_page_private */ - +}; static uint8_t flag_page_private[NB_PAGES]; static void _pages_privatize(uintptr_t pagenum, uintptr_t count, bool full); static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); -static void pages_make_shared_again(uintptr_t pagenum, uintptr_t count); +//static void pages_make_shared_again(uintptr_t pagenum, uintptr_t count); inline static void pages_privatize(uintptr_t pagenum, uintptr_t count, bool full) { @@ -32,9 +30,4 @@ _pages_privatize(pagenum, count, full); } -static void set_creation_markers(stm_char *p, uint64_t size, int newvalue); -static uint8_t get_single_creation_marker(stm_char *p) __attribute__((unused)); -static void set_single_creation_marker(stm_char *p, int newvalue); -static void reset_all_creation_markers(void); -static void reset_all_creation_markers_and_push_created_data(void); -static bool is_in_shared_pages(object_t *obj); +//static bool is_in_shared_pages(object_t *obj); diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -5,14 +5,8 @@ void stm_setup(void) { -#if 0 - _stm_reset_shared_lock(); - _stm_reset_pages(); - - inevitable_lock = 0; -#endif - /* Check that some values are acceptable */ + assert(NB_SEGMENTS <= NB_SEGMENTS_MAX); assert(4096 <= ((uintptr_t)STM_SEGMENT)); assert((uintptr_t)STM_SEGMENT == (uintptr_t)STM_PSEGMENT); assert(((uintptr_t)STM_PSEGMENT) + sizeof(*STM_PSEGMENT) <= 8192); @@ -21,9 +15,6 @@ assert(READMARKER_START < READMARKER_END); assert(READMARKER_END <= 4096UL * FIRST_OBJECT_PAGE); assert(FIRST_OBJECT_PAGE < NB_PAGES); - assert(CREATMARKER_START >= 8192); - assert(2 <= FIRST_CREATMARKER_PAGE); - assert(FIRST_CREATMARKER_PAGE <= FIRST_READMARKER_PAGE); assert((NB_PAGES * 4096UL) >> 8 <= (FIRST_OBJECT_PAGE * 4096UL) >> 4); assert((END_NURSERY_PAGE * 4096UL) >> 8 <= (FIRST_READMARKER_PAGE * 4096UL)); @@ -53,10 +44,10 @@ memset(REAL_ADDRESS(segment_base, STM_PSEGMENT), 0, sizeof(*STM_PSEGMENT)); - /* Pages in range(2, FIRST_CREATMARKER_PAGE) are never used */ - if (FIRST_CREATMARKER_PAGE > 2) + /* Pages in range(2, FIRST_READMARKER_PAGE) are never used */ + if (FIRST_READMARKER_PAGE > 2) mprotect(segment_base + 8192, - (FIRST_CREATMARKER_PAGE - 2) * 4096UL, + (FIRST_READMARKER_PAGE - 2) * 4096UL, PROT_NONE); struct stm_priv_segment_info_s *pr = get_priv_segment(i); @@ -64,17 +55,16 @@ pr->write_lock_num = i + 1; pr->pub.segment_num = i; pr->pub.segment_base = segment_base; - pr->old_objects_pointing_to_young = list_create(); - pr->modified_objects = list_create(); - pr->creation_markers = list_create(); + pr->overflow_objects_pointing_to_nursery = NULL; + pr->modified_old_objects = list_create(); + pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * (i + 1); + highest_overflow_number = pr->overflow_number; } - /* Make the nursery pages shared. The other pages are - shared lazily, as remap_file_pages() takes a relatively - long time for each page. */ - pages_initialize_shared(FIRST_NURSERY_PAGE, NB_NURSERY_PAGES); + /* The pages are shared lazily, as remap_file_pages() takes a relatively + long time for each page. - /* The read markers are initially zero, which is correct: + The read markers are initially zero, which is correct: STM_SEGMENT->transaction_read_version never contains zero, so a null read marker means "not read" whatever the current transaction_read_version is. 
@@ -96,9 +86,8 @@ long i; for (i = 0; i < NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pr = get_priv_segment(i); - list_free(pr->old_objects_pointing_to_young); - list_free(pr->modified_objects); - list_free(pr->creation_markers); + assert(pr->overflow_objects_pointing_to_nursery == NULL); + list_free(pr->modified_old_objects); } munmap(stm_object_pages, TOTAL_MEMORY); @@ -115,15 +104,15 @@ void stm_register_thread_local(stm_thread_local_t *tl) { int num; - if (stm_thread_locals == NULL) { - stm_thread_locals = tl->next = tl->prev = tl; + if (stm_all_thread_locals == NULL) { + stm_all_thread_locals = tl->next = tl->prev = tl; num = 0; } else { - tl->next = stm_thread_locals; - tl->prev = stm_thread_locals->prev; - stm_thread_locals->prev->next = tl; - stm_thread_locals->prev = tl; + tl->next = stm_all_thread_locals; + tl->prev = stm_all_thread_locals->prev; + stm_all_thread_locals->prev->next = tl; + stm_all_thread_locals->prev = tl; num = tl->prev->associated_segment_num + 1; } @@ -137,10 +126,11 @@ void stm_unregister_thread_local(stm_thread_local_t *tl) { - if (tl == stm_thread_locals) { - stm_thread_locals = stm_thread_locals->next; - if (tl == stm_thread_locals) { - stm_thread_locals = NULL; + assert(tl->next != NULL); + if (tl == stm_all_thread_locals) { + stm_all_thread_locals = stm_all_thread_locals->next; + if (tl == stm_all_thread_locals) { + stm_all_thread_locals = NULL; return; } } diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -191,7 +191,6 @@ assert(STM_PSEGMENT->safe_point == SP_SAFE_POINT_CAN_COLLECT); STM_PSEGMENT->safe_point = SP_RUNNING; - restore_nursery_section_end(NSE_SIGNAL_DONE); if (STM_PSEGMENT->transaction_state == TS_MUST_ABORT) stm_abort_transaction(); } @@ -219,6 +218,8 @@ try_wait_for_other_safe_points() while another is currently blocked in the cond_wait() in this same function. */ + abort();//... +#if 0 assert(_has_mutex()); assert(STM_PSEGMENT->safe_point == SP_SAFE_POINT_CAN_COLLECT); @@ -265,36 +266,28 @@ cond_broadcast(); /* to wake up the other threads, but later, when they get the mutex again */ return true; +#endif } -bool _stm_collectable_safe_point(void) +void _stm_collectable_safe_point(void) { - bool any_operation = false; - restart:; - switch (STM_SEGMENT->v_nursery_section_end) { + /* If nursery_section_end was set to NSE_SIGNAL by another thread, + we end up here as soon as we try to call stm_allocate() or do + a call to stm_safe_point(). + See try_wait_for_other_safe_points() for details. + */ + mutex_lock(); + assert(STM_PSEGMENT->safe_point == SP_RUNNING); - case NSE_SIGNAL: - /* If nursery_section_end was set to NSE_SIGNAL by another thread, - we end up here as soon as we try to call stm_allocate(). - See try_wait_for_other_safe_points() for details. */ - mutex_lock(); - assert(STM_PSEGMENT->safe_point == SP_RUNNING); + if (_stm_nursery_end == NSE_SIGNAL) { STM_PSEGMENT->safe_point = SP_SAFE_POINT_CAN_COLLECT; + cond_broadcast(); - cond_wait(); + + do { cond_wait(); } while (_stm_nursery_end == NSE_SIGNAL); + STM_PSEGMENT->safe_point = SP_RUNNING; - mutex_unlock(); + } - /* Once the sync point is done, retry. 
*/ - any_operation = true; - goto restart; - - case NSE_SIGNAL_DONE: - restore_nursery_section_end(NSE_SIGNAL_DONE); - any_operation = true; - break; - - default:; - } - return any_operation; + mutex_unlock(); } diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -12,21 +12,12 @@ #include #include #include -#include #include #if LONG_MAX == 2147483647 # error "Requires a 64-bit environment" #endif -#if BYTE_ORDER == 1234 -# define LENDIAN 1 // little endian -#elif BYTE_ORDER == 4321 -# define LENDIAN 0 // big endian -#else -# error "Unsupported endianness" -#endif - #define TLPREFIX __attribute__((address_space(256))) @@ -42,27 +33,16 @@ We assume that objects are at least 16 bytes long, and use their address divided by 16. The read marker is equal to 'STM_SEGMENT->transaction_read_version' if and only if the - object was read in the current transaction. */ + object was read in the current transaction. The nurseries + also have corresponding read markers, but they are never used. */ uint8_t rm; }; -struct stm_creation_marker_s { - /* In addition to read markers, every "line" of 256 bytes has one - extra byte, the creation marker, located at the address divided - by 256. The creation marker is either non-zero if all objects in - this line come have been allocated by the current transaction, - or 0x00 if none of them have been. Lines cannot contain a - mixture of both. Non-zero values are 0xff if in the nursery, - and 0x01 if outside the nursery. */ - uint8_t cm; -}; - struct stm_segment_info_s { uint8_t transaction_read_version; int segment_num; char *segment_base; stm_char *nursery_current; - uintptr_t v_nursery_section_end; /* see nursery.h */ struct stm_thread_local_s *running_thread; stm_jmpbuf_t *jmpbuf_ptr; }; @@ -79,10 +59,13 @@ /* this should use llvm's coldcc calling convention, but it's not exposed to C code so far */ void _stm_write_slowpath(object_t *); -stm_char *_stm_allocate_slowpath(ssize_t); +object_t *_stm_allocate_slowpath(ssize_t); +object_t *_stm_allocate_external(ssize_t); void _stm_become_inevitable(char*); void _stm_start_transaction(stm_thread_local_t *, stm_jmpbuf_t *); -bool _stm_collectable_safe_point(void); +void _stm_collectable_safe_point(void); + +extern uintptr_t _stm_nursery_end; #ifdef STM_TESTS bool _stm_was_read(object_t *obj); @@ -98,12 +81,13 @@ void _stm_start_safe_point(void); void _stm_stop_safe_point(void); void _stm_set_nursery_free_count(uint64_t free_count); -object_t *_stm_enum_old_objects_pointing_to_young(void); -object_t *_stm_enum_modified_objects(void); +object_t *_stm_enum_overflow_objects_pointing_to_nursery(void); +object_t *_stm_enum_modified_old_objects(void); #endif -#define _STM_GCFLAG_WRITE_BARRIER_CALLED 0x80 -#define _STM_NSE_SIGNAL 1 +#define _STM_GCFLAG_WRITE_BARRIER 0x01 +#define _STM_NSE_SIGNAL 0 +#define _STM_FAST_ALLOC (66*1024) #define STM_FLAGS_PREBUILT 0 @@ -133,7 +117,7 @@ */ struct object_s { - uint8_t stm_flags; /* reserved for the STM library */ + uint32_t stm_flags; /* reserved for the STM library */ }; /* The read barrier must be called whenever the object 'obj' is read. @@ -142,33 +126,24 @@ transaction commit, nothing that can potentially collect or do a safe point (like stm_write() on a different object). Also, if we might have finished the transaction and started the next one, then - stm_read() needs to be called again. + stm_read() needs to be called again. 
It can be omitted if + stm_write() is called, or immediately after getting the object from + stm_allocate(), as long as the rules above are respected. */ static inline void stm_read(object_t *obj) { -#if 0 /* very costly check */ - assert(((stm_read_marker_t *)(((uintptr_t)obj) >> 4))->rm - <= STM_SEGMENT->transaction_read_version); -#endif ((stm_read_marker_t *)(((uintptr_t)obj) >> 4))->rm = STM_SEGMENT->transaction_read_version; } /* The write barrier must be called *before* doing any change to the object 'obj'. If we might have finished the transaction and started - the next one, then stm_write() needs to be called again. - If stm_write() is called, it is not necessary to also call stm_read() - on the same object. + the next one, then stm_write() needs to be called again. It is not + necessary to call it immediately after stm_allocate(). */ static inline void stm_write(object_t *obj) { - /* this is: - 'if (cm < 0x80 && (stm_flags & WRITE_BARRIER_CALLED) == 0)' - where 'cm' can be 0 (not created in current transaction) - or 0xff (created in current transaction) - or 0x01 (same, but outside the nursery) */ - if (UNLIKELY(!((((stm_creation_marker_t *)(((uintptr_t)obj) >> 8))->cm | - obj->stm_flags) & _STM_GCFLAG_WRITE_BARRIER_CALLED))) + if (UNLIKELY((obj->stm_flags & _STM_GCFLAG_WRITE_BARRIER) != 0)) _stm_write_slowpath(obj); } @@ -190,11 +165,15 @@ OPT_ASSERT(size_rounded_up >= 16); OPT_ASSERT((size_rounded_up & 7) == 0); + if (UNLIKELY(size_rounded_up >= _STM_FAST_ALLOC)) + return _stm_allocate_external(size_rounded_up); + stm_char *p = STM_SEGMENT->nursery_current; stm_char *end = p + size_rounded_up; STM_SEGMENT->nursery_current = end; - if (UNLIKELY((uintptr_t)end > STM_SEGMENT->v_nursery_section_end)) - p = _stm_allocate_slowpath(size_rounded_up); + if (UNLIKELY((uintptr_t)end > _stm_nursery_end)) + return _stm_allocate_slowpath(size_rounded_up); + return (object_t *)p; } @@ -250,7 +229,7 @@ /* Forces a safe-point if needed. Normally not needed: this is automatic if you call stm_allocate(). 
*/ static inline void stm_safe_point(void) { - if (STM_SEGMENT->v_nursery_section_end == _STM_NSE_SIGNAL) + if (_stm_nursery_end == _STM_NSE_SIGNAL) _stm_collectable_safe_point(); } diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -53,7 +53,6 @@ bool _checked_stm_write(object_t *obj); bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); -uint8_t _stm_creation_marker(object_t *obj); bool _stm_in_nursery(object_t *obj); char *_stm_real_address(object_t *obj); object_t *_stm_segment_address(char *ptr); @@ -77,8 +76,8 @@ ssize_t stmcb_size_rounded_up(struct object_s *obj); -object_t *_stm_enum_old_objects_pointing_to_young(void); -object_t *_stm_enum_modified_objects(void); +object_t *_stm_enum_overflow_objects_pointing_to_nursery(void); +object_t *_stm_enum_modified_old_objects(void); void stm_collect(long level); """) @@ -248,7 +247,7 @@ ('STM_DEBUGPRINT', '1')], undef_macros=['NDEBUG'], include_dirs=[parent_dir], - extra_compile_args=['-g', '-O0', '-Werror'], + extra_compile_args=['-g', '-O0', '-Werror', '-ferror-limit=1'], force_generic_engine=True) From noreply at buildbot.pypy.org Sun Feb 23 22:26:44 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 23 Feb 2014 22:26:44 +0100 (CET) Subject: [pypy-commit] pypy default: compare with a constant here instead of name Message-ID: <20140223212644.702801C02EA@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69301:f3d4ec29ecfc Date: 2014-02-23 16:25 -0500 http://bitbucket.org/pypy/pypy/changeset/f3d4ec29ecfc/ Log: compare with a constant here instead of name diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -312,7 +312,7 @@ else: res_dtype = calc_dtype if self.complex_to_float and calc_dtype.is_complex_type(): - if calc_dtype.name == 'complex64': + if calc_dtype.num == NPY.CFLOAT: res_dtype = interp_dtype.get_dtype_cache(space).w_float32dtype else: res_dtype = interp_dtype.get_dtype_cache(space).w_float64dtype From noreply at buildbot.pypy.org Sun Feb 23 22:45:59 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 23 Feb 2014 22:45:59 +0100 (CET) Subject: [pypy-commit] pypy default: fix segfault on np.arr.astype(record) Message-ID: <20140223214559.1C9EA1C02EA@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69302:5ec998bf16a1 Date: 2014-02-23 16:40 -0500 http://bitbucket.org/pypy/pypy/changeset/5ec998bf16a1/ Log: fix segfault on np.arr.astype(record) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -569,7 +569,7 @@ cur_dtype = self.get_dtype() new_dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) - if new_dtype.shape: + if new_dtype.num == NPY.VOID: raise oefmt(space.w_NotImplementedError, "%s.astype(%s) not implemented yet", cur_dtype.name, new_dtype.name) impl = self.implementation diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3287,6 +3287,17 @@ assert arr[1]['y']['x'] == 0.0 assert arr[1]['x'] == 15 + def test_count_nonzero(self): + import numpy as np + import sys + d = [('f0', 'i4'), ('f1', 
'i4', 2)] + arr = np.array([0, 1]) + if '__pypy__' not in sys.builtin_module_names: + arr = arr.astype(d)[:1] + assert np.count_nonzero(arr) == 0 + else: + raises(NotImplementedError, "arr.astype(d)") + def test_string_record(self): from numpypy import dtype, array From noreply at buildbot.pypy.org Sun Feb 23 23:00:27 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 23 Feb 2014 23:00:27 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup/pep8 Message-ID: <20140223220027.C0C951C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69303:c814fb7f7341 Date: 2014-02-23 16:55 -0500 http://bitbucket.org/pypy/pypy/changeset/c814fb7f7341/ Log: cleanup/pep8 diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -1,7 +1,6 @@ from pypy.module.micronumpy.arrayimpl import base from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy import support -from pypy.module.micronumpy.interp_boxes import W_GenericBox from pypy.interpreter.error import OperationError class ScalarIterator(base.BaseArrayIterator): diff --git a/pypy/module/micronumpy/test/test_appbridge.py b/pypy/module/micronumpy/test/test_appbridge.py --- a/pypy/module/micronumpy/test/test_appbridge.py +++ b/pypy/module/micronumpy/test/test_appbridge.py @@ -1,5 +1,6 @@ from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + class AppTestAppBridge(BaseNumpyAppTest): def test_array_methods(self): import numpy as np diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -1,5 +1,5 @@ +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest -from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest class AppTestNumSupport(BaseNumpyAppTest): def test_where(self): diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -1,5 +1,6 @@ +from pypy.conftest import option from pypy.module.micronumpy import constants as NPY -from pypy.conftest import option + class BaseNumpyAppTest(object): spaceconfig = dict(usemodules=['micronumpy']) diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -1,4 +1,3 @@ - import py from pypy.module.micronumpy.compile import (numpy_compile, Assignment, ArrayConstant, FloatConstant, Operator, Variable, RangeConstant, Execute, diff --git a/pypy/module/micronumpy/test/test_complex.py b/pypy/module/micronumpy/test/test_complex.py --- a/pypy/module/micronumpy/test/test_complex.py +++ b/pypy/module/micronumpy/test/test_complex.py @@ -61,6 +61,7 @@ '%r and %r are not sufficiently close, %g > %g' %\ (a, b, absolute_error, max(abs_err, rel_err*abs(a)))) + def parse_testfile(fname): """Parse a file with test values @@ -85,6 +86,7 @@ flags ) + class AppTestUfuncs(BaseNumpyAppTest): def setup_class(cls): import os diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -1,4 +1,3 @@ -import py, sys from 
pypy.conftest import option from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest from pypy.interpreter.gateway import interp2app diff --git a/pypy/module/micronumpy/test/test_iter.py b/pypy/module/micronumpy/test/test_iter.py --- a/pypy/module/micronumpy/test/test_iter.py +++ b/pypy/module/micronumpy/test/test_iter.py @@ -1,9 +1,11 @@ from pypy.module.micronumpy.iter import MultiDimViewIterator from pypy.module.micronumpy.arrayimpl.scalar import ScalarIterator + class MockArray(object): size = 1 + class TestIterDirect(object): def test_C_viewiterator(self): #Let's get started, simple iteration in C order with diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -24,6 +24,7 @@ def get_size(self): return 1 + def create_slice(space, a, chunks): return Chunks(chunks).apply(space, W_NDimArray(a)).implementation diff --git a/pypy/module/micronumpy/test/test_outarg.py b/pypy/module/micronumpy/test/test_outarg.py --- a/pypy/module/micronumpy/test/test_outarg.py +++ b/pypy/module/micronumpy/test/test_outarg.py @@ -1,6 +1,6 @@ -import py from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + class AppTestOutArg(BaseNumpyAppTest): def test_reduce_out(self): from numpypy import arange, zeros, array diff --git a/pypy/module/micronumpy/test/test_subtype.py b/pypy/module/micronumpy/test/test_subtype.py --- a/pypy/module/micronumpy/test/test_subtype.py +++ b/pypy/module/micronumpy/test/test_subtype.py @@ -1,9 +1,9 @@ -import py from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest class AppTestSupport(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) + def setup_class(cls): BaseNumpyAppTest.setup_class.im_func(cls) cls.w_NoNew = cls.space.appexec([], '''(): diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -1,16 +1,15 @@ - """ Tests that check if JIT-compiled numpy operations produce reasonably good assembler """ import py -from rpython.jit.metainterp import pyjitpl from rpython.jit.metainterp.test.support import LLJitMixin from rpython.jit.metainterp.warmspot import reset_jit, get_stats from pypy.module.micronumpy import interp_boxes from pypy.module.micronumpy.compile import FakeSpace, Parser, InterpreterState from pypy.module.micronumpy.base import W_NDimArray + class TestNumpyJIt(LLJitMixin): graph = None interp = None @@ -56,7 +55,7 @@ elif isinstance(w_res, interp_boxes.W_BoolBox): return float(w_res.value) raise TypeError(w_res) - + if self.graph is None: interp, graph = self.meta_interp(f, [0], listops=True, From noreply at buildbot.pypy.org Sun Feb 23 23:00:29 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Sun, 23 Feb 2014 23:00:29 +0100 (CET) Subject: [pypy-commit] pypy default: add dot bench script Message-ID: <20140223220029.02A261C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69304:74eff28ac998 Date: 2014-02-23 16:57 -0500 http://bitbucket.org/pypy/pypy/changeset/74eff28ac998/ Log: add dot bench script diff --git a/pypy/module/micronumpy/bench/dot.py b/pypy/module/micronumpy/bench/dot.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/bench/dot.py @@ -0,0 +1,28 @@ +import time + +try: + import numpypy +except ImportError: + pass + +import numpy + +def get_matrix(): + 
import random + n = 502 + x = numpy.zeros((n,n), dtype=numpy.float64) + for i in range(n): + for j in range(n): + x[i][j] = random.random() + return x + +def main(): + x = get_matrix() + y = get_matrix() + a = time.time() + #z = numpy.dot(x, y) # uses numpy possibly-blas-lib dot + z = numpy.core.multiarray.dot(x, y) # uses strictly numpy C dot + b = time.time() + print '%.2f seconds' % (b-a) + +main() From noreply at buildbot.pypy.org Mon Feb 24 00:19:01 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 00:19:01 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Fix test_coerce(). Message-ID: <20140223231901.8B31C1C303A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69305:ea4b812690a9 Date: 2014-02-23 22:47 +0100 http://bitbucket.org/pypy/pypy/changeset/ea4b812690a9/ Log: Fix test_coerce(). diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -490,8 +490,8 @@ return space.newbool((w_complex.realval != 0.0) or (w_complex.imagval != 0.0)) -def coerce__Complex_Complex(space, w_complex1, w_complex2): - #w_complex2 = to_complex(w_complex2) +def coerce__Complex_ANY(space, w_complex1, w_complex2): + w_complex2 = to_complex(space, w_complex2) return space.newtuple([w_complex1, w_complex2]) def float__Complex(space, w_complex): From noreply at buildbot.pypy.org Mon Feb 24 00:19:02 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 00:19:02 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Add remaining __r*__ to complex. Message-ID: <20140223231902.BEE741C303A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69306:5f3f540c4ba5 Date: 2014-02-23 23:04 +0100 http://bitbucket.org/pypy/pypy/changeset/5f3f540c4ba5/ Log: Add remaining __r*__ to complex. 
diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -18,12 +18,6 @@ from rpython.rlib.rstring import ParseStringError -# ERRORCODES - -ERR_WRONG_SECOND = "complex() can't take second arg if first is a string" -ERR_MALFORMED = "complex() arg is a malformed string" - - class W_AbstractComplexObject(W_Object): __slots__ = () @@ -215,6 +209,9 @@ return (space.float_w(space.float(w_complex)), 0.0) +ERR_MALFORMED = "complex() arg is a malformed string" + + class W_ComplexObject(W_AbstractComplexObject): """This is a reimplementation of the CPython "PyComplexObject" """ @@ -367,6 +364,10 @@ w_rhs = to_complex(space, w_rhs) return self.mul(w_rhs) + def descr_rmul(self, space, w_lhs): + w_lhs = to_complex(space, w_lhs) + return w_lhs.mul(self) + def descr_truediv(self, space, w_rhs): w_rhs = to_complex(space, w_rhs) try: @@ -374,15 +375,29 @@ except ZeroDivisionError, e: raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) + def descr_rtruediv(self, space, w_lhs): + w_lhs = to_complex(space, w_lhs) + try: + return w_lhs.div(self) + except ZeroDivisionError, e: + raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) + def descr_floordiv(self, space, w_rhs): w_rhs = to_complex(space, w_rhs) - w_rhs = to_complex(space, w_rhs) # don't care about the slight slowdown you get from using divmod try: return self.divmod(space, w_rhs)[0] except ZeroDivisionError, e: raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) + def descr_rfloordiv(self, space, w_lhs): + w_lhs = to_complex(space, w_lhs) + # don't care about the slight slowdown you get from using divmod + try: + return w_lhs.divmod(space, self)[0] + except ZeroDivisionError, e: + raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) + def descr_mod(self, space, w_rhs): w_rhs = to_complex(space, w_rhs) try: @@ -390,6 +405,13 @@ except ZeroDivisionError, e: raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) + def descr_rmod(self, space, w_lhs): + w_lhs = to_complex(space, w_lhs) + try: + return w_lhs.divmod(space, self)[1] + except ZeroDivisionError, e: + raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) + def descr_divmod(self, space, w_rhs): w_rhs = to_complex(space, w_rhs) try: @@ -398,6 +420,14 @@ raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) return space.newtuple([div, mod]) + def descr_rdivmod(self, space, w_lhs): + w_lhs = to_complex(space, w_lhs) + try: + div, mod = w_lhs.divmod(space, self) + except ZeroDivisionError, e: + raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) + return space.newtuple([div, mod]) + @unwrap_spec(w_third_arg=WrappedDefault(None)) def descr_pow(self, space, w_exponent, w_third_arg): w_exponent = to_complex(space, w_exponent) @@ -558,11 +588,17 @@ __sub__ = interp2app(W_ComplexObject.descr_sub), __rsub__ = interp2app(W_ComplexObject.descr_rsub), __mul__ = interp2app(W_ComplexObject.descr_mul), + __rmul__ = interp2app(W_ComplexObject.descr_rmul), __div__ = interp2app(W_ComplexObject.descr_truediv), + __rdiv__ = interp2app(W_ComplexObject.descr_rtruediv), __truediv__ = interp2app(W_ComplexObject.descr_truediv), + __rtruediv__ = interp2app(W_ComplexObject.descr_rtruediv), __floordiv__ = interp2app(W_ComplexObject.descr_floordiv), + __rfloordiv__ = interp2app(W_ComplexObject.descr_rfloordiv), __mod__ = interp2app(W_ComplexObject.descr_mod), + __rmod__ = 
interp2app(W_ComplexObject.descr_rmod), __divmod__ = interp2app(W_ComplexObject.descr_divmod), + __rdivmod__ = interp2app(W_ComplexObject.descr_rdivmod), __pow__ = interp2app(W_ComplexObject.descr_pow), ) From noreply at buildbot.pypy.org Mon Feb 24 00:19:03 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 00:19:03 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Move descr_conjugate. Message-ID: <20140223231903.EB08C1C303A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69307:3f359859555e Date: 2014-02-23 23:13 +0100 http://bitbucket.org/pypy/pypy/changeset/3f359859555e/ Log: Move descr_conjugate. diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -336,10 +336,6 @@ return space.newtuple([space.newfloat(self.realval), space.newfloat(self.imagval)]) - def descr_conjugate(self, space): - """(A+Bj).conjugate() -> A-Bj""" - return space.newcomplex(self.realval, -self.imagval) - def descr_add(self, space, w_rhs): w_rhs = to_complex(space, w_rhs) return W_ComplexObject(self.realval + w_rhs.realval, @@ -445,6 +441,10 @@ raise OperationError(space.w_OverflowError, space.wrap("complex exponentiation")) return w_p + def descr_conjugate(self, space): + """(A+Bj).conjugate() -> A-Bj""" + return space.newcomplex(self.realval, -self.imagval) + registerimplementation(W_ComplexObject) w_one = W_ComplexObject(1, 0) @@ -581,7 +581,6 @@ __getnewargs__ = interp2app(W_ComplexObject.descr___getnewargs__), real = complexwprop('realval'), imag = complexwprop('imagval'), - conjugate = interp2app(W_ComplexObject.descr_conjugate), __add__ = interp2app(W_ComplexObject.descr_add), __radd__ = interp2app(W_ComplexObject.descr_radd), @@ -600,6 +599,8 @@ __divmod__ = interp2app(W_ComplexObject.descr_divmod), __rdivmod__ = interp2app(W_ComplexObject.descr_rdivmod), __pow__ = interp2app(W_ComplexObject.descr_pow), + + conjugate = interp2app(W_ComplexObject.descr_conjugate), ) W_ComplexObject.typedef.registermethods(globals()) From noreply at buildbot.pypy.org Mon Feb 24 00:19:05 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 00:19:05 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Kill complex.__hash__ SMM. Message-ID: <20140223231905.21DBE1C303A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69308:301f03687bca Date: 2014-02-23 23:16 +0100 http://bitbucket.org/pypy/pypy/changeset/301f03687bca/ Log: Kill complex.__hash__ SMM. 
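Note (illustrative, not part of the changeset): moving __hash__ off the multimethod table does not change the value it computes. descr_hash still combines the two float hashes as hash(real) + 1000003 * hash(imag), truncated to a machine word, which keeps a complex with zero imaginary part hashing like its real part. A rough plain-Python restatement, with intmask_like as a hypothetical stand-in for rlib's intmask:

    import sys

    def intmask_like(n):
        # wrap to a signed machine word, assuming the host's word size
        bits = 64 if sys.maxsize > 2 ** 32 else 32
        n &= (1 << bits) - 1
        if n >= 1 << (bits - 1):
            n -= 1 << bits
        return n

    def complex_hash(real, imag):
        # same combination as descr_hash above
        return intmask_like(hash(real) + 1000003 * hash(imag))

    assert complex_hash(1.5, 0.0) == hash(1.5)   # hash(0.0) == 0
    assert hash(complex(1.5, 0.0)) == hash(1.5)  # equal numbers hash equal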
diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -336,6 +336,12 @@ return space.newtuple([space.newfloat(self.realval), space.newfloat(self.imagval)]) + def descr_hash(self, space): + hashreal = _hash_float(space, self.realval) + hashimg = _hash_float(space, self.imagval) + combined = intmask(hashreal + 1000003 * hashimg) + return space.newint(combined) + def descr_add(self, space, w_rhs): w_rhs = to_complex(space, w_rhs) return W_ComplexObject(self.realval + w_rhs.realval, @@ -463,12 +469,6 @@ if space.isinstance_w(w_obj, space.w_float): return W_ComplexObject(w_obj.floatval, 0.0) -def hash__Complex(space, w_value): - hashreal = _hash_float(space, w_value.realval) - hashimg = _hash_float(space, w_value.imagval) - combined = intmask(hashreal + 1000003 * hashimg) - return space.newint(combined) - def neg__Complex(space, w_complex): return W_ComplexObject(-w_complex.realval, -w_complex.imagval) @@ -582,6 +582,8 @@ real = complexwprop('realval'), imag = complexwprop('imagval'), + __hash__ = interp2app(W_ComplexObject.descr_hash), + __add__ = interp2app(W_ComplexObject.descr_add), __radd__ = interp2app(W_ComplexObject.descr_radd), __sub__ = interp2app(W_ComplexObject.descr_sub), From noreply at buildbot.pypy.org Mon Feb 24 00:19:06 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 00:19:06 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Fix the case when binary ops are called with an unsupported operand. Message-ID: <20140223231906.52A451C303A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69309:7d53c974e8fa Date: 2014-02-23 23:23 +0100 http://bitbucket.org/pypy/pypy/changeset/7d53c974e8fa/ Log: Fix the case when binary ops are called with an unsupported operand. 
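Note (illustrative, not part of the changeset): returning space.w_NotImplemented when the other operand cannot be coerced is what lets the interpreter fall back to the other operand's reflected method, or raise the usual TypeError itself. A small pure-Python sketch of the contract, with Frac and Tracer as made-up classes:

    class Frac(object):
        def __init__(self, n, d):
            self.n, self.d = n, d

        def __add__(self, other):
            if isinstance(other, Frac):
                return Frac(self.n * other.d + other.n * self.d,
                            self.d * other.d)
            return NotImplemented          # crucial: do not raise here

    class Tracer(object):
        def __radd__(self, other):
            return "Tracer.__radd__ saw %r" % (other,)

    print(Frac(1, 2) + Tracer())           # the reflected method gets a chance
    try:
        Frac(1, 2) + "text"
    except TypeError:
        print("interpreter raised the TypeError for us")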
diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -344,34 +344,48 @@ def descr_add(self, space, w_rhs): w_rhs = to_complex(space, w_rhs) + if w_rhs is None: + return space.w_NotImplemented return W_ComplexObject(self.realval + w_rhs.realval, self.imagval + w_rhs.imagval) def descr_radd(self, space, w_lhs): w_lhs = to_complex(space, w_lhs) + if w_lhs is None: + return space.w_NotImplemented return W_ComplexObject(w_lhs.realval + self.realval, w_lhs.imagval + self.imagval) def descr_sub(self, space, w_rhs): w_rhs = to_complex(space, w_rhs) + if w_rhs is None: + return space.w_NotImplemented return W_ComplexObject(self.realval - w_rhs.realval, self.imagval - w_rhs.imagval) def descr_rsub(self, space, w_lhs): w_lhs = to_complex(space, w_lhs) + if w_lhs is None: + return space.w_NotImplemented return W_ComplexObject(w_lhs.realval - self.realval, w_lhs.imagval - self.imagval) def descr_mul(self, space, w_rhs): w_rhs = to_complex(space, w_rhs) + if w_rhs is None: + return space.w_NotImplemented return self.mul(w_rhs) def descr_rmul(self, space, w_lhs): w_lhs = to_complex(space, w_lhs) + if w_lhs is None: + return space.w_NotImplemented return w_lhs.mul(self) def descr_truediv(self, space, w_rhs): w_rhs = to_complex(space, w_rhs) + if w_rhs is None: + return space.w_NotImplemented try: return self.div(w_rhs) except ZeroDivisionError, e: @@ -379,6 +393,8 @@ def descr_rtruediv(self, space, w_lhs): w_lhs = to_complex(space, w_lhs) + if w_lhs is None: + return space.w_NotImplemented try: return w_lhs.div(self) except ZeroDivisionError, e: @@ -386,6 +402,8 @@ def descr_floordiv(self, space, w_rhs): w_rhs = to_complex(space, w_rhs) + if w_rhs is None: + return space.w_NotImplemented # don't care about the slight slowdown you get from using divmod try: return self.divmod(space, w_rhs)[0] @@ -394,6 +412,8 @@ def descr_rfloordiv(self, space, w_lhs): w_lhs = to_complex(space, w_lhs) + if w_lhs is None: + return space.w_NotImplemented # don't care about the slight slowdown you get from using divmod try: return w_lhs.divmod(space, self)[0] @@ -402,6 +422,8 @@ def descr_mod(self, space, w_rhs): w_rhs = to_complex(space, w_rhs) + if w_rhs is None: + return space.w_NotImplemented try: return self.divmod(space, w_rhs)[1] except ZeroDivisionError, e: @@ -409,6 +431,8 @@ def descr_rmod(self, space, w_lhs): w_lhs = to_complex(space, w_lhs) + if w_lhs is None: + return space.w_NotImplemented try: return w_lhs.divmod(space, self)[1] except ZeroDivisionError, e: @@ -416,6 +440,8 @@ def descr_divmod(self, space, w_rhs): w_rhs = to_complex(space, w_rhs) + if w_rhs is None: + return space.w_NotImplemented try: div, mod = self.divmod(space, w_rhs) except ZeroDivisionError, e: @@ -424,6 +450,8 @@ def descr_rdivmod(self, space, w_lhs): w_lhs = to_complex(space, w_lhs) + if w_lhs is None: + return space.w_NotImplemented try: div, mod = w_lhs.divmod(space, self) except ZeroDivisionError, e: @@ -433,6 +461,8 @@ @unwrap_spec(w_third_arg=WrappedDefault(None)) def descr_pow(self, space, w_exponent, w_third_arg): w_exponent = to_complex(space, w_exponent) + if w_exponent is None: + return space.w_NotImplemented if not space.is_w(w_third_arg, space.w_None): raise OperationError(space.w_ValueError, space.wrap('complex modulo')) try: @@ -522,6 +552,8 @@ def coerce__Complex_ANY(space, w_complex1, w_complex2): w_complex2 = to_complex(space, w_complex2) + if w_complex2 is None: + return space.w_NotImplemented return 
space.newtuple([w_complex1, w_complex2]) def float__Complex(space, w_complex): From noreply at buildbot.pypy.org Mon Feb 24 00:19:07 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 00:19:07 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Don't special case w_bool. Message-ID: <20140223231907.7B9901C303A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69310:77fb097f2896 Date: 2014-02-23 23:26 +0100 http://bitbucket.org/pypy/pypy/changeset/77fb097f2896/ Log: Don't special case w_bool. diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -489,8 +489,6 @@ def to_complex(space, w_obj): if isinstance(w_obj, W_ComplexObject): return w_obj - if space.isinstance_w(w_obj, space.w_bool): - return W_ComplexObject(w_obj.intval, 0.0) if space.isinstance_w(w_obj, space.w_int): return W_ComplexObject(w_obj.intval, 0.0) if space.isinstance_w(w_obj, space.w_long): From noreply at buildbot.pypy.org Mon Feb 24 00:19:08 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 00:19:08 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Kill complex' comparison SMMs. Message-ID: <20140223231908.A0B0B1C303A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69311:539b3c1e0501 Date: 2014-02-23 23:46 +0100 http://bitbucket.org/pypy/pypy/changeset/539b3c1e0501/ Log: Kill complex' comparison SMMs. diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -342,6 +342,34 @@ combined = intmask(hashreal + 1000003 * hashimg) return space.newint(combined) + def descr_eq(self, space, w_other): + if isinstance(w_other, W_ComplexObject): + return space.newbool((self.realval == w_other.realval) and + (self.imagval == w_other.imagval)) + if (space.isinstance_w(w_other, space.w_int) or + space.isinstance_w(w_other, space.w_long)): + if self.imagval: + return space.w_False + return space.eq(space.newfloat(self.realval), w_other) + return space.w_NotImplemented + + def descr_ne(self, space, w_other): + if isinstance(w_other, W_ComplexObject): + return space.newbool((self.realval != w_other.realval) or + (self.imagval != w_other.imagval)) + if (space.isinstance_w(w_other, space.w_int) or + space.isinstance_w(w_other, space.w_long)): + if self.imagval: + return space.w_True + return space.ne(space.newfloat(self.realval), w_other) + return space.w_NotImplemented + + def _fail_cmp(self, space, w_other): + if isinstance(w_other, W_ComplexObject): + raise OperationError(space.w_TypeError, + space.wrap('cannot compare complex numbers using <, <=, >, >=')) + return space.w_NotImplemented + def descr_add(self, space, w_rhs): w_rhs = to_complex(space, w_rhs) if w_rhs is None: @@ -509,41 +537,6 @@ except OverflowError, e: raise OperationError(space.w_OverflowError, space.wrap(str(e))) -def eq__Complex_Complex(space, w_complex1, w_complex2): - return space.newbool((w_complex1.realval == w_complex2.realval) and - (w_complex1.imagval == w_complex2.imagval)) - -def ne__Complex_Complex(space, w_complex1, w_complex2): - return space.newbool((w_complex1.realval != w_complex2.realval) or - (w_complex1.imagval != w_complex2.imagval)) - -def eq__Complex_Long(space, w_complex1, w_long2): - if w_complex1.imagval: - return space.w_False - return 
space.eq(space.newfloat(w_complex1.realval), w_long2) -eq__Complex_Int = eq__Complex_Long - -def eq__Long_Complex(space, w_long1, w_complex2): - return eq__Complex_Long(space, w_complex2, w_long1) -eq__Int_Complex = eq__Long_Complex - -def ne__Complex_Long(space, w_complex1, w_long2): - if w_complex1.imagval: - return space.w_True - return space.ne(space.newfloat(w_complex1.realval), w_long2) -ne__Complex_Int = ne__Complex_Long - -def ne__Long_Complex(space, w_long1, w_complex2): - return ne__Complex_Long(space, w_complex2, w_long1) -ne__Int_Complex = ne__Long_Complex - -def lt__Complex_Complex(space, w_complex1, w_complex2): - raise OperationError(space.w_TypeError, space.wrap('cannot compare complex numbers using <, <=, >, >=')) - -gt__Complex_Complex = lt__Complex_Complex -ge__Complex_Complex = lt__Complex_Complex -le__Complex_Complex = lt__Complex_Complex - def nonzero__Complex(space, w_complex): return space.newbool((w_complex.realval != 0.0) or (w_complex.imagval != 0.0)) @@ -611,8 +604,14 @@ __getnewargs__ = interp2app(W_ComplexObject.descr___getnewargs__), real = complexwprop('realval'), imag = complexwprop('imagval'), + __hash__ = interp2app(W_ComplexObject.descr_hash), - __hash__ = interp2app(W_ComplexObject.descr_hash), + __eq__ = interp2app(W_ComplexObject.descr_eq), + __ne__ = interp2app(W_ComplexObject.descr_ne), + __lt__ = interp2app(W_ComplexObject._fail_cmp), + __le__ = interp2app(W_ComplexObject._fail_cmp), + __gt__ = interp2app(W_ComplexObject._fail_cmp), + __ge__ = interp2app(W_ComplexObject._fail_cmp), __add__ = interp2app(W_ComplexObject.descr_add), __radd__ = interp2app(W_ComplexObject.descr_radd), From noreply at buildbot.pypy.org Mon Feb 24 00:19:09 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 00:19:09 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Kill dead imports. Message-ID: <20140223231909.C00581C303A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69312:83173344840d Date: 2014-02-23 23:47 +0100 http://bitbucket.org/pypy/pypy/changeset/83173344840d/ Log: Kill dead imports. diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -3,11 +3,8 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.objspace.std import newformat -from pypy.objspace.std.floatobject import W_FloatObject, _hash_float -from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.longobject import W_LongObject +from pypy.objspace.std.floatobject import _hash_float from pypy.objspace.std.model import registerimplementation, W_Object -from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.register_all import register_all from pypy.objspace.std.stdtypedef import GetSetProperty, StdTypeDef from rpython.rlib import jit, rcomplex From noreply at buildbot.pypy.org Mon Feb 24 00:19:10 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 00:19:10 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Kill remaining complex SMMs. Message-ID: <20140223231910.E72251C303A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69313:b4df3096e70c Date: 2014-02-24 00:14 +0100 http://bitbucket.org/pypy/pypy/changeset/b4df3096e70c/ Log: Kill remaining complex SMMs. 
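Note (illustrative, not part of the changeset): this is the same migration the whole branch performs. Behaviour that used to live in free functions dispatched by the standard-multimethod machinery (repr__Complex, str__Complex, ...) becomes ordinary descr_* methods exposed through interp2app in the StdTypeDef, as the diff below shows. A pure-Python toy contrasting the two styles (Toy and DISPATCH_TABLE are stand-ins, not PyPy code):

    # multimethod-style: a free function plus an external dispatch table
    def repr__Toy(obj):
        return "Toy(%r)" % (obj.value,)

    DISPATCH_TABLE = {}

    # typedef-style: an ordinary method on the class itself
    class Toy(object):
        def __init__(self, value):
            self.value = value

        def __repr__(self):
            return "Toy(%r)" % (self.value,)

    DISPATCH_TABLE[Toy] = repr__Toy

    t = Toy(3)
    print(DISPATCH_TABLE[type(t)](t))      # old style: table lookup
    print(repr(t))                         # new style: plain method call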
diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -333,12 +333,74 @@ return space.newtuple([space.newfloat(self.realval), space.newfloat(self.imagval)]) + def _format_float(self, x, code, precision): + # like float2string, except that the ".0" is not necessary + if isinf(x): + if x > 0.0: + return "inf" + else: + return "-inf" + elif isnan(x): + return "nan" + else: + return formatd(x, code, precision) + + def _repr_format(self, x): + return self._format_float(x, 'r', 0) + + def _str_format(self, x): + return self._format_float(x, 'g', DTSF_STR_PRECISION) + + def descr_repr(self, space): + if self.realval == 0 and copysign(1., self.realval) == 1.: + return space.wrap(self._repr_format(self.imagval) + 'j') + sign = (copysign(1., self.imagval) == 1. or + isnan(self.imagval)) and '+' or '' + return space.wrap('(' + self._repr_format(self.realval) + + sign + self._repr_format(self.imagval) + 'j)') + + def descr_str(self, space): + if self.realval == 0 and copysign(1., self.realval) == 1.: + return space.wrap(self._str_format(self.imagval) + 'j') + sign = (copysign(1., self.imagval) == 1. or + isnan(self.imagval)) and '+' or '' + return space.wrap('(' + self._str_format(self.realval) + + sign + self._str_format(self.imagval) + 'j)') + def descr_hash(self, space): hashreal = _hash_float(space, self.realval) hashimg = _hash_float(space, self.imagval) combined = intmask(hashreal + 1000003 * hashimg) return space.newint(combined) + def descr_coerce(self, space, w_other): + w_other = to_complex(space, w_other) + if w_other is None: + return space.w_NotImplemented + return space.newtuple([self, w_other]) + + def descr_format(self, space, w_format_spec): + return newformat.run_formatter(space, w_format_spec, "format_complex", self) + + def descr_nonzero(self, space): + return space.newbool((self.realval != 0.0) or (self.imagval != 0.0)) + + def descr_float(self, space): + raise OperationError(space.w_TypeError, + space.wrap("can't convert complex to float; use abs(z)")) + + def descr_neg(self, space): + return W_ComplexObject(-self.realval, -self.imagval) + + def descr_pos(self, space): + return W_ComplexObject(self.realval, self.imagval) + + def descr_abs(self, space): + try: + return space.newfloat(math.hypot(self.realval, self.imagval)) + except OverflowError, e: + raise OperationError(space.w_OverflowError, space.wrap(str(e))) + def descr_eq(self, space, w_other): if isinstance(w_other, W_ComplexObject): return space.newbool((self.realval == w_other.realval) and @@ -522,67 +584,6 @@ if space.isinstance_w(w_obj, space.w_float): return W_ComplexObject(w_obj.floatval, 0.0) -def neg__Complex(space, w_complex): - return W_ComplexObject(-w_complex.realval, -w_complex.imagval) - -def pos__Complex(space, w_complex): - return W_ComplexObject(w_complex.realval, w_complex.imagval) - -def abs__Complex(space, w_complex): - try: - return space.newfloat(math.hypot(w_complex.realval, w_complex.imagval)) - except OverflowError, e: - raise OperationError(space.w_OverflowError, space.wrap(str(e))) - -def nonzero__Complex(space, w_complex): - return space.newbool((w_complex.realval != 0.0) or - (w_complex.imagval != 0.0)) - -def coerce__Complex_ANY(space, w_complex1, w_complex2): - w_complex2 = to_complex(space, w_complex2) - if w_complex2 is None: - return space.w_NotImplemented - return space.newtuple([w_complex1, w_complex2]) - -def float__Complex(space, w_complex): - raise 
OperationError(space.w_TypeError, space.wrap("can't convert complex to float; use abs(z)")) - -def format_float(x, code, precision): - # like float2string, except that the ".0" is not necessary - if isinf(x): - if x > 0.0: - return "inf" - else: - return "-inf" - elif isnan(x): - return "nan" - else: - return formatd(x, code, precision) - -def repr_format(x): - return format_float(x, 'r', 0) -def str_format(x): - return format_float(x, 'g', DTSF_STR_PRECISION) - -def repr__Complex(space, w_complex): - if w_complex.realval == 0 and copysign(1., w_complex.realval) == 1.: - return space.wrap(repr_format(w_complex.imagval) + 'j') - sign = (copysign(1., w_complex.imagval) == 1. or - isnan(w_complex.imagval)) and '+' or '' - return space.wrap('(' + repr_format(w_complex.realval) - + sign + repr_format(w_complex.imagval) + 'j)') - -def str__Complex(space, w_complex): - if w_complex.realval == 0 and copysign(1., w_complex.realval) == 1.: - return space.wrap(str_format(w_complex.imagval) + 'j') - sign = (copysign(1., w_complex.imagval) == 1. or - isnan(w_complex.imagval)) and '+' or '' - return space.wrap('(' + str_format(w_complex.realval) - + sign + str_format(w_complex.imagval) + 'j)') - -def format__Complex_ANY(space, w_complex, w_format_spec): - return newformat.run_formatter(space, w_format_spec, "format_complex", w_complex) - def complexwprop(name): def fget(space, w_obj): from pypy.objspace.std.complexobject import W_ComplexObject @@ -601,7 +602,16 @@ __getnewargs__ = interp2app(W_ComplexObject.descr___getnewargs__), real = complexwprop('realval'), imag = complexwprop('imagval'), + __repr__ = interp2app(W_ComplexObject.descr_repr), + __str__ = interp2app(W_ComplexObject.descr_str), __hash__ = interp2app(W_ComplexObject.descr_hash), + __coerce__ = interp2app(W_ComplexObject.descr_coerce), + __format__ = interp2app(W_ComplexObject.descr_format), + __nonzero__ = interp2app(W_ComplexObject.descr_nonzero), + __float__ = interp2app(W_ComplexObject.descr_float), + __neg__ = interp2app(W_ComplexObject.descr_neg), + __pos__ = interp2app(W_ComplexObject.descr_pos), + __abs__ = interp2app(W_ComplexObject.descr_abs), __eq__ = interp2app(W_ComplexObject.descr_eq), __ne__ = interp2app(W_ComplexObject.descr_ne), From noreply at buildbot.pypy.org Mon Feb 24 00:19:12 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 00:19:12 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Make W_ComplexObject a W_Root. Message-ID: <20140223231912.09A691C303A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69314:9bad70f6ed72 Date: 2014-02-24 00:18 +0100 http://bitbucket.org/pypy/pypy/changeset/9bad70f6ed72/ Log: Make W_ComplexObject a W_Root. 
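Note (illustrative, not part of the changeset): with every multimethod gone, W_ComplexObject no longer needs the objspace-level W_Object base, whose main job was multimethod registration, and can inherit straight from the interpreter-level W_Root; registerimplementation/registermethods/register_all disappear with it. A toy showing why callers are unaffected (all classes here are stand-ins):

    class W_Root(object):
        "stand-in for the interpreter-level base of wrapped objects"

    class W_Object(W_Root):
        "stand-in for the old multimethod-aware base"

    class W_ComplexBefore(W_Object):
        pass

    class W_ComplexAfter(W_Root):
        pass

    # code written against W_Root keeps working either way
    assert issubclass(W_ComplexBefore, W_Root)
    assert issubclass(W_ComplexAfter, W_Root)
    print("both satisfy the W_Root interface")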
diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -1,11 +1,10 @@ import math +from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.objspace.std import newformat from pypy.objspace.std.floatobject import _hash_float -from pypy.objspace.std.model import registerimplementation, W_Object -from pypy.objspace.std.register_all import register_all from pypy.objspace.std.stdtypedef import GetSetProperty, StdTypeDef from rpython.rlib import jit, rcomplex from rpython.rlib.rarithmetic import intmask, r_ulonglong @@ -15,7 +14,7 @@ from rpython.rlib.rstring import ParseStringError -class W_AbstractComplexObject(W_Object): +class W_AbstractComplexObject(W_Root): __slots__ = () def is_w(self, space, w_other): @@ -568,7 +567,6 @@ """(A+Bj).conjugate() -> A-Bj""" return space.newcomplex(self.realval, -self.imagval) -registerimplementation(W_ComplexObject) w_one = W_ComplexObject(1, 0) @@ -640,6 +638,3 @@ conjugate = interp2app(W_ComplexObject.descr_conjugate), ) - -W_ComplexObject.typedef.registermethods(globals()) -register_all(vars(), globals()) From noreply at buildbot.pypy.org Mon Feb 24 00:35:40 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 00:35:40 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Move to_complex(). Message-ID: <20140223233540.7081A1C3599@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69315:5f71433268dc Date: 2014-02-24 00:21 +0100 http://bitbucket.org/pypy/pypy/changeset/5f71433268dc/ Log: Move to_complex(). 
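Note (illustrative, not part of the changeset): the module-level to_complex() helper becomes the _to_complex() method that every binary descr_* above calls. Supported operand types (complex, int, long, float) are converted to a W_ComplexObject; anything else yields None, which the callers translate into NotImplemented. A plain-Python sketch of that coercion contract (to_complex_like is a made-up name):

    def to_complex_like(x):
        if isinstance(x, complex):
            return x
        if isinstance(x, (int, float)):    # plus 'long' on Python 2
            return complex(x)
        return None                        # caller returns NotImplemented

    assert to_complex_like(3) == 3 + 0j
    assert to_complex_like(1.5) == 1.5 + 0j
    assert to_complex_like("nope") is None
    print("coercion sketch ok")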
diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -275,6 +275,17 @@ def int(self, space): raise OperationError(space.w_TypeError, space.wrap("can't convert complex to int; use int(abs(z))")) + def _to_complex(self, space, w_obj): + if isinstance(w_obj, W_ComplexObject): + return w_obj + if space.isinstance_w(w_obj, space.w_int): + return W_ComplexObject(w_obj.intval, 0.0) + if space.isinstance_w(w_obj, space.w_long): + dval = w_obj.tofloat(space) + return W_ComplexObject(dval, 0.0) + if space.isinstance_w(w_obj, space.w_float): + return W_ComplexObject(w_obj.floatval, 0.0) + @staticmethod @unwrap_spec(w_real = WrappedDefault(0.0)) def descr__new__(space, w_complextype, w_real, w_imag=None): @@ -373,7 +384,7 @@ return space.newint(combined) def descr_coerce(self, space, w_other): - w_other = to_complex(space, w_other) + w_other = self._to_complex(space, w_other) if w_other is None: return space.w_NotImplemented return space.newtuple([self, w_other]) @@ -429,47 +440,47 @@ return space.w_NotImplemented def descr_add(self, space, w_rhs): - w_rhs = to_complex(space, w_rhs) + w_rhs = self._to_complex(space, w_rhs) if w_rhs is None: return space.w_NotImplemented return W_ComplexObject(self.realval + w_rhs.realval, self.imagval + w_rhs.imagval) def descr_radd(self, space, w_lhs): - w_lhs = to_complex(space, w_lhs) + w_lhs = self._to_complex(space, w_lhs) if w_lhs is None: return space.w_NotImplemented return W_ComplexObject(w_lhs.realval + self.realval, w_lhs.imagval + self.imagval) def descr_sub(self, space, w_rhs): - w_rhs = to_complex(space, w_rhs) + w_rhs = self._to_complex(space, w_rhs) if w_rhs is None: return space.w_NotImplemented return W_ComplexObject(self.realval - w_rhs.realval, self.imagval - w_rhs.imagval) def descr_rsub(self, space, w_lhs): - w_lhs = to_complex(space, w_lhs) + w_lhs = self._to_complex(space, w_lhs) if w_lhs is None: return space.w_NotImplemented return W_ComplexObject(w_lhs.realval - self.realval, w_lhs.imagval - self.imagval) def descr_mul(self, space, w_rhs): - w_rhs = to_complex(space, w_rhs) + w_rhs = self._to_complex(space, w_rhs) if w_rhs is None: return space.w_NotImplemented return self.mul(w_rhs) def descr_rmul(self, space, w_lhs): - w_lhs = to_complex(space, w_lhs) + w_lhs = self._to_complex(space, w_lhs) if w_lhs is None: return space.w_NotImplemented return w_lhs.mul(self) def descr_truediv(self, space, w_rhs): - w_rhs = to_complex(space, w_rhs) + w_rhs = self._to_complex(space, w_rhs) if w_rhs is None: return space.w_NotImplemented try: @@ -478,7 +489,7 @@ raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) def descr_rtruediv(self, space, w_lhs): - w_lhs = to_complex(space, w_lhs) + w_lhs = self._to_complex(space, w_lhs) if w_lhs is None: return space.w_NotImplemented try: @@ -487,7 +498,7 @@ raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) def descr_floordiv(self, space, w_rhs): - w_rhs = to_complex(space, w_rhs) + w_rhs = self._to_complex(space, w_rhs) if w_rhs is None: return space.w_NotImplemented # don't care about the slight slowdown you get from using divmod @@ -497,7 +508,7 @@ raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) def descr_rfloordiv(self, space, w_lhs): - w_lhs = to_complex(space, w_lhs) + w_lhs = self._to_complex(space, w_lhs) if w_lhs is None: return space.w_NotImplemented # don't care about the slight slowdown you get from using divmod @@ -507,7 +518,7 @@ 
raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) def descr_mod(self, space, w_rhs): - w_rhs = to_complex(space, w_rhs) + w_rhs = self._to_complex(space, w_rhs) if w_rhs is None: return space.w_NotImplemented try: @@ -516,7 +527,7 @@ raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) def descr_rmod(self, space, w_lhs): - w_lhs = to_complex(space, w_lhs) + w_lhs = self._to_complex(space, w_lhs) if w_lhs is None: return space.w_NotImplemented try: @@ -525,7 +536,7 @@ raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) def descr_divmod(self, space, w_rhs): - w_rhs = to_complex(space, w_rhs) + w_rhs = self._to_complex(space, w_rhs) if w_rhs is None: return space.w_NotImplemented try: @@ -535,7 +546,7 @@ return space.newtuple([div, mod]) def descr_rdivmod(self, space, w_lhs): - w_lhs = to_complex(space, w_lhs) + w_lhs = self._to_complex(space, w_lhs) if w_lhs is None: return space.w_NotImplemented try: @@ -546,7 +557,7 @@ @unwrap_spec(w_third_arg=WrappedDefault(None)) def descr_pow(self, space, w_exponent, w_third_arg): - w_exponent = to_complex(space, w_exponent) + w_exponent = self._to_complex(space, w_exponent) if w_exponent is None: return space.w_NotImplemented if not space.is_w(w_third_arg, space.w_None): @@ -571,17 +582,6 @@ w_one = W_ComplexObject(1, 0) -def to_complex(space, w_obj): - if isinstance(w_obj, W_ComplexObject): - return w_obj - if space.isinstance_w(w_obj, space.w_int): - return W_ComplexObject(w_obj.intval, 0.0) - if space.isinstance_w(w_obj, space.w_long): - dval = w_obj.tofloat(space) - return W_ComplexObject(dval, 0.0) - if space.isinstance_w(w_obj, space.w_float): - return W_ComplexObject(w_obj.floatval, 0.0) - def complexwprop(name): def fget(space, w_obj): from pypy.objspace.std.complexobject import W_ComplexObject From noreply at buildbot.pypy.org Mon Feb 24 00:35:41 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 00:35:41 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: pep8 Message-ID: <20140223233541.9FB9F1C3599@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69316:a141c6e25ef6 Date: 2014-02-24 00:35 +0100 http://bitbucket.org/pypy/pypy/changeset/a141c6e25ef6/ Log: pep8 diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -23,9 +23,9 @@ return False if self.user_overridden_class or w_other.user_overridden_class: return self is w_other - real1 = space.float_w(space.getattr(self, space.wrap("real"))) + real1 = space.float_w(space.getattr(self, space.wrap("real"))) real2 = space.float_w(space.getattr(w_other, space.wrap("real"))) - imag1 = space.float_w(space.getattr(self, space.wrap("imag"))) + imag1 = space.float_w(space.getattr(self, space.wrap("imag"))) imag2 = space.float_w(space.getattr(w_other, space.wrap("imag"))) real1 = float2longlong(real1) real2 = float2longlong(real2) @@ -73,7 +73,7 @@ realstart = i pc = s[i] while i < slen and s[i] != ' ': - if s[i] in ('+','-') and pc not in ('e','E') and i != realstart: + if s[i] in ('+', '-') and pc not in ('e', 'E') and i != realstart: break pc = s[i] i += 1 @@ -108,7 +108,7 @@ if imagsign == ' ': raise ValueError - i+=1 + i += 1 # whitespace while i < slen and s[i] == ' ': i += 1 @@ -118,7 +118,7 @@ imagstart = i pc = s[i] while i < slen and s[i] != ' ': - if s[i] in ('+','-') and pc not in ('e','E'): + if s[i] in ('+', '-') and pc not in ('e', 'E'): break pc = 
s[i] i += 1 @@ -126,14 +126,14 @@ imagstop = i - 1 if imagstop < 0: raise ValueError - if s[imagstop] not in ('j','J'): + if s[imagstop] not in ('j', 'J'): raise ValueError if imagstop < imagstart: raise ValueError - while i" % (self.realval, self.imagval) + return "" % (self.realval, self.imagval) def as_tuple(self): return (self.realval, self.imagval) @@ -287,7 +286,7 @@ return W_ComplexObject(w_obj.floatval, 0.0) @staticmethod - @unwrap_spec(w_real = WrappedDefault(0.0)) + @unwrap_spec(w_real=WrappedDefault(0.0)) def descr__new__(space, w_complextype, w_real, w_imag=None): from pypy.objspace.std.complexobject import W_ComplexObject @@ -297,7 +296,7 @@ # is itself a subclass of complex. noarg2 = w_imag is None if (noarg2 and space.is_w(w_complextype, space.w_complex) - and space.is_w(space.type(w_real), space.w_complex)): + and space.is_w(space.type(w_real), space.w_complex)): return w_real if space.isinstance_w(w_real, space.w_str) or \ @@ -637,4 +636,4 @@ __pow__ = interp2app(W_ComplexObject.descr_pow), conjugate = interp2app(W_ComplexObject.descr_conjugate), - ) +) From noreply at buildbot.pypy.org Mon Feb 24 02:04:05 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 24 Feb 2014 02:04:05 +0100 (CET) Subject: [pypy-commit] pypy default: add a numpy constant Message-ID: <20140224010405.711681C02EA@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69318:1a50a1ac857b Date: 2014-02-23 19:08 -0500 http://bitbucket.org/pypy/pypy/changeset/1a50a1ac857b/ Log: add a numpy constant diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -27,7 +27,7 @@ class UMathModule(MixedModule): appleveldefs = {} - interpleveldefs = {} + interpleveldefs = {'FLOATING_POINT_SUPPORT': 'space.wrap(1)'} # ufuncs for exposed, impl in [ ("absolute", "absolute"), diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -83,6 +83,10 @@ class AppTestUfuncs(BaseNumpyAppTest): + def test_constants(self): + import numpy as np + assert np.FLOATING_POINT_SUPPORT == 1 + def test_ufunc_instance(self): from numpypy import add, ufunc From noreply at buildbot.pypy.org Mon Feb 24 02:04:04 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 24 Feb 2014 02:04:04 +0100 (CET) Subject: [pypy-commit] pypy default: failing test for hash of record dtypes Message-ID: <20140224010404.51EC11C02EA@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69317:f33ed1c13f41 Date: 2014-02-23 18:14 -0500 http://bitbucket.org/pypy/pypy/changeset/f33ed1c13f41/ Log: failing test for hash of record dtypes diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -315,6 +315,22 @@ ]: assert hash(tp(value)) == hash(value) + d1 = numpy.dtype([('f0', 'i4'), ('f1', 'i4')]) + d2 = numpy.dtype([('f0', 'i4'), ('f1', 'i4')]) + d3 = numpy.dtype([('f0', 'i4'), ('f2', 'i4')]) + d4 = numpy.dtype([('f0', 'i4'), ('f1', d1)]) + d5 = numpy.dtype([('f0', 'i4'), ('f1', d2)]) + d6 = numpy.dtype([('f0', 'i4'), ('f1', d3)]) + import sys + if '__pypy__' not in sys.builtin_module_names: + assert hash(d1) == hash(d2) + assert hash(d1) != hash(d3) + assert hash(d4) == hash(d5) + assert hash(d4) != hash(d6) + else: + for d in 
[d1, d2, d3, d4, d5, d6]: + raises(TypeError, hash, d) + def test_pickle(self): from numpypy import array, dtype from cPickle import loads, dumps From noreply at buildbot.pypy.org Mon Feb 24 02:04:06 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 24 Feb 2014 02:04:06 +0100 (CET) Subject: [pypy-commit] pypy default: fix ndarray.astype(S0) Message-ID: <20140224010406.E9FD51C02EA@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69319:f2ff6d660efd Date: 2014-02-23 19:17 -0500 http://bitbucket.org/pypy/pypy/changeset/f2ff6d660efd/ Log: fix ndarray.astype(S0) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -572,6 +572,10 @@ if new_dtype.num == NPY.VOID: raise oefmt(space.w_NotImplementedError, "%s.astype(%s) not implemented yet", cur_dtype.name, new_dtype.name) + if new_dtype.num == NPY.STRING and new_dtype.size == 0: + if cur_dtype.num == NPY.STRING: + new_dtype = interp_dtype.variable_dtype(space, + 'S' + str(cur_dtype.size)) impl = self.implementation if isinstance(impl, scalar.Scalar): return W_NDimArray.new_scalar(space, new_dtype, impl.value) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2102,6 +2102,7 @@ assert a[2] == 3.0 a = array('123') + assert a.astype('S0').dtype == 'S3' assert a.astype('i8') == 123 a = array('abcdefgh') exc = raises(ValueError, a.astype, 'i8') From noreply at buildbot.pypy.org Mon Feb 24 02:04:08 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 24 Feb 2014 02:04:08 +0100 (CET) Subject: [pypy-commit] pypy default: better error for ndarray.take with unsupported mode Message-ID: <20140224010408.63D531C02EA@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69320:036b96efb852 Date: 2014-02-23 19:59 -0500 http://bitbucket.org/pypy/pypy/changeset/036b96efb852/ Log: better error for ndarray.take with unsupported mode diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1234,7 +1234,8 @@ app_take = applevel(r""" def take(a, indices, axis, out, mode): - assert mode == 'raise' + if mode != 'raise': + raise NotImplementedError("mode != raise not implemented") if axis is None: from numpy import array indices = array(indices) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2822,6 +2822,10 @@ assert ((a + a).take([3]) == [6]).all() a = arange(12).reshape(2, 6) assert (a[:,::2].take([3, 2, 1]) == [6, 4, 2]).all() + import sys + if '__pypy__' in sys.builtin_module_names: + exc = raises(NotImplementedError, "a.take([3, 2, 1], mode='clip')") + assert exc.value[0] == "mode != raise not implemented" def test_ptp(self): import numpypy as np From noreply at buildbot.pypy.org Mon Feb 24 02:52:49 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 02:52:49 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Kill floattype.py. 
Message-ID: <20140224015249.F2B791C02EA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69321:2ae7ef84292e Date: 2014-02-24 01:08 +0100 http://bitbucket.org/pypy/pypy/changeset/2ae7ef84292e/ Log: Kill floattype.py. diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -3,7 +3,7 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.objspace.std.bytesobject import W_BytesObject -from pypy.objspace.std.floattype import float_typedef +from pypy.objspace.std.floatobject import W_FloatObject from pypy.objspace.std.unicodeobject import W_UnicodeObject from pypy.objspace.std.intobject import W_IntObject from pypy.objspace.std.complexobject import W_ComplexObject @@ -750,7 +750,7 @@ __reduce__ = interp2app(W_Float32Box.descr_reduce), ) -W_Float64Box.typedef = TypeDef("float64", (W_FloatingBox.typedef, float_typedef), +W_Float64Box.typedef = TypeDef("float64", (W_FloatingBox.typedef, W_FloatObject.typedef), __module__ = "numpy", __new__ = interp2app(W_Float64Box.descr__new__.im_func), __reduce__ = interp2app(W_Float64Box.descr_reduce), diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -1,24 +1,114 @@ +import math import operator +import sys from pypy.interpreter.error import OperationError, oefmt -from pypy.objspace.std import model, newformat -from pypy.objspace.std.floattype import float_typedef, W_AbstractFloatObject +from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault, interpindirect2app +from pypy.interpreter.typedef import GetSetProperty +from pypy.objspace.std import newformat +from pypy.objspace.std.longobject import W_LongObject from pypy.objspace.std.multimethod import FailedToImplementArgs from pypy.objspace.std.model import registerimplementation, W_Object from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.noneobject import W_NoneObject -from pypy.objspace.std.longobject import W_LongObject +from pypy.objspace.std.stdtypedef import StdTypeDef, SMM +from rpython.rlib import rarithmetic, rfloat from rpython.rlib.rarithmetic import ovfcheck_float_to_int, intmask, LONG_BIT from rpython.rlib.rfloat import ( isinf, isnan, isfinite, INFINITY, NAN, copysign, formatd, DTSF_ADD_DOT_0, DTSF_STR_PRECISION, float_as_rbigint_ratio) from rpython.rlib.rbigint import rbigint -from rpython.rlib import rfloat +from rpython.rlib.rstring import ParseStringError from rpython.tool.sourcetools import func_with_new_name +from rpython.rlib.unroll import unrolling_iterable +from pypy.objspace.std.intobject import W_IntObject -import math -from pypy.objspace.std.intobject import W_IntObject + +float_as_integer_ratio = SMM("as_integer_ratio", 1) +float_is_integer = SMM("is_integer", 1) +float_hex = SMM("hex", 1) + + +class W_AbstractFloatObject(W_Object): + __slots__ = () + + def is_w(self, space, w_other): + from rpython.rlib.longlong2float import float2longlong + if not isinstance(w_other, W_AbstractFloatObject): + return False + if self.user_overridden_class or w_other.user_overridden_class: + return self is w_other + one = float2longlong(space.float_w(self)) + two = float2longlong(space.float_w(w_other)) + return one == two + + def immutable_unique_id(self, space): + if self.user_overridden_class: 
+ return None + from rpython.rlib.longlong2float import float2longlong + from pypy.objspace.std.model import IDTAG_FLOAT as tag + val = float2longlong(space.float_w(self)) + b = rbigint.fromrarith_int(val) + b = b.lshift(3).or_(rbigint.fromint(tag)) + return space.newlong_from_rbigint(b) + + def int(self, space): + raise NotImplementedError + + +def detect_floatformat(): + from rpython.rtyper.lltypesystem import rffi, lltype + buf = lltype.malloc(rffi.CCHARP.TO, 8, flavor='raw') + rffi.cast(rffi.DOUBLEP, buf)[0] = 9006104071832581.0 + packed = rffi.charpsize2str(buf, 8) + if packed == "\x43\x3f\xff\x01\x02\x03\x04\x05": + double_format = 'IEEE, big-endian' + elif packed == "\x05\x04\x03\x02\x01\xff\x3f\x43": + double_format = 'IEEE, little-endian' + else: + double_format = 'unknown' + lltype.free(buf, flavor='raw') + # + buf = lltype.malloc(rffi.CCHARP.TO, 4, flavor='raw') + rffi.cast(rffi.FLOATP, buf)[0] = rarithmetic.r_singlefloat(16711938.0) + packed = rffi.charpsize2str(buf, 4) + if packed == "\x4b\x7f\x01\x02": + float_format = 'IEEE, big-endian' + elif packed == "\x02\x01\x7f\x4b": + float_format = 'IEEE, little-endian' + else: + float_format = 'unknown' + lltype.free(buf, flavor='raw') + + return double_format, float_format + +_double_format, _float_format = detect_floatformat() + + +def _string_to_float(space, w_source, string): + try: + return rfloat.string_to_float(string) + except ParseStringError as e: + from pypy.objspace.std.intobject import wrap_parsestringerror + raise wrap_parsestringerror(space, e, w_source) + + +_alpha = zip("abcdef", range(10, 16)) + zip("ABCDEF", range(10, 16)) +_hex_to_int = zip("0123456789", range(10)) + _alpha +_hex_to_int_iterable = unrolling_iterable(_hex_to_int) + +def _hex_from_char(c): + for h, v in _hex_to_int_iterable: + if h == c: + return v + return -1 + +def _hex_digit(s, j, co_end, float_digits): + if j < float_digits: + i = co_end - j + else: + i = co_end - 1 - j + return _hex_from_char(s[i]) class W_FloatObject(W_AbstractFloatObject): @@ -26,8 +116,6 @@ The constructor takes an RPython float as an argument.""" _immutable_fields_ = ['floatval'] - typedef = float_typedef - def __init__(self, floatval): self.floatval = floatval @@ -51,8 +139,218 @@ def __repr__(self): return "" % self.floatval + @staticmethod + @unwrap_spec(w_x=WrappedDefault(0.0)) + def descr__new__(space, w_floattype, w_x): + from pypy.objspace.std.floatobject import W_FloatObject + w_value = w_x # 'x' is the keyword argument name in CPython + if space.lookup(w_value, "__float__") is not None: + w_obj = space.float(w_value) + if space.is_w(w_floattype, space.w_float): + return w_obj + value = space.float_w(w_obj) + elif (space.isinstance_w(w_value, space.w_str) or + space.isinstance_w(w_value, space.w_bytearray)): + value = _string_to_float(space, w_value, space.bufferstr_w(w_value)) + elif space.isinstance_w(w_value, space.w_unicode): + from unicodeobject import unicode_to_decimal_w + value = _string_to_float(space, w_value, + unicode_to_decimal_w(space, w_value)) + else: + value = space.float_w(w_x) + w_obj = space.allocate_instance(W_FloatObject, w_floattype) + W_FloatObject.__init__(w_obj, value) + return w_obj + + @staticmethod + @unwrap_spec(s=str) + def descr_fromhex(space, w_cls, s): + length = len(s) + i = 0 + value = 0.0 + while i < length and s[i].isspace(): + i += 1 + if i == length: + raise OperationError(space.w_ValueError, + space.wrap("invalid hex string")) + sign = 1 + if s[i] == "-": + sign = -1 + i += 1 + elif s[i] == "+": + i += 1 + if length == i: + 
raise OperationError(space.w_ValueError, + space.wrap("invalid hex string")) + if s[i] == "i" or s[i] == "I": + i += 1 + if length - i >= 2 and s[i:i + 2].lower() == "nf": + i += 2 + value = rfloat.INFINITY + if length - i >= 5 and s[i:i + 5].lower() == "inity": + i += 5 + elif s[i] == "n" or s[i] == "N": + i += 1 + if length - i >= 2 and s[i:i + 2].lower() == "an": + i += 2 + value = rfloat.NAN + else: + if (s[i] == "0" and length - i > 1 and + (s[i + 1] == "x" or s[i + 1] == "X")): + i += 2 + co_start = i + while i < length and _hex_from_char(s[i]) >= 0: + i += 1 + whole_end = i + if i < length and s[i] == ".": + i += 1 + while i < length and _hex_from_char(s[i]) >= 0: + i += 1 + co_end = i - 1 + else: + co_end = i + total_digits = co_end - co_start + float_digits = co_end - whole_end + if not total_digits: + raise OperationError(space.w_ValueError, + space.wrap("invalid hex string")) + const_one = rfloat.DBL_MIN_EXP - rfloat.DBL_MANT_DIG + sys.maxint // 2 + const_two = sys.maxint // 2 + 1 - rfloat.DBL_MAX_EXP + if total_digits > min(const_one, const_two) // 4: + raise OperationError(space.w_ValueError, space.wrap("way too long")) + if i < length and (s[i] == "p" or s[i] == "P"): + i += 1 + if i == length: + raise OperationError(space.w_ValueError, + space.wrap("invalid hex string")) + exp_sign = 1 + if s[i] == "-" or s[i] == "+": + if s[i] == "-": + exp_sign = -1 + i += 1 + if i == length: + raise OperationError(space.w_ValueError, + space.wrap("invalid hex string")) + if not s[i].isdigit(): + raise OperationError(space.w_ValueError, + space.wrap("invalid hex string")) + exp = ord(s[i]) - ord('0') + i += 1 + while i < length and s[i].isdigit(): + exp = exp * 10 + (ord(s[i]) - ord('0')) + if exp >= (sys.maxint-9) // 10: + if exp_sign > 0: + exp_sign = 2 # overflow in positive numbers + else: + exp_sign = -2 # overflow in negative numbers + i += 1 + if exp_sign == -1: + exp = -exp + elif exp_sign == -2: + exp = -sys.maxint / 2 + elif exp_sign == 2: + exp = sys.maxint / 2 + else: + exp = 0 + while (total_digits and + _hex_digit(s, total_digits - 1, co_end, float_digits) == 0): + total_digits -= 1 + if not total_digits or exp <= -sys.maxint / 2: + value = 0.0 + elif exp >= sys.maxint // 2: + raise OperationError(space.w_OverflowError, space.wrap("too large")) + else: + exp -= 4 * float_digits + top_exp = exp + 4 * (total_digits - 1) + digit = _hex_digit(s, total_digits - 1, co_end, float_digits) + while digit: + top_exp += 1 + digit //= 2 + if top_exp < rfloat.DBL_MIN_EXP - rfloat.DBL_MANT_DIG: + value = 0.0 + elif top_exp > rfloat.DBL_MAX_EXP: + raise OperationError(space.w_OverflowError, + space.wrap("too large")) + else: + lsb = max(top_exp, rfloat.DBL_MIN_EXP) - rfloat.DBL_MANT_DIG + value = 0 + if exp >= lsb: + for j in range(total_digits - 1, -1, -1): + value = 16.0 * value + _hex_digit(s, j, co_end, + float_digits) + value = math.ldexp(value, exp) + else: + half_eps = 1 << ((lsb - exp - 1) % 4) + key_digit = (lsb - exp - 1) // 4 + for j in range(total_digits - 1, key_digit, -1): + value = 16.0 * value + _hex_digit(s, j, co_end, + float_digits) + digit = _hex_digit(s, key_digit, co_end, float_digits) + value = 16.0 * value + (digit & (16 - 2*half_eps)) + if digit & half_eps: + round_up = False + if (digit & (3 * half_eps - 1) or + (half_eps == 8 and + _hex_digit(s, key_digit + 1, co_end, float_digits) & 1)): + round_up = True + else: + for j in range(key_digit - 1, -1, -1): + if _hex_digit(s, j, co_end, float_digits): + round_up = True + break + if round_up: + value += 2 * half_eps + 
mant_dig = rfloat.DBL_MANT_DIG + if (top_exp == rfloat.DBL_MAX_EXP and + value == math.ldexp(2 * half_eps, mant_dig)): + raise OperationError(space.w_OverflowError, + space.wrap("too large")) + value = math.ldexp(value, (exp + 4*key_digit)) + while i < length and s[i].isspace(): + i += 1 + if i != length: + raise OperationError(space.w_ValueError, + space.wrap("invalid hex string")) + w_float = space.wrap(sign * value) + return space.call_function(w_cls, w_float) + + def descr_conjugate(self, space): + return space.float(self) + + def descr_get_real(self, space): + return space.float(self) + + def descr_get_imag(self, space): + return space.wrap(0.0) + + @staticmethod + @unwrap_spec(kind=str) + def descr___getformat__(space, w_cls, kind): + if kind == "float": + return space.wrap(_float_format) + elif kind == "double": + return space.wrap(_double_format) + raise OperationError(space.w_ValueError, + space.wrap("only float and double are valid")) + + registerimplementation(W_FloatObject) +W_FloatObject.typedef = StdTypeDef("float", + __doc__ = '''float(x) -> floating point number + +Convert a string or number to a floating point number, if possible.''', + __new__ = interp2app(W_FloatObject.descr__new__), + __getformat__ = interp2app(W_FloatObject.descr___getformat__, as_classmethod=True), + fromhex = interp2app(W_FloatObject.descr_fromhex, as_classmethod=True), + conjugate = interp2app(W_FloatObject.descr_conjugate), + real = GetSetProperty(W_FloatObject.descr_get_real), + imag = GetSetProperty(W_FloatObject.descr_get_imag), + __int__ = interpindirect2app(W_AbstractFloatObject.int), +) +W_FloatObject.typedef.registermethods(globals()) + + # bool-to-float delegation def delegate_Bool2Float(space, w_bool): return W_FloatObject(float(w_bool.intval)) @@ -560,5 +858,4 @@ return space.w_False return space.wrap(math.floor(v) == v) -from pypy.objspace.std import floattype -register_all(vars(), floattype) +register_all(vars(), globals()) diff --git a/pypy/objspace/std/floattype.py b/pypy/objspace/std/floattype.py deleted file mode 100644 --- a/pypy/objspace/std/floattype.py +++ /dev/null @@ -1,307 +0,0 @@ -import math -import sys -from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib import rfloat, rarithmetic -from pypy.interpreter import typedef -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault,\ - interpindirect2app -from pypy.interpreter.error import OperationError -from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.stdtypedef import StdTypeDef, SMM -from pypy.objspace.std.model import W_Object -from rpython.rlib.rbigint import rbigint -from rpython.rlib.rstring import ParseStringError - - -float_as_integer_ratio = SMM("as_integer_ratio", 1) -float_is_integer = SMM("is_integer", 1) -float_hex = SMM("hex", 1) - -def descr_conjugate(space, w_float): - return space.float(w_float) - -register_all(vars(), globals()) - - - at unwrap_spec(w_x = WrappedDefault(0.0)) -def descr__new__(space, w_floattype, w_x): - from pypy.objspace.std.floatobject import W_FloatObject - w_value = w_x # 'x' is the keyword argument name in CPython - if space.lookup(w_value, "__float__") is not None: - w_obj = space.float(w_value) - if space.is_w(w_floattype, space.w_float): - return w_obj - value = space.float_w(w_obj) - elif (space.isinstance_w(w_value, space.w_str) or - space.isinstance_w(w_value, space.w_bytearray)): - value = _string_to_float(space, w_value, space.bufferstr_w(w_value)) - elif space.isinstance_w(w_value, space.w_unicode): - from 
unicodeobject import unicode_to_decimal_w - value = _string_to_float(space, w_value, - unicode_to_decimal_w(space, w_value)) - else: - value = space.float_w(w_x) - w_obj = space.allocate_instance(W_FloatObject, w_floattype) - W_FloatObject.__init__(w_obj, value) - return w_obj - - -def _string_to_float(space, w_source, string): - try: - return rfloat.string_to_float(string) - except ParseStringError as e: - from pypy.objspace.std.intobject import wrap_parsestringerror - raise wrap_parsestringerror(space, e, w_source) - - -def detect_floatformat(): - from rpython.rtyper.lltypesystem import rffi, lltype - buf = lltype.malloc(rffi.CCHARP.TO, 8, flavor='raw') - rffi.cast(rffi.DOUBLEP, buf)[0] = 9006104071832581.0 - packed = rffi.charpsize2str(buf, 8) - if packed == "\x43\x3f\xff\x01\x02\x03\x04\x05": - double_format = 'IEEE, big-endian' - elif packed == "\x05\x04\x03\x02\x01\xff\x3f\x43": - double_format = 'IEEE, little-endian' - else: - double_format = 'unknown' - lltype.free(buf, flavor='raw') - # - buf = lltype.malloc(rffi.CCHARP.TO, 4, flavor='raw') - rffi.cast(rffi.FLOATP, buf)[0] = rarithmetic.r_singlefloat(16711938.0) - packed = rffi.charpsize2str(buf, 4) - if packed == "\x4b\x7f\x01\x02": - float_format = 'IEEE, big-endian' - elif packed == "\x02\x01\x7f\x4b": - float_format = 'IEEE, little-endian' - else: - float_format = 'unknown' - lltype.free(buf, flavor='raw') - - return double_format, float_format - -_double_format, _float_format = detect_floatformat() - - at unwrap_spec(kind=str) -def descr___getformat__(space, w_cls, kind): - if kind == "float": - return space.wrap(_float_format) - elif kind == "double": - return space.wrap(_double_format) - raise OperationError(space.w_ValueError, - space.wrap("only float and double are valid")) - -_alpha = zip("abcdef", range(10, 16)) + zip("ABCDEF", range(10, 16)) -_hex_to_int = zip("0123456789", range(10)) + _alpha -_hex_to_int_iterable = unrolling_iterable(_hex_to_int) -def _hex_from_char(c): - for h, v in _hex_to_int_iterable: - if h == c: - return v - return -1 - -def _hex_digit(s, j, co_end, float_digits): - if j < float_digits: - i = co_end - j - else: - i = co_end - 1 - j - return _hex_from_char(s[i]) - - at unwrap_spec(s=str) -def descr_fromhex(space, w_cls, s): - length = len(s) - i = 0 - value = 0.0 - while i < length and s[i].isspace(): - i += 1 - if i == length: - raise OperationError(space.w_ValueError, - space.wrap("invalid hex string")) - sign = 1 - if s[i] == "-": - sign = -1 - i += 1 - elif s[i] == "+": - i += 1 - if length == i: - raise OperationError(space.w_ValueError, - space.wrap("invalid hex string")) - if s[i] == "i" or s[i] == "I": - i += 1 - if length - i >= 2 and s[i:i + 2].lower() == "nf": - i += 2 - value = rfloat.INFINITY - if length - i >= 5 and s[i:i + 5].lower() == "inity": - i += 5 - elif s[i] == "n" or s[i] == "N": - i += 1 - if length - i >= 2 and s[i:i + 2].lower() == "an": - i += 2 - value = rfloat.NAN - else: - if (s[i] == "0" and length - i > 1 and - (s[i + 1] == "x" or s[i + 1] == "X")): - i += 2 - co_start = i - while i < length and _hex_from_char(s[i]) >= 0: - i += 1 - whole_end = i - if i < length and s[i] == ".": - i += 1 - while i < length and _hex_from_char(s[i]) >= 0: - i += 1 - co_end = i - 1 - else: - co_end = i - total_digits = co_end - co_start - float_digits = co_end - whole_end - if not total_digits: - raise OperationError(space.w_ValueError, - space.wrap("invalid hex string")) - const_one = rfloat.DBL_MIN_EXP - rfloat.DBL_MANT_DIG + sys.maxint // 2 - const_two = sys.maxint // 2 + 1 - 
rfloat.DBL_MAX_EXP - if total_digits > min(const_one, const_two) // 4: - raise OperationError(space.w_ValueError, space.wrap("way too long")) - if i < length and (s[i] == "p" or s[i] == "P"): - i += 1 - if i == length: - raise OperationError(space.w_ValueError, - space.wrap("invalid hex string")) - exp_sign = 1 - if s[i] == "-" or s[i] == "+": - if s[i] == "-": - exp_sign = -1 - i += 1 - if i == length: - raise OperationError(space.w_ValueError, - space.wrap("invalid hex string")) - if not s[i].isdigit(): - raise OperationError(space.w_ValueError, - space.wrap("invalid hex string")) - exp = ord(s[i]) - ord('0') - i += 1 - while i < length and s[i].isdigit(): - exp = exp * 10 + (ord(s[i]) - ord('0')) - if exp >= (sys.maxint-9) // 10: - if exp_sign > 0: - exp_sign = 2 # overflow in positive numbers - else: - exp_sign = -2 # overflow in negative numbers - i += 1 - if exp_sign == -1: - exp = -exp - elif exp_sign == -2: - exp = -sys.maxint / 2 - elif exp_sign == 2: - exp = sys.maxint / 2 - else: - exp = 0 - while (total_digits and - _hex_digit(s, total_digits - 1, co_end, float_digits) == 0): - total_digits -= 1 - if not total_digits or exp <= -sys.maxint / 2: - value = 0.0 - elif exp >= sys.maxint // 2: - raise OperationError(space.w_OverflowError, space.wrap("too large")) - else: - exp -= 4 * float_digits - top_exp = exp + 4 * (total_digits - 1) - digit = _hex_digit(s, total_digits - 1, co_end, float_digits) - while digit: - top_exp += 1 - digit //= 2 - if top_exp < rfloat.DBL_MIN_EXP - rfloat.DBL_MANT_DIG: - value = 0.0 - elif top_exp > rfloat.DBL_MAX_EXP: - raise OperationError(space.w_OverflowError, - space.wrap("too large")) - else: - lsb = max(top_exp, rfloat.DBL_MIN_EXP) - rfloat.DBL_MANT_DIG - value = 0 - if exp >= lsb: - for j in range(total_digits - 1, -1, -1): - value = 16.0 * value + _hex_digit(s, j, co_end, - float_digits) - value = math.ldexp(value, exp) - else: - half_eps = 1 << ((lsb - exp - 1) % 4) - key_digit = (lsb - exp - 1) // 4 - for j in range(total_digits - 1, key_digit, -1): - value = 16.0 * value + _hex_digit(s, j, co_end, - float_digits) - digit = _hex_digit(s, key_digit, co_end, float_digits) - value = 16.0 * value + (digit & (16 - 2*half_eps)) - if digit & half_eps: - round_up = False - if (digit & (3 * half_eps - 1) or - (half_eps == 8 and - _hex_digit(s, key_digit + 1, co_end, float_digits) & 1)): - round_up = True - else: - for j in range(key_digit - 1, -1, -1): - if _hex_digit(s, j, co_end, float_digits): - round_up = True - break - if round_up: - value += 2 * half_eps - mant_dig = rfloat.DBL_MANT_DIG - if (top_exp == rfloat.DBL_MAX_EXP and - value == math.ldexp(2 * half_eps, mant_dig)): - raise OperationError(space.w_OverflowError, - space.wrap("too large")) - value = math.ldexp(value, (exp + 4*key_digit)) - while i < length and s[i].isspace(): - i += 1 - if i != length: - raise OperationError(space.w_ValueError, - space.wrap("invalid hex string")) - w_float = space.wrap(sign * value) - return space.call_function(w_cls, w_float) - -def descr_get_real(space, w_obj): - return space.float(w_obj) - -def descr_get_imag(space, w_obj): - return space.wrap(0.0) - -# ____________________________________________________________ - -class W_AbstractFloatObject(W_Object): - __slots__ = () - - def is_w(self, space, w_other): - from rpython.rlib.longlong2float import float2longlong - if not isinstance(w_other, W_AbstractFloatObject): - return False - if self.user_overridden_class or w_other.user_overridden_class: - return self is w_other - one = 
float2longlong(space.float_w(self)) - two = float2longlong(space.float_w(w_other)) - return one == two - - def immutable_unique_id(self, space): - if self.user_overridden_class: - return None - from rpython.rlib.longlong2float import float2longlong - from pypy.objspace.std.model import IDTAG_FLOAT as tag - val = float2longlong(space.float_w(self)) - b = rbigint.fromrarith_int(val) - b = b.lshift(3).or_(rbigint.fromint(tag)) - return space.newlong_from_rbigint(b) - - def int(self, space): - raise NotImplementedError - -float_typedef = StdTypeDef("float", - __doc__ = '''float(x) -> floating point number - -Convert a string or number to a floating point number, if possible.''', - __new__ = interp2app(descr__new__), - __getformat__ = interp2app(descr___getformat__, as_classmethod=True), - fromhex = interp2app(descr_fromhex, as_classmethod=True), - conjugate = interp2app(descr_conjugate), - real = typedef.GetSetProperty(descr_get_real), - imag = typedef.GetSetProperty(descr_get_imag), - __int__ = interpindirect2app(W_AbstractFloatObject.int), -) -float_typedef.registermethods(globals()) diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -32,7 +32,6 @@ # All the Python types that we want to provide in this StdObjSpace class result: from pypy.objspace.std.objecttype import object_typedef - from pypy.objspace.std.floattype import float_typedef from pypy.objspace.std.typeobject import type_typedef from pypy.objspace.std.slicetype import slice_typedef from pypy.objspace.std.nonetype import none_typedef @@ -57,7 +56,6 @@ from pypy.objspace.std import typeobject from pypy.objspace.std import sliceobject from pypy.objspace.std import longobject - from pypy.objspace.std import complexobject from pypy.objspace.std import noneobject from pypy.objspace.std import iterobject from pypy.objspace.std import unicodeobject @@ -82,6 +80,7 @@ self.pythontypes.append(intobject.W_IntObject.typedef) self.pythontypes.append(boolobject.W_BoolObject.typedef) self.pythontypes.append(longobject.W_LongObject.typedef) + self.pythontypes.append(floatobject.W_FloatObject.typedef) self.pythontypes.append(complexobject.W_ComplexObject.typedef) # the set of implementation types From noreply at buildbot.pypy.org Mon Feb 24 02:52:51 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 02:52:51 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Kill float's binary SMMs. Message-ID: <20140224015251.41CAD1C02EA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69322:c12626ccdd0a Date: 2014-02-24 02:07 +0100 http://bitbucket.org/pypy/pypy/changeset/c12626ccdd0a/ Log: Kill float's binary SMMs. 
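
The hunks that follow drop the double-dispatch multimethods (add__Float_Float, sub__Float_Float, ...) in favour of ordinary descr_* methods: each one coerces the other operand through a _to_float() helper and returns space.w_NotImplemented when it cannot, so the usual reflected-operator fallback applies. A minimal plain-Python sketch of that dispatch shape (illustrative names only, not PyPy's interp-level API):

    class MyFloat(object):
        def __init__(self, val):
            self.val = float(val)

        @staticmethod
        def _coerce(other):
            # accept only the types we know how to handle
            if isinstance(other, MyFloat):
                return other
            if isinstance(other, (int, float)):
                return MyFloat(other)
            return None

        def __add__(self, other):
            w_other = MyFloat._coerce(other)
            if w_other is None:
                return NotImplemented   # lets the other operand's __radd__ run
            return MyFloat(self.val + w_other.val)

        __radd__ = __add__              # the same coercion covers the reflected op

    print((MyFloat(1.5) + 2).val)       # 3.5
    print((2 + MyFloat(1.5)).val)       # 3.5
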
diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -7,7 +7,6 @@ from pypy.interpreter.typedef import GetSetProperty from pypy.objspace.std import newformat from pypy.objspace.std.longobject import W_LongObject -from pypy.objspace.std.multimethod import FailedToImplementArgs from pypy.objspace.std.model import registerimplementation, W_Object from pypy.objspace.std.register_all import register_all from pypy.objspace.std.stdtypedef import StdTypeDef, SMM @@ -314,6 +313,160 @@ w_float = space.wrap(sign * value) return space.call_function(w_cls, w_float) + def _to_float(self, space, w_obj): + if isinstance(w_obj, W_FloatObject): + return w_obj + if space.isinstance_w(w_obj, space.w_int): + return W_FloatObject(float(w_obj.intval)) + if space.isinstance_w(w_obj, space.w_long): + return W_FloatObject(w_obj.tofloat(space)) + + def descr_coerce(self, space, w_other): + w_other = self._to_float(space, w_other) + if w_other is None: + return space.w_NotImplemented + return space.newtuple([self, w_other]) + + def descr_add(self, space, w_rhs): + w_rhs = self._to_float(space, w_rhs) + if w_rhs is None: + return space.w_NotImplemented + return W_FloatObject(self.floatval + w_rhs.floatval) + + def descr_radd(self, space, w_lhs): + w_lhs = self._to_float(space, w_lhs) + if w_lhs is None: + return space.w_NotImplemented + return W_FloatObject(w_lhs.floatval + self.floatval) + + def descr_sub(self, space, w_rhs): + w_rhs = self._to_float(space, w_rhs) + if w_rhs is None: + return space.w_NotImplemented + return W_FloatObject(self.floatval - w_rhs.floatval) + + def descr_rsub(self, space, w_lhs): + w_lhs = self._to_float(space, w_lhs) + if w_lhs is None: + return space.w_NotImplemented + return W_FloatObject(w_lhs.floatval - self.floatval) + + def descr_mul(self, space, w_rhs): + w_rhs = self._to_float(space, w_rhs) + if w_rhs is None: + return space.w_NotImplemented + return W_FloatObject(self.floatval * w_rhs.floatval) + + def descr_rmul(self, space, w_lhs): + w_lhs = self._to_float(space, w_lhs) + if w_lhs is None: + return space.w_NotImplemented + return W_FloatObject(w_lhs.floatval * self.floatval) + + def descr_div(self, space, w_rhs): + w_rhs = self._to_float(space, w_rhs) + if w_rhs is None: + return space.w_NotImplemented + rhs = w_rhs.floatval + if rhs == 0.0: + raise OperationError(space.w_ZeroDivisionError, space.wrap("float division")) + return W_FloatObject(self.floatval / rhs) + + def descr_rdiv(self, space, w_lhs): + w_lhs = self._to_float(space, w_lhs) + if w_lhs is None: + return space.w_NotImplemented + lhs = w_lhs.floatval + if lhs == 0.0: + raise OperationError(space.w_ZeroDivisionError, space.wrap("float division")) + return W_FloatObject(lhs / self.floatval) + + def descr_floordiv(self, space, w_rhs): + w_rhs = self._to_float(space, w_rhs) + if w_rhs is None: + return space.w_NotImplemented + return _divmod_w(space, self, w_rhs)[0] + + def descr_rfloordiv(self, space, w_lhs): + w_lhs = self._to_float(space, w_lhs) + if w_lhs is None: + return space.w_NotImplemented + return _divmod_w(space, w_lhs, self)[0] + + def descr_mod(self, space, w_rhs): + w_rhs = self._to_float(space, w_rhs) + if w_rhs is None: + return space.w_NotImplemented + x = self.floatval + y = w_rhs.floatval + if y == 0.0: + raise OperationError(space.w_ZeroDivisionError, space.wrap("float modulo")) + try: + mod = math.fmod(x, y) + except ValueError: + mod = rfloat.NAN + else: + if mod: + # ensure the 
remainder has the same sign as the denominator + if (y < 0.0) != (mod < 0.0): + mod += y + else: + # the remainder is zero, and in the presence of signed zeroes + # fmod returns different results across platforms; ensure + # it has the same sign as the denominator; we'd like to do + # "mod = y * 0.0", but that may get optimized away + mod = copysign(0.0, y) + + return W_FloatObject(mod) + + def descr_rmod(self, space, w_lhs): + w_lhs = self._to_float(space, w_lhs) + if w_lhs is None: + return space.w_NotImplemented + return w_lhs.descr_mod(space, self) + + def descr_divmod(self, space, w_rhs): + w_rhs = self._to_float(space, w_rhs) + if w_rhs is None: + return space.w_NotImplemented + return space.newtuple(_divmod_w(space, self, w_rhs)) + + def descr_rdivmod(self, space, w_lhs): + w_lhs = self._to_float(space, w_lhs) + if w_lhs is None: + return space.w_NotImplemented + return space.newtuple(_divmod_w(space, w_lhs, self)) + + @unwrap_spec(w_third_arg=WrappedDefault(None)) + def descr_pow(self, space, w_rhs, w_third_arg): + # This raises FailedToImplement in cases like overflow where a + # (purely theoretical) big-precision float implementation would have + # a chance to give a result, and directly OperationError for errors + # that we want to force to be reported to the user. + + w_rhs = self._to_float(space, w_rhs) + if w_rhs is None: + return space.w_NotImplemented + if not space.is_w(w_third_arg, space.w_None): + raise OperationError(space.w_TypeError, space.wrap( + "pow() 3rd argument not allowed unless all arguments are integers")) + x = self.floatval + y = w_rhs.floatval + + try: + result = _pow(space, x, y) + except PowDomainError: + raise oefmt(space.w_ValueError, + "negative number cannot be raised to a fractional power") + return W_FloatObject(result) + + @unwrap_spec(w_third_arg=WrappedDefault(None)) + def descr_rpow(self, space, w_lhs, w_third_arg): + w_lhs = self._to_float(space, w_lhs) + if w_lhs is None: + return space.w_NotImplemented + return w_lhs.descr_pow(space, self, w_third_arg) + def descr_conjugate(self, space): return space.float(self) @@ -343,6 +496,27 @@ __new__ = interp2app(W_FloatObject.descr__new__), __getformat__ = interp2app(W_FloatObject.descr___getformat__, as_classmethod=True), fromhex = interp2app(W_FloatObject.descr_fromhex, as_classmethod=True), + __coerce__ = interp2app(W_FloatObject.descr_coerce), + + __add__ = interp2app(W_FloatObject.descr_add), + __radd__ = interp2app(W_FloatObject.descr_radd), + __sub__ = interp2app(W_FloatObject.descr_sub), + __rsub__ = interp2app(W_FloatObject.descr_rsub), + __mul__ = interp2app(W_FloatObject.descr_mul), + __rmul__ = interp2app(W_FloatObject.descr_rmul), + __div__ = interp2app(W_FloatObject.descr_div), + __rdiv__ = interp2app(W_FloatObject.descr_rdiv), + __truediv__ = interp2app(W_FloatObject.descr_div), + __rtruediv__ = interp2app(W_FloatObject.descr_rdiv), + __floordiv__ = interp2app(W_FloatObject.descr_floordiv), + __rfloordiv__ = interp2app(W_FloatObject.descr_rfloordiv), + __mod__ = interp2app(W_FloatObject.descr_mod), + __rmod__ = interp2app(W_FloatObject.descr_rmod), + __divmod__ = interp2app(W_FloatObject.descr_divmod), + __rdivmod__ = interp2app(W_FloatObject.descr_rdivmod), + __pow__ = interp2app(W_FloatObject.descr_pow), + __rpow__ = interp2app(W_FloatObject.descr_rpow), + conjugate = interp2app(W_FloatObject.descr_conjugate), real = GetSetProperty(W_FloatObject.descr_get_real), imag = GetSetProperty(W_FloatObject.descr_get_imag), @@ -351,19 +525,6 @@ W_FloatObject.typedef.registermethods(globals()) 
-# bool-to-float delegation -def delegate_Bool2Float(space, w_bool): - return W_FloatObject(float(w_bool.intval)) - -# int-to-float delegation -def delegate_Int2Float(space, w_intobj): - return W_FloatObject(float(w_intobj.intval)) - -# long-to-float delegation -def delegate_Long2Float(space, w_longobj): - return W_FloatObject(w_longobj.tofloat(space)) - - # float__Float is supposed to do nothing, unless it has # a derived float object, where it should return # an exact one. @@ -613,67 +774,11 @@ return x -# coerce -def coerce__Float_Float(space, w_float1, w_float2): - return space.newtuple([w_float1, w_float2]) - - -def add__Float_Float(space, w_float1, w_float2): - x = w_float1.floatval - y = w_float2.floatval - return W_FloatObject(x + y) - -def sub__Float_Float(space, w_float1, w_float2): - x = w_float1.floatval - y = w_float2.floatval - return W_FloatObject(x - y) - -def mul__Float_Float(space, w_float1, w_float2): - x = w_float1.floatval - y = w_float2.floatval - return W_FloatObject(x * y) - -def div__Float_Float(space, w_float1, w_float2): - x = w_float1.floatval - y = w_float2.floatval - if y == 0.0: - raise FailedToImplementArgs(space.w_ZeroDivisionError, space.wrap("float division")) - return W_FloatObject(x / y) - -truediv__Float_Float = div__Float_Float - -def floordiv__Float_Float(space, w_float1, w_float2): - w_div, w_mod = _divmod_w(space, w_float1, w_float2) - return w_div - -def mod__Float_Float(space, w_float1, w_float2): - x = w_float1.floatval - y = w_float2.floatval - if y == 0.0: - raise FailedToImplementArgs(space.w_ZeroDivisionError, space.wrap("float modulo")) - try: - mod = math.fmod(x, y) - except ValueError: - mod = rfloat.NAN - else: - if mod: - # ensure the remainder has the same sign as the denominator - if (y < 0.0) != (mod < 0.0): - mod += y - else: - # the remainder is zero, and in the presence of signed zeroes - # fmod returns different results across platforms; ensure - # it has the same sign as the denominator; we'd like to do - # "mod = y * 0.0", but that may get optimized away - mod = copysign(0.0, y) - - return W_FloatObject(mod) - def _divmod_w(space, w_float1, w_float2): x = w_float1.floatval y = w_float2.floatval if y == 0.0: - raise FailedToImplementArgs(space.w_ZeroDivisionError, space.wrap("float modulo")) + raise OperationError(space.w_ZeroDivisionError, space.wrap("float modulo")) try: mod = math.fmod(x, y) except ValueError: @@ -709,27 +814,6 @@ return [W_FloatObject(floordiv), W_FloatObject(mod)] -def divmod__Float_Float(space, w_float1, w_float2): - return space.newtuple(_divmod_w(space, w_float1, w_float2)) - -def pow__Float_Float_ANY(space, w_float1, w_float2, thirdArg): - # This raises FailedToImplement in cases like overflow where a - # (purely theoretical) big-precision float implementation would have - # a chance to give a result, and directly OperationError for errors - # that we want to force to be reported to the user. - if not space.is_w(thirdArg, space.w_None): - raise OperationError(space.w_TypeError, space.wrap( - "pow() 3rd argument not allowed unless all arguments are integers")) - x = w_float1.floatval - y = w_float2.floatval - - try: - result = _pow(space, x, y) - except PowDomainError: - raise oefmt(space.w_ValueError, - "negative number cannot be raised to a fractional power") - return W_FloatObject(result) - class PowDomainError(ValueError): """Signals a negative number raised to a fractional power""" @@ -810,7 +894,7 @@ # We delegate to our implementation of math.pow() the error detection. 
z = math.pow(x,y) except OverflowError: - raise FailedToImplementArgs(space.w_OverflowError, + raise OperationError(space.w_OverflowError, space.wrap("float power")) except ValueError: raise OperationError(space.w_ValueError, diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -133,21 +133,12 @@ # when trying to dispatch multimethods. # XXX build these lists a bit more automatically later - self.typeorder[boolobject.W_BoolObject] += [ - (floatobject.W_FloatObject, floatobject.delegate_Bool2Float), - ] - self.typeorder[intobject.W_IntObject] += [ - (floatobject.W_FloatObject, floatobject.delegate_Int2Float), - ] if config.objspace.std.withsmalllong: from pypy.objspace.std import smalllongobject self.typeorder[smalllongobject.W_SmallLongObject] += [ (floatobject.W_FloatObject, smalllongobject.delegate_SmallLong2Float), (complexobject.W_ComplexObject, smalllongobject.delegate_SmallLong2Complex), ] - self.typeorder[longobject.W_LongObject] += [ - (floatobject.W_FloatObject, floatobject.delegate_Long2Float), - ] if config.objspace.std.withstrbuf: from pypy.objspace.std import strbufobject diff --git a/pypy/objspace/std/test/test_floatobject.py b/pypy/objspace/std/test/test_floatobject.py --- a/pypy/objspace/std/test/test_floatobject.py +++ b/pypy/objspace/std/test/test_floatobject.py @@ -12,27 +12,27 @@ f2 = fobj.W_FloatObject(y) f3 = fobj.W_FloatObject(z) self.space.raises_w(self.space.w_TypeError, - fobj.pow__Float_Float_ANY, - self.space, f1, f2, f3) + f1.descr_pow, + self.space, f2, f3) def test_pow_ffn(self): x = 10.0 y = 2.0 f1 = fobj.W_FloatObject(x) f2 = fobj.W_FloatObject(y) - v = fobj.pow__Float_Float_ANY(self.space, f1, f2, self.space.w_None) + v = f1.descr_pow(self.space, f2, self.space.w_None) assert v.floatval == x ** y f1 = fobj.W_FloatObject(-1.23) f2 = fobj.W_FloatObject(-4.56) self.space.raises_w(self.space.w_ValueError, - fobj.pow__Float_Float_ANY, - self.space, f1, f2, + f1.descr_pow, + self.space, f2, self.space.w_None) x = -10 y = 2.0 f1 = fobj.W_FloatObject(x) f2 = fobj.W_FloatObject(y) - v = fobj.pow__Float_Float_ANY(self.space, f1, f2, self.space.w_None) + v = f1.descr_pow(self.space, f2, self.space.w_None) assert v.floatval == x**y def test_dont_use_long_impl(self): From noreply at buildbot.pypy.org Mon Feb 24 02:52:52 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 02:52:52 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Kill float's comparison SMMs. Message-ID: <20140224015252.7269D1C02EA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69323:9b1c802844d9 Date: 2014-02-24 02:49 +0100 http://bitbucket.org/pypy/pypy/changeset/9b1c802844d9/ Log: Kill float's comparison SMMs. 
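
The comparison multimethods are folded into a make_compare_func() factory below. The subtle part it preserves is exactness: coercing a very large integer to a float can round, so the code compares on the integer side instead, building an rbigint from floor() or ceil() of the float. A small runnable plain-Python sketch of that idea for the '<' case (ordinary ints stand in for rbigint; names are illustrative):

    import math

    def float_lt_int(f, n):
        # 'float < int' decided without converting n to a float: for huge n
        # that conversion can round and flip the answer, so compare
        # floor(f), which is an exact integer, against n instead.
        if math.isinf(f) or math.isnan(f):
            return f < 0.0              # non-finite: only the sign matters (nan: False)
        return int(math.floor(f)) < n

    print(float_lt_int(1e19, 10**19))       # False: 1e19 is exactly 10**19
    print(float_lt_int(1e19, 10**19 + 1))   # True, even though float(10**19 + 1) == 1e19

The branch's version covers all six operators the same way, picking math.ceil() for > and <= and math.floor() for < and >=, as the comments in the diff spell out.
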
diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -17,11 +17,8 @@ DTSF_ADD_DOT_0, DTSF_STR_PRECISION, float_as_rbigint_ratio) from rpython.rlib.rbigint import rbigint from rpython.rlib.rstring import ParseStringError -from rpython.tool.sourcetools import func_with_new_name from rpython.rlib.unroll import unrolling_iterable -from pypy.objspace.std.intobject import W_IntObject - float_as_integer_ratio = SMM("as_integer_ratio", 1) float_is_integer = SMM("is_integer", 1) @@ -110,6 +107,53 @@ return _hex_from_char(s[i]) +def make_compare_func(opname): + op = getattr(operator, opname) + + if opname == 'eq' or opname == 'ne': + def do_compare_bigint(f1, b2): + """f1 is a float. b2 is a bigint.""" + if not isfinite(f1) or math.floor(f1) != f1: + return opname == 'ne' + b1 = rbigint.fromfloat(f1) + res = b1.eq(b2) + if opname == 'ne': + res = not res + return res + else: + def do_compare_bigint(f1, b2): + """f1 is a float. b2 is a bigint.""" + if not isfinite(f1): + return op(f1, 0.0) + if opname == 'gt' or opname == 'le': + # 'float > long' <==> 'ceil(float) > long' + # 'float <= long' <==> 'ceil(float) <= long' + f1 = math.ceil(f1) + else: + # 'float < long' <==> 'floor(float) < long' + # 'float >= long' <==> 'floor(float) >= long' + f1 = math.floor(f1) + b1 = rbigint.fromfloat(f1) + return getattr(b1, opname)(b2) + + def _compare(self, space, w_other): + if isinstance(w_other, W_FloatObject): + return space.newbool(op(self.floatval, w_other.floatval)) + if space.isinstance_w(w_other, space.w_int): + f1 = self.floatval + i2 = w_other.intval + f2 = float(i2) + if LONG_BIT > 32 and int(f2) != i2: + res = do_compare_bigint(f1, rbigint.fromint(i2)) + else: + res = op(f1, f2) + return space.newbool(res) + if space.isinstance_w(w_other, space.w_long): + return space.newbool(do_compare_bigint(self.floatval, w_other.num)) + return space.w_NotImplemented + return _compare + + class W_FloatObject(W_AbstractFloatObject): """This is a implementation of the app-level 'float' type. The constructor takes an RPython float as an argument.""" @@ -327,6 +371,13 @@ return space.w_NotImplemented return space.newtuple([self, w_other]) + descr_eq = make_compare_func('eq') + descr_ne = make_compare_func('ne') + descr_lt = make_compare_func('lt') + descr_le = make_compare_func('le') + descr_gt = make_compare_func('gt') + descr_ge = make_compare_func('ge') + def descr_add(self, space, w_rhs): w_rhs = self._to_float(space, w_rhs) if w_rhs is None: @@ -498,6 +549,13 @@ fromhex = interp2app(W_FloatObject.descr_fromhex, as_classmethod=True), __coerce__ = interp2app(W_FloatObject.descr_coerce), + __eq__ = interp2app(W_FloatObject.descr_eq), + __ne__ = interp2app(W_FloatObject.descr_ne), + __lt__ = interp2app(W_FloatObject.descr_lt), + __le__ = interp2app(W_FloatObject.descr_le), + __gt__ = interp2app(W_FloatObject.descr_gt), + __ge__ = interp2app(W_FloatObject.descr_ge), + __add__ = interp2app(W_FloatObject.descr_add), __radd__ = interp2app(W_FloatObject.descr_radd), __sub__ = interp2app(W_FloatObject.descr_sub), @@ -614,113 +672,6 @@ def format__Float_ANY(space, w_float, w_spec): return newformat.run_formatter(space, w_spec, "format_float", w_float) -# ____________________________________________________________ -# A mess to handle all cases of float comparison without relying -# on delegation, which can unfortunately loose precision when -# casting an int or a long to a float. 
- -def list_compare_funcs(declarator): - for op in ['lt', 'le', 'eq', 'ne', 'gt', 'ge']: - func, name = declarator(op) - globals()[name] = func_with_new_name(func, name) - -def _reverse(opname): - if opname[0] == 'l': return 'g' + opname[1:] - elif opname[0] == 'g': return 'l' + opname[1:] - else: return opname - - -def declare_compare_bigint(opname): - """Return a helper function that implements a float-bigint comparison.""" - op = getattr(operator, opname) - # - if opname == 'eq' or opname == 'ne': - def do_compare_bigint(f1, b2): - """f1 is a float. b2 is a bigint.""" - if not isfinite(f1) or math.floor(f1) != f1: - return opname == 'ne' - b1 = rbigint.fromfloat(f1) - res = b1.eq(b2) - if opname == 'ne': - res = not res - return res - else: - def do_compare_bigint(f1, b2): - """f1 is a float. b2 is a bigint.""" - if not isfinite(f1): - return op(f1, 0.0) - if opname == 'gt' or opname == 'le': - # 'float > long' <==> 'ceil(float) > long' - # 'float <= long' <==> 'ceil(float) <= long' - f1 = math.ceil(f1) - else: - # 'float < long' <==> 'floor(float) < long' - # 'float >= long' <==> 'floor(float) >= long' - f1 = math.floor(f1) - b1 = rbigint.fromfloat(f1) - return getattr(b1, opname)(b2) - # - return do_compare_bigint, 'compare_bigint_' + opname -list_compare_funcs(declare_compare_bigint) - - -def declare_cmp_float_float(opname): - op = getattr(operator, opname) - def f(space, w_float1, w_float2): - f1 = w_float1.floatval - f2 = w_float2.floatval - return space.newbool(op(f1, f2)) - return f, opname + "__Float_Float" -list_compare_funcs(declare_cmp_float_float) - -def declare_cmp_float_int(opname): - op = getattr(operator, opname) - compare = globals()['compare_bigint_' + opname] - def f(space, w_float1, w_int2): - f1 = w_float1.floatval - i2 = w_int2.intval - f2 = float(i2) - if LONG_BIT > 32 and int(f2) != i2: - res = compare(f1, rbigint.fromint(i2)) - else: - res = op(f1, f2) - return space.newbool(res) - return f, opname + "__Float_Int" -list_compare_funcs(declare_cmp_float_int) - -def declare_cmp_float_long(opname): - compare = globals()['compare_bigint_' + opname] - def f(space, w_float1, w_long2): - f1 = w_float1.floatval - b2 = w_long2.num - return space.newbool(compare(f1, b2)) - return f, opname + "__Float_Long" -list_compare_funcs(declare_cmp_float_long) - -def declare_cmp_int_float(opname): - op = getattr(operator, opname) - revcompare = globals()['compare_bigint_' + _reverse(opname)] - def f(space, w_int1, w_float2): - f2 = w_float2.floatval - i1 = w_int1.intval - f1 = float(i1) - if LONG_BIT > 32 and int(f1) != i1: - res = revcompare(f2, rbigint.fromint(i1)) - else: - res = op(f1, f2) - return space.newbool(res) - return f, opname + "__Int_Float" -list_compare_funcs(declare_cmp_int_float) - -def declare_cmp_long_float(opname): - revcompare = globals()['compare_bigint_' + _reverse(opname)] - def f(space, w_long1, w_float2): - f2 = w_float2.floatval - b1 = w_long1.num - return space.newbool(revcompare(f2, b1)) - return f, opname + "__Long_Float" -list_compare_funcs(declare_cmp_long_float) - # ____________________________________________________________ From noreply at buildbot.pypy.org Mon Feb 24 03:09:28 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 24 Feb 2014 03:09:28 +0100 (CET) Subject: [pypy-commit] pypy default: handle ndarray getitem with ellipsis Message-ID: <20140224020929.00E501C02EA@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69324:b7dd23fb75be Date: 2014-02-23 20:28 -0500 
http://bitbucket.org/pypy/pypy/changeset/b7dd23fb75be/ Log: handle ndarray getitem with ellipsis diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -75,6 +75,7 @@ def __init__(self): """NOT_RPYTHON""" self.fromcache = InternalSpaceCache(self).getorbuild + self.w_Ellipsis = special.Ellipsis(self) self.w_NotImplemented = special.NotImplemented(self) def _freeze_(self): diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -218,7 +218,9 @@ prefix) def descr_getitem(self, space, w_idx): - if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type() \ + if space.is_w(w_idx, space.w_Ellipsis): + return self + elif isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type() \ and len(w_idx.get_shape()) > 0: return self.getitem_filter(space, w_idx) try: diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2259,6 +2259,13 @@ a[b] = 1. assert (a == [[1., 1., 1.]]).all() + def test_ellipsis_indexing(self): + import numpy as np + a = np.array(1.5) + assert a[...] is a + a = np.array([1, 2, 3]) + assert a[...] is a + class AppTestNumArrayFromBuffer(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "array", "mmap"]) From noreply at buildbot.pypy.org Mon Feb 24 03:09:30 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 24 Feb 2014 03:09:30 +0100 (CET) Subject: [pypy-commit] pypy default: fix some dtype str/repr cases Message-ID: <20140224020930.3A4841C02EA@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69325:00beb1b741ed Date: 2014-02-23 20:52 -0500 http://bitbucket.org/pypy/pypy/changeset/00beb1b741ed/ Log: fix some dtype str/repr cases diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/arrayimpl/sort.py @@ -10,7 +10,7 @@ from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.rarithmetic import widen from rpython.rlib.objectmodel import specialize -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy import interp_dtype, types from pypy.module.micronumpy.iter import AxisIterator @@ -175,9 +175,9 @@ return cache._lookup(tp)(arr, space, w_axis, itemtype.get_element_size()) # XXX this should probably be changed - raise OperationError(space.w_NotImplementedError, - space.wrap("sorting of non-numeric types " + \ - "'%s' is not implemented" % arr.dtype.get_name(), )) + raise oefmt(space.w_NotImplementedError, + "sorting of non-numeric types '%s' is not implemented", + arr.dtype.name) all_types = (types.all_float_types + types.all_complex_types + types.all_int_types) @@ -318,9 +318,9 @@ return cache._lookup(tp)(arr, space, w_axis, itemtype.get_element_size()) # XXX this should probably be changed - raise OperationError(space.w_NotImplementedError, - space.wrap("sorting of non-numeric types " + \ - "'%s' is not implemented" % arr.dtype.get_name(), )) + raise oefmt(space.w_NotImplementedError, + "sorting of non-numeric types '%s' is not implemented", + arr.dtype.name) 
all_types = (types.all_float_types + types.all_complex_types + types.all_int_types) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -119,21 +119,30 @@ def get_size(self): return self.size * self.itemtype.get_element_size() - def get_name(self): - if self.char == 'S': - return '|S' + str(self.get_size()) - return self.name - def get_float_dtype(self, space): assert self.kind == NPY.COMPLEXLTR assert self.float_type is not None return get_dtype_cache(space).dtypes_by_name[self.byteorder + self.float_type] def descr_str(self, space): - return space.wrap(self.get_name()) + if not self.is_record_type(): + if self.char == 'S': + s = '|S' + str(self.get_size()) + else: + s = self.name + return space.wrap(s) + return space.str(self.descr_get_descr(space)) def descr_repr(self, space): - return space.wrap("dtype('%s')" % self.get_name()) + if not self.is_record_type(): + if self.char == 'S': + s = 'S' + str(self.get_size()) + else: + s = self.name + r = space.wrap(s) + else: + r = self.descr_get_descr(space) + return space.wrap("dtype(%s)" % space.str_w(space.repr(r))) def descr_get_itemsize(self, space): return space.wrap(self.get_size()) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -160,6 +160,12 @@ d = dtype('?') assert repr(d) == "dtype('bool')" assert str(d) == "bool" + d = dtype([('', ' Author: Armin Rigo Branch: py3k Changeset: r69326:f629c3ae62be Date: 2014-02-22 08:07 +0100 http://bitbucket.org/pypy/pypy/changeset/f629c3ae62be/ Log: Redo 992e29624c5f, this time hopefully right -- it's a bit of a mess to call c_mmap_safe() from two different points in translation (grafted from b771fb9117d277848fd63d41db349e53a635397a) diff --git a/rpython/rlib/rmmap.py b/rpython/rlib/rmmap.py --- a/rpython/rlib/rmmap.py +++ b/rpython/rlib/rmmap.py @@ -10,6 +10,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import rposix from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.nonconst import NonConstant from rpython.rlib.rarithmetic import intmask @@ -675,14 +676,20 @@ return m def alloc_hinted(hintp, map_size): - flags = NonConstant(MAP_PRIVATE | MAP_ANONYMOUS) - prot = NonConstant(PROT_EXEC | PROT_READ | PROT_WRITE) + flags = MAP_PRIVATE | MAP_ANONYMOUS + prot = PROT_EXEC | PROT_READ | PROT_WRITE + if we_are_translated(): + flags = NonConstant(flags) + prot = NonConstant(prot) return c_mmap_safe(hintp, map_size, prot, flags, -1, 0) def clear_large_memory_chunk_aligned(addr, map_size): addr = rffi.cast(PTR, addr) - flags = NonConstant(MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS) - prot = NonConstant(PROT_READ | PROT_WRITE) + flags = MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS + prot = PROT_READ | PROT_WRITE + if we_are_translated(): + flags = NonConstant(flags) + prot = NonConstant(prot) res = c_mmap_safe(addr, map_size, prot, flags, -1, 0) return res == addr From noreply at buildbot.pypy.org Mon Feb 24 04:10:16 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 04:10:16 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Kill W_AbstractFloatObject, its only subclass is W_FloatObject. 
Message-ID: <20140224031016.0D1D81C02EA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69327:0893e9d51fd5 Date: 2014-02-24 02:54 +0100 http://bitbucket.org/pypy/pypy/changeset/0893e9d51fd5/ Log: Kill W_AbstractFloatObject, its only subclass is W_FloatObject. diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -25,33 +25,6 @@ float_hex = SMM("hex", 1) -class W_AbstractFloatObject(W_Object): - __slots__ = () - - def is_w(self, space, w_other): - from rpython.rlib.longlong2float import float2longlong - if not isinstance(w_other, W_AbstractFloatObject): - return False - if self.user_overridden_class or w_other.user_overridden_class: - return self is w_other - one = float2longlong(space.float_w(self)) - two = float2longlong(space.float_w(w_other)) - return one == two - - def immutable_unique_id(self, space): - if self.user_overridden_class: - return None - from rpython.rlib.longlong2float import float2longlong - from pypy.objspace.std.model import IDTAG_FLOAT as tag - val = float2longlong(space.float_w(self)) - b = rbigint.fromrarith_int(val) - b = b.lshift(3).or_(rbigint.fromint(tag)) - return space.newlong_from_rbigint(b) - - def int(self, space): - raise NotImplementedError - - def detect_floatformat(): from rpython.rtyper.lltypesystem import rffi, lltype buf = lltype.malloc(rffi.CCHARP.TO, 8, flavor='raw') @@ -154,7 +127,7 @@ return _compare -class W_FloatObject(W_AbstractFloatObject): +class W_FloatObject(W_Object): """This is a implementation of the app-level 'float' type. The constructor takes an RPython float as an argument.""" _immutable_fields_ = ['floatval'] @@ -179,6 +152,26 @@ else: return space.newint(value) + def is_w(self, space, w_other): + from rpython.rlib.longlong2float import float2longlong + if not isinstance(w_other, W_FloatObject): + return False + if self.user_overridden_class or w_other.user_overridden_class: + return self is w_other + one = float2longlong(space.float_w(self)) + two = float2longlong(space.float_w(w_other)) + return one == two + + def immutable_unique_id(self, space): + if self.user_overridden_class: + return None + from rpython.rlib.longlong2float import float2longlong + from pypy.objspace.std.model import IDTAG_FLOAT as tag + val = float2longlong(space.float_w(self)) + b = rbigint.fromrarith_int(val) + b = b.lshift(3).or_(rbigint.fromint(tag)) + return space.newlong_from_rbigint(b) + def __repr__(self): return "" % self.floatval @@ -578,7 +571,7 @@ conjugate = interp2app(W_FloatObject.descr_conjugate), real = GetSetProperty(W_FloatObject.descr_get_real), imag = GetSetProperty(W_FloatObject.descr_get_imag), - __int__ = interpindirect2app(W_AbstractFloatObject.int), + __int__ = interpindirect2app(W_FloatObject.int), ) W_FloatObject.typedef.registermethods(globals()) From noreply at buildbot.pypy.org Mon Feb 24 04:10:17 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 04:10:17 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Kill float's unary SMMs. Message-ID: <20140224031017.73DB71C02EA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69328:ccf5419c3a76 Date: 2014-02-24 03:22 +0100 http://bitbucket.org/pypy/pypy/changeset/ccf5419c3a76/ Log: Kill float's unary SMMs. 
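
The diff below gives the unary and conversion operations the same treatment: repr__Float, str__Float, hash__Float, neg__Float, abs__Float, nonzero__Float, trunc__Float and friends become descr_* methods registered on the typedef as __repr__, __str__, __hash__, __neg__, __abs__, __nonzero__, __trunc__ and so on. Stripped of the interp-level machinery, the resulting class has roughly this shape (illustrative plain Python, not the RPython code):

    import math

    class MyFloat(object):
        def __init__(self, val):
            self.val = float(val)

        def __repr__(self):
            return repr(self.val)

        def __neg__(self):
            return MyFloat(-self.val)

        def __abs__(self):
            return MyFloat(abs(self.val))

        def __bool__(self):                 # spelled __nonzero__ on Python 2
            return self.val != 0.0
        __nonzero__ = __bool__

        def __trunc__(self):
            # math.modf(x)[1] is the integral part, as in descr_trunc below
            return int(math.modf(self.val)[1])

    print(math.trunc(MyFloat(3.7)))         # 3
    print(repr(abs(MyFloat(-2.0))))         # 2.0
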
diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -3,7 +3,7 @@ import sys from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault, interpindirect2app +from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.typedef import GetSetProperty from pypy.objspace.std import newformat from pypy.objspace.std.longobject import W_LongObject @@ -358,12 +358,65 @@ if space.isinstance_w(w_obj, space.w_long): return W_FloatObject(w_obj.tofloat(space)) + def descr_repr(self, space): + return space.wrap(float2string(self.floatval, 'r', 0)) + + def descr_str(self, space): + return space.wrap(float2string(self.floatval, 'g', DTSF_STR_PRECISION)) + + def descr_hash(self, space): + return space.wrap(_hash_float(space, self.floatval)) + + def descr_format(self, space, w_spec): + return newformat.run_formatter(space, w_spec, "format_float", self) + def descr_coerce(self, space, w_other): w_other = self._to_float(space, w_other) if w_other is None: return space.w_NotImplemented return space.newtuple([self, w_other]) + def descr_nonzero(self, space): + return space.newbool(self.floatval != 0.0) + + def descr_float(self, space): + if space.is_w(space.type(self), space.w_float): + return self + a = self.floatval + return W_FloatObject(a) + + def descr_long(self, space): + try: + return W_LongObject.fromfloat(space, self.floatval) + except OverflowError: + raise OperationError( + space.w_OverflowError, + space.wrap("cannot convert float infinity to integer")) + except ValueError: + raise OperationError(space.w_ValueError, + space.wrap("cannot convert float NaN to integer")) + + def descr_trunc(self, space): + whole = math.modf(self.floatval)[1] + try: + value = ovfcheck_float_to_int(whole) + except OverflowError: + return self.descr_long(space) + else: + return space.newint(value) + + def descr_neg(self, space): + return W_FloatObject(-self.floatval) + + def descr_pos(self, space): + return self.descr_float(space) + + def descr_abs(self, space): + return W_FloatObject(abs(self.floatval)) + + def descr_getnewargs(self, space): + return space.newtuple([self.descr_float(space)]) + descr_eq = make_compare_func('eq') descr_ne = make_compare_func('ne') descr_lt = make_compare_func('lt') @@ -540,7 +593,20 @@ __new__ = interp2app(W_FloatObject.descr__new__), __getformat__ = interp2app(W_FloatObject.descr___getformat__, as_classmethod=True), fromhex = interp2app(W_FloatObject.descr_fromhex, as_classmethod=True), + __repr__ = interp2app(W_FloatObject.descr_repr), + __str__ = interp2app(W_FloatObject.descr_str), + __hash__ = interp2app(W_FloatObject.descr_hash), + __format__ = interp2app(W_FloatObject.descr_format), __coerce__ = interp2app(W_FloatObject.descr_coerce), + __nonzero__ = interp2app(W_FloatObject.descr_nonzero), + __int__ = interp2app(W_FloatObject.int), + __float__ = interp2app(W_FloatObject.descr_float), + __long__ = interp2app(W_FloatObject.descr_long), + __trunc__ = interp2app(W_FloatObject.descr_trunc), + __neg__ = interp2app(W_FloatObject.descr_neg), + __pos__ = interp2app(W_FloatObject.descr_pos), + __abs__ = interp2app(W_FloatObject.descr_abs), + __getnewargs__ = interp2app(W_FloatObject.descr_getnewargs), __eq__ = interp2app(W_FloatObject.descr_eq), __ne__ = interp2app(W_FloatObject.descr_ne), @@ -571,40 +637,10 @@ conjugate = interp2app(W_FloatObject.descr_conjugate), real = 
GetSetProperty(W_FloatObject.descr_get_real), imag = GetSetProperty(W_FloatObject.descr_get_imag), - __int__ = interpindirect2app(W_FloatObject.int), ) W_FloatObject.typedef.registermethods(globals()) -# float__Float is supposed to do nothing, unless it has -# a derived float object, where it should return -# an exact one. -def float__Float(space, w_float1): - if space.is_w(space.type(w_float1), space.w_float): - return w_float1 - a = w_float1.floatval - return W_FloatObject(a) - -def long__Float(space, w_floatobj): - try: - return W_LongObject.fromfloat(space, w_floatobj.floatval) - except OverflowError: - raise OperationError( - space.w_OverflowError, - space.wrap("cannot convert float infinity to integer")) - except ValueError: - raise OperationError(space.w_ValueError, - space.wrap("cannot convert float NaN to integer")) - -def trunc__Float(space, w_floatobj): - whole = math.modf(w_floatobj.floatval)[1] - try: - value = ovfcheck_float_to_int(whole) - except OverflowError: - return long__Float(space, w_floatobj) - else: - return space.newint(value) - def _char_from_hex(number): return "0123456789abcdef"[number] @@ -613,7 +649,7 @@ def float_hex__Float(space, w_float): value = w_float.floatval if not isfinite(value): - return str__Float(space, w_float) + return w_float.descr_str(space) if value == 0.0: if copysign(1., value) == -1.: return space.wrap("-0x0.0p+0") @@ -656,21 +692,9 @@ s = "nan" return s -def repr__Float(space, w_float): - return space.wrap(float2string(w_float.floatval, 'r', 0)) - -def str__Float(space, w_float): - return space.wrap(float2string(w_float.floatval, 'g', DTSF_STR_PRECISION)) - -def format__Float_ANY(space, w_float, w_spec): - return newformat.run_formatter(space, w_spec, "format_float", w_float) - # ____________________________________________________________ -def hash__Float(space, w_value): - return space.wrap(_hash_float(space, w_value.floatval)) - def _hash_float(space, v): if isnan(v): return 0 @@ -849,21 +873,6 @@ return z -def neg__Float(space, w_float1): - return W_FloatObject(-w_float1.floatval) - -def pos__Float(space, w_float): - return float__Float(space, w_float) - -def abs__Float(space, w_float): - return W_FloatObject(abs(w_float.floatval)) - -def nonzero__Float(space, w_float): - return space.newbool(w_float.floatval != 0.0) - -def getnewargs__Float(space, w_float): - return space.newtuple([W_FloatObject(w_float.floatval)]) - def float_as_integer_ratio__Float(space, w_float): value = w_float.floatval try: From noreply at buildbot.pypy.org Mon Feb 24 04:10:18 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 04:10:18 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Move descr___getformat__(). Message-ID: <20140224031018.AB1BF1C02EA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69329:ae5780334104 Date: 2014-02-24 03:31 +0100 http://bitbucket.org/pypy/pypy/changeset/ae5780334104/ Log: Move descr___getformat__(). 
diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -199,6 +199,16 @@ return w_obj @staticmethod + @unwrap_spec(kind=str) + def descr___getformat__(space, w_cls, kind): + if kind == "float": + return space.wrap(_float_format) + elif kind == "double": + return space.wrap(_double_format) + raise OperationError(space.w_ValueError, + space.wrap("only float and double are valid")) + + @staticmethod @unwrap_spec(s=str) def descr_fromhex(space, w_cls, s): length = len(s) @@ -573,16 +583,6 @@ def descr_get_imag(self, space): return space.wrap(0.0) - @staticmethod - @unwrap_spec(kind=str) - def descr___getformat__(space, w_cls, kind): - if kind == "float": - return space.wrap(_float_format) - elif kind == "double": - return space.wrap(_double_format) - raise OperationError(space.w_ValueError, - space.wrap("only float and double are valid")) - registerimplementation(W_FloatObject) From noreply at buildbot.pypy.org Mon Feb 24 04:10:19 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 04:10:19 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Kill last float SMMs. Message-ID: <20140224031019.DFAB71C02EA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69330:a2d089c94859 Date: 2014-02-24 03:41 +0100 http://bitbucket.org/pypy/pypy/changeset/a2d089c94859/ Log: Kill last float SMMs. diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -9,7 +9,7 @@ from pypy.objspace.std.longobject import W_LongObject from pypy.objspace.std.model import registerimplementation, W_Object from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.stdtypedef import StdTypeDef, SMM +from pypy.objspace.std.stdtypedef import StdTypeDef from rpython.rlib import rarithmetic, rfloat from rpython.rlib.rarithmetic import ovfcheck_float_to_int, intmask, LONG_BIT from rpython.rlib.rfloat import ( @@ -20,11 +20,6 @@ from rpython.rlib.unroll import unrolling_iterable -float_as_integer_ratio = SMM("as_integer_ratio", 1) -float_is_integer = SMM("is_integer", 1) -float_hex = SMM("hex", 1) - - def detect_floatformat(): from rpython.rtyper.lltypesystem import rffi, lltype buf = lltype.malloc(rffi.CCHARP.TO, 8, flavor='raw') @@ -574,15 +569,70 @@ return space.w_NotImplemented return w_lhs.descr_pow(space, self, w_third_arg) - def descr_conjugate(self, space): - return space.float(self) - def descr_get_real(self, space): return space.float(self) def descr_get_imag(self, space): return space.wrap(0.0) + def descr_conjugate(self, space): + return space.float(self) + + def descr_is_integer(self, space): + v = self.floatval + if not rfloat.isfinite(v): + return space.w_False + return space.wrap(math.floor(v) == v) + + def descr_as_integer_ratio(self, space): + value = self.floatval + try: + num, den = float_as_rbigint_ratio(value) + except OverflowError: + w_msg = space.wrap("cannot pass infinity to as_integer_ratio()") + raise OperationError(space.w_OverflowError, w_msg) + except ValueError: + w_msg = space.wrap("cannot pass nan to as_integer_ratio()") + raise OperationError(space.w_ValueError, w_msg) + + w_num = space.newlong_from_rbigint(num) + w_den = space.newlong_from_rbigint(den) + # Try to return int + return space.newtuple([space.int(w_num), space.int(w_den)]) + + def descr_hex(self, space): + value = self.floatval + if not 
isfinite(value): + return self.descr_str(space) + if value == 0.0: + if copysign(1., value) == -1.: + return space.wrap("-0x0.0p+0") + else: + return space.wrap("0x0.0p+0") + mant, exp = math.frexp(value) + shift = 1 - max(rfloat.DBL_MIN_EXP - exp, 0) + mant = math.ldexp(mant, shift) + mant = abs(mant) + exp -= shift + result = ['\0'] * ((TOHEX_NBITS - 1) // 4 + 2) + result[0] = _char_from_hex(int(mant)) + mant -= int(mant) + result[1] = "." + for i in range((TOHEX_NBITS - 1) // 4): + mant *= 16.0 + result[i + 2] = _char_from_hex(int(mant)) + mant -= int(mant) + if exp < 0: + sign = "-" + else: + sign = "+" + exp = abs(exp) + s = ''.join(result) + if value < 0.0: + return space.wrap("-0x%sp%s%d" % (s, sign, exp)) + else: + return space.wrap("0x%sp%s%d" % (s, sign, exp)) + registerimplementation(W_FloatObject) @@ -634,9 +684,12 @@ __pow__ = interp2app(W_FloatObject.descr_pow), __rpow__ = interp2app(W_FloatObject.descr_rpow), - conjugate = interp2app(W_FloatObject.descr_conjugate), real = GetSetProperty(W_FloatObject.descr_get_real), imag = GetSetProperty(W_FloatObject.descr_get_imag), + conjugate = interp2app(W_FloatObject.descr_conjugate), + is_integer = interp2app(W_FloatObject.descr_is_integer), + as_integer_ratio = interp2app(W_FloatObject.descr_as_integer_ratio), + hex = interp2app(W_FloatObject.descr_hex), ) W_FloatObject.typedef.registermethods(globals()) @@ -646,39 +699,6 @@ TOHEX_NBITS = rfloat.DBL_MANT_DIG + 3 - (rfloat.DBL_MANT_DIG + 2) % 4 -def float_hex__Float(space, w_float): - value = w_float.floatval - if not isfinite(value): - return w_float.descr_str(space) - if value == 0.0: - if copysign(1., value) == -1.: - return space.wrap("-0x0.0p+0") - else: - return space.wrap("0x0.0p+0") - mant, exp = math.frexp(value) - shift = 1 - max(rfloat.DBL_MIN_EXP - exp, 0) - mant = math.ldexp(mant, shift) - mant = abs(mant) - exp -= shift - result = ['\0'] * ((TOHEX_NBITS - 1) // 4 + 2) - result[0] = _char_from_hex(int(mant)) - mant -= int(mant) - result[1] = "." - for i in range((TOHEX_NBITS - 1) // 4): - mant *= 16.0 - result[i + 2] = _char_from_hex(int(mant)) - mant -= int(mant) - if exp < 0: - sign = "-" - else: - sign = "+" - exp = abs(exp) - s = ''.join(result) - if value < 0.0: - return space.wrap("-0x%sp%s%d" % (s, sign, exp)) - else: - return space.wrap("0x%sp%s%d" % (s, sign, exp)) - def float2string(x, code, precision): # we special-case explicitly inf and nan here if isfinite(x): @@ -873,26 +893,4 @@ return z -def float_as_integer_ratio__Float(space, w_float): - value = w_float.floatval - try: - num, den = float_as_rbigint_ratio(value) - except OverflowError: - w_msg = space.wrap("cannot pass infinity to as_integer_ratio()") - raise OperationError(space.w_OverflowError, w_msg) - except ValueError: - w_msg = space.wrap("cannot pass nan to as_integer_ratio()") - raise OperationError(space.w_ValueError, w_msg) - - w_num = space.newlong_from_rbigint(num) - w_den = space.newlong_from_rbigint(den) - # Try to return int - return space.newtuple([space.int(w_num), space.int(w_den)]) - -def float_is_integer__Float(space, w_float): - v = w_float.floatval - if not rfloat.isfinite(v): - return space.w_False - return space.wrap(math.floor(v) == v) - register_all(vars(), globals()) From noreply at buildbot.pypy.org Mon Feb 24 04:10:21 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 04:10:21 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Remove trailing whitespace. 
Message-ID: <20140224031021.1C5841C02EA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69331:cfa332355a56 Date: 2014-02-24 03:43 +0100 http://bitbucket.org/pypy/pypy/changeset/cfa332355a56/ Log: Remove trailing whitespace. diff --git a/pypy/objspace/std/test/test_floatobject.py b/pypy/objspace/std/test/test_floatobject.py --- a/pypy/objspace/std/test/test_floatobject.py +++ b/pypy/objspace/std/test/test_floatobject.py @@ -61,7 +61,7 @@ class AppTestAppFloatTest: spaceconfig = dict(usemodules=['binascii', 'rctime']) - + def setup_class(cls): cls.w_py26 = cls.space.wrap(sys.version_info >= (2, 6)) @@ -812,7 +812,7 @@ def check(a, b): assert (a, math.copysign(1.0, a)) == (b, math.copysign(1.0, b)) - + check(mod(-1.0, 1.0), 0.0) check(mod(-1e-100, 1.0), 1.0) check(mod(-0.0, 1.0), 0.0) From noreply at buildbot.pypy.org Mon Feb 24 04:10:22 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 04:10:22 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Organize imports. Message-ID: <20140224031022.502A01C02EA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69332:86996b85032a Date: 2014-02-24 03:46 +0100 http://bitbucket.org/pypy/pypy/changeset/86996b85032a/ Log: Organize imports. diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -12,10 +12,10 @@ from pypy.objspace.std.stdtypedef import StdTypeDef from rpython.rlib import rarithmetic, rfloat from rpython.rlib.rarithmetic import ovfcheck_float_to_int, intmask, LONG_BIT +from rpython.rlib.rbigint import rbigint from rpython.rlib.rfloat import ( isinf, isnan, isfinite, INFINITY, NAN, copysign, formatd, DTSF_ADD_DOT_0, DTSF_STR_PRECISION, float_as_rbigint_ratio) -from rpython.rlib.rbigint import rbigint from rpython.rlib.rstring import ParseStringError from rpython.rlib.unroll import unrolling_iterable diff --git a/pypy/objspace/std/test/test_floatobject.py b/pypy/objspace/std/test/test_floatobject.py --- a/pypy/objspace/std/test/test_floatobject.py +++ b/pypy/objspace/std/test/test_floatobject.py @@ -1,16 +1,16 @@ -from pypy.objspace.std import floatobject as fobj -from pypy.objspace.std.multimethod import FailedToImplement -import py, sys +import sys + +from pypy.objspace.std.floatobject import W_FloatObject + class TestW_FloatObject: - def test_pow_fff(self): x = 10.0 y = 2.0 z = 13.0 - f1 = fobj.W_FloatObject(x) - f2 = fobj.W_FloatObject(y) - f3 = fobj.W_FloatObject(z) + f1 = W_FloatObject(x) + f2 = W_FloatObject(y) + f3 = W_FloatObject(z) self.space.raises_w(self.space.w_TypeError, f1.descr_pow, self.space, f2, f3) @@ -18,20 +18,20 @@ def test_pow_ffn(self): x = 10.0 y = 2.0 - f1 = fobj.W_FloatObject(x) - f2 = fobj.W_FloatObject(y) + f1 = W_FloatObject(x) + f2 = W_FloatObject(y) v = f1.descr_pow(self.space, f2, self.space.w_None) assert v.floatval == x ** y - f1 = fobj.W_FloatObject(-1.23) - f2 = fobj.W_FloatObject(-4.56) + f1 = W_FloatObject(-1.23) + f2 = W_FloatObject(-4.56) self.space.raises_w(self.space.w_ValueError, f1.descr_pow, self.space, f2, self.space.w_None) x = -10 y = 2.0 - f1 = fobj.W_FloatObject(x) - f2 = fobj.W_FloatObject(y) + f1 = W_FloatObject(x) + f2 = W_FloatObject(y) v = f1.descr_pow(self.space, f2, self.space.w_None) assert v.floatval == x**y From noreply at buildbot.pypy.org Mon Feb 24 04:10:23 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 04:10:23 +0100 (CET) 
Subject: [pypy-commit] pypy remove-remaining-smm: Make W_FloatObject a W_Root.
Message-ID: <20140224031023.755CF1C02EA@cobra.cs.uni-duesseldorf.de>

Author: Manuel Jacob
Branch: remove-remaining-smm
Changeset: r69333:ea78a007be31
Date: 2014-02-24 03:53 +0100
http://bitbucket.org/pypy/pypy/changeset/ea78a007be31/

Log: Make W_FloatObject a W_Root.
diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -48,14 +48,6 @@ _double_format, _float_format = detect_floatformat() -def _string_to_float(space, w_source, string): - try: - return rfloat.string_to_float(string) - except ParseStringError as e: - from pypy.objspace.std.intobject import wrap_parsestringerror - raise wrap_parsestringerror(space, e, w_source) - - _alpha = zip("abcdef", range(10, 16)) + zip("ABCDEF", range(10, 16)) _hex_to_int = zip("0123456789", range(10)) + _alpha _hex_to_int_iterable = unrolling_iterable(_hex_to_int) @@ -172,7 +164,13 @@ @staticmethod @unwrap_spec(w_x=WrappedDefault(0.0)) def descr__new__(space, w_floattype, w_x): - from pypy.objspace.std.floatobject import W_FloatObject + def _string_to_float(space, w_source, string): + try: + return rfloat.string_to_float(string) + except ParseStringError as e: + from pypy.objspace.std.intobject import wrap_parsestringerror + raise wrap_parsestringerror(space, e, w_source) + w_value = w_x # 'x' is the keyword argument name in CPython if space.lookup(w_value, "__float__") is not None: w_obj = space.float(w_value) From noreply at buildbot.pypy.org Mon Feb 24 04:10:25 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 04:10:25 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Move wrap_parsestringerror() -> pypy.objspace.std.util. Message-ID: <20140224031025.B0D8B1C02EA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69335:82db216f833a Date: 2014-02-24 04:09 +0100 http://bitbucket.org/pypy/pypy/changeset/82db216f833a/ Log: Move wrap_parsestringerror() -> pypy.objspace.std.util. diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -9,6 +9,7 @@ from pypy.objspace.std import newformat from pypy.objspace.std.longobject import W_LongObject from pypy.objspace.std.stdtypedef import StdTypeDef +from pypy.objspace.std.util import wrap_parsestringerror from rpython.rlib import rarithmetic, rfloat from rpython.rlib.rarithmetic import ovfcheck_float_to_int, intmask, LONG_BIT from rpython.rlib.rbigint import rbigint @@ -168,7 +169,6 @@ try: return rfloat.string_to_float(string) except ParseStringError as e: - from pypy.objspace.std.intobject import wrap_parsestringerror raise wrap_parsestringerror(space, e, w_source) w_value = w_x # 'x' is the keyword argument name in CPython diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -13,8 +13,7 @@ from rpython.rlib.rarithmetic import ( LONG_BIT, is_valid_int, ovfcheck, r_longlong, r_uint, string_to_int) from rpython.rlib.rbigint import rbigint -from rpython.rlib.rstring import ( - InvalidBaseError, ParseStringError, ParseStringOverflowError) +from rpython.rlib.rstring import ParseStringError, ParseStringOverflowError from rpython.tool.sourcetools import func_renamer, func_with_new_name from pypy.interpreter import typedef @@ -26,6 +25,7 @@ from pypy.objspace.std.model import ( BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_INT) from pypy.objspace.std.stdtypedef import StdTypeDef +from pypy.objspace.std.util import wrap_parsestringerror SENTINEL = object() @@ -605,15 +605,6 @@ return w_res -def wrap_parsestringerror(space, e, w_source): - if isinstance(e, InvalidBaseError): - w_msg = 
space.wrap(e.msg) - else: - w_msg = space.wrap('%s: %s' % (e.msg, - space.str_w(space.repr(w_source)))) - return OperationError(space.w_ValueError, w_msg) - - def _recover_with_smalllong(space): """True if there is a chance that a SmallLong would fit when an Int does not diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -1,3 +1,7 @@ +from pypy.interpreter.error import oefmt +from rpython.rlib.rstring import InvalidBaseError + + def negate(f): """Create a function which calls `f` and negates its result. When the result is ``space.w_NotImplemented``, ``space.w_NotImplemented`` is @@ -22,3 +26,11 @@ where = length assert where >= 0 return where + + +def wrap_parsestringerror(space, e, w_source): + if isinstance(e, InvalidBaseError): + raise oefmt(space.w_ValueError, e.msg) + else: + raise oefmt(space.w_ValueError, '%s: %s', + e.msg, space.str_w(space.repr(w_source))) From noreply at buildbot.pypy.org Mon Feb 24 04:16:28 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 04:16:28 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Fix imports. Message-ID: <20140224031628.7A6BA1C02EA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69336:cc55e80e9b8a Date: 2014-02-24 04:15 +0100 http://bitbucket.org/pypy/pypy/changeset/cc55e80e9b8a/ Log: Fix imports. diff --git a/pypy/module/_csv/interp_reader.py b/pypy/module/_csv/interp_reader.py --- a/pypy/module/_csv/interp_reader.py +++ b/pypy/module/_csv/interp_reader.py @@ -7,6 +7,7 @@ from pypy.module._csv.interp_csv import _build_dialect from pypy.module._csv.interp_csv import (QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE) +from pypy.objspace.std.util import wrap_parsestringerror (START_RECORD, START_FIELD, ESCAPED_CHAR, IN_FIELD, IN_QUOTED_FIELD, ESCAPE_IN_QUOTED_FIELD, QUOTE_IN_QUOTED_FIELD, @@ -48,7 +49,6 @@ try: ff = string_to_float(field) except ParseStringError as e: - from pypy.objspace.std.intobject import wrap_parsestringerror raise wrap_parsestringerror(space, e, space.wrap(field)) w_obj = space.wrap(ff) else: diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -18,6 +18,7 @@ from pypy.objspace.std.model import ( BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_LONG) from pypy.objspace.std.stdtypedef import StdTypeDef +from pypy.objspace.std.util import wrap_parsestringerror def delegate_other(func): @@ -538,7 +539,6 @@ try: bigint = rbigint.fromstr2(string, base) except ParseStringError as e: - from pypy.objspace.std.intobject import wrap_parsestringerror raise wrap_parsestringerror(space, e, w_source) return newbigint(space, w_longtype, bigint) _string_to_w_long._dont_inline_ = True From noreply at buildbot.pypy.org Mon Feb 24 05:59:59 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 05:59:59 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Use oefmt and split some long lines. Message-ID: <20140224045959.F17001C303A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69337:f5478e57e58e Date: 2014-02-24 04:30 +0100 http://bitbucket.org/pypy/pypy/changeset/f5478e57e58e/ Log: Use oefmt and split some long lines. 
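(Illustrative sketch, not part of the original changeset: the diff below rewrites call sites of the form "raise OperationError(w_type, space.wrap(msg))" into the shorter "raise oefmt(w_type, fmt, ...)". The toy model here only mimics that call-site shape; it is not PyPy's actual OperationError/oefmt implementation, and plain strings stand in for wrapped objects.)

class OperationError(Exception):
    # Simplified stand-in: the real class carries wrapped w_type/w_value objects.
    def __init__(self, w_type, w_value):
        Exception.__init__(self, w_type, w_value)
        self.w_type = w_type
        self.w_value = w_value

def oefmt(w_type, fmt, *args):
    # Format the message and build the exception in one call, so the raise
    # fits on one line and the wrapping step cannot be forgotten.
    return OperationError(w_type, fmt % args if args else fmt)

def convert_before():
    raise OperationError("TypeError",
                         str("can't convert complex to int; use int(abs(z))"))

def convert_after():
    raise oefmt("TypeError", "can't convert complex to int; use int(abs(z))")
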
diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -183,9 +183,8 @@ return (space.float_w(w_z), 0.0) elif isinstance(w_z, W_ComplexObject): return (w_z.realval, w_z.imagval) - raise OperationError(space.w_TypeError, - space.wrap("__complex__() must return" - " a complex number")) + raise oefmt(space.w_TypeError, + "__complex__() must return a complex number") # # no '__complex__' method, so we assume it is a float, @@ -272,7 +271,8 @@ return w_result def int(self, space): - raise OperationError(space.w_TypeError, space.wrap("can't convert complex to int; use int(abs(z))")) + raise oefmt(space.w_TypeError, + "can't convert complex to int; use int(abs(z))") def _to_complex(self, space, w_obj): if isinstance(w_obj, W_ComplexObject): @@ -303,27 +303,28 @@ space.isinstance_w(w_real, space.w_unicode): # a string argument if not noarg2: - raise OperationError(space.w_TypeError, - space.wrap("complex() can't take second arg" - " if first is a string")) + raise oefmt(space.w_TypeError, "complex() can't take second" + " arg if first is a string") try: realstr, imagstr = _split_complex(space.str_w(w_real)) except ValueError: - raise OperationError(space.w_ValueError, space.wrap(ERR_MALFORMED)) + raise oefmt(space.w_ValueError, ERR_MALFORMED) try: realval = string_to_float(realstr) imagval = string_to_float(imagstr) except ParseStringError: - raise OperationError(space.w_ValueError, space.wrap(ERR_MALFORMED)) + raise oefmt(space.w_ValueError, ERR_MALFORMED) else: # non-string arguments - realval, imagval = unpackcomplex(space, w_real, strict_typing=False) + realval, imagval = unpackcomplex(space, w_real, + strict_typing=False) # now take w_imag into account if not noarg2: # complex(x, y) == x+y*j, even if 'y' is already a complex. 
- realval2, imagval2 = unpackcomplex(space, w_imag, strict_typing=False) + realval2, imagval2 = unpackcomplex(space, w_imag, + strict_typing=False) # try to preserve the signs of zeroes of realval and realval2 if imagval2 != 0.0: @@ -389,14 +390,15 @@ return space.newtuple([self, w_other]) def descr_format(self, space, w_format_spec): - return newformat.run_formatter(space, w_format_spec, "format_complex", self) + return newformat.run_formatter(space, w_format_spec, "format_complex", + self) def descr_nonzero(self, space): return space.newbool((self.realval != 0.0) or (self.imagval != 0.0)) def descr_float(self, space): - raise OperationError(space.w_TypeError, - space.wrap("can't convert complex to float; use abs(z)")) + raise oefmt(space.w_TypeError, + "can't convert complex to float; use abs(z)") def descr_neg(self, space): return W_ComplexObject(-self.realval, -self.imagval) @@ -408,7 +410,7 @@ try: return space.newfloat(math.hypot(self.realval, self.imagval)) except OverflowError, e: - raise OperationError(space.w_OverflowError, space.wrap(str(e))) + raise oefmt(space.w_OverflowError, str(e)) def descr_eq(self, space, w_other): if isinstance(w_other, W_ComplexObject): @@ -434,8 +436,8 @@ def _fail_cmp(self, space, w_other): if isinstance(w_other, W_ComplexObject): - raise OperationError(space.w_TypeError, - space.wrap('cannot compare complex numbers using <, <=, >, >=')) + raise oefmt(space.w_TypeError, + "cannot compare complex numbers using <, <=, >, >=") return space.w_NotImplemented def descr_add(self, space, w_rhs): @@ -485,7 +487,7 @@ try: return self.div(w_rhs) except ZeroDivisionError, e: - raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) + raise oefmt(space.w_ZeroDivisionError, str(e)) def descr_rtruediv(self, space, w_lhs): w_lhs = self._to_complex(space, w_lhs) @@ -494,7 +496,7 @@ try: return w_lhs.div(self) except ZeroDivisionError, e: - raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) + raise oefmt(space.w_ZeroDivisionError, str(e)) def descr_floordiv(self, space, w_rhs): w_rhs = self._to_complex(space, w_rhs) @@ -504,7 +506,7 @@ try: return self.divmod(space, w_rhs)[0] except ZeroDivisionError, e: - raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) + raise oefmt(space.w_ZeroDivisionError, str(e)) def descr_rfloordiv(self, space, w_lhs): w_lhs = self._to_complex(space, w_lhs) @@ -514,7 +516,7 @@ try: return w_lhs.divmod(space, self)[0] except ZeroDivisionError, e: - raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) + raise oefmt(space.w_ZeroDivisionError, str(e)) def descr_mod(self, space, w_rhs): w_rhs = self._to_complex(space, w_rhs) @@ -523,7 +525,7 @@ try: return self.divmod(space, w_rhs)[1] except ZeroDivisionError, e: - raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) + raise oefmt(space.w_ZeroDivisionError, str(e)) def descr_rmod(self, space, w_lhs): w_lhs = self._to_complex(space, w_lhs) @@ -532,7 +534,7 @@ try: return w_lhs.divmod(space, self)[1] except ZeroDivisionError, e: - raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) + raise oefmt(space.w_ZeroDivisionError, str(e)) def descr_divmod(self, space, w_rhs): w_rhs = self._to_complex(space, w_rhs) @@ -541,7 +543,7 @@ try: div, mod = self.divmod(space, w_rhs) except ZeroDivisionError, e: - raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) + raise oefmt(space.w_ZeroDivisionError, str(e)) return space.newtuple([div, mod]) def descr_rdivmod(self, space, w_lhs): @@ -551,7 +553,7 @@ try: 
div, mod = w_lhs.divmod(space, self) except ZeroDivisionError, e: - raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) + raise oefmt(space.w_ZeroDivisionError, str(e)) return space.newtuple([div, mod]) @unwrap_spec(w_third_arg=WrappedDefault(None)) @@ -560,17 +562,19 @@ if w_exponent is None: return space.w_NotImplemented if not space.is_w(w_third_arg, space.w_None): - raise OperationError(space.w_ValueError, space.wrap('complex modulo')) + raise oefmt(space.w_ValueError, 'complex modulo') try: r = w_exponent.realval - if w_exponent.imagval == 0.0 and -100.0 <= r <= 100.0 and r == int(r): + if (w_exponent.imagval == 0.0 and -100.0 <= r <= 100.0 and + r == int(r)): w_p = self.pow_small_int(int(r)) else: w_p = self.pow(w_exponent) except ZeroDivisionError: - raise OperationError(space.w_ZeroDivisionError, space.wrap("0.0 to a negative or complex power")) + raise oefmt(space.w_ZeroDivisionError, + "0.0 to a negative or complex power") except OverflowError: - raise OperationError(space.w_OverflowError, space.wrap("complex exponentiation")) + raise oefmt(space.w_OverflowError, "complex exponentiation") return w_p def descr_conjugate(self, space): @@ -585,8 +589,7 @@ def fget(space, w_obj): from pypy.objspace.std.complexobject import W_ComplexObject if not isinstance(w_obj, W_ComplexObject): - raise OperationError(space.w_TypeError, - space.wrap("descriptor is for 'complex'")) + raise oefmt(space.w_TypeError, "descriptor is for 'complex'") return space.newfloat(getattr(w_obj, name)) return GetSetProperty(fget) From noreply at buildbot.pypy.org Mon Feb 24 06:00:01 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 06:00:01 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Fix. Message-ID: <20140224050001.307FC1C303A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69338:947db7701d1b Date: 2014-02-24 04:34 +0100 http://bitbucket.org/pypy/pypy/changeset/947db7701d1b/ Log: Fix. diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -475,10 +475,10 @@ w_lhs = self._to_float(space, w_lhs) if w_lhs is None: return space.w_NotImplemented - lhs = w_lhs.floatval - if lhs == 0.0: + selfval = self.floatval + if selfval == 0.0: raise OperationError(space.w_ZeroDivisionError, space.wrap("float division")) - return W_FloatObject(lhs / self.floatval) + return W_FloatObject(w_lhs.floatval / selfval) def descr_floordiv(self, space, w_rhs): w_rhs = self._to_float(space, w_rhs) From noreply at buildbot.pypy.org Mon Feb 24 06:00:02 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 06:00:02 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Use oefmt. Message-ID: <20140224050002.67D931C303A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69339:33053f5a43be Date: 2014-02-24 04:43 +0100 http://bitbucket.org/pypy/pypy/changeset/33053f5a43be/ Log: Use oefmt. 
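(Orientation note, not part of the original changeset: several of the raise sites rewritten in the diff below live in the hex-float parser used by float.fromhex(). The snippet here is plain standard Python showing the behaviour such a parser provides in CPython; it does not touch PyPy internals.)

# Round-tripping through hex notation is exact for binary floats:
x = 0.1
assert float.fromhex(x.hex()) == x
# '0x1.8p1' means 1.5 * 2**1:
assert float.fromhex('0x1.8p1') == 3.0
# Leading/trailing whitespace is accepted, but trailing junk raises the
# "invalid hex string" kind of error seen in the diff below:
assert float.fromhex('  0x1.8p1  ') == 3.0
try:
    float.fromhex('0x1.8p1 junk')
except ValueError:
    pass
else:
    raise AssertionError("expected ValueError for trailing junk")
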
diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -3,7 +3,7 @@ import sys from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.error import oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault from pypy.interpreter.typedef import GetSetProperty from pypy.objspace.std import newformat @@ -197,8 +197,7 @@ return space.wrap(_float_format) elif kind == "double": return space.wrap(_double_format) - raise OperationError(space.w_ValueError, - space.wrap("only float and double are valid")) + raise oefmt(space.w_ValueError, "only float and double are valid") @staticmethod @unwrap_spec(s=str) @@ -209,8 +208,7 @@ while i < length and s[i].isspace(): i += 1 if i == length: - raise OperationError(space.w_ValueError, - space.wrap("invalid hex string")) + raise oefmt(space.w_ValueError, "invalid hex string") sign = 1 if s[i] == "-": sign = -1 @@ -218,8 +216,7 @@ elif s[i] == "+": i += 1 if length == i: - raise OperationError(space.w_ValueError, - space.wrap("invalid hex string")) + raise oefmt(space.w_ValueError, "invalid hex string") if s[i] == "i" or s[i] == "I": i += 1 if length - i >= 2 and s[i:i + 2].lower() == "nf": @@ -250,28 +247,24 @@ total_digits = co_end - co_start float_digits = co_end - whole_end if not total_digits: - raise OperationError(space.w_ValueError, - space.wrap("invalid hex string")) + raise oefmt(space.w_ValueError, "invalid hex string") const_one = rfloat.DBL_MIN_EXP - rfloat.DBL_MANT_DIG + sys.maxint // 2 const_two = sys.maxint // 2 + 1 - rfloat.DBL_MAX_EXP if total_digits > min(const_one, const_two) // 4: - raise OperationError(space.w_ValueError, space.wrap("way too long")) + raise oefmt(space.w_ValueError, "way too long") if i < length and (s[i] == "p" or s[i] == "P"): i += 1 if i == length: - raise OperationError(space.w_ValueError, - space.wrap("invalid hex string")) + raise oefmt(space.w_ValueError, "invalid hex string") exp_sign = 1 if s[i] == "-" or s[i] == "+": if s[i] == "-": exp_sign = -1 i += 1 if i == length: - raise OperationError(space.w_ValueError, - space.wrap("invalid hex string")) + raise oefmt(space.w_ValueError, "invalid hex string") if not s[i].isdigit(): - raise OperationError(space.w_ValueError, - space.wrap("invalid hex string")) + raise oefmt(space.w_ValueError, "invalid hex string") exp = ord(s[i]) - ord('0') i += 1 while i < length and s[i].isdigit(): @@ -296,7 +289,7 @@ if not total_digits or exp <= -sys.maxint / 2: value = 0.0 elif exp >= sys.maxint // 2: - raise OperationError(space.w_OverflowError, space.wrap("too large")) + raise oefmt(space.w_OverflowError, "too large") else: exp -= 4 * float_digits top_exp = exp + 4 * (total_digits - 1) @@ -307,8 +300,7 @@ if top_exp < rfloat.DBL_MIN_EXP - rfloat.DBL_MANT_DIG: value = 0.0 elif top_exp > rfloat.DBL_MAX_EXP: - raise OperationError(space.w_OverflowError, - space.wrap("too large")) + raise oefmt(space.w_OverflowError, "too large") else: lsb = max(top_exp, rfloat.DBL_MIN_EXP) - rfloat.DBL_MANT_DIG value = 0 @@ -341,14 +333,12 @@ mant_dig = rfloat.DBL_MANT_DIG if (top_exp == rfloat.DBL_MAX_EXP and value == math.ldexp(2 * half_eps, mant_dig)): - raise OperationError(space.w_OverflowError, - space.wrap("too large")) + raise oefmt(space.w_OverflowError, "too large") value = math.ldexp(value, (exp + 4*key_digit)) while i < length and s[i].isspace(): i += 1 if i != length: - raise 
OperationError(space.w_ValueError, - space.wrap("invalid hex string")) + raise oefmt(space.w_ValueError, "invalid hex string") w_float = space.wrap(sign * value) return space.call_function(w_cls, w_float) @@ -391,12 +381,11 @@ try: return W_LongObject.fromfloat(space, self.floatval) except OverflowError: - raise OperationError( - space.w_OverflowError, - space.wrap("cannot convert float infinity to integer")) + raise oefmt(space.w_OverflowError, + "cannot convert float infinity to integer") except ValueError: - raise OperationError(space.w_ValueError, - space.wrap("cannot convert float NaN to integer")) + raise oefmt(space.w_ValueError, + "cannot convert float NaN to integer") def descr_trunc(self, space): whole = math.modf(self.floatval)[1] @@ -468,7 +457,7 @@ return space.w_NotImplemented rhs = w_rhs.floatval if rhs == 0.0: - raise OperationError(space.w_ZeroDivisionError, space.wrap("float division")) + raise oefmt(space.w_ZeroDivisionError, "float division") return W_FloatObject(self.floatval / rhs) def descr_rdiv(self, space, w_lhs): @@ -477,7 +466,7 @@ return space.w_NotImplemented selfval = self.floatval if selfval == 0.0: - raise OperationError(space.w_ZeroDivisionError, space.wrap("float division")) + raise oefmt(space.w_ZeroDivisionError, "float division") return W_FloatObject(w_lhs.floatval / selfval) def descr_floordiv(self, space, w_rhs): @@ -499,7 +488,7 @@ x = self.floatval y = w_rhs.floatval if y == 0.0: - raise OperationError(space.w_ZeroDivisionError, space.wrap("float modulo")) + raise oefmt(space.w_ZeroDivisionError, "float modulo") try: mod = math.fmod(x, y) except ValueError: @@ -547,8 +536,8 @@ if w_rhs is None: return space.w_NotImplemented if not space.is_w(w_third_arg, space.w_None): - raise OperationError(space.w_TypeError, space.wrap( - "pow() 3rd argument not allowed unless all arguments are integers")) + raise oefmt(space.w_TypeError, "pow() 3rd argument not allowed " + "unless all arguments are integers") x = self.floatval y = w_rhs.floatval @@ -586,11 +575,11 @@ try: num, den = float_as_rbigint_ratio(value) except OverflowError: - w_msg = space.wrap("cannot pass infinity to as_integer_ratio()") - raise OperationError(space.w_OverflowError, w_msg) + raise oefmt(space.w_OverflowError, + "cannot pass infinity to as_integer_ratio()") except ValueError: - w_msg = space.wrap("cannot pass nan to as_integer_ratio()") - raise OperationError(space.w_ValueError, w_msg) + raise oefmt(space.w_ValueError, + "cannot pass nan to as_integer_ratio()") w_num = space.newlong_from_rbigint(num) w_den = space.newlong_from_rbigint(den) @@ -760,7 +749,7 @@ x = w_float1.floatval y = w_float2.floatval if y == 0.0: - raise OperationError(space.w_ZeroDivisionError, space.wrap("float modulo")) + raise oefmt(space.w_ZeroDivisionError, "float modulo") try: mod = math.fmod(x, y) except ValueError: @@ -846,9 +835,8 @@ if x == 0.0: if y < 0.0: - raise OperationError(space.w_ZeroDivisionError, - space.wrap("0.0 cannot be raised to " - "a negative power")) + raise oefmt(space.w_ZeroDivisionError, + "0.0 cannot be raised to a negative power") negate_result = False # special case: "(-1.0) ** bignum" should not raise PowDomainError, @@ -876,11 +864,9 @@ # We delegate to our implementation of math.pow() the error detection. 
z = math.pow(x,y) except OverflowError: - raise OperationError(space.w_OverflowError, - space.wrap("float power")) + raise oefmt(space.w_OverflowError, "float power") except ValueError: - raise OperationError(space.w_ValueError, - space.wrap("float power")) + raise oefmt(space.w_ValueError, "float power") if negate_result: z = -z From noreply at buildbot.pypy.org Mon Feb 24 06:00:03 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 06:00:03 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Do style changes and clean up module namespace a bit. Message-ID: <20140224050003.A55181C303A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69340:d3d741d4b31a Date: 2014-02-24 05:07 +0100 http://bitbucket.org/pypy/pypy/changeset/d3d741d4b31a/ Log: Do style changes and clean up module namespace a bit. diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -66,6 +66,9 @@ i = co_end - 1 - j return _hex_from_char(s[i]) +def _char_from_hex(number): + return "0123456789abcdef"[number] + def make_compare_func(opname): op = getattr(operator, opname) @@ -179,7 +182,8 @@ value = space.float_w(w_obj) elif (space.isinstance_w(w_value, space.w_str) or space.isinstance_w(w_value, space.w_bytearray)): - value = _string_to_float(space, w_value, space.bufferstr_w(w_value)) + value = _string_to_float(space, w_value, + space.bufferstr_w(w_value)) elif space.isinstance_w(w_value, space.w_unicode): from unicodeobject import unicode_to_decimal_w value = _string_to_float(space, w_value, @@ -291,7 +295,7 @@ elif exp >= sys.maxint // 2: raise oefmt(space.w_OverflowError, "too large") else: - exp -= 4 * float_digits + exp -= 4 * float_digits top_exp = exp + 4 * (total_digits - 1) digit = _hex_digit(s, total_digits - 1, co_end, float_digits) while digit: @@ -350,11 +354,25 @@ if space.isinstance_w(w_obj, space.w_long): return W_FloatObject(w_obj.tofloat(space)) + def _float2string(self, x, code, precision): + # we special-case explicitly inf and nan here + if isfinite(x): + s = formatd(x, code, precision, DTSF_ADD_DOT_0) + elif isinf(x): + if x > 0.0: + s = "inf" + else: + s = "-inf" + else: # isnan(x): + s = "nan" + return s + def descr_repr(self, space): - return space.wrap(float2string(self.floatval, 'r', 0)) + return space.wrap(self._float2string(self.floatval, 'r', 0)) def descr_str(self, space): - return space.wrap(float2string(self.floatval, 'g', DTSF_STR_PRECISION)) + return space.wrap(self._float2string(self.floatval, 'g', + DTSF_STR_PRECISION)) def descr_hash(self, space): return space.wrap(_hash_float(space, self.floatval)) @@ -544,8 +562,8 @@ try: result = _pow(space, x, y) except PowDomainError: - raise oefmt(space.w_ValueError, - "negative number cannot be raised to a fractional power") + raise oefmt(space.w_ValueError, "negative number cannot be raised " + "to a fractional power") return W_FloatObject(result) @unwrap_spec(w_third_arg=WrappedDefault(None)) @@ -587,6 +605,7 @@ return space.newtuple([space.int(w_num), space.int(w_den)]) def descr_hex(self, space): + TOHEX_NBITS = rfloat.DBL_MANT_DIG + 3 - (rfloat.DBL_MANT_DIG + 2) % 4 value = self.floatval if not isfinite(value): return self.descr_str(space) @@ -677,27 +696,6 @@ ) -def _char_from_hex(number): - return "0123456789abcdef"[number] - -TOHEX_NBITS = rfloat.DBL_MANT_DIG + 3 - (rfloat.DBL_MANT_DIG + 2) % 4 - -def float2string(x, code, precision): - # we special-case 
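(Illustrative sketch, not part of the original changeset: the diff below turns the module-level float2string() into the _float2string() method, which special-cases inf and nan before handing finite values to the formatting routine. Below is a standard-library approximation of that control flow; it deliberately omits PyPy's formatd()/DTSF_* flags.)

import math

def float2string(x, fmt="%.17g"):
    # Non-finite values get fixed spellings; finite values go through the
    # normal formatting path.
    if math.isinf(x):
        return "inf" if x > 0.0 else "-inf"
    if math.isnan(x):
        return "nan"
    return fmt % x

assert float2string(float("inf")) == "inf"
assert float2string(float("-inf")) == "-inf"
assert float2string(float("nan")) == "nan"
assert float2string(2.5) == "2.5"
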
explicitly inf and nan here - if isfinite(x): - s = formatd(x, code, precision, DTSF_ADD_DOT_0) - elif isinf(x): - if x > 0.0: - s = "inf" - else: - s = "-inf" - else: # isnan(x): - s = "nan" - return s - - -# ____________________________________________________________ - def _hash_float(space, v): if isnan(v): return 0 @@ -785,6 +783,7 @@ return [W_FloatObject(floordiv), W_FloatObject(mod)] + class PowDomainError(ValueError): """Signals a negative number raised to a fractional power""" @@ -862,7 +861,7 @@ try: # We delegate to our implementation of math.pow() the error detection. - z = math.pow(x,y) + z = math.pow(x, y) except OverflowError: raise oefmt(space.w_OverflowError, "float power") except ValueError: From noreply at buildbot.pypy.org Mon Feb 24 06:00:04 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 06:00:04 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Make W_NoneObject a W_Root. Message-ID: <20140224050004.D5A291C303A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69341:9f9e64ea5ddc Date: 2014-02-24 05:17 +0100 http://bitbucket.org/pypy/pypy/changeset/9f9e64ea5ddc/ Log: Make W_NoneObject a W_Root. diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -34,7 +34,6 @@ from pypy.objspace.std.objecttype import object_typedef from pypy.objspace.std.typeobject import type_typedef from pypy.objspace.std.slicetype import slice_typedef - from pypy.objspace.std.nonetype import none_typedef self.pythontypes = [value for key, value in result.__dict__.items() if not key.startswith('_')] # don't look @@ -67,6 +66,7 @@ # not-multimethod based types + self.pythontypes.append(noneobject.W_NoneObject.typedef) self.pythontypes.append(tupleobject.W_TupleObject.typedef) self.pythontypes.append(listobject.W_ListObject.typedef) self.pythontypes.append(dictmultiobject.W_DictMultiObject.typedef) diff --git a/pypy/objspace/std/noneobject.py b/pypy/objspace/std/noneobject.py --- a/pypy/objspace/std/noneobject.py +++ b/pypy/objspace/std/noneobject.py @@ -1,27 +1,23 @@ -""" - None Object implementation +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.gateway import interp2app +from pypy.objspace.std.stdtypedef import StdTypeDef - ok and tested -""" -from pypy.objspace.std.model import registerimplementation, W_Object -from pypy.objspace.std.register_all import register_all - -class W_NoneObject(W_Object): - from pypy.objspace.std.nonetype import none_typedef as typedef - +class W_NoneObject(W_Root): def unwrap(w_self, space): return None -registerimplementation(W_NoneObject) + def descr_nonzero(self, space): + return space.w_False + + def descr_repr(self, space): + return space.wrap('None') + W_NoneObject.w_None = W_NoneObject() -def nonzero__None(space, w_none): - return space.w_False - -def repr__None(space, w_none): - return space.wrap('None') - -register_all(vars()) - +W_NoneObject.typedef = StdTypeDef("NoneType", + __nonzero__ = interp2app(W_NoneObject.descr_nonzero), + __repr__ = interp2app(W_NoneObject.descr_repr), +) +W_NoneObject.typedef.acceptable_as_base_class = False diff --git a/pypy/objspace/std/nonetype.py b/pypy/objspace/std/nonetype.py deleted file mode 100644 --- a/pypy/objspace/std/nonetype.py +++ /dev/null @@ -1,8 +0,0 @@ -from pypy.objspace.std.stdtypedef import StdTypeDef - - -# ____________________________________________________________ - -none_typedef = StdTypeDef("NoneType", - ) 
-none_typedef.acceptable_as_base_class = False diff --git a/pypy/objspace/std/test/test_noneobject.py b/pypy/objspace/std/test/test_noneobject.py --- a/pypy/objspace/std/test/test_noneobject.py +++ b/pypy/objspace/std/test/test_noneobject.py @@ -1,11 +1,7 @@ - - - class TestW_NoneObject: - def test_equality(self): assert self.space.eq_w(self.space.w_None, self.space.w_None) - + def test_inequality(self): neresult = self.space.ne(self.space.w_None, self.space.w_None) assert not self.space.is_true(neresult) From noreply at buildbot.pypy.org Mon Feb 24 09:09:36 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 24 Feb 2014 09:09:36 +0100 (CET) Subject: [pypy-commit] pypy default: fix more dtype str/repr cases Message-ID: <20140224080936.D5DF71C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69342:cb3ee4379850 Date: 2014-02-23 21:23 -0500 http://bitbucket.org/pypy/pypy/changeset/cb3ee4379850/ Log: fix more dtype str/repr cases diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -125,21 +125,28 @@ return get_dtype_cache(space).dtypes_by_name[self.byteorder + self.float_type] def descr_str(self, space): - if not self.is_record_type(): + if not self.num == NPY.VOID: if self.char == 'S': s = '|S' + str(self.get_size()) else: s = self.name return space.wrap(s) + elif self.subdtype is not None: + return space.str(space.newtuple([ + self.subdtype.descr_get_str(space), + self.descr_get_shape(space)])) return space.str(self.descr_get_descr(space)) def descr_repr(self, space): - if not self.is_record_type(): + if not self.num == NPY.VOID: if self.char == 'S': s = 'S' + str(self.get_size()) else: s = self.name r = space.wrap(s) + elif self.subdtype is not None: + r = space.newtuple([self.subdtype.descr_get_str(space), + self.descr_get_shape(space)]) else: r = self.descr_get_descr(space) return space.wrap("dtype(%s)" % space.str_w(space.repr(r))) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -166,6 +166,9 @@ d = dtype('S5') assert repr(d) == "dtype('S5')" assert str(d) == "|S5" + d = dtype((' Author: Brian Kearns Branch: Changeset: r69343:8c8fad86e3ec Date: 2014-02-23 21:44 -0500 http://bitbucket.org/pypy/pypy/changeset/8c8fad86e3ec/ Log: keep dtype name attribute constant diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -37,7 +37,7 @@ class W_Dtype(W_Root): - _immutable_fields_ = ["itemtype?", "num", "kind", "name?", "char", + _immutable_fields_ = ["itemtype?", "num", "kind", "name", "char", "w_box_type", "byteorder", "size?", "float_type", "fields?", "fieldnames?", "shape", "subdtype", "base"] @@ -127,10 +127,9 @@ def descr_str(self, space): if not self.num == NPY.VOID: if self.char == 'S': - s = '|S' + str(self.get_size()) + return space.wrap('|S' + str(self.get_size())) else: - s = self.name - return space.wrap(s) + return self.descr_get_name(space) elif self.subdtype is not None: return space.str(space.newtuple([ self.subdtype.descr_get_str(space), @@ -140,10 +139,9 @@ def descr_repr(self, space): if not self.num == NPY.VOID: if self.char == 'S': - s = 'S' + str(self.get_size()) + r = space.wrap('S' + str(self.get_size())) else: - s = self.name 
- r = space.wrap(s) + r = self.descr_get_name(space) elif self.subdtype is not None: r = space.newtuple([self.subdtype.descr_get_str(space), self.descr_get_shape(space)]) @@ -162,6 +160,11 @@ return space.w_None return space.newtuple([space.wrap(self.subdtype), self.descr_get_shape(space)]) + def descr_get_name(self, space): + if self.is_flexible_type(): + return space.wrap(self.name + str(self.get_size() * 8)) + return space.wrap(self.name) + def descr_get_str(self, space): size = self.get_size() basic = self.kind @@ -225,8 +228,8 @@ return space.w_None w_d = space.newdict() for name, (offset, subdtype) in self.fields.iteritems(): - space.setitem(w_d, space.wrap(name), space.newtuple([subdtype, - space.wrap(offset)])) + space.setitem(w_d, space.wrap(name), + space.newtuple([subdtype, space.wrap(offset)])) return w_d def descr_set_fields(self, space, w_fields): @@ -245,10 +248,8 @@ self.fields[space.str_w(key)] = offset, dtype size += dtype.get_size() - self.itemtype = types.RecordType() self.size = size - self.name = "void" + str(8 * self.get_size()) def descr_get_names(self, space): if len(self.fieldnames) == 0: @@ -401,9 +402,7 @@ fields[fldname] = (offset, subdtype) offset += subdtype.get_size() fieldnames.append(fldname) - itemtype = types.RecordType() - return W_Dtype(itemtype, NPY.VOID, NPY.VOIDLTR, - "void" + str(8 * offset * itemtype.get_element_size()), + return W_Dtype(types.RecordType(), NPY.VOID, NPY.VOIDLTR, "void", NPY.VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), fields=fields, fieldnames=fieldnames, size=offset) @@ -513,7 +512,7 @@ subdtype = GetSetProperty(W_Dtype.descr_get_subdtype), str = GetSetProperty(W_Dtype.descr_get_str), - name = interp_attrproperty("name", cls=W_Dtype), + name = GetSetProperty(W_Dtype.descr_get_name), base = GetSetProperty(W_Dtype.descr_get_base), shape = GetSetProperty(W_Dtype.descr_get_shape), isnative = GetSetProperty(W_Dtype.descr_get_isnative), @@ -544,25 +543,22 @@ if char == NPY.STRINGLTR: itemtype = types.StringType() - basename = 'string' + name = 'string' num = NPY.STRING w_box_type = space.gettypefor(interp_boxes.W_StringBox) elif char == NPY.VOIDLTR: itemtype = types.VoidType() - basename = 'void' + name = 'void' num = NPY.VOID w_box_type = space.gettypefor(interp_boxes.W_VoidBox) elif char == NPY.UNICODELTR: itemtype = types.UnicodeType() - basename = 'unicode' + name = 'unicode' num = NPY.UNICODE w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox) else: assert False - - return W_Dtype(itemtype, num, char, - basename + str(8 * size * itemtype.get_element_size()), - char, w_box_type, size=size) + return W_Dtype(itemtype, num, char, name, char, w_box_type, size=size) def new_string_dtype(space, size): @@ -572,7 +568,7 @@ size=size, num=NPY.STRING, kind=NPY.STRINGLTR, - name='string' + str(8 * size * itemtype.get_element_size()), + name='string', char=NPY.STRINGLTR, w_box_type = space.gettypefor(interp_boxes.W_StringBox), ) @@ -585,7 +581,7 @@ size=size, num=NPY.UNICODE, kind=NPY.UNICODELTR, - name='unicode' + str(8 * size * itemtype.get_element_size()), + name='unicode', char=NPY.UNICODELTR, w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox), ) From noreply at buildbot.pypy.org Mon Feb 24 09:09:39 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 24 Feb 2014 09:09:39 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup interp_dtype Message-ID: <20140224080939.347B11C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69344:d11918648b16 Date: 2014-02-23 22:02 -0500 
http://bitbucket.org/pypy/pypy/changeset/d11918648b16/ Log: cleanup interp_dtype diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -536,54 +536,53 @@ try: size = int(name[1:]) except ValueError: - raise OperationError(space.w_TypeError, space.wrap("data type not understood")) + raise oefmt(space.w_TypeError, "data type not understood") if char == NPY.CHARLTR: char = NPY.STRINGLTR size = 1 if char == NPY.STRINGLTR: - itemtype = types.StringType() - name = 'string' - num = NPY.STRING - w_box_type = space.gettypefor(interp_boxes.W_StringBox) + return new_string_dtype(space, size) + elif char == NPY.UNICODELTR: + return new_unicode_dtype(space, size) elif char == NPY.VOIDLTR: - itemtype = types.VoidType() - name = 'void' - num = NPY.VOID - w_box_type = space.gettypefor(interp_boxes.W_VoidBox) - elif char == NPY.UNICODELTR: - itemtype = types.UnicodeType() - name = 'unicode' - num = NPY.UNICODE - w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox) - else: - assert False - return W_Dtype(itemtype, num, char, name, char, w_box_type, size=size) + return new_void_dtype(space, size) + assert False def new_string_dtype(space, size): - itemtype = types.StringType() return W_Dtype( - itemtype, + types.StringType(), size=size, num=NPY.STRING, kind=NPY.STRINGLTR, name='string', char=NPY.STRINGLTR, - w_box_type = space.gettypefor(interp_boxes.W_StringBox), + w_box_type=space.gettypefor(interp_boxes.W_StringBox), ) def new_unicode_dtype(space, size): - itemtype = types.UnicodeType() return W_Dtype( - itemtype, + types.UnicodeType(), size=size, num=NPY.UNICODE, kind=NPY.UNICODELTR, name='unicode', char=NPY.UNICODELTR, - w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox), + w_box_type=space.gettypefor(interp_boxes.W_UnicodeBox), + ) + + +def new_void_dtype(space, size): + return W_Dtype( + types.VoidType(), + size=size, + num=NPY.VOID, + kind=NPY.VOIDLTR, + name='void', + char=NPY.VOIDLTR, + w_box_type=space.gettypefor(interp_boxes.W_VoidBox), ) From noreply at buildbot.pypy.org Mon Feb 24 09:09:40 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 24 Feb 2014 09:09:40 +0100 (CET) Subject: [pypy-commit] pypy default: fix changing field names on record dtypes Message-ID: <20140224080940.66C6D1C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69345:3b0fac02a353 Date: 2014-02-23 22:49 -0500 http://bitbucket.org/pypy/pypy/changeset/3b0fac02a353/ Log: fix changing field names on record dtypes diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -232,21 +232,23 @@ space.newtuple([subdtype, space.wrap(offset)])) return w_d - def descr_set_fields(self, space, w_fields): + def descr_set_fields(self, space, w_fieldnames, w_fields): if w_fields == space.w_None: self.fields = None else: + self.fieldnames = [] self.fields = {} size = 0 - for key in space.listview(w_fields): - value = space.getitem(w_fields, key) + for w_name in space.fixedview(w_fieldnames): + name = space.str_w(w_name) + value = space.getitem(w_fields, w_name) dtype = space.getitem(value, space.wrap(0)) assert isinstance(dtype, W_Dtype) + offset = space.int_w(space.getitem(value, space.wrap(1))) - offset = space.int_w(space.getitem(value, space.wrap(1))) - self.fields[space.str_w(key)] = offset, dtype - + self.fieldnames.append(name) + 
self.fields[name] = offset, dtype size += dtype.get_size() self.itemtype = types.RecordType() self.size = size @@ -257,20 +259,27 @@ return space.newtuple([space.wrap(name) for name in self.fieldnames]) def descr_set_names(self, space, w_names): + if len(self.fieldnames) == 0: + raise oefmt(space.w_ValueError, "there are no fields defined") + if not space.issequence_w(w_names) or \ + space.len_w(w_names) != len(self.fieldnames): + raise oefmt(space.w_ValueError, + "must replace all names at once " + "with a sequence of length %d", + len(self.fieldnames)) fieldnames = [] - if w_names != space.w_None: - iter = space.iter(w_names) - while True: - try: - name = space.str_w(space.next(iter)) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break - if name in fieldnames: - raise OperationError(space.w_ValueError, space.wrap( - "Duplicate field names given.")) - fieldnames.append(name) + for w_name in space.fixedview(w_names): + if not space.isinstance_w(w_name, space.w_str): + raise oefmt(space.w_ValueError, + "item #%d of names is of type %T and not string", + len(fieldnames), w_name) + fieldnames.append(space.str_w(w_name)) + fields = {} + for i in range(len(self.fieldnames)): + if fieldnames[i] in fields: + raise oefmt(space.w_ValueError, "Duplicate field names given.") + fields[fieldnames[i]] = self.fields[self.fieldnames[i]] + self.fields = fields self.fieldnames = fieldnames def descr_del_names(self, space): @@ -353,11 +362,9 @@ endian = NPY.NATIVE self.byteorder = endian - fieldnames = space.getitem(w_data, space.wrap(3)) - self.descr_set_names(space, fieldnames) - - fields = space.getitem(w_data, space.wrap(4)) - self.descr_set_fields(space, fields) + w_fieldnames = space.getitem(w_data, space.wrap(3)) + w_fields = space.getitem(w_data, space.wrap(4)) + self.descr_set_fields(space, w_fieldnames, w_fields) @unwrap_spec(new_order=str) def descr_newbyteorder(self, space, new_order=NPY.SWAP): diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -48,6 +48,10 @@ assert dtype('bool') is d assert dtype('|b1') is d assert repr(type(d)) == "" + exc = raises(ValueError, "d.names = []") + assert exc.value[0] == "there are no fields defined" + exc = raises(ValueError, "d.names = None") + assert exc.value[0] == "there are no fields defined" assert dtype('int8').num == 1 assert dtype('int8').name == 'int8' @@ -1006,21 +1010,34 @@ from numpypy import dtype, void raises(ValueError, "dtype([('x', int), ('x', float)])") - d = dtype([("x", "int32"), ("y", "int32"), ("z", "int32"), ("value", float)]) - assert d.fields['x'] == (dtype('int32'), 0) - assert d.fields['value'] == (dtype(float), 12) - assert d['x'] == dtype('int32') - assert d.name == "void160" + d = dtype([("x", " Author: Brian Kearns Branch: Changeset: r69346:12fa4e02e3cf Date: 2014-02-24 02:15 -0500 http://bitbucket.org/pypy/pypy/changeset/12fa4e02e3cf/ Log: improve dtype setstate functionality diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -37,13 +37,15 @@ class W_Dtype(W_Root): - _immutable_fields_ = ["itemtype?", "num", "kind", "name", "char", - "w_box_type", "byteorder", "size?", "float_type", - "fields?", "fieldnames?", "shape", "subdtype", "base"] + _immutable_fields_ = [ + "num", "kind", "name", "char", "w_box_type", 
"float_type", + "itemtype?", "byteorder?", "fields?", "fieldnames?", "size?", + "shape?", "subdtype?", "base?" + ] def __init__(self, itemtype, num, kind, name, char, w_box_type, byteorder=NPY.NATIVE, size=1, alternate_constructors=[], aliases=[], float_type=None, - fields=None, fieldnames=None, shape=[], subdtype=None): + fields={}, fieldnames=[], shape=[], subdtype=None): self.itemtype = itemtype self.num = num self.kind = kind @@ -56,10 +58,8 @@ self.aliases = aliases self.float_type = float_type self.fields = fields - if fieldnames is None: - fieldnames = [] self.fieldnames = fieldnames - self.shape = list(shape) + self.shape = shape self.subdtype = subdtype if not subdtype: self.base = self @@ -102,16 +102,16 @@ return self.kind == NPY.GENBOOLLTR def is_record_type(self): - return self.fields is not None + return bool(self.fields) def is_str_type(self): return self.num == NPY.STRING def is_str_or_unicode(self): - return (self.num == NPY.STRING or self.num == NPY.UNICODE) + return self.num == NPY.STRING or self.num == NPY.UNICODE def is_flexible_type(self): - return (self.is_str_or_unicode() or self.is_record_type()) + return self.is_str_or_unicode() or self.num == NPY.VOID def is_native(self): return self.byteorder in (NPY.NATIVE, NPY.NATBYTE) @@ -125,28 +125,29 @@ return get_dtype_cache(space).dtypes_by_name[self.byteorder + self.float_type] def descr_str(self, space): - if not self.num == NPY.VOID: - if self.char == 'S': - return space.wrap('|S' + str(self.get_size())) - else: - return self.descr_get_name(space) + if self.fields: + return space.str(self.descr_get_descr(space)) elif self.subdtype is not None: return space.str(space.newtuple([ self.subdtype.descr_get_str(space), self.descr_get_shape(space)])) - return space.str(self.descr_get_descr(space)) + else: + if self.is_flexible_type(): + return space.wrap('|' + self.char + str(self.get_size())) + else: + return self.descr_get_name(space) def descr_repr(self, space): - if not self.num == NPY.VOID: - if self.char == 'S': - r = space.wrap('S' + str(self.get_size())) - else: - r = self.descr_get_name(space) + if self.fields: + r = self.descr_get_descr(space) elif self.subdtype is not None: r = space.newtuple([self.subdtype.descr_get_str(space), self.descr_get_shape(space)]) else: - r = self.descr_get_descr(space) + if self.is_flexible_type(): + r = space.wrap(self.char + str(self.get_size())) + else: + r = self.descr_get_name(space) return space.wrap("dtype(%s)" % space.str_w(space.repr(r))) def descr_get_itemsize(self, space): @@ -224,7 +225,7 @@ return space.wrap(not self.eq(space, w_other)) def descr_get_fields(self, space): - if self.fields is None: + if not self.fields: return space.w_None w_d = space.newdict() for name, (offset, subdtype) in self.fields.iteritems(): @@ -232,34 +233,13 @@ space.newtuple([subdtype, space.wrap(offset)])) return w_d - def descr_set_fields(self, space, w_fieldnames, w_fields): - if w_fields == space.w_None: - self.fields = None - else: - self.fieldnames = [] - self.fields = {} - size = 0 - for w_name in space.fixedview(w_fieldnames): - name = space.str_w(w_name) - value = space.getitem(w_fields, w_name) - - dtype = space.getitem(value, space.wrap(0)) - assert isinstance(dtype, W_Dtype) - offset = space.int_w(space.getitem(value, space.wrap(1))) - - self.fieldnames.append(name) - self.fields[name] = offset, dtype - size += dtype.get_size() - self.itemtype = types.RecordType() - self.size = size - def descr_get_names(self, space): - if len(self.fieldnames) == 0: + if not self.fields: return 
space.w_None return space.newtuple([space.wrap(name) for name in self.fieldnames]) def descr_set_names(self, space, w_names): - if len(self.fieldnames) == 0: + if not self.fields: raise oefmt(space.w_ValueError, "there are no fields defined") if not space.issequence_w(w_names) or \ space.len_w(w_names) != len(self.fieldnames): @@ -354,17 +334,63 @@ return space.newtuple([w_class, builder_args, data]) def descr_setstate(self, space, w_data): - if space.int_w(space.getitem(w_data, space.wrap(0))) != 3: - raise OperationError(space.w_NotImplementedError, space.wrap("Pickling protocol version not supported")) + if self.fields is None: # if builtin dtype + return space.w_None + + version = space.int_w(space.getitem(w_data, space.wrap(0))) + if version != 3: + raise oefmt(space.w_ValueError, + "can't handle version %d of numpy.dtype pickle", + version) endian = space.str_w(space.getitem(w_data, space.wrap(1))) if endian == NPY.NATBYTE: endian = NPY.NATIVE - self.byteorder = endian + w_subarray = space.getitem(w_data, space.wrap(2)) w_fieldnames = space.getitem(w_data, space.wrap(3)) w_fields = space.getitem(w_data, space.wrap(4)) - self.descr_set_fields(space, w_fieldnames, w_fields) + size = space.int_w(space.getitem(w_data, space.wrap(5))) + + if (w_fieldnames == space.w_None) != (w_fields == space.w_None): + raise oefmt(space.w_ValueError, "inconsistent fields and names") + + self.byteorder = endian + self.shape = [] + self.subdtype = None + self.base = self + + if w_subarray != space.w_None: + if not space.isinstance_w(w_subarray, space.w_tuple) or \ + space.len_w(w_subarray) != 2: + raise oefmt(space.w_ValueError, + "incorrect subarray in __setstate__") + subdtype, w_shape = space.fixedview(w_subarray) + assert isinstance(subdtype, W_Dtype) + if not base.issequence_w(space, w_shape): + self.shape = [space.int_w(w_shape)] + else: + self.shape = [space.int_w(w_s) for w_s in space.fixedview(w_shape)] + self.subdtype = subdtype + self.base = subdtype.base + + if w_fieldnames != space.w_None: + self.fieldnames = [] + self.fields = {} + for w_name in space.fixedview(w_fieldnames): + name = space.str_w(w_name) + value = space.getitem(w_fields, w_name) + + dtype = space.getitem(value, space.wrap(0)) + assert isinstance(dtype, W_Dtype) + offset = space.int_w(space.getitem(value, space.wrap(1))) + + self.fieldnames.append(name) + self.fields[name] = offset, dtype + self.itemtype = types.RecordType() + + if self.is_flexible_type(): + self.size = size @unwrap_spec(new_order=str) def descr_newbyteorder(self, space, new_order=NPY.SWAP): @@ -862,6 +888,8 @@ float_type=dtype.float_type) for alias in dtype.aliases: self.dtypes_by_name[alias] = dtype + for dtype in self.dtypes_by_name.values(): + dtype.fields = None # mark these as builtin typeinfo_full = { 'LONGLONG': self.w_int64dtype, diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -1096,6 +1096,96 @@ assert dt.subdtype == (dtype(float), (10,)) assert dt.base == dtype(float) + def test_setstate(self): + import numpy as np + import sys + d = np.dtype('f8') + d.__setstate__((3, '|', (np.dtype('float64'), (2,)), None, None, 20, 1, 0)) + assert d.str == ('<' if sys.byteorder == 'little' else '>') + 'f8' + assert d.fields is None + assert d.shape == () + assert d.itemsize == 8 + assert d.subdtype is None + assert repr(d) == "dtype('float64')" + + d = np.dtype(('>' if sys.byteorder == 'little' else '<') + 
'f8') + d.__setstate__((3, '|', (np.dtype('float64'), (2,)), None, None, 20, 1, 0)) + assert d.str == '|f8' + assert d.fields is None + assert d.shape == (2,) + assert d.itemsize == 8 + assert d.subdtype is not None + assert repr(d) == "dtype((' Author: Brian Kearns Branch: Changeset: r69347:2404d646630a Date: 2014-02-24 02:32 -0500 http://bitbucket.org/pypy/pypy/changeset/2404d646630a/ Log: more cleanup for interp_dtype diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -473,10 +473,10 @@ elif space.isinstance_w(w_item, space.w_int): indx = space.int_w(w_item) try: - item = self.dtype.fieldnames[indx] + item = self.dtype.names[indx] except IndexError: if indx < 0: - indx += len(self.dtype.fieldnames) + indx += len(self.dtype.names) raise OperationError(space.w_IndexError, space.wrap( "invalid index (%d)" % indx)) else: diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -39,32 +39,34 @@ class W_Dtype(W_Root): _immutable_fields_ = [ "num", "kind", "name", "char", "w_box_type", "float_type", - "itemtype?", "byteorder?", "fields?", "fieldnames?", "size?", - "shape?", "subdtype?", "base?" + "itemtype?", "byteorder?", "names?", "fields?", "size?", + "shape?", "subdtype?", "base?", + "alternate_constructors", "aliases", ] - def __init__(self, itemtype, num, kind, name, char, w_box_type, byteorder=NPY.NATIVE, - size=1, alternate_constructors=[], aliases=[], float_type=None, - fields={}, fieldnames=[], shape=[], subdtype=None): + def __init__(self, itemtype, num, kind, name, char, w_box_type, + float_type=None, byteorder=NPY.NATIVE, names=[], fields={}, + size=1, shape=[], subdtype=None, + alternate_constructors=[], aliases=[]): self.itemtype = itemtype self.num = num self.kind = kind self.name = name self.char = char self.w_box_type = w_box_type + self.float_type = float_type self.byteorder = byteorder + self.names = names + self.fields = fields self.size = size - self.alternate_constructors = alternate_constructors - self.aliases = aliases - self.float_type = float_type - self.fields = fields - self.fieldnames = fieldnames self.shape = shape self.subdtype = subdtype if not subdtype: self.base = self else: self.base = subdtype.base + self.alternate_constructors = alternate_constructors + self.aliases = aliases def __repr__(self): if self.fields is not None: @@ -186,7 +188,7 @@ self.descr_get_str(space)])]) else: descr = [] - for name in self.fieldnames: + for name in self.names: subdtype = self.fields[name][1] subdescr = [space.wrap(name)] if subdtype.is_record_type(): @@ -236,31 +238,31 @@ def descr_get_names(self, space): if not self.fields: return space.w_None - return space.newtuple([space.wrap(name) for name in self.fieldnames]) + return space.newtuple([space.wrap(name) for name in self.names]) def descr_set_names(self, space, w_names): if not self.fields: raise oefmt(space.w_ValueError, "there are no fields defined") if not space.issequence_w(w_names) or \ - space.len_w(w_names) != len(self.fieldnames): + space.len_w(w_names) != len(self.names): raise oefmt(space.w_ValueError, "must replace all names at once " "with a sequence of length %d", - len(self.fieldnames)) - fieldnames = [] + len(self.names)) + names = [] for w_name in space.fixedview(w_names): if not space.isinstance_w(w_name, space.w_str): raise 
oefmt(space.w_ValueError, "item #%d of names is of type %T and not string", - len(fieldnames), w_name) - fieldnames.append(space.str_w(w_name)) + len(names), w_name) + names.append(space.str_w(w_name)) fields = {} - for i in range(len(self.fieldnames)): - if fieldnames[i] in fields: + for i in range(len(self.names)): + if names[i] in fields: raise oefmt(space.w_ValueError, "Duplicate field names given.") - fields[fieldnames[i]] = self.fields[self.fieldnames[i]] + fields[names[i]] = self.fields[self.names[i]] self.fields = fields - self.fieldnames = fieldnames + self.names = names def descr_del_names(self, space): raise OperationError(space.w_AttributeError, space.wrap( @@ -278,7 +280,7 @@ elif space.isinstance_w(w_item, space.w_int): indx = space.int_w(w_item) try: - item = self.fieldnames[indx] + item = self.names[indx] except IndexError: raise OperationError(space.w_IndexError, space.wrap( "Field index %d out of range." % indx)) @@ -348,11 +350,11 @@ endian = NPY.NATIVE w_subarray = space.getitem(w_data, space.wrap(2)) - w_fieldnames = space.getitem(w_data, space.wrap(3)) + w_names = space.getitem(w_data, space.wrap(3)) w_fields = space.getitem(w_data, space.wrap(4)) size = space.int_w(space.getitem(w_data, space.wrap(5))) - if (w_fieldnames == space.w_None) != (w_fields == space.w_None): + if (w_names == space.w_None) != (w_fields == space.w_None): raise oefmt(space.w_ValueError, "inconsistent fields and names") self.byteorder = endian @@ -374,10 +376,10 @@ self.subdtype = subdtype self.base = subdtype.base - if w_fieldnames != space.w_None: - self.fieldnames = [] + if w_names != space.w_None: + self.names = [] self.fields = {} - for w_name in space.fixedview(w_fieldnames): + for w_name in space.fixedview(w_names): name = space.str_w(w_name) value = space.getitem(w_fields, w_name) @@ -385,7 +387,7 @@ assert isinstance(dtype, W_Dtype) offset = space.int_w(space.getitem(value, space.wrap(1))) - self.fieldnames.append(name) + self.names.append(name) self.fields[name] = offset, dtype self.itemtype = types.RecordType() @@ -403,7 +405,7 @@ endian = newendian itemtype = self.itemtype.__class__(endian in (NPY.NATIVE, NPY.NATBYTE)) return W_Dtype(itemtype, self.num, self.kind, self.name, self.char, - self.w_box_type, endian, size=self.size) + self.w_box_type, byteorder=endian, size=self.size) @specialize.arg(2) @@ -411,7 +413,7 @@ lst_w = space.listview(w_lst) fields = {} offset = 0 - fieldnames = [] + names = [] for i in range(len(lst_w)): w_elem = lst_w[i] if simple: @@ -434,10 +436,10 @@ assert isinstance(subdtype, W_Dtype) fields[fldname] = (offset, subdtype) offset += subdtype.get_size() - fieldnames.append(fldname) + names.append(fldname) return W_Dtype(types.RecordType(), NPY.VOID, NPY.VOIDLTR, "void", NPY.VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), - fields=fields, fieldnames=fieldnames, size=offset) + names=names, fields=fields, size=offset) def dtype_from_dict(space, w_dict): diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1843,7 +1843,7 @@ assert isinstance(item, interp_boxes.W_VoidBox) dt = item.arr.dtype ret_unwrapped = [] - for name in dt.fieldnames: + for name in dt.names: ofs, dtype = dt.fields[name] if isinstance(dtype.itemtype, VoidType): read_val = dtype.itemtype.readarray(item.arr, ofs, 0, dtype) @@ -1886,7 +1886,7 @@ items_w = [None] * len(dtype.fields) arr = VoidBoxStorage(dtype.get_size(), dtype) for i in range(len(dtype.fields)): - ofs, subdtype = 
dtype.fields[dtype.fieldnames[i]] + ofs, subdtype = dtype.fields[dtype.names[i]] itemtype = subdtype.itemtype try: w_box = itemtype.coerce(space, subdtype, items_w[i]) @@ -1922,7 +1922,7 @@ assert isinstance(box, interp_boxes.W_VoidBox) items = [] dtype = box.dtype - for name in dtype.fieldnames: + for name in dtype.names: ofs, subdtype = dtype.fields[name] itemtype = subdtype.itemtype subbox = itemtype.read(box.arr, box.ofs, ofs, subdtype) @@ -1934,7 +1934,7 @@ assert isinstance(box, interp_boxes.W_VoidBox) pieces = ["("] first = True - for name in box.dtype.fieldnames: + for name in box.dtype.names: ofs, subdtype = box.dtype.fields[name] tp = subdtype.itemtype if first: From noreply at buildbot.pypy.org Mon Feb 24 09:09:43 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 24 Feb 2014 09:09:43 +0100 (CET) Subject: [pypy-commit] pypy default: provide dtype.isbuiltin Message-ID: <20140224080943.DA2FA1C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69348:528cf66eae75 Date: 2014-02-24 02:32 -0500 http://bitbucket.org/pypy/pypy/changeset/528cf66eae75/ Log: provide dtype.isbuiltin diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/arrayimpl/sort.py @@ -12,7 +12,7 @@ from rpython.rlib.objectmodel import specialize from pypy.interpreter.error import OperationError, oefmt from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy import interp_dtype, types +from pypy.module.micronumpy import interp_dtype, types, constants as NPY from pypy.module.micronumpy.iter import AxisIterator INT_SIZE = rffi.sizeof(lltype.Signed) @@ -310,9 +310,9 @@ def sort_array(arr, space, w_axis, w_order): cache = space.fromcache(SortCache) # that populates SortClasses itemtype = arr.dtype.itemtype - if not arr.dtype.is_native(): - raise OperationError(space.w_NotImplementedError, - space.wrap("sorting of non-native btyeorder not supported yet")) + if arr.dtype.byteorder == NPY.OPPBYTE: + raise oefmt(space.w_NotImplementedError, + "sorting of non-native byteorder not supported yet") for tp in all_types: if isinstance(itemtype, tp[0]): return cache._lookup(tp)(arr, space, w_axis, diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -45,7 +45,7 @@ ] def __init__(self, itemtype, num, kind, name, char, w_box_type, - float_type=None, byteorder=NPY.NATIVE, names=[], fields={}, + float_type=None, byteorder=None, names=[], fields={}, size=1, shape=[], subdtype=None, alternate_constructors=[], aliases=[]): self.itemtype = itemtype @@ -55,6 +55,8 @@ self.char = char self.w_box_type = w_box_type self.float_type = float_type + if byteorder is None: + byteorder = NPY.IGNORE if self.num == NPY.STRING else NPY.NATIVE self.byteorder = byteorder self.names = names self.fields = fields @@ -124,7 +126,10 @@ def get_float_dtype(self, space): assert self.kind == NPY.COMPLEXLTR assert self.float_type is not None - return get_dtype_cache(space).dtypes_by_name[self.byteorder + self.float_type] + dtype = get_dtype_cache(space).dtypes_by_name[self.float_type] + if self.byteorder == NPY.OPPBYTE: + dtype = dtype.descr_newbyteorder(space) + return dtype def descr_str(self, space): if self.fields: @@ -158,6 +163,11 @@ def descr_get_alignment(self, space): return space.wrap(self.itemtype.alignment) + def descr_get_isbuiltin(self, space): + 
if self.fields is None: + return space.wrap(1) + return space.wrap(0) + def descr_get_subdtype(self, space): if self.subdtype is None: return space.w_None @@ -405,7 +415,8 @@ endian = newendian itemtype = self.itemtype.__class__(endian in (NPY.NATIVE, NPY.NATBYTE)) return W_Dtype(itemtype, self.num, self.kind, self.name, self.char, - self.w_box_type, byteorder=endian, size=self.size) + self.w_box_type, self.float_type, byteorder=endian, + size=self.size) @specialize.arg(2) @@ -492,10 +503,15 @@ name = space.str_w(w_dtype) if ',' in name: return dtype_from_spec(space, w_dtype) + cname = name[1:] if name[0] == NPY.OPPBYTE else name try: - return cache.dtypes_by_name[name] + dtype = cache.dtypes_by_name[cname] except KeyError: pass + else: + if name[0] == NPY.OPPBYTE: + dtype = dtype.descr_newbyteorder(space) + return dtype if name[0] in 'VSUc' or name[0] in '<>=|' and name[1] in 'VSUc': return variable_dtype(space, name) raise oefmt(space.w_TypeError, 'data type "%s" not understood', name) @@ -544,6 +560,7 @@ byteorder = interp_attrproperty("byteorder", cls=W_Dtype), itemsize = GetSetProperty(W_Dtype.descr_get_itemsize), alignment = GetSetProperty(W_Dtype.descr_get_alignment), + isbuiltin = GetSetProperty(W_Dtype.descr_get_isbuiltin), subdtype = GetSetProperty(W_Dtype.descr_get_subdtype), str = GetSetProperty(W_Dtype.descr_get_str), @@ -874,6 +891,7 @@ # we reverse, so the stuff with lower numbers override stuff with # higher numbers for dtype in reversed(self.builtin_dtypes): + dtype.fields = None # mark these as builtin self.dtypes_by_num[dtype.num] = dtype self.dtypes_by_name[dtype.name] = dtype for can_name in [dtype.kind + str(dtype.get_size()), @@ -882,16 +900,8 @@ self.dtypes_by_name[NPY.NATBYTE + can_name] = dtype self.dtypes_by_name[NPY.NATIVE + can_name] = dtype self.dtypes_by_name[NPY.IGNORE + can_name] = dtype - new_name = NPY.OPPBYTE + can_name - itemtype = type(dtype.itemtype)(False) - self.dtypes_by_name[new_name] = W_Dtype( - itemtype, dtype.num, dtype.kind, new_name, dtype.char, - dtype.w_box_type, byteorder=NPY.OPPBYTE, - float_type=dtype.float_type) for alias in dtype.aliases: self.dtypes_by_name[alias] = dtype - for dtype in self.dtypes_by_name.values(): - dtype.fields = None # mark these as builtin typeinfo_full = { 'LONGLONG': self.w_int64dtype, diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -40,6 +40,7 @@ def test_dtype_basic(self): from numpypy import dtype + import sys d = dtype('?') assert d.num == 0 @@ -47,6 +48,8 @@ assert dtype(d) is d assert dtype('bool') is d assert dtype('|b1') is d + b = '>' if sys.byteorder == 'little' else '<' + assert dtype(b + 'i4') is not dtype(b + 'i4') assert repr(type(d)) == "" exc = raises(ValueError, "d.names = []") assert exc.value[0] == "there are no fields defined" @@ -157,6 +160,26 @@ a = array(range(5), long) assert a.dtype is dtype(long) + def test_isbuiltin(self): + import numpy as np + import sys + assert np.dtype('?').isbuiltin == 1 + assert np.dtype(int).newbyteorder().isbuiltin == 0 + assert np.dtype(np.dtype(int)).isbuiltin == 1 + assert np.dtype('=i4').isbuiltin == 1 + b = '>' if sys.byteorder == 'little' else '<' + assert np.dtype(b + 'i4').isbuiltin == 0 + assert np.dtype(b + 'i4').newbyteorder().isbuiltin == 0 + b = '<' if sys.byteorder == 'little' else '>' + assert np.dtype(b + 'i4').isbuiltin == 1 + assert np.dtype(b + 'i4').newbyteorder().isbuiltin == 0 + assert 
np.dtype((int, 2)).isbuiltin == 0 + assert np.dtype([('', int), ('', float)]).isbuiltin == 0 + assert np.dtype('void').isbuiltin == 1 + assert np.dtype(str).isbuiltin == 1 + assert np.dtype('S0').isbuiltin == 1 + assert np.dtype('S5').isbuiltin == 0 + def test_repr_str(self): from numpypy import dtype @@ -837,6 +860,11 @@ assert dtype(nnp + 'i8').byteorder == nnp assert dtype('=i8').byteorder == '=' assert dtype(byteorder + 'i8').byteorder == '=' + assert dtype(str).byteorder == '|' + assert dtype('S5').byteorder == '|' + assert dtype('>S5').byteorder == '|' + assert dtype(' Author: Brian Kearns Branch: Changeset: r69349:f9df75b6805c Date: 2014-02-24 03:30 -0500 http://bitbucket.org/pypy/pypy/changeset/f9df75b6805c/ Log: another dtype repr case diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -56,7 +56,10 @@ self.w_box_type = w_box_type self.float_type = float_type if byteorder is None: - byteorder = NPY.IGNORE if self.num == NPY.STRING else NPY.NATIVE + if itemtype.get_element_size() == 1: + byteorder = NPY.IGNORE + else: + byteorder = NPY.NATIVE self.byteorder = byteorder self.names = names self.fields = fields @@ -140,7 +143,7 @@ self.descr_get_shape(space)])) else: if self.is_flexible_type(): - return space.wrap('|' + self.char + str(self.get_size())) + return self.descr_get_str(space) else: return self.descr_get_name(space) @@ -152,7 +155,11 @@ self.descr_get_shape(space)]) else: if self.is_flexible_type(): - r = space.wrap(self.char + str(self.get_size())) + if self.byteorder != NPY.IGNORE: + byteorder = NPY.NATBYTE if self.is_native() else NPY.OPPBYTE + else: + byteorder = '' + r = space.wrap(byteorder + self.char + str(self.size)) else: r = self.descr_get_name(space) return space.wrap("dtype(%s)" % space.str_w(space.repr(r))) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -182,7 +182,7 @@ def test_repr_str(self): from numpypy import dtype - + b = dtype(int).newbyteorder().newbyteorder().byteorder assert '.dtype' in repr(dtype) d = dtype('?') assert repr(d) == "dtype('bool')" @@ -193,9 +193,15 @@ d = dtype('S5') assert repr(d) == "dtype('S5')" assert str(d) == "|S5" + d = dtype('U5') + assert repr(d) == "dtype('%sU5')" % b + assert str(d) == "%sU5" % b d = dtype(('S5').byteorder == '|' assert dtype(' Author: Armin Rigo Branch: c7-refactor Changeset: r818:8c254c425ce5 Date: 2014-02-24 10:29 +0100 http://bitbucket.org/pypy/stmgc/changeset/8c254c425ce5/ Log: Reimplement the _stm_write_slowpath(). diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -13,71 +13,93 @@ { assert(_running_transaction()); assert(!_is_in_nursery(obj)); - abort();//... -#if 0 - /* for old objects from the same transaction, we are done now */ - if (obj_from_same_transaction(obj)) { - obj->stm_flags |= GCFLAG_WRITE_BARRIER_CALLED; - LIST_APPEND(STM_PSEGMENT->old_objects_pointing_to_young, obj); + /* is this an object from the same transaction, outside the nursery? */ + if ((obj->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) == + STM_PSEGMENT->overflow_number) { + + obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; + assert(STM_PSEGMENT->overflow_objects_pointing_to_nursery != NULL); + LIST_APPEND(STM_PSEGMENT->overflow_objects_pointing_to_nursery, obj); return; } + /* do a read-barrier now. 
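(The two dtype changesets above, r69348 and r69349, pin down how byte order interacts with isbuiltin and with one-byte and flexible dtypes. As a stand-alone illustration of the same behaviour, here is a short session written against plain NumPy of that era rather than the numpypy module under change; it is for comparison only and is not part of any changeset.)

import numpy as np
import sys

# the non-native byte order marker on this machine
swapped = '>' if sys.byteorder == 'little' else '<'

assert np.dtype('S5').byteorder == '|'            # 1-byte and string dtypes ignore byte order
assert np.dtype('i4').isbuiltin == 1              # native scalar dtype is "builtin"
assert np.dtype(swapped + 'i4').isbuiltin == 0    # its byte-swapped variant is not
assert np.dtype('i4').newbyteorder().byteorder == swapped
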
Note that this must occur before the + safepoints that may be issued in contention_management(). */ + stm_read(obj); - /* otherwise, we need to privatize the pages containing the object, - if they are still SHARED_PAGE. The common case is that there is - only one page in total. */ - size_t obj_size = 0; - uintptr_t first_page = ((uintptr_t)obj) / 4096UL; + /* claim the write-lock for this object. In case we're running the + same transaction since a long while, the object can be already in + 'modified_old_objects' (but, because it had GCFLAG_WRITE_BARRIER, + not in 'old_objects_pointing_to_nursery'). We'll detect this case + by finding that we already own the write-lock. */ + uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - WRITELOCK_START; + uint8_t lock_num = STM_PSEGMENT->write_lock_num; + assert((intptr_t)lock_idx >= 0); + retry: + if (write_locks[lock_idx] == 0) { + if (UNLIKELY(!__sync_bool_compare_and_swap(&write_locks[lock_idx], + 0, lock_num))) + goto retry; - /* If the object is in the uniform pages of small objects (outside the - nursery), then it fits into one page. Otherwise, we need to compute - it based on its location and size. */ - if ((obj->stm_flags & GCFLAG_SMALL_UNIFORM) != 0) { - pages_privatize(first_page, 1, true); + /* First change to this old object from this transaction. + Add it to the list 'modified_old_objects'. */ + LIST_APPEND(STM_PSEGMENT->modified_old_objects, obj); + + /* We need to privatize the pages containing the object, if they + are still SHARED_PAGE. The common case is that there is only + one page in total. */ + size_t obj_size = 0; + uintptr_t first_page = ((uintptr_t)obj) / 4096UL; + + /* If the object is in the uniform pages of small objects + (outside the nursery), then it fits into one page. This is + the common case. Otherwise, we need to compute it based on + its location and size. */ + if ((obj->stm_flags & GCFLAG_SMALL_UNIFORM) != 0) { + pages_privatize(first_page, 1, true); + } + else { + /* get the size of the object */ + obj_size = stmcb_size_rounded_up( + (struct object_s *)REAL_ADDRESS(STM_SEGMENT->segment_base, obj)); + + /* that's the page *following* the last page with the object */ + uintptr_t end_page = (((uintptr_t)obj) + obj_size + 4095) / 4096UL; + + pages_privatize(first_page, end_page - first_page, true); + } + } + else if (write_locks[lock_idx] == lock_num) { + OPT_ASSERT(STM_PSEGMENT->old_objects_pointing_to_nursery != NULL); +#ifdef STM_TESTS + bool found = false; + LIST_FOREACH_R(STM_PSEGMENT->modified_old_objects, object_t *, + ({ if (item == obj) { found = true; break; } })); + assert(found); +#endif } else { - /* get the size of the object */ - obj_size = stmcb_size_rounded_up( - (struct object_s *)REAL_ADDRESS(STM_SEGMENT->segment_base, obj)); - - /* that's the page *following* the last page with the object */ - uintptr_t end_page = (((uintptr_t)obj) + obj_size + 4095) / 4096UL; - - pages_privatize(first_page, end_page - first_page, true); - } - - - /* do a read-barrier *before* the safepoints that may be issued in - contention_management() */ - stm_read(obj); - - /* claim the write-lock for this object */ - retry:; - uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - WRITELOCK_START; - uint8_t lock_num = STM_PSEGMENT->write_lock_num; - uint8_t prev_owner; - assert((intptr_t)lock_idx >= 0); - prev_owner = __sync_val_compare_and_swap(&write_locks[lock_idx], - 0, lock_num); - - /* if there was no lock-holder, we are done; otherwise... 
*/ - if (UNLIKELY(prev_owner != 0)) { - /* otherwise, call the contention manager, and then possibly retry. - By construction it should not be possible that the owner - of the object is already us */ + /* call the contention manager, and then retry (unless we were + aborted). */ mutex_lock(); - contention_management(prev_owner - 1, true); + uint8_t prev_owner = ((volatile uint8_t *)write_locks)[lock_idx]; + if (prev_owner != 0 && prev_owner != lock_num) + contention_management(prev_owner - 1, true); mutex_unlock(); goto retry; } + /* A common case for write_locks[] that was either 0 or lock_num: + we need to add the object to 'old_objects_pointing_to_nursery' + if there is such a list. */ + if (STM_PSEGMENT->old_objects_pointing_to_nursery != NULL) + LIST_APPEND(STM_PSEGMENT->old_objects_pointing_to_nursery, obj); + /* add the write-barrier-already-called flag ONLY if we succeeded in getting the write-lock */ - assert(!(obj->stm_flags & GCFLAG_WRITE_BARRIER_CALLED)); - obj->stm_flags |= GCFLAG_WRITE_BARRIER_CALLED; - LIST_APPEND(STM_PSEGMENT->modified_objects, obj); -#endif + assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); + obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; } static void reset_transaction_read_version(void) @@ -143,6 +165,8 @@ } assert(list_is_empty(STM_PSEGMENT->modified_old_objects)); + assert(STM_PSEGMENT->old_objects_pointing_to_nursery == NULL); + assert(STM_PSEGMENT->overflow_objects_pointing_to_nursery == NULL); #ifdef STM_TESTS check_nursery_at_transaction_start(); diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -61,15 +61,27 @@ struct stm_priv_segment_info_s { struct stm_segment_info_s pub; + /* List of old objects (older than the current transaction) that the + current transaction attempts to modify. This is used to track + the STM status: it's old objects that where written to and that + need to be copied to other segments upon commit. */ + struct list_s *modified_old_objects; + + /* List of the modified old objects that may point to the nursery. + If the current transaction didn't span a minor collection so far, + this is NULL, understood as meaning implicitly "this is the same + as 'modified_old_objects'". Otherwise, this list is a subset of + 'modified_old_objects'. */ + struct list_s *old_objects_pointing_to_nursery; + /* List of overflowed objects (from the same transaction but outside the nursery) on which the write-barrier was triggered, so that - they likely contain a pointer to a nursery object */ + they likely contain a pointer to a nursery object. This is used + by the GC: it's roots for the next minor collection. This is + NULL if the current transaction didn't span a minor collection + so far. 
*/ struct list_s *overflow_objects_pointing_to_nursery; - /* List of old objects (older than the current transaction) that the - current transaction attempts to modify */ - struct list_s *modified_old_objects; - /* Start time: to know approximately for how long a transaction has been running, in contention management */ uint64_t start_time; diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -87,6 +87,7 @@ for (i = 0; i < NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pr = get_priv_segment(i); assert(pr->overflow_objects_pointing_to_nursery == NULL); + assert(pr->old_objects_pointing_to_nursery == NULL); list_free(pr->modified_old_objects); } From noreply at buildbot.pypy.org Mon Feb 24 10:44:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Feb 2014 10:44:15 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Tweaks Message-ID: <20140224094415.B8F9F1C02EA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r819:c39f632101a6 Date: 2014-02-24 10:44 +0100 http://bitbucket.org/pypy/stmgc/changeset/c39f632101a6/ Log: Tweaks diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -251,14 +251,16 @@ static void _finish_transaction(void) { + STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; + STM_PSEGMENT->transaction_state = TS_NONE; + + /* reset these lists to NULL for the next transaction */ + LIST_FREE(STM_PSEGMENT->old_objects_pointing_to_nursery); + LIST_FREE(STM_PSEGMENT->overflow_objects_pointing_to_nursery); + stm_thread_local_t *tl = STM_SEGMENT->running_thread; release_thread_segment(tl); - STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; - STM_PSEGMENT->transaction_state = TS_NONE; - if (STM_PSEGMENT->overflow_objects_pointing_to_nursery != NULL) { - list_free(STM_PSEGMENT->overflow_objects_pointing_to_nursery); - STM_PSEGMENT->overflow_objects_pointing_to_nursery = NULL; - } + /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ } void stm_commit_transaction(void) @@ -315,8 +317,6 @@ static void reset_modified_from_other_segments(void) { - abort();//... -#if 0 /* pull the right versions from other threads in order to reset our pages as part of an abort */ long remote_num = 1 - STM_SEGMENT->segment_num; @@ -327,8 +327,8 @@ STM_PSEGMENT->modified_old_objects, object_t * /*item*/, ({ - /* all objects in 'modified_objects' have this flag */ - assert(item->stm_flags & GCFLAG_WRITE_BARRIER_CALLED); + /* all objects in 'modified_objects' have this flag removed */ + assert((item->stm_flags & GCFLAG_WRITE_BARRIER) == 0); /* memcpy in the opposite direction than push_modified_to_other_segments() */ @@ -337,9 +337,9 @@ ssize_t size = stmcb_size_rounded_up((struct object_s *)src); memcpy(dst, src, size); - /* copying from the other segment removed again the - WRITE_BARRIER_CALLED flag */ - assert(!(item->stm_flags & GCFLAG_WRITE_BARRIER_CALLED)); + /* copying from the other segment added again the + WRITE_BARRIER flag */ + assert(item->stm_flags & GCFLAG_WRITE_BARRIER); /* write all changes to the object before we release the write lock below. 
This is needed because we need to @@ -353,12 +353,11 @@ /* clear the write-lock */ uintptr_t lock_idx = (((uintptr_t)item) >> 4) - WRITELOCK_START; assert((intptr_t)lock_idx >= 0); - assert(write_locks[lock_idx]); + assert(write_locks[lock_idx] == STM_PSEGMENT->write_lock_num); write_locks[lock_idx] = 0; })); - list_clear(STM_PSEGMENT->modified_objects); -#endif + list_clear(STM_PSEGMENT->modified_old_objects); } static void abort_with_mutex(void) diff --git a/c7/stm/list.h b/c7/stm/list.h --- a/c7/stm/list.h +++ b/c7/stm/list.h @@ -13,6 +13,8 @@ free(lst); } +#define LIST_FREE(lst) (list_free(lst), (lst) = NULL) + static struct list_s *_list_grow(struct list_s *, uintptr_t); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -26,6 +26,10 @@ assert(_STM_FAST_ALLOC <= NURSERY_SIZE); _stm_nursery_start = NURSERY_START; _stm_nursery_end = NURSERY_END; + + long i; + for (i = 0; i < NB_SEGMENTS; i++) + get_segment(i)->nursery_current = (stm_char *)NURSERY_START; } static void teardown_nursery(void) From noreply at buildbot.pypy.org Mon Feb 24 11:15:03 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 24 Feb 2014 11:15:03 +0100 (CET) Subject: [pypy-commit] pypy default: fix some numpy string type handling Message-ID: <20140224101503.7E66B1C10A8@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69350:eea11bc790e6 Date: 2014-02-24 04:49 -0500 http://bitbucket.org/pypy/pypy/changeset/eea11bc790e6/ Log: fix some numpy string type handling diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1447,7 +1447,7 @@ # scalars and strings w/o __array__ method isstr = space.isinstance_w(w_object, space.w_str) if not issequence_w(space, w_object) or isstr: - if dtype is None or dtype.is_str_or_unicode(): + if dtype is None or (dtype.is_str_or_unicode() and dtype.get_size() < 1): dtype = interp_ufuncs.find_dtype_for_scalar(space, w_object) return W_NDimArray.new_scalar(space, dtype, w_object) @@ -1500,6 +1500,8 @@ def zeros(space, w_shape, w_dtype=None, w_order=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + if dtype.is_str_or_unicode() and dtype.get_size() < 1: + dtype = interp_dtype.variable_dtype(space, dtype.char + '1') shape = _find_shape(space, w_shape, dtype) return W_NDimArray.from_shape(space, shape, dtype=dtype) @@ -1511,6 +1513,8 @@ else: dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + if dtype.is_str_or_unicode() and dtype.get_size() < 1: + dtype = interp_dtype.variable_dtype(space, dtype.char + '1') return W_NDimArray.from_shape(space, w_a.get_shape(), dtype=dtype, w_instance=w_a if subok else None) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -387,12 +387,15 @@ assert zeros(()).shape == () assert zeros((), dtype='S') == '' assert zeros((), dtype='S').shape == () + assert zeros((), dtype='S').dtype == '|S1' def test_empty_like(self): import numpy as np a = np.empty_like(np.zeros(())) assert a.shape == () assert a.dtype == np.float_ + a = np.empty_like(a, dtype='S') + assert a.dtype == '|S1' a = np.zeros((2, 3)) assert a.shape == (2, 3) a[0,0] = 1 @@ -1677,11 +1680,12 @@ 
assert exc.value[0] == "data-type must not be 0-sized" assert a.view('S4') == '\x03' a = array('abc1', dtype='c') - assert a.view('S4') == 'abc1' import sys if '__pypy__' in sys.builtin_module_names: - raises(NotImplementedError, a.view, [('a', 'i2'), ('b', 'i2')]) + raises(ValueError, a.view, 'S4') + raises(ValueError, a.view, [('a', 'i2'), ('b', 'i2')]) else: + assert a.view('S4') == 'abc1' b = a.view([('a', 'i2'), ('b', 'i2')]) assert b.shape == (1,) assert b[0][0] == 25185 @@ -3397,6 +3401,12 @@ a = array('x', dtype='c') assert str(a.dtype) == '|S1' assert a == 'x' + a = array('abc', 'S2') + assert a.dtype.str == '|S2' + assert a == 'ab' + a = array('abc', 'S5') + assert a.dtype.str == '|S5' + assert a == 'abc' def test_newbyteorder(self): import numpy as np diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1661,15 +1661,17 @@ @jit.unroll_safe def coerce(self, space, dtype, w_item): - from pypy.module.micronumpy.interp_dtype import new_string_dtype if isinstance(w_item, interp_boxes.W_StringBox): return w_item if w_item is None: w_item = space.wrap('') arg = space.str_w(space.str(w_item)) - arr = VoidBoxStorage(len(arg), new_string_dtype(space, len(arg))) - for i in range(len(arg)): + arr = VoidBoxStorage(dtype.size, dtype) + j = min(len(arg), dtype.size) + for i in range(j): arr.storage[i] = arg[i] + for j in range(j, dtype.size): + arr.storage[j] = '\x00' return interp_boxes.W_StringBox(arr, 0, arr.dtype) def store(self, arr, i, offset, box): From noreply at buildbot.pypy.org Mon Feb 24 11:15:04 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 24 Feb 2014 11:15:04 +0100 (CET) Subject: [pypy-commit] pypy default: remove some unnecessary code Message-ID: <20140224101504.C58C91C10A8@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69351:cd4420b9945f Date: 2014-02-24 05:08 -0500 http://bitbucket.org/pypy/pypy/changeset/cd4420b9945f/ Log: remove some unnecessary code diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -90,19 +90,11 @@ obj_iter.next() return out -setslice_driver1 = jit.JitDriver(name='numpy_setslice1', - greens = ['shapelen', 'dtype'], - reds = 'auto') -setslice_driver2 = jit.JitDriver(name='numpy_setslice2', +setslice_driver = jit.JitDriver(name='numpy_setslice', greens = ['shapelen', 'dtype'], reds = 'auto') def setslice(space, shape, target, source): - if target.dtype.is_str_or_unicode(): - return setslice_build_and_convert(space, shape, target, source) - return setslice_to(space, shape, target, source) - -def setslice_to(space, shape, target, source): # note that unlike everything else, target and source here are # array implementations, not arrays target_iter = target.create_iter(shape) @@ -110,22 +102,11 @@ dtype = target.dtype shapelen = len(shape) while not target_iter.done(): - setslice_driver1.jit_merge_point(shapelen=shapelen, dtype=dtype) - target_iter.setitem(source_iter.getitem().convert_to(space, dtype)) - target_iter.next() - source_iter.next() - return target - -def setslice_build_and_convert(space, shape, target, source): - # note that unlike everything else, target and source here are - # array implementations, not arrays - target_iter = target.create_iter(shape) - source_iter = source.create_iter(shape) - dtype = target.dtype - shapelen = len(shape) - while not target_iter.done(): - 
setslice_driver2.jit_merge_point(shapelen=shapelen, dtype=dtype) - target_iter.setitem(dtype.build_and_convert(space, source_iter.getitem())) + setslice_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) + if dtype.is_str_or_unicode(): + target_iter.setitem(dtype.coerce(space, source_iter.getitem())) + else: + target_iter.setitem(source_iter.getitem().convert_to(space, dtype)) target_iter.next() source_iter.next() return target @@ -434,43 +415,21 @@ ri.next() return res -flatiter_setitem_driver1 = jit.JitDriver(name = 'numpy_flatiter_setitem1', - greens = ['dtype'], - reds = 'auto') - -flatiter_setitem_driver2 = jit.JitDriver(name = 'numpy_flatiter_setitem2', +flatiter_setitem_driver = jit.JitDriver(name = 'numpy_flatiter_setitem', greens = ['dtype'], reds = 'auto') def flatiter_setitem(space, arr, val, start, step, length): dtype = arr.get_dtype() - if dtype.is_str_or_unicode(): - return flatiter_setitem_build_and_convert(space, arr, val, start, step, length) - return flatiter_setitem_to(space, arr, val, start, step, length) - -def flatiter_setitem_to(space, arr, val, start, step, length): - dtype = arr.get_dtype() arr_iter = arr.create_iter() val_iter = val.create_iter() arr_iter.next_skip_x(start) while length > 0: - flatiter_setitem_driver1.jit_merge_point(dtype=dtype) - arr_iter.setitem(val_iter.getitem().convert_to(space, dtype)) - # need to repeat i_nput values until all assignments are done - arr_iter.next_skip_x(step) - length -= 1 - val_iter.next() - # WTF numpy? - val_iter.reset() - -def flatiter_setitem_build_and_convert(space, arr, val, start, step, length): - dtype = arr.get_dtype() - arr_iter = arr.create_iter() - val_iter = val.create_iter() - arr_iter.next_skip_x(start) - while length > 0: - flatiter_setitem_driver2.jit_merge_point(dtype=dtype) - arr_iter.setitem(dtype.build_and_convert(space, val_iter.getitem())) + flatiter_setitem_driver.jit_merge_point(dtype=dtype) + if dtype.is_str_or_unicode(): + arr_iter.setitem(dtype.coerce(space, val_iter.getitem())) + else: + arr_iter.setitem(val_iter.getitem().convert_to(space, dtype)) # need to repeat i_nput values until all assignments are done arr_iter.next_skip_x(step) length -= 1 diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1744,23 +1744,6 @@ def bool(self, v): return bool(self.to_str(v)) - def build_and_convert(self, space, mydtype, box): - if isinstance(box, interp_boxes.W_StringBox): - return box - assert isinstance(box, interp_boxes.W_GenericBox) - if box.get_dtype(space).is_str_or_unicode(): - arg = box.get_dtype(space).itemtype.to_str(box) - else: - w_arg = box.descr_str(space) - arg = space.str_w(space.str(w_arg)) - arr = VoidBoxStorage(mydtype.size, mydtype) - i = 0 - for i in range(min(len(arg), mydtype.size)): - arr.storage[i] = arg[i] - for j in range(i + 1, mydtype.size): - arr.storage[j] = '\x00' - return interp_boxes.W_StringBox(arr, 0, arr.dtype) - def fill(self, storage, width, box, start, stop, offset): for i in xrange(start, stop, width): self._store(storage, i, offset, box, width) From noreply at buildbot.pypy.org Mon Feb 24 11:20:32 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Mon, 24 Feb 2014 11:20:32 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: untested additions to test_random.py Message-ID: <20140224102032.B40121C10A8@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r820:a6b28e92f1d9 Date: 2014-02-24 11:20 +0100 
http://bitbucket.org/pypy/stmgc/changeset/a6b28e92f1d9/ Log: untested additions to test_random.py diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -66,6 +66,12 @@ self.start_time = start_time self.objs_in_conflict = set() self.inevitable = False + self.created_in_this_transaction = set() + + def get_old_modified(self): + # returns only the ones that are modified and not from + # this transaction + return self.write_set.difference(self.created_in_this_transaction) def set_must_abort(self, objs_in_conflict=None): assert not self.inevitable @@ -96,9 +102,11 @@ self.read_set.add(r) return self.values[r] - def add_root(self, r, v): + def add_root(self, r, v, created_in_this_transaction): assert self.values.get(r, None) is None self.values[r] = v + if created_in_this_transaction: + self.created_in_this_transaction.add(r) def write_root(self, r, v): self.read_set.add(r) @@ -133,10 +141,17 @@ # del self.saved_roots[idx] # return r - # forget all non-pushed roots for now - assert self.roots_on_stack == self.roots_on_transaction_start - res = str(self.saved_roots[self.roots_on_stack:]) - del self.saved_roots[self.roots_on_stack:] + if self.transaction_state.inevitable: + # forget *all* roots + self.roots_on_stack = 0 + self.roots_on_transaction_start = 0 + res = str(self.saved_roots) + del self.saved_roots[:] + else: + # forget all non-pushed roots for now + assert self.roots_on_stack == self.roots_on_transaction_start + res = str(self.saved_roots[self.roots_on_stack:]) + del self.saved_roots[self.roots_on_stack:] return res def get_random_root(self): @@ -160,11 +175,13 @@ def reload_roots(self, ex): assert self.roots_on_stack == self.roots_on_transaction_start - ex.do("# reload roots on stack:") - for r in reversed(self.saved_roots[:self.roots_on_stack]): - ex.do('%s = self.pop_root()' % r) - for r in self.saved_roots[:self.roots_on_stack]: - ex.do('self.push_root(%s)' % r) + to_reload = self.saved_roots[:self.roots_on_stack] + if to_reload: + ex.do("# reload roots on stack:") + for r in reversed(to_reload): + ex.do('%s = self.pop_root()' % r) + for r in to_reload: + ex.do('self.push_root(%s)' % r) def start_transaction(self): assert self.transaction_state is None @@ -294,6 +311,11 @@ # ex.do('self.start_transaction()') thread_state.reload_roots(ex) + # + # assert that everything known is old: + old_objs = thread_state.saved_roots + for o in old_objs: + ex.do("assert not is_in_nursery(%s)" % o) class OpCommitTransaction(Operation): @@ -344,7 +366,7 @@ thread_state.push_roots(ex) ex.do('%s = stm_allocate(%s)' % (r, size)) - thread_state.transaction_state.add_root(r, 0) + thread_state.transaction_state.add_root(r, 0, True) thread_state.pop_roots(ex) thread_state.reload_roots(ex) @@ -356,7 +378,7 @@ r = global_state.get_new_root_name(True, num) thread_state.push_roots(ex) ex.do('%s = stm_allocate_refs(%s)' % (r, num)) - thread_state.transaction_state.add_root(r, "ffi.NULL") + thread_state.transaction_state.add_root(r, "ffi.NULL", True) thread_state.pop_roots(ex) thread_state.reload_roots(ex) @@ -373,7 +395,10 @@ class OpForgetRoot(Operation): def do(self, ex, global_state, thread_state): r = thread_state.forget_random_root() - ex.do('# forget %s' % r) + if thread_state.transaction_state.inevitable: + ex.do('# inevitable forget %s' % r) + else: + ex.do('# forget %s' % r) class OpWrite(Operation): def do(self, ex, global_state, thread_state): @@ -401,7 +426,7 @@ v = thread_state.get_random_root() else: v = 
ord(global_state.rnd.choice("abcdefghijklmnop")) - trs.write_root(r, v) + assert trs.write_root(r, v) is not None # aborts = trs.check_must_abort() if aborts: @@ -433,14 +458,17 @@ # it survived by being referenced by another saved root # if v is from a different transaction: # we fish its value from somewhere and add it to our known roots + global_trs = global_state.committed_transaction_state if v not in trs.values: # not from this transaction AND not known at the start of this # transaction - trs.add_root(v, global_state.committed_transaction_state.values[v]) + trs.add_root(v, global_trs.values[v], False) ex.do("# get %r from other thread" % v) - elif v not in global_state.committed_transaction_state.values: + elif v not in global_trs.values: + # created and forgotten earlier in this thread ex.do("# revive %r in this thread" % v) else: + # created in an earlier transaction, now also known here ex.do("# register %r in this thread" % v) # ex.do("%s = stm_get_ref(%s, %s)" % (v, r, offset)) @@ -463,6 +491,22 @@ else: ex.do("assert stm_get_obj_size(%s) == %s" % (r, size)) +class OpAssertModified(Operation): + def do(self, ex, global_state, thread_state): + trs = thread_state.transaction_state + modified = trs.get_old_modified() + ex.do("# modified = %s" % modified) + ex.do("modified = modified_objects()") + if not modified: + ex.do("assert modified == []") + else: + saved = [m for m in modified + if m in thread_state.saved_roots or m in global_state.prebuilt_roots] + ex.do("assert {%s}.issubset(set(modified))" % ( + ", ".join(saved) + )) + + class OpSwitchThread(Operation): def do(self, ex, global_state, thread_state, new_thread_state=None): if new_thread_state is None: @@ -511,12 +555,12 @@ for i in range(N_OBJECTS): r = global_state.get_new_root_name(False, "384") ex.do('%s = stm_allocate_old(384)' % r) - global_state.committed_transaction_state.write_root(r, 0) + global_state.committed_transaction_state.add_root(r, 0, False) global_state.prebuilt_roots.append(r) r = global_state.get_new_root_name(True, "50") ex.do('%s = stm_allocate_old_refs(50)' % r) - global_state.committed_transaction_state.write_root(r, "ffi.NULL") + global_state.committed_transaction_state.add_root(r, "ffi.NULL", False) global_state.prebuilt_roots.append(r) global_state.committed_transaction_state.write_set = set() global_state.committed_transaction_state.read_set = set() @@ -532,7 +576,8 @@ OpForgetRoot, OpBecomeInevitable, OpAssertSize, - # OpMinorCollect, + #OpAssertModified, + OpMinorCollect, ] for _ in range(200): # make sure we are in a transaction: From noreply at buildbot.pypy.org Mon Feb 24 12:10:05 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Feb 2014 12:10:05 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: progress Message-ID: <20140224111005.182901C0132@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r821:976dbb25fe76 Date: 2014-02-24 12:09 +0100 http://bitbucket.org/pypy/stmgc/changeset/976dbb25fe76/ Log: progress diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -49,7 +49,6 @@ /* We need to privatize the pages containing the object, if they are still SHARED_PAGE. The common case is that there is only one page in total. 
*/ - size_t obj_size = 0; uintptr_t first_page = ((uintptr_t)obj) / 4096UL; /* If the object is in the uniform pages of small objects @@ -60,12 +59,16 @@ pages_privatize(first_page, 1, true); } else { + char *realobj; + size_t obj_size; + uintptr_t end_page; + /* get the size of the object */ - obj_size = stmcb_size_rounded_up( - (struct object_s *)REAL_ADDRESS(STM_SEGMENT->segment_base, obj)); + realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + obj_size = stmcb_size_rounded_up((struct object_s *)realobj); /* that's the page *following* the last page with the object */ - uintptr_t end_page = (((uintptr_t)obj) + obj_size + 4095) / 4096UL; + end_page = (((uintptr_t)obj) + obj_size + 4095) / 4096UL; pages_privatize(first_page, end_page - first_page, true); } @@ -265,19 +268,16 @@ void stm_commit_transaction(void) { - minor_collection(); + assert(!_has_mutex()); + assert(STM_PSEGMENT->safe_point == SP_RUNNING); + + minor_collection(/*commit=*/ true); mutex_lock(); - - assert(STM_PSEGMENT->safe_point = SP_RUNNING); STM_PSEGMENT->safe_point = SP_SAFE_POINT_CAN_COLLECT; - restart: - abort_if_needed(); - /* wait until the other thread is at a safe-point */ - if (!try_wait_for_other_safe_points(SP_SAFE_POINT_CANNOT_COLLECT)) - goto restart; + wait_for_other_safe_points(SP_SAFE_POINT_CANNOT_COLLECT); /* the rest of this function runs either atomically without releasing the mutex, or it needs to restart. */ diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -96,10 +96,16 @@ "this segment has modified this old object". */ uint8_t write_lock_num; - /* The thread's safe-point state, one of the SP_xxx constants */ + /* The thread's safe-point state, one of the SP_xxx constants. The + thread is in a "safe point" if it is not concurrently doing any + change that might cause race conditions in other threads. A + thread may enter but not *leave* the safe point it is in without + getting hold of the mutex. Broadly speaking, any value other + than SP_RUNNING means a safe point of some kind. */ uint8_t safe_point; - /* The transaction status, one of the TS_xxx constants */ + /* The transaction status, one of the TS_xxx constants. This is + only accessed when we hold the mutex. */ uint8_t transaction_state; /* In case of abort, we restore the 'shadowstack' field. */ diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -12,7 +12,7 @@ largemalloc_init_arena(base, length); uninitialized_page_start = stm_object_pages + END_NURSERY_PAGE * 4096UL; - uninitialized_page_stop = stm_object_pages + (NB_PAGES - 1) * 4096UL; + uninitialized_page_stop = stm_object_pages + NB_PAGES * 4096UL; assert(GC_MEDIUM_REQUEST >= (1 << 8)); } @@ -70,31 +70,31 @@ } -#if 0 -static char *allocate_outside_nursery_large(uint64_t size) +static object_t *allocate_outside_nursery_large(uint64_t size) { - abort(); //XXX review - /* not thread-safe! Use only when holding the mutex */ - assert(_has_mutex()); + /* thread-safe: use the lock of pages.c to prevent any remapping + from occurring under our feet */ + mutex_pages_lock(); - /* Allocate the object with largemalloc.c from the lower addresses. - Assumes that 'size' is at least 256 bytes; it's needed for - the creation marker to uniquely identify this object */ - OPT_ASSERT(size >= (1 << 8)); - OPT_ASSERT((size & 7) == 0); - + /* Allocate the object with largemalloc.c from the lower addresses. 
*/ char *addr = large_malloc(size); if (addr + size > uninitialized_page_start) { uintptr_t npages; npages = (addr + size - uninitialized_page_start) / 4096UL; npages += GCPAGE_NUM_PAGES; + if (uninitialized_page_stop - uninitialized_page_start < + npages * 4096UL) { + stm_fatalerror("out of memory!\n"); /* XXX */ + } setup_N_pages(uninitialized_page_start, npages); uninitialized_page_start += npages * 4096UL; } - return addr; + + mutex_pages_unlock(); + + return (object_t *)(addr - stm_object_pages); } -#endif object_t *_stm_allocate_old(ssize_t size_rounded_up) { diff --git a/c7/stm/gcpage.h b/c7/stm/gcpage.h --- a/c7/stm/gcpage.h +++ b/c7/stm/gcpage.h @@ -27,7 +27,7 @@ static void setup_gcpage(void); static void teardown_gcpage(void); -//static char *allocate_outside_nursery_large(uint64_t size); +static object_t *allocate_outside_nursery_large(uint64_t size); static char *_allocate_small_slowpath(uint64_t size); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -47,13 +47,6 @@ return _is_in_nursery(obj); } -#if 0 -static bool _is_young(object_t *obj) -{ - return _is_in_nursery(obj); /* for now */ -} -#endif - /************************************************************/ @@ -74,122 +67,46 @@ } } -#if 0 static void minor_trace_if_young(object_t **pobj) { - abort(); //... /* takes a normal pointer to a thread-local pointer to an object */ object_t *obj = *pobj; if (obj == NULL) return; - if (!_is_young(obj)) + if (!_is_in_nursery(obj)) return; /* If the object was already seen here, its first word was set to GCWORD_MOVED. In that case, the forwarding location, i.e. where the object moved to, is stored in the second word in 'obj'. */ - char *realobj = (char *)REAL_ADDRESS(stm_object_pages, obj); - object_t **pforwarded_array = (object_t **)realobj; + object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)obj; if (pforwarded_array[0] == GCWORD_MOVED) { *pobj = pforwarded_array[1]; /* already moved */ return; } - /* We need to make a copy of this object. There are three different - places where the copy can be located, based on four criteria. - - 1) object larger than GC_MEDIUM_REQUEST => largemalloc.c - 2) otherwise, object from current transaction => page S - 3) otherwise, object with the write lock => page W - 4) otherwise, object without the write lock => page S - - The pages S or W above are both pages of uniform sizes obtained - from the end of the address space. The difference is that page S - can be shared, but page W needs to be privatized. Moreover, - cases 2 and 4 differ in the creation_marker they need to put, - which has a granularity of 256 bytes. + /* We need to make a copy of this object. It goes either in + a largemalloc.c-managed area, or if it's small enough, in + one of the small uniform pages from gcpage.c. */ + char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); size_t size = stmcb_size_rounded_up((struct object_s *)realobj); - uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - WRITELOCK_START; - uint8_t write_lock = write_locks[lock_idx]; object_t *nobj; - long i; if (1 /*size >= GC_MEDIUM_REQUEST*/) { - /* case 1: object is larger than GC_MEDIUM_REQUEST. + /* case 1: object is not small enough. Ask gcpage.c for an allocation via largemalloc. */ - char *copyobj; - copyobj = allocate_outside_nursery_large(size < 256 ? 
256 : size); // XXX temp + nobj = allocate_outside_nursery_large(size); - /* Copy the object to segment 0 (as a first step) */ - memcpy(copyobj, realobj, size); - ((struct object_s *)copyobj)->stm_flags |= GCFLAG_WRITE_BARRIER_CALLED; - - nobj = (object_t *)(copyobj - stm_object_pages); - - if (write_lock == 0) { - /* The object is not write-locked, so any copy should be - identical. Now some pages of the destination might be - private already (because of some older action); if so, we - need to replicate the corresponding parts. The hope is - that it's relatively uncommon. */ - uintptr_t p, pend = ((uintptr_t)(copyobj + size - 1)) & ~4095; - for (p = (uintptr_t)copyobj; p < pend; p = (p + 4096) & ~4095) { - minor_copy_in_page_to_other_segments(p, 4096 - (p & 4095)); - } - minor_copy_in_page_to_other_segments(p, ((size-1) & 4095) + 1); - } - else { - /* The object has the write lock. We need to privatize the - pages, and repeat the write lock in the new copy. */ - uintptr_t dataofs = (uintptr_t)nobj; - uintptr_t pagenum = dataofs / 4096UL; - uintptr_t lastpage= (dataofs + size - 1) / 4096UL; - pages_privatize(pagenum, lastpage - pagenum + 1, false); - - lock_idx = (dataofs >> 4) - WRITELOCK_START; - assert(write_locks[lock_idx] == 0); - write_locks[lock_idx] = write_lock; - - /* Then, for each segment > 0, we need to repeat the - memcpy() done above. XXX This could be optimized if - NB_SEGMENTS > 2 by reading all non-written copies from the - same segment, instead of reading really all segments. */ - for (i = 1; i < NB_SEGMENTS; i++) { - uintptr_t diff = get_segment_base(i) - stm_object_pages; - memcpy(copyobj + diff, realobj + diff, size); - ((struct object_s *)(copyobj + diff))->stm_flags |= - GCFLAG_WRITE_BARRIER_CALLED; - } - } - - /* If the source creation marker is CM_CURRENT_TRANSACTION_IN_NURSERY, - write CM_CURRENT_TRANSACTION_OUTSIDE_NURSERY in the destination */ - uintptr_t cmaddr = ((uintptr_t)obj) >> 8; - - for (i = 0; i < NB_SEGMENTS; i++) { - char *absaddr = get_segment_base(i) + cmaddr; - if (((struct stm_creation_marker_s *)absaddr)->cm != 0) { - uintptr_t ncmaddr = ((uintptr_t)nobj) >> 8; - absaddr = get_segment_base(i) + ncmaddr; - ((struct stm_creation_marker_s *)absaddr)->cm = - CM_CURRENT_TRANSACTION_OUTSIDE_NURSERY; - } - } + /* Copy the object */ + char *realnobj = REAL_ADDRESS(STM_SEGMENT->segment_base, nobj); + memcpy(realnobj, realobj, size); } else { - /* cases 2 to 4 */ + /* case "small enough" */ abort(); //... - allocate_outside_nursery_small(small_alloc_shared, size); - allocate_outside_nursery_small(small_alloc_privtz, size); - } - - /* Copy the read markers */ - for (i = 0; i < NB_SEGMENTS; i++) { - uint8_t rm = get_segment_base(i)[((uintptr_t)obj) >> 4]; - get_segment_base(i)[((uintptr_t)nobj) >> 4] = rm; } /* Done copying the object. 
*/ @@ -199,173 +116,87 @@ *pobj = nobj; /* Must trace the object later */ - LIST_APPEND(old_objects_pointing_to_young, nobj); + LIST_APPEND(STM_PSEGMENT->old_objects_pointing_to_nursery, nobj); } static void collect_roots_in_nursery(void) { - stm_thread_local_t *tl = stm_all_thread_locals; - do { - object_t **current = tl->shadowstack; - object_t **base = tl->shadowstack_base; - while (current-- != base) { - minor_trace_if_young(current); - } - tl = tl->next; - } while (tl != stm_all_thread_locals); -} - -static void trace_and_drag_out_of_nursery(object_t *obj) -{ - long i; - for (i = 0; i < NB_SEGMENTS; i++) { - struct object_s *realobj = - (struct object_s *)REAL_ADDRESS(get_segment_base(i), obj); - - realobj->stm_flags |= GCFLAG_WRITE_BARRIER; - - stmcb_trace((struct object_s *)realobj, &minor_trace_if_young); - - if (i == 0 && is_in_shared_pages(obj)) { - /* the object needs fixing only in one copy, because all copies - are shared and identical. */ - break; - } + stm_thread_local_t *tl = STM_SEGMENT->running_thread; + object_t **current = tl->shadowstack; + object_t **base = tl->shadowstack_base; + while (current-- != base) { + minor_trace_if_young(current); } } -static void collect_oldrefs_to_nursery(struct list_s *lst) +static void collect_oldrefs_to_nursery(void) { + struct list_s *lst = STM_PSEGMENT->old_objects_pointing_to_nursery; + while (!list_is_empty(lst)) { object_t *obj = (object_t *)list_pop_item(lst); assert(!_is_in_nursery(obj)); - /* We must have GCFLAG_WRITE_BARRIER_CALLED so far. If we - don't, it's because the same object was stored in several - segment's old_objects_pointing_to_young. It's fine to - ignore duplicates. */ - abort();//... - //if ((obj->stm_flags & GCFLAG_WRITE_BARRIER_CALLED) == 0) - // continue; - - /* The flag GCFLAG_WRITE_BARRIER_CALLED is going to be removed: - no live object should have this flag set after a nursery - collection. It is done in either one or NB_SEGMENTS copies. */ + /* We must not have GCFLAG_WRITE_BARRIER so far. Add it now. */ + assert(!(obj->stm_flags & GCFLAG_WRITE_BARRIER)); + obj->stm_flags |= GCFLAG_WRITE_BARRIER; /* Trace the 'obj' to replace pointers to nursery with pointers outside the nursery, possibly forcing nursery objects out - and adding them to 'old_objects_pointing_to_young' as well. */ - trace_and_drag_out_of_nursery(obj); + and adding them to 'old_objects_pointing_to_nursery' as well. */ + char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + stmcb_trace((struct object_s *)realobj, &minor_trace_if_young); } } static void reset_nursery(void) { - abort();//... - /* reset the global amount-of-nursery-used-so-far */ - nursery_ctl.used = nursery_ctl.initial_value_of_used; + /* reset the nursery by zeroing it */ + size_t size; + char *realnursery; - long i; - for (i = 0; i < NB_SEGMENTS; i++) { - struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i); - /* no race condition here, because all other threads are paused - in safe points, so cannot be e.g. 
in _stm_allocate_slowpath() */ - uintptr_t old_end = other_pseg->real_nursery_section_end; - other_pseg->real_nursery_section_end = 0; - other_pseg->pub.v_nursery_section_end = 0; + realnursery = REAL_ADDRESS(STM_SEGMENT->segment_base, _stm_nursery_start); + size = STM_SEGMENT->nursery_current - (stm_char *)_stm_nursery_start; + memset(realnursery, 0, size); - /* we don't need to actually reset the read markers, unless - we run too many nursery collections in the same transaction: - in the normal case it is enough to increase - 'transaction_read_version' without changing - 'min_read_version_outside_nursery'. - */ - if (other_pseg->transaction_state == TS_NONE) { - /* no transaction running now, nothing to do */ - } - else if (other_pseg->pub.transaction_read_version < 0xff) { - other_pseg->pub.transaction_read_version++; - abort();//... - /*assert(0 < other_pseg->min_read_version_outside_nursery && - other_pseg->min_read_version_outside_nursery - < other_pseg->pub.transaction_read_version);*/ - } - else { - /* however, when the value 0xff is reached, we are stuck - and we need to clean all the nursery read markers. - We'll be un-stuck when this transaction finishes. */ - char *read_markers = REAL_ADDRESS(other_pseg->pub.segment_base, - NURSERY_START >> 4); - memset(read_markers, 0, NURSERY_SIZE >> 4); - } + STM_SEGMENT->nursery_current = (stm_char *)_stm_nursery_start; +} - /* reset the creation markers */ - if (old_end > NURSERY_START) { - char *creation_markers = REAL_ADDRESS(other_pseg->pub.segment_base, - NURSERY_START >> 8); - assert(old_end <= NURSERY_END); - memset(creation_markers, 0, (old_end - NURSERY_START) >> 8); - } - else { - assert(old_end == 0 || old_end == NURSERY_START); - } - } -} -#endif - -static void minor_collection(void) +static void minor_collection(bool commit) { assert(!_has_mutex()); abort_if_needed(); - dprintf(("minor_collection\n")); + /* We must move out of the nursery any object found within the + nursery. All objects touched are either from the current + transaction, or are from 'old_objects_pointing_to_young'. + In all cases, we should only read and change objects belonging + to the current segment. - abort();//... -#if 0 + XXX improve: it might be possible to run this function in + a safe-point but without the mutex, if we are careful + */ - /* List of what we need to do and invariants we need to preserve - ------------------------------------------------------------- + dprintf(("minor_collection commit=%d\n", (int)commit)); - We must move out of the nursery any object found within the - nursery. This requires either one or NB_SEGMENTS copies, - depending on the current write-state of the object. - - We need to move the mark stored in the write_locks, read_markers - and creation_markers arrays. The creation_markers need some care - because they work at a coarser granularity of 256 bytes, so - objects with an "on" mark should not be moved too close to - objects with an "off" mark and vice-versa. - - Then we must trace (= look inside) some objects outside the - nursery, and fix any pointer found that goes to a nursery object. - This tracing itself needs to be done either once or NB_SEGMENTS - times, depending on whether the object is fully in shared pages - or not. We assume that 'stmcb_size_rounded_up' produce the same - results on all copies (i.e. don't depend on modifiable - information). 
- */ + if (STM_PSEGMENT->old_objects_pointing_to_nursery == NULL) + STM_PSEGMENT->old_objects_pointing_to_nursery = list_create(); collect_roots_in_nursery(); - long i; - for (i = 0; i < NB_SEGMENTS; i++) { - struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i); - collect_oldrefs_to_nursery(other_pseg->old_objects_pointing_to_young); - } - - collect_oldrefs_to_nursery(old_objects_pointing_to_young); + collect_oldrefs_to_nursery(); reset_nursery(); - pages_make_shared_again(FIRST_NURSERY_PAGE, NB_NURSERY_PAGES); -#endif + assert(list_is_empty(STM_PSEGMENT->old_objects_pointing_to_nursery)); + if (!commit && STM_PSEGMENT->overflow_objects_pointing_to_nursery == NULL) + STM_PSEGMENT->overflow_objects_pointing_to_nursery = list_create(); } - void stm_collect(long level) { assert(level == 0); - minor_collection(); + minor_collection(/*commit=*/ false); } @@ -391,7 +222,7 @@ return (object_t *)p; } - minor_collection(); + minor_collection(/*commit=*/ false); goto restart; } diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -5,4 +5,5 @@ static uint32_t highest_overflow_number; +static void minor_collection(bool commit); static void check_nursery_at_transaction_start(void) __attribute__((unused)); diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -3,12 +3,35 @@ #endif +/************************************************************/ + +static union { + uint8_t mutex_pages; + char reserved[64]; +} pages_ctl __attribute__((aligned(64))); + +static void mutex_pages_lock(void) +{ + while (__sync_lock_test_and_set(&pages_ctl.mutex_pages, 1) != 0) { + spin_loop(); + } +} + +static void mutex_pages_unlock(void) +{ + __sync_lock_release(&pages_ctl.mutex_pages); +} + +/************************************************************/ + + static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count) { /* call remap_file_pages() to make all pages in the range(pagenum, pagenum+count) refer to the same physical range of pages from segment 0. */ uintptr_t i; + mutex_pages_lock(); for (i = 1; i < NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); int res = remap_file_pages(segment_base + pagenum * 4096UL, @@ -21,6 +44,7 @@ } for (i = 0; i < count; i++) flag_page_private[pagenum + i] = SHARED_PAGE; + mutex_pages_unlock(); } #if 0 @@ -45,8 +69,7 @@ } #endif -static void privatize_range_and_unlock(uintptr_t pagenum, uintptr_t count, - bool full) +static void privatize_range(uintptr_t pagenum, uintptr_t count, bool full) { ssize_t pgoff1 = pagenum; ssize_t pgoff2 = pagenum + NB_PAGES; @@ -73,61 +96,41 @@ pagecopy(localpg + 4096 * (count-1), otherpg + 4096 * (count-1)); } write_fence(); - for (i = 0; i < count; i++) { - assert(flag_page_private[pagenum + i] == REMAPPING_PAGE); - flag_page_private[pagenum + i] = PRIVATE_PAGE; - } + memset(flag_page_private + pagenum, PRIVATE_PAGE, count); } static void _pages_privatize(uintptr_t pagenum, uintptr_t count, bool full) { - uintptr_t page_start_range = pagenum; - uintptr_t pagestop = pagenum + count; - while (flag_page_private[pagenum + count - 1] == PRIVATE_PAGE) { if (!--count) return; } + mutex_pages_lock(); + + uintptr_t page_start_range = pagenum; + uintptr_t pagestop = pagenum + count; + for (; pagenum < pagestop; pagenum++) { -#ifdef HAVE_FULL_EXCHANGE_INSN - /* use __sync_lock_test_and_set() as a cheaper alternative to - __sync_bool_compare_and_swap(). 
*/ - int prev = __sync_lock_test_and_set(&flag_page_private[pagenum], - REMAPPING_PAGE); - assert(prev != FREE_PAGE); - if (prev == PRIVATE_PAGE) { - flag_page_private[pagenum] = PRIVATE_PAGE; - } - bool was_shared = (prev == SHARED_PAGE); -#else - bool was_shared = __sync_bool_compare_and_swap( - &flag_page_private[pagenum + cnt1], - SHARED_PAGE, REMAPPING_PAGE); -#endif - if (!was_shared) { + uint8_t prev = flag_page_private[pagenum]; + if (prev == SHARED_PAGE) { if (pagenum > page_start_range) { - privatize_range_and_unlock(page_start_range, - pagenum - page_start_range, full); + privatize_range(page_start_range, + pagenum - page_start_range, full); } page_start_range = pagenum + 1; - - while (1) { - uint8_t state; - state = ((uint8_t volatile *)flag_page_private)[pagenum]; - if (state != REMAPPING_PAGE) { - assert(state == PRIVATE_PAGE); - break; - } - spin_loop(); - } + } + else { + assert(prev == PRIVATE_PAGE); } } if (pagenum > page_start_range) { - privatize_range_and_unlock(page_start_range, - pagenum - page_start_range, full); + privatize_range(page_start_range, + pagenum - page_start_range, full); } + + mutex_pages_unlock(); } #if 0 diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -30,4 +30,7 @@ _pages_privatize(pagenum, count, full); } +static void mutex_pages_lock(void); +static void mutex_pages_unlock(void); + //static bool is_in_shared_pages(object_t *obj); diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -62,6 +62,9 @@ perror("pthread_mutex_lock"); abort(); } + + if (STM_PSEGMENT->transaction_state == TS_MUST_ABORT) + abort_with_mutex(); } static inline void mutex_unlock(void) @@ -77,7 +80,13 @@ static inline bool _has_mutex(void) { - return pthread_mutex_trylock(&sync_ctl.global_mutex) == EBUSY; + if (pthread_mutex_trylock(&sync_ctl.global_mutex) == EBUSY) { + return true; + } + else { + pthread_mutex_unlock(&sync_ctl.global_mutex); + return false; + } } static inline void cond_wait(void) @@ -197,38 +206,33 @@ #endif -static bool try_wait_for_other_safe_points(int requested_safe_point_kind) +static void wait_for_other_safe_points(int requested_safe_point_kind) { - /* Must be called with the mutex. If all other threads are in a - safe point of at least the requested kind, returns true. Otherwise, - asks them to enter a safe point, issues a cond_wait(), and returns - false; you can call repeatedly this function in this case. + /* Must be called with the mutex. When all other threads are in a + safe point of at least the requested kind, returns. Otherwise, + asks them to enter a safe point, issues a cond_wait(), and wait. - When this function returns true, the other threads are all - blocked at safe points as requested. They may be either in their - own cond_wait(), or running at SP_NO_TRANSACTION, in which case - they should not do anything related to stm until the next time - they call mutex_lock(). + When this function returns, the other threads are all blocked at + safe points as requested. They may be either in their own + cond_wait(), or running at SP_NO_TRANSACTION, in which case they + should not do anything related to stm until the next time they + call mutex_lock(). The next time we unlock the mutex (with mutex_unlock() or cond_wait()), they will proceed. 
This function requires that the calling thread is in a safe-point right now, so there is no deadlock if one thread calls - try_wait_for_other_safe_points() while another is currently blocked + wait_for_other_safe_points() while another is currently blocked in the cond_wait() in this same function. */ - abort();//... -#if 0 + + restart: assert(_has_mutex()); assert(STM_PSEGMENT->safe_point == SP_SAFE_POINT_CAN_COLLECT); long i; - bool must_wait = false; for (i = 0; i < NB_SEGMENTS; i++) { - if (i == STM_SEGMENT->segment_num) - continue; /* ignore myself */ - /* If the other thread is SP_NO_TRANSACTION, then it can be ignored here: as long as we have the mutex, it will remain SP_NO_TRANSACTION. If it is already at a suitable safe point, @@ -241,32 +245,18 @@ (requested_safe_point_kind == SP_SAFE_POINT_CAN_COLLECT && other_pseg->safe_point == SP_SAFE_POINT_CANNOT_COLLECT)) { - /* we need to wait for this thread. Use NSE_SIGNAL to - ask it to enter a safe-point soon. */ - other_pseg->pub.v_nursery_section_end = NSE_SIGNAL; - must_wait = true; + /* we need to wait for this thread. Use NSE_SIGNAL to ask + it (and possibly all other threads in the same case) to + enter a safe-point soon. */ + _stm_nursery_end = NSE_SIGNAL; + cond_wait(); + goto restart; } } - if (must_wait) { - cond_wait(); - return false; - } - /* done! All NSE_SIGNAL threads become NSE_SIGNAL_DONE now, which - mean they will actually run again the next time they grab the - mutex. */ - for (i = 0; i < NB_SEGMENTS; i++) { - if (i == STM_SEGMENT->segment_num) - continue; /* ignore myself */ - - struct stm_segment_info_s *other_seg = get_segment(i); - if (other_seg->v_nursery_section_end == NSE_SIGNAL) - other_seg->v_nursery_section_end = NSE_SIGNAL_DONE; - } + /* all threads are at a safe-point now. */ cond_broadcast(); /* to wake up the other threads, but later, when they get the mutex again */ - return true; -#endif } void _stm_collectable_safe_point(void) @@ -274,7 +264,7 @@ /* If nursery_section_end was set to NSE_SIGNAL by another thread, we end up here as soon as we try to call stm_allocate() or do a call to stm_safe_point(). - See try_wait_for_other_safe_points() for details. + See wait_for_other_safe_points() for details. */ mutex_lock(); assert(STM_PSEGMENT->safe_point == SP_RUNNING); diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -16,4 +16,4 @@ static void release_thread_segment(stm_thread_local_t *tl); /* see the source for an exact description */ -static bool try_wait_for_other_safe_points(int requested_safe_point_kind); +static void wait_for_other_safe_points(int requested_safe_point_kind); From noreply at buildbot.pypy.org Mon Feb 24 12:43:23 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 24 Feb 2014 12:43:23 +0100 (CET) Subject: [pypy-commit] pypy default: fix dtype field access case Message-ID: <20140224114323.816CD1C0907@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69352:a703a516c204 Date: 2014-02-24 06:42 -0500 http://bitbucket.org/pypy/pypy/changeset/a703a516c204/ Log: fix dtype field access case diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -289,7 +289,7 @@ return space.w_False def descr_getitem(self, space, w_item): - if self.fields is None: + if not self.fields: raise OperationError(space.w_KeyError, space.wrap( "There are no fields in dtype %s." 
% self.name)) if space.isinstance_w(w_item, space.w_basestring): @@ -311,7 +311,7 @@ "Field named '%s' not found." % item)) def descr_len(self, space): - if self.fields is None: + if not self.fields: return space.wrap(0) return space.wrap(len(self.fields)) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -65,13 +65,10 @@ assert dtype(None) is dtype(float) - e = dtype('int8') - exc = raises(KeyError, "e[2]") - assert exc.value.message == "There are no fields in dtype int8." - exc = raises(KeyError, "e['z']") - assert exc.value.message == "There are no fields in dtype int8." - exc = raises(KeyError, "e[None]") - assert exc.value.message == "There are no fields in dtype int8." + for d in [dtype('i4')]: + for key in ["d[2]", "d['z']", "d[None]"]: + exc = raises(KeyError, key) + assert exc.value[0] == "There are no fields in dtype %s." % str(d) exc = raises(TypeError, dtype, (1, 2)) assert exc.value[0] == 'data type not understood' diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -118,14 +118,12 @@ def __init__(self, native=True): self.native = native - def _unimplemented_ufunc(self, *args): - raise NotImplementedError + def __repr__(self): + return self.__class__.__name__ def malloc(self, size): return alloc_raw_storage(size, track_allocation=False, zero=True) - def __repr__(self): - return self.__class__.__name__ class Primitive(object): _mixin_ = True From noreply at buildbot.pypy.org Mon Feb 24 13:51:00 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Mon, 24 Feb 2014 13:51:00 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: switch from classes to functions because... Message-ID: <20140224125100.09CEC1C03FC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r822:ccff4094a83c Date: 2014-02-24 13:51 +0100 http://bitbucket.org/pypy/stmgc/changeset/ccff4094a83c/ Log: switch from classes to functions because... 
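A rough sketch of the pattern this diff applies throughout test_random.py,
using stand-in names (OpExample, op_example, ex and the stub arguments are
illustrative, not the actual stmgc test code): each one-method Operation
subclass becomes a plain module-level function with the same signature, and
the driver's weighted action list holds the functions directly.

    import random

    # Before the change: every random-test step was a class whose single
    # method, do(), emitted the code for that step.
    class OpExample(object):
        def do(self, ex, global_state, thread_state):
            ex.append('stm_allocate(16)')

    # After the change: the same step is a plain function.
    def op_example(ex, global_state, thread_state):
        ex.append('stm_allocate(16)')

    # The driver keeps the functions themselves in its weighted action list
    # and calls them directly, with no instantiation per step.
    ex, rnd = [], random.Random(1010)
    possible_actions = [op_example, op_example]
    action = rnd.choice(possible_actions)
    action(ex, None, None)        # previously: action().do(ex, None, None)
    assert ex == ['stm_allocate(16)']

The weighting convention (listing an operation several times in
possible_actions to make it more likely) carries over unchanged; only the
extra class/instance layer per step goes away.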
diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -1,8 +1,6 @@ from support import * import sys, random import py -from cStringIO import StringIO - @@ -299,242 +297,234 @@ trs.objs_in_conflict) -# ========== STM OPERATIONS ========== +################################################################### +################################################################### +######################## STM OPERATIONS ########################### +################################################################### +################################################################### -class Operation(object): - def do(self, ex, global_state, thread_state): - raise NotImplemented -class OpStartTransaction(Operation): - def do(self, ex, global_state, thread_state): - thread_state.start_transaction() - # - ex.do('self.start_transaction()') - thread_state.reload_roots(ex) - # - # assert that everything known is old: - old_objs = thread_state.saved_roots - for o in old_objs: - ex.do("assert not is_in_nursery(%s)" % o) +def op_start_transaction(ex, global_state, thread_state): + thread_state.start_transaction() + # + ex.do('self.start_transaction()') + thread_state.reload_roots(ex) + # + # assert that everything known is old: + old_objs = thread_state.saved_roots + for o in old_objs: + ex.do("assert not is_in_nursery(%s)" % o) -class OpCommitTransaction(Operation): - def do(self, ex, global_state, thread_state): - # - # push all new roots - ex.do("# push new objs before commit:") - thread_state.push_roots(ex) - aborts = thread_state.commit_transaction() - # - if aborts: - thread_state.abort_transaction() - ex.do(raising_call(aborts, "self.commit_transaction")) +def op_commit_transaction(ex, global_state, thread_state): + # + # push all new roots + ex.do("# push new objs before commit:") + thread_state.push_roots(ex) + aborts = thread_state.commit_transaction() + # + if aborts: + thread_state.abort_transaction() + ex.do(raising_call(aborts, "self.commit_transaction")) -class OpAbortTransaction(Operation): - def do(self, ex, global_state, thread_state): - trs = thread_state.transaction_state - if trs.inevitable: - return - trs.set_must_abort() +def op_abort_transaction(ex, global_state, thread_state): + trs = thread_state.transaction_state + if trs.inevitable: + return + trs.set_must_abort() + thread_state.abort_transaction() + ex.do('self.abort_transaction()') + +def op_become_inevitable(ex, global_state, thread_state): + trs = thread_state.transaction_state + global_state.check_if_can_become_inevitable(trs) + + thread_state.push_roots(ex) + ex.do(raising_call(trs.check_must_abort(), + "stm_become_inevitable")) + if trs.check_must_abort(): thread_state.abort_transaction() - ex.do('self.abort_transaction()') - -class OpBecomeInevitable(Operation): - def do(self, ex, global_state, thread_state): - trs = thread_state.transaction_state - global_state.check_if_can_become_inevitable(trs) - - thread_state.push_roots(ex) - ex.do(raising_call(trs.check_must_abort(), - "stm_become_inevitable")) - if trs.check_must_abort(): - thread_state.abort_transaction() - else: - trs.inevitable = True - thread_state.pop_roots(ex) - thread_state.reload_roots(ex) - - -class OpAllocate(Operation): - def do(self, ex, global_state, thread_state): - size = global_state.rnd.choice([ - "16", - "SOME_MEDIUM_SIZE+16", - #"SOME_LARGE_SIZE+16", - ]) - r = global_state.get_new_root_name(False, size) - thread_state.push_roots(ex) - - ex.do('%s = stm_allocate(%s)' % (r, 
size)) - thread_state.transaction_state.add_root(r, 0, True) - - thread_state.pop_roots(ex) - thread_state.reload_roots(ex) - thread_state.register_root(r) - -class OpAllocateRef(Operation): - def do(self, ex, global_state, thread_state): - num = str(global_state.rnd.randrange(1, 100)) - r = global_state.get_new_root_name(True, num) - thread_state.push_roots(ex) - ex.do('%s = stm_allocate_refs(%s)' % (r, num)) - thread_state.transaction_state.add_root(r, "ffi.NULL", True) - - thread_state.pop_roots(ex) - thread_state.reload_roots(ex) - thread_state.register_root(r) - -class OpMinorCollect(Operation): - def do(self, ex, global_state, thread_state): - thread_state.push_roots(ex) - ex.do('stm_minor_collect()') + else: + trs.inevitable = True thread_state.pop_roots(ex) thread_state.reload_roots(ex) -class OpForgetRoot(Operation): - def do(self, ex, global_state, thread_state): - r = thread_state.forget_random_root() - if thread_state.transaction_state.inevitable: - ex.do('# inevitable forget %s' % r) +def op_allocate(ex, global_state, thread_state): + size = global_state.rnd.choice([ + "16", + "SOME_MEDIUM_SIZE+16", + #"SOME_LARGE_SIZE+16", + ]) + r = global_state.get_new_root_name(False, size) + thread_state.push_roots(ex) + + ex.do('%s = stm_allocate(%s)' % (r, size)) + thread_state.transaction_state.add_root(r, 0, True) + + thread_state.pop_roots(ex) + thread_state.reload_roots(ex) + thread_state.register_root(r) + +def op_allocate_ref(ex, global_state, thread_state): + num = str(global_state.rnd.randrange(1, 100)) + r = global_state.get_new_root_name(True, num) + thread_state.push_roots(ex) + ex.do('%s = stm_allocate_refs(%s)' % (r, num)) + thread_state.transaction_state.add_root(r, "ffi.NULL", True) + + thread_state.pop_roots(ex) + thread_state.reload_roots(ex) + thread_state.register_root(r) + +def op_minor_collect(ex, global_state, thread_state): + thread_state.push_roots(ex) + ex.do('stm_minor_collect()') + thread_state.pop_roots(ex) + thread_state.reload_roots(ex) + + +def op_forget_root(ex, global_state, thread_state): + r = thread_state.forget_random_root() + if thread_state.transaction_state.inevitable: + ex.do('# inevitable forget %s' % r) + else: + ex.do('# forget %s' % r) + +def op_write(ex, global_state, thread_state): + r = thread_state.get_random_root() + trs = thread_state.transaction_state + is_ref = global_state.has_ref_type(r) + # + # check for possible write-write conflict: + was_written = False + try: + # HACK to avoid calling write_root() just yet because we have to + # undo it in case of the exception :( + was_written = r in trs.write_set + trs.write_set.add(r) + global_state.check_for_write_write_conflicts(trs) + except WriteWriteConflictNotTestable: + if not was_written: + trs.write_set.remove(r) + ex.do("# writing to %s produces an untestable write-write" % r) + ex.do("# conflict between an inevitable and a normal transaction :(") + return + # + # decide on a value to write + if is_ref: + v = thread_state.get_random_root() + else: + v = ord(global_state.rnd.choice("abcdefghijklmnop")) + assert trs.write_root(r, v) is not None + # + aborts = trs.check_must_abort() + if aborts: + thread_state.abort_transaction() + offset = global_state.get_root_size(r) + " - 1" + if is_ref: + ex.do(raising_call(aborts, "stm_set_ref", r, offset, v)) + if not aborts: + ex.do(raising_call(False, "stm_set_ref", r, "0", v)) + else: + ex.do(raising_call(aborts, "stm_set_char", r, repr(chr(v)), offset)) + if not aborts: + ex.do(raising_call(False, "stm_set_char", r, repr(chr(v)), "HDR")) + 
+def op_read(ex, global_state, thread_state): + r = thread_state.get_random_root() + trs = thread_state.transaction_state + v = trs.read_root(r) + # + offset = global_state.get_root_size(r) + " - 1" + if global_state.has_ref_type(r): + if v in thread_state.saved_roots or v in global_state.prebuilt_roots: + # v = root known to this transaction; or prebuilt + ex.do("assert stm_get_ref(%s, %s) == %s" % (r, offset, v)) + ex.do("assert stm_get_ref(%s, 0) == %s" % (r, v)) + elif v != "ffi.NULL": + # if v came from this transaction: re-add it to saved_roots because + # it survived by being referenced by another saved root + # if v is from a different transaction: + # we fish its value from somewhere and add it to our known roots + global_trs = global_state.committed_transaction_state + if v not in trs.values: + # not from this transaction AND not known at the start of this + # transaction + trs.add_root(v, global_trs.values[v], False) + ex.do("# get %r from other thread" % v) + elif v not in global_trs.values: + # created and forgotten earlier in this thread + ex.do("# revive %r in this thread" % v) + else: + # created in an earlier transaction, now also known here + ex.do("# register %r in this thread" % v) + # + ex.do("%s = stm_get_ref(%s, %s)" % (v, r, offset)) + ex.do("%s = stm_get_ref(%s, 0)" % (v, r)) + thread_state.register_root(v) else: - ex.do('# forget %s' % r) + # v is NULL; we still need to read it (as it should be in the read-set): + ex.do("assert stm_get_ref(%s, %s) == %s" % (r,offset,v)) + ex.do("assert stm_get_ref(%s, 0) == %s" % (r,v)) + else: + ex.do("assert stm_get_char(%s, %s) == %s" % (r, offset, repr(chr(v)))) + ex.do("assert stm_get_char(%s, HDR) == %s" % (r, repr(chr(v)))) -class OpWrite(Operation): - def do(self, ex, global_state, thread_state): - r = thread_state.get_random_root() - trs = thread_state.transaction_state - is_ref = global_state.has_ref_type(r) +def op_assert_size(ex, global_state, thread_state): + r = thread_state.get_random_root() + size = global_state.get_root_size(r) + if global_state.has_ref_type(r): + ex.do("assert stm_get_obj_size(%s) == %s" % (r, size + " * WORD + HDR")) + else: + ex.do("assert stm_get_obj_size(%s) == %s" % (r, size)) + +def op_assert_modified(ex, global_state, thread_state): + trs = thread_state.transaction_state + modified = trs.get_old_modified() + ex.do("# modified = %s" % modified) + ex.do("modified = modified_objects()") + if not modified: + ex.do("assert modified == []") + else: + saved = [m for m in modified + if m in thread_state.saved_roots or m in global_state.prebuilt_roots] + ex.do("assert {%s}.issubset(set(modified))" % ( + ", ".join(saved) + )) + + +def op_switch_thread(ex, global_state, thread_state, new_thread_state=None): + if new_thread_state is None: + new_thread_state = global_state.rnd.choice(global_state.thread_states) + + if new_thread_state != thread_state: + if thread_state.transaction_state: + thread_state.push_roots(ex) + ex.do('#') # - # check for possible write-write conflict: - was_written = False - try: - # HACK to avoid calling write_root() just yet because we have to - # undo it in case of the exception :( - was_written = r in trs.write_set - trs.write_set.add(r) - global_state.check_for_write_write_conflicts(trs) - except WriteWriteConflictNotTestable: - if not was_written: - trs.write_set.remove(r) - ex.do("# writing to %s produces an untestable write-write" % r) - ex.do("# conflict between an inevitable and a normal transaction :(") - return + trs = new_thread_state.transaction_state + conflicts 
= trs is not None and trs.check_must_abort() + ex.thread_num = new_thread_state.num # - # decide on a value to write - if is_ref: - v = thread_state.get_random_root() + ex.do(raising_call(conflicts, + "self.switch", new_thread_state.num)) + if conflicts: + new_thread_state.abort_transaction() else: - v = ord(global_state.rnd.choice("abcdefghijklmnop")) - assert trs.write_root(r, v) is not None - # - aborts = trs.check_must_abort() - if aborts: - thread_state.abort_transaction() - offset = global_state.get_root_size(r) + " - 1" - if is_ref: - ex.do(raising_call(aborts, "stm_set_ref", r, offset, v)) - if not aborts: - ex.do(raising_call(False, "stm_set_ref", r, "0", v)) - else: - ex.do(raising_call(aborts, "stm_set_char", r, repr(chr(v)), offset)) - if not aborts: - ex.do(raising_call(False, "stm_set_char", r, repr(chr(v)), "HDR")) + new_thread_state.pop_roots(ex) + new_thread_state.reload_roots(ex) -class OpRead(Operation): - def do(self, ex, global_state, thread_state): - r = thread_state.get_random_root() - trs = thread_state.transaction_state - v = trs.read_root(r) - # - offset = global_state.get_root_size(r) + " - 1" - if global_state.has_ref_type(r): - if v in thread_state.saved_roots or v in global_state.prebuilt_roots: - # v = root known to this transaction; or prebuilt - ex.do("assert stm_get_ref(%s, %s) == %s" % (r, offset, v)) - ex.do("assert stm_get_ref(%s, 0) == %s" % (r, v)) - elif v != "ffi.NULL": - # if v came from this transaction: re-add it to saved_roots because - # it survived by being referenced by another saved root - # if v is from a different transaction: - # we fish its value from somewhere and add it to our known roots - global_trs = global_state.committed_transaction_state - if v not in trs.values: - # not from this transaction AND not known at the start of this - # transaction - trs.add_root(v, global_trs.values[v], False) - ex.do("# get %r from other thread" % v) - elif v not in global_trs.values: - # created and forgotten earlier in this thread - ex.do("# revive %r in this thread" % v) - else: - # created in an earlier transaction, now also known here - ex.do("# register %r in this thread" % v) - # - ex.do("%s = stm_get_ref(%s, %s)" % (v, r, offset)) - ex.do("%s = stm_get_ref(%s, 0)" % (v, r)) - thread_state.register_root(v) - else: - # v is NULL; we still need to read it (as it should be in the read-set): - ex.do("assert stm_get_ref(%s, %s) == %s" % (r,offset,v)) - ex.do("assert stm_get_ref(%s, 0) == %s" % (r,v)) - else: - ex.do("assert stm_get_char(%s, %s) == %s" % (r, offset, repr(chr(v)))) - ex.do("assert stm_get_char(%s, HDR) == %s" % (r, repr(chr(v)))) + return new_thread_state -class OpAssertSize(Operation): - def do(self, ex, global_state, thread_state): - r = thread_state.get_random_root() - size = global_state.get_root_size(r) - if global_state.has_ref_type(r): - ex.do("assert stm_get_obj_size(%s) == %s" % (r, size + " * WORD + HDR")) - else: - ex.do("assert stm_get_obj_size(%s) == %s" % (r, size)) -class OpAssertModified(Operation): - def do(self, ex, global_state, thread_state): - trs = thread_state.transaction_state - modified = trs.get_old_modified() - ex.do("# modified = %s" % modified) - ex.do("modified = modified_objects()") - if not modified: - ex.do("assert modified == []") - else: - saved = [m for m in modified - if m in thread_state.saved_roots or m in global_state.prebuilt_roots] - ex.do("assert {%s}.issubset(set(modified))" % ( - ", ".join(saved) - )) +################################################################### 
+################################################################### +####################### TEST GENERATION ########################### +################################################################### +################################################################### -class OpSwitchThread(Operation): - def do(self, ex, global_state, thread_state, new_thread_state=None): - if new_thread_state is None: - new_thread_state = global_state.rnd.choice(global_state.thread_states) - - if new_thread_state != thread_state: - if thread_state.transaction_state: - thread_state.push_roots(ex) - ex.do('#') - # - trs = new_thread_state.transaction_state - conflicts = trs is not None and trs.check_must_abort() - ex.thread_num = new_thread_state.num - # - ex.do(raising_call(conflicts, - "self.switch", new_thread_state.num)) - if conflicts: - new_thread_state.abort_transaction() - else: - new_thread_state.pop_roots(ex) - new_thread_state.reload_roots(ex) - - return new_thread_state - - - -# ========== TEST GENERATION ========== - class TestRandom(BaseTest): def test_fixed_16_bytes_objects(self, seed=1010): @@ -567,28 +557,28 @@ # random steps: possible_actions = [ - OpAllocate, - OpAllocateRef, OpAllocateRef, - OpWrite, OpWrite, OpWrite, - OpRead, OpRead, OpRead, OpRead, OpRead, OpRead, OpRead, OpRead, - OpCommitTransaction, - OpAbortTransaction, - OpForgetRoot, - OpBecomeInevitable, - OpAssertSize, - #OpAssertModified, - OpMinorCollect, + op_allocate, + op_allocate_ref, op_allocate_ref, + op_write, op_write, op_write, + op_read, op_read, op_read, op_read, op_read, op_read, op_read, op_read, + op_commit_transaction, + op_abort_transaction, + op_forget_root, + op_become_inevitable, + op_assert_size, + #op_assert_modified, + op_minor_collect, ] for _ in range(200): # make sure we are in a transaction: - curr_thread = OpSwitchThread().do(ex, global_state, curr_thread) + curr_thread = op_switch_thread(ex, global_state, curr_thread) if curr_thread.transaction_state is None: - OpStartTransaction().do(ex, global_state, curr_thread) + op_start_transaction(ex, global_state, curr_thread) # do something random action = rnd.choice(possible_actions) - action().do(ex, global_state, curr_thread) + action(ex, global_state, curr_thread) # to make sure we don't have aborts in the test's teardown method, # we will simply stop all running transactions @@ -596,12 +586,12 @@ if ts.transaction_state is not None: if curr_thread != ts: ex.do('#') - curr_thread = OpSwitchThread().do(ex, global_state, curr_thread, - new_thread_state=ts) + curr_thread = op_switch_thread(ex, global_state, curr_thread, + new_thread_state=ts) # could have aborted in the switch() above: if curr_thread.transaction_state: - OpCommitTransaction().do(ex, global_state, curr_thread) + op_commit_transaction(ex, global_state, curr_thread) From noreply at buildbot.pypy.org Mon Feb 24 14:05:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Feb 2014 14:05:44 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Refactor the next test Message-ID: <20140224130544.0DCFB1C08B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r823:d9fd728c638d Date: 2014-02-24 14:05 +0100 http://bitbucket.org/pypy/stmgc/changeset/d9fd728c638d/ Log: Refactor the next test diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -63,8 +63,8 @@ /* List of old objects (older than the current transaction) that the current transaction attempts to modify. 
This is used to track - the STM status: it's old objects that where written to and that - need to be copied to other segments upon commit. */ + the STM status: they are old objects that where written to and + that need to be copied to other segments upon commit. */ struct list_s *modified_old_objects; /* List of the modified old objects that may point to the nursery. @@ -77,9 +77,9 @@ /* List of overflowed objects (from the same transaction but outside the nursery) on which the write-barrier was triggered, so that they likely contain a pointer to a nursery object. This is used - by the GC: it's roots for the next minor collection. This is - NULL if the current transaction didn't span a minor collection - so far. */ + by the GC: it's additional roots for the next minor collection. + This is NULL if the current transaction didn't span a minor + collection so far. */ struct list_s *overflow_objects_pointing_to_nursery; /* Start time: to know approximately for how long a transaction has diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -47,22 +47,42 @@ } #ifdef STM_TESTS -object_t *_stm_enum_overflow_objects_pointing_to_nursery(void) +long _stm_count_modified_old_objects(void) { - static long index = 0; - struct list_s *lst = STM_PSEGMENT->overflow_objects_pointing_to_nursery; - if (index < list_count(lst)) - return (object_t *)list_item(lst, index++); - index = 0; - return (object_t *)-1; + if (STM_PSEGMENT->modified_old_objects == NULL) + return -1; + return list_count(STM_PSEGMENT->modified_old_objects); } -object_t *_stm_enum_modified_old_objects(void) + +long _stm_count_old_objects_pointing_to_nursery(void) { - static long index = 0; - struct list_s *lst = STM_PSEGMENT->modified_old_objects; - if (index < list_count(lst)) - return (object_t *)list_item(lst, index++); - index = 0; - return (object_t *)-1; + if (STM_PSEGMENT->old_objects_pointing_to_nursery == NULL) + return -1; + return list_count(STM_PSEGMENT->old_objects_pointing_to_nursery); +} + +long _stm_count_overflow_objects_pointing_to_nursery(void) +{ + if (STM_PSEGMENT->overflow_objects_pointing_to_nursery == NULL) + return -1; + return list_count(STM_PSEGMENT->overflow_objects_pointing_to_nursery); +} + +object_t *_stm_enum_modified_old_objects(long index) +{ + return (object_t *)list_item( + STM_PSEGMENT->modified_old_objects, index); +} + +object_t *_stm_enum_old_objects_pointing_to_nursery(long index) +{ + return (object_t *)list_item( + STM_PSEGMENT->old_objects_pointing_to_nursery, index); +} + +object_t *_stm_enum_overflow_objects_pointing_to_nursery(long index) +{ + return (object_t *)list_item( + STM_PSEGMENT->overflow_objects_pointing_to_nursery, index); } #endif diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -81,8 +81,12 @@ void _stm_start_safe_point(void); void _stm_stop_safe_point(void); void _stm_set_nursery_free_count(uint64_t free_count); -object_t *_stm_enum_overflow_objects_pointing_to_nursery(void); -object_t *_stm_enum_modified_old_objects(void); +long _stm_count_modified_old_objects(void); +long _stm_count_old_objects_pointing_to_nursery(void); +long _stm_count_overflow_objects_pointing_to_nursery(void); +object_t *_stm_enum_modified_old_objects(long index); +object_t *_stm_enum_old_objects_pointing_to_nursery(long index); +object_t *_stm_enum_overflow_objects_pointing_to_nursery(long index); #endif #define _STM_GCFLAG_WRITE_BARRIER 0x01 diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ 
-76,8 +76,12 @@ ssize_t stmcb_size_rounded_up(struct object_s *obj); -object_t *_stm_enum_overflow_objects_pointing_to_nursery(void); -object_t *_stm_enum_modified_old_objects(void); +long _stm_count_modified_old_objects(void); +long _stm_count_old_objects_pointing_to_nursery(void); +long _stm_count_overflow_objects_pointing_to_nursery(void); +object_t *_stm_enum_modified_old_objects(long index); +object_t *_stm_enum_old_objects_pointing_to_nursery(long index); +object_t *_stm_enum_overflow_objects_pointing_to_nursery(long index); void stm_collect(long level); """) @@ -365,13 +369,23 @@ def stm_get_flags(o): return lib._stm_get_flags(o) -def old_objects_pointing_to_young(): - return list(iter(lib._stm_enum_old_objects_pointing_to_young, - ffi.cast("object_t *", -1))) +def modified_old_objects(): + count = lib._stm_count_modified_old_objects() + if count < 0: + return None + return map(lib._stm_enum_modified_old_objects, range(count)) -def modified_objects(): - return list(iter(lib._stm_enum_modified_objects, - ffi.cast("object_t *", -1))) +def old_objects_pointing_to_nursery(): + count = lib._stm_count_old_objects_pointing_to_nursery() + if count < 0: + return None + return map(lib._stm_enum_old_objects_pointing_to_nursery, range(count)) + +def overflow_objects_pointing_to_nursery(): + count = lib._stm_count_overflow_objects_pointing_to_nursery() + if count < 0: + return None + return map(lib._stm_enum_overflow_objects_pointing_to_nursery,range(count)) SHADOWSTACK_LENGTH = 1000 diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -50,7 +50,9 @@ assert stm_was_written(lp1) stm_write(lp1) assert stm_was_written(lp1) - assert modified_objects() == [] # because same transaction + assert modified_old_objects() == [] # object not old + assert old_objects_pointing_to_nursery() == None # short transac. + assert overflow_objects_pointing_to_nursery() == None # short transac. self.commit_transaction() def test_allocate_old(self): From noreply at buildbot.pypy.org Mon Feb 24 14:14:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Feb 2014 14:14:43 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: next few tests Message-ID: <20140224131443.743881C03FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r824:806e9c1a3eb4 Date: 2014-02-24 14:14 +0100 http://bitbucket.org/pypy/stmgc/changeset/806e9c1a3eb4/ Log: next few tests diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -98,23 +98,9 @@ object_t *_stm_allocate_old(ssize_t size_rounded_up) { - /* XXX not thread-safe! and only for tests, don't use when a - transaction might be running! 
*/ - assert(size_rounded_up >= 16); - assert((size_rounded_up & 7) == 0); - - char *addr = large_malloc(size_rounded_up); - - if (addr + size_rounded_up > uninitialized_page_start) { - uintptr_t npages; - npages = (addr + size_rounded_up - uninitialized_page_start) / 4096UL; - npages += GCPAGE_NUM_PAGES; - setup_N_pages(uninitialized_page_start, npages); - uninitialized_page_start += npages * 4096UL; - } - - memset(addr, 0, size_rounded_up); - - stm_char* o = (stm_char *)(addr - stm_object_pages); - return (object_t *)o; + /* only for tests */ + object_t *o = allocate_outside_nursery_large(size_rounded_up); + memset(REAL_ADDRESS(stm_object_pages, o), 0, size_rounded_up); + o->stm_flags = STM_FLAGS_PREBUILT; + return o; } diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -22,6 +22,11 @@ __sync_lock_release(&pages_ctl.mutex_pages); } +static bool _has_mutex_pages(void) +{ + return pages_ctl.mutex_pages != 0; +} + /************************************************************/ @@ -31,7 +36,7 @@ pagenum+count) refer to the same physical range of pages from segment 0. */ uintptr_t i; - mutex_pages_lock(); + assert(_has_mutex_pages()); for (i = 1; i < NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); int res = remap_file_pages(segment_base + pagenum * 4096UL, @@ -44,7 +49,6 @@ } for (i = 0; i < count; i++) flag_page_private[pagenum + i] = SHARED_PAGE; - mutex_pages_unlock(); } #if 0 diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -92,7 +92,7 @@ #define _STM_GCFLAG_WRITE_BARRIER 0x01 #define _STM_NSE_SIGNAL 0 #define _STM_FAST_ALLOC (66*1024) -#define STM_FLAGS_PREBUILT 0 +#define STM_FLAGS_PREBUILT _STM_GCFLAG_WRITE_BARRIER /* ==================== HELPERS ==================== */ From noreply at buildbot.pypy.org Mon Feb 24 14:20:42 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Mon, 24 Feb 2014 14:20:42 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: some cleanup Message-ID: <20140224132042.8B4D91C03FC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r825:e531569d6bd4 Date: 2014-02-24 14:20 +0100 http://bitbucket.org/pypy/stmgc/changeset/e531569d6bd4/ Log: some cleanup diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -88,6 +88,8 @@ if only_new: for w in committed.write_set: self.values[w] = committed.values[w] + for w in committed.created_in_this_transaction: + self.values[w] = committed.values[w] else: self.values.update(committed.values) @@ -107,8 +109,9 @@ self.created_in_this_transaction.add(r) def write_root(self, r, v): - self.read_set.add(r) - self.write_set.add(r) + if r not in self.created_in_this_transaction: + self.read_set.add(r) + self.write_set.add(r) old = self.values.get(r, None) self.values[r] = v return old @@ -132,13 +135,6 @@ assert len(self.saved_roots) < SHADOWSTACK_LENGTH def forget_random_root(self): - # # forget some non-pushed root for now - # if self.roots_on_stack < len(self.saved_roots): - # idx = self.global_state.rnd.randrange(self.roots_on_stack, len(self.saved_roots)) - # r = self.saved_roots[idx] - # del self.saved_roots[idx] - # return r - if self.transaction_state.inevitable: # forget *all* roots self.roots_on_stack = 0 @@ -204,6 +200,7 @@ def abort_transaction(self): assert self.transaction_state.check_must_abort() + assert not self.transaction_state.inevitable self.roots_on_stack = self.roots_on_transaction_start del self.saved_roots[self.roots_on_stack:] 
self.transaction_state = None @@ -443,21 +440,18 @@ ex.do("assert stm_get_ref(%s, %s) == %s" % (r, offset, v)) ex.do("assert stm_get_ref(%s, 0) == %s" % (r, v)) elif v != "ffi.NULL": - # if v came from this transaction: re-add it to saved_roots because - # it survived by being referenced by another saved root - # if v is from a different transaction: - # we fish its value from somewhere and add it to our known roots global_trs = global_state.committed_transaction_state if v not in trs.values: # not from this transaction AND not known at the start of this - # transaction - trs.add_root(v, global_trs.values[v], False) - ex.do("# get %r from other thread" % v) + # transaction AND not pushed to us by a commit + assert False elif v not in global_trs.values: - # created and forgotten earlier in this thread - ex.do("# revive %r in this thread" % v) + # created and forgotten earlier in this transaction, we still + # know its latest value (v in trs.values) + ex.do("# revive %r in this transaction" % v) else: - # created in an earlier transaction, now also known here + # created in an earlier transaction, now also known here. We + # know its value (v in trs.values) ex.do("# register %r in this thread" % v) # ex.do("%s = stm_get_ref(%s, %s)" % (v, r, offset)) From noreply at buildbot.pypy.org Mon Feb 24 14:28:14 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Mon, 24 Feb 2014 14:28:14 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: enable assertion of modified objects Message-ID: <20140224132814.338C71C03FC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r826:214702395e15 Date: 2014-02-24 14:28 +0100 http://bitbucket.org/pypy/stmgc/changeset/214702395e15/ Log: enable assertion of modified objects diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -477,7 +477,7 @@ trs = thread_state.transaction_state modified = trs.get_old_modified() ex.do("# modified = %s" % modified) - ex.do("modified = modified_objects()") + ex.do("modified = modified_old_objects()") if not modified: ex.do("assert modified == []") else: @@ -560,7 +560,7 @@ op_forget_root, op_become_inevitable, op_assert_size, - #op_assert_modified, + op_assert_modified, op_minor_collect, ] for _ in range(200): From noreply at buildbot.pypy.org Mon Feb 24 15:06:30 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Mon, 24 Feb 2014 15:06:30 +0100 (CET) Subject: [pypy-commit] pypy default: remove duplication of name attribute on dtype descriptors Message-ID: <20140224140630.DAC331C02EA@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69353:a55cda9fb045 Date: 2014-02-24 09:03 -0500 http://bitbucket.org/pypy/pypy/changeset/a55cda9fb045/ Log: remove duplication of name attribute on dtype descriptors diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/arrayimpl/sort.py @@ -177,7 +177,7 @@ # XXX this should probably be changed raise oefmt(space.w_NotImplementedError, "sorting of non-numeric types '%s' is not implemented", - arr.dtype.name) + arr.dtype.get_name()) all_types = (types.all_float_types + types.all_complex_types + types.all_int_types) @@ -320,7 +320,7 @@ # XXX this should probably be changed raise oefmt(space.w_NotImplementedError, "sorting of non-numeric types '%s' is not implemented", - arr.dtype.name) + arr.dtype.get_name()) all_types = (types.all_float_types + 
types.all_complex_types + types.all_int_types) diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -218,7 +218,7 @@ return w_type.lookup(name) def gettypefor(self, w_obj): - return None + return W_TypeObject(w_obj.typedef.name) def call_function(self, tp, w_dtype): return w_dtype diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -38,20 +38,19 @@ class W_Dtype(W_Root): _immutable_fields_ = [ - "num", "kind", "name", "char", "w_box_type", "float_type", + "num", "kind", "char", "w_box_type", "float_type", "itemtype?", "byteorder?", "names?", "fields?", "size?", "shape?", "subdtype?", "base?", "alternate_constructors", "aliases", ] - def __init__(self, itemtype, num, kind, name, char, w_box_type, + def __init__(self, itemtype, num, kind, char, w_box_type, float_type=None, byteorder=None, names=[], fields={}, size=1, shape=[], subdtype=None, alternate_constructors=[], aliases=[]): self.itemtype = itemtype self.num = num self.kind = kind - self.name = name self.char = char self.w_box_type = w_box_type self.float_type = float_type @@ -180,10 +179,16 @@ return space.w_None return space.newtuple([space.wrap(self.subdtype), self.descr_get_shape(space)]) + def get_name(self): + return self.w_box_type.name + def descr_get_name(self, space): + name = self.get_name() + if name[-1] == '_': + name = name[:-1] if self.is_flexible_type(): - return space.wrap(self.name + str(self.get_size() * 8)) - return space.wrap(self.name) + return space.wrap(name + str(self.get_size() * 8)) + return space.wrap(name) def descr_get_str(self, space): size = self.get_size() @@ -290,8 +295,8 @@ def descr_getitem(self, space, w_item): if not self.fields: - raise OperationError(space.w_KeyError, space.wrap( - "There are no fields in dtype %s." 
% self.name)) + raise oefmt(space.w_KeyError, "There are no fields in dtype %s.", + self.get_name()) if space.isinstance_w(w_item, space.w_basestring): item = space.str_w(w_item) elif space.isinstance_w(w_item, space.w_int): @@ -421,7 +426,7 @@ elif newendian != NPY.IGNORE: endian = newendian itemtype = self.itemtype.__class__(endian in (NPY.NATIVE, NPY.NATBYTE)) - return W_Dtype(itemtype, self.num, self.kind, self.name, self.char, + return W_Dtype(itemtype, self.num, self.kind, self.char, self.w_box_type, self.float_type, byteorder=endian, size=self.size) @@ -455,8 +460,8 @@ fields[fldname] = (offset, subdtype) offset += subdtype.get_size() names.append(fldname) - return W_Dtype(types.RecordType(), NPY.VOID, NPY.VOIDLTR, "void", - NPY.VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), + return W_Dtype(types.RecordType(), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, + space.gettypefor(interp_boxes.W_VoidBox), names=names, fields=fields, size=offset) @@ -496,11 +501,10 @@ size *= dim if size == 1: return subdtype - return W_Dtype(types.VoidType(), NPY.VOID, NPY.VOIDLTR, - "void" + str(8 * subdtype.get_size() * size), - NPY.VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), - shape=shape, subdtype=subdtype, - size=subdtype.get_size() * size) + size *= subdtype.get_size() + return W_Dtype(types.VoidType(), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, + space.gettypefor(interp_boxes.W_VoidBox), + shape=shape, subdtype=subdtype, size=size) if space.is_none(w_dtype): return cache.w_float64dtype @@ -615,7 +619,6 @@ size=size, num=NPY.STRING, kind=NPY.STRINGLTR, - name='string', char=NPY.STRINGLTR, w_box_type=space.gettypefor(interp_boxes.W_StringBox), ) @@ -627,7 +630,6 @@ size=size, num=NPY.UNICODE, kind=NPY.UNICODELTR, - name='unicode', char=NPY.UNICODELTR, w_box_type=space.gettypefor(interp_boxes.W_UnicodeBox), ) @@ -639,7 +641,6 @@ size=size, num=NPY.VOID, kind=NPY.VOIDLTR, - name='void', char=NPY.VOIDLTR, w_box_type=space.gettypefor(interp_boxes.W_VoidBox), ) @@ -651,17 +652,15 @@ types.Bool(), num=NPY.BOOL, kind=NPY.GENBOOLLTR, - name="bool", char=NPY.BOOLLTR, w_box_type=space.gettypefor(interp_boxes.W_BoolBox), alternate_constructors=[space.w_bool], - aliases=['bool8'], + aliases=['bool', 'bool8'], ) self.w_int8dtype = W_Dtype( types.Int8(), num=NPY.BYTE, kind=NPY.SIGNEDLTR, - name="int8", char=NPY.BYTELTR, w_box_type=space.gettypefor(interp_boxes.W_Int8Box), aliases=['byte'], @@ -670,7 +669,6 @@ types.UInt8(), num=NPY.UBYTE, kind=NPY.UNSIGNEDLTR, - name="uint8", char=NPY.UBYTELTR, w_box_type=space.gettypefor(interp_boxes.W_UInt8Box), aliases=['ubyte'], @@ -679,7 +677,6 @@ types.Int16(), num=NPY.SHORT, kind=NPY.SIGNEDLTR, - name="int16", char=NPY.SHORTLTR, w_box_type=space.gettypefor(interp_boxes.W_Int16Box), aliases=['short'], @@ -688,7 +685,6 @@ types.UInt16(), num=NPY.USHORT, kind=NPY.UNSIGNEDLTR, - name="uint16", char=NPY.USHORTLTR, w_box_type=space.gettypefor(interp_boxes.W_UInt16Box), aliases=['ushort'], @@ -697,7 +693,6 @@ types.Int32(), num=NPY.INT, kind=NPY.SIGNEDLTR, - name="int32", char=NPY.INTLTR, w_box_type=space.gettypefor(interp_boxes.W_Int32Box), ) @@ -705,7 +700,6 @@ types.UInt32(), num=NPY.UINT, kind=NPY.UNSIGNEDLTR, - name="uint32", char=NPY.UINTLTR, w_box_type=space.gettypefor(interp_boxes.W_UInt32Box), ) @@ -713,31 +707,28 @@ types.Long(), num=NPY.LONG, kind=NPY.SIGNEDLTR, - name="int%d" % LONG_BIT, char=NPY.LONGLTR, w_box_type=space.gettypefor(interp_boxes.W_LongBox), alternate_constructors=[space.w_int, space.gettypefor(interp_boxes.W_IntegerBox), 
space.gettypefor(interp_boxes.W_SignedIntegerBox), ], - aliases=['int'], + aliases=['int', 'intp', 'p'], ) self.w_ulongdtype = W_Dtype( types.ULong(), num=NPY.ULONG, kind=NPY.UNSIGNEDLTR, - name="uint%d" % LONG_BIT, char=NPY.ULONGLTR, w_box_type=space.gettypefor(interp_boxes.W_ULongBox), alternate_constructors=[space.gettypefor(interp_boxes.W_UnsignedIntegerBox), ], - aliases=['uint'], + aliases=['uint', 'uintp', 'P'], ) self.w_int64dtype = W_Dtype( types.Int64(), num=NPY.LONGLONG, kind=NPY.SIGNEDLTR, - name="int64", char=NPY.LONGLONGLTR, w_box_type=space.gettypefor(interp_boxes.W_Int64Box), alternate_constructors=[space.w_long], @@ -747,7 +738,6 @@ types.UInt64(), num=NPY.ULONGLONG, kind=NPY.UNSIGNEDLTR, - name="uint64", char=NPY.ULONGLONGLTR, w_box_type=space.gettypefor(interp_boxes.W_UInt64Box), aliases=['ulonglong'], @@ -756,7 +746,6 @@ types.Float32(), num=NPY.FLOAT, kind=NPY.FLOATINGLTR, - name="float32", char=NPY.FLOATLTR, w_box_type=space.gettypefor(interp_boxes.W_Float32Box), aliases=['single'] @@ -765,9 +754,8 @@ types.Float64(), num=NPY.DOUBLE, kind=NPY.FLOATINGLTR, - name="float64", char=NPY.DOUBLELTR, - w_box_type = space.gettypefor(interp_boxes.W_Float64Box), + w_box_type=space.gettypefor(interp_boxes.W_Float64Box), alternate_constructors=[space.w_float, space.gettypefor(interp_boxes.W_NumberBox), space.gettypefor(interp_boxes.W_FloatingBox), @@ -778,7 +766,6 @@ types.FloatLong(), num=NPY.LONGDOUBLE, kind=NPY.FLOATINGLTR, - name="float%d" % (interp_boxes.long_double_size * 8), char=NPY.LONGDOUBLELTR, w_box_type=space.gettypefor(interp_boxes.W_FloatLongBox), aliases=["longdouble", "longfloat"], @@ -787,9 +774,8 @@ types.Complex64(), num=NPY.CFLOAT, kind=NPY.COMPLEXLTR, - name="complex64", char=NPY.CFLOATLTR, - w_box_type = space.gettypefor(interp_boxes.W_Complex64Box), + w_box_type=space.gettypefor(interp_boxes.W_Complex64Box), aliases=['csingle'], float_type=NPY.FLOATLTR, ) @@ -797,9 +783,8 @@ types.Complex128(), num=NPY.CDOUBLE, kind=NPY.COMPLEXLTR, - name="complex128", char=NPY.CDOUBLELTR, - w_box_type = space.gettypefor(interp_boxes.W_Complex128Box), + w_box_type=space.gettypefor(interp_boxes.W_Complex128Box), alternate_constructors=[space.w_complex, space.gettypefor(interp_boxes.W_ComplexFloatingBox)], aliases=["complex", 'cfloat', 'cdouble'], @@ -809,9 +794,8 @@ types.ComplexLong(), num=NPY.CLONGDOUBLE, kind=NPY.COMPLEXLTR, - name="complex%d" % (interp_boxes.long_double_size * 16), char=NPY.CLONGDOUBLELTR, - w_box_type = space.gettypefor(interp_boxes.W_ComplexLongBox), + w_box_type=space.gettypefor(interp_boxes.W_ComplexLongBox), aliases=["clongdouble", "clongfloat"], float_type=NPY.LONGDOUBLELTR, ) @@ -820,31 +804,29 @@ size=0, num=NPY.STRING, kind=NPY.STRINGLTR, - name='string', char=NPY.STRINGLTR, - w_box_type = space.gettypefor(interp_boxes.W_StringBox), + w_box_type=space.gettypefor(interp_boxes.W_StringBox), alternate_constructors=[space.w_str, space.gettypefor(interp_boxes.W_CharacterBox)], - aliases=["str"], + aliases=['string', "str"], ) self.w_unicodedtype = W_Dtype( types.UnicodeType(), size=0, num=NPY.UNICODE, kind=NPY.UNICODELTR, - name='unicode', char=NPY.UNICODELTR, - w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox), + w_box_type=space.gettypefor(interp_boxes.W_UnicodeBox), alternate_constructors=[space.w_unicode], + aliases=['unicode'], ) self.w_voiddtype = W_Dtype( types.VoidType(), size=0, num=NPY.VOID, kind=NPY.VOIDLTR, - name='void', char=NPY.VOIDLTR, - w_box_type = space.gettypefor(interp_boxes.W_VoidBox), + 
w_box_type=space.gettypefor(interp_boxes.W_VoidBox), #alternate_constructors=[space.w_buffer], # XXX no buffer in space #alternate_constructors=[space.gettypefor(interp_boxes.W_GenericBox)], @@ -854,7 +836,6 @@ types.Float16(), num=NPY.HALF, kind=NPY.FLOATINGLTR, - name="float16", char=NPY.HALFLTR, w_box_type=space.gettypefor(interp_boxes.W_Float16Box), ) @@ -862,17 +843,15 @@ types.Long(), num=NPY.LONG, kind=NPY.SIGNEDLTR, - name='intp', char=NPY.INTPLTR, - w_box_type = space.gettypefor(interp_boxes.W_LongBox), + w_box_type=space.gettypefor(interp_boxes.W_LongBox), ) self.w_uintpdtype = W_Dtype( types.ULong(), num=NPY.ULONG, kind=NPY.UNSIGNEDLTR, - name='uintp', char=NPY.UINTPLTR, - w_box_type = space.gettypefor(interp_boxes.W_ULongBox), + w_box_type=space.gettypefor(interp_boxes.W_ULongBox), ) float_dtypes = [self.w_float16dtype, self.w_float32dtype, self.w_float64dtype, self.w_floatlongdtype] @@ -900,7 +879,7 @@ for dtype in reversed(self.builtin_dtypes): dtype.fields = None # mark these as builtin self.dtypes_by_num[dtype.num] = dtype - self.dtypes_by_name[dtype.name] = dtype + self.dtypes_by_name[dtype.get_name()] = dtype for can_name in [dtype.kind + str(dtype.get_size()), dtype.char]: self.dtypes_by_name[can_name] = dtype diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -573,7 +573,8 @@ space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) if new_dtype.num == NPY.VOID: raise oefmt(space.w_NotImplementedError, - "%s.astype(%s) not implemented yet", cur_dtype.name, new_dtype.name) + "astype(%s) not implemented yet", + new_dtype.get_name()) if new_dtype.num == NPY.STRING and new_dtype.size == 0: if cur_dtype.num == NPY.STRING: new_dtype = interp_dtype.variable_dtype(space, @@ -1029,7 +1030,7 @@ except AttributeError: raise oefmt(space.w_NotImplementedError, '%s not implemented for %s', - op_name, self.get_dtype().name) + op_name, self.get_dtype().get_name()) return space.wrap(res) return func_with_new_name(impl, "reduce_arg%s_impl" % op_name) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -386,8 +386,9 @@ return space.w_NotImplemented else: raise oefmt(space.w_TypeError, - 'unsupported operand dtypes %s and %s for "%s"', - w_rdtype.name, w_ldtype.name, self.name) + 'unsupported operand dtypes %s and %s for "%s"', + w_rdtype.get_name(), w_ldtype.get_name(), + self.name) if self.are_common_types(w_ldtype, w_rdtype): if not w_lhs.is_scalar() and w_rhs.is_scalar(): @@ -612,7 +613,7 @@ except AttributeError: raise oefmt(space.w_NotImplementedError, "%s not implemented for %s", - ufunc_name, dtype.name) + ufunc_name, dtype.get_name()) if argcount == 1: def impl(res_dtype, value): res = get_op(res_dtype)(value) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -900,22 +900,22 @@ def test_intp(self): from numpypy import dtype - assert dtype('p') is dtype('intp') - assert dtype('P') is dtype('uintp') - #assert dtype('p') is dtype('int') - #assert dtype('P') is dtype('uint') + for s in ['p', 'int']: + assert dtype(s) is dtype('intp') + for s in ['P', 'uint']: + assert dtype(s) is dtype('uintp') assert dtype('p').num == 7 assert dtype('P').num 
== 8 - #assert dtype('p').char == 'l' - #assert dtype('P').char == 'L' + assert dtype('p').char == 'l' + assert dtype('P').char == 'L' assert dtype('p').kind == 'i' assert dtype('P').kind == 'u' - #if self.ptr_size == 4: - # assert dtype('p').name == 'int32' - # assert dtype('P').name == 'uint32' - #else: - # assert dtype('p').name == 'int64' - # assert dtype('P').name == 'uint64' + if self.ptr_size == 4: + assert dtype('p').name == 'int32' + assert dtype('P').name == 'uint32' + else: + assert dtype('p').name == 'int64' + assert dtype('P').name == 'uint64' def test_alignment(self): from numpypy import dtype diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -60,6 +60,8 @@ pass class W_MyType(W_MyObject): + name = "foobar" + def __init__(self): self.mro_w = [w_some_obj(), w_some_obj()] self.dict_w = {'__str__': w_some_obj()} From noreply at buildbot.pypy.org Mon Feb 24 15:09:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Feb 2014 15:09:19 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Better debug prints. Fix next test. Message-ID: <20140224140919.AACAA1C02EA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r827:f19ec8c06cfd Date: 2014-02-24 15:09 +0100 http://bitbucket.org/pypy/stmgc/changeset/f19ec8c06cfd/ Log: Better debug prints. Fix next test. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -13,6 +13,7 @@ { assert(_running_transaction()); assert(!_is_in_nursery(obj)); + dprintf(("write_slowpath %p\n", obj)); /* is this an object from the same transaction, outside the nursery? */ if ((obj->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) == diff --git a/c7/stm/list.c b/c7/stm/list.c --- a/c7/stm/list.c +++ b/c7/stm/list.c @@ -11,10 +11,9 @@ { uintptr_t initial_allocation = 32; struct list_s *lst = malloc(LIST_SETSIZE(initial_allocation)); - if (lst == NULL) { - perror("out of memory in list_create"); - abort(); - } + if (lst == NULL) + stm_fatalerror("out of memory in list_create\n"); /* XXX */ + lst->count = 0; lst->last_allocated = initial_allocation - 1; return lst; @@ -24,10 +23,9 @@ { nalloc = LIST_OVERCNT(nalloc); lst = realloc(lst, LIST_SETSIZE(nalloc)); - if (lst == NULL) { - perror("out of memory in _list_grow"); - abort(); - } + if (lst == NULL) + stm_fatalerror("out of memory in _list_grow\n"); /* XXX */ + lst->last_allocated = nalloc - 1; return lst; } diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -30,6 +30,20 @@ /************************************************************/ +static void d_remap_file_pages(char *addr, size_t size, ssize_t pgoff) +{ + dprintf(("remap_file_pages: 0x%lx bytes: (seg%ld %p) --> (seg%ld %p)\n", + (long)size, + (long)((addr - stm_object_pages) / 4096UL) / NB_PAGES, + (void *)((addr - stm_object_pages) % (4096UL * NB_PAGES)), + (long)pgoff / NB_PAGES, + (void *)((pgoff % NB_PAGES) * 4096UL))); + + int res = remap_file_pages(addr, size, 0, pgoff, 0); + if (UNLIKELY(res < 0)) + stm_fatalerror("remap_file_pages: %m\n"); +} + static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count) { /* call remap_file_pages() to make all pages in the range(pagenum, @@ -39,13 +53,8 @@ assert(_has_mutex_pages()); for (i = 1; i < NB_SEGMENTS; i++) { char *segment_base = get_segment_base(i); - int res = remap_file_pages(segment_base + pagenum * 4096UL, - count * 4096UL, - 0, pagenum, 0); - if (res != 0) { - 
perror("remap_file_pages"); - abort(); - } + d_remap_file_pages(segment_base + pagenum * 4096UL, + count * 4096UL, pagenum); } for (i = 0; i < count; i++) flag_page_private[pagenum + i] = SHARED_PAGE; @@ -83,11 +92,7 @@ void *localpg = stm_object_pages + localpgoff * 4096UL; void *otherpg = stm_object_pages + otherpgoff * 4096UL; - int res = remap_file_pages(localpg, count * 4096, 0, pgoff2, 0); - if (res < 0) { - perror("remap_file_pages"); - abort(); - } + d_remap_file_pages(localpg, count * 4096, pgoff2); uintptr_t i; if (full) { for (i = 0; i < count; i++) { @@ -117,7 +122,7 @@ for (; pagenum < pagestop; pagenum++) { uint8_t prev = flag_page_private[pagenum]; - if (prev == SHARED_PAGE) { + if (prev == PRIVATE_PAGE) { if (pagenum > page_start_range) { privatize_range(page_start_range, pagenum - page_start_range, full); @@ -125,7 +130,7 @@ page_start_range = pagenum + 1; } else { - assert(prev == PRIVATE_PAGE); + assert(prev == SHARED_PAGE); } } diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -22,10 +22,8 @@ stm_object_pages = mmap(NULL, TOTAL_MEMORY, PROT_READ | PROT_WRITE, MAP_PAGES_FLAGS, -1, 0); - if (stm_object_pages == MAP_FAILED) { - perror("stm_object_pages mmap"); - abort(); - } + if (stm_object_pages == MAP_FAILED) + stm_fatalerror("initial stm_object_pages mmap() failed: %m\n"); long i; for (i = 0; i < NB_SEGMENTS; i++) { diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -32,36 +32,29 @@ static void setup_sync(void) { if (pthread_mutex_init(&sync_ctl.global_mutex, NULL) != 0 || - pthread_cond_init(&sync_ctl.global_cond, NULL) != 0) { - perror("mutex/cond initialization"); - abort(); - } + pthread_cond_init(&sync_ctl.global_cond, NULL) != 0) + stm_fatalerror("mutex/cond initialization: %m\n"); } static void teardown_sync(void) { if (pthread_mutex_destroy(&sync_ctl.global_mutex) != 0 || - pthread_cond_destroy(&sync_ctl.global_cond) != 0) { - perror("mutex/cond destroy"); - abort(); - } + pthread_cond_destroy(&sync_ctl.global_cond) != 0) + stm_fatalerror("mutex/cond destroy: %m\n"); + memset(&sync_ctl, 0, sizeof(sync_ctl.in_use)); } static void set_gs_register(char *value) { - if (syscall(SYS_arch_prctl, ARCH_SET_GS, (uint64_t)value) != 0) { - perror("syscall(arch_prctl, ARCH_SET_GS)"); - abort(); - } + if (UNLIKELY(syscall(SYS_arch_prctl, ARCH_SET_GS, (uint64_t)value) != 0)) + stm_fatalerror("syscall(arch_prctl, ARCH_SET_GS): %m\n"); } static inline void mutex_lock(void) { - if (UNLIKELY(pthread_mutex_lock(&sync_ctl.global_mutex) != 0)) { - perror("pthread_mutex_lock"); - abort(); - } + if (UNLIKELY(pthread_mutex_lock(&sync_ctl.global_mutex) != 0)) + stm_fatalerror("pthread_mutex_lock: %m\n"); if (STM_PSEGMENT->transaction_state == TS_MUST_ABORT) abort_with_mutex(); @@ -72,10 +65,8 @@ assert(STM_PSEGMENT->safe_point == SP_NO_TRANSACTION || STM_PSEGMENT->safe_point == SP_RUNNING); - if (UNLIKELY(pthread_mutex_unlock(&sync_ctl.global_mutex) != 0)) { - perror("pthread_mutex_unlock"); - abort(); - } + if (UNLIKELY(pthread_mutex_unlock(&sync_ctl.global_mutex) != 0)) + stm_fatalerror("pthread_mutex_unlock: %m\n"); } static inline bool _has_mutex(void) @@ -97,10 +88,8 @@ #endif if (UNLIKELY(pthread_cond_wait(&sync_ctl.global_cond, - &sync_ctl.global_mutex) != 0)) { - perror("pthread_cond_wait"); - abort(); - } + &sync_ctl.global_mutex) != 0)) + stm_fatalerror("pthread_cond_wait: %m\n"); if (STM_PSEGMENT->transaction_state == TS_MUST_ABORT) abort_with_mutex(); @@ -108,10 +97,8 @@ static inline void 
cond_broadcast(void) { - if (UNLIKELY(pthread_cond_broadcast(&sync_ctl.global_cond) != 0)) { - perror("pthread_cond_broadcast"); - abort(); - } + if (UNLIKELY(pthread_cond_broadcast(&sync_ctl.global_cond) != 0)) + stm_fatalerror("pthread_cond_broadcast: %m\n"); } static void acquire_thread_segment(stm_thread_local_t *tl) From noreply at buildbot.pypy.org Mon Feb 24 15:10:49 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Feb 2014 15:10:49 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: fix next test Message-ID: <20140224141049.86FA41C02EA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r828:14fbca47caec Date: 2014-02-24 15:10 +0100 http://bitbucket.org/pypy/stmgc/changeset/14fbca47caec/ Log: fix next test diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -86,9 +86,10 @@ # self.switch(1) self.start_transaction() - assert modified_objects() == [] + assert modified_old_objects() == [] stm_write(lp1) - assert modified_objects() == [lp1] + assert modified_old_objects() == [lp1] + assert old_objects_pointing_to_nursery() == None assert stm_get_char(lp1) == 'a' stm_set_char(lp1, 'b') # From noreply at buildbot.pypy.org Mon Feb 24 15:18:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Feb 2014 15:18:56 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Fix tests, next one is real. Message-ID: <20140224141856.78CB21C02EA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r829:f490a4d837b5 Date: 2014-02-24 15:18 +0100 http://bitbucket.org/pypy/stmgc/changeset/f490a4d837b5/ Log: Fix tests, next one is real. diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -73,6 +73,7 @@ object_t *obj = *pobj; if (obj == NULL) return; + assert((uintptr_t)obj < NB_PAGES * 4096UL); if (!_is_in_nursery(obj)) return; @@ -125,6 +126,7 @@ object_t **current = tl->shadowstack; object_t **base = tl->shadowstack_base; while (current-- != base) { + assert(*current != (object_t *)-1); minor_trace_if_young(current); } } diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -104,16 +104,18 @@ py.test.raises(Conflict, self.switch, 0) # detects rw conflict def test_commit_fresh_objects(self): - self.push_root_no_gc() self.start_transaction() lp = stm_allocate(16) stm_set_char(lp, 'u') + self.push_root(lp) self.commit_transaction() + lp = self.pop_root() p1 = stm_get_real_address(lp) self.switch(1) self.start_transaction() + assert stm_get_char(lp) == 'u' stm_write(lp) # privatize page p2 = stm_get_real_address(lp) assert p1 != p2 # we see the other segment, but same object @@ -159,13 +161,16 @@ self.commit_transaction() def test_commit_fresh_objects3(self): - # make objects lpx; then privatize the page by committing changes + # make object lpx; then privatize the page by committing changes # to it; then create lpy in the same page. Check that lpy is # visible from the other thread. 
self.start_transaction() lpx = stm_allocate(16) stm_set_char(lpx, '.') + self.push_root(lpx) self.commit_transaction() + lpx = self.pop_root() + self.push_root(lpx) self.start_transaction() stm_set_char(lpx, 'X') @@ -174,7 +179,9 @@ self.start_transaction() lpy = stm_allocate(16) stm_set_char(lpy, 'y') + self.push_root(lpy) self.commit_transaction() + lpy = self.pop_root() self.switch(1) self.start_transaction() From noreply at buildbot.pypy.org Mon Feb 24 15:30:53 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 15:30:53 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Kill slice SMMs. Message-ID: <20140224143053.EDA4D1C0132@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69354:069b92af2e4e Date: 2014-02-24 15:12 +0100 http://bitbucket.org/pypy/pypy/changeset/069b92af2e4e/ Log: Kill slice SMMs. diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -16,13 +16,13 @@ interp2app) from pypy.interpreter.generator import GeneratorIterator from pypy.interpreter.signature import Signature -from pypy.objspace.std import slicetype from pypy.objspace.std.bytesobject import W_BytesObject from pypy.objspace.std.floatobject import W_FloatObject from pypy.objspace.std.intobject import W_IntObject from pypy.objspace.std.iterobject import (W_FastListIterObject, W_ReverseSeqIterObject) -from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice +from pypy.objspace.std.sliceobject import (W_SliceObject, unwrap_start_stop, + normalize_simple_slice) from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.tupleobject import W_AbstractTupleObject from pypy.objspace.std.unicodeobject import W_UnicodeObject @@ -624,8 +624,7 @@ first index of value''' # needs to be safe against eq_w() mutating the w_list behind our back size = self.length() - i, stop = slicetype.unwrap_start_stop( - space, size, w_start, w_stop, True) + i, stop = unwrap_start_stop(space, size, w_start, w_stop, True) try: i = self.find(w_value, i, stop) except ValueError: diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -33,7 +33,6 @@ class result: from pypy.objspace.std.objecttype import object_typedef from pypy.objspace.std.typeobject import type_typedef - from pypy.objspace.std.slicetype import slice_typedef self.pythontypes = [value for key, value in result.__dict__.items() if not key.startswith('_')] # don't look @@ -82,6 +81,7 @@ self.pythontypes.append(longobject.W_LongObject.typedef) self.pythontypes.append(floatobject.W_FloatObject.typedef) self.pythontypes.append(complexobject.W_ComplexObject.typedef) + self.pythontypes.append(sliceobject.W_SliceObject.typedef) # the set of implementation types self.typeorder = { diff --git a/pypy/objspace/std/sliceobject.py b/pypy/objspace/std/sliceobject.py --- a/pypy/objspace/std/sliceobject.py +++ b/pypy/objspace/std/sliceobject.py @@ -1,13 +1,14 @@ """Slice object""" +from pypy.interpreter import gateway +from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError -from pypy.interpreter import gateway -from pypy.objspace.std.model import registerimplementation, W_Object -from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.slicetype import _eval_slice_index +from pypy.interpreter.typedef import GetSetProperty +from 
pypy.objspace.std.stdtypedef import StdTypeDef +from rpython.rlib.objectmodel import specialize -class W_SliceObject(W_Object): - from pypy.objspace.std.slicetype import slice_typedef as typedef + +class W_SliceObject(W_Root): _immutable_fields_ = ['w_start', 'w_stop', 'w_step'] def __init__(w_self, w_start, w_stop, w_step): @@ -83,11 +84,156 @@ return "" % ( self.w_start, self.w_stop, self.w_step) -registerimplementation(W_SliceObject) + @staticmethod + def descr__new__(space, w_slicetype, args_w): + from pypy.objspace.std.sliceobject import W_SliceObject + w_start = space.w_None + w_stop = space.w_None + w_step = space.w_None + if len(args_w) == 1: + w_stop, = args_w + elif len(args_w) == 2: + w_start, w_stop = args_w + elif len(args_w) == 3: + w_start, w_stop, w_step = args_w + elif len(args_w) > 3: + raise OperationError(space.w_TypeError, + space.wrap("slice() takes at most 3 arguments")) + else: + raise OperationError(space.w_TypeError, + space.wrap("slice() takes at least 1 argument")) + w_obj = space.allocate_instance(W_SliceObject, w_slicetype) + W_SliceObject.__init__(w_obj, w_start, w_stop, w_step) + return w_obj + def descr_repr(self, space): + return space.wrap("slice(%s, %s, %s)" % ( + space.str_w(space.repr(self.w_start)), + space.str_w(space.repr(self.w_stop)), + space.str_w(space.repr(self.w_step)))) + + def descr__reduce__(self, space): + from pypy.objspace.std.sliceobject import W_SliceObject + assert isinstance(self, W_SliceObject) + return space.newtuple([ + space.type(self), + space.newtuple([self.w_start, self.w_stop, self.w_step])]) + + def descr_eq(self, space, w_other): + # We need this because CPython considers that slice1 == slice1 + # is *always* True (e.g. even if slice1 was built with non-comparable + # parameters + if space.is_w(self, w_other): + return space.w_True + if space.eq_w(self.w_start, w_other.w_start) and \ + space.eq_w(self.w_stop, w_other.w_stop) and \ + space.eq_w(self.w_step, w_other.w_step): + return space.w_True + else: + return space.w_False + + def descr_lt(self, space, w_other): + if space.is_w(self, w_other): + return space.w_False # see comments in descr_eq() + if space.eq_w(self.w_start, w_other.w_start): + if space.eq_w(self.w_stop, w_other.w_stop): + return space.lt(self.w_step, w_other.w_step) + else: + return space.lt(self.w_stop, w_other.w_stop) + else: + return space.lt(self.w_start, w_other.w_start) + + def descr_indices(self, space, w_length): + """S.indices(len) -> (start, stop, stride) + + Assuming a sequence of length len, calculate the start and stop + indices, and the stride length of the extended slice described by + S. Out of bounds indices are clipped in a manner consistent with the + handling of normal slices. + """ + length = space.getindex_w(w_length, space.w_OverflowError) + start, stop, step = self.indices3(space, length) + return space.newtuple([space.wrap(start), space.wrap(stop), + space.wrap(step)]) + + +def slicewprop(name): + def fget(space, w_obj): + from pypy.objspace.std.sliceobject import W_SliceObject + if not isinstance(w_obj, W_SliceObject): + raise OperationError(space.w_TypeError, + space.wrap("descriptor is for 'slice'")) + return getattr(w_obj, name) + return GetSetProperty(fget) + +W_SliceObject.typedef = StdTypeDef("slice", + __doc__ = '''slice([start,] stop[, step]) + +Create a slice object. This is used for extended slicing (e.g. 
a[0:10:2]).''', + __new__ = gateway.interp2app(W_SliceObject.descr__new__), + __repr__ = gateway.interp2app(W_SliceObject.descr_repr), + __hash__ = None, + __reduce__ = gateway.interp2app(W_SliceObject.descr__reduce__), + + __eq__ = gateway.interp2app(W_SliceObject.descr_eq), + __lt__ = gateway.interp2app(W_SliceObject.descr_lt), + + start = slicewprop('w_start'), + stop = slicewprop('w_stop'), + step = slicewprop('w_step'), + indices = gateway.interp2app(W_SliceObject.descr_indices), +) +W_SliceObject.typedef.acceptable_as_base_class = False + + +# utility functions +def _eval_slice_index(space, w_int): + # note that it is the *callers* responsibility to check for w_None + # otherwise you can get funny error messages + try: + return space.getindex_w(w_int, None) # clamp if long integer too large + except OperationError, err: + if not err.match(space, space.w_TypeError): + raise + raise OperationError(space.w_TypeError, + space.wrap("slice indices must be integers or " + "None or have an __index__ method")) + +def adapt_lower_bound(space, size, w_index): + index = _eval_slice_index(space, w_index) + if index < 0: + index = index + size + if index < 0: + index = 0 + assert index >= 0 + return index + +def adapt_bound(space, size, w_index): + index = adapt_lower_bound(space, size, w_index) + if index > size: + index = size + assert index >= 0 + return index + + at specialize.arg(4) +def unwrap_start_stop(space, size, w_start, w_end, upper_bound=False): + if space.is_none(w_start): + start = 0 + elif upper_bound: + start = adapt_bound(space, size, w_start) + else: + start = adapt_lower_bound(space, size, w_start) + + if space.is_none(w_end): + end = size + elif upper_bound: + end = adapt_bound(space, size, w_end) + else: + end = adapt_lower_bound(space, size, w_end) + return start, end def normalize_simple_slice(space, length, w_start, w_stop): - """Helper for the {get,set,del}slice multimethod implementations.""" + """Helper for the {get,set,del}slice implementations.""" # this returns a pair (start, stop) which is usable for slicing # a sequence of the given length in the most friendly way, i.e. # guaranteeing that 0 <= start <= stop <= length. @@ -103,45 +249,3 @@ if start > length: start = length return start, stop - - -repr__Slice = gateway.applevel(""" - def repr__Slice(aslice): - return 'slice(%r, %r, %r)' % (aslice.start, aslice.stop, aslice.step) -""", filename=__file__).interphook("repr__Slice") - -def eq__Slice_Slice(space, w_slice1, w_slice2): - # We need this because CPython considers that slice1 == slice1 - # is *always* True (e.g. 
even if slice1 was built with non-comparable - # parameters - if space.is_w(w_slice1, w_slice2): - return space.w_True - if space.eq_w(w_slice1.w_start, w_slice2.w_start) and \ - space.eq_w(w_slice1.w_stop, w_slice2.w_stop) and \ - space.eq_w(w_slice1.w_step, w_slice2.w_step): - return space.w_True - else: - return space.w_False - -def lt__Slice_Slice(space, w_slice1, w_slice2): - if space.is_w(w_slice1, w_slice2): - return space.w_False # see comments in eq__Slice_Slice() - if space.eq_w(w_slice1.w_start, w_slice2.w_start): - if space.eq_w(w_slice1.w_stop, w_slice2.w_stop): - return space.lt(w_slice1.w_step, w_slice2.w_step) - else: - return space.lt(w_slice1.w_stop, w_slice2.w_stop) - else: - return space.lt(w_slice1.w_start, w_slice2.w_start) - -# indices impl - -def slice_indices__Slice_ANY(space, w_slice, w_length): - length = space.getindex_w(w_length, space.w_OverflowError) - start, stop, step = w_slice.indices3(space, length) - return space.newtuple([space.wrap(start), space.wrap(stop), - space.wrap(step)]) - -# register all methods -from pypy.objspace.std import slicetype -register_all(vars(), slicetype) diff --git a/pypy/objspace/std/slicetype.py b/pypy/objspace/std/slicetype.py deleted file mode 100644 --- a/pypy/objspace/std/slicetype.py +++ /dev/null @@ -1,122 +0,0 @@ -from pypy.interpreter import baseobjspace, gateway -from pypy.interpreter.typedef import GetSetProperty -from pypy.objspace.std.stdtypedef import StdTypeDef, SMM -from pypy.objspace.std.register_all import register_all -from pypy.interpreter.error import OperationError -from rpython.rlib.objectmodel import specialize - -# indices multimehtod -slice_indices = SMM('indices', 2, - doc='S.indices(len) -> (start, stop, stride)\n\nAssuming a' - ' sequence of length len, calculate the start and' - ' stop\nindices, and the stride length of the extended' - ' slice described by\nS. 
Out of bounds indices are' - ' clipped in a manner consistent with the\nhandling of' - ' normal slices.') - -# utility functions -def _eval_slice_index(space, w_int): - # note that it is the *callers* responsibility to check for w_None - # otherwise you can get funny error messages - try: - return space.getindex_w(w_int, None) # clamp if long integer too large - except OperationError, err: - if not err.match(space, space.w_TypeError): - raise - raise OperationError(space.w_TypeError, - space.wrap("slice indices must be integers or " - "None or have an __index__ method")) - -def adapt_lower_bound(space, size, w_index): - index = _eval_slice_index(space, w_index) - if index < 0: - index = index + size - if index < 0: - index = 0 - assert index >= 0 - return index - -def adapt_bound(space, size, w_index): - index = adapt_lower_bound(space, size, w_index) - if index > size: - index = size - assert index >= 0 - return index - - at specialize.arg(4) -def unwrap_start_stop(space, size, w_start, w_end, upper_bound=False): - if space.is_none(w_start): - start = 0 - elif upper_bound: - start = adapt_bound(space, size, w_start) - else: - start = adapt_lower_bound(space, size, w_start) - - if space.is_none(w_end): - end = size - elif upper_bound: - end = adapt_bound(space, size, w_end) - else: - end = adapt_lower_bound(space, size, w_end) - return start, end - -register_all(vars(), globals()) - -# ____________________________________________________________ - -def descr__new__(space, w_slicetype, args_w): - from pypy.objspace.std.sliceobject import W_SliceObject - w_start = space.w_None - w_stop = space.w_None - w_step = space.w_None - if len(args_w) == 1: - w_stop, = args_w - elif len(args_w) == 2: - w_start, w_stop = args_w - elif len(args_w) == 3: - w_start, w_stop, w_step = args_w - elif len(args_w) > 3: - raise OperationError(space.w_TypeError, - space.wrap("slice() takes at most 3 arguments")) - else: - raise OperationError(space.w_TypeError, - space.wrap("slice() takes at least 1 argument")) - w_obj = space.allocate_instance(W_SliceObject, w_slicetype) - W_SliceObject.__init__(w_obj, w_start, w_stop, w_step) - return w_obj - -def descr__reduce__(space, w_self): - from pypy.objspace.std.sliceobject import W_SliceObject - assert isinstance(w_self, W_SliceObject) - return space.newtuple([ - space.type(w_self), - space.newtuple([w_self.w_start, - w_self.w_stop, - w_self.w_step]), - ]) - -# ____________________________________________________________ - -def slicewprop(name): - def fget(space, w_obj): - from pypy.objspace.std.sliceobject import W_SliceObject - if not isinstance(w_obj, W_SliceObject): - raise OperationError(space.w_TypeError, - space.wrap("descriptor is for 'slice'")) - return getattr(w_obj, name) - return GetSetProperty(fget) - - -slice_typedef = StdTypeDef("slice", - __doc__ = '''slice([start,] stop[, step]) - -Create a slice object. This is used for extended slicing (e.g. 
a[0:10:2]).''', - __new__ = gateway.interp2app(descr__new__), - __hash__ = None, - __reduce__ = gateway.interp2app(descr__reduce__), - start = slicewprop('w_start'), - stop = slicewprop('w_stop'), - step = slicewprop('w_step'), - ) -slice_typedef.acceptable_as_base_class = False -slice_typedef.registermethods(globals()) diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -7,8 +7,8 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import WrappedDefault, unwrap_spec -from pypy.objspace.std import slicetype -from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice +from pypy.objspace.std.sliceobject import (W_SliceObject, unwrap_start_stop, + normalize_simple_slice) class StringMethods(object): @@ -24,8 +24,8 @@ def _convert_idx_params(self, space, w_start, w_end, upper_bound=False): value = self._val(space) lenself = len(value) - start, end = slicetype.unwrap_start_stop( - space, lenself, w_start, w_end, upper_bound=upper_bound) + start, end = unwrap_start_stop(space, lenself, w_start, w_end, + upper_bound=upper_bound) return (value, start, end) def descr_len(self, space): diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py --- a/pypy/objspace/std/tupleobject.py +++ b/pypy/objspace/std/tupleobject.py @@ -6,8 +6,8 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import ( WrappedDefault, interp2app, interpindirect2app, unwrap_spec) -from pypy.objspace.std import slicetype -from pypy.objspace.std.sliceobject import W_SliceObject, normalize_simple_slice +from pypy.objspace.std.sliceobject import (W_SliceObject, unwrap_start_stop, + normalize_simple_slice) from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.util import negate from rpython.rlib import jit @@ -202,8 +202,7 @@ tuple """ length = self.length() - start, stop = slicetype.unwrap_start_stop(space, length, w_start, - w_stop) + start, stop = unwrap_start_stop(space, length, w_start, w_stop) for i in range(start, min(stop, length)): w_item = self.tolist()[i] if space.eq_w(w_item, w_obj): From noreply at buildbot.pypy.org Mon Feb 24 15:30:55 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 15:30:55 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Integer unwrapping should be done with space.int_w(w_obj) instead of w_obj.intval, same for float. Thanks cfbolz for noticing. Message-ID: <20140224143055.19AA11C0132@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69355:5f0f8311bd7e Date: 2014-02-24 15:30 +0100 http://bitbucket.org/pypy/pypy/changeset/5f0f8311bd7e/ Log: Integer unwrapping should be done with space.int_w(w_obj) instead of w_obj.intval, same for float. Thanks cfbolz for noticing. 
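
    As a quick illustration of the convention named in this log message (not
    part of the commit below): a minimal interp-level sketch. The helper name
    _unwrap_number and its error message are made-up placeholders; the point
    is simply that unwrapping should go through space.int_w()/space.float_w()
    rather than reaching into w_obj.intval or w_obj.floatval directly.

        from pypy.interpreter.error import OperationError

        def _unwrap_number(space, w_obj):
            # Unwrap via the object-space API so that any W_Root
            # implementation is handled uniformly, instead of relying on
            # W_IntObject.intval / W_FloatObject.floatval attributes.
            if space.isinstance_w(w_obj, space.w_int):
                return space.int_w(w_obj)
            if space.isinstance_w(w_obj, space.w_float):
                return space.float_w(w_obj)
            raise OperationError(space.w_TypeError,
                                 space.wrap("expected an int or a float"))
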
diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -278,12 +278,12 @@ if isinstance(w_obj, W_ComplexObject): return w_obj if space.isinstance_w(w_obj, space.w_int): - return W_ComplexObject(w_obj.intval, 0.0) + return W_ComplexObject(space.int_w(w_obj), 0.0) if space.isinstance_w(w_obj, space.w_long): dval = w_obj.tofloat(space) return W_ComplexObject(dval, 0.0) if space.isinstance_w(w_obj, space.w_float): - return W_ComplexObject(w_obj.floatval, 0.0) + return W_ComplexObject(space.float_w(w_obj), 0.0) @staticmethod @unwrap_spec(w_real=WrappedDefault(0.0)) diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -104,7 +104,7 @@ return space.newbool(op(self.floatval, w_other.floatval)) if space.isinstance_w(w_other, space.w_int): f1 = self.floatval - i2 = w_other.intval + i2 = space.int_w(w_other) f2 = float(i2) if LONG_BIT > 32 and int(f2) != i2: res = do_compare_bigint(f1, rbigint.fromint(i2)) @@ -350,7 +350,7 @@ if isinstance(w_obj, W_FloatObject): return w_obj if space.isinstance_w(w_obj, space.w_int): - return W_FloatObject(float(w_obj.intval)) + return W_FloatObject(float(space.int_w(w_obj))) if space.isinstance_w(w_obj, space.w_long): return W_FloatObject(w_obj.tofloat(space)) From noreply at buildbot.pypy.org Mon Feb 24 16:17:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Feb 2014 16:17:13 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: synchronize_overflow_object_now(). Message-ID: <20140224151713.D78331C02EA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r830:f661c584decd Date: 2014-02-24 16:17 +0100 http://bitbucket.org/pypy/stmgc/changeset/f661c584decd/ Log: synchronize_overflow_object_now(). diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -171,6 +171,7 @@ assert(list_is_empty(STM_PSEGMENT->modified_old_objects)); assert(STM_PSEGMENT->old_objects_pointing_to_nursery == NULL); assert(STM_PSEGMENT->overflow_objects_pointing_to_nursery == NULL); + assert(STM_PSEGMENT->large_overflow_objects == NULL); #ifdef STM_TESTS check_nursery_at_transaction_start(); @@ -214,6 +215,60 @@ })); } +static void synchronize_overflow_object_now(object_t *obj) +{ + assert(!_is_in_nursery(obj)); + assert((obj->stm_flags & GCFLAG_SMALL_UNIFORM) == 0); + + char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)realobj); + uintptr_t start = (uintptr_t)obj; + uintptr_t end = start + obj_size; + uintptr_t first_page = start / 4096UL; + uintptr_t last_page = (end - 1) / 4096UL; + + do { + if (flag_page_private[first_page] != SHARED_PAGE) { + /* The page is a PRIVATE_PAGE. We need to diffuse this fragment + of our object from our own segment to all other segments. 
*/ + + uintptr_t copy_size; + if (first_page == last_page) { + /* this is the final fragment */ + copy_size = end - start; + } + else { + /* this is a non-final fragment, going up to the page's end */ + copy_size = 4096 - (start & 4095); + } + + /* double-check that the result fits in one page */ + assert(copy_size > 0); + assert(copy_size + (start & 4095) <= 4096); + + long i; + char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); + for (i = 0; i < NB_SEGMENTS; i++) { + if (i != STM_SEGMENT->segment_num) { + char *dst = REAL_ADDRESS(get_segment_base(i), start); + memcpy(dst, src, copy_size); + } + } + } + + start = (start + 4096) & ~4095; + } while (first_page++ < last_page); +} + +static void push_overflow_objects_from_privatized_pages(void) +{ + if (STM_PSEGMENT->large_overflow_objects == NULL) + return; + + LIST_FOREACH_R(STM_PSEGMENT->large_overflow_objects, object_t *, + synchronize_overflow_object_now(item)); +} + static void push_modified_to_other_segments(void) { long remote_num = 1 - STM_SEGMENT->segment_num; @@ -261,6 +316,7 @@ /* reset these lists to NULL for the next transaction */ LIST_FREE(STM_PSEGMENT->old_objects_pointing_to_nursery); LIST_FREE(STM_PSEGMENT->overflow_objects_pointing_to_nursery); + LIST_FREE(STM_PSEGMENT->large_overflow_objects); stm_thread_local_t *tl = STM_SEGMENT->running_thread; release_thread_segment(tl); @@ -292,7 +348,10 @@ assert(STM_PSEGMENT->transaction_state != TS_MUST_ABORT); STM_SEGMENT->jmpbuf_ptr = NULL; - /* copy modified object versions to other threads */ + /* synchronize overflow objects living in privatized pages */ + push_overflow_objects_from_privatized_pages(); + + /* synchronize modified old objects to other threads */ push_modified_to_other_segments(); /* update 'overflow_number' if needed */ diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -82,6 +82,10 @@ collection so far. */ struct list_s *overflow_objects_pointing_to_nursery; + /* List of all large, overflowed objects. Only non-NULL after the + current transaction spanned a minor collection. */ + struct list_s *large_overflow_objects; + /* Start time: to know approximately for how long a transaction has been running, in contention management */ uint64_t start_time; @@ -108,6 +112,9 @@ only accessed when we hold the mutex. */ uint8_t transaction_state; + /* Temp for minor collection */ + bool minor_collect_will_commit_now; + /* In case of abort, we restore the 'shadowstack' field. 
*/ object_t **shadowstack_at_start_of_transaction; }; @@ -190,3 +197,5 @@ assert(!"commit: bad transaction_state"); } } + +static void synchronize_overflow_object_now(object_t *obj); diff --git a/c7/stm/list.h b/c7/stm/list.h --- a/c7/stm/list.h +++ b/c7/stm/list.h @@ -40,7 +40,7 @@ return (lst->count == 0); } -static inline bool list_count(struct list_s *lst) +static inline uintptr_t list_count(struct list_s *lst) { return lst->count; } diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -53,20 +53,6 @@ #define GCWORD_MOVED ((object_t *) -42) -static inline void minor_copy_in_page_to_other_segments(uintptr_t p, - size_t size) -{ - uintptr_t dataofs = (char *)p - stm_object_pages; - assert((dataofs & 4095) + size <= 4096); /* fits in one page */ - - if (flag_page_private[dataofs / 4096UL] != SHARED_PAGE) { - long i; - for (i = 1; i < NB_SEGMENTS; i++) { - memcpy(get_segment_base(i) + dataofs, (char *)p, size); - } - } -} - static void minor_trace_if_young(object_t **pobj) { /* takes a normal pointer to a thread-local pointer to an object */ @@ -104,6 +90,11 @@ /* Copy the object */ char *realnobj = REAL_ADDRESS(STM_SEGMENT->segment_base, nobj); memcpy(realnobj, realobj, size); + + if (STM_PSEGMENT->minor_collect_will_commit_now) + synchronize_overflow_object_now(nobj); + else + LIST_APPEND(STM_PSEGMENT->large_overflow_objects, nobj); } else { /* case "small enough" */ @@ -181,8 +172,11 @@ dprintf(("minor_collection commit=%d\n", (int)commit)); + STM_PSEGMENT->minor_collect_will_commit_now = commit; if (STM_PSEGMENT->old_objects_pointing_to_nursery == NULL) STM_PSEGMENT->old_objects_pointing_to_nursery = list_create(); + if (!commit && STM_PSEGMENT->large_overflow_objects == NULL) + STM_PSEGMENT->large_overflow_objects = list_create(); collect_roots_in_nursery(); diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -92,6 +92,7 @@ void *localpg = stm_object_pages + localpgoff * 4096UL; void *otherpg = stm_object_pages + otherpgoff * 4096UL; + memset(flag_page_private + pagenum, PRIVATE_PAGE, count); d_remap_file_pages(localpg, count * 4096, pgoff2); uintptr_t i; if (full) { @@ -104,8 +105,6 @@ if (count > 1) pagecopy(localpg + 4096 * (count-1), otherpg + 4096 * (count-1)); } - write_fence(); - memset(flag_page_private + pagenum, PRIVATE_PAGE, count); } static void _pages_privatize(uintptr_t pagenum, uintptr_t count, bool full) @@ -143,7 +142,7 @@ } #if 0 -static bool is_in_shared_pages(object_t *obj) +static bool is_fully_in_shared_pages(object_t *obj) { uintptr_t first_page = ((uintptr_t)obj) / 4096UL; @@ -155,9 +154,11 @@ uintptr_t last_page = (((uintptr_t)obj) + obj_size - 1) / 4096UL; - while (first_page <= last_page) + do { if (flag_page_private[first_page++] != SHARED_PAGE) return false; + } while (first_page <= last_page); + return true; } #endif diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -7,10 +7,9 @@ physical page (the one that is within the segment 0 mmap address). */ SHARED_PAGE, - /* Page being in the process of privatization */ - REMAPPING_PAGE, - - /* Page is private for each segment. */ + /* Page is private for each segment. If we obtain this value outside + a mutex_pages_lock(), there might be a race: the value can say + PRIVATE_PAGE before the page is really un-shared. 
*/ PRIVATE_PAGE, }; @@ -33,4 +32,4 @@ static void mutex_pages_lock(void); static void mutex_pages_unlock(void); -//static bool is_in_shared_pages(object_t *obj); +//static bool is_fully_in_shared_pages(object_t *obj); -- not needed? diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -54,6 +54,7 @@ pr->pub.segment_num = i; pr->pub.segment_base = segment_base; pr->overflow_objects_pointing_to_nursery = NULL; + pr->large_overflow_objects = NULL; pr->modified_old_objects = list_create(); pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * (i + 1); highest_overflow_number = pr->overflow_number; @@ -85,6 +86,7 @@ for (i = 0; i < NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pr = get_priv_segment(i); assert(pr->overflow_objects_pointing_to_nursery == NULL); + assert(pr->large_overflow_objects == NULL); assert(pr->old_objects_pointing_to_nursery == NULL); list_free(pr->modified_old_objects); } diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -180,6 +180,7 @@ lpy = stm_allocate(16) stm_set_char(lpy, 'y') self.push_root(lpy) + assert modified_old_objects() == [] self.commit_transaction() lpy = self.pop_root() From noreply at buildbot.pypy.org Mon Feb 24 16:18:34 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Feb 2014 16:18:34 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: test_basic passes again Message-ID: <20140224151834.590731C08B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r831:e6165529e9fb Date: 2014-02-24 16:18 +0100 http://bitbucket.org/pypy/stmgc/changeset/e6165529e9fb/ Log: test_basic passes again diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -395,7 +395,10 @@ lp1 = stm_allocate(4104) stm_set_char(lp1, '0') stm_set_char(lp1, '1', offset=4103) + self.push_root(lp1) self.commit_transaction() + lp1 = self.pop_root() + self.push_root(lp1) # self.start_transaction() stm_set_char(lp1, 'a') From noreply at buildbot.pypy.org Mon Feb 24 16:25:03 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Feb 2014 16:25:03 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Update demo2.c, but it still fails probably because of a core bug Message-ID: <20140224152503.90D8C1C08B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r832:894bb43b4c0a Date: 2014-02-24 16:24 +0100 http://bitbucket.org/pypy/stmgc/changeset/894bb43b4c0a/ Log: Update demo2.c, but it still fails probably because of a core bug diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -158,11 +158,15 @@ w_prev = w_newnode; } - //_stm_minor_collect(); /* hack.. 
*/ - //POP_ROOT(global_chained_list); --- remains in the shadowstack + POP_ROOT(global_chained_list); /* update value */ + assert(global_chained_list->value == -1); + PUSH_ROOT(global_chained_list); stm_commit_transaction(); + POP_ROOT(global_chained_list); /* update value */ + assert(global_chained_list->value == -1); + PUSH_ROOT(global_chained_list); /* remains forever in the shadow stack */ printf("setup ok\n"); } @@ -175,11 +179,13 @@ { int status; stm_register_thread_local(&stm_thread_local); + PUSH_ROOT(global_chained_list); /* remains forever in the shadow stack */ while (check_sorted() == -1) { bubble_run(); } + POP_ROOT(global_chained_list); assert(stm_thread_local.shadowstack == stm_thread_local.shadowstack_base); stm_unregister_thread_local(&stm_thread_local); status = sem_post(&done); assert(status == 0); From noreply at buildbot.pypy.org Mon Feb 24 16:40:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Feb 2014 16:40:17 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Passing test Message-ID: <20140224154017.CF5E01C0132@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r833:5434c22f9999 Date: 2014-02-24 16:40 +0100 http://bitbucket.org/pypy/stmgc/changeset/5434c22f9999/ Log: Passing test diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -164,9 +164,11 @@ stm_commit_transaction(); + stm_start_inevitable_transaction(&stm_thread_local); POP_ROOT(global_chained_list); /* update value */ assert(global_chained_list->value == -1); PUSH_ROOT(global_chained_list); /* remains forever in the shadow stack */ + stm_commit_transaction(); printf("setup ok\n"); } diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -13,15 +13,9 @@ return REAL_ADDRESS(STM_SEGMENT->segment_base, o); } -object_t *_stm_segment_address(char *ptr) +char *_stm_get_segment_base(long index) { - if (ptr == NULL) - return NULL; - - uintptr_t res = ptr - STM_SEGMENT->segment_base; - assert(FIRST_OBJECT_PAGE * 4096UL <= res - && res < NB_PAGES * 4096UL); - return (object_t*)res; + return get_segment_base(index); } struct stm_priv_segment_info_s *_stm_segment(void) diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -74,7 +74,7 @@ bool _stm_in_nursery(object_t *obj); bool _stm_in_transaction(stm_thread_local_t *tl); char *_stm_real_address(object_t *o); -object_t *_stm_segment_address(char *ptr); +char *_stm_get_segment_base(long index); void _stm_test_switch(stm_thread_local_t *tl); object_t *_stm_allocate_old(ssize_t size_rounded_up); void _stm_large_dump(void); diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -55,7 +55,7 @@ bool _stm_was_written(object_t *obj); bool _stm_in_nursery(object_t *obj); char *_stm_real_address(object_t *obj); -object_t *_stm_segment_address(char *ptr); +char *_stm_get_segment_base(long index); bool _stm_in_transaction(stm_thread_local_t *tl); void _stm_test_switch(stm_thread_local_t *tl); @@ -259,11 +259,6 @@ HDR = lib.SIZEOF_MYOBJ assert HDR == 8 -# from nursery.c -SOME_MEDIUM_SIZE = 32*1024 - 48 -SOME_LARGE_SIZE = 100*1024 - 48 -NURSERY_SECTION_SIZE = 128*1024 - class Conflict(Exception): pass @@ -317,9 +312,6 @@ def stm_get_real_address(obj): return lib._stm_real_address(ffi.cast('object_t*', obj)) -def stm_get_segment_address(ptr): - return int(ffi.cast('uintptr_t', lib._stm_segment_address(ptr))) - def stm_read(o): lib.stm_read(o) @@ -478,3 +470,9 @@ def push_root_no_gc(self): "Pushes 
an invalid object, to crash in case the GC is called" self.push_root(ffi.cast("object_t *", -1)) + + def check_char_everywhere(self, obj, expected_content, offset=HDR): + for i in range(len(self.tls)): + addr = lib._stm_get_segment_base(i) + content = addr[int(ffi.cast("uintptr_t", obj)) + offset] + assert content == expected_content diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -425,6 +425,15 @@ self.abort_transaction() py.test.raises(EmptyStack, self.pop_root) + def test_check_content_after_commit(self): + self.start_transaction() + lp1 = stm_allocate(16) + stm_set_char(lp1, 'X') + self.push_root(lp1) + self.commit_transaction() + lp1 = self.pop_root() + self.check_char_everywhere(lp1, 'X') + # def test_resolve_write_write_no_conflict(self): # self.start_transaction() # p1 = stm_allocate(16) From noreply at buildbot.pypy.org Mon Feb 24 16:42:58 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Mon, 24 Feb 2014 16:42:58 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: no MEDIUM_SIZE anymore Message-ID: <20140224154258.BDA111C03FC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r834:b19e2928004f Date: 2014-02-24 16:43 +0100 http://bitbucket.org/pypy/stmgc/changeset/b19e2928004f/ Log: no MEDIUM_SIZE anymore diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -350,7 +350,8 @@ def op_allocate(ex, global_state, thread_state): size = global_state.rnd.choice([ "16", - "SOME_MEDIUM_SIZE+16", + str(4096+16), + #"SOME_MEDIUM_SIZE+16", #"SOME_LARGE_SIZE+16", ]) r = global_state.get_new_root_name(False, size) From noreply at buildbot.pypy.org Mon Feb 24 16:46:53 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Mon, 24 Feb 2014 16:46:53 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: fix Message-ID: <20140224154653.46BF41C03FC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r835:bc9b7085aa1b Date: 2014-02-24 16:45 +0100 http://bitbucket.org/pypy/stmgc/changeset/bc9b7085aa1b/ Log: fix diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -506,7 +506,7 @@ "self.switch", new_thread_state.num)) if conflicts: new_thread_state.abort_transaction() - else: + elif trs: new_thread_state.pop_roots(ex) new_thread_state.reload_roots(ex) From noreply at buildbot.pypy.org Mon Feb 24 16:49:40 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Feb 2014 16:49:40 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Add the shadowstack Message-ID: <20140224154940.D30E21C0132@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r836:62c2f89f90b9 Date: 2014-02-24 16:49 +0100 http://bitbucket.org/pypy/stmgc/changeset/62c2f89f90b9/ Log: Add the shadowstack diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -21,8 +21,23 @@ __thread stm_thread_local_t stm_thread_local; -#define PUSH_ROOT(p) (void)0 // XXX... -#define POP_ROOT(p) (void)0 // XXX... 
+#define PUSH_ROOT(p) (*stm_thread_local.shadowstack++ = (object_t *)(p)) +#define POP_ROOT(p) ((p) = (typeof(p))*--stm_thread_local.shadowstack) + +void init_shadow_stack(void) +{ + object_t **s = (object_t **)malloc(1000 * sizeof(object_t *)); + assert(s); + stm_thread_local.shadowstack = s; + stm_thread_local.shadowstack_base = s; +} + +void done_shadow_stack(void) +{ + free(stm_thread_local.shadowstack); + stm_thread_local.shadowstack = NULL; + stm_thread_local.shadowstack_base = NULL; +} ssize_t stmcb_size_rounded_up(struct object_s *ob) @@ -181,6 +196,7 @@ { int status; stm_register_thread_local(&stm_thread_local); + init_shadow_stack(); PUSH_ROOT(global_chained_list); /* remains forever in the shadow stack */ while (check_sorted() == -1) { @@ -189,6 +205,7 @@ POP_ROOT(global_chained_list); assert(stm_thread_local.shadowstack == stm_thread_local.shadowstack_base); + done_shadow_stack(); stm_unregister_thread_local(&stm_thread_local); status = sem_post(&done); assert(status == 0); return NULL; @@ -230,6 +247,7 @@ stm_setup(); stm_register_thread_local(&stm_thread_local); + init_shadow_stack(); setup_list(); @@ -241,6 +259,7 @@ final_check(); + done_shadow_stack(); stm_unregister_thread_local(&stm_thread_local); stm_teardown(); From noreply at buildbot.pypy.org Mon Feb 24 17:12:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Feb 2014 17:12:08 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Forgot to set 'overflow_number' when doing a non-commit-time nursery collection Message-ID: <20140224161208.4BB331C0132@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r837:59bc4d2dd305 Date: 2014-02-24 17:11 +0100 http://bitbucket.org/pypy/stmgc/changeset/59bc4d2dd305/ Log: Forgot to set 'overflow_number' when doing a non-commit-time nursery collection diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -101,6 +101,11 @@ abort(); //... } + assert((nobj->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) == 0); + if (!STM_PSEGMENT->minor_collect_will_commit_now) { + nobj->stm_flags |= STM_PSEGMENT->overflow_number; + } + /* Done copying the object. 
*/ //dprintf(("%p -> %p\n", obj, nobj)); pforwarded_array[0] = GCWORD_MOVED; From noreply at buildbot.pypy.org Mon Feb 24 17:12:09 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Feb 2014 17:12:09 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Fix _has_mutex() to actually work if there are several threads running Message-ID: <20140224161209.5412E1C0132@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r838:707536c3c87f Date: 2014-02-24 17:11 +0100 http://bitbucket.org/pypy/stmgc/changeset/707536c3c87f/ Log: Fix _has_mutex() to actually work if there are several threads running diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -45,6 +45,14 @@ memset(&sync_ctl, 0, sizeof(sync_ctl.in_use)); } +#ifndef NDEBUG +__thread bool _has_mutex_here; +static inline bool _has_mutex(void) +{ + return _has_mutex_here; +} +#endif + static void set_gs_register(char *value) { if (UNLIKELY(syscall(SYS_arch_prctl, ARCH_SET_GS, (uint64_t)value) != 0)) @@ -53,8 +61,10 @@ static inline void mutex_lock(void) { + assert(!_has_mutex_here); if (UNLIKELY(pthread_mutex_lock(&sync_ctl.global_mutex) != 0)) stm_fatalerror("pthread_mutex_lock: %m\n"); + assert((_has_mutex_here = true, 1)); if (STM_PSEGMENT->transaction_state == TS_MUST_ABORT) abort_with_mutex(); @@ -65,19 +75,10 @@ assert(STM_PSEGMENT->safe_point == SP_NO_TRANSACTION || STM_PSEGMENT->safe_point == SP_RUNNING); + assert(_has_mutex_here); if (UNLIKELY(pthread_mutex_unlock(&sync_ctl.global_mutex) != 0)) stm_fatalerror("pthread_mutex_unlock: %m\n"); -} - -static inline bool _has_mutex(void) -{ - if (pthread_mutex_trylock(&sync_ctl.global_mutex) == EBUSY) { - return true; - } - else { - pthread_mutex_unlock(&sync_ctl.global_mutex); - return false; - } + assert((_has_mutex_here = false, 1)); } static inline void cond_wait(void) @@ -87,6 +88,7 @@ abort(); #endif + assert(_has_mutex_here); if (UNLIKELY(pthread_cond_wait(&sync_ctl.global_cond, &sync_ctl.global_mutex) != 0)) stm_fatalerror("pthread_cond_wait: %m\n"); diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -8,7 +8,9 @@ static void mutex_unlock(void); static void cond_wait(void); static void cond_broadcast(void); +#ifndef NDEBUG static bool _has_mutex(void); +#endif /* acquire and release one of the segments for running the given thread (must have the mutex acquired!) */ From noreply at buildbot.pypy.org Mon Feb 24 17:14:41 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Feb 2014 17:14:41 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Fix non-debug compilation Message-ID: <20140224161441.C07AB1C02EA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r839:23079fb72f0c Date: 2014-02-24 17:14 +0100 http://bitbucket.org/pypy/stmgc/changeset/23079fb72f0c/ Log: Fix non-debug compilation diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -22,6 +22,7 @@ __sync_lock_release(&pages_ctl.mutex_pages); } +static bool _has_mutex_pages(void) __attribute__((unused)); static bool _has_mutex_pages(void) { return pages_ctl.mutex_pages != 0; From noreply at buildbot.pypy.org Mon Feb 24 17:59:13 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Feb 2014 17:59:13 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: fix the test that now passes. 
Message-ID: <20140224165913.0865A1C0132@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r842:568d7210f985 Date: 2014-02-24 17:58 +0100 http://bitbucket.org/pypy/stmgc/changeset/568d7210f985/ Log: fix the test that now passes. diff --git a/c7/test/test_nursery.py b/c7/test/test_nursery.py --- a/c7/test/test_nursery.py +++ b/c7/test/test_nursery.py @@ -55,26 +55,21 @@ stm_set_ref(prev, 0, lp3) assert modified_old_objects() == [] # only 1 transaction - ovf_o = overflow_objects_pointing_to_nursery() - old_o = old_objects_pointing_to_nursery() + opn = objects_pointing_to_nursery() if i < FIT: - assert ovf_o is None # no minor collection so far - assert old_o is None # no minor collection so far + assert opn is None # no minor collection so far else: - assert len(ovf_o) == 1 - assert old_o == [] + assert len(opn) == 1 prevprev = prev prev = lp3 lp1 = self.pop_root() - assert modified_objects() == [] + assert modified_old_objects() == [] lp2 = lp1 for i in range(N): assert lp2 - assert stm_creation_marker(lp2) == (0xff if is_in_nursery(lp2) - else 0x01) prev = lp2 lp2 = stm_get_ref(lp2, 0) assert lp2 == lp3 From noreply at buildbot.pypy.org Mon Feb 24 17:59:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Feb 2014 17:59:12 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Unify overflow_objects_pointing_to_nursery and Message-ID: <20140224165912.0123E1C0132@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r841:2286fb4f0127 Date: 2014-02-24 17:57 +0100 http://bitbucket.org/pypy/stmgc/changeset/2286fb4f0127/ Log: Unify overflow_objects_pointing_to_nursery and old_objects_pointing_to_nursery: unless I'm wrong, we can merge the two lists. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -18,10 +18,10 @@ if ((obj->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) == STM_PSEGMENT->overflow_number) { - dprintf_test(("write_slowpath %p -> ovf_obj\n", obj)); + dprintf_test(("write_slowpath %p -> ovf obj_to_nurs\n", obj)); obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; - assert(STM_PSEGMENT->overflow_objects_pointing_to_nursery != NULL); - LIST_APPEND(STM_PSEGMENT->overflow_objects_pointing_to_nursery, obj); + assert(STM_PSEGMENT->objects_pointing_to_nursery != NULL); + LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); return; } @@ -32,7 +32,7 @@ /* claim the write-lock for this object. In case we're running the same transaction since a long while, the object can be already in 'modified_old_objects' (but, because it had GCFLAG_WRITE_BARRIER, - not in 'old_objects_pointing_to_nursery'). We'll detect this case + not in 'objects_pointing_to_nursery'). We'll detect this case by finding that we already own the write-lock. */ uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - WRITELOCK_START; uint8_t lock_num = STM_PSEGMENT->write_lock_num; @@ -77,7 +77,7 @@ } } else if (write_locks[lock_idx] == lock_num) { - OPT_ASSERT(STM_PSEGMENT->old_objects_pointing_to_nursery != NULL); + OPT_ASSERT(STM_PSEGMENT->objects_pointing_to_nursery != NULL); #ifdef STM_TESTS bool found = false; LIST_FOREACH_R(STM_PSEGMENT->modified_old_objects, object_t *, @@ -97,11 +97,11 @@ } /* A common case for write_locks[] that was either 0 or lock_num: - we need to add the object to 'old_objects_pointing_to_nursery' + we need to add the object to 'objects_pointing_to_nursery' if there is such a list. 
*/ - if (STM_PSEGMENT->old_objects_pointing_to_nursery != NULL) { - dprintf_test(("write_slowpath %p -> old_obj_pointing_to_nurs\n", obj)); - LIST_APPEND(STM_PSEGMENT->old_objects_pointing_to_nursery, obj); + if (STM_PSEGMENT->objects_pointing_to_nursery != NULL) { + dprintf_test(("write_slowpath %p -> old obj_to_nurs\n", obj)); + LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); } /* add the write-barrier-already-called flag ONLY if we succeeded in @@ -173,8 +173,7 @@ } assert(list_is_empty(STM_PSEGMENT->modified_old_objects)); - assert(STM_PSEGMENT->old_objects_pointing_to_nursery == NULL); - assert(STM_PSEGMENT->overflow_objects_pointing_to_nursery == NULL); + assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL); assert(STM_PSEGMENT->large_overflow_objects == NULL); #ifdef STM_TESTS @@ -318,8 +317,7 @@ STM_PSEGMENT->transaction_state = TS_NONE; /* reset these lists to NULL for the next transaction */ - LIST_FREE(STM_PSEGMENT->old_objects_pointing_to_nursery); - LIST_FREE(STM_PSEGMENT->overflow_objects_pointing_to_nursery); + LIST_FREE(STM_PSEGMENT->objects_pointing_to_nursery); LIST_FREE(STM_PSEGMENT->large_overflow_objects); stm_thread_local_t *tl = STM_SEGMENT->running_thread; @@ -332,6 +330,9 @@ assert(!_has_mutex()); assert(STM_PSEGMENT->safe_point == SP_RUNNING); + bool has_any_overflow_object = + (STM_PSEGMENT->objects_pointing_to_nursery != NULL); + minor_collection(/*commit=*/ true); mutex_lock(); @@ -359,7 +360,7 @@ push_modified_to_other_segments(); /* update 'overflow_number' if needed */ - if (STM_PSEGMENT->overflow_objects_pointing_to_nursery != NULL) { + if (has_any_overflow_object) { highest_overflow_number += GCFLAG_OVERFLOW_NUMBER_bit0; STM_PSEGMENT->overflow_number = highest_overflow_number; } diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -67,20 +67,15 @@ that need to be copied to other segments upon commit. */ struct list_s *modified_old_objects; - /* List of the modified old objects that may point to the nursery. - If the current transaction didn't span a minor collection so far, - this is NULL, understood as meaning implicitly "this is the same - as 'modified_old_objects'". Otherwise, this list is a subset of - 'modified_old_objects'. */ - struct list_s *old_objects_pointing_to_nursery; - - /* List of overflowed objects (from the same transaction but outside - the nursery) on which the write-barrier was triggered, so that - they likely contain a pointer to a nursery object. This is used - by the GC: it's additional roots for the next minor collection. - This is NULL if the current transaction didn't span a minor - collection so far. */ - struct list_s *overflow_objects_pointing_to_nursery; + /* List of out-of-nursery objects that may contain pointers to + nursery objects. This is used to track the GC status: they + are all objects outside the nursery on which an stm_write() + occurred since the last minor collection. If there was no + minor collection yet in the current transaction, this is NULL, + understood as meaning implicitly "this is the same as + 'modified_old_objects'. This list contains exactly the + objects without GCFLAG_WRITE_BARRIER. */ + struct list_s *objects_pointing_to_nursery; /* List of all large, overflowed objects. Only non-NULL after the current transaction spanned a minor collection. 
*/ diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -48,18 +48,11 @@ return list_count(STM_PSEGMENT->modified_old_objects); } -long _stm_count_old_objects_pointing_to_nursery(void) +long _stm_count_objects_pointing_to_nursery(void) { - if (STM_PSEGMENT->old_objects_pointing_to_nursery == NULL) + if (STM_PSEGMENT->objects_pointing_to_nursery == NULL) return -1; - return list_count(STM_PSEGMENT->old_objects_pointing_to_nursery); -} - -long _stm_count_overflow_objects_pointing_to_nursery(void) -{ - if (STM_PSEGMENT->overflow_objects_pointing_to_nursery == NULL) - return -1; - return list_count(STM_PSEGMENT->overflow_objects_pointing_to_nursery); + return list_count(STM_PSEGMENT->objects_pointing_to_nursery); } object_t *_stm_enum_modified_old_objects(long index) @@ -68,15 +61,9 @@ STM_PSEGMENT->modified_old_objects, index); } -object_t *_stm_enum_old_objects_pointing_to_nursery(long index) +object_t *_stm_enum_objects_pointing_to_nursery(long index) { return (object_t *)list_item( - STM_PSEGMENT->old_objects_pointing_to_nursery, index); -} - -object_t *_stm_enum_overflow_objects_pointing_to_nursery(long index) -{ - return (object_t *)list_item( - STM_PSEGMENT->overflow_objects_pointing_to_nursery, index); + STM_PSEGMENT->objects_pointing_to_nursery, index); } #endif diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -113,7 +113,7 @@ *pobj = nobj; /* Must trace the object later */ - LIST_APPEND(STM_PSEGMENT->old_objects_pointing_to_nursery, nobj); + LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, nobj); } static void collect_roots_in_nursery(void) @@ -129,7 +129,7 @@ static void collect_oldrefs_to_nursery(void) { - struct list_s *lst = STM_PSEGMENT->old_objects_pointing_to_nursery; + struct list_s *lst = STM_PSEGMENT->objects_pointing_to_nursery; while (!list_is_empty(lst)) { object_t *obj = (object_t *)list_pop_item(lst); @@ -140,8 +140,8 @@ obj->stm_flags |= GCFLAG_WRITE_BARRIER; /* Trace the 'obj' to replace pointers to nursery with pointers - outside the nursery, possibly forcing nursery objects out - and adding them to 'old_objects_pointing_to_nursery' as well. */ + outside the nursery, possibly forcing nursery objects out and + adding them to 'objects_pointing_to_nursery' as well. */ char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); stmcb_trace((struct object_s *)realobj, &minor_trace_if_young); } @@ -167,9 +167,9 @@ /* We must move out of the nursery any object found within the nursery. All objects touched are either from the current - transaction, or are from 'old_objects_pointing_to_young'. - In all cases, we should only read and change objects belonging - to the current segment. + transaction, or are from 'modified_old_objects'. In all cases, + we should only read and change objects belonging to the current + segment. XXX improve: it might be possible to run this function in a safe-point but without the mutex, if we are careful @@ -178,8 +178,15 @@ dprintf(("minor_collection commit=%d\n", (int)commit)); STM_PSEGMENT->minor_collect_will_commit_now = commit; - if (STM_PSEGMENT->old_objects_pointing_to_nursery == NULL) - STM_PSEGMENT->old_objects_pointing_to_nursery = list_create(); + + /* All the objects we move out of the nursery become "overflow" + objects. We use the list 'objects_pointing_to_nursery' + to hold the ones we didn't trace so far. 
*/ + if (STM_PSEGMENT->objects_pointing_to_nursery == NULL) + STM_PSEGMENT->objects_pointing_to_nursery = list_create(); + + /* We need this to track the large overflow objects for a future + commit. We don't need it if we're committing now. */ if (!commit && STM_PSEGMENT->large_overflow_objects == NULL) STM_PSEGMENT->large_overflow_objects = list_create(); @@ -189,9 +196,7 @@ reset_nursery(); - assert(list_is_empty(STM_PSEGMENT->old_objects_pointing_to_nursery)); - if (!commit && STM_PSEGMENT->overflow_objects_pointing_to_nursery == NULL) - STM_PSEGMENT->overflow_objects_pointing_to_nursery = list_create(); + assert(list_is_empty(STM_PSEGMENT->objects_pointing_to_nursery)); } void stm_collect(long level) diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -53,7 +53,7 @@ pr->write_lock_num = i + 1; pr->pub.segment_num = i; pr->pub.segment_base = segment_base; - pr->overflow_objects_pointing_to_nursery = NULL; + pr->objects_pointing_to_nursery = NULL; pr->large_overflow_objects = NULL; pr->modified_old_objects = list_create(); pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * (i + 1); @@ -85,9 +85,8 @@ long i; for (i = 0; i < NB_SEGMENTS; i++) { struct stm_priv_segment_info_s *pr = get_priv_segment(i); - assert(pr->overflow_objects_pointing_to_nursery == NULL); + assert(pr->objects_pointing_to_nursery == NULL); assert(pr->large_overflow_objects == NULL); - assert(pr->old_objects_pointing_to_nursery == NULL); list_free(pr->modified_old_objects); } diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -82,11 +82,9 @@ void _stm_stop_safe_point(void); void _stm_set_nursery_free_count(uint64_t free_count); long _stm_count_modified_old_objects(void); -long _stm_count_old_objects_pointing_to_nursery(void); -long _stm_count_overflow_objects_pointing_to_nursery(void); +long _stm_count_objects_pointing_to_nursery(void); object_t *_stm_enum_modified_old_objects(long index); -object_t *_stm_enum_old_objects_pointing_to_nursery(long index); -object_t *_stm_enum_overflow_objects_pointing_to_nursery(long index); +object_t *_stm_enum_objects_pointing_to_nursery(long index); #endif #define _STM_GCFLAG_WRITE_BARRIER 0x01 diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -77,11 +77,9 @@ ssize_t stmcb_size_rounded_up(struct object_s *obj); long _stm_count_modified_old_objects(void); -long _stm_count_old_objects_pointing_to_nursery(void); -long _stm_count_overflow_objects_pointing_to_nursery(void); +long _stm_count_objects_pointing_to_nursery(void); object_t *_stm_enum_modified_old_objects(long index); -object_t *_stm_enum_old_objects_pointing_to_nursery(long index); -object_t *_stm_enum_overflow_objects_pointing_to_nursery(long index); +object_t *_stm_enum_objects_pointing_to_nursery(long index); void stm_collect(long level); """) @@ -367,17 +365,11 @@ return None return map(lib._stm_enum_modified_old_objects, range(count)) -def old_objects_pointing_to_nursery(): - count = lib._stm_count_old_objects_pointing_to_nursery() +def objects_pointing_to_nursery(): + count = lib._stm_count_objects_pointing_to_nursery() if count < 0: return None - return map(lib._stm_enum_old_objects_pointing_to_nursery, range(count)) - -def overflow_objects_pointing_to_nursery(): - count = lib._stm_count_overflow_objects_pointing_to_nursery() - if count < 0: - return None - return map(lib._stm_enum_overflow_objects_pointing_to_nursery,range(count)) + return map(lib._stm_enum_objects_pointing_to_nursery, range(count)) 
SHADOWSTACK_LENGTH = 1000 diff --git a/c7/test/test_basic.py b/c7/test/test_basic.py --- a/c7/test/test_basic.py +++ b/c7/test/test_basic.py @@ -51,8 +51,7 @@ stm_write(lp1) assert stm_was_written(lp1) assert modified_old_objects() == [] # object not old - assert old_objects_pointing_to_nursery() == None # short transac. - assert overflow_objects_pointing_to_nursery() == None # short transac. + assert objects_pointing_to_nursery() == None # short transaction self.commit_transaction() def test_allocate_old(self): @@ -89,7 +88,7 @@ assert modified_old_objects() == [] stm_write(lp1) assert modified_old_objects() == [lp1] - assert old_objects_pointing_to_nursery() == None + assert objects_pointing_to_nursery() == None assert stm_get_char(lp1) == 'a' stm_set_char(lp1, 'b') # From noreply at buildbot.pypy.org Mon Feb 24 17:59:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Feb 2014 17:59:10 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: in-progress Message-ID: <20140224165910.C75A51C0132@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r840:ebb32fe35923 Date: 2014-02-24 17:33 +0100 http://bitbucket.org/pypy/stmgc/changeset/ebb32fe35923/ Log: in-progress diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -13,12 +13,12 @@ { assert(_running_transaction()); assert(!_is_in_nursery(obj)); - dprintf(("write_slowpath %p\n", obj)); /* is this an object from the same transaction, outside the nursery? */ if ((obj->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) == STM_PSEGMENT->overflow_number) { + dprintf_test(("write_slowpath %p -> ovf_obj\n", obj)); obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; assert(STM_PSEGMENT->overflow_objects_pointing_to_nursery != NULL); LIST_APPEND(STM_PSEGMENT->overflow_objects_pointing_to_nursery, obj); @@ -43,6 +43,8 @@ 0, lock_num))) goto retry; + dprintf_test(("write_slowpath %p -> mod_old\n", obj)); + /* First change to this old object from this transaction. Add it to the list 'modified_old_objects'. */ LIST_APPEND(STM_PSEGMENT->modified_old_objects, obj); @@ -97,8 +99,10 @@ /* A common case for write_locks[] that was either 0 or lock_num: we need to add the object to 'old_objects_pointing_to_nursery' if there is such a list. */ - if (STM_PSEGMENT->old_objects_pointing_to_nursery != NULL) + if (STM_PSEGMENT->old_objects_pointing_to_nursery != NULL) { + dprintf_test(("write_slowpath %p -> old_obj_pointing_to_nurs\n", obj)); LIST_APPEND(STM_PSEGMENT->old_objects_pointing_to_nursery, obj); + } /* add the write-barrier-already-called flag ONLY if we succeeded in getting the write-lock */ diff --git a/c7/stm/fprintcolor.h b/c7/stm/fprintcolor.h --- a/c7/stm/fprintcolor.h +++ b/c7/stm/fprintcolor.h @@ -12,14 +12,21 @@ static int threadcolor_printf(const char *format, ...) 
__attribute__((format (printf, 1, 2))); +#ifdef STM_TESTS +# define dprintf_test(args) dprintf(args) +#else +# define dprintf_test(args) do { } while(0) +#endif + /* ------------------------------------------------------------ */ #else /* ------------------------------------------------------------ */ -#define dprintf(args) do { } while(0) -#define dprintfcolor() 0 +#define dprintf(args) do { } while(0) +#define dprintf_test(args) do { } while(0) +#define dprintfcolor() 0 /* ------------------------------------------------------------ */ diff --git a/c7/test/test_nursery.py b/c7/test/test_nursery.py --- a/c7/test/test_nursery.py +++ b/c7/test/test_nursery.py @@ -3,41 +3,11 @@ class TestBasic(BaseTest): - def test_align_nursery_to_256_bytes(self): + def test_nursery_large(self): + py.test.skip("XXX later") self.start_transaction() - lp1 = stm_allocate(16) - self.commit_transaction() - self.start_transaction() - lp2 = stm_allocate(16) - # - u1 = int(ffi.cast("uintptr_t", lp1)) - u2 = int(ffi.cast("uintptr_t", lp2)) - assert (u1 & ~255) != (u2 & ~255) - - def test_creation_marker_in_nursery(self): - self.start_transaction() - lp1 = stm_allocate(16) - lp2 = stm_allocate(16) - assert stm_creation_marker(lp1) == 0xff - assert stm_creation_marker(lp2) == 0xff - u1 = int(ffi.cast("uintptr_t", lp1)) - u2 = int(ffi.cast("uintptr_t", lp2)) - assert u2 == u1 + 16 - self.commit_transaction() - - assert stm_creation_marker(lp1) == 0 - assert stm_creation_marker(lp2) == 0 - - self.start_transaction() - lp3 = stm_allocate(16) - assert stm_creation_marker(lp1) == 0 - assert stm_creation_marker(lp2) == 0 - assert stm_creation_marker(lp3) == 0xff - - def test_nursery_medium(self): - self.start_transaction() - lp1 = stm_allocate(SOME_MEDIUM_SIZE) - lp2 = stm_allocate(SOME_MEDIUM_SIZE) + lp1 = stm_allocate(SOME_LARGE_SIZE) + lp2 = stm_allocate(SOME_LARGE_SIZE) u1 = int(ffi.cast("uintptr_t", lp1)) u2 = int(ffi.cast("uintptr_t", lp2)) @@ -51,27 +21,27 @@ assert stm_creation_marker(lp2) == 0 def test_nursery_full(self): - lib._stm_set_nursery_free_count((SOME_MEDIUM_SIZE + 255) & ~255) + lib._stm_set_nursery_free_count(2048) + self.start_transaction() self.push_root_no_gc() - self.start_transaction() - lp1 = stm_allocate(SOME_MEDIUM_SIZE) + lp1 = stm_allocate(2048) # no collection here self.pop_root() # self.push_root(lp1) - lp2 = stm_allocate(SOME_MEDIUM_SIZE) + lp2 = stm_allocate(2048) lp1b = self.pop_root() assert lp1b != lp1 # collection occurred def test_several_minor_collections(self): # make a long, ever-growing linked list of objects, in one transaction - lib._stm_set_nursery_free_count(NURSERY_SECTION_SIZE * 2) + lib._stm_set_nursery_free_count(2048) self.start_transaction() lp1 = stm_allocate_refs(1) self.push_root(lp1) prev = lp1 prevprev = None - FIT = (NURSERY_SECTION_SIZE * 2) / 16 - 1 # without 'lp1' above - N = (NURSERY_SECTION_SIZE * 4) / 16 + 41 + FIT = 2048 / 16 - 1 # without 'lp1' above + N = 4096 / 16 + 41 for i in range(N): if prevprev: assert stm_get_ref(prevprev, 0) == prev @@ -83,15 +53,20 @@ prevprev = self.pop_root() assert prevprev != prev stm_set_ref(prev, 0, lp3) + + assert modified_old_objects() == [] # only 1 transaction + ovf_o = overflow_objects_pointing_to_nursery() + old_o = old_objects_pointing_to_nursery() + if i < FIT: + assert ovf_o is None # no minor collection so far + assert old_o is None # no minor collection so far + else: + assert len(ovf_o) == 1 + assert old_o == [] + prevprev = prev prev = lp3 - seeme = old_objects_pointing_to_young() - if i < FIT: - assert 
len(seeme) == 0 # no minor collection so far - else: - assert len(seeme) == 1 # the one from the prev minor coll - lp1 = self.pop_root() assert modified_objects() == [] From noreply at buildbot.pypy.org Mon Feb 24 18:01:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Feb 2014 18:01:12 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: fix test Message-ID: <20140224170112.7FCEA1C0132@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r843:fa7148a2c2ad Date: 2014-02-24 18:01 +0100 http://bitbucket.org/pypy/stmgc/changeset/fa7148a2c2ad/ Log: fix test diff --git a/c7/test/test_nursery.py b/c7/test/test_nursery.py --- a/c7/test/test_nursery.py +++ b/c7/test/test_nursery.py @@ -75,9 +75,9 @@ assert lp2 == lp3 def test_many_allocs(self): - lib._stm_set_nursery_free_count(NURSERY_SECTION_SIZE * 2) - obj_size = 1024 - num = (NURSERY_SECTION_SIZE * 4) / obj_size + 41 + lib._stm_set_nursery_free_count(32768) + obj_size = 512 + num = 65536 / obj_size + 41 self.start_transaction() for i in range(num): From noreply at buildbot.pypy.org Mon Feb 24 18:11:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Feb 2014 18:11:15 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: tweaks Message-ID: <20140224171115.65E8B1C0132@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r844:685f6c6003e3 Date: 2014-02-24 18:09 +0100 http://bitbucket.org/pypy/stmgc/changeset/685f6c6003e3/ Log: tweaks diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -176,9 +176,7 @@ assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL); assert(STM_PSEGMENT->large_overflow_objects == NULL); -#ifdef STM_TESTS check_nursery_at_transaction_start(); -#endif } diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -253,8 +253,17 @@ static void check_nursery_at_transaction_start(void) { +#ifndef NDEBUG assert((uintptr_t)STM_SEGMENT->nursery_current == _stm_nursery_start); - uintptr_t i; - for (i = 0; i < _stm_nursery_end - _stm_nursery_start; i++) - assert(STM_SEGMENT->nursery_current[i] == 0); + uintptr_t i, limit; +# ifdef STM_TESTS + limit = _stm_nursery_end - _stm_nursery_start; +# else + limit = 64; +# endif + for (i = 0; i < limit; i += 8) { + assert(*(TLPREFIX uint64_t *)(STM_SEGMENT->nursery_current + i) == 0); + _duck(); + } +#endif } diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -6,4 +6,4 @@ static uint32_t highest_overflow_number; static void minor_collection(bool commit); -static void check_nursery_at_transaction_start(void) __attribute__((unused)); +static void check_nursery_at_transaction_start(void); diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -32,6 +32,7 @@ typedef ... object_t; typedef ... stm_jmpbuf_t; #define SIZEOF_MYOBJ ... +#define _STM_FAST_ALLOC ... 
typedef struct { object_t **shadowstack, **shadowstack_base; diff --git a/c7/test/test_nursery.py b/c7/test/test_nursery.py --- a/c7/test/test_nursery.py +++ b/c7/test/test_nursery.py @@ -98,8 +98,9 @@ assert old assert young - def test_larger_than_section(self): - obj_size = NURSERY_SECTION_SIZE + 16 + def test_larger_than_limit_for_nursery(self): + py.test.skip("XXX later") + obj_size = lib._STM_FAST_ALLOC + 16 self.start_transaction() seen = set() From noreply at buildbot.pypy.org Mon Feb 24 18:20:42 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 18:20:42 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Make W_ObjectObject a W_Root. Message-ID: <20140224172042.71EC01C0132@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69356:15baebca729e Date: 2014-02-24 18:19 +0100 http://bitbucket.org/pypy/pypy/changeset/15baebca729e/ Log: Make W_ObjectObject a W_Root. diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -31,7 +31,6 @@ self.config = config # All the Python types that we want to provide in this StdObjSpace class result: - from pypy.objspace.std.objecttype import object_typedef from pypy.objspace.std.typeobject import type_typedef self.pythontypes = [value for key, value in result.__dict__.items() if not key.startswith('_')] # don't look @@ -65,6 +64,7 @@ # not-multimethod based types + self.pythontypes.append(objectobject.W_ObjectObject.typedef) self.pythontypes.append(noneobject.W_NoneObject.typedef) self.pythontypes.append(tupleobject.W_TupleObject.typedef) self.pythontypes.append(listobject.W_ListObject.typedef) diff --git a/pypy/objspace/std/objectobject.py b/pypy/objspace/std/objectobject.py --- a/pypy/objspace/std/objectobject.py +++ b/pypy/objspace/std/objectobject.py @@ -1,13 +1,231 @@ -from pypy.objspace.std.model import W_Object -from pypy.objspace.std.register_all import register_all +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import applevel, interp2app, unwrap_spec +from pypy.interpreter.typedef import GetSetProperty, default_identity_hash +from pypy.objspace.descroperation import Object +from pypy.objspace.std.stdtypedef import StdTypeDef -class W_ObjectObject(W_Object): +app = applevel(r''' +def _abstract_method_error(typ): + methods = ", ".join(sorted(typ.__abstractmethods__)) + err = "Can't instantiate abstract class %s with abstract methods %s" + raise TypeError(err % (typ.__name__, methods)) + +def reduce_1(obj, proto): + import copy_reg + return copy_reg._reduce_ex(obj, proto) + +def reduce_2(obj): + cls = obj.__class__ + + try: + getnewargs = obj.__getnewargs__ + except AttributeError: + args = () + else: + args = getnewargs() + if not isinstance(args, tuple): + raise TypeError, "__getnewargs__ should return a tuple" + + try: + getstate = obj.__getstate__ + except AttributeError: + state = getattr(obj, "__dict__", None) + names = slotnames(cls) # not checking for list + if names is not None: + slots = {} + for name in names: + try: + value = getattr(obj, name) + except AttributeError: + pass + else: + slots[name] = value + if slots: + state = state, slots + else: + state = getstate() + + if isinstance(obj, list): + listitems = iter(obj) + else: + listitems = None + + if isinstance(obj, dict): + dictitems = obj.iteritems() + else: + dictitems = None + + import copy_reg + newobj = copy_reg.__newobj__ + + 
args2 = (cls,) + args + return newobj, args2, state, listitems, dictitems + +def slotnames(cls): + if not isinstance(cls, type): + return None + + try: + return cls.__dict__["__slotnames__"] + except KeyError: + pass + + import copy_reg + slotnames = copy_reg._slotnames(cls) + if not isinstance(slotnames, list) and slotnames is not None: + raise TypeError, "copy_reg._slotnames didn't return a list or None" + return slotnames +''', filename=__file__) + +_abstract_method_error = app.interphook("_abstract_method_error") +reduce_1 = app.interphook('reduce_1') +reduce_2 = app.interphook('reduce_2') + + +class W_ObjectObject(W_Root): """Instances of this class are what the user can directly see with an 'object()' call.""" - from pypy.objspace.std.objecttype import object_typedef as typedef -# ____________________________________________________________ +def descr__new__(space, w_type, __args__): + from pypy.objspace.std.objectobject import W_ObjectObject + from pypy.objspace.std.typeobject import _precheck_for_new + # don't allow arguments if the default object.__init__() is about + # to be called + w_type = _precheck_for_new(space, w_type) + w_parentinit, w_ignored = w_type.lookup_where('__init__') + if w_parentinit is space.w_object: + try: + __args__.fixedunpack(0) + except ValueError: + raise OperationError(space.w_TypeError, + space.wrap("default __new__ takes " + "no parameters")) + if w_type.is_abstract(): + _abstract_method_error(space, w_type) + w_obj = space.allocate_instance(W_ObjectObject, w_type) + return w_obj -register_all(vars()) + +def descr___subclasshook__(space, __args__): + return space.w_NotImplemented + + +def descr__init__(space, w_obj, __args__): + # don't allow arguments unless __new__ is overridden + w_type = space.type(w_obj) + w_parent_new, _ = w_type.lookup_where('__new__') + if w_parent_new is space.w_object: + try: + __args__.fixedunpack(0) + except ValueError: + raise OperationError(space.w_TypeError, + space.wrap("object.__init__() takes no parameters")) + + +def descr_get___class__(space, w_obj): + return space.type(w_obj) + + +def descr_set___class__(space, w_obj, w_newcls): + from pypy.objspace.std.typeobject import W_TypeObject + if not isinstance(w_newcls, W_TypeObject): + raise oefmt(space.w_TypeError, + "__class__ must be set to new-style class, not '%T' " + "object", w_newcls) + if not w_newcls.is_heaptype(): + raise OperationError(space.w_TypeError, + space.wrap("__class__ assignment: only for heap types")) + w_oldcls = space.type(w_obj) + assert isinstance(w_oldcls, W_TypeObject) + if w_oldcls.get_full_instance_layout() == w_newcls.get_full_instance_layout(): + w_obj.setclass(space, w_newcls) + else: + raise oefmt(space.w_TypeError, + "__class__ assignment: '%N' object layout differs from " + "'%N'", w_oldcls, w_newcls) + + +def descr__repr__(space, w_obj): + w_type = space.type(w_obj) + classname = w_type.getname(space) + w_module = w_type.lookup("__module__") + if w_module is not None: + try: + modulename = space.str_w(w_module) + except OperationError, e: + if not e.match(space, space.w_TypeError): + raise + else: + classname = '%s.%s' % (modulename, classname) + return w_obj.getrepr(space, '%s object' % (classname,)) + + +def descr__str__(space, w_obj): + w_type = space.type(w_obj) + w_impl = w_type.lookup("__repr__") + if w_impl is None: + raise OperationError(space.w_TypeError, # can it really occur? 
+ space.wrap("operand does not support unary str")) + return space.get_and_call_function(w_impl, w_obj) + + + at unwrap_spec(proto=int) +def descr__reduce__(space, w_obj, proto=0): + if proto >= 2: + return reduce_2(space, w_obj) + w_proto = space.wrap(proto) + return reduce_1(space, w_obj, w_proto) + + at unwrap_spec(proto=int) +def descr__reduce_ex__(space, w_obj, proto=0): + w_st_reduce = space.wrap('__reduce__') + w_reduce = space.findattr(w_obj, w_st_reduce) + if w_reduce is not None: + w_cls = space.getattr(w_obj, space.wrap('__class__')) + w_cls_reduce_meth = space.getattr(w_cls, w_st_reduce) + w_cls_reduce = space.getattr(w_cls_reduce_meth, space.wrap('im_func')) + w_objtype = space.w_object + w_obj_dict = space.getattr(w_objtype, space.wrap('__dict__')) + w_obj_reduce = space.getitem(w_obj_dict, w_st_reduce) + override = not space.is_w(w_cls_reduce, w_obj_reduce) + # print 'OVR', override, w_cls_reduce, w_obj_reduce + if override: + return space.call(w_reduce, space.newtuple([])) + return descr__reduce__(space, w_obj, proto) + +def descr___format__(space, w_obj, w_format_spec): + if space.isinstance_w(w_format_spec, space.w_unicode): + w_as_str = space.call_function(space.w_unicode, w_obj) + elif space.isinstance_w(w_format_spec, space.w_str): + w_as_str = space.str(w_obj) + else: + msg = "format_spec must be a string" + raise OperationError(space.w_TypeError, space.wrap(msg)) + if space.len_w(w_format_spec) > 0: + msg = "object.__format__ with a non-empty format string is deprecated" + space.warn(space.wrap(msg), space.w_PendingDeprecationWarning) + return space.format(w_as_str, w_format_spec) + + +W_ObjectObject.typedef = StdTypeDef("object", + __doc__ = "The most base type", + __new__ = interp2app(descr__new__), + __subclasshook__ = interp2app(descr___subclasshook__, as_classmethod=True), + + # these are actually implemented in pypy.objspace.descroperation + __getattribute__ = interp2app(Object.descr__getattribute__.im_func), + __setattr__ = interp2app(Object.descr__setattr__.im_func), + __delattr__ = interp2app(Object.descr__delattr__.im_func), + + __init__ = interp2app(descr__init__), + __class__ = GetSetProperty(descr_get___class__, descr_set___class__), + __repr__ = interp2app(descr__repr__), + __str__ = interp2app(descr__str__), + __hash__ = interp2app(default_identity_hash), + __reduce__ = interp2app(descr__reduce__), + __reduce_ex__ = interp2app(descr__reduce_ex__), + __format__ = interp2app(descr___format__), +) diff --git a/pypy/objspace/std/objecttype.py b/pypy/objspace/std/objecttype.py deleted file mode 100644 --- a/pypy/objspace/std/objecttype.py +++ /dev/null @@ -1,221 +0,0 @@ -from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.typedef import GetSetProperty, default_identity_hash -from pypy.interpreter import gateway -from pypy.objspace.descroperation import Object -from pypy.objspace.std.stdtypedef import StdTypeDef - -def descr__repr__(space, w_obj): - w_type = space.type(w_obj) - classname = w_type.getname(space) - w_module = w_type.lookup("__module__") - if w_module is not None: - try: - modulename = space.str_w(w_module) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - else: - classname = '%s.%s' % (modulename, classname) - return w_obj.getrepr(space, '%s object' % (classname,)) - -def descr__str__(space, w_obj): - w_type = space.type(w_obj) - w_impl = w_type.lookup("__repr__") - if w_impl is None: - raise OperationError(space.w_TypeError, # can it really occur? 
- space.wrap("operand does not support unary str")) - return space.get_and_call_function(w_impl, w_obj) - -def descr__class__(space, w_obj): - return space.type(w_obj) - -def descr_set___class__(space, w_obj, w_newcls): - from pypy.objspace.std.typeobject import W_TypeObject - if not isinstance(w_newcls, W_TypeObject): - raise oefmt(space.w_TypeError, - "__class__ must be set to new-style class, not '%T' " - "object", w_newcls) - if not w_newcls.is_heaptype(): - raise OperationError(space.w_TypeError, - space.wrap("__class__ assignment: only for heap types")) - w_oldcls = space.type(w_obj) - assert isinstance(w_oldcls, W_TypeObject) - if w_oldcls.get_full_instance_layout() == w_newcls.get_full_instance_layout(): - w_obj.setclass(space, w_newcls) - else: - raise oefmt(space.w_TypeError, - "__class__ assignment: '%N' object layout differs from " - "'%N'", w_oldcls, w_newcls) - - -app = gateway.applevel(""" -def _abstract_method_error(typ): - methods = ", ".join(sorted(typ.__abstractmethods__)) - err = "Can't instantiate abstract class %s with abstract methods %s" - raise TypeError(err % (typ.__name__, methods)) -""") -_abstract_method_error = app.interphook("_abstract_method_error") - - -def descr__new__(space, w_type, __args__): - from pypy.objspace.std.objectobject import W_ObjectObject - from pypy.objspace.std.typeobject import _precheck_for_new - # don't allow arguments if the default object.__init__() is about - # to be called - w_type = _precheck_for_new(space, w_type) - w_parentinit, w_ignored = w_type.lookup_where('__init__') - if w_parentinit is space.w_object: - try: - __args__.fixedunpack(0) - except ValueError: - raise OperationError(space.w_TypeError, - space.wrap("default __new__ takes " - "no parameters")) - if w_type.is_abstract(): - _abstract_method_error(space, w_type) - w_obj = space.allocate_instance(W_ObjectObject, w_type) - return w_obj - -def descr__init__(space, w_obj, __args__): - # don't allow arguments unless __new__ is overridden - w_type = space.type(w_obj) - w_parent_new, _ = w_type.lookup_where('__new__') - if w_parent_new is space.w_object: - try: - __args__.fixedunpack(0) - except ValueError: - raise OperationError(space.w_TypeError, - space.wrap("object.__init__() takes no parameters")) - - - at gateway.unwrap_spec(proto=int) -def descr__reduce__(space, w_obj, proto=0): - if proto >= 2: - return reduce_2(space, w_obj) - w_proto = space.wrap(proto) - return reduce_1(space, w_obj, w_proto) - - at gateway.unwrap_spec(proto=int) -def descr__reduce_ex__(space, w_obj, proto=0): - w_st_reduce = space.wrap('__reduce__') - w_reduce = space.findattr(w_obj, w_st_reduce) - if w_reduce is not None: - w_cls = space.getattr(w_obj, space.wrap('__class__')) - w_cls_reduce_meth = space.getattr(w_cls, w_st_reduce) - w_cls_reduce = space.getattr(w_cls_reduce_meth, space.wrap('im_func')) - w_objtype = space.w_object - w_obj_dict = space.getattr(w_objtype, space.wrap('__dict__')) - w_obj_reduce = space.getitem(w_obj_dict, w_st_reduce) - override = not space.is_w(w_cls_reduce, w_obj_reduce) - # print 'OVR', override, w_cls_reduce, w_obj_reduce - if override: - return space.call(w_reduce, space.newtuple([])) - return descr__reduce__(space, w_obj, proto) - -def descr___format__(space, w_obj, w_format_spec): - if space.isinstance_w(w_format_spec, space.w_unicode): - w_as_str = space.call_function(space.w_unicode, w_obj) - elif space.isinstance_w(w_format_spec, space.w_str): - w_as_str = space.str(w_obj) - else: - msg = "format_spec must be a string" - raise 
OperationError(space.w_TypeError, space.wrap(msg)) - if space.len_w(w_format_spec) > 0: - msg = "object.__format__ with a non-empty format string is deprecated" - space.warn(space.wrap(msg), space.w_PendingDeprecationWarning) - return space.format(w_as_str, w_format_spec) - -def descr___subclasshook__(space, __args__): - return space.w_NotImplemented - - -app = gateway.applevel(r''' -def reduce_1(obj, proto): - import copy_reg - return copy_reg._reduce_ex(obj, proto) - -def reduce_2(obj): - cls = obj.__class__ - - try: - getnewargs = obj.__getnewargs__ - except AttributeError: - args = () - else: - args = getnewargs() - if not isinstance(args, tuple): - raise TypeError, "__getnewargs__ should return a tuple" - - try: - getstate = obj.__getstate__ - except AttributeError: - state = getattr(obj, "__dict__", None) - names = slotnames(cls) # not checking for list - if names is not None: - slots = {} - for name in names: - try: - value = getattr(obj, name) - except AttributeError: - pass - else: - slots[name] = value - if slots: - state = state, slots - else: - state = getstate() - - if isinstance(obj, list): - listitems = iter(obj) - else: - listitems = None - - if isinstance(obj, dict): - dictitems = obj.iteritems() - else: - dictitems = None - - import copy_reg - newobj = copy_reg.__newobj__ - - args2 = (cls,) + args - return newobj, args2, state, listitems, dictitems - -def slotnames(cls): - if not isinstance(cls, type): - return None - - try: - return cls.__dict__["__slotnames__"] - except KeyError: - pass - - import copy_reg - slotnames = copy_reg._slotnames(cls) - if not isinstance(slotnames, list) and slotnames is not None: - raise TypeError, "copy_reg._slotnames didn't return a list or None" - return slotnames -''', filename=__file__) - -reduce_1 = app.interphook('reduce_1') -reduce_2 = app.interphook('reduce_2') - -# ____________________________________________________________ - -object_typedef = StdTypeDef("object", - __getattribute__ = gateway.interp2app(Object.descr__getattribute__.im_func), - __setattr__ = gateway.interp2app(Object.descr__setattr__.im_func), - __delattr__ = gateway.interp2app(Object.descr__delattr__.im_func), - __str__ = gateway.interp2app(descr__str__), - __repr__ = gateway.interp2app(descr__repr__), - __class__ = GetSetProperty(descr__class__, descr_set___class__), - __doc__ = '''The most base type''', - __new__ = gateway.interp2app(descr__new__), - __hash__ = gateway.interp2app(default_identity_hash), - __reduce_ex__ = gateway.interp2app(descr__reduce_ex__), - __reduce__ = gateway.interp2app(descr__reduce__), - __format__ = gateway.interp2app(descr___format__), - __subclasshook__ = gateway.interp2app(descr___subclasshook__, - as_classmethod=True), - __init__ = gateway.interp2app(descr__init__), - ) diff --git a/pypy/objspace/std/stdtypedef.py b/pypy/objspace/std/stdtypedef.py --- a/pypy/objspace/std/stdtypedef.py +++ b/pypy/objspace/std/stdtypedef.py @@ -29,8 +29,8 @@ @jit.unroll_safe def issubtypedef(a, b): - from pypy.objspace.std.objecttype import object_typedef - if b is object_typedef: + from pypy.objspace.std.objectobject import W_ObjectObject + if b is W_ObjectObject.typedef: return True if a is None: return False @@ -56,7 +56,7 @@ "NOT_RPYTHON: initialization-time only." 
# build a W_TypeObject from this StdTypeDef from pypy.objspace.std.typeobject import W_TypeObject - from pypy.objspace.std.objecttype import object_typedef + from pypy.objspace.std.objectobject import W_ObjectObject space = cache.space w = space.wrap @@ -75,10 +75,10 @@ lazyloaders[name] = loader # compute the bases - if typedef is object_typedef: + if typedef is W_ObjectObject.typedef: bases_w = [] else: - bases = typedef.bases or [object_typedef] + bases = typedef.bases or [W_ObjectObject.typedef] bases_w = [space.gettypeobject(base) for base in bases] # wrap everything From noreply at buildbot.pypy.org Mon Feb 24 20:31:18 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 20:31:18 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Remove SMM -> StdObjSpaceMultiMethod alias. Message-ID: <20140224193118.AC6391C0907@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69357:236e61adfc39 Date: 2014-02-24 18:23 +0100 http://bitbucket.org/pypy/pypy/changeset/236e61adfc39/ Log: Remove SMM -> StdObjSpaceMultiMethod alias. diff --git a/pypy/objspace/std/stdtypedef.py b/pypy/objspace/std/stdtypedef.py --- a/pypy/objspace/std/stdtypedef.py +++ b/pypy/objspace/std/stdtypedef.py @@ -10,9 +10,7 @@ from rpython.rlib import jit from rpython.tool.sourcetools import compile2 -__all__ = ['StdTypeDef', 'SMM'] - -SMM = StdObjSpaceMultiMethod +__all__ = ['StdTypeDef'] class StdTypeDef(TypeDef): From noreply at buildbot.pypy.org Mon Feb 24 20:31:19 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 20:31:19 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Kill type's SMMs. Message-ID: <20140224193119.DBBC31C0907@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69358:b0c687345738 Date: 2014-02-24 20:30 +0100 http://bitbucket.org/pypy/pypy/changeset/b0c687345738/ Log: Kill type's SMMs. 
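For reference, the applevel reduce_2 helper that moved into objectobject.py a few changesets above is what backs object.__reduce_ex__(2): it returns the callable copy_reg.__newobj__, the (cls,) + __getnewargs__() tuple, the state, and the list/dict item iterators. A small illustrative session showing the shape of that 5-tuple (Point is a made-up class, not part of the changeset):

    import copy_reg

    class Point(object):               # hypothetical example class
        def __init__(self, x, y):
            self.x = x
            self.y = y

    rv = Point(1, 2).__reduce_ex__(2)
    # rv should look like:
    #   (copy_reg.__newobj__, (Point,), {'x': 1, 'y': 2}, None, None)
    # - rv[0](*rv[1]) recreates the instance via Point.__new__(Point)
    # - rv[2] is the state, here simply the instance __dict__
    # - rv[3] and rv[4] stay None because Point is not a list or dict subclass
    print rv[0] is copy_reg.__newobj__
    print rv[1] == (Point,) and rv[2] == {'x': 1, 'y': 2}
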
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -6,7 +6,6 @@ descr_get_dict from pypy.interpreter.astcompiler.misc import mangle from pypy.objspace.std.model import W_Object -from pypy.objspace.std.register_all import register_all from pypy.objspace.std.stdtypedef import std_dict_descr, issubtypedef, Member from pypy.objspace.std.stdtypedef import StdTypeDef @@ -550,6 +549,78 @@ def delweakref(self): self._lifeline_ = None + def descr_call(self, space, __args__): + promote(self) + # invoke the __new__ of the type + if not we_are_jitted(): + # note that the annotator will figure out that self.w_new_function + # can only be None if the newshortcut config option is not set + w_newfunc = self.w_new_function + else: + # for the JIT it is better to take the slow path because normal lookup + # is nicely optimized, but the self.w_new_function attribute is not + # known to the JIT + w_newfunc = None + if w_newfunc is None: + w_newtype, w_newdescr = self.lookup_where('__new__') + w_newfunc = space.get(w_newdescr, self) + if (space.config.objspace.std.newshortcut and + not we_are_jitted() and + isinstance(w_newtype, W_TypeObject)): + self.w_new_function = w_newfunc + w_newobject = space.call_obj_args(w_newfunc, self, __args__) + call_init = space.isinstance_w(w_newobject, self) + + # maybe invoke the __init__ of the type + if (call_init and not (space.is_w(self, space.w_type) and + not __args__.keywords and len(__args__.arguments_w) == 1)): + w_descr = space.lookup(w_newobject, '__init__') + w_result = space.get_and_call_args(w_descr, w_newobject, __args__) + if not space.is_w(w_result, space.w_None): + raise OperationError(space.w_TypeError, + space.wrap("__init__() should return None")) + return w_newobject + + def descr_repr(self, space): + w_mod = self.get_module() + if not space.isinstance_w(w_mod, space.w_str): + mod = None + else: + mod = space.str_w(w_mod) + if not self.is_heaptype(): + kind = 'type' + else: + kind = 'class' + if mod is not None and mod != '__builtin__': + return space.wrap("<%s '%s.%s'>" % (kind, mod, self.name)) + else: + return space.wrap("<%s '%s'>" % (kind, self.name)) + + def descr_getattribute(self, space, w_name): + name = space.str_w(w_name) + w_descr = space.lookup(self, name) + if w_descr is not None: + if space.is_data_descr(w_descr): + w_get = space.lookup(w_descr, "__get__") + if w_get is not None: + return space.get_and_call_function(w_get, w_descr, self, + space.type(self)) + w_value = self.lookup(name) + if w_value is not None: + # __get__(None, type): turns e.g. functions into unbound methods + return space.get(w_value, space.w_None, self) + if w_descr is not None: + return space.get(w_descr, self) + raise oefmt(space.w_AttributeError, + "type object '%N' has no attribute %R", self, w_name) + + def descr_eq(self, space, w_other): + return space.is_(self, w_other) + + def descr_ne(self, space, w_other): + return space.newbool(not space.is_w(self, w_other)) + + def descr__new__(space, w_typetype, w_name, w_bases=None, w_dict=None): "This is used to create user-defined classes only." 
# XXX check types @@ -810,7 +881,13 @@ __weakref__ = weakref_descr, __instancecheck__ = gateway.interp2app(type_isinstance), __subclasscheck__ = gateway.interp2app(type_issubtype), - ) + + __call__ = gateway.interp2app(W_TypeObject.descr_call), + __repr__ = gateway.interp2app(W_TypeObject.descr_repr), + __getattribute__ = gateway.interp2app(W_TypeObject.descr_getattribute), + __eq__ = gateway.interp2app(W_TypeObject.descr_eq), + __ne__ = gateway.interp2app(W_TypeObject.descr_ne), +) W_TypeObject.typedef = type_typedef # ____________________________________________________________ @@ -1057,38 +1134,6 @@ # ____________________________________________________________ -def call__Type(space, w_type, __args__): - promote(w_type) - # invoke the __new__ of the type - if not we_are_jitted(): - # note that the annotator will figure out that w_type.w_new_function - # can only be None if the newshortcut config option is not set - w_newfunc = w_type.w_new_function - else: - # for the JIT it is better to take the slow path because normal lookup - # is nicely optimized, but the w_type.w_new_function attribute is not - # known to the JIT - w_newfunc = None - if w_newfunc is None: - w_newtype, w_newdescr = w_type.lookup_where('__new__') - w_newfunc = space.get(w_newdescr, w_type) - if (space.config.objspace.std.newshortcut and - not we_are_jitted() and - isinstance(w_newtype, W_TypeObject)): - w_type.w_new_function = w_newfunc - w_newobject = space.call_obj_args(w_newfunc, w_type, __args__) - call_init = space.isinstance_w(w_newobject, w_type) - - # maybe invoke the __init__ of the type - if (call_init and not (space.is_w(w_type, space.w_type) and - not __args__.keywords and len(__args__.arguments_w) == 1)): - w_descr = space.lookup(w_newobject, '__init__') - w_result = space.get_and_call_args(w_descr, w_newobject, __args__) - if not space.is_w(w_result, space.w_None): - raise OperationError(space.w_TypeError, - space.wrap("__init__() should return None")) - return w_newobject - def _issubtype(w_sub, w_type): return w_type in w_sub.mro_w @@ -1096,42 +1141,6 @@ def _pure_issubtype(w_sub, w_type, version_tag1, version_tag2): return _issubtype(w_sub, w_type) -def repr__Type(space, w_obj): - w_mod = w_obj.get_module() - if not space.isinstance_w(w_mod, space.w_str): - mod = None - else: - mod = space.str_w(w_mod) - if not w_obj.is_heaptype(): - kind = 'type' - else: - kind = 'class' - if mod is not None and mod != '__builtin__': - return space.wrap("<%s '%s.%s'>" % (kind, mod, w_obj.name)) - else: - return space.wrap("<%s '%s'>" % (kind, w_obj.name)) - -def getattr__Type_ANY(space, w_type, w_name): - name = space.str_w(w_name) - w_descr = space.lookup(w_type, name) - if w_descr is not None: - if space.is_data_descr(w_descr): - w_get = space.lookup(w_descr, "__get__") - if w_get is not None: - return space.get_and_call_function(w_get, w_descr, w_type, - space.type(w_type)) - w_value = w_type.lookup(name) - if w_value is not None: - # __get__(None, type): turns e.g. 
functions into unbound methods - return space.get(w_value, space.w_None, w_type) - if w_descr is not None: - return space.get(w_descr, w_type) - raise oefmt(space.w_AttributeError, - "type object '%N' has no attribute %R", w_type, w_name) - -def eq__Type_Type(space, w_self, w_other): - return space.is_(w_self, w_other) - # ____________________________________________________________ @@ -1201,7 +1210,3 @@ names = [cls.getname(space) for cls in cycle] raise OperationError(space.w_TypeError, space.wrap("cycle among base classes: " + ' < '.join(names))) - -# ____________________________________________________________ - -register_all(vars()) From noreply at buildbot.pypy.org Mon Feb 24 20:48:33 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 20:48:33 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Use oefmt. Message-ID: <20140224194833.6B1571C0132@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69359:a9197ed339ea Date: 2014-02-24 20:47 +0100 http://bitbucket.org/pypy/pypy/changeset/a9197ed339ea/ Log: Use oefmt. diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -1,6 +1,6 @@ from pypy.interpreter import gateway from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.error import oefmt from pypy.interpreter.function import Function, StaticMethod from pypy.interpreter.typedef import weakref_descr, GetSetProperty,\ descr_get_dict @@ -526,10 +526,9 @@ def get_subclasses(w_self): space = w_self.space if not space.config.translation.rweakref: - msg = ("this feature requires weakrefs, " - "which are not available in this build of PyPy") - raise OperationError(space.w_RuntimeError, - space.wrap(msg)) + raise oefmt(space.w_RuntimeError, + "this feature requires weakrefs, " + "which are not available in this build of PyPy") subclasses_w = [] for ref in w_self.weak_subclasses: w_ob = ref() @@ -577,8 +576,7 @@ w_descr = space.lookup(w_newobject, '__init__') w_result = space.get_and_call_args(w_descr, w_newobject, __args__) if not space.is_w(w_result, space.w_None): - raise OperationError(space.w_TypeError, - space.wrap("__init__() should return None")) + raise oefmt(space.w_TypeError, "__init__() should return None") return w_newobject def descr_repr(self, space): @@ -639,7 +637,7 @@ # this is in its own function because we want the special case 'type(x)' # above to be seen by the jit. 
if w_bases is None or w_dict is None: - raise OperationError(space.w_TypeError, space.wrap("type() takes 1 or 3 arguments")) + raise oefmt(space.w_TypeError, "type() takes 1 or 3 arguments") bases_w = space.fixedview(w_bases) @@ -653,11 +651,10 @@ if space.is_true(space.issubtype(w_typ, w_winner)): w_winner = w_typ continue - raise OperationError(space.w_TypeError, - space.wrap("metaclass conflict: " - "the metaclass of a derived class " - "must be a (non-strict) subclass " - "of the metaclasses of all its bases")) + raise oefmt(space.w_TypeError, + "metaclass conflict: the metaclass of a derived class must" + " be a (non-strict) subclass of the metaclasses of all its" + " bases") if not space.is_w(w_winner, w_typetype): newfunc = space.getattr(w_winner, space.wrap('__new__')) @@ -685,11 +682,9 @@ # ____________________________________________________________ -def _check(space, w_type, w_msg=None): +def _check(space, w_type, msg="descriptor is for 'type'"): if not isinstance(w_type, W_TypeObject): - if w_msg is None: - w_msg = space.wrap("descriptor is for 'type'") - raise OperationError(space.w_TypeError, w_msg) + raise oefmt(space.w_TypeError, msg) return w_type @@ -709,7 +704,7 @@ def descr_mro(space, w_type): """Return a type's method resolution order.""" - w_type = _check(space, w_type, space.wrap("expected type")) + w_type = _check(space, w_type, "expected type") return space.newlist(w_type.compute_default_mro()) def descr_get__bases__(space, w_type): @@ -741,9 +736,8 @@ for w_newbase in newbases_w: if isinstance(w_newbase, W_TypeObject): if w_type in w_newbase.compute_default_mro(): - raise OperationError(space.w_TypeError, - space.wrap("a __bases__ item causes" - " an inheritance cycle")) + raise oefmt(space.w_TypeError, + "a __bases__ item causes an inheritance cycle") w_oldbestbase = check_and_find_best_base(space, w_type.bases_w) w_newbestbase = check_and_find_best_base(space, newbases_w) @@ -833,8 +827,7 @@ w_result = w_type.getdictvalue(space, "__abstractmethods__") if w_result is not None: return w_result - raise OperationError(space.w_AttributeError, - space.wrap("__abstractmethods__")) + raise oefmt(space.w_AttributeError, "__abstractmethods__") def descr_set___abstractmethods__(space, w_type, w_new): w_type = _check(space, w_type) @@ -844,8 +837,7 @@ def descr_del___abstractmethods__(space, w_type): w_type = _check(space, w_type) if not w_type.deldictvalue(space, "__abstractmethods__"): - raise OperationError(space.w_AttributeError, - space.wrap("__abstractmethods__")) + raise oefmt(space.w_AttributeError, "__abstractmethods__") w_type.set_abstract(False) def descr___subclasses__(space, w_type): @@ -949,9 +941,8 @@ """ w_bestbase = find_best_base(space, bases_w) if w_bestbase is None: - raise OperationError(space.w_TypeError, - space.wrap("a new-style class can't have " - "only classic bases")) + raise oefmt(space.w_TypeError, + "a new-style class can't have only classic bases") if not w_bestbase.instancetypedef.acceptable_as_base_class: raise oefmt(space.w_TypeError, "type '%N' is not an acceptable base class", w_bestbase) @@ -962,9 +953,8 @@ if isinstance(w_base, W_TypeObject): w_layout = w_base.w_same_layout_as or w_base if not issublayout(w_bestlayout, w_layout): - raise OperationError(space.w_TypeError, - space.wrap("instance layout conflicts in " - "multiple inheritance")) + raise oefmt(space.w_TypeError, + "instance layout conflicts in multiple inheritance") return w_bestbase def copy_flags_from_bases(w_self, w_bestbase): @@ -998,15 +988,13 @@ slot_name = 
space.str_w(w_slot_name) if slot_name == '__dict__': if wantdict or w_self.hasdict: - raise OperationError(space.w_TypeError, - space.wrap("__dict__ slot disallowed: " - "we already got one")) + raise oefmt(space.w_TypeError, + "__dict__ slot disallowed: we already got one") wantdict = True elif slot_name == '__weakref__': if wantweakref or w_self.weakrefable: - raise OperationError(space.w_TypeError, - space.wrap("__weakref__ slot disallowed: " - "we already got one")) + raise oefmt(space.w_TypeError, + "__weakref__ slot disallowed: we already got one") wantweakref = True else: create_slot(w_self, slot_name) @@ -1021,8 +1009,7 @@ def create_slot(w_self, slot_name): space = w_self.space if not valid_slot_name(slot_name): - raise OperationError(space.w_TypeError, - space.wrap('__slots__ must be identifiers')) + raise oefmt(space.w_TypeError, "__slots__ must be identifiers") # create member slot_name = mangle(slot_name, w_self.name) if slot_name not in w_self.dict_w: @@ -1122,8 +1109,7 @@ # the elements in the mro seem to be (old- or new-style) classes. for w_class in mro_w: if not space.abstract_isclass_w(w_class): - raise OperationError(space.w_TypeError, - space.wrap("mro() returned a non-class")) + raise oefmt(space.w_TypeError, "mro() returned a non-class") return mro_w def is_mro_purely_of_types(mro_w): @@ -1208,5 +1194,5 @@ cycle.append(candidate) cycle.reverse() names = [cls.getname(space) for cls in cycle] - raise OperationError(space.w_TypeError, - space.wrap("cycle among base classes: " + ' < '.join(names))) + raise oefmt(space.w_TypeError, + "cycle among base classes: " + ' < '.join(names)) From noreply at buildbot.pypy.org Mon Feb 24 21:56:00 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 21:56:00 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Make sure float comparison methods get different names. Message-ID: <20140224205600.285ED1C08B9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69360:72388bf30fed Date: 2014-02-24 21:00 +0100 http://bitbucket.org/pypy/pypy/changeset/72388bf30fed/ Log: Make sure float comparison methods get different names. diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -17,6 +17,7 @@ isinf, isnan, isfinite, INFINITY, NAN, copysign, formatd, DTSF_ADD_DOT_0, DTSF_STR_PRECISION, float_as_rbigint_ratio) from rpython.rlib.rstring import ParseStringError +from rpython.tool.sourcetools import func_with_new_name from rpython.rlib.unroll import unrolling_iterable @@ -114,7 +115,7 @@ if space.isinstance_w(w_other, space.w_long): return space.newbool(do_compare_bigint(self.floatval, w_other.num)) return space.w_NotImplemented - return _compare + return func_with_new_name(_compare, 'descr_' + opname) class W_FloatObject(W_Root): From noreply at buildbot.pypy.org Mon Feb 24 21:56:01 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 21:56:01 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Revert some uses of oefmt where the message is not a constant string. Message-ID: <20140224205601.6EFA11C08B9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69361:dcdb95496ffb Date: 2014-02-24 21:14 +0100 http://bitbucket.org/pypy/pypy/changeset/dcdb95496ffb/ Log: Revert some uses of oefmt where the message is not a constant string. 
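The floatobject.py change a few lines up wraps each generated comparison in func_with_new_name so that the descriptors produced by one shared closure stop all being named '_compare'. A minimal pure-Python sketch of that factory shape (make_compare and its body are illustrative; only the renaming idiom is taken from the diff):

    import operator

    def make_compare(opname):              # illustrative factory, name made up
        op = getattr(operator, opname)     # e.g. operator.lt, operator.eq
        def _compare(a, b):
            return op(a, b)
        # without this, every generated function is reported as '_compare';
        # renaming gives each descriptor a distinguishable name
        _compare.func_name = 'descr_' + opname
        return _compare

    descr_lt = make_compare('lt')
    descr_eq = make_compare('eq')
    print descr_lt.__name__, descr_eq.__name__    # descr_lt descr_eq
    print descr_lt(1.0, 2.0), descr_eq(1.0, 2.0)  # True False
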
diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -410,7 +410,7 @@ try: return space.newfloat(math.hypot(self.realval, self.imagval)) except OverflowError, e: - raise oefmt(space.w_OverflowError, str(e)) + raise OperationError(space.w_OverflowError, space.wrap(str(e))) def descr_eq(self, space, w_other): if isinstance(w_other, W_ComplexObject): @@ -487,7 +487,7 @@ try: return self.div(w_rhs) except ZeroDivisionError, e: - raise oefmt(space.w_ZeroDivisionError, str(e)) + raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) def descr_rtruediv(self, space, w_lhs): w_lhs = self._to_complex(space, w_lhs) @@ -496,7 +496,7 @@ try: return w_lhs.div(self) except ZeroDivisionError, e: - raise oefmt(space.w_ZeroDivisionError, str(e)) + raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) def descr_floordiv(self, space, w_rhs): w_rhs = self._to_complex(space, w_rhs) @@ -506,7 +506,7 @@ try: return self.divmod(space, w_rhs)[0] except ZeroDivisionError, e: - raise oefmt(space.w_ZeroDivisionError, str(e)) + raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) def descr_rfloordiv(self, space, w_lhs): w_lhs = self._to_complex(space, w_lhs) @@ -516,7 +516,7 @@ try: return w_lhs.divmod(space, self)[0] except ZeroDivisionError, e: - raise oefmt(space.w_ZeroDivisionError, str(e)) + raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) def descr_mod(self, space, w_rhs): w_rhs = self._to_complex(space, w_rhs) @@ -525,7 +525,7 @@ try: return self.divmod(space, w_rhs)[1] except ZeroDivisionError, e: - raise oefmt(space.w_ZeroDivisionError, str(e)) + raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) def descr_rmod(self, space, w_lhs): w_lhs = self._to_complex(space, w_lhs) @@ -534,7 +534,7 @@ try: return w_lhs.divmod(space, self)[1] except ZeroDivisionError, e: - raise oefmt(space.w_ZeroDivisionError, str(e)) + raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) def descr_divmod(self, space, w_rhs): w_rhs = self._to_complex(space, w_rhs) @@ -543,7 +543,7 @@ try: div, mod = self.divmod(space, w_rhs) except ZeroDivisionError, e: - raise oefmt(space.w_ZeroDivisionError, str(e)) + raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) return space.newtuple([div, mod]) def descr_rdivmod(self, space, w_lhs): @@ -553,7 +553,7 @@ try: div, mod = w_lhs.divmod(space, self) except ZeroDivisionError, e: - raise oefmt(space.w_ZeroDivisionError, str(e)) + raise OperationError(space.w_ZeroDivisionError, space.wrap(str(e))) return space.newtuple([div, mod]) @unwrap_spec(w_third_arg=WrappedDefault(None)) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -1,6 +1,6 @@ from pypy.interpreter import gateway from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.error import oefmt +from pypy.interpreter.error import oefmt, OperationError from pypy.interpreter.function import Function, StaticMethod from pypy.interpreter.typedef import weakref_descr, GetSetProperty,\ descr_get_dict @@ -684,7 +684,7 @@ def _check(space, w_type, msg="descriptor is for 'type'"): if not isinstance(w_type, W_TypeObject): - raise oefmt(space.w_TypeError, msg) + raise OperationError(space.w_TypeError, space.wrap(msg)) return w_type From noreply at buildbot.pypy.org Mon Feb 24 21:56:02 2014 From: noreply at 
buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 21:56:02 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Fix. Message-ID: <20140224205602.9D8911C08B9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69362:b7f041f08b3a Date: 2014-02-24 21:18 +0100 http://bitbucket.org/pypy/pypy/changeset/b7f041f08b3a/ Log: Fix. diff --git a/pypy/objspace/std/sliceobject.py b/pypy/objspace/std/sliceobject.py --- a/pypy/objspace/std/sliceobject.py +++ b/pypy/objspace/std/sliceobject.py @@ -125,9 +125,11 @@ # parameters if space.is_w(self, w_other): return space.w_True + if not isinstance(w_other, W_SliceObject): + return space.w_NotImplemented if space.eq_w(self.w_start, w_other.w_start) and \ - space.eq_w(self.w_stop, w_other.w_stop) and \ - space.eq_w(self.w_step, w_other.w_step): + space.eq_w(self.w_stop, w_other.w_stop) and \ + space.eq_w(self.w_step, w_other.w_step): return space.w_True else: return space.w_False From noreply at buildbot.pypy.org Mon Feb 24 21:56:03 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 21:56:03 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Fix. Message-ID: <20140224205603.CEEDF1C08B9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69363:d9a4f6186d70 Date: 2014-02-24 21:24 +0100 http://bitbucket.org/pypy/pypy/changeset/d9a4f6186d70/ Log: Fix. diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -113,7 +113,8 @@ res = op(f1, f2) return space.newbool(res) if space.isinstance_w(w_other, space.w_long): - return space.newbool(do_compare_bigint(self.floatval, w_other.num)) + return space.newbool(do_compare_bigint(self.floatval, + space.bigint_w(w_other))) return space.w_NotImplemented return func_with_new_name(_compare, 'descr_' + opname) From noreply at buildbot.pypy.org Mon Feb 24 21:56:05 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 21:56:05 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Fix. Message-ID: <20140224205605.050061C08B9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69364:a3176fb0a3e3 Date: 2014-02-24 21:31 +0100 http://bitbucket.org/pypy/pypy/changeset/a3176fb0a3e3/ Log: Fix. diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -280,8 +280,7 @@ if space.isinstance_w(w_obj, space.w_int): return W_ComplexObject(space.int_w(w_obj), 0.0) if space.isinstance_w(w_obj, space.w_long): - dval = w_obj.tofloat(space) - return W_ComplexObject(dval, 0.0) + return W_ComplexObject(space.float_w(w_obj), 0.0) if space.isinstance_w(w_obj, space.w_float): return W_ComplexObject(space.float_w(w_obj), 0.0) From noreply at buildbot.pypy.org Mon Feb 24 21:56:06 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 21:56:06 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Revert one more use of oefmt with non-constant message. Message-ID: <20140224205606.2E7A91C08B9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69365:ddff94c8e8e3 Date: 2014-02-24 21:33 +0100 http://bitbucket.org/pypy/pypy/changeset/ddff94c8e8e3/ Log: Revert one more use of oefmt with non-constant message. 
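The two "revert oefmt" changesets follow one rule of thumb: oefmt wants a constant format string and does the interpolation itself, so a message that is concatenated at runtime goes back to the explicit OperationError(space.w_Xxx, space.wrap(msg)) spelling, as the typeobject.py diff just below does for the base-class-cycle error. A rough interp-level sketch of the two spellings (the helper and its arguments are made up):

    from pypy.interpreter.error import OperationError, oefmt

    def _complain(space, w_obj, names):      # illustrative helper, not in the tree
        if not names:
            # constant format string: fine for oefmt, which interpolates '%T' itself
            raise oefmt(space.w_TypeError,
                        "expected a type, got '%T' object", w_obj)
        # message assembled at runtime: wrap it explicitly instead
        msg = "cycle among base classes: " + ' < '.join(names)
        raise OperationError(space.w_TypeError, space.wrap(msg))
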
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -1194,5 +1194,5 @@ cycle.append(candidate) cycle.reverse() names = [cls.getname(space) for cls in cycle] - raise oefmt(space.w_TypeError, - "cycle among base classes: " + ' < '.join(names)) + raise OperationError(space.w_TypeError, space.wrap( + "cycle among base classes: " + ' < '.join(names))) From noreply at buildbot.pypy.org Mon Feb 24 21:56:07 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 21:56:07 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Kill W_AbstractComplexObject (it has only one subclass). Message-ID: <20140224205607.42A151C08B9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69366:a73fba564942 Date: 2014-02-24 21:37 +0100 http://bitbucket.org/pypy/pypy/changeset/a73fba564942/ Log: Kill W_AbstractComplexObject (it has only one subclass). diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -14,38 +14,6 @@ from rpython.rlib.rstring import ParseStringError -class W_AbstractComplexObject(W_Root): - __slots__ = () - - def is_w(self, space, w_other): - from rpython.rlib.longlong2float import float2longlong - if not isinstance(w_other, W_AbstractComplexObject): - return False - if self.user_overridden_class or w_other.user_overridden_class: - return self is w_other - real1 = space.float_w(space.getattr(self, space.wrap("real"))) - real2 = space.float_w(space.getattr(w_other, space.wrap("real"))) - imag1 = space.float_w(space.getattr(self, space.wrap("imag"))) - imag2 = space.float_w(space.getattr(w_other, space.wrap("imag"))) - real1 = float2longlong(real1) - real2 = float2longlong(real2) - imag1 = float2longlong(imag1) - imag2 = float2longlong(imag2) - return real1 == real2 and imag1 == imag2 - - def immutable_unique_id(self, space): - if self.user_overridden_class: - return None - from rpython.rlib.longlong2float import float2longlong - from pypy.objspace.std.model import IDTAG_COMPLEX as tag - real = space.float_w(space.getattr(self, space.wrap("real"))) - imag = space.float_w(space.getattr(self, space.wrap("imag"))) - real_b = rbigint.fromrarith_int(float2longlong(real)) - imag_b = rbigint.fromrarith_int(r_ulonglong(float2longlong(imag))) - val = real_b.lshift(64).or_(imag_b).lshift(3).or_(rbigint.fromint(tag)) - return space.newlong_from_rbigint(val) - - def _split_complex(s): slen = len(s) if slen == 0: @@ -206,7 +174,7 @@ ERR_MALFORMED = "complex() arg is a malformed string" -class W_ComplexObject(W_AbstractComplexObject): +class W_ComplexObject(W_Root): """This is a reimplementation of the CPython "PyComplexObject" """ _immutable_fields_ = ['realval', 'imagval'] @@ -270,6 +238,34 @@ return w_result + def is_w(self, space, w_other): + from rpython.rlib.longlong2float import float2longlong + if not isinstance(w_other, W_ComplexObject): + return False + if self.user_overridden_class or w_other.user_overridden_class: + return self is w_other + real1 = space.float_w(space.getattr(self, space.wrap("real"))) + real2 = space.float_w(space.getattr(w_other, space.wrap("real"))) + imag1 = space.float_w(space.getattr(self, space.wrap("imag"))) + imag2 = space.float_w(space.getattr(w_other, space.wrap("imag"))) + real1 = float2longlong(real1) + real2 = float2longlong(real2) + imag1 = float2longlong(imag1) + imag2 = 
float2longlong(imag2) + return real1 == real2 and imag1 == imag2 + + def immutable_unique_id(self, space): + if self.user_overridden_class: + return None + from rpython.rlib.longlong2float import float2longlong + from pypy.objspace.std.model import IDTAG_COMPLEX as tag + real = space.float_w(space.getattr(self, space.wrap("real"))) + imag = space.float_w(space.getattr(self, space.wrap("imag"))) + real_b = rbigint.fromrarith_int(float2longlong(real)) + imag_b = rbigint.fromrarith_int(r_ulonglong(float2longlong(imag))) + val = real_b.lshift(64).or_(imag_b).lshift(3).or_(rbigint.fromint(tag)) + return space.newlong_from_rbigint(val) + def int(self, space): raise oefmt(space.w_TypeError, "can't convert complex to int; use int(abs(z))") From noreply at buildbot.pypy.org Mon Feb 24 21:56:08 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 21:56:08 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Remove obvious comment. Message-ID: <20140224205608.620C81C08B9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69367:73890a99591b Date: 2014-02-24 21:37 +0100 http://bitbucket.org/pypy/pypy/changeset/73890a99591b/ Log: Remove obvious comment. diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -175,8 +175,6 @@ class W_ComplexObject(W_Root): - """This is a reimplementation of the CPython "PyComplexObject" - """ _immutable_fields_ = ['realval', 'imagval'] def __init__(self, realval=0.0, imgval=0.0): From noreply at buildbot.pypy.org Mon Feb 24 21:56:09 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 21:56:09 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Inline this constant. Message-ID: <20140224205609.6EC5E1C08B9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69368:57581836303c Date: 2014-02-24 21:40 +0100 http://bitbucket.org/pypy/pypy/changeset/57581836303c/ Log: Inline this constant. diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -171,9 +171,6 @@ return (space.float_w(space.float(w_complex)), 0.0) -ERR_MALFORMED = "complex() arg is a malformed string" - - class W_ComplexObject(W_Root): _immutable_fields_ = ['realval', 'imagval'] @@ -301,12 +298,14 @@ try: realstr, imagstr = _split_complex(space.str_w(w_real)) except ValueError: - raise oefmt(space.w_ValueError, ERR_MALFORMED) + raise oefmt(space.w_ValueError, + "complex() arg is a malformed string") try: realval = string_to_float(realstr) imagval = string_to_float(imagstr) except ParseStringError: - raise oefmt(space.w_ValueError, ERR_MALFORMED) + raise oefmt(space.w_ValueError, + "complex() arg is a malformed string") else: # non-string arguments From noreply at buildbot.pypy.org Mon Feb 24 21:56:10 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 21:56:10 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Fix. Message-ID: <20140224205610.818BA1C08B9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69369:89210628cd59 Date: 2014-02-24 21:44 +0100 http://bitbucket.org/pypy/pypy/changeset/89210628cd59/ Log: Fix. 
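The immutable_unique_id kept on W_ComplexObject above packs the bit patterns of both floats into one big integer, ((real_bits << 64) | imag_bits) << 3 | IDTAG_COMPLEX, so two complex constants with identical bits get the same id. A plain-Python sketch of that layout, simplified to treat both halves as unsigned and using a placeholder tag instead of the real IDTAG_COMPLEX from model.py:

    import struct

    def float_bits(x):
        # rough stand-in for rpython.rlib.longlong2float.float2longlong,
        # read back here as an unsigned 64-bit value
        return struct.unpack('<Q', struct.pack('<d', x))[0]

    IDTAG_COMPLEX = 7          # placeholder; the real constant lives in model.py

    def complex_id(real, imag):
        return ((float_bits(real) << 64) | float_bits(imag)) << 3 | IDTAG_COMPLEX

    print complex_id(1.5, -2.0) == complex_id(1.5, -2.0)   # True: same bits
    print complex_id(1.5, -2.0) == complex_id(1.5, 2.0)    # False
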
diff --git a/pypy/objspace/std/sliceobject.py b/pypy/objspace/std/sliceobject.py --- a/pypy/objspace/std/sliceobject.py +++ b/pypy/objspace/std/sliceobject.py @@ -137,6 +137,8 @@ def descr_lt(self, space, w_other): if space.is_w(self, w_other): return space.w_False # see comments in descr_eq() + if not isinstance(w_other, W_SliceObject): + return space.w_NotImplemented if space.eq_w(self.w_start, w_other.w_start): if space.eq_w(self.w_stop, w_other.w_stop): return space.lt(self.w_step, w_other.w_step) From noreply at buildbot.pypy.org Mon Feb 24 21:56:11 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 21:56:11 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Use space.float_w here. Message-ID: <20140224205611.A0C7A1C08B9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69370:9b9f11802928 Date: 2014-02-24 21:49 +0100 http://bitbucket.org/pypy/pypy/changeset/9b9f11802928/ Log: Use space.float_w here. diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -352,9 +352,9 @@ if isinstance(w_obj, W_FloatObject): return w_obj if space.isinstance_w(w_obj, space.w_int): - return W_FloatObject(float(space.int_w(w_obj))) + return W_FloatObject(space.float_w(w_obj)) if space.isinstance_w(w_obj, space.w_long): - return W_FloatObject(w_obj.tofloat(space)) + return W_FloatObject(space.float_w(w_obj)) def _float2string(self, x, code, precision): # we special-case explicitly inf and nan here From noreply at buildbot.pypy.org Mon Feb 24 21:56:12 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 21:56:12 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Revert another oefmt. Message-ID: <20140224205612.B34EA1C08B9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69371:1852ff9bd7f5 Date: 2014-02-24 21:50 +0100 http://bitbucket.org/pypy/pypy/changeset/1852ff9bd7f5/ Log: Revert another oefmt. diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import oefmt +from pypy.interpreter.error import oefmt, OperationError from rpython.rlib.rstring import InvalidBaseError @@ -30,7 +30,7 @@ def wrap_parsestringerror(space, e, w_source): if isinstance(e, InvalidBaseError): - raise oefmt(space.w_ValueError, e.msg) + raise OperationError(space.w_ValueError, space.wrap(e.msg)) else: raise oefmt(space.w_ValueError, '%s: %s', e.msg, space.str_w(space.repr(w_source))) From noreply at buildbot.pypy.org Mon Feb 24 22:20:57 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 22:20:57 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Remove trailing whitespace. Message-ID: <20140224212057.B636D1C3599@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69372:193fa2192fe9 Date: 2014-02-24 22:04 +0100 http://bitbucket.org/pypy/pypy/changeset/193fa2192fe9/ Log: Remove trailing whitespace. 
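The sliceobject fixes above make descr_eq and descr_lt answer space.w_NotImplemented when the other operand is not a slice, instead of comparing w_start fields of an object that is not a slice at all. At app level that is what lets the interpreter try the reflected method on the other operand; expected behaviour once the descriptors do this (AnythingEqual is a made-up class):

    class AnythingEqual(object):       # hypothetical app-level class
        def __eq__(self, other):
            return True

    s = slice(1, 10, 2)
    # slice.__eq__ returns NotImplemented for the non-slice operand, so the
    # reflected AnythingEqual.__eq__ gets a chance and the comparison is True
    print s == AnythingEqual()         # True
    print s == slice(1, 10, 2)         # True: both slices, compared field by field
    print s == slice(1, 10, 3)         # False
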
diff --git a/pypy/module/marshal/__init__.py b/pypy/module/marshal/__init__.py --- a/pypy/module/marshal/__init__.py +++ b/pypy/module/marshal/__init__.py @@ -8,7 +8,7 @@ appleveldefs = { } - + interpleveldefs = { 'dump' : 'interp_marshal.dump', 'dumps' : 'interp_marshal.dumps', From noreply at buildbot.pypy.org Mon Feb 24 22:20:58 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 22:20:58 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Kill type_typedef, use W_TypeObject.typedef directly. Message-ID: <20140224212058.EC3E11C3599@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69373:741db1af98fd Date: 2014-02-24 22:13 +0100 http://bitbucket.org/pypy/pypy/changeset/741db1af98fd/ Log: Kill type_typedef, use W_TypeObject.typedef directly. diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -30,10 +30,6 @@ """NOT_RPYTHON: inititialization only""" self.config = config # All the Python types that we want to provide in this StdObjSpace - class result: - from pypy.objspace.std.typeobject import type_typedef - self.pythontypes = [value for key, value in result.__dict__.items() - if not key.startswith('_')] # don't look # The object implementations that we want to 'link' into PyPy must be # imported here. This registers them into the multimethod tables, @@ -62,9 +58,10 @@ import pypy.objspace.std.marshal_impl # install marshal multimethods - # not-multimethod based types + self.pythontypes = [] self.pythontypes.append(objectobject.W_ObjectObject.typedef) + self.pythontypes.append(typeobject.W_TypeObject.typedef) self.pythontypes.append(noneobject.W_NoneObject.typedef) self.pythontypes.append(tupleobject.W_TupleObject.typedef) self.pythontypes.append(listobject.W_ListObject.typedef) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -855,7 +855,7 @@ def type_isinstance(w_obj, space, w_inst): return space.newbool(space.type(w_inst).issubtype(w_obj)) -type_typedef = StdTypeDef("type", +W_TypeObject.typedef = StdTypeDef("type", __new__ = gateway.interp2app(descr__new__), __name__ = GetSetProperty(descr_get__name__, descr_set__name__), __bases__ = GetSetProperty(descr_get__bases__, descr_set__bases__), @@ -880,7 +880,7 @@ __eq__ = gateway.interp2app(W_TypeObject.descr_eq), __ne__ = gateway.interp2app(W_TypeObject.descr_ne), ) -W_TypeObject.typedef = type_typedef + # ____________________________________________________________ # Initialization of type objects From noreply at buildbot.pypy.org Mon Feb 24 22:21:00 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Mon, 24 Feb 2014 22:21:00 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Kill default init MM implementation. Message-ID: <20140224212100.24E231C3599@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69374:b010db133995 Date: 2014-02-24 22:20 +0100 http://bitbucket.org/pypy/pypy/changeset/b010db133995/ Log: Kill default init MM implementation. 
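For readers who have not met the "SMM" (standard multimethod) machinery this branch is dismantling: operations used to be dispatched through registration tables keyed on the operand types, with catch-all defaults such as the init__ANY removed just below. The following toy sketch contrasts that dispatch style with the typedef/method style the branch moves to; it is purely illustrative and not the actual objspace implementation:

    # toy multimethod: dispatch __init__ on the runtime type of w_obj
    _init_table = {}

    def register_init(cls, func):
        _init_table[cls] = func

    def mm_init(w_obj, *args):
        func = _init_table.get(type(w_obj), _init_table[object])
        return func(w_obj, *args)

    register_init(object, lambda w_obj, *args: None)  # the catch-all default

    # typedef style: the default is just an ordinary method on a base class,
    # exposed through the type's typedef instead of a dispatch table
    class W_Root(object):
        def descr_init(self, *args):
            return None
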
diff --git a/pypy/objspace/std/default.py b/pypy/objspace/std/default.py deleted file mode 100644 --- a/pypy/objspace/std/default.py +++ /dev/null @@ -1,11 +0,0 @@ -"""Default implementation for some operation.""" - -from pypy.objspace.std.register_all import register_all - - -# __init__ should succeed if called internally as a multimethod - -def init__ANY(space, w_obj, __args__): - pass - -register_all(vars()) diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -54,7 +54,6 @@ from pypy.objspace.std import unicodeobject from pypy.objspace.std import dictproxyobject from pypy.objspace.std import proxyobject - import pypy.objspace.std.default # register a few catch-all multimethods import pypy.objspace.std.marshal_impl # install marshal multimethods From noreply at buildbot.pypy.org Mon Feb 24 23:46:52 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Feb 2014 23:46:52 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Phew. I *think* that by now this model with several condition variables should, ideally, Message-ID: <20140224224652.1DC3C1D26BC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r845:bf3c8f9d6f21 Date: 2014-02-24 23:46 +0100 http://bitbucket.org/pypy/stmgc/changeset/bf3c8f9d6f21/ Log: Phew. I *think* that by now this model with several condition variables should, ideally, work. diff --git a/c7/demo/Makefile b/c7/demo/Makefile --- a/c7/demo/Makefile +++ b/c7/demo/Makefile @@ -24,7 +24,7 @@ -Wall -Werror ../stmgc.c build-%: %.c ${H_FILES} ${C_FILES} - clang -I.. -pthread -g -O1 $< -o build-$* \ + clang -I.. -pthread -g -O0 $< -o build-$* \ -Wall -Werror ../stmgc.c release-%: %.c ${H_FILES} ${C_FILES} diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -6,8 +6,8 @@ #include "stmgc.h" -#define LIST_LENGTH 6000 -#define BUNCH 400 +#define LIST_LENGTH 2000 +#define BUNCH 100 typedef TLPREFIX struct node_s node_t; typedef node_t* nodeptr_t; @@ -34,7 +34,7 @@ void done_shadow_stack(void) { - free(stm_thread_local.shadowstack); + free(stm_thread_local.shadowstack_base); stm_thread_local.shadowstack = NULL; stm_thread_local.shadowstack_base = NULL; } diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -3,7 +3,7 @@ #endif -static void contention_management(uint8_t other_segment_num, bool wait) +static void contention_management(uint8_t other_segment_num) { /* A simple contention manager. Called when some other thread holds the write lock on an object. The current thread tries @@ -36,23 +36,58 @@ abort. */ abort_with_mutex(); } - else if (wait) { + else { + /* signal the other thread; it must abort. + + Note that we know that the target thread is running now, and + so it is or will soon be blocked at a mutex_lock() or a + cond_wait(C_SAFE_POINT). Thus broadcasting C_SAFE_POINT is + enough to wake it up in the second case. 
+ */ + cond_broadcast(C_SAFE_POINT); + } +} + +static void write_write_contention_management(uintptr_t lock_idx) +{ + mutex_lock(); + + if (STM_PSEGMENT->transaction_state == TS_MUST_ABORT) + abort_with_mutex(); + + uint8_t prev_owner = ((volatile uint8_t *)write_locks)[lock_idx]; + if (prev_owner != 0 && prev_owner != STM_PSEGMENT->write_lock_num) { + + uint8_t other_segment_num = prev_owner - 1; + contention_management(other_segment_num); + + /* the rest of this code is for the case where we continue to + run, and the other thread is asked to abort */ + #ifdef STM_TESTS /* abort anyway for tests. We mustn't call cond_wait() */ abort_with_mutex(); #endif - /* otherwise, we will issue a safe point and wait: */ + + /* first mark the other thread as "needing a safe-point" */ + struct stm_priv_segment_info_s* other_pseg; + other_pseg = get_priv_segment(other_segment_num); + assert(other_pseg->transaction_state == TS_MUST_ABORT); + other_pseg->pub.nursery_end = NSE_SIGNAL; + + /* we will issue a safe point and wait: */ STM_PSEGMENT->safe_point = SP_SAFE_POINT_CANNOT_COLLECT; - /* signal the other thread; it must abort */ - cond_broadcast(); + /* wait, hopefully until the other thread broadcasts "I'm + done aborting" (spurious wake-ups are ok). */ + cond_wait(C_SAFE_POINT); - /* then wait, hopefully until the other thread broadcasts "I'm - done aborting" (spurious wake-ups are ok) */ - cond_wait(); + cond_broadcast(C_RESUME); /* now we return into _stm_write_slowpath() and will try again to acquire the write lock on our object. */ STM_PSEGMENT->safe_point = SP_RUNNING; } + + mutex_unlock(); } diff --git a/c7/stm/contention.h b/c7/stm/contention.h --- a/c7/stm/contention.h +++ b/c7/stm/contention.h @@ -1,2 +1,3 @@ -static void contention_management(uint8_t other_segment_num, bool wait); +static void contention_management(uint8_t other_segment_num); +static void write_write_contention_management(uintptr_t lock_idx); diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -88,11 +88,7 @@ else { /* call the contention manager, and then retry (unless we were aborted). */ - mutex_lock(); - uint8_t prev_owner = ((volatile uint8_t *)write_locks)[lock_idx]; - if (prev_owner != 0 && prev_owner != lock_num) - contention_management(prev_owner - 1, true); - mutex_unlock(); + write_write_contention_management(lock_idx); goto retry; } @@ -156,6 +152,7 @@ : TS_INEVITABLE); STM_SEGMENT->jmpbuf_ptr = jmpbuf; STM_PSEGMENT->shadowstack_at_start_of_transaction = tl->shadowstack; + STM_SEGMENT->nursery_end = NURSERY_END; dprintf(("start_transaction\n")); @@ -205,7 +202,7 @@ ({ if (was_read_remote(remote_base, item, remote_version)) { /* A write-read conflict! */ - contention_management(remote_num, false); + contention_management(remote_num); /* If we reach this point, it means we aborted the other thread. We're done here. */ @@ -311,6 +308,12 @@ static void _finish_transaction(void) { + /* signal all the threads blocked in wait_for_other_safe_points() */ + if (STM_SEGMENT->nursery_end == NSE_SIGNAL) { + STM_SEGMENT->nursery_end = NURSERY_END; + cond_broadcast(C_SAFE_POINT); + } + STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; @@ -339,8 +342,8 @@ /* wait until the other thread is at a safe-point */ wait_for_other_safe_points(SP_SAFE_POINT_CANNOT_COLLECT); - /* the rest of this function runs either atomically without releasing - the mutex, or it needs to restart. 
*/ + /* the rest of this function either runs atomically without + releasing the mutex, or aborts the current thread. */ /* detect conflicts */ detect_write_read_conflicts(); @@ -366,9 +369,9 @@ /* done */ _finish_transaction(); - /* we did cond_broadcast() above already, in - try_wait_for_other_safe_points(). It may wake up - other threads in cond_wait() for a free segment. */ + /* wake up one other thread waiting for a segment. */ + cond_signal(C_RELEASE_THREAD_SEGMENT); + mutex_unlock(); } @@ -446,7 +449,14 @@ _finish_transaction(); - cond_broadcast(); + /* wake up one other thread waiting for a segment. In order to support + contention.c, we use a broadcast, to make sure that all threads are + signalled, including the one that requested an abort, if any. + Moreover, we wake up any thread waiting for this one to do a safe + point, if any. + */ + cond_broadcast(C_RELEASE_THREAD_SEGMENT); + mutex_unlock(); assert(jmpbuf_ptr != NULL); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -16,7 +16,6 @@ #define NURSERY_END (NURSERY_START + NURSERY_SIZE) static uintptr_t _stm_nursery_start; -uintptr_t _stm_nursery_end; /************************************************************/ @@ -25,11 +24,12 @@ { assert(_STM_FAST_ALLOC <= NURSERY_SIZE); _stm_nursery_start = NURSERY_START; - _stm_nursery_end = NURSERY_END; long i; - for (i = 0; i < NB_SEGMENTS; i++) + for (i = 0; i < NB_SEGMENTS; i++) { get_segment(i)->nursery_current = (stm_char *)NURSERY_START; + get_segment(i)->nursery_end = NURSERY_END; + } } static void teardown_nursery(void) @@ -163,6 +163,8 @@ static void minor_collection(bool commit) { assert(!_has_mutex()); + + stm_safe_point(); abort_if_needed(); /* We must move out of the nursery any object found within the @@ -257,7 +259,7 @@ assert((uintptr_t)STM_SEGMENT->nursery_current == _stm_nursery_start); uintptr_t i, limit; # ifdef STM_TESTS - limit = _stm_nursery_end - _stm_nursery_start; + limit = NURSERY_END - _stm_nursery_start; # else limit = 64; # endif diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -11,36 +11,43 @@ additionally it wants to wait until the global state is changed by someone else, it waits on the condition variable. This should be all we need for synchronization. - - Maybe look at https://github.com/neosmart/pevents for how they do - WaitForMultipleObjects(). 
*/ static union { struct { pthread_mutex_t global_mutex; - pthread_cond_t global_cond; + pthread_cond_t cond[_C_TOTAL]; /* some additional pieces of global state follow */ uint8_t in_use[NB_SEGMENTS]; /* 1 if running a pthread */ uint64_t global_time; }; - char reserved[128]; + char reserved[192]; } sync_ctl __attribute__((aligned(64))); static void setup_sync(void) { - if (pthread_mutex_init(&sync_ctl.global_mutex, NULL) != 0 || - pthread_cond_init(&sync_ctl.global_cond, NULL) != 0) - stm_fatalerror("mutex/cond initialization: %m\n"); + if (pthread_mutex_init(&sync_ctl.global_mutex, NULL) != 0) + stm_fatalerror("mutex initialization: %m\n"); + + long i; + for (i = 0; i < _C_TOTAL; i++) { + if (pthread_cond_init(&sync_ctl.cond[i], NULL) != 0) + stm_fatalerror("cond initialization: %m\n"); + } } static void teardown_sync(void) { - if (pthread_mutex_destroy(&sync_ctl.global_mutex) != 0 || - pthread_cond_destroy(&sync_ctl.global_cond) != 0) - stm_fatalerror("mutex/cond destroy: %m\n"); + if (pthread_mutex_destroy(&sync_ctl.global_mutex) != 0) + stm_fatalerror("mutex destroy: %m\n"); + + long i; + for (i = 0; i < _C_TOTAL; i++) { + if (pthread_cond_destroy(&sync_ctl.cond[i]) != 0) + stm_fatalerror("cond destroy: %m\n"); + } memset(&sync_ctl, 0, sizeof(sync_ctl.in_use)); } @@ -81,26 +88,31 @@ assert((_has_mutex_here = false, 1)); } -static inline void cond_wait(void) +static inline void cond_wait(enum cond_type_e ctype) { #ifdef STM_NO_COND_WAIT - fprintf(stderr, "*** cond_wait called!"); - abort(); + stm_fatalerror("*** cond_wait/%d called!\n", (int)ctype); #endif assert(_has_mutex_here); - if (UNLIKELY(pthread_cond_wait(&sync_ctl.global_cond, + if (UNLIKELY(pthread_cond_wait(&sync_ctl.cond[ctype], &sync_ctl.global_mutex) != 0)) - stm_fatalerror("pthread_cond_wait: %m\n"); + stm_fatalerror("pthread_cond_wait/%d: %m\n", (int)ctype); if (STM_PSEGMENT->transaction_state == TS_MUST_ABORT) abort_with_mutex(); } -static inline void cond_broadcast(void) +static inline void cond_broadcast(enum cond_type_e ctype) { - if (UNLIKELY(pthread_cond_broadcast(&sync_ctl.global_cond) != 0)) - stm_fatalerror("pthread_cond_broadcast: %m\n"); + if (UNLIKELY(pthread_cond_broadcast(&sync_ctl.cond[ctype]) != 0)) + stm_fatalerror("pthread_cond_broadcast/%d: %m\n", (int)ctype); +} + +static inline void cond_signal(enum cond_type_e ctype) +{ + if (UNLIKELY(pthread_cond_signal(&sync_ctl.cond[ctype]) != 0)) + stm_fatalerror("pthread_cond_signal/%d: %m\n", (int)ctype); } static void acquire_thread_segment(stm_thread_local_t *tl) @@ -134,8 +146,10 @@ goto got_num; } } - /* Wait and retry */ - cond_wait(); + /* Wait and retry. It is guaranteed that any thread releasing its + segment will do so by acquiring the mutex and calling + cond_signal(C_RELEASE_THREAD_SEGMENT). */ + cond_wait(C_RELEASE_THREAD_SEGMENT); goto retry; got_num: @@ -216,11 +230,18 @@ in the cond_wait() in this same function. */ + /* XXX review what occurs for the other kind! */ + assert(requested_safe_point_kind == SP_SAFE_POINT_CANNOT_COLLECT); + restart: assert(_has_mutex()); assert(STM_PSEGMENT->safe_point == SP_SAFE_POINT_CAN_COLLECT); + if (STM_PSEGMENT->transaction_state == TS_MUST_ABORT) + abort_with_mutex(); + long i; + bool wait = false; for (i = 0; i < NB_SEGMENTS; i++) { /* If the other thread is SP_NO_TRANSACTION, then it can be ignored here: as long as we have the mutex, it will remain @@ -237,36 +258,49 @@ /* we need to wait for this thread. Use NSE_SIGNAL to ask it (and possibly all other threads in the same case) to enter a safe-point soon. 
*/ - _stm_nursery_end = NSE_SIGNAL; - cond_wait(); - goto restart; + other_pseg->pub.nursery_end = NSE_SIGNAL; + wait = true; } } - /* all threads are at a safe-point now. */ - cond_broadcast(); /* to wake up the other threads, but later, - when they get the mutex again */ + if (wait) { + cond_wait(C_SAFE_POINT); + goto restart; + } + + /* all threads are at a safe-point now. Broadcast C_RESUME, which + will allow them to resume --- but only when we release the mutex. */ + cond_broadcast(C_RESUME); } void _stm_collectable_safe_point(void) { - /* If nursery_section_end was set to NSE_SIGNAL by another thread, + /* If _stm_nursery_end was set to NSE_SIGNAL by another thread, we end up here as soon as we try to call stm_allocate() or do a call to stm_safe_point(). - See wait_for_other_safe_points() for details. + + This works together with wait_for_other_safe_points() to + signal the C_SAFE_POINT condition. */ mutex_lock(); + collectable_safe_point(); + mutex_unlock(); +} + +static void collectable_safe_point(void) +{ assert(STM_PSEGMENT->safe_point == SP_RUNNING); - if (_stm_nursery_end == NSE_SIGNAL) { + while (STM_SEGMENT->nursery_end == NSE_SIGNAL) { STM_PSEGMENT->safe_point = SP_SAFE_POINT_CAN_COLLECT; + STM_SEGMENT->nursery_end = NURSERY_END; - cond_broadcast(); + /* signal all the threads blocked in + wait_for_other_safe_points() */ + cond_broadcast(C_SAFE_POINT); - do { cond_wait(); } while (_stm_nursery_end == NSE_SIGNAL); + cond_wait(C_RESUME); STM_PSEGMENT->safe_point = SP_RUNNING; } - - mutex_unlock(); } diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -3,11 +3,18 @@ static void setup_sync(void); static void teardown_sync(void); -/* all synchronization is done via a mutex and condition variable */ +/* all synchronization is done via a mutex and a few condition variables */ +enum cond_type_e { + C_RELEASE_THREAD_SEGMENT, + C_SAFE_POINT, + C_RESUME, + _C_TOTAL +}; static void mutex_lock(void); static void mutex_unlock(void); -static void cond_wait(void); -static void cond_broadcast(void); +static void cond_wait(enum cond_type_e); +static void cond_broadcast(enum cond_type_e); +static void cond_signal(enum cond_type_e); #ifndef NDEBUG static bool _has_mutex(void); #endif @@ -19,3 +26,4 @@ /* see the source for an exact description */ static void wait_for_other_safe_points(int requested_safe_point_kind); +static void collectable_safe_point(void); diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -43,6 +43,7 @@ int segment_num; char *segment_base; stm_char *nursery_current; + uintptr_t nursery_end; struct stm_thread_local_s *running_thread; stm_jmpbuf_t *jmpbuf_ptr; }; @@ -65,8 +66,6 @@ void _stm_start_transaction(stm_thread_local_t *, stm_jmpbuf_t *); void _stm_collectable_safe_point(void); -extern uintptr_t _stm_nursery_end; - #ifdef STM_TESTS bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); @@ -173,7 +172,7 @@ stm_char *p = STM_SEGMENT->nursery_current; stm_char *end = p + size_rounded_up; STM_SEGMENT->nursery_current = end; - if (UNLIKELY((uintptr_t)end > _stm_nursery_end)) + if (UNLIKELY((uintptr_t)end > STM_SEGMENT->nursery_end)) return _stm_allocate_slowpath(size_rounded_up); return (object_t *)p; @@ -231,7 +230,7 @@ /* Forces a safe-point if needed. Normally not needed: this is automatic if you call stm_allocate(). 
*/ static inline void stm_safe_point(void) { - if (_stm_nursery_end == _STM_NSE_SIGNAL) + if (STM_SEGMENT->nursery_end == _STM_NSE_SIGNAL) _stm_collectable_safe_point(); } From noreply at buildbot.pypy.org Mon Feb 24 23:54:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Feb 2014 23:54:37 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Skip these two tests, see message Message-ID: <20140224225437.1F5A31D2720@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r846:1f4d5cf924de Date: 2014-02-24 23:50 +0100 http://bitbucket.org/pypy/stmgc/changeset/1f4d5cf924de/ Log: Skip these two tests, see message diff --git a/c7/test/test_nursery.py b/c7/test/test_nursery.py --- a/c7/test/test_nursery.py +++ b/c7/test/test_nursery.py @@ -112,6 +112,8 @@ assert len(seen) < 5 # addresses are reused def test_reset_partial_alloc_pages(self): + py.test.skip("a would-be-nice feature, but not actually needed: " + "the next major GC will take care of it") self.start_transaction() new = stm_allocate(16) stm_set_char(new, 'a') @@ -129,6 +131,8 @@ assert stm_get_char(newer) == '\0' def test_reuse_page(self): + py.test.skip("a would-be-nice feature, but not actually needed: " + "the next major GC will take care of it") self.start_transaction() new = stm_allocate(16) self.push_root(new) From noreply at buildbot.pypy.org Mon Feb 24 23:54:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Mon, 24 Feb 2014 23:54:38 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Throw away the nursery upon abort Message-ID: <20140224225438.2C3AA1D2720@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r847:83904087abbc Date: 2014-02-24 23:54 +0100 http://bitbucket.org/pypy/stmgc/changeset/83904087abbc/ Log: Throw away the nursery upon abort diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -440,6 +440,9 @@ assert(!"abort: bad transaction_state"); } + /* throw away the content of the nursery */ + throw_away_nursery(); + /* reset all the modified objects (incl. re-adding GCFLAG_WRITE_BARRIER) */ reset_modified_from_other_segments(); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -147,7 +147,7 @@ } } -static void reset_nursery(void) +static void throw_away_nursery(void) { /* reset the nursery by zeroing it */ size_t size; @@ -196,7 +196,7 @@ collect_oldrefs_to_nursery(); - reset_nursery(); + throw_away_nursery(); assert(list_is_empty(STM_PSEGMENT->objects_pointing_to_nursery)); } diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -7,3 +7,4 @@ static void minor_collection(bool commit); static void check_nursery_at_transaction_start(void); +static void throw_away_nursery(void); From noreply at buildbot.pypy.org Tue Feb 25 00:45:22 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 00:45:22 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Fixes. Message-ID: <20140224234522.563A11C0132@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r848:c2dc9f28006f Date: 2014-02-25 00:45 +0100 http://bitbucket.org/pypy/stmgc/changeset/c2dc9f28006f/ Log: Fixes. 
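An aside on the condition-variable changesets above (bf3c8f9d6f21 in particular): the single global condition variable is split into one per event -- C_RELEASE_THREAD_SEGMENT, C_SAFE_POINT and C_RESUME -- all tied to the same mutex, so a thread that needs every other thread parked can wait specifically for "someone reached a safe point" and later broadcast "resume". The sketch below is a deliberately simplified Python model of that handshake built on the standard threading module; it is not the stmgc code and ignores aborts, segments and the nursery.

    import threading

    class SafePointGate(object):
        # one mutex, several condition variables tied to it
        def __init__(self, n_threads):
            self.mutex = threading.Lock()
            self.c_safe_point = threading.Condition(self.mutex)
            self.c_resume = threading.Condition(self.mutex)
            self.n_threads = n_threads
            self.n_paused = 0
            self.pause_requested = False

        def wait_for_other_safe_points(self, critical_work):
            # caller needs every other thread parked (single requester assumed)
            with self.mutex:
                self.pause_requested = True
                while self.n_paused < self.n_threads - 1:
                    self.c_safe_point.wait()
                critical_work()            # runs while everyone else is parked
                self.pause_requested = False
                self.c_resume.notify_all()

        def safe_point(self):
            # called regularly by the other threads, e.g. from the allocator
            with self.mutex:
                if not self.pause_requested:
                    return
                self.n_paused += 1
                self.c_safe_point.notify_all()   # wake the waiting requester
                while self.pause_requested:
                    self.c_resume.wait()
                self.n_paused -= 1

The real code additionally aborts transactions from inside cond_wait() and signals C_SAFE_POINT from the contention manager, which is what the diffs above wire up.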
diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -104,6 +104,15 @@ getting the write-lock */ assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; + + /* for sanity, check that all other segment copies of this object + still have the flag */ + long i; + for (i = 0; i < NB_SEGMENTS; i++) { + assert(i == STM_SEGMENT->segment_num || + (((struct object_s *)REAL_ADDRESS(get_segment_base(i), obj)) + ->stm_flags & GCFLAG_WRITE_BARRIER)); + } } static void reset_transaction_read_version(void) @@ -292,9 +301,9 @@ assert(write_locks[lock_idx] == STM_PSEGMENT->write_lock_num); write_locks[lock_idx] = 0; - /* set again the WRITE_BARRIER flag */ - assert((item->stm_flags & GCFLAG_WRITE_BARRIER) == 0); - item->stm_flags |= GCFLAG_WRITE_BARRIER; + /* the WRITE_BARRIER flag should have been set again by + minor_collection() */ + assert((item->stm_flags & GCFLAG_WRITE_BARRIER) != 0); /* copy the modified object to the other segment */ char *src = REAL_ADDRESS(local_base, item); @@ -363,6 +372,7 @@ /* update 'overflow_number' if needed */ if (has_any_overflow_object) { highest_overflow_number += GCFLAG_OVERFLOW_NUMBER_bit0; + assert(highest_overflow_number != 0); /* XXX else, overflow! */ STM_PSEGMENT->overflow_number = highest_overflow_number; } @@ -393,9 +403,6 @@ STM_PSEGMENT->modified_old_objects, object_t * /*item*/, ({ - /* all objects in 'modified_objects' have this flag removed */ - assert((item->stm_flags & GCFLAG_WRITE_BARRIER) == 0); - /* memcpy in the opposite direction than push_modified_to_other_segments() */ char *src = REAL_ADDRESS(remote_base, item); @@ -403,8 +410,10 @@ ssize_t size = stmcb_size_rounded_up((struct object_s *)src); memcpy(dst, src, size); - /* copying from the other segment added again the - WRITE_BARRIER flag */ + /* objects in 'modified_old_objects' usually have the + WRITE_BARRIER flag, unless they have been modified + recently. Ignore the old flag; after copying from the + other segment, we should have the flag. */ assert(item->stm_flags & GCFLAG_WRITE_BARRIER); /* write all changes to the object before we release the diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -68,13 +68,13 @@ struct list_s *modified_old_objects; /* List of out-of-nursery objects that may contain pointers to - nursery objects. This is used to track the GC status: they - are all objects outside the nursery on which an stm_write() - occurred since the last minor collection. If there was no - minor collection yet in the current transaction, this is NULL, + nursery objects. This is used to track the GC status: they are + all objects outside the nursery on which an stm_write() occurred + since the last minor collection. This list contains exactly the + objects without GCFLAG_WRITE_BARRIER. If there was no minor + collection yet in the current transaction, this is NULL, understood as meaning implicitly "this is the same as - 'modified_old_objects'. This list contains exactly the - objects without GCFLAG_WRITE_BARRIER. */ + 'modified_old_objects'". */ struct list_s *objects_pointing_to_nursery; /* List of all large, overflowed objects. 
Only non-NULL after the diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -51,6 +51,7 @@ /************************************************************/ #define GCWORD_MOVED ((object_t *) -42) +#define FLAG_SYNC_LARGE_NOW 0x01 static void minor_trace_if_young(object_t **pobj) @@ -80,19 +81,21 @@ char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); size_t size = stmcb_size_rounded_up((struct object_s *)realobj); object_t *nobj; + uintptr_t nobj_sync_now; if (1 /*size >= GC_MEDIUM_REQUEST*/) { /* case 1: object is not small enough. Ask gcpage.c for an allocation via largemalloc. */ nobj = allocate_outside_nursery_large(size); + nobj_sync_now = (uintptr_t)nobj; /* Copy the object */ char *realnobj = REAL_ADDRESS(STM_SEGMENT->segment_base, nobj); memcpy(realnobj, realobj, size); if (STM_PSEGMENT->minor_collect_will_commit_now) - synchronize_overflow_object_now(nobj); + nobj_sync_now |= FLAG_SYNC_LARGE_NOW; else LIST_APPEND(STM_PSEGMENT->large_overflow_objects, nobj); } @@ -107,13 +110,13 @@ } /* Done copying the object. */ - //dprintf(("%p -> %p\n", obj, nobj)); + //dprintf(("\t\t\t\t\t%p -> %p\n", obj, nobj)); pforwarded_array[0] = GCWORD_MOVED; pforwarded_array[1] = nobj; *pobj = nobj; /* Must trace the object later */ - LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, nobj); + LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, nobj_sync_now); } static void collect_roots_in_nursery(void) @@ -127,26 +130,46 @@ } } +static inline void _collect_now(object_t *obj) +{ + assert(!_is_in_nursery(obj)); + + /* We must not have GCFLAG_WRITE_BARRIER so far. Add it now. */ + assert(!(obj->stm_flags & GCFLAG_WRITE_BARRIER)); + obj->stm_flags |= GCFLAG_WRITE_BARRIER; + + /* Trace the 'obj' to replace pointers to nursery with pointers + outside the nursery, possibly forcing nursery objects out and + adding them to 'objects_pointing_to_nursery' as well. */ + char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + stmcb_trace((struct object_s *)realobj, &minor_trace_if_young); +} + static void collect_oldrefs_to_nursery(void) { struct list_s *lst = STM_PSEGMENT->objects_pointing_to_nursery; while (!list_is_empty(lst)) { - object_t *obj = (object_t *)list_pop_item(lst); - assert(!_is_in_nursery(obj)); + uintptr_t obj_sync_now = list_pop_item(lst); + object_t *obj = (object_t *)(obj_sync_now & ~FLAG_SYNC_LARGE_NOW); - /* We must not have GCFLAG_WRITE_BARRIER so far. Add it now. */ - assert(!(obj->stm_flags & GCFLAG_WRITE_BARRIER)); - obj->stm_flags |= GCFLAG_WRITE_BARRIER; + _collect_now(obj); - /* Trace the 'obj' to replace pointers to nursery with pointers - outside the nursery, possibly forcing nursery objects out and - adding them to 'objects_pointing_to_nursery' as well. */ - char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); - stmcb_trace((struct object_s *)realobj, &minor_trace_if_young); + if (obj_sync_now & FLAG_SYNC_LARGE_NOW) { + /* synchronize the object to other segments *now* -- which + means, just after we added the WRITE_BARRIER flag and + traced into it, because tracing might change it again. 
*/ + synchronize_overflow_object_now(obj); + } } } +static void collect_modified_old_objects(void) +{ + LIST_FOREACH_R(STM_PSEGMENT->modified_old_objects, object_t * /*item*/, + _collect_now(item)); +} + static void throw_away_nursery(void) { /* reset the nursery by zeroing it */ @@ -181,17 +204,25 @@ STM_PSEGMENT->minor_collect_will_commit_now = commit; - /* All the objects we move out of the nursery become "overflow" - objects. We use the list 'objects_pointing_to_nursery' - to hold the ones we didn't trace so far. */ - if (STM_PSEGMENT->objects_pointing_to_nursery == NULL) - STM_PSEGMENT->objects_pointing_to_nursery = list_create(); - /* We need this to track the large overflow objects for a future commit. We don't need it if we're committing now. */ if (!commit && STM_PSEGMENT->large_overflow_objects == NULL) STM_PSEGMENT->large_overflow_objects = list_create(); + /* All the objects we move out of the nursery become "overflow" + objects. We use the list 'objects_pointing_to_nursery' + to hold the ones we didn't trace so far. */ + if (STM_PSEGMENT->objects_pointing_to_nursery == NULL) { + STM_PSEGMENT->objects_pointing_to_nursery = list_create(); + + /* See the doc of 'objects_pointing_to_nursery': if it is NULL, + then it is implicitly understood to be equal to + 'modified_old_objects'. We could copy modified_old_objects + into objects_pointing_to_nursery, but instead we use the + following shortcut */ + collect_modified_old_objects(); + } + collect_roots_in_nursery(); collect_oldrefs_to_nursery(); diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -164,6 +164,7 @@ def pop_roots(self, ex): for r in reversed(self.saved_roots[self.roots_on_transaction_start:]): ex.do('%s = self.pop_root()' % r) + ex.do('# 0x%x' % (int(ffi.cast("uintptr_t", ex.content[r])),)) self.roots_on_stack -= 1 assert self.roots_on_stack == self.roots_on_transaction_start @@ -175,7 +176,8 @@ for r in reversed(to_reload): ex.do('%s = self.pop_root()' % r) for r in to_reload: - ex.do('self.push_root(%s)' % r) + ex.do('self.push_root(%s) # 0x%x' % ( + r, int(ffi.cast("uintptr_t", ex.content[r])))) def start_transaction(self): assert self.transaction_state is None From noreply at buildbot.pypy.org Tue Feb 25 02:03:41 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 02:03:41 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Work around change in py library. Message-ID: <20140225010341.F31341C08B9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69375:c0abbab51946 Date: 2014-02-25 02:02 +0100 http://bitbucket.org/pypy/pypy/changeset/c0abbab51946/ Log: Work around change in py library. diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py --- a/pypy/tool/pytest/appsupport.py +++ b/pypy/tool/pytest/appsupport.py @@ -129,9 +129,13 @@ exprinfo = None def __init__(self, space, tb): - self.frame = AppFrame(space, space.getattr(tb, space.wrap('tb_frame'))) + self._frame = AppFrame(space, space.getattr(tb, space.wrap('tb_frame'))) self.lineno = space.unwrap(space.getattr(tb, space.wrap('tb_lineno'))) - 1 + @property + def frame(self): + return self._frame + def reinterpret(self): # XXX we need to solve a general problem: how to prevent # reinterpretation from generating a different exception? 
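A note on the appsupport.py change that ends the message above: the value moves to self._frame and is re-exposed through a read-only property. The likely reason -- an assumption here, the log only says "work around change in py library" -- is that the py library's traceback-entry base class now defines frame as a property, so plain attribute assignment raises AttributeError. A hypothetical, self-contained recreation of the pattern (the base class is a stand-in, not the real py class):

    class EntryBase(object):              # stand-in for the py library class
        @property
        def frame(self):
            raise NotImplementedError

    class AppEntry(EntryBase):
        def __init__(self, frame):
            # a plain `self.frame = frame` would hit a setter-less property
            # (the base class's, or ours below) and raise AttributeError,
            # so store the value under a private name instead
            self._frame = frame

        @property
        def frame(self):                  # read-only view over _frame
            return self._frame

    entry = AppEntry("<fake frame>")
    assert entry.frame == "<fake frame>"
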
From noreply at buildbot.pypy.org Tue Feb 25 03:24:58 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 03:24:58 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Kill marshal_w multimethod and clean up interp_marshal. Message-ID: <20140225022458.3A8DA1C10A8@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69376:478e415bb18d Date: 2014-02-25 03:24 +0100 http://bitbucket.org/pypy/pypy/changeset/478e415bb18d/ Log: Kill marshal_w multimethod and clean up interp_marshal. diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1732,5 +1732,4 @@ 'newdict', 'newslice', 'call_args', - 'marshal_w', ] diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py --- a/pypy/module/marshal/interp_marshal.py +++ b/pypy/module/marshal/interp_marshal.py @@ -3,6 +3,7 @@ from rpython.rlib.rarithmetic import intmask from rpython.rlib import rstackovf from pypy.module._file.interp_file import W_File +from pypy.objspace.std.marshal_impl import marshal, get_unmarshallers Py_MARSHAL_VERSION = 2 @@ -136,6 +137,26 @@ raise OperationError(space.w_ValueError, space.wrap(msg)) class Marshaller(_Base): + """ + atomic types including typecode: + + atom(tc) puts single typecode + atom_int(tc, int) puts code and int + atom_int64(tc, int64) puts code and int64 + atom_str(tc, str) puts code, len and string + atom_strlist(tc, strlist) puts code, len and list of strings + + building blocks for compound types: + + start(typecode) sets the type character + put(s) puts a string with fixed length + put_short(int) puts a short integer + put_int(int) puts an integer + put_pascal(s) puts a short string + put_w_obj(w_obj) puts a wrapped object + put_tuple_w(TYPE, tuple_w) puts tuple_w, an unwrapped list of wrapped objects + """ + # _annspecialcase_ = "specialize:ctr_location" # polymorphic # does not work with subclassing @@ -214,7 +235,7 @@ self.put(x) def put_w_obj(self, w_obj): - self.space.marshal_w(w_obj, self) + marshal(self.space, w_obj, self) def dump_w_obj(self, w_obj): space = self.space @@ -235,7 +256,7 @@ idx = 0 while idx < lng: w_obj = lst_w[idx] - self.space.marshal_w(w_obj, self) + marshal(self.space, w_obj, self) idx += 1 def _overflow(self): @@ -338,14 +359,11 @@ q = '"' u.raise_exc('invalid typecode in unmarshal: ' + q + s + q) -def register(codes, func): - """NOT_RPYTHON""" - for code in codes: - Unmarshaller._dispatch[ord(code)] = func - class Unmarshaller(_Base): _dispatch = [invalid_typecode] * 256 + for tc, func in get_unmarshallers(): + _dispatch[ord(tc)] = func def __init__(self, space, reader): self.space = space diff --git a/pypy/module/marshal/test/test_marshalimpl.py b/pypy/module/marshal/test/test_marshalimpl.py --- a/pypy/module/marshal/test/test_marshalimpl.py +++ b/pypy/module/marshal/test/test_marshalimpl.py @@ -75,7 +75,7 @@ expected = marshal.dumps(long(x)) w_obj = space.wraplong(x) m = FakeM() - space.marshal_w(w_obj, m) + interp_marshal.marshal(space, w_obj, m) assert ''.join(m.seen) == expected # u = interp_marshal.StringUnmarshaller(space, space.wrap(expected)) diff --git a/pypy/objspace/std/marshal_impl.py b/pypy/objspace/std/marshal_impl.py --- a/pypy/objspace/std/marshal_impl.py +++ b/pypy/objspace/std/marshal_impl.py @@ -1,38 +1,26 @@ -# implementation of marshalling by multimethods +from rpython.rlib.rarithmetic import LONG_BIT, r_longlong, r_uint +from 
rpython.rlib.rstring import StringBuilder +from rpython.rlib.rstruct import ieee +from rpython.rlib.unroll import unrolling_iterable -""" -The idea is to have an effective but flexible -way to implement marshalling for the native types. - -The marshal_w operation is called with an object, -a callback and a state variable. -""" - -from pypy.interpreter.error import OperationError -from pypy.objspace.std.register_all import register_all -from rpython.rlib.rarithmetic import LONG_BIT, r_longlong, r_uint, intmask -from pypy.objspace.std import model -from pypy.objspace.std.dictmultiobject import W_DictMultiObject +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.special import Ellipsis from pypy.interpreter.pycode import PyCode -from pypy.interpreter import gateway, unicodehelper -from rpython.rlib.rstruct import ieee -from rpython.rlib.rstring import StringBuilder - -from pypy.objspace.std.boolobject import W_BoolObject -from pypy.objspace.std.bytesobject import W_BytesObject +from pypy.interpreter import unicodehelper +from pypy.objspace.std.boolobject import W_BoolObject +from pypy.objspace.std.bytesobject import W_BytesObject from pypy.objspace.std.complexobject import W_ComplexObject -from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.floatobject import W_FloatObject -from pypy.objspace.std.tupleobject import W_AbstractTupleObject -from pypy.objspace.std.listobject import W_ListObject -from pypy.objspace.std.typeobject import W_TypeObject -from pypy.objspace.std.longobject import W_LongObject, newlong -from pypy.objspace.std.smalllongobject import W_SmallLongObject -from pypy.objspace.std.noneobject import W_NoneObject +from pypy.objspace.std.dictmultiobject import W_DictMultiObject +from pypy.objspace.std.intobject import W_IntObject +from pypy.objspace.std.floatobject import W_FloatObject +from pypy.objspace.std.listobject import W_ListObject +from pypy.objspace.std.longobject import W_AbstractLongObject +from pypy.objspace.std.noneobject import W_NoneObject +from pypy.objspace.std.setobject import W_FrozensetObject, W_SetObject +from pypy.objspace.std.tupleobject import W_AbstractTupleObject +from pypy.objspace.std.typeobject import W_TypeObject from pypy.objspace.std.unicodeobject import W_UnicodeObject -from pypy.module.marshal.interp_marshal import register TYPE_NULL = '0' TYPE_NONE = 'N' @@ -59,71 +47,87 @@ TYPE_SET = '<' TYPE_FROZENSET = '>' -""" -simple approach: -a call to marshal_w has the following semantics: -marshal_w receives a marshaller object which contains -state and several methods. 
+_marshallers = [] +_unmarshallers = [] -atomic types including typecode: +def marshaller(type): + def _decorator(f): + _marshallers.append((type, f)) + return f + return _decorator -atom(tc) puts single typecode -atom_int(tc, int) puts code and int -atom_int64(tc, int64) puts code and int64 -atom_str(tc, str) puts code, len and string -atom_strlist(tc, strlist) puts code, len and list of strings +def unmarshaller(tc): + def _decorator(f): + _unmarshallers.append((tc, f)) + return f + return _decorator -building blocks for compound types: +def marshal(space, w_obj, m): + # _marshallers_unroll is defined at the end of the file + for type, func in _marshallers_unroll: + if isinstance(w_obj, type): + func(space, w_obj, m) + return -start(typecode) sets the type character -put(s) puts a string with fixed length -put_short(int) puts a short integer -put_int(int) puts an integer -put_pascal(s) puts a short string -put_w_obj(w_obj) puts a wrapped object -put_tuple_w(TYPE, tuple_w) puts tuple_w, an unwrapped list of wrapped objects -""" + # any unknown object implementing the buffer protocol is + # accepted and encoded as a plain string + try: + s = space.bufferstr_w(w_obj) + except OperationError as e: + if e.match(space, space.w_TypeError): + raise oefmt(space.w_ValueError, "unmarshallable object") + raise + m.atom_str(TYPE_STRING, s) -handled_by_any = [] +def get_unmarshallers(): + return _unmarshallers -def raise_exception(space, msg): - raise OperationError(space.w_ValueError, space.wrap(msg)) -def marshal_w__None(space, w_none, m): + at marshaller(W_NoneObject) +def marshal_none(space, w_none, m): m.atom(TYPE_NONE) -def unmarshal_None(space, u, tc): + at unmarshaller(TYPE_NONE) +def unmarshal_none(space, u, tc): return space.w_None -register(TYPE_NONE, unmarshal_None) -def marshal_w__Bool(space, w_bool, m): + + at marshaller(W_BoolObject) +def marshal_bool(space, w_bool, m): m.atom(TYPE_TRUE if w_bool.intval else TYPE_FALSE) -def unmarshal_Bool(space, u, tc): - return space.newbool(tc == TYPE_TRUE) -register(TYPE_TRUE + TYPE_FALSE, unmarshal_Bool) + at unmarshaller(TYPE_TRUE) +def unmarshal_bool(space, u, tc): + return space.w_True -def marshal_w__Type(space, w_type, m): + at unmarshaller(TYPE_FALSE) +def unmarshal_false(space, u, tc): + return space.w_False + + + at marshaller(W_TypeObject) +def marshal_stopiter(space, w_type, m): if not space.is_w(w_type, space.w_StopIteration): - raise_exception(space, "unmarshallable object") + raise oefmt(space.w_ValueError, "unmarshallable object") m.atom(TYPE_STOPITER) -def unmarshal_Type(space, u, tc): + at unmarshaller(TYPE_STOPITER) +def unmarshal_stopiter(space, u, tc): return space.w_StopIteration -register(TYPE_STOPITER, unmarshal_Type) -# not directly supported: -def marshal_w_Ellipsis(space, w_ellipsis, m): + + at marshaller(Ellipsis) +def marshal_ellipsis(space, w_ellipsis, m): m.atom(TYPE_ELLIPSIS) -model.MM.marshal_w.register(marshal_w_Ellipsis, Ellipsis) + at unmarshaller(TYPE_ELLIPSIS) +def unmarshal_ellipsis(space, u, tc): + return space.w_Ellipsis -def unmarshal_Ellipsis(space, u, tc): - return space.w_Ellipsis -register(TYPE_ELLIPSIS, unmarshal_Ellipsis) -def marshal_w__Int(space, w_int, m): + at marshaller(W_IntObject) +def marshal_int(space, w_int, m): if LONG_BIT == 32: m.atom_int(TYPE_INT, w_int.intval) else: @@ -133,11 +137,12 @@ else: m.atom_int(TYPE_INT, w_int.intval) -def unmarshal_Int(space, u, tc): + at unmarshaller(TYPE_INT) +def unmarshal_int(space, u, tc): return space.newint(u.get_int()) -register(TYPE_INT, unmarshal_Int) 
-def unmarshal_Int64(space, u, tc): + at unmarshaller(TYPE_INT64) +def unmarshal_int64(space, u, tc): lo = u.get_int() # get the first 32 bits hi = u.get_int() # get the next 32 bits if LONG_BIT >= 64: @@ -145,63 +150,10 @@ else: x = (r_longlong(hi) << 32) | r_longlong(r_uint(lo)) # get a r_longlong return space.wrap(x) -register(TYPE_INT64, unmarshal_Int64) -def pack_float(f): - result = StringBuilder(8) - ieee.pack_float(result, f, 8, False) - return result.build() -def unpack_float(s): - return ieee.unpack_float(s, False) - -def marshal_w__Float(space, w_float, m): - if m.version > 1: - m.start(TYPE_BINARY_FLOAT) - m.put(pack_float(w_float.floatval)) - else: - m.start(TYPE_FLOAT) - m.put_pascal(space.str_w(space.repr(w_float))) - -def unmarshal_Float(space, u, tc): - return space.call_function(space.builtin.get('float'), - space.wrap(u.get_pascal())) -register(TYPE_FLOAT, unmarshal_Float) - -def unmarshal_Float_bin(space, u, tc): - return space.newfloat(unpack_float(u.get(8))) -register(TYPE_BINARY_FLOAT, unmarshal_Float_bin) - -def marshal_w__Complex(space, w_complex, m): - if m.version > 1: - m.start(TYPE_BINARY_COMPLEX) - m.put(pack_float(w_complex.realval)) - m.put(pack_float(w_complex.imagval)) - else: - # XXX a bit too wrap-happy - w_real = space.wrap(w_complex.realval) - w_imag = space.wrap(w_complex.imagval) - m.start(TYPE_COMPLEX) - m.put_pascal(space.str_w(space.repr(w_real))) - m.put_pascal(space.str_w(space.repr(w_imag))) - -def unmarshal_Complex(space, u, tc): - w_real = space.call_function(space.builtin.get('float'), - space.wrap(u.get_pascal())) - w_imag = space.call_function(space.builtin.get('float'), - space.wrap(u.get_pascal())) - w_t = space.builtin.get('complex') - return space.call_function(w_t, w_real, w_imag) -register(TYPE_COMPLEX, unmarshal_Complex) - -def unmarshal_Complex_bin(space, u, tc): - real = unpack_float(u.get(8)) - imag = unpack_float(u.get(8)) - return space.newcomplex(real, imag) -register(TYPE_BINARY_COMPLEX, unmarshal_Complex_bin) - -def marshal_w__Long(space, w_long, m): - from rpython.rlib.rbigint import rbigint + at marshaller(W_AbstractLongObject) +def marshal_long(space, w_long, m): from rpython.rlib.rarithmetic import r_ulonglong m.start(TYPE_LONG) SHIFT = 15 @@ -216,9 +168,9 @@ next = num.abs_rshift_and_mask(bigshiftcount, MASK) m.put_short(next) bigshiftcount += SHIFT -marshal_w__SmallLong = marshal_w__Long -def unmarshal_Long(space, u, tc): + at unmarshaller(TYPE_LONG) +def unmarshal_long(space, u, tc): from rpython.rlib.rbigint import rbigint lng = u.get_int() if lng < 0: @@ -229,12 +181,68 @@ digits = [u.get_short() for i in range(lng)] result = rbigint.from_list_n_bits(digits, 15) if lng and not result.tobool(): - raise_exception(space, 'bad marshal data') + raise oefmt(space.w_ValueError, "bad marshal data") if negative: result = result.neg() - w_long = newlong(space, result) - return w_long -register(TYPE_LONG, unmarshal_Long) + return space.newlong_from_rbigint(result) + + +def pack_float(f): + result = StringBuilder(8) + ieee.pack_float(result, f, 8, False) + return result.build() + +def unpack_float(s): + return ieee.unpack_float(s, False) + + at marshaller(W_FloatObject) +def marshal_float(space, w_float, m): + if m.version > 1: + m.start(TYPE_BINARY_FLOAT) + m.put(pack_float(w_float.floatval)) + else: + m.start(TYPE_FLOAT) + m.put_pascal(space.str_w(space.repr(w_float))) + + at unmarshaller(TYPE_FLOAT) +def unmarshal_float(space, u, tc): + return space.call_function(space.builtin.get('float'), + space.wrap(u.get_pascal())) + + 
at unmarshaller(TYPE_BINARY_FLOAT) +def unmarshal_float_bin(space, u, tc): + return space.newfloat(unpack_float(u.get(8))) + + + at marshaller(W_ComplexObject) +def marshal_complex(space, w_complex, m): + if m.version > 1: + m.start(TYPE_BINARY_COMPLEX) + m.put(pack_float(w_complex.realval)) + m.put(pack_float(w_complex.imagval)) + else: + # XXX a bit too wrap-happy + w_real = space.wrap(w_complex.realval) + w_imag = space.wrap(w_complex.imagval) + m.start(TYPE_COMPLEX) + m.put_pascal(space.str_w(space.repr(w_real))) + m.put_pascal(space.str_w(space.repr(w_imag))) + + at unmarshaller(TYPE_COMPLEX) +def unmarshal_complex(space, u, tc): + w_real = space.call_function(space.builtin.get('float'), + space.wrap(u.get_pascal())) + w_imag = space.call_function(space.builtin.get('float'), + space.wrap(u.get_pascal())) + w_t = space.builtin.get('complex') + return space.call_function(w_t, w_real, w_imag) + + at unmarshaller(TYPE_BINARY_COMPLEX) +def unmarshal_complex_bin(space, u, tc): + real = unpack_float(u.get(8)) + imag = unpack_float(u.get(8)) + return space.newcomplex(real, imag) + # XXX currently, intern() is at applevel, # and there is no interface to get at the @@ -244,10 +252,8 @@ def PySTRING_CHECK_INTERNED(w_str): return False + at marshaller(W_BytesObject) def marshal_bytes(space, w_str, m): - if not isinstance(w_str, W_BytesObject): - raise_exception(space, "unmarshallable object") - s = space.str_w(w_str) if m.version >= 1 and PySTRING_CHECK_INTERNED(w_str): # we use a native rtyper stringdict for speed @@ -260,63 +266,60 @@ m.atom_str(TYPE_INTERNED, s) else: m.atom_str(TYPE_STRING, s) -handled_by_any.append(('str', marshal_bytes)) + at unmarshaller(TYPE_STRING) def unmarshal_bytes(space, u, tc): return space.wrap(u.get_str()) -register(TYPE_STRING, unmarshal_bytes) + at unmarshaller(TYPE_INTERNED) def unmarshal_interned(space, u, tc): w_ret = space.wrap(u.get_str()) u.stringtable_w.append(w_ret) w_intern = space.builtin.get('intern') space.call_function(w_intern, w_ret) return w_ret -register(TYPE_INTERNED, unmarshal_interned) + at unmarshaller(TYPE_STRINGREF) def unmarshal_stringref(space, u, tc): idx = u.get_int() try: return u.stringtable_w[idx] except IndexError: - raise_exception(space, 'bad marshal data') -register(TYPE_STRINGREF, unmarshal_stringref) + raise oefmt(space.w_ValueError, "bad marshal data") + + at marshaller(W_AbstractTupleObject) def marshal_tuple(space, w_tuple, m): - if not isinstance(w_tuple, W_AbstractTupleObject): - raise_exception(space, "unmarshallable object") items = w_tuple.tolist() m.put_tuple_w(TYPE_TUPLE, items) -handled_by_any.append(('tuple', marshal_tuple)) + at unmarshaller(TYPE_TUPLE) def unmarshal_tuple(space, u, tc): items_w = u.get_tuple_w() return space.newtuple(items_w) -register(TYPE_TUPLE, unmarshal_tuple) + + at marshaller(W_ListObject) def marshal_list(space, w_list, m): - if not isinstance(w_list, W_ListObject): - raise_exception(space, "unmarshallable object") items = w_list.getitems()[:] m.put_tuple_w(TYPE_LIST, items) -handled_by_any.append(('list', marshal_list)) + at unmarshaller(TYPE_LIST) def unmarshal_list(space, u, tc): items_w = u.get_list_w() return space.newlist(items_w) -register(TYPE_LIST, unmarshal_list) -def marshal_w_dict(space, w_dict, m): - if not isinstance(w_dict, W_DictMultiObject): - raise_exception(space, "unmarshallable object") + + at marshaller(W_DictMultiObject) +def marshal_dict(space, w_dict, m): m.start(TYPE_DICT) for w_tuple in w_dict.items(): w_key, w_value = space.fixedview(w_tuple, 2) 
m.put_w_obj(w_key) m.put_w_obj(w_value) m.atom(TYPE_NULL) -handled_by_any.append(('dict', marshal_w_dict)) + at unmarshaller(TYPE_DICT) def unmarshal_dict(space, u, tc): # since primitive lists are not optimized and we don't know # the dict size in advance, use the dict's setitem instead @@ -329,14 +332,14 @@ w_value = u.get_w_obj() space.setitem(w_dic, w_key, w_value) return w_dic -register(TYPE_DICT, unmarshal_dict) + at unmarshaller(TYPE_NULL) def unmarshal_NULL(self, u, tc): return None -register(TYPE_NULL, unmarshal_NULL) -# this one is registered by hand: -def marshal_w_pycode(space, w_pycode, m): + + at marshaller(PyCode) +def marshal_pycode(space, w_pycode, m): m.start(TYPE_CODE) # see pypy.interpreter.pycode for the layout x = space.interp_w(PyCode, w_pycode) @@ -355,8 +358,6 @@ m.put_int(x.co_firstlineno) m.atom_str(TYPE_STRING, x.co_lnotab) -model.MM.marshal_w.register(marshal_w_pycode, PyCode) - # helper for unmarshalling string lists of code objects. # unfortunately they now can be interned or referenced, # so we no longer can handle it in interp_marshal.atom_strlist @@ -365,22 +366,16 @@ w_obj = u.get_w_obj() try: return u.space.str_w(w_obj) - except OperationError, e: + except OperationError as e: if e.match(u.space, u.space.w_TypeError): u.raise_exc('invalid marshal data for code object') - else: - raise + raise def unmarshal_strlist(u, tc): lng = u.atom_lng(tc) - res = [None] * lng - idx = 0 - space = u.space - while idx < lng: - res[idx] = unmarshal_str(u) - idx += 1 - return res + return [unmarshal_str(u) for i in range(lng)] + at unmarshaller(TYPE_CODE) def unmarshal_pycode(space, u, tc): argcount = u.get_int() nlocals = u.get_int() @@ -398,78 +393,39 @@ name = unmarshal_str(u) firstlineno = u.get_int() lnotab = unmarshal_str(u) - code = PyCode(space, argcount, nlocals, stacksize, flags, + return PyCode(space, argcount, nlocals, stacksize, flags, code, consts_w[:], names, varnames, filename, name, firstlineno, lnotab, freevars, cellvars) - return space.wrap(code) -register(TYPE_CODE, unmarshal_pycode) + + at marshaller(W_UnicodeObject) def marshal_unicode(space, w_unicode, m): - if not isinstance(w_unicode, W_UnicodeObject): - raise_exception(space, "unmarshallable object") s = unicodehelper.encode_utf8(space, space.unicode_w(w_unicode)) m.atom_str(TYPE_UNICODE, s) -handled_by_any.append(('unicode', marshal_unicode)) + at unmarshaller(TYPE_UNICODE) def unmarshal_unicode(space, u, tc): return space.wrap(unicodehelper.decode_utf8(space, u.get_str())) -register(TYPE_UNICODE, unmarshal_unicode) -app = gateway.applevel(r''' - def tuple_to_set(datalist, frozen=False): - if frozen: - return frozenset(datalist) - return set(datalist) -''') -tuple_to_set = app.interphook('tuple_to_set') - -# not directly supported: -def marshal_w_set(space, w_set, m): - # cannot access this list directly, because it's - # type is not exactly known through applevel. 
+ at marshaller(W_SetObject) +def marshal_set(space, w_set, m): lis_w = space.fixedview(w_set) m.put_tuple_w(TYPE_SET, lis_w) -handled_by_any.append( ('set', marshal_w_set) ) + at unmarshaller(TYPE_SET) +def unmarshal_set(space, u, tc): + return space.newset(u.get_tuple_w()) -# not directly supported: -def marshal_w_frozenset(space, w_frozenset, m): + + at marshaller(W_FrozensetObject) +def marshal_frozenset(space, w_frozenset, m): lis_w = space.fixedview(w_frozenset) m.put_tuple_w(TYPE_FROZENSET, lis_w) -handled_by_any.append( ('frozenset', marshal_w_frozenset) ) + at unmarshaller(TYPE_FROZENSET) +def unmarshal_frozenset(space, u, tc): + return space.newfrozenset(u.get_tuple_w()) -def unmarshal_set_frozenset(space, u, tc): - items_w = u.get_tuple_w() - if tc == TYPE_SET: - w_frozen = space.w_False - else: - w_frozen = space.w_True - w_tup = space.newtuple(items_w) - return tuple_to_set(space, w_tup, w_frozen) -register(TYPE_SET + TYPE_FROZENSET, unmarshal_set_frozenset) -# dispatching for all not directly dispatched types -def marshal_w__ANY(space, w_obj, m): - w_type = space.type(w_obj) - for name, func in handled_by_any: - w_t = space.builtin.get(name) - if space.is_true(space.issubtype(w_type, w_t)): - func(space, w_obj, m) - return - - # any unknown object implementing the buffer protocol is - # accepted and encoded as a plain string - try: - s = space.bufferstr_w(w_obj) - except OperationError, e: - if not e.match(space, space.w_TypeError): - raise - else: - m.atom_str(TYPE_STRING, s) - return - - raise_exception(space, "unmarshallable object") - -register_all(vars()) +_marshallers_unroll = unrolling_iterable(_marshallers) diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -55,8 +55,6 @@ from pypy.objspace.std import dictproxyobject from pypy.objspace.std import proxyobject - import pypy.objspace.std.marshal_impl # install marshal multimethods - self.pythontypes = [] self.pythontypes.append(objectobject.W_ObjectObject.typedef) @@ -344,10 +342,6 @@ general__args__=True) init = StdObjSpaceMultiMethod('__init__', 1, general__args__=True) getnewargs = StdObjSpaceMultiMethod('__getnewargs__', 1) - # special visible multimethods - # NOTE: when adding more sometype_w() methods, you need to write a - # stub in default.py to raise a space.w_TypeError - marshal_w = StdObjSpaceMultiMethod('marshal_w', 1, [], extra_args=['marshaller']) # add all regular multimethods here for _name, _symbol, _arity, _specialnames in ObjSpace.MethodTable: diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -304,9 +304,15 @@ self, module=module, instance=instance, strdict=strdict, kwargs=kwargs) - def newset(self): - from pypy.objspace.std.setobject import newset - return W_SetObject(self, None) + def newset(self, iterable_w=None): + if iterable_w is None: + return W_FrozensetObject(self, None) + return W_SetObject(self, self.newtuple(iterable_w)) + + def newfrozenset(self, iterable_w=None): + if iterable_w is None: + return W_FrozensetObject(self, None) + return W_FrozensetObject(self, self.newtuple(iterable_w)) def newslice(self, w_start, w_end, w_step): return W_SliceObject(w_start, w_end, w_step) From noreply at buildbot.pypy.org Tue Feb 25 03:45:16 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 03:45:16 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Remove dead imports in 
dictproxyobject.py. Message-ID: <20140225024516.5D5611C0132@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69377:236e966a4386 Date: 2014-02-25 03:32 +0100 http://bitbucket.org/pypy/pypy/changeset/236e966a4386/ Log: Remove dead imports in dictproxyobject.py. diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py --- a/pypy/objspace/std/dictproxyobject.py +++ b/pypy/objspace/std/dictproxyobject.py @@ -1,7 +1,4 @@ -from pypy.objspace.std.model import registerimplementation, W_Object -from pypy.objspace.std.register_all import register_all -from pypy.objspace.std.dictmultiobject import W_DictMultiObject, create_iterator_classes -from pypy.objspace.std.dictmultiobject import DictStrategy +from pypy.objspace.std.dictmultiobject import DictStrategy, create_iterator_classes from pypy.objspace.std.typeobject import unwrap_cell from pypy.interpreter.error import OperationError, oefmt @@ -62,7 +59,6 @@ w_type.dict_w[key] = w_value def setdefault(self, w_dict, w_key, w_default): - space = self.space w_result = self.getitem(w_dict, w_key) if w_result is not None: return w_result From noreply at buildbot.pypy.org Tue Feb 25 03:45:17 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 03:45:17 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Fix. Message-ID: <20140225024517.99AEB1C0132@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69378:8724ffd1d1bf Date: 2014-02-25 03:36 +0100 http://bitbucket.org/pypy/pypy/changeset/8724ffd1d1bf/ Log: Fix. diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -306,7 +306,7 @@ def newset(self, iterable_w=None): if iterable_w is None: - return W_FrozensetObject(self, None) + return W_SetObject(self, None) return W_SetObject(self, self.newtuple(iterable_w)) def newfrozenset(self, iterable_w=None): From noreply at buildbot.pypy.org Tue Feb 25 03:45:18 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 03:45:18 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Add missing complex.__rpow__. Message-ID: <20140225024518.C81A71C0132@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69379:bd2ba0a147bb Date: 2014-02-25 03:44 +0100 http://bitbucket.org/pypy/pypy/changeset/bd2ba0a147bb/ Log: Add missing complex.__rpow__. 
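The complexobject.py diff below adds the missing reflected power method: when Python evaluates x ** c and type(x).__pow__ does not know how to handle c, it asks type(c).__rpow__(c, x), so the new descr_rpow just coerces the left operand to a complex and re-dispatches to the ordinary descr_pow with the operands swapped. A small plain-Python sketch of that delegation (illustrative only):

    class MyComplex(object):
        def __init__(self, re, im=0.0):
            self.re, self.im = float(re), float(im)

        def __pow__(self, other, mod=None):
            other = self._coerce(other)
            if other is NotImplemented:
                return NotImplemented
            return complex(self.re, self.im) ** complex(other.re, other.im)

        def __rpow__(self, lhs, mod=None):
            # x ** self, where type(x) returned NotImplemented:
            lhs = self._coerce(lhs)
            if lhs is NotImplemented:
                return NotImplemented
            return lhs.__pow__(self, mod)    # swap operands, reuse __pow__

        @staticmethod
        def _coerce(value):
            if isinstance(value, MyComplex):
                return value
            if isinstance(value, (int, float)):
                return MyComplex(value)
            return NotImplemented

    print(2 ** MyComplex(0.0, 1.0))          # exercises MyComplex.__rpow__
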
diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -569,6 +569,13 @@ raise oefmt(space.w_OverflowError, "complex exponentiation") return w_p + @unwrap_spec(w_third_arg=WrappedDefault(None)) + def descr_rpow(self, space, w_lhs, w_third_arg): + w_lhs = self._to_complex(space, w_lhs) + if w_lhs is None: + return space.w_NotImplemented + return w_lhs.descr_pow(space, self, w_third_arg) + def descr_conjugate(self, space): """(A+Bj).conjugate() -> A-Bj""" return space.newcomplex(self.realval, -self.imagval) @@ -629,6 +636,7 @@ __divmod__ = interp2app(W_ComplexObject.descr_divmod), __rdivmod__ = interp2app(W_ComplexObject.descr_rdivmod), __pow__ = interp2app(W_ComplexObject.descr_pow), + __rpow__ = interp2app(W_ComplexObject.descr_rpow), conjugate = interp2app(W_ComplexObject.descr_conjugate), ) From noreply at buildbot.pypy.org Tue Feb 25 03:47:35 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 03:47:35 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: hg merge default Message-ID: <20140225024735.75EC51C02EA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69380:d708c09fcb34 Date: 2014-02-25 03:46 +0100 http://bitbucket.org/pypy/pypy/changeset/d708c09fcb34/ Log: hg merge default diff too long, truncating to 2000 out of 2756 lines diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -27,7 +27,7 @@ class UMathModule(MixedModule): appleveldefs = {} - interpleveldefs = {} + interpleveldefs = {'FLOATING_POINT_SUPPORT': 'space.wrap(1)'} # ufuncs for exposed, impl in [ ("absolute", "absolute"), diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -1,7 +1,6 @@ from pypy.module.micronumpy.arrayimpl import base from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy import support -from pypy.module.micronumpy.interp_boxes import W_GenericBox from pypy.interpreter.error import OperationError class ScalarIterator(base.BaseArrayIterator): diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/arrayimpl/sort.py @@ -10,9 +10,9 @@ from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.rarithmetic import widen from rpython.rlib.objectmodel import specialize -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy import interp_dtype, types +from pypy.module.micronumpy import interp_dtype, types, constants as NPY from pypy.module.micronumpy.iter import AxisIterator INT_SIZE = rffi.sizeof(lltype.Signed) @@ -175,9 +175,9 @@ return cache._lookup(tp)(arr, space, w_axis, itemtype.get_element_size()) # XXX this should probably be changed - raise OperationError(space.w_NotImplementedError, - space.wrap("sorting of non-numeric types " + \ - "'%s' is not implemented" % arr.dtype.get_name(), )) + raise oefmt(space.w_NotImplementedError, + "sorting of non-numeric types '%s' is not implemented", + arr.dtype.get_name()) all_types = (types.all_float_types + 
types.all_complex_types + types.all_int_types) @@ -310,17 +310,17 @@ def sort_array(arr, space, w_axis, w_order): cache = space.fromcache(SortCache) # that populates SortClasses itemtype = arr.dtype.itemtype - if not arr.dtype.is_native(): - raise OperationError(space.w_NotImplementedError, - space.wrap("sorting of non-native btyeorder not supported yet")) + if arr.dtype.byteorder == NPY.OPPBYTE: + raise oefmt(space.w_NotImplementedError, + "sorting of non-native byteorder not supported yet") for tp in all_types: if isinstance(itemtype, tp[0]): return cache._lookup(tp)(arr, space, w_axis, itemtype.get_element_size()) # XXX this should probably be changed - raise OperationError(space.w_NotImplementedError, - space.wrap("sorting of non-numeric types " + \ - "'%s' is not implemented" % arr.dtype.get_name(), )) + raise oefmt(space.w_NotImplementedError, + "sorting of non-numeric types '%s' is not implemented", + arr.dtype.get_name()) all_types = (types.all_float_types + types.all_complex_types + types.all_int_types) diff --git a/pypy/module/micronumpy/bench/dot.py b/pypy/module/micronumpy/bench/dot.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/bench/dot.py @@ -0,0 +1,28 @@ +import time + +try: + import numpypy +except ImportError: + pass + +import numpy + +def get_matrix(): + import random + n = 502 + x = numpy.zeros((n,n), dtype=numpy.float64) + for i in range(n): + for j in range(n): + x[i][j] = random.random() + return x + +def main(): + x = get_matrix() + y = get_matrix() + a = time.time() + #z = numpy.dot(x, y) # uses numpy possibly-blas-lib dot + z = numpy.core.multiarray.dot(x, y) # uses strictly numpy C dot + b = time.time() + print '%.2f seconds' % (b-a) + +main() diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -75,6 +75,7 @@ def __init__(self): """NOT_RPYTHON""" self.fromcache = InternalSpaceCache(self).getorbuild + self.w_Ellipsis = special.Ellipsis(self) self.w_NotImplemented = special.NotImplemented(self) def _freeze_(self): @@ -217,7 +218,7 @@ return w_type.lookup(name) def gettypefor(self, w_obj): - return None + return W_TypeObject(w_obj.typedef.name) def call_function(self, tp, w_dtype): return w_dtype diff --git a/pypy/module/micronumpy/constants.py b/pypy/module/micronumpy/constants.py --- a/pypy/module/micronumpy/constants.py +++ b/pypy/module/micronumpy/constants.py @@ -1,88 +1,88 @@ -NPY_BOOL = 0 -NPY_BYTE = 1 -NPY_UBYTE = 2 -NPY_SHORT = 3 -NPY_USHORT = 4 -NPY_INT = 5 -NPY_UINT = 6 -NPY_LONG = 7 -NPY_ULONG = 8 -NPY_LONGLONG = 9 -NPY_ULONGLONG = 10 -NPY_FLOAT = 11 -NPY_DOUBLE = 12 -NPY_LONGDOUBLE = 13 -NPY_CFLOAT = 14 -NPY_CDOUBLE = 15 -NPY_CLONGDOUBLE = 16 -NPY_OBJECT = 17 -NPY_STRING = 18 -NPY_UNICODE = 19 -NPY_VOID = 20 -NPY_DATETIME = 21 -NPY_TIMEDELTA = 22 -NPY_HALF = 23 -NPY_NTYPES = 24 -NPY_NOTYPE = 25 -NPY_CHAR = 26 -NPY_USERDEF = 256 +BOOL = 0 +BYTE = 1 +UBYTE = 2 +SHORT = 3 +USHORT = 4 +INT = 5 +UINT = 6 +LONG = 7 +ULONG = 8 +LONGLONG = 9 +ULONGLONG = 10 +FLOAT = 11 +DOUBLE = 12 +LONGDOUBLE = 13 +CFLOAT = 14 +CDOUBLE = 15 +CLONGDOUBLE = 16 +OBJECT = 17 +STRING = 18 +UNICODE = 19 +VOID = 20 +DATETIME = 21 +TIMEDELTA = 22 +HALF = 23 +NTYPES = 24 +NOTYPE = 25 +CHAR = 26 +USERDEF = 256 -NPY_BOOLLTR = '?' 
-NPY_BYTELTR = 'b' -NPY_UBYTELTR = 'B' -NPY_SHORTLTR = 'h' -NPY_USHORTLTR = 'H' -NPY_INTLTR = 'i' -NPY_UINTLTR = 'I' -NPY_LONGLTR = 'l' -NPY_ULONGLTR = 'L' -NPY_LONGLONGLTR = 'q' -NPY_ULONGLONGLTR = 'Q' -NPY_HALFLTR = 'e' -NPY_FLOATLTR = 'f' -NPY_DOUBLELTR = 'd' -NPY_LONGDOUBLELTR = 'g' -NPY_CFLOATLTR = 'F' -NPY_CDOUBLELTR = 'D' -NPY_CLONGDOUBLELTR = 'G' -NPY_OBJECTLTR = 'O' -NPY_STRINGLTR = 'S' -NPY_STRINGLTR2 = 'a' -NPY_UNICODELTR = 'U' -NPY_VOIDLTR = 'V' -NPY_DATETIMELTR = 'M' -NPY_TIMEDELTALTR = 'm' -NPY_CHARLTR = 'c' +BOOLLTR = '?' +BYTELTR = 'b' +UBYTELTR = 'B' +SHORTLTR = 'h' +USHORTLTR = 'H' +INTLTR = 'i' +UINTLTR = 'I' +LONGLTR = 'l' +ULONGLTR = 'L' +LONGLONGLTR = 'q' +ULONGLONGLTR = 'Q' +HALFLTR = 'e' +FLOATLTR = 'f' +DOUBLELTR = 'd' +LONGDOUBLELTR = 'g' +CFLOATLTR = 'F' +CDOUBLELTR = 'D' +CLONGDOUBLELTR = 'G' +OBJECTLTR = 'O' +STRINGLTR = 'S' +STRINGLTR2 = 'a' +UNICODELTR = 'U' +VOIDLTR = 'V' +DATETIMELTR = 'M' +TIMEDELTALTR = 'm' +CHARLTR = 'c' -NPY_INTPLTR = 'p' -NPY_UINTPLTR = 'P' +INTPLTR = 'p' +UINTPLTR = 'P' -NPY_GENBOOLLTR ='b' -NPY_SIGNEDLTR = 'i' -NPY_UNSIGNEDLTR = 'u' -NPY_FLOATINGLTR = 'f' -NPY_COMPLEXLTR = 'c' +GENBOOLLTR ='b' +SIGNEDLTR = 'i' +UNSIGNEDLTR = 'u' +FLOATINGLTR = 'f' +COMPLEXLTR = 'c' -NPY_ANYORDER = -1 -NPY_CORDER = 0 -NPY_FORTRANORDER = 1 -NPY_KEEPORDER = 2 +ANYORDER = -1 +CORDER = 0 +FORTRANORDER = 1 +KEEPORDER = 2 -NPY_CLIP = 0 -NPY_WRAP = 1 -NPY_RAISE = 2 +CLIP = 0 +WRAP = 1 +RAISE = 2 -NPY_LITTLE = '<' -NPY_BIG = '>' -NPY_NATIVE = '=' -NPY_SWAP = 's' -NPY_IGNORE = '|' +LITTLE = '<' +BIG = '>' +NATIVE = '=' +SWAP = 's' +IGNORE = '|' import sys if sys.byteorder == 'big': - NPY_NATBYTE = NPY_BIG - NPY_OPPBYTE = NPY_LITTLE + NATBYTE = BIG + OPPBYTE = LITTLE else: - NPY_NATBYTE = NPY_LITTLE - NPY_OPPBYTE = NPY_BIG + NATBYTE = LITTLE + OPPBYTE = BIG del sys diff --git a/pypy/module/micronumpy/conversion_utils.py b/pypy/module/micronumpy/conversion_utils.py --- a/pypy/module/micronumpy/conversion_utils.py +++ b/pypy/module/micronumpy/conversion_utils.py @@ -1,21 +1,21 @@ from pypy.interpreter.error import OperationError -from pypy.module.micronumpy.constants import * +from pypy.module.micronumpy import constants as NPY def byteorder_converter(space, new_order): endian = new_order[0] - if endian not in (NPY_BIG, NPY_LITTLE, NPY_NATIVE, NPY_IGNORE, NPY_SWAP): + if endian not in (NPY.BIG, NPY.LITTLE, NPY.NATIVE, NPY.IGNORE, NPY.SWAP): ch = endian if ch in ('b', 'B'): - endian = NPY_BIG + endian = NPY.BIG elif ch in ('l', 'L'): - endian = NPY_LITTLE + endian = NPY.LITTLE elif ch in ('n', 'N'): - endian = NPY_NATIVE + endian = NPY.NATIVE elif ch in ('i', 'I'): - endian = NPY_IGNORE + endian = NPY.IGNORE elif ch in ('s', 'S'): - endian = NPY_SWAP + endian = NPY.SWAP else: raise OperationError(space.w_ValueError, space.wrap( "%s is an unrecognized byteorder" % new_order)) @@ -24,18 +24,18 @@ def clipmode_converter(space, w_mode): if space.is_none(w_mode): - return NPY_RAISE + return NPY.RAISE if space.isinstance_w(w_mode, space.w_str): mode = space.str_w(w_mode) if mode.startswith('C') or mode.startswith('c'): - return NPY_CLIP + return NPY.CLIP if mode.startswith('W') or mode.startswith('w'): - return NPY_WRAP + return NPY.WRAP if mode.startswith('R') or mode.startswith('r'): - return NPY_RAISE + return NPY.RAISE elif space.isinstance_w(w_mode, space.w_int): mode = space.int_w(w_mode) - if NPY_CLIP <= mode <= NPY_RAISE: + if NPY.CLIP <= mode <= NPY.RAISE: return mode raise OperationError(space.w_TypeError, space.wrap("clipmode not understood")) @@ -46,19 
+46,19 @@ return default if not space.isinstance_w(w_order, space.w_str): if space.is_true(w_order): - return NPY_FORTRANORDER + return NPY.FORTRANORDER else: - return NPY_CORDER + return NPY.CORDER else: order = space.str_w(w_order) if order.startswith('C') or order.startswith('c'): - return NPY_CORDER + return NPY.CORDER elif order.startswith('F') or order.startswith('f'): - return NPY_FORTRANORDER + return NPY.FORTRANORDER elif order.startswith('A') or order.startswith('a'): - return NPY_ANYORDER + return NPY.ANYORDER elif order.startswith('K') or order.startswith('k'): - return NPY_KEEPORDER + return NPY.KEEPORDER else: raise OperationError(space.w_TypeError, space.wrap( "order not understood")) diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -6,7 +6,7 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec from pypy.module.micronumpy.conversion_utils import clipmode_converter -from pypy.module.micronumpy.constants import * +from pypy.module.micronumpy import constants as NPY def where(space, w_arr, w_x=None, w_y=None): """where(condition, [x, y]) @@ -238,12 +238,12 @@ index = index_w(space, idx) if index < 0 or index >= arr.get_size(): - if mode == NPY_RAISE: + if mode == NPY.RAISE: raise OperationError(space.w_IndexError, space.wrap( "index %d is out of bounds for axis 0 with size %d" % (index, arr.get_size()))) - elif mode == NPY_WRAP: + elif mode == NPY.WRAP: index = index % arr.get_size() - elif mode == NPY_CLIP: + elif mode == NPY.CLIP: if index < 0: index = 0 else: diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -16,7 +16,8 @@ from pypy.interpreter.mixedmodule import MixedModule from rpython.rtyper.lltypesystem import lltype from rpython.rlib.rstring import StringBuilder -from pypy.module.micronumpy.constants import * +from rpython.rlib import jit +from pypy.module.micronumpy import constants as NPY MIXIN_32 = (W_IntObject.typedef,) if LONG_BIT == 32 else () @@ -33,6 +34,7 @@ def new_dtype_getter(name): + @jit.elidable def _get_dtype(space): from pypy.module.micronumpy.interp_dtype import get_dtype_cache return get_dtype_cache(space).dtypes_by_name[name] @@ -443,10 +445,10 @@ if long_double_size in (8, 12, 16): class W_FloatLongBox(W_FloatingBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY_LONGDOUBLELTR) + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.LONGDOUBLELTR) class W_ComplexLongBox(ComplexBox, W_ComplexFloatingBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY_CLONGDOUBLELTR) + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.CLONGDOUBLELTR) _COMPONENTS_BOX = W_FloatLongBox class W_FlexibleBox(W_GenericBox): @@ -471,10 +473,10 @@ elif space.isinstance_w(w_item, space.w_int): indx = space.int_w(w_item) try: - item = self.dtype.fieldnames[indx] + item = self.dtype.names[indx] except IndexError: if indx < 0: - indx += len(self.dtype.fieldnames) + indx += len(self.dtype.names) raise OperationError(space.w_IndexError, space.wrap( "invalid index (%d)" % indx)) else: diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -7,11 +7,10 
@@ from pypy.module.micronumpy import types, interp_boxes, base from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import LONG_BIT, r_longlong, r_ulonglong -from rpython.rtyper.lltypesystem import rffi from rpython.rlib import jit from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy.conversion_utils import byteorder_converter -from pypy.module.micronumpy.constants import * +from pypy.module.micronumpy import constants as NPY def decode_w_dtype(space, w_dtype): @@ -38,34 +37,40 @@ class W_Dtype(W_Root): - _immutable_fields_ = ["itemtype?", "num", "kind", "name?", "char", - "w_box_type", "byteorder", "size?", "float_type", - "fields?", "fieldnames?", "shape", "subdtype", "base"] + _immutable_fields_ = [ + "num", "kind", "char", "w_box_type", "float_type", + "itemtype?", "byteorder?", "names?", "fields?", "size?", + "shape?", "subdtype?", "base?", + "alternate_constructors", "aliases", + ] - def __init__(self, itemtype, num, kind, name, char, w_box_type, byteorder=NPY_NATIVE, - size=1, alternate_constructors=[], aliases=[], float_type=None, - fields=None, fieldnames=None, shape=[], subdtype=None): + def __init__(self, itemtype, num, kind, char, w_box_type, + float_type=None, byteorder=None, names=[], fields={}, + size=1, shape=[], subdtype=None, + alternate_constructors=[], aliases=[]): self.itemtype = itemtype self.num = num self.kind = kind - self.name = name self.char = char self.w_box_type = w_box_type + self.float_type = float_type + if byteorder is None: + if itemtype.get_element_size() == 1: + byteorder = NPY.IGNORE + else: + byteorder = NPY.NATIVE self.byteorder = byteorder + self.names = names + self.fields = fields self.size = size - self.alternate_constructors = alternate_constructors - self.aliases = aliases - self.float_type = float_type - self.fields = fields - if fieldnames is None: - fieldnames = [] - self.fieldnames = fieldnames - self.shape = list(shape) + self.shape = shape self.subdtype = subdtype if not subdtype: self.base = self else: self.base = subdtype.base + self.alternate_constructors = alternate_constructors + self.aliases = aliases def __repr__(self): if self.fields is not None: @@ -87,54 +92,76 @@ return self.itemtype.coerce(space, self, w_item) def is_int_type(self): - return (self.kind == NPY_SIGNEDLTR or self.kind == NPY_UNSIGNEDLTR or - self.kind == NPY_GENBOOLLTR) + return (self.kind == NPY.SIGNEDLTR or self.kind == NPY.UNSIGNEDLTR or + self.kind == NPY.GENBOOLLTR) def is_signed(self): - return self.kind == NPY_SIGNEDLTR + return self.kind == NPY.SIGNEDLTR def is_complex_type(self): - return self.kind == NPY_COMPLEXLTR + return self.kind == NPY.COMPLEXLTR def is_float_type(self): - return self.kind == NPY_FLOATINGLTR or self.kind == NPY_COMPLEXLTR + return self.kind == NPY.FLOATINGLTR or self.kind == NPY.COMPLEXLTR def is_bool_type(self): - return self.kind == NPY_GENBOOLLTR + return self.kind == NPY.GENBOOLLTR def is_record_type(self): - return self.fields is not None + return bool(self.fields) def is_str_type(self): - return self.num == NPY_STRING + return self.num == NPY.STRING def is_str_or_unicode(self): - return (self.num == NPY_STRING or self.num == NPY_UNICODE) + return self.num == NPY.STRING or self.num == NPY.UNICODE def is_flexible_type(self): - return (self.is_str_or_unicode() or self.is_record_type()) + return self.is_str_or_unicode() or self.num == NPY.VOID def is_native(self): - return self.byteorder in (NPY_NATIVE, NPY_NATBYTE) + return self.byteorder in (NPY.NATIVE, 
NPY.NATBYTE) def get_size(self): return self.size * self.itemtype.get_element_size() - def get_name(self): - if self.char == 'S': - return '|S' + str(self.get_size()) - return self.name - def get_float_dtype(self, space): - assert self.kind == NPY_COMPLEXLTR + assert self.kind == NPY.COMPLEXLTR assert self.float_type is not None - return get_dtype_cache(space).dtypes_by_name[self.byteorder + self.float_type] + dtype = get_dtype_cache(space).dtypes_by_name[self.float_type] + if self.byteorder == NPY.OPPBYTE: + dtype = dtype.descr_newbyteorder(space) + return dtype def descr_str(self, space): - return space.wrap(self.get_name()) + if self.fields: + return space.str(self.descr_get_descr(space)) + elif self.subdtype is not None: + return space.str(space.newtuple([ + self.subdtype.descr_get_str(space), + self.descr_get_shape(space)])) + else: + if self.is_flexible_type(): + return self.descr_get_str(space) + else: + return self.descr_get_name(space) def descr_repr(self, space): - return space.wrap("dtype('%s')" % self.get_name()) + if self.fields: + r = self.descr_get_descr(space) + elif self.subdtype is not None: + r = space.newtuple([self.subdtype.descr_get_str(space), + self.descr_get_shape(space)]) + else: + if self.is_flexible_type(): + if self.byteorder != NPY.IGNORE: + byteorder = NPY.NATBYTE if self.is_native() else NPY.OPPBYTE + else: + byteorder = '' + r = space.wrap(byteorder + self.char + str(self.size)) + else: + r = self.descr_get_name(space) + return space.wrap("dtype(%s)" % space.str_w(space.repr(r))) def descr_get_itemsize(self, space): return space.wrap(self.get_size()) @@ -142,23 +169,39 @@ def descr_get_alignment(self, space): return space.wrap(self.itemtype.alignment) + def descr_get_isbuiltin(self, space): + if self.fields is None: + return space.wrap(1) + return space.wrap(0) + def descr_get_subdtype(self, space): if self.subdtype is None: return space.w_None return space.newtuple([space.wrap(self.subdtype), self.descr_get_shape(space)]) + def get_name(self): + return self.w_box_type.name + + def descr_get_name(self, space): + name = self.get_name() + if name[-1] == '_': + name = name[:-1] + if self.is_flexible_type(): + return space.wrap(name + str(self.get_size() * 8)) + return space.wrap(name) + def descr_get_str(self, space): size = self.get_size() basic = self.kind - if basic == NPY_UNICODELTR: + if basic == NPY.UNICODELTR: size >>= 2 - endian = NPY_NATBYTE + endian = NPY.NATBYTE elif size // (self.size or 1) <= 1: - endian = NPY_IGNORE + endian = NPY.IGNORE else: endian = self.byteorder - if endian == NPY_NATIVE: - endian = NPY_NATBYTE + if endian == NPY.NATIVE: + endian = NPY.NATBYTE return space.wrap("%s%s%s" % (endian, basic, size)) def descr_get_descr(self, space): @@ -167,7 +210,7 @@ self.descr_get_str(space)])]) else: descr = [] - for name in self.fieldnames: + for name in self.names: subdtype = self.fields[name][1] subdescr = [space.wrap(name)] if subdtype.is_record_type(): @@ -206,56 +249,42 @@ return space.wrap(not self.eq(space, w_other)) def descr_get_fields(self, space): - if self.fields is None: + if not self.fields: return space.w_None w_d = space.newdict() for name, (offset, subdtype) in self.fields.iteritems(): - space.setitem(w_d, space.wrap(name), space.newtuple([subdtype, - space.wrap(offset)])) + space.setitem(w_d, space.wrap(name), + space.newtuple([subdtype, space.wrap(offset)])) return w_d - def descr_set_fields(self, space, w_fields): - if w_fields == space.w_None: - self.fields = None - else: - self.fields = {} - size = 0 - for key in 
space.listview(w_fields): - value = space.getitem(w_fields, key) - - dtype = space.getitem(value, space.wrap(0)) - assert isinstance(dtype, W_Dtype) - - offset = space.int_w(space.getitem(value, space.wrap(1))) - self.fields[space.str_w(key)] = offset, dtype - - size += dtype.get_size() - - self.itemtype = types.RecordType() - self.size = size - self.name = "void" + str(8 * self.get_size()) - def descr_get_names(self, space): - if len(self.fieldnames) == 0: + if not self.fields: return space.w_None - return space.newtuple([space.wrap(name) for name in self.fieldnames]) + return space.newtuple([space.wrap(name) for name in self.names]) def descr_set_names(self, space, w_names): - fieldnames = [] - if w_names != space.w_None: - iter = space.iter(w_names) - while True: - try: - name = space.str_w(space.next(iter)) - except OperationError, e: - if not e.match(space, space.w_StopIteration): - raise - break - if name in fieldnames: - raise OperationError(space.w_ValueError, space.wrap( - "Duplicate field names given.")) - fieldnames.append(name) - self.fieldnames = fieldnames + if not self.fields: + raise oefmt(space.w_ValueError, "there are no fields defined") + if not space.issequence_w(w_names) or \ + space.len_w(w_names) != len(self.names): + raise oefmt(space.w_ValueError, + "must replace all names at once " + "with a sequence of length %d", + len(self.names)) + names = [] + for w_name in space.fixedview(w_names): + if not space.isinstance_w(w_name, space.w_str): + raise oefmt(space.w_ValueError, + "item #%d of names is of type %T and not string", + len(names), w_name) + names.append(space.str_w(w_name)) + fields = {} + for i in range(len(self.names)): + if names[i] in fields: + raise oefmt(space.w_ValueError, "Duplicate field names given.") + fields[names[i]] = self.fields[self.names[i]] + self.fields = fields + self.names = names def descr_del_names(self, space): raise OperationError(space.w_AttributeError, space.wrap( @@ -265,15 +294,15 @@ return space.w_False def descr_getitem(self, space, w_item): - if self.fields is None: - raise OperationError(space.w_KeyError, space.wrap( - "There are no fields in dtype %s." % self.name)) + if not self.fields: + raise oefmt(space.w_KeyError, "There are no fields in dtype %s.", + self.get_name()) if space.isinstance_w(w_item, space.w_basestring): item = space.str_w(w_item) elif space.isinstance_w(w_item, space.w_int): indx = space.int_w(w_item) try: - item = self.fieldnames[indx] + item = self.names[indx] except IndexError: raise OperationError(space.w_IndexError, space.wrap( "Field index %d out of range." % indx)) @@ -287,7 +316,7 @@ "Field named '%s' not found." 
% item)) def descr_len(self, space): - if self.fields is None: + if not self.fields: return space.wrap(0) return space.wrap(len(self.fields)) @@ -305,7 +334,7 @@ names = self.descr_get_names(space) values = self.descr_get_fields(space) if self.fields: - endian = NPY_IGNORE + endian = NPY.IGNORE #TODO: Implement this when subarrays are implemented subdescr = space.w_None size = 0 @@ -318,8 +347,8 @@ alignment = space.wrap(1) else: endian = self.byteorder - if endian == NPY_NATIVE: - endian = NPY_NATBYTE + if endian == NPY.NATIVE: + endian = NPY.NATBYTE subdescr = space.w_None w_size = space.wrap(-1) alignment = space.wrap(-1) @@ -329,32 +358,77 @@ return space.newtuple([w_class, builder_args, data]) def descr_setstate(self, space, w_data): - if space.int_w(space.getitem(w_data, space.wrap(0))) != 3: - raise OperationError(space.w_NotImplementedError, space.wrap("Pickling protocol version not supported")) + if self.fields is None: # if builtin dtype + return space.w_None + + version = space.int_w(space.getitem(w_data, space.wrap(0))) + if version != 3: + raise oefmt(space.w_ValueError, + "can't handle version %d of numpy.dtype pickle", + version) endian = space.str_w(space.getitem(w_data, space.wrap(1))) - if endian == NPY_NATBYTE: - endian = NPY_NATIVE + if endian == NPY.NATBYTE: + endian = NPY.NATIVE + + w_subarray = space.getitem(w_data, space.wrap(2)) + w_names = space.getitem(w_data, space.wrap(3)) + w_fields = space.getitem(w_data, space.wrap(4)) + size = space.int_w(space.getitem(w_data, space.wrap(5))) + + if (w_names == space.w_None) != (w_fields == space.w_None): + raise oefmt(space.w_ValueError, "inconsistent fields and names") + self.byteorder = endian + self.shape = [] + self.subdtype = None + self.base = self - fieldnames = space.getitem(w_data, space.wrap(3)) - self.descr_set_names(space, fieldnames) + if w_subarray != space.w_None: + if not space.isinstance_w(w_subarray, space.w_tuple) or \ + space.len_w(w_subarray) != 2: + raise oefmt(space.w_ValueError, + "incorrect subarray in __setstate__") + subdtype, w_shape = space.fixedview(w_subarray) + assert isinstance(subdtype, W_Dtype) + if not base.issequence_w(space, w_shape): + self.shape = [space.int_w(w_shape)] + else: + self.shape = [space.int_w(w_s) for w_s in space.fixedview(w_shape)] + self.subdtype = subdtype + self.base = subdtype.base - fields = space.getitem(w_data, space.wrap(4)) - self.descr_set_fields(space, fields) + if w_names != space.w_None: + self.names = [] + self.fields = {} + for w_name in space.fixedview(w_names): + name = space.str_w(w_name) + value = space.getitem(w_fields, w_name) + + dtype = space.getitem(value, space.wrap(0)) + assert isinstance(dtype, W_Dtype) + offset = space.int_w(space.getitem(value, space.wrap(1))) + + self.names.append(name) + self.fields[name] = offset, dtype + self.itemtype = types.RecordType() + + if self.is_flexible_type(): + self.size = size @unwrap_spec(new_order=str) - def descr_newbyteorder(self, space, new_order=NPY_SWAP): + def descr_newbyteorder(self, space, new_order=NPY.SWAP): newendian = byteorder_converter(space, new_order) endian = self.byteorder - if endian != NPY_IGNORE: - if newendian == NPY_SWAP: - endian = NPY_OPPBYTE if self.is_native() else NPY_NATBYTE - elif newendian != NPY_IGNORE: + if endian != NPY.IGNORE: + if newendian == NPY.SWAP: + endian = NPY.OPPBYTE if self.is_native() else NPY.NATBYTE + elif newendian != NPY.IGNORE: endian = newendian - itemtype = self.itemtype.__class__(endian in (NPY_NATIVE, NPY_NATBYTE)) - return W_Dtype(itemtype, self.num, 
self.kind, self.name, self.char, - self.w_box_type, endian, size=self.size) + itemtype = self.itemtype.__class__(endian in (NPY.NATIVE, NPY.NATBYTE)) + return W_Dtype(itemtype, self.num, self.kind, self.char, + self.w_box_type, self.float_type, byteorder=endian, + size=self.size) @specialize.arg(2) @@ -362,7 +436,7 @@ lst_w = space.listview(w_lst) fields = {} offset = 0 - fieldnames = [] + names = [] for i in range(len(lst_w)): w_elem = lst_w[i] if simple: @@ -385,12 +459,10 @@ assert isinstance(subdtype, W_Dtype) fields[fldname] = (offset, subdtype) offset += subdtype.get_size() - fieldnames.append(fldname) - itemtype = types.RecordType() - return W_Dtype(itemtype, NPY_VOID, NPY_VOIDLTR, - "void" + str(8 * offset * itemtype.get_element_size()), - NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), - fields=fields, fieldnames=fieldnames, size=offset) + names.append(fldname) + return W_Dtype(types.RecordType(), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, + space.gettypefor(interp_boxes.W_VoidBox), + names=names, fields=fields, size=offset) def dtype_from_dict(space, w_dict): @@ -429,11 +501,10 @@ size *= dim if size == 1: return subdtype - return W_Dtype(types.VoidType(), NPY_VOID, NPY_VOIDLTR, - "void" + str(8 * subdtype.get_size() * size), - NPY_VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), - shape=shape, subdtype=subdtype, - size=subdtype.get_size() * size) + size *= subdtype.get_size() + return W_Dtype(types.VoidType(), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, + space.gettypefor(interp_boxes.W_VoidBox), + shape=shape, subdtype=subdtype, size=size) if space.is_none(w_dtype): return cache.w_float64dtype @@ -443,10 +514,15 @@ name = space.str_w(w_dtype) if ',' in name: return dtype_from_spec(space, w_dtype) + cname = name[1:] if name[0] == NPY.OPPBYTE else name try: - return cache.dtypes_by_name[name] + dtype = cache.dtypes_by_name[cname] except KeyError: pass + else: + if name[0] == NPY.OPPBYTE: + dtype = dtype.descr_newbyteorder(space) + return dtype if name[0] in 'VSUc' or name[0] in '<>=|' and name[1] in 'VSUc': return variable_dtype(space, name) raise oefmt(space.w_TypeError, 'data type "%s" not understood', name) @@ -495,10 +571,11 @@ byteorder = interp_attrproperty("byteorder", cls=W_Dtype), itemsize = GetSetProperty(W_Dtype.descr_get_itemsize), alignment = GetSetProperty(W_Dtype.descr_get_alignment), + isbuiltin = GetSetProperty(W_Dtype.descr_get_isbuiltin), subdtype = GetSetProperty(W_Dtype.descr_get_subdtype), str = GetSetProperty(W_Dtype.descr_get_str), - name = interp_attrproperty("name", cls=W_Dtype), + name = GetSetProperty(W_Dtype.descr_get_name), base = GetSetProperty(W_Dtype.descr_get_base), shape = GetSetProperty(W_Dtype.descr_get_shape), isnative = GetSetProperty(W_Dtype.descr_get_isnative), @@ -522,57 +599,50 @@ try: size = int(name[1:]) except ValueError: - raise OperationError(space.w_TypeError, space.wrap("data type not understood")) - if char == NPY_CHARLTR: - char = NPY_STRINGLTR + raise oefmt(space.w_TypeError, "data type not understood") + if char == NPY.CHARLTR: + char = NPY.STRINGLTR size = 1 - if char == NPY_STRINGLTR: - itemtype = types.StringType() - basename = 'string' - num = NPY_STRING - w_box_type = space.gettypefor(interp_boxes.W_StringBox) - elif char == NPY_VOIDLTR: - itemtype = types.VoidType() - basename = 'void' - num = NPY_VOID - w_box_type = space.gettypefor(interp_boxes.W_VoidBox) - elif char == NPY_UNICODELTR: - itemtype = types.UnicodeType() - basename = 'unicode' - num = NPY_UNICODE - w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox) - 
else: - assert False - - return W_Dtype(itemtype, num, char, - basename + str(8 * size * itemtype.get_element_size()), - char, w_box_type, size=size) + if char == NPY.STRINGLTR: + return new_string_dtype(space, size) + elif char == NPY.UNICODELTR: + return new_unicode_dtype(space, size) + elif char == NPY.VOIDLTR: + return new_void_dtype(space, size) + assert False def new_string_dtype(space, size): - itemtype = types.StringType() return W_Dtype( - itemtype, + types.StringType(), size=size, - num=NPY_STRING, - kind=NPY_STRINGLTR, - name='string' + str(8 * size * itemtype.get_element_size()), - char=NPY_STRINGLTR, - w_box_type = space.gettypefor(interp_boxes.W_StringBox), + num=NPY.STRING, + kind=NPY.STRINGLTR, + char=NPY.STRINGLTR, + w_box_type=space.gettypefor(interp_boxes.W_StringBox), ) def new_unicode_dtype(space, size): - itemtype = types.UnicodeType() return W_Dtype( - itemtype, + types.UnicodeType(), size=size, - num=NPY_UNICODE, - kind=NPY_UNICODELTR, - name='unicode' + str(8 * size * itemtype.get_element_size()), - char=NPY_UNICODELTR, - w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox), + num=NPY.UNICODE, + kind=NPY.UNICODELTR, + char=NPY.UNICODELTR, + w_box_type=space.gettypefor(interp_boxes.W_UnicodeBox), + ) + + +def new_void_dtype(space, size): + return W_Dtype( + types.VoidType(), + size=size, + num=NPY.VOID, + kind=NPY.VOIDLTR, + char=NPY.VOIDLTR, + w_box_type=space.gettypefor(interp_boxes.W_VoidBox), ) @@ -580,125 +650,112 @@ def __init__(self, space): self.w_booldtype = W_Dtype( types.Bool(), - num=NPY_BOOL, - kind=NPY_GENBOOLLTR, - name="bool", - char=NPY_BOOLLTR, + num=NPY.BOOL, + kind=NPY.GENBOOLLTR, + char=NPY.BOOLLTR, w_box_type=space.gettypefor(interp_boxes.W_BoolBox), alternate_constructors=[space.w_bool], - aliases=['bool8'], + aliases=['bool', 'bool8'], ) self.w_int8dtype = W_Dtype( types.Int8(), - num=NPY_BYTE, - kind=NPY_SIGNEDLTR, - name="int8", - char=NPY_BYTELTR, + num=NPY.BYTE, + kind=NPY.SIGNEDLTR, + char=NPY.BYTELTR, w_box_type=space.gettypefor(interp_boxes.W_Int8Box), aliases=['byte'], ) self.w_uint8dtype = W_Dtype( types.UInt8(), - num=NPY_UBYTE, - kind=NPY_UNSIGNEDLTR, - name="uint8", - char=NPY_UBYTELTR, + num=NPY.UBYTE, + kind=NPY.UNSIGNEDLTR, + char=NPY.UBYTELTR, w_box_type=space.gettypefor(interp_boxes.W_UInt8Box), aliases=['ubyte'], ) self.w_int16dtype = W_Dtype( types.Int16(), - num=NPY_SHORT, - kind=NPY_SIGNEDLTR, - name="int16", - char=NPY_SHORTLTR, + num=NPY.SHORT, + kind=NPY.SIGNEDLTR, + char=NPY.SHORTLTR, w_box_type=space.gettypefor(interp_boxes.W_Int16Box), aliases=['short'], ) self.w_uint16dtype = W_Dtype( types.UInt16(), - num=NPY_USHORT, - kind=NPY_UNSIGNEDLTR, - name="uint16", - char=NPY_USHORTLTR, + num=NPY.USHORT, + kind=NPY.UNSIGNEDLTR, + char=NPY.USHORTLTR, w_box_type=space.gettypefor(interp_boxes.W_UInt16Box), aliases=['ushort'], ) self.w_int32dtype = W_Dtype( types.Int32(), - num=NPY_INT, - kind=NPY_SIGNEDLTR, - name="int32", - char=NPY_INTLTR, + num=NPY.INT, + kind=NPY.SIGNEDLTR, + char=NPY.INTLTR, w_box_type=space.gettypefor(interp_boxes.W_Int32Box), ) self.w_uint32dtype = W_Dtype( types.UInt32(), - num=NPY_UINT, - kind=NPY_UNSIGNEDLTR, - name="uint32", - char=NPY_UINTLTR, + num=NPY.UINT, + kind=NPY.UNSIGNEDLTR, + char=NPY.UINTLTR, w_box_type=space.gettypefor(interp_boxes.W_UInt32Box), ) self.w_longdtype = W_Dtype( types.Long(), - num=NPY_LONG, - kind=NPY_SIGNEDLTR, - name="int%d" % LONG_BIT, - char=NPY_LONGLTR, + num=NPY.LONG, + kind=NPY.SIGNEDLTR, + char=NPY.LONGLTR, w_box_type=space.gettypefor(interp_boxes.W_LongBox), 
alternate_constructors=[space.w_int, space.gettypefor(interp_boxes.W_IntegerBox), space.gettypefor(interp_boxes.W_SignedIntegerBox), ], - aliases=['int'], + aliases=['int', 'intp', 'p'], ) self.w_ulongdtype = W_Dtype( types.ULong(), - num=NPY_ULONG, - kind=NPY_UNSIGNEDLTR, - name="uint%d" % LONG_BIT, - char=NPY_ULONGLTR, + num=NPY.ULONG, + kind=NPY.UNSIGNEDLTR, + char=NPY.ULONGLTR, w_box_type=space.gettypefor(interp_boxes.W_ULongBox), alternate_constructors=[space.gettypefor(interp_boxes.W_UnsignedIntegerBox), ], - aliases=['uint'], + aliases=['uint', 'uintp', 'P'], ) self.w_int64dtype = W_Dtype( types.Int64(), - num=NPY_LONGLONG, - kind=NPY_SIGNEDLTR, - name="int64", - char=NPY_LONGLONGLTR, + num=NPY.LONGLONG, + kind=NPY.SIGNEDLTR, + char=NPY.LONGLONGLTR, w_box_type=space.gettypefor(interp_boxes.W_Int64Box), alternate_constructors=[space.w_long], aliases=['longlong'], ) self.w_uint64dtype = W_Dtype( types.UInt64(), - num=NPY_ULONGLONG, - kind=NPY_UNSIGNEDLTR, - name="uint64", - char=NPY_ULONGLONGLTR, + num=NPY.ULONGLONG, + kind=NPY.UNSIGNEDLTR, + char=NPY.ULONGLONGLTR, w_box_type=space.gettypefor(interp_boxes.W_UInt64Box), aliases=['ulonglong'], ) self.w_float32dtype = W_Dtype( types.Float32(), - num=NPY_FLOAT, - kind=NPY_FLOATINGLTR, - name="float32", - char=NPY_FLOATLTR, + num=NPY.FLOAT, + kind=NPY.FLOATINGLTR, + char=NPY.FLOATLTR, w_box_type=space.gettypefor(interp_boxes.W_Float32Box), aliases=['single'] ) self.w_float64dtype = W_Dtype( types.Float64(), - num=NPY_DOUBLE, - kind=NPY_FLOATINGLTR, - name="float64", - char=NPY_DOUBLELTR, - w_box_type = space.gettypefor(interp_boxes.W_Float64Box), + num=NPY.DOUBLE, + kind=NPY.FLOATINGLTR, + char=NPY.DOUBLELTR, + w_box_type=space.gettypefor(interp_boxes.W_Float64Box), alternate_constructors=[space.w_float, space.gettypefor(interp_boxes.W_NumberBox), space.gettypefor(interp_boxes.W_FloatingBox), @@ -707,75 +764,69 @@ ) self.w_floatlongdtype = W_Dtype( types.FloatLong(), - num=NPY_LONGDOUBLE, - kind=NPY_FLOATINGLTR, - name="float%d" % (interp_boxes.long_double_size * 8), - char=NPY_LONGDOUBLELTR, + num=NPY.LONGDOUBLE, + kind=NPY.FLOATINGLTR, + char=NPY.LONGDOUBLELTR, w_box_type=space.gettypefor(interp_boxes.W_FloatLongBox), aliases=["longdouble", "longfloat"], ) self.w_complex64dtype = W_Dtype( types.Complex64(), - num=NPY_CFLOAT, - kind=NPY_COMPLEXLTR, - name="complex64", - char=NPY_CFLOATLTR, - w_box_type = space.gettypefor(interp_boxes.W_Complex64Box), + num=NPY.CFLOAT, + kind=NPY.COMPLEXLTR, + char=NPY.CFLOATLTR, + w_box_type=space.gettypefor(interp_boxes.W_Complex64Box), aliases=['csingle'], - float_type=NPY_FLOATLTR, + float_type=NPY.FLOATLTR, ) self.w_complex128dtype = W_Dtype( types.Complex128(), - num=NPY_CDOUBLE, - kind=NPY_COMPLEXLTR, - name="complex128", - char=NPY_CDOUBLELTR, - w_box_type = space.gettypefor(interp_boxes.W_Complex128Box), + num=NPY.CDOUBLE, + kind=NPY.COMPLEXLTR, + char=NPY.CDOUBLELTR, + w_box_type=space.gettypefor(interp_boxes.W_Complex128Box), alternate_constructors=[space.w_complex, space.gettypefor(interp_boxes.W_ComplexFloatingBox)], aliases=["complex", 'cfloat', 'cdouble'], - float_type=NPY_DOUBLELTR, + float_type=NPY.DOUBLELTR, ) self.w_complexlongdtype = W_Dtype( types.ComplexLong(), - num=NPY_CLONGDOUBLE, - kind=NPY_COMPLEXLTR, - name="complex%d" % (interp_boxes.long_double_size * 16), - char=NPY_CLONGDOUBLELTR, - w_box_type = space.gettypefor(interp_boxes.W_ComplexLongBox), + num=NPY.CLONGDOUBLE, + kind=NPY.COMPLEXLTR, + char=NPY.CLONGDOUBLELTR, + 
w_box_type=space.gettypefor(interp_boxes.W_ComplexLongBox), aliases=["clongdouble", "clongfloat"], - float_type=NPY_LONGDOUBLELTR, + float_type=NPY.LONGDOUBLELTR, ) self.w_stringdtype = W_Dtype( types.StringType(), size=0, - num=NPY_STRING, - kind=NPY_STRINGLTR, - name='string', - char=NPY_STRINGLTR, - w_box_type = space.gettypefor(interp_boxes.W_StringBox), + num=NPY.STRING, + kind=NPY.STRINGLTR, + char=NPY.STRINGLTR, + w_box_type=space.gettypefor(interp_boxes.W_StringBox), alternate_constructors=[space.w_str, space.gettypefor(interp_boxes.W_CharacterBox)], - aliases=["str"], + aliases=['string', "str"], ) self.w_unicodedtype = W_Dtype( types.UnicodeType(), size=0, - num=NPY_UNICODE, - kind=NPY_UNICODELTR, - name='unicode', - char=NPY_UNICODELTR, - w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox), + num=NPY.UNICODE, + kind=NPY.UNICODELTR, + char=NPY.UNICODELTR, + w_box_type=space.gettypefor(interp_boxes.W_UnicodeBox), alternate_constructors=[space.w_unicode], + aliases=['unicode'], ) self.w_voiddtype = W_Dtype( types.VoidType(), size=0, - num=NPY_VOID, - kind=NPY_VOIDLTR, - name='void', - char=NPY_VOIDLTR, - w_box_type = space.gettypefor(interp_boxes.W_VoidBox), + num=NPY.VOID, + kind=NPY.VOIDLTR, + char=NPY.VOIDLTR, + w_box_type=space.gettypefor(interp_boxes.W_VoidBox), #alternate_constructors=[space.w_buffer], # XXX no buffer in space #alternate_constructors=[space.gettypefor(interp_boxes.W_GenericBox)], @@ -783,27 +834,24 @@ ) self.w_float16dtype = W_Dtype( types.Float16(), - num=NPY_HALF, - kind=NPY_FLOATINGLTR, - name="float16", - char=NPY_HALFLTR, + num=NPY.HALF, + kind=NPY.FLOATINGLTR, + char=NPY.HALFLTR, w_box_type=space.gettypefor(interp_boxes.W_Float16Box), ) self.w_intpdtype = W_Dtype( types.Long(), - num=NPY_LONG, - kind=NPY_SIGNEDLTR, - name='intp', - char=NPY_INTPLTR, - w_box_type = space.gettypefor(interp_boxes.W_LongBox), + num=NPY.LONG, + kind=NPY.SIGNEDLTR, + char=NPY.INTPLTR, + w_box_type=space.gettypefor(interp_boxes.W_LongBox), ) self.w_uintpdtype = W_Dtype( types.ULong(), - num=NPY_ULONG, - kind=NPY_UNSIGNEDLTR, - name='uintp', - char=NPY_UINTPLTR, - w_box_type = space.gettypefor(interp_boxes.W_ULongBox), + num=NPY.ULONG, + kind=NPY.UNSIGNEDLTR, + char=NPY.UINTPLTR, + w_box_type=space.gettypefor(interp_boxes.W_ULongBox), ) float_dtypes = [self.w_float16dtype, self.w_float32dtype, self.w_float64dtype, self.w_floatlongdtype] @@ -829,20 +877,15 @@ # we reverse, so the stuff with lower numbers override stuff with # higher numbers for dtype in reversed(self.builtin_dtypes): + dtype.fields = None # mark these as builtin self.dtypes_by_num[dtype.num] = dtype - self.dtypes_by_name[dtype.name] = dtype + self.dtypes_by_name[dtype.get_name()] = dtype for can_name in [dtype.kind + str(dtype.get_size()), dtype.char]: self.dtypes_by_name[can_name] = dtype - self.dtypes_by_name[NPY_NATBYTE + can_name] = dtype - self.dtypes_by_name[NPY_NATIVE + can_name] = dtype - self.dtypes_by_name[NPY_IGNORE + can_name] = dtype - new_name = NPY_OPPBYTE + can_name - itemtype = type(dtype.itemtype)(False) - self.dtypes_by_name[new_name] = W_Dtype( - itemtype, dtype.num, dtype.kind, new_name, dtype.char, - dtype.w_box_type, byteorder=NPY_OPPBYTE, - float_type=dtype.float_type) + self.dtypes_by_name[NPY.NATBYTE + can_name] = dtype + self.dtypes_by_name[NPY.NATIVE + can_name] = dtype + self.dtypes_by_name[NPY.IGNORE + can_name] = dtype for alias in dtype.aliases: self.dtypes_by_name[alias] = dtype @@ -898,7 +941,7 @@ space.wrap(itembits), space.wrap(dtype.itemtype.get_element_size())] if 
dtype.is_int_type(): - if dtype.kind == NPY_GENBOOLLTR: + if dtype.kind == NPY.GENBOOLLTR: w_maxobj = space.wrap(1) w_minobj = space.wrap(0) elif dtype.is_signed(): diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -23,7 +23,7 @@ from pypy.module.micronumpy.arrayimpl.base import BaseArrayImplementation from pypy.module.micronumpy.conversion_utils import order_converter, multi_axis_converter from pypy.module.micronumpy import support -from pypy.module.micronumpy.constants import * +from pypy.module.micronumpy import constants as NPY def _find_shape(space, w_size, dtype): if space.is_none(w_size): @@ -110,8 +110,8 @@ self.fill(space, self.get_dtype().coerce(space, w_value)) def descr_tostring(self, space, w_order=None): - order = order_converter(space, w_order, NPY_CORDER) - if order == NPY_FORTRANORDER: + order = order_converter(space, w_order, NPY.CORDER) + if order == NPY.FORTRANORDER: raise OperationError(space.w_NotImplementedError, space.wrap( "unsupported value for order")) return space.wrap(loop.tostring(space, self)) @@ -218,7 +218,9 @@ prefix) def descr_getitem(self, space, w_idx): - if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type() \ + if space.is_w(w_idx, space.w_Ellipsis): + return self + elif isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type() \ and len(w_idx.get_shape()) > 0: return self.getitem_filter(space, w_idx) try: @@ -320,8 +322,8 @@ return self.implementation.get_scalar_value() def descr_copy(self, space, w_order=None): - order = order_converter(space, w_order, NPY_KEEPORDER) - if order == NPY_FORTRANORDER: + order = order_converter(space, w_order, NPY.KEEPORDER) + if order == NPY.FORTRANORDER: raise OperationError(space.w_NotImplementedError, space.wrap( "unsupported value for order")) copy = self.implementation.copy(space) @@ -375,7 +377,7 @@ numpy.reshape : equivalent function """ args_w, kw_w = __args__.unpack() - order = NPY_CORDER + order = NPY.CORDER if kw_w: if "order" in kw_w: order = order_converter(space, kw_w["order"], order) @@ -383,10 +385,10 @@ if kw_w: raise OperationError(space.w_TypeError, space.wrap( "reshape() got unexpected keyword argument(s)")) - if order == NPY_KEEPORDER: + if order == NPY.KEEPORDER: raise OperationError(space.w_ValueError, space.wrap( "order 'K' is not permitted for reshaping")) - if order != NPY_CORDER and order != NPY_ANYORDER: + if order != NPY.CORDER and order != NPY.ANYORDER: raise OperationError(space.w_NotImplementedError, space.wrap( "unsupported value for order")) if len(args_w) == 1: @@ -561,7 +563,7 @@ # by converting nonnative byte order. 
if self.is_scalar(): return space.wrap(0) - dtype = self.get_dtype().descr_newbyteorder(space, NPY_NATIVE) + dtype = self.get_dtype().descr_newbyteorder(space, NPY.NATIVE) contig = self.implementation.astype(space, dtype) return contig.argsort(space, w_axis) @@ -569,9 +571,14 @@ cur_dtype = self.get_dtype() new_dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) - if new_dtype.shape: + if new_dtype.num == NPY.VOID: raise oefmt(space.w_NotImplementedError, - "%s.astype(%s) not implemented yet", cur_dtype.name, new_dtype.name) + "astype(%s) not implemented yet", + new_dtype.get_name()) + if new_dtype.num == NPY.STRING and new_dtype.size == 0: + if cur_dtype.num == NPY.STRING: + new_dtype = interp_dtype.variable_dtype(space, + 'S' + str(cur_dtype.size)) impl = self.implementation if isinstance(impl, scalar.Scalar): return W_NDimArray.new_scalar(space, new_dtype, impl.value) @@ -662,7 +669,7 @@ "getfield not implemented yet")) @unwrap_spec(new_order=str) - def descr_newbyteorder(self, space, new_order=NPY_SWAP): + def descr_newbyteorder(self, space, new_order=NPY.SWAP): return self.descr_view(space, self.get_dtype().descr_newbyteorder(space, new_order)) @@ -1023,7 +1030,7 @@ except AttributeError: raise oefmt(space.w_NotImplementedError, '%s not implemented for %s', - op_name, self.get_dtype().name) + op_name, self.get_dtype().get_name()) return space.wrap(res) return func_with_new_name(impl, "reduce_arg%s_impl" % op_name) @@ -1138,7 +1145,7 @@ "__setstate__ called with len(args[1])==%d, not 5 or 4" % lens)) shape = space.getitem(w_state, space.wrap(base_index)) dtype = space.getitem(w_state, space.wrap(base_index+1)) - isfortran = space.getitem(w_state, space.wrap(base_index+2)) + #isfortran = space.getitem(w_state, space.wrap(base_index+2)) storage = space.getitem(w_state, space.wrap(base_index+3)) if not isinstance(dtype, interp_dtype.W_Dtype): raise OperationError(space.w_ValueError, space.wrap( @@ -1192,8 +1199,8 @@ w_base=w_buffer, writable=buf.is_writable()) - order = order_converter(space, w_order, NPY_CORDER) - if order == NPY_CORDER: + order = order_converter(space, w_order, NPY.CORDER) + if order == NPY.CORDER: order = 'C' else: order = 'F' @@ -1230,7 +1237,8 @@ app_take = applevel(r""" def take(a, indices, axis, out, mode): - assert mode == 'raise' + if mode != 'raise': + raise NotImplementedError("mode != raise not implemented") if axis is None: from numpy import array indices = array(indices) @@ -1440,7 +1448,7 @@ # scalars and strings w/o __array__ method isstr = space.isinstance_w(w_object, space.w_str) if not issequence_w(space, w_object) or isstr: - if dtype is None or dtype.is_str_or_unicode(): + if dtype is None or (dtype.is_str_or_unicode() and dtype.get_size() < 1): dtype = interp_ufuncs.find_dtype_for_scalar(space, w_object) return W_NDimArray.new_scalar(space, dtype, w_object) @@ -1493,6 +1501,8 @@ def zeros(space, w_shape, w_dtype=None, w_order=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + if dtype.is_str_or_unicode() and dtype.get_size() < 1: + dtype = interp_dtype.variable_dtype(space, dtype.char + '1') shape = _find_shape(space, w_shape, dtype) return W_NDimArray.from_shape(space, shape, dtype=dtype) @@ -1504,6 +1514,8 @@ else: dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + if dtype.is_str_or_unicode() and dtype.get_size() < 1: + dtype = 
interp_dtype.variable_dtype(space, dtype.char + '1') return W_NDimArray.from_shape(space, w_a.get_shape(), dtype=dtype, w_instance=w_a if subok else None) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -9,7 +9,7 @@ from pypy.module.micronumpy.interp_support import unwrap_axis_arg from pypy.module.micronumpy.strides import shape_agreement from pypy.module.micronumpy.base import convert_to_array, W_NDimArray -from pypy.module.micronumpy.constants import * +from pypy.module.micronumpy import constants as NPY def done_if_true(dtype, val): return dtype.itemtype.bool(val) @@ -312,7 +312,7 @@ else: res_dtype = calc_dtype if self.complex_to_float and calc_dtype.is_complex_type(): - if calc_dtype.name == 'complex64': + if calc_dtype.num == NPY.CFLOAT: res_dtype = interp_dtype.get_dtype_cache(space).w_float32dtype else: res_dtype = interp_dtype.get_dtype_cache(space).w_float64dtype @@ -386,8 +386,9 @@ return space.w_NotImplemented else: raise oefmt(space.w_TypeError, - 'unsupported operand dtypes %s and %s for "%s"', - w_rdtype.name, w_ldtype.name, self.name) + 'unsupported operand dtypes %s and %s for "%s"', + w_rdtype.get_name(), w_ldtype.get_name(), + self.name) if self.are_common_types(w_ldtype, w_rdtype): if not w_lhs.is_scalar() and w_rhs.is_scalar(): @@ -462,24 +463,24 @@ if dt1.num > dt2.num: dt1, dt2 = dt2, dt1 # Some operations promote op(bool, bool) to return int8, rather than bool - if promote_bools and (dt1.kind == dt2.kind == NPY_GENBOOLLTR): + if promote_bools and (dt1.kind == dt2.kind == NPY.GENBOOLLTR): return interp_dtype.get_dtype_cache(space).w_int8dtype # Everything numeric promotes to complex if dt2.is_complex_type() or dt1.is_complex_type(): - if dt2.num == NPY_HALF: + if dt2.num == NPY.HALF: dt1, dt2 = dt2, dt1 - if dt2.num == NPY_CFLOAT: - if dt1.num == NPY_DOUBLE: + if dt2.num == NPY.CFLOAT: + if dt1.num == NPY.DOUBLE: return interp_dtype.get_dtype_cache(space).w_complex128dtype - elif dt1.num == NPY_LONGDOUBLE: + elif dt1.num == NPY.LONGDOUBLE: return interp_dtype.get_dtype_cache(space).w_complexlongdtype return interp_dtype.get_dtype_cache(space).w_complex64dtype - elif dt2.num == NPY_CDOUBLE: - if dt1.num == NPY_LONGDOUBLE: + elif dt2.num == NPY.CDOUBLE: + if dt1.num == NPY.LONGDOUBLE: return interp_dtype.get_dtype_cache(space).w_complexlongdtype return interp_dtype.get_dtype_cache(space).w_complex128dtype - elif dt2.num == NPY_CLONGDOUBLE: + elif dt2.num == NPY.CLONGDOUBLE: return interp_dtype.get_dtype_cache(space).w_complexlongdtype else: raise OperationError(space.w_TypeError, space.wrap("Unsupported types")) @@ -488,30 +489,30 @@ return find_unaryop_result_dtype(space, dt2, promote_to_float=True) # If they're the same kind, choose the greater one. if dt1.kind == dt2.kind and not dt2.is_flexible_type(): - if dt2.num == NPY_HALF: + if dt2.num == NPY.HALF: return dt1 return dt2 # Everything promotes to float, and bool promotes to everything. 
- if dt2.kind == NPY_FLOATINGLTR or dt1.kind == NPY_GENBOOLLTR: - if dt2.num == NPY_HALF and dt1.itemtype.get_element_size() == 2: + if dt2.kind == NPY.FLOATINGLTR or dt1.kind == NPY.GENBOOLLTR: + if dt2.num == NPY.HALF and dt1.itemtype.get_element_size() == 2: return interp_dtype.get_dtype_cache(space).w_float32dtype - if dt2.num == NPY_HALF and dt1.itemtype.get_element_size() >= 4: + if dt2.num == NPY.HALF and dt1.itemtype.get_element_size() >= 4: return interp_dtype.get_dtype_cache(space).w_float64dtype - if dt2.num == NPY_FLOAT and dt1.itemtype.get_element_size() >= 4: + if dt2.num == NPY.FLOAT and dt1.itemtype.get_element_size() >= 4: return interp_dtype.get_dtype_cache(space).w_float64dtype return dt2 # for now this means mixing signed and unsigned - if dt2.kind == NPY_SIGNEDLTR: + if dt2.kind == NPY.SIGNEDLTR: # if dt2 has a greater number of bytes, then just go with it if dt1.itemtype.get_element_size() < dt2.itemtype.get_element_size(): return dt2 # we need to promote both dtypes dtypenum = dt2.num + 2 - elif dt2.num == NPY_ULONGLONG or (LONG_BIT == 64 and dt2.num == NPY_ULONG): + elif dt2.num == NPY.ULONGLONG or (LONG_BIT == 64 and dt2.num == NPY.ULONG): # UInt64 + signed = Float64 - dtypenum = NPY_DOUBLE + dtypenum = NPY.DOUBLE elif dt2.is_flexible_type(): # For those operations that get here (concatenate, stack), # flexible types take precedence over numeric type @@ -528,7 +529,7 @@ newdtype = interp_dtype.get_dtype_cache(space).dtypes_by_num[dtypenum] if (newdtype.itemtype.get_element_size() > dt2.itemtype.get_element_size() or - newdtype.kind == NPY_FLOATINGLTR): + newdtype.kind == NPY.FLOATINGLTR): return newdtype else: # we only promoted to long on 32-bit or to longlong on 64-bit @@ -540,24 +541,24 @@ def find_unaryop_result_dtype(space, dt, promote_to_float=False, promote_bools=False, promote_to_largest=False): if promote_to_largest: - if dt.kind == NPY_GENBOOLLTR or dt.kind == NPY_SIGNEDLTR: + if dt.kind == NPY.GENBOOLLTR or dt.kind == NPY.SIGNEDLTR: if dt.get_size() * 8 < LONG_BIT: return interp_dtype.get_dtype_cache(space).w_longdtype - elif dt.kind == NPY_UNSIGNEDLTR: + elif dt.kind == NPY.UNSIGNEDLTR: if dt.get_size() * 8 < LONG_BIT: return interp_dtype.get_dtype_cache(space).w_ulongdtype else: - assert dt.kind == NPY_FLOATINGLTR or dt.kind == NPY_COMPLEXLTR + assert dt.kind == NPY.FLOATINGLTR or dt.kind == NPY.COMPLEXLTR return dt - if promote_bools and (dt.kind == NPY_GENBOOLLTR): + if promote_bools and (dt.kind == NPY.GENBOOLLTR): return interp_dtype.get_dtype_cache(space).w_int8dtype if promote_to_float: - if dt.kind == NPY_FLOATINGLTR or dt.kind == NPY_COMPLEXLTR: + if dt.kind == NPY.FLOATINGLTR or dt.kind == NPY.COMPLEXLTR: return dt - if dt.num >= NPY_INT: + if dt.num >= NPY.INT: return interp_dtype.get_dtype_cache(space).w_float64dtype for bytes, dtype in interp_dtype.get_dtype_cache(space).float_dtypes_by_num_bytes: - if (dtype.kind == NPY_FLOATINGLTR and + if (dtype.kind == NPY.FLOATINGLTR and dtype.itemtype.get_element_size() > dt.itemtype.get_element_size()): return dtype return dt @@ -594,7 +595,7 @@ if current_guess is None: return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) - elif current_guess.num == NPY_STRING: + elif current_guess.num == NPY.STRING: if current_guess.get_size() < space.len_w(w_obj): return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) @@ -612,7 +613,7 @@ except AttributeError: raise oefmt(space.w_NotImplementedError, "%s not implemented for %s", - ufunc_name, dtype.name) + ufunc_name, 
dtype.get_name()) if argcount == 1: def impl(res_dtype, value): res = get_op(res_dtype)(value) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -11,7 +11,7 @@ from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy.iter import PureShapeIterator from pypy.module.micronumpy.support import index_w -from pypy.module.micronumpy.constants import * +from pypy.module.micronumpy import constants as NPY call2_driver = jit.JitDriver(name='numpy_call2', greens = ['shapelen', 'func', 'calc_dtype', @@ -90,19 +90,11 @@ obj_iter.next() return out -setslice_driver1 = jit.JitDriver(name='numpy_setslice1', - greens = ['shapelen', 'dtype'], - reds = 'auto') -setslice_driver2 = jit.JitDriver(name='numpy_setslice2', +setslice_driver = jit.JitDriver(name='numpy_setslice', greens = ['shapelen', 'dtype'], reds = 'auto') def setslice(space, shape, target, source): - if target.dtype.is_str_or_unicode(): - return setslice_build_and_convert(space, shape, target, source) - return setslice_to(space, shape, target, source) - -def setslice_to(space, shape, target, source): # note that unlike everything else, target and source here are # array implementations, not arrays target_iter = target.create_iter(shape) @@ -110,22 +102,11 @@ dtype = target.dtype shapelen = len(shape) while not target_iter.done(): - setslice_driver1.jit_merge_point(shapelen=shapelen, dtype=dtype) - target_iter.setitem(source_iter.getitem().convert_to(space, dtype)) - target_iter.next() - source_iter.next() - return target - -def setslice_build_and_convert(space, shape, target, source): - # note that unlike everything else, target and source here are - # array implementations, not arrays - target_iter = target.create_iter(shape) - source_iter = source.create_iter(shape) - dtype = target.dtype - shapelen = len(shape) - while not target_iter.done(): - setslice_driver2.jit_merge_point(shapelen=shapelen, dtype=dtype) - target_iter.setitem(dtype.build_and_convert(space, source_iter.getitem())) + setslice_driver.jit_merge_point(shapelen=shapelen, dtype=dtype) + if dtype.is_str_or_unicode(): + target_iter.setitem(dtype.coerce(space, source_iter.getitem())) + else: + target_iter.setitem(source_iter.getitem().convert_to(space, dtype)) target_iter.next() source_iter.next() return target @@ -434,43 +415,21 @@ ri.next() return res -flatiter_setitem_driver1 = jit.JitDriver(name = 'numpy_flatiter_setitem1', - greens = ['dtype'], - reds = 'auto') - -flatiter_setitem_driver2 = jit.JitDriver(name = 'numpy_flatiter_setitem2', +flatiter_setitem_driver = jit.JitDriver(name = 'numpy_flatiter_setitem', greens = ['dtype'], reds = 'auto') def flatiter_setitem(space, arr, val, start, step, length): dtype = arr.get_dtype() - if dtype.is_str_or_unicode(): - return flatiter_setitem_build_and_convert(space, arr, val, start, step, length) - return flatiter_setitem_to(space, arr, val, start, step, length) - -def flatiter_setitem_to(space, arr, val, start, step, length): - dtype = arr.get_dtype() arr_iter = arr.create_iter() val_iter = val.create_iter() arr_iter.next_skip_x(start) while length > 0: - flatiter_setitem_driver1.jit_merge_point(dtype=dtype) - arr_iter.setitem(val_iter.getitem().convert_to(space, dtype)) - # need to repeat i_nput values until all assignments are done - arr_iter.next_skip_x(step) - length -= 1 - val_iter.next() - # WTF numpy? 
- val_iter.reset() - -def flatiter_setitem_build_and_convert(space, arr, val, start, step, length): - dtype = arr.get_dtype() - arr_iter = arr.create_iter() - val_iter = val.create_iter() - arr_iter.next_skip_x(start) - while length > 0: - flatiter_setitem_driver2.jit_merge_point(dtype=dtype) - arr_iter.setitem(dtype.build_and_convert(space, val_iter.getitem())) + flatiter_setitem_driver.jit_merge_point(dtype=dtype) + if dtype.is_str_or_unicode(): + arr_iter.setitem(dtype.coerce(space, val_iter.getitem())) + else: + arr_iter.setitem(val_iter.getitem().convert_to(space, dtype)) # need to repeat i_nput values until all assignments are done arr_iter.next_skip_x(step) length -= 1 @@ -597,13 +556,13 @@ mode=mode) index = index_w(space, arr_iter.getitem()) if index < 0 or index >= len(iterators): - if mode == NPY_RAISE: + if mode == NPY.RAISE: raise OperationError(space.w_ValueError, space.wrap( "invalid entry in choice array")) - elif mode == NPY_WRAP: + elif mode == NPY.WRAP: index = index % (len(iterators)) else: - assert mode == NPY_CLIP + assert mode == NPY.CLIP if index < 0: index = 0 else: diff --git a/pypy/module/micronumpy/test/test_appbridge.py b/pypy/module/micronumpy/test/test_appbridge.py --- a/pypy/module/micronumpy/test/test_appbridge.py +++ b/pypy/module/micronumpy/test/test_appbridge.py @@ -1,5 +1,6 @@ from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest + class AppTestAppBridge(BaseNumpyAppTest): def test_array_methods(self): import numpy as np diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -1,5 +1,5 @@ +from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest -from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest class AppTestNumSupport(BaseNumpyAppTest): def test_where(self): diff --git a/pypy/module/micronumpy/test/test_base.py b/pypy/module/micronumpy/test/test_base.py --- a/pypy/module/micronumpy/test/test_base.py +++ b/pypy/module/micronumpy/test/test_base.py @@ -1,5 +1,6 @@ -from pypy.module.micronumpy.interp_dtype import NPY_NATBYTE, NPY_OPPBYTE from pypy.conftest import option +from pypy.module.micronumpy import constants as NPY + class BaseNumpyAppTest(object): spaceconfig = dict(usemodules=['micronumpy']) @@ -23,5 +24,5 @@ import sys sys.modules['numpypy'] = numpy """) - cls.w_non_native_prefix = cls.space.wrap(NPY_OPPBYTE) - cls.w_native_prefix = cls.space.wrap(NPY_NATBYTE) + cls.w_non_native_prefix = cls.space.wrap(NPY.OPPBYTE) + cls.w_native_prefix = cls.space.wrap(NPY.NATBYTE) diff --git a/pypy/module/micronumpy/test/test_compile.py b/pypy/module/micronumpy/test/test_compile.py --- a/pypy/module/micronumpy/test/test_compile.py +++ b/pypy/module/micronumpy/test/test_compile.py @@ -1,4 +1,3 @@ - import py from pypy.module.micronumpy.compile import (numpy_compile, Assignment, ArrayConstant, FloatConstant, Operator, Variable, RangeConstant, Execute, diff --git a/pypy/module/micronumpy/test/test_complex.py b/pypy/module/micronumpy/test/test_complex.py --- a/pypy/module/micronumpy/test/test_complex.py +++ b/pypy/module/micronumpy/test/test_complex.py @@ -61,6 +61,7 @@ '%r and %r are not sufficiently close, %g > %g' %\ (a, b, absolute_error, max(abs_err, rel_err*abs(a)))) + def parse_testfile(fname): """Parse a file with test values @@ -85,6 +86,7 @@ flags ) + class AppTestUfuncs(BaseNumpyAppTest): def setup_class(cls): import os diff --git 
a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -1,4 +1,3 @@ -import py, sys from pypy.conftest import option from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest from pypy.interpreter.gateway import interp2app @@ -41,6 +40,7 @@ def test_dtype_basic(self): from numpypy import dtype + import sys d = dtype('?') assert d.num == 0 @@ -48,7 +48,13 @@ assert dtype(d) is d assert dtype('bool') is d assert dtype('|b1') is d + b = '>' if sys.byteorder == 'little' else '<' + assert dtype(b + 'i4') is not dtype(b + 'i4') assert repr(type(d)) == "" + exc = raises(ValueError, "d.names = []") + assert exc.value[0] == "there are no fields defined" + exc = raises(ValueError, "d.names = None") + assert exc.value[0] == "there are no fields defined" assert dtype('int8').num == 1 assert dtype('int8').name == 'int8' @@ -59,13 +65,10 @@ assert dtype(None) is dtype(float) - e = dtype('int8') - exc = raises(KeyError, "e[2]") - assert exc.value.message == "There are no fields in dtype int8." - exc = raises(KeyError, "e['z']") - assert exc.value.message == "There are no fields in dtype int8." - exc = raises(KeyError, "e[None]") - assert exc.value.message == "There are no fields in dtype int8." + for d in [dtype('i4')]: + for key in ["d[2]", "d['z']", "d[None]"]: + exc = raises(KeyError, key) + assert exc.value[0] == "There are no fields in dtype %s." % str(d) exc = raises(TypeError, dtype, (1, 2)) assert exc.value[0] == 'data type not understood' @@ -154,13 +157,48 @@ a = array(range(5), long) assert a.dtype is dtype(long) + def test_isbuiltin(self): + import numpy as np + import sys + assert np.dtype('?').isbuiltin == 1 + assert np.dtype(int).newbyteorder().isbuiltin == 0 + assert np.dtype(np.dtype(int)).isbuiltin == 1 From noreply at buildbot.pypy.org Tue Feb 25 03:59:24 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 25 Feb 2014 03:59:24 +0100 (CET) Subject: [pypy-commit] pypy default: support dtype reduce with subarrays Message-ID: <20140225025924.9DF4D1D23C7@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69381:f63a8bfab74b Date: 2014-02-24 09:30 -0500 http://bitbucket.org/pypy/pypy/changeset/f63a8bfab74b/ Log: support dtype reduce with subarrays diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -177,7 +177,8 @@ def descr_get_subdtype(self, space): if self.subdtype is None: return space.w_None - return space.newtuple([space.wrap(self.subdtype), self.descr_get_shape(space)]) + return space.newtuple([space.wrap(self.subdtype), + self.descr_get_shape(space)]) def get_name(self): return self.w_box_type.name @@ -251,11 +252,11 @@ def descr_get_fields(self, space): if not self.fields: return space.w_None - w_d = space.newdict() + w_fields = space.newdict() for name, (offset, subdtype) in self.fields.iteritems(): - space.setitem(w_d, space.wrap(name), + space.setitem(w_fields, space.wrap(name), space.newtuple([subdtype, space.wrap(offset)])) - return w_d + return w_fields def descr_get_names(self, space): if not self.fields: @@ -326,35 +327,27 @@ def descr_reduce(self, space): w_class = space.type(self) - kind = self.kind - elemsize = self.get_size() - builder_args = space.newtuple([space.wrap("%s%d" % (kind, elemsize)), space.wrap(0), space.wrap(1)]) + size = self.get_size() + builder_args = 
space.newtuple([space.wrap("%s%d" % (self.kind, size)), + space.wrap(0), space.wrap(1)]) version = space.wrap(3) + endian = self.byteorder + if endian == NPY.NATIVE: + endian = NPY.NATBYTE + subdescr = self.descr_get_subdtype(space) names = self.descr_get_names(space) values = self.descr_get_fields(space) - if self.fields: - endian = NPY.IGNORE - #TODO: Implement this when subarrays are implemented - subdescr = space.w_None - size = 0 - for key in self.fields: - dtype = self.fields[key][1] - assert isinstance(dtype, W_Dtype) - size += dtype.get_size() + if self.is_flexible_type(): w_size = space.wrap(size) - #TODO: Change this when alignment is implemented - alignment = space.wrap(1) + alignment = space.wrap(self.itemtype.alignment) else: - endian = self.byteorder - if endian == NPY.NATIVE: - endian = NPY.NATBYTE - subdescr = space.w_None w_size = space.wrap(-1) alignment = space.wrap(-1) flags = space.wrap(0) - data = space.newtuple([version, space.wrap(endian), subdescr, names, values, w_size, alignment, flags]) + data = space.newtuple([version, space.wrap(endian), subdescr, + names, values, w_size, alignment, flags]) return space.newtuple([w_class, builder_args, data]) def descr_setstate(self, space, w_data): diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -371,6 +371,7 @@ raises(TypeError, hash, d) def test_pickle(self): + import numpy as np from numpypy import array, dtype from cPickle import loads, dumps a = array([1,2,3]) @@ -379,6 +380,9 @@ else: assert a.dtype.__reduce__() == (dtype, ('i4', 0, 1), (3, '<', None, None, None, -1, -1, 0)) assert loads(dumps(a.dtype)) == a.dtype + assert np.dtype('bool').__reduce__() == (dtype, ('b1', 0, 1), (3, '|', None, None, None, -1, -1, 0)) + assert np.dtype('|V16').__reduce__() == (dtype, ('V16', 0, 1), (3, '|', None, None, None, 16, 1, 0)) + assert np.dtype((' Author: Brian Kearns Branch: Changeset: r69382:a49ae1b5d3b4 Date: 2014-02-24 12:03 -0500 http://bitbucket.org/pypy/pypy/changeset/a49ae1b5d3b4/ Log: store size directly on dtype diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -94,12 +94,12 @@ @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def _PyArray_ITEMSIZE(space, w_array): assert isinstance(w_array, W_NDimArray) - return w_array.get_dtype().get_size() + return w_array.get_dtype().elsize @cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) def _PyArray_NBYTES(space, w_array): assert isinstance(w_array, W_NDimArray) - return w_array.get_size() * w_array.get_dtype().get_size() + return w_array.get_size() * w_array.get_dtype().elsize @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def _PyArray_TYPE(space, w_array): diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -55,7 +55,7 @@ loop.setslice(space, shape, self, impl) def get_size(self): - return self.size // self.dtype.get_size() + return self.size // self.dtype.elsize def get_storage_size(self): return self.size @@ -105,7 +105,7 @@ backstrides = self.get_backstrides() if self.dtype.is_complex_type(): dtype = self.dtype.get_float_dtype(space) - return SliceArray(self.start + dtype.get_size(), strides, + return 
SliceArray(self.start + dtype.elsize, strides, backstrides, self.get_shape(), self, orig_array, dtype=dtype) impl = NonWritableArray(self.get_shape(), self.dtype, self.order, strides, backstrides) @@ -324,7 +324,7 @@ make_sure_not_resized(strides) make_sure_not_resized(backstrides) self.shape = shape - self.size = support.product(shape) * dtype.get_size() + self.size = support.product(shape) * dtype.elsize self.order = order self.dtype = dtype self.strides = strides @@ -352,7 +352,7 @@ self.get_shape()) def fill(self, space, box): - self.dtype.itemtype.fill(self.storage, self.dtype.get_size(), + self.dtype.itemtype.fill(self.storage, self.dtype.elsize, box, 0, self.size, 0) def set_shape(self, space, orig_array, new_shape): @@ -425,7 +425,7 @@ self.storage = parent.storage self.order = parent.order self.dtype = dtype - self.size = support.product(shape) * self.dtype.get_size() + self.size = support.product(shape) * self.dtype.elsize self.start = start self.orig_arr = orig_arr @@ -460,12 +460,12 @@ strides = [] backstrides = [] dtype = self.dtype - s = self.get_strides()[0] // dtype.get_size() + s = self.get_strides()[0] // dtype.elsize if self.order == 'C': new_shape.reverse() for sh in new_shape: - strides.append(s * dtype.get_size()) - backstrides.append(s * (sh - 1) * dtype.get_size()) + strides.append(s * dtype.elsize) + backstrides.append(s * (sh - 1) * dtype.elsize) s *= max(1, sh) if self.order == 'C': strides.reverse() diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/arrayimpl/sort.py @@ -71,10 +71,10 @@ def __init__(self, index_stride_size, stride_size, size): start = 0 dtype = interp_dtype.get_dtype_cache(space).w_longdtype - indexes = dtype.itemtype.malloc(size*dtype.get_size()) + indexes = dtype.itemtype.malloc(size * dtype.elsize) values = alloc_raw_storage(size * stride_size, track_allocation=False) - Repr.__init__(self, dtype.get_size(), stride_size, + Repr.__init__(self, dtype.elsize, stride_size, size, values, indexes, start, start) def __del__(self): diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -303,10 +303,10 @@ else: dtype = space.interp_w(W_Dtype, space.call_function(space.gettypefor(W_Dtype), w_dtype)) - if dtype.get_size() == 0: + if dtype.elsize == 0: raise OperationError(space.w_TypeError, space.wrap( "data-type must not be 0-sized")) - if dtype.get_size() != self.get_dtype(space).get_size(): + if dtype.elsize != self.get_dtype(space).elsize: raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) if dtype.is_str_or_unicode(): @@ -327,7 +327,7 @@ return space.wrap(1) def descr_get_itemsize(self, space): - return self.get_dtype(space).descr_get_itemsize(space) + return space.wrap(self.get_dtype(space).elsize) def descr_get_shape(self, space): return space.newtuple([]) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -6,7 +6,7 @@ interp_attrproperty, interp_attrproperty_w) from pypy.module.micronumpy import types, interp_boxes, base from rpython.rlib.objectmodel import specialize -from rpython.rlib.rarithmetic import LONG_BIT, r_longlong, r_ulonglong +from rpython.rlib.rarithmetic import r_longlong, r_ulonglong from rpython.rlib import 
jit from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy.conversion_utils import byteorder_converter @@ -39,14 +39,14 @@ class W_Dtype(W_Root): _immutable_fields_ = [ "num", "kind", "char", "w_box_type", "float_type", - "itemtype?", "byteorder?", "names?", "fields?", "size?", + "itemtype?", "byteorder?", "names?", "fields?", "elsize?", "shape?", "subdtype?", "base?", "alternate_constructors", "aliases", ] def __init__(self, itemtype, num, kind, char, w_box_type, float_type=None, byteorder=None, names=[], fields={}, - size=1, shape=[], subdtype=None, + elsize=None, shape=[], subdtype=None, alternate_constructors=[], aliases=[]): self.itemtype = itemtype self.num = num @@ -62,7 +62,9 @@ self.byteorder = byteorder self.names = names self.fields = fields - self.size = size + if elsize is None: + elsize = itemtype.get_element_size() + self.elsize = elsize self.shape = shape self.subdtype = subdtype if not subdtype: @@ -122,9 +124,6 @@ def is_native(self): return self.byteorder in (NPY.NATIVE, NPY.NATBYTE) - def get_size(self): - return self.size * self.itemtype.get_element_size() - def get_float_dtype(self, space): assert self.kind == NPY.COMPLEXLTR assert self.float_type is not None @@ -158,14 +157,14 @@ byteorder = NPY.NATBYTE if self.is_native() else NPY.OPPBYTE else: byteorder = '' - r = space.wrap(byteorder + self.char + str(self.size)) + size = self.elsize + if self.num == NPY.UNICODE: + size >>= 2 + r = space.wrap(byteorder + self.char + str(size)) else: r = self.descr_get_name(space) return space.wrap("dtype(%s)" % space.str_w(space.repr(r))) - def descr_get_itemsize(self, space): - return space.wrap(self.get_size()) - def descr_get_alignment(self, space): return space.wrap(self.itemtype.alignment) @@ -187,22 +186,18 @@ name = self.get_name() if name[-1] == '_': name = name[:-1] - if self.is_flexible_type(): - return space.wrap(name + str(self.get_size() * 8)) + if self.is_flexible_type() and self.elsize != 0: + return space.wrap(name + str(self.elsize * 8)) return space.wrap(name) def descr_get_str(self, space): - size = self.get_size() basic = self.kind - if basic == NPY.UNICODELTR: + endian = self.byteorder + size = self.elsize + if endian == NPY.NATIVE: + endian = NPY.NATBYTE + if self.num == NPY.UNICODE: size >>= 2 - endian = NPY.NATBYTE - elif size // (self.size or 1) <= 1: - endian = NPY.IGNORE - else: - endian = self.byteorder - if endian == NPY.NATIVE: - endian = NPY.NATBYTE return space.wrap("%s%s%s" % (endian, basic, size)) def descr_get_descr(self, space): @@ -240,7 +235,8 @@ if space.is_w(self, w_other): return True if isinstance(w_other, W_Dtype): - return space.eq_w(self.descr_reduce(space), w_other.descr_reduce(space)) + return space.eq_w(self.descr_reduce(space), + w_other.descr_reduce(space)) return False def descr_eq(self, space, w_other): @@ -326,10 +322,9 @@ def descr_reduce(self, space): w_class = space.type(self) - - size = self.get_size() - builder_args = space.newtuple([space.wrap("%s%d" % (self.kind, size)), - space.wrap(0), space.wrap(1)]) + builder_args = space.newtuple([ + space.wrap("%s%d" % (self.kind, self.elsize)), + space.wrap(0), space.wrap(1)]) version = space.wrap(3) endian = self.byteorder @@ -339,7 +334,7 @@ names = self.descr_get_names(space) values = self.descr_get_fields(space) if self.is_flexible_type(): - w_size = space.wrap(size) + w_size = space.wrap(self.elsize) alignment = space.wrap(self.itemtype.alignment) else: w_size = space.wrap(-1) @@ -407,7 +402,7 @@ self.itemtype = types.RecordType() if 
self.is_flexible_type(): - self.size = size + self.elsize = size @unwrap_spec(new_order=str) def descr_newbyteorder(self, space, new_order=NPY.SWAP): @@ -421,7 +416,7 @@ itemtype = self.itemtype.__class__(endian in (NPY.NATIVE, NPY.NATBYTE)) return W_Dtype(itemtype, self.num, self.kind, self.char, self.w_box_type, self.float_type, byteorder=endian, - size=self.size) + elsize=self.elsize) @specialize.arg(2) @@ -451,11 +446,11 @@ raise oefmt(space.w_ValueError, "two fields with the same name") assert isinstance(subdtype, W_Dtype) fields[fldname] = (offset, subdtype) - offset += subdtype.get_size() + offset += subdtype.elsize names.append(fldname) return W_Dtype(types.RecordType(), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), - names=names, fields=fields, size=offset) + names=names, fields=fields, elsize=offset) def dtype_from_dict(space, w_dict): @@ -494,10 +489,10 @@ size *= dim if size == 1: return subdtype - size *= subdtype.get_size() + size *= subdtype.elsize return W_Dtype(types.VoidType(), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), - shape=shape, subdtype=subdtype, size=size) + shape=shape, subdtype=subdtype, elsize=size) if space.is_none(w_dtype): return cache.w_float64dtype @@ -526,7 +521,7 @@ w_dtype1 = space.getitem(w_dtype, space.wrap(1)) subdtype = descr__new__(space, w_subtype, w_dtype0, w_align, w_copy) assert isinstance(subdtype, W_Dtype) - if subdtype.get_size() == 0: + if subdtype.elsize == 0: name = "%s%d" % (subdtype.kind, space.int_w(w_dtype1)) return descr__new__(space, w_subtype, space.wrap(name), w_align, w_copy) return descr__new__(space, w_subtype, w_dtype0, w_align, w_copy, w_shape=w_dtype1) @@ -562,22 +557,22 @@ char = interp_attrproperty("char", cls=W_Dtype), num = interp_attrproperty("num", cls=W_Dtype), byteorder = interp_attrproperty("byteorder", cls=W_Dtype), - itemsize = GetSetProperty(W_Dtype.descr_get_itemsize), + itemsize = interp_attrproperty("elsize", cls=W_Dtype), alignment = GetSetProperty(W_Dtype.descr_get_alignment), - isbuiltin = GetSetProperty(W_Dtype.descr_get_isbuiltin), subdtype = GetSetProperty(W_Dtype.descr_get_subdtype), + descr = GetSetProperty(W_Dtype.descr_get_descr), str = GetSetProperty(W_Dtype.descr_get_str), name = GetSetProperty(W_Dtype.descr_get_name), base = GetSetProperty(W_Dtype.descr_get_base), shape = GetSetProperty(W_Dtype.descr_get_shape), + isbuiltin = GetSetProperty(W_Dtype.descr_get_isbuiltin), isnative = GetSetProperty(W_Dtype.descr_get_isnative), fields = GetSetProperty(W_Dtype.descr_get_fields), names = GetSetProperty(W_Dtype.descr_get_names, W_Dtype.descr_set_names, W_Dtype.descr_del_names), hasobject = GetSetProperty(W_Dtype.descr_get_hasobject), - descr = GetSetProperty(W_Dtype.descr_get_descr), ) W_Dtype.typedef.acceptable_as_base_class = False @@ -609,7 +604,7 @@ def new_string_dtype(space, size): return W_Dtype( types.StringType(), - size=size, + elsize=size, num=NPY.STRING, kind=NPY.STRINGLTR, char=NPY.STRINGLTR, @@ -618,9 +613,10 @@ def new_unicode_dtype(space, size): + itemtype = types.UnicodeType() return W_Dtype( - types.UnicodeType(), - size=size, + itemtype, + elsize=size * itemtype.get_element_size(), num=NPY.UNICODE, kind=NPY.UNICODELTR, char=NPY.UNICODELTR, @@ -631,7 +627,7 @@ def new_void_dtype(space, size): return W_Dtype( types.VoidType(), - size=size, + elsize=size, num=NPY.VOID, kind=NPY.VOIDLTR, char=NPY.VOIDLTR, @@ -794,7 +790,7 @@ ) self.w_stringdtype = W_Dtype( types.StringType(), - size=0, + elsize=0, num=NPY.STRING, 
kind=NPY.STRINGLTR, char=NPY.STRINGLTR, @@ -805,7 +801,7 @@ ) self.w_unicodedtype = W_Dtype( types.UnicodeType(), - size=0, + elsize=0, num=NPY.UNICODE, kind=NPY.UNICODELTR, char=NPY.UNICODELTR, @@ -815,7 +811,7 @@ ) self.w_voiddtype = W_Dtype( types.VoidType(), - size=0, + elsize=0, num=NPY.VOID, kind=NPY.VOIDLTR, char=NPY.VOIDLTR, @@ -862,7 +858,7 @@ self.w_intpdtype, self.w_uintpdtype, ] self.float_dtypes_by_num_bytes = sorted( - (dtype.get_size(), dtype) + (dtype.elsize, dtype) for dtype in float_dtypes ) self.dtypes_by_num = {} @@ -873,7 +869,7 @@ dtype.fields = None # mark these as builtin self.dtypes_by_num[dtype.num] = dtype self.dtypes_by_name[dtype.get_name()] = dtype - for can_name in [dtype.kind + str(dtype.get_size()), + for can_name in [dtype.kind + str(dtype.elsize), dtype.char]: self.dtypes_by_name[can_name] = dtype self.dtypes_by_name[NPY.NATBYTE + can_name] = dtype @@ -928,7 +924,7 @@ for k, v in typeinfo_partial.iteritems(): space.setitem(w_typeinfo, space.wrap(k), space.gettypefor(v)) for k, dtype in typeinfo_full.iteritems(): - itembits = dtype.get_size() * 8 + itembits = dtype.elsize * 8 items_w = [space.wrap(dtype.char), space.wrap(dtype.num), space.wrap(itembits), diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -87,7 +87,7 @@ def descr_set_dtype(self, space, w_dtype): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) - if (dtype.get_size() != self.get_dtype().get_size() or + if (dtype.elsize != self.get_dtype().elsize or dtype.is_flexible_type() or self.get_dtype().is_flexible_type()): raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) @@ -101,10 +101,10 @@ return space.wrap(len(self.get_shape())) def descr_get_itemsize(self, space): - return space.wrap(self.get_dtype().get_size()) + return space.wrap(self.get_dtype().elsize) def descr_get_nbytes(self, space): - return space.wrap(self.get_size() * self.get_dtype().get_size()) + return space.wrap(self.get_size() * self.get_dtype().elsize) def descr_fill(self, space, w_value): self.fill(space, self.get_dtype().coerce(space, w_value)) @@ -575,10 +575,10 @@ raise oefmt(space.w_NotImplementedError, "astype(%s) not implemented yet", new_dtype.get_name()) - if new_dtype.num == NPY.STRING and new_dtype.size == 0: + if new_dtype.num == NPY.STRING and new_dtype.elsize == 0: if cur_dtype.num == NPY.STRING: new_dtype = interp_dtype.variable_dtype(space, - 'S' + str(cur_dtype.size)) + 'S' + str(cur_dtype.elsize)) impl = self.implementation if isinstance(impl, scalar.Scalar): return W_NDimArray.new_scalar(space, new_dtype, impl.value) @@ -781,8 +781,8 @@ w_dtype)) else: dtype = self.get_dtype() - old_itemsize = self.get_dtype().get_size() - new_itemsize = dtype.get_size() + old_itemsize = self.get_dtype().elsize + new_itemsize = dtype.elsize impl = self.implementation if new_itemsize == 0: raise OperationError(space.w_TypeError, space.wrap( @@ -1188,7 +1188,7 @@ if not shape: raise OperationError(space.w_TypeError, space.wrap( "numpy scalars from buffers not supported yet")) - totalsize = support.product(shape) * dtype.get_size() + totalsize = support.product(shape) * dtype.elsize if totalsize+offset > buf.getlength(): raise OperationError(space.w_TypeError, space.wrap( "buffer is too small for requested array")) @@ -1448,7 +1448,7 @@ # scalars and strings w/o __array__ method 
isstr = space.isinstance_w(w_object, space.w_str) if not issequence_w(space, w_object) or isstr: - if dtype is None or (dtype.is_str_or_unicode() and dtype.get_size() < 1): + if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): dtype = interp_ufuncs.find_dtype_for_scalar(space, w_object) return W_NDimArray.new_scalar(space, dtype, w_object) @@ -1478,14 +1478,14 @@ # not an array or incorrect dtype shape, elems_w = find_shape_and_elems(space, w_object, dtype) - if dtype is None or (dtype.is_str_or_unicode() and dtype.get_size() < 1): + if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): for w_elem in elems_w: if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar(): w_elem = w_elem.get_scalar_value() dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, dtype) if dtype is None: dtype = interp_dtype.get_dtype_cache(space).w_float64dtype - elif dtype.is_str_or_unicode() and dtype.get_size() < 1: + elif dtype.is_str_or_unicode() and dtype.elsize < 1: # promote S0 -> S1, U0 -> U1 dtype = interp_dtype.variable_dtype(space, dtype.char + '1') @@ -1501,7 +1501,7 @@ def zeros(space, w_shape, w_dtype=None, w_order=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) - if dtype.is_str_or_unicode() and dtype.get_size() < 1: + if dtype.is_str_or_unicode() and dtype.elsize < 1: dtype = interp_dtype.variable_dtype(space, dtype.char + '1') shape = _find_shape(space, w_shape, dtype) return W_NDimArray.from_shape(space, shape, dtype=dtype) @@ -1514,7 +1514,7 @@ else: dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) - if dtype.is_str_or_unicode() and dtype.get_size() < 1: + if dtype.is_str_or_unicode() and dtype.elsize < 1: dtype = interp_dtype.variable_dtype(space, dtype.char + '1') return W_NDimArray.from_shape(space, w_a.get_shape(), dtype=dtype, w_instance=w_a if subok else None) diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -59,7 +59,7 @@ return space.wrap(a) def _fromstring_bin(space, s, count, length, dtype): - itemsize = dtype.get_size() + itemsize = dtype.elsize assert itemsize >= 0 if count == -1: count = length / itemsize diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -519,7 +519,7 @@ if dt2.is_record_type(): return dt2 if dt1.is_str_or_unicode(): - if dt2.get_size() >= dt1.get_size(): + if dt2.elsize >= dt1.elsize: return dt2 return dt1 return dt2 @@ -542,10 +542,10 @@ promote_bools=False, promote_to_largest=False): if promote_to_largest: if dt.kind == NPY.GENBOOLLTR or dt.kind == NPY.SIGNEDLTR: - if dt.get_size() * 8 < LONG_BIT: + if dt.elsize * 8 < LONG_BIT: return interp_dtype.get_dtype_cache(space).w_longdtype elif dt.kind == NPY.UNSIGNEDLTR: - if dt.get_size() * 8 < LONG_BIT: + if dt.elsize * 8 < LONG_BIT: return interp_dtype.get_dtype_cache(space).w_ulongdtype else: assert dt.kind == NPY.FLOATINGLTR or dt.kind == NPY.COMPLEXLTR @@ -596,7 +596,7 @@ return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) elif current_guess.num == NPY.STRING: - if current_guess.get_size() < space.len_w(w_obj): + if current_guess.elsize < space.len_w(w_obj): return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) return 
current_guess diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -174,7 +174,7 @@ def __init__(self, array): self.array = array self.offset = 0 - self.skip = array.dtype.get_size() + self.skip = array.dtype.elsize self.size = array.size def setitem(self, elem): diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -459,7 +459,7 @@ builder = StringBuilder() iter = arr.create_iter() w_res_str = W_NDimArray.from_shape(space, [1], arr.get_dtype(), order='C') - itemsize = arr.get_dtype().get_size() + itemsize = arr.get_dtype().elsize res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), w_res_str.implementation.get_storage_as_int(space)) while not iter.done(): diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -28,8 +28,8 @@ shape_rev.reverse() for sh in shape_rev: slimit = max(sh, 1) - strides.append(s * dtype.get_size()) - backstrides.append(s * (slimit - 1) * dtype.get_size()) + strides.append(s * dtype.elsize) + backstrides.append(s * (slimit - 1) * dtype.elsize) s *= slimit if order == 'C': strides.reverse() diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -58,6 +58,7 @@ assert dtype('int8').num == 1 assert dtype('int8').name == 'int8' + assert dtype('void').name == 'void' assert dtype(int).fields is None assert dtype(int).names is None assert dtype(int).hasobject is False diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -14,15 +14,9 @@ def malloc(size): return None - @staticmethod - def get_element_size(): - return 1 - def __init__(self): self.base = self - - def get_size(self): - return 1 + self.elsize = 1 def create_slice(space, a, chunks): diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1628,7 +1628,7 @@ builder = StringBuilder() assert isinstance(item, interp_boxes.W_FlexibleBox) i = item.ofs - end = i + item.dtype.get_size() + end = i + item.dtype.elsize while i < end: assert isinstance(item.arr.storage[i], str) if item.arr.storage[i] == '\x00': @@ -1664,17 +1664,17 @@ if w_item is None: w_item = space.wrap('') arg = space.str_w(space.str(w_item)) - arr = VoidBoxStorage(dtype.size, dtype) - j = min(len(arg), dtype.size) + arr = VoidBoxStorage(dtype.elsize, dtype) + j = min(len(arg), dtype.elsize) for i in range(j): arr.storage[i] = arg[i] - for j in range(j, dtype.size): + for j in range(j, dtype.elsize): arr.storage[j] = '\x00' return interp_boxes.W_StringBox(arr, 0, arr.dtype) def store(self, arr, i, offset, box): assert isinstance(box, interp_boxes.W_StringBox) - size = min(arr.dtype.size - offset, box.arr.size - box.ofs) + size = min(arr.dtype.elsize - offset, box.arr.size - box.ofs) return self._store(arr.storage, i, offset, box, size) @jit.unroll_safe @@ -1784,7 +1784,7 @@ ofs += size def coerce(self, space, dtype, w_items): - arr = VoidBoxStorage(dtype.get_size(), dtype) + arr = VoidBoxStorage(dtype.elsize, dtype) self._coerce(space, arr, 0, 
dtype, w_items, dtype.shape) return interp_boxes.W_VoidBox(arr, 0, dtype) @@ -1793,7 +1793,7 @@ assert i == 0 assert isinstance(box, interp_boxes.W_VoidBox) assert box.dtype is box.arr.dtype - for k in range(box.arr.dtype.get_size()): + for k in range(box.arr.dtype.elsize): arr.storage[k + ofs] = box.arr.storage[k + box.ofs] def readarray(self, arr, i, offset, dtype=None): @@ -1867,7 +1867,7 @@ items_w = [w_item] else: items_w = [None] * len(dtype.fields) - arr = VoidBoxStorage(dtype.get_size(), dtype) + arr = VoidBoxStorage(dtype.elsize, dtype) for i in range(len(dtype.fields)): ofs, subdtype = dtype.fields[dtype.names[i]] itemtype = subdtype.itemtype @@ -1884,7 +1884,7 @@ def store(self, arr, i, ofs, box): assert isinstance(box, interp_boxes.W_VoidBox) - self._store(arr.storage, i, ofs, box, box.dtype.get_size()) + self._store(arr.storage, i, ofs, box, box.dtype.elsize) @jit.unroll_safe def _store(self, storage, i, ofs, box, size): @@ -1893,7 +1893,7 @@ def fill(self, storage, width, box, start, stop, offset): assert isinstance(box, interp_boxes.W_VoidBox) - assert width == box.dtype.get_size() + assert width == box.dtype.elsize for i in xrange(start, stop, width): self._store(storage, i, offset, box, width) @@ -1932,8 +1932,8 @@ def eq(self, v1, v2): assert isinstance(v1, interp_boxes.W_VoidBox) assert isinstance(v2, interp_boxes.W_VoidBox) - s1 = v1.dtype.get_size() - s2 = v2.dtype.get_size() + s1 = v1.dtype.elsize + s2 = v2.dtype.elsize assert s1 == s2 for i in range(s1): if v1.arr.storage[v1.ofs + i] != v2.arr.storage[v2.ofs + i]: From noreply at buildbot.pypy.org Tue Feb 25 03:59:27 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 25 Feb 2014 03:59:27 +0100 (CET) Subject: [pypy-commit] pypy default: simplify dtype methods/attributes Message-ID: <20140225025927.612071D23C7@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69383:1e1e64b26713 Date: 2014-02-24 13:22 -0500 http://bitbucket.org/pypy/pypy/changeset/1e1e64b26713/ Log: simplify dtype methods/attributes diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -89,7 +89,7 @@ def get_real(self, space, orig_array): strides = self.get_strides() backstrides = self.get_backstrides() - if self.dtype.is_complex_type(): + if self.dtype.is_complex(): dtype = self.dtype.get_float_dtype(space) return SliceArray(self.start, strides, backstrides, self.get_shape(), self, orig_array, dtype=dtype) @@ -103,13 +103,13 @@ def get_imag(self, space, orig_array): strides = self.get_strides() backstrides = self.get_backstrides() - if self.dtype.is_complex_type(): + if self.dtype.is_complex(): dtype = self.dtype.get_float_dtype(space) return SliceArray(self.start + dtype.elsize, strides, backstrides, self.get_shape(), self, orig_array, dtype=dtype) impl = NonWritableArray(self.get_shape(), self.dtype, self.order, strides, backstrides) - if not self.dtype.is_flexible_type(): + if not self.dtype.is_flexible(): impl.fill(space, self.dtype.box(0)) return impl @@ -204,7 +204,7 @@ if space.isinstance_w(w_idx, space.w_str): idx = space.str_w(w_idx) dtype = self.dtype - if not dtype.is_record_type() or idx not in dtype.fields: + if not dtype.is_record() or idx not in dtype.fields: raise OperationError(space.w_ValueError, space.wrap( "field named %s not found" % idx)) return RecordChunk(idx) diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py 
b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -70,7 +70,7 @@ scalar = Scalar(dtype) if dtype.is_str_or_unicode(): scalar.value = dtype.coerce(space, space.wrap(self.value.raw_str())) - elif dtype.is_record_type(): + elif dtype.is_record(): raise OperationError(space.w_NotImplementedError, space.wrap( "viewing scalar as record not implemented")) else: @@ -78,7 +78,7 @@ return scalar def get_real(self, space, orig_array): - if self.dtype.is_complex_type(): + if self.dtype.is_complex(): scalar = Scalar(self.dtype.get_float_dtype(space)) scalar.value = self.value.convert_real_to(scalar.dtype) return scalar @@ -91,7 +91,7 @@ "could not broadcast input array from shape " + "(%s) into shape ()" % ( ','.join([str(x) for x in w_arr.get_shape()],)))) - if self.dtype.is_complex_type(): + if self.dtype.is_complex(): dtype = self.dtype.get_float_dtype(space) self.value = self.dtype.itemtype.composite( w_arr.get_scalar_value().convert_to(space, dtype), @@ -100,7 +100,7 @@ self.value = w_arr.get_scalar_value() def get_imag(self, space, orig_array): - if self.dtype.is_complex_type(): + if self.dtype.is_complex(): scalar = Scalar(self.dtype.get_float_dtype(space)) scalar.value = self.value.convert_imag_to(scalar.dtype) return scalar @@ -110,7 +110,7 @@ def set_imag(self, space, orig_array, w_val): #Only called on complex dtype - assert self.dtype.is_complex_type() + assert self.dtype.is_complex() w_arr = convert_to_array(space, w_val) if len(w_arr.get_shape()) > 0: raise OperationError(space.w_ValueError, space.wrap( @@ -127,7 +127,7 @@ if space.len_w(w_idx) == 0: return self.get_scalar_value() elif space.isinstance_w(w_idx, space.w_str): - if self.dtype.is_record_type(): + if self.dtype.is_record(): w_val = self.value.descr_getitem(space, w_idx) return convert_to_array(space, w_val) elif space.is_none(w_idx): @@ -148,7 +148,7 @@ if space.len_w(w_idx) == 0: return self.set_scalar_value(self.dtype.coerce(space, w_val)) elif space.isinstance_w(w_idx, space.w_str): - if self.dtype.is_record_type(): + if self.dtype.is_record(): return self.value.descr_setitem(space, w_idx, w_val) raise OperationError(space.w_IndexError, space.wrap("0-d arrays can't be indexed")) diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -137,14 +137,14 @@ "all the input array dimensions except for the " "concatenation axis must match exactly")) a_dt = arr.get_dtype() - if dtype.is_record_type() and a_dt.is_record_type(): + if dtype.is_record() and a_dt.is_record(): # Record types must match for f in dtype.fields: if f not in a_dt.fields or \ dtype.fields[f] != a_dt.fields[f]: raise OperationError(space.w_TypeError, space.wrap("invalid type promotion")) - elif dtype.is_record_type() or a_dt.is_record_type(): + elif dtype.is_record() or a_dt.is_record(): raise OperationError(space.w_TypeError, space.wrap("invalid type promotion")) dtype = interp_ufuncs.find_binop_result_dtype(space, dtype, diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -311,7 +311,7 @@ "new type not compatible with array.")) if dtype.is_str_or_unicode(): return dtype.coerce(space, space.wrap(self.raw_str())) - elif dtype.is_record_type(): + elif dtype.is_record(): raise 
OperationError(space.w_NotImplementedError, space.wrap( "viewing scalar as record not implemented")) else: diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -39,7 +39,7 @@ class W_Dtype(W_Root): _immutable_fields_ = [ "num", "kind", "char", "w_box_type", "float_type", - "itemtype?", "byteorder?", "names?", "fields?", "elsize?", + "itemtype?", "byteorder?", "names?", "fields?", "elsize?", "alignment?", "shape?", "subdtype?", "base?", "alternate_constructors", "aliases", ] @@ -65,6 +65,7 @@ if elsize is None: elsize = itemtype.get_element_size() self.elsize = elsize + self.alignment = itemtype.alignment self.shape = shape self.subdtype = subdtype if not subdtype: @@ -87,40 +88,40 @@ def box_complex(self, real, imag): return self.itemtype.box_complex(real, imag) - def build_and_convert(self, space, box): - return self.itemtype.build_and_convert(space, self, box) - def coerce(self, space, w_item): return self.itemtype.coerce(space, self, w_item) - def is_int_type(self): - return (self.kind == NPY.SIGNEDLTR or self.kind == NPY.UNSIGNEDLTR or - self.kind == NPY.GENBOOLLTR) + def is_bool(self): + return self.kind == NPY.GENBOOLLTR def is_signed(self): return self.kind == NPY.SIGNEDLTR - def is_complex_type(self): + def is_unsigned(self): + return self.kind == NPY.UNSIGNEDLTR + + def is_int(self): + return (self.kind == NPY.SIGNEDLTR or self.kind == NPY.UNSIGNEDLTR or + self.kind == NPY.GENBOOLLTR) + + def is_float(self): + return self.kind == NPY.FLOATINGLTR + + def is_complex(self): return self.kind == NPY.COMPLEXLTR - def is_float_type(self): - return self.kind == NPY.FLOATINGLTR or self.kind == NPY.COMPLEXLTR - - def is_bool_type(self): - return self.kind == NPY.GENBOOLLTR - - def is_record_type(self): - return bool(self.fields) - - def is_str_type(self): + def is_str(self): return self.num == NPY.STRING def is_str_or_unicode(self): return self.num == NPY.STRING or self.num == NPY.UNICODE - def is_flexible_type(self): + def is_flexible(self): return self.is_str_or_unicode() or self.num == NPY.VOID + def is_record(self): + return bool(self.fields) + def is_native(self): return self.byteorder in (NPY.NATIVE, NPY.NATBYTE) @@ -132,53 +133,6 @@ dtype = dtype.descr_newbyteorder(space) return dtype - def descr_str(self, space): - if self.fields: - return space.str(self.descr_get_descr(space)) - elif self.subdtype is not None: - return space.str(space.newtuple([ - self.subdtype.descr_get_str(space), - self.descr_get_shape(space)])) - else: - if self.is_flexible_type(): - return self.descr_get_str(space) - else: - return self.descr_get_name(space) - - def descr_repr(self, space): - if self.fields: - r = self.descr_get_descr(space) - elif self.subdtype is not None: - r = space.newtuple([self.subdtype.descr_get_str(space), - self.descr_get_shape(space)]) - else: - if self.is_flexible_type(): - if self.byteorder != NPY.IGNORE: - byteorder = NPY.NATBYTE if self.is_native() else NPY.OPPBYTE - else: - byteorder = '' - size = self.elsize - if self.num == NPY.UNICODE: - size >>= 2 - r = space.wrap(byteorder + self.char + str(size)) - else: - r = self.descr_get_name(space) - return space.wrap("dtype(%s)" % space.str_w(space.repr(r))) - - def descr_get_alignment(self, space): - return space.wrap(self.itemtype.alignment) - - def descr_get_isbuiltin(self, space): - if self.fields is None: - return space.wrap(1) - return space.wrap(0) - - def descr_get_subdtype(self, space): - if 
self.subdtype is None: - return space.w_None - return space.newtuple([space.wrap(self.subdtype), - self.descr_get_shape(space)]) - def get_name(self): return self.w_box_type.name @@ -186,7 +140,7 @@ name = self.get_name() if name[-1] == '_': name = name[:-1] - if self.is_flexible_type() and self.elsize != 0: + if self.is_flexible() and self.elsize != 0: return space.wrap(name + str(self.elsize * 8)) return space.wrap(name) @@ -201,7 +155,7 @@ return space.wrap("%s%s%s" % (endian, basic, size)) def descr_get_descr(self, space): - if not self.is_record_type(): + if not self.is_record(): return space.newlist([space.newtuple([space.wrap(""), self.descr_get_str(space)])]) else: @@ -209,7 +163,7 @@ for name in self.names: subdtype = self.fields[name][1] subdescr = [space.wrap(name)] - if subdtype.is_record_type(): + if subdtype.is_record(): subdescr.append(subdtype.descr_get_descr(space)) elif subdtype.subdtype is not None: subdescr.append(subdtype.subdtype.descr_get_str(space)) @@ -220,30 +174,28 @@ descr.append(space.newtuple(subdescr[:])) return space.newlist(descr) - def descr_get_base(self, space): - return space.wrap(self.base) + def descr_get_hasobject(self, space): + return space.w_False + + def descr_get_isbuiltin(self, space): + if self.fields is None: + return space.wrap(1) + return space.wrap(0) def descr_get_isnative(self, space): return space.wrap(self.is_native()) + def descr_get_base(self, space): + return space.wrap(self.base) + + def descr_get_subdtype(self, space): + if self.subdtype is None: + return space.w_None + return space.newtuple([space.wrap(self.subdtype), + self.descr_get_shape(space)]) + def descr_get_shape(self, space): - w_shape = [space.wrap(dim) for dim in self.shape] - return space.newtuple(w_shape) - - def eq(self, space, w_other): - w_other = space.call_function(space.gettypefor(W_Dtype), w_other) - if space.is_w(self, w_other): - return True - if isinstance(w_other, W_Dtype): - return space.eq_w(self.descr_reduce(space), - w_other.descr_reduce(space)) - return False - - def descr_eq(self, space, w_other): - return space.wrap(self.eq(space, w_other)) - - def descr_ne(self, space, w_other): - return space.wrap(not self.eq(space, w_other)) + return space.newtuple([space.wrap(dim) for dim in self.shape]) def descr_get_fields(self, space): if not self.fields: @@ -287,8 +239,56 @@ raise OperationError(space.w_AttributeError, space.wrap( "Cannot delete dtype names attribute")) - def descr_get_hasobject(self, space): - return space.w_False + def eq(self, space, w_other): + w_other = space.call_function(space.gettypefor(W_Dtype), w_other) + if space.is_w(self, w_other): + return True + if isinstance(w_other, W_Dtype): + return space.eq_w(self.descr_reduce(space), + w_other.descr_reduce(space)) + return False + + def descr_eq(self, space, w_other): + return space.wrap(self.eq(space, w_other)) + + def descr_ne(self, space, w_other): + return space.wrap(not self.eq(space, w_other)) + + def descr_hash(self, space): + return space.hash(self.descr_reduce(space)) + + def descr_str(self, space): + if self.fields: + return space.str(self.descr_get_descr(space)) + elif self.subdtype is not None: + return space.str(space.newtuple([ + self.subdtype.descr_get_str(space), + self.descr_get_shape(space)])) + else: + if self.is_flexible(): + return self.descr_get_str(space) + else: + return self.descr_get_name(space) + + def descr_repr(self, space): + if self.fields: + r = self.descr_get_descr(space) + elif self.subdtype is not None: + r = 
space.newtuple([self.subdtype.descr_get_str(space), + self.descr_get_shape(space)]) + else: + if self.is_flexible(): + if self.byteorder != NPY.IGNORE: + byteorder = NPY.NATBYTE if self.is_native() else NPY.OPPBYTE + else: + byteorder = '' + size = self.elsize + if self.num == NPY.UNICODE: + size >>= 2 + r = space.wrap(byteorder + self.char + str(size)) + else: + r = self.descr_get_name(space) + return space.wrap("dtype(%s)" % space.str_w(space.repr(r))) def descr_getitem(self, space, w_item): if not self.fields: @@ -317,9 +317,6 @@ return space.wrap(0) return space.wrap(len(self.fields)) - def descr_hash(self, space): - return space.hash(self.descr_reduce(space)) - def descr_reduce(self, space): w_class = space.type(self) builder_args = space.newtuple([ @@ -333,9 +330,9 @@ subdescr = self.descr_get_subdtype(space) names = self.descr_get_names(space) values = self.descr_get_fields(space) - if self.is_flexible_type(): + if self.is_flexible(): w_size = space.wrap(self.elsize) - alignment = space.wrap(self.itemtype.alignment) + alignment = space.wrap(self.alignment) else: w_size = space.wrap(-1) alignment = space.wrap(-1) @@ -363,6 +360,7 @@ w_names = space.getitem(w_data, space.wrap(3)) w_fields = space.getitem(w_data, space.wrap(4)) size = space.int_w(space.getitem(w_data, space.wrap(5))) + alignment = space.int_w(space.getitem(w_data, space.wrap(6))) if (w_names == space.w_None) != (w_fields == space.w_None): raise oefmt(space.w_ValueError, "inconsistent fields and names") @@ -401,8 +399,9 @@ self.fields[name] = offset, dtype self.itemtype = types.RecordType() - if self.is_flexible_type(): + if self.is_flexible(): self.elsize = size + self.alignment = alignment @unwrap_spec(new_order=str) def descr_newbyteorder(self, space, new_order=NPY.SWAP): @@ -540,39 +539,38 @@ __module__ = "numpy", __new__ = interp2app(descr__new__), - __str__= interp2app(W_Dtype.descr_str), - __repr__ = interp2app(W_Dtype.descr_repr), - __eq__ = interp2app(W_Dtype.descr_eq), - __ne__ = interp2app(W_Dtype.descr_ne), - __getitem__ = interp2app(W_Dtype.descr_getitem), - __len__ = interp2app(W_Dtype.descr_len), - - __hash__ = interp2app(W_Dtype.descr_hash), - __reduce__ = interp2app(W_Dtype.descr_reduce), - __setstate__ = interp2app(W_Dtype.descr_setstate), - newbyteorder = interp2app(W_Dtype.descr_newbyteorder), - type = interp_attrproperty_w("w_box_type", cls=W_Dtype), kind = interp_attrproperty("kind", cls=W_Dtype), char = interp_attrproperty("char", cls=W_Dtype), num = interp_attrproperty("num", cls=W_Dtype), byteorder = interp_attrproperty("byteorder", cls=W_Dtype), itemsize = interp_attrproperty("elsize", cls=W_Dtype), - alignment = GetSetProperty(W_Dtype.descr_get_alignment), + alignment = interp_attrproperty("alignment", cls=W_Dtype), - subdtype = GetSetProperty(W_Dtype.descr_get_subdtype), + name = GetSetProperty(W_Dtype.descr_get_name), + str = GetSetProperty(W_Dtype.descr_get_str), descr = GetSetProperty(W_Dtype.descr_get_descr), - str = GetSetProperty(W_Dtype.descr_get_str), - name = GetSetProperty(W_Dtype.descr_get_name), - base = GetSetProperty(W_Dtype.descr_get_base), - shape = GetSetProperty(W_Dtype.descr_get_shape), + hasobject = GetSetProperty(W_Dtype.descr_get_hasobject), isbuiltin = GetSetProperty(W_Dtype.descr_get_isbuiltin), isnative = GetSetProperty(W_Dtype.descr_get_isnative), + base = GetSetProperty(W_Dtype.descr_get_base), + subdtype = GetSetProperty(W_Dtype.descr_get_subdtype), + shape = GetSetProperty(W_Dtype.descr_get_shape), fields = GetSetProperty(W_Dtype.descr_get_fields), names = 
GetSetProperty(W_Dtype.descr_get_names, W_Dtype.descr_set_names, W_Dtype.descr_del_names), - hasobject = GetSetProperty(W_Dtype.descr_get_hasobject), + + __eq__ = interp2app(W_Dtype.descr_eq), + __ne__ = interp2app(W_Dtype.descr_ne), + __hash__ = interp2app(W_Dtype.descr_hash), + __str__= interp2app(W_Dtype.descr_str), + __repr__ = interp2app(W_Dtype.descr_repr), + __getitem__ = interp2app(W_Dtype.descr_getitem), + __len__ = interp2app(W_Dtype.descr_len), + __reduce__ = interp2app(W_Dtype.descr_reduce), + __setstate__ = interp2app(W_Dtype.descr_setstate), + newbyteorder = interp2app(W_Dtype.descr_newbyteorder), ) W_Dtype.typedef.acceptable_as_base_class = False @@ -929,8 +927,8 @@ space.wrap(dtype.num), space.wrap(itembits), space.wrap(dtype.itemtype.get_element_size())] - if dtype.is_int_type(): - if dtype.kind == NPY.GENBOOLLTR: + if dtype.is_int(): + if dtype.is_bool(): w_maxobj = space.wrap(1) w_minobj = space.wrap(0) elif dtype.is_signed(): diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -88,7 +88,7 @@ dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) if (dtype.elsize != self.get_dtype().elsize or - dtype.is_flexible_type() or self.get_dtype().is_flexible_type()): + dtype.is_flexible() or self.get_dtype().is_flexible()): raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) self.implementation.set_dtype(space, dtype) @@ -220,7 +220,7 @@ def descr_getitem(self, space, w_idx): if space.is_w(w_idx, space.w_Ellipsis): return self - elif isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type() \ + elif isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool() \ and len(w_idx.get_shape()) > 0: return self.getitem_filter(space, w_idx) try: @@ -235,7 +235,7 @@ self.implementation.setitem_index(space, index_list, w_value) def descr_setitem(self, space, w_idx, w_value): - if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type() \ + if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool() \ and len(w_idx.get_shape()) > 0: self.setitem_filter(space, w_idx, convert_to_array(space, w_value)) return @@ -281,7 +281,7 @@ else: s.append(separator) s.append(' ') - if self.is_scalar() and dtype.is_str_type(): + if self.is_scalar() and dtype.is_str(): s.append(dtype.itemtype.to_str(i.getitem())) else: s.append(dtype.itemtype.str_format(i.getitem())) @@ -344,7 +344,7 @@ def descr_set_imag(self, space, w_value): # if possible, copy (broadcast) values into self - if not self.get_dtype().is_complex_type(): + if not self.get_dtype().is_complex(): raise OperationError(space.w_TypeError, space.wrap('array does not have imaginary part to set')) self.implementation.set_imag(space, self, w_value) @@ -689,7 +689,7 @@ @unwrap_spec(decimals=int) def descr_round(self, space, decimals=0, w_out=None): if space.is_none(w_out): - if self.get_dtype().is_bool_type(): + if self.get_dtype().is_bool(): #numpy promotes bool.round() to float16. Go figure. 
w_out = W_NDimArray.from_shape(space, self.get_shape(), interp_dtype.get_dtype_cache(space).w_float16dtype) @@ -700,7 +700,7 @@ "return arrays must be of ArrayType")) out = interp_dtype.dtype_agreement(space, [self], self.get_shape(), w_out) - if out.get_dtype().is_bool_type() and self.get_dtype().is_bool_type(): + if out.get_dtype().is_bool() and self.get_dtype().is_bool(): calc_dtype = interp_dtype.get_dtype_cache(space).w_longdtype else: calc_dtype = out.get_dtype() @@ -1093,7 +1093,7 @@ raise OperationError(space.w_TypeError, space.wrap( "only integer arrays with one element " "can be converted to an index")) - if not self.get_dtype().is_int_type() or self.get_dtype().is_bool_type(): + if not self.get_dtype().is_int() or self.get_dtype().is_bool(): raise OperationError(space.w_TypeError, space.wrap( "only integer arrays with one element " "can be converted to an index")) diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -168,7 +168,7 @@ "supported for binary functions")) assert isinstance(self, W_Ufunc2) obj = convert_to_array(space, w_obj) - if obj.get_dtype().is_flexible_type(): + if obj.get_dtype().is_flexible(): raise OperationError(space.w_TypeError, space.wrap('cannot perform reduce with flexible type')) obj_shape = obj.get_shape() @@ -287,12 +287,12 @@ out = None w_obj = convert_to_array(space, w_obj) dtype = w_obj.get_dtype() - if dtype.is_flexible_type(): + if dtype.is_flexible(): raise OperationError(space.w_TypeError, space.wrap('Not implemented for this type')) - if (self.int_only and not dtype.is_int_type() or - not self.allow_bool and dtype.is_bool_type() or - not self.allow_complex and dtype.is_complex_type()): + if (self.int_only and not dtype.is_int() or + not self.allow_bool and dtype.is_bool() or + not self.allow_complex and dtype.is_complex()): raise OperationError(space.w_TypeError, space.wrap( "ufunc %s not supported for the input type" % self.name)) calc_dtype = find_unaryop_result_dtype(space, @@ -311,7 +311,7 @@ res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype - if self.complex_to_float and calc_dtype.is_complex_type(): + if self.complex_to_float and calc_dtype.is_complex(): if calc_dtype.num == NPY.CFLOAT: res_dtype = interp_dtype.get_dtype_cache(space).w_float32dtype else: @@ -351,11 +351,11 @@ self.done_func = None def are_common_types(self, dtype1, dtype2): - if dtype1.is_complex_type() and dtype2.is_complex_type(): - return True - elif not (dtype1.is_complex_type() or dtype2.is_complex_type()) and \ - (dtype1.is_int_type() and dtype2.is_int_type() or dtype1.is_float_type() and dtype2.is_float_type()) and \ - not (dtype1.is_bool_type() or dtype2.is_bool_type()): + if dtype1.is_bool() or dtype2.is_bool(): + return False + if (dtype1.is_int() and dtype2.is_int() or + dtype1.is_float() and dtype2.is_float() or + dtype1.is_complex() and dtype2.is_complex()): return True return False @@ -370,13 +370,13 @@ w_rhs = convert_to_array(space, w_rhs) w_ldtype = w_lhs.get_dtype() w_rdtype = w_rhs.get_dtype() - if w_ldtype.is_str_type() and w_rdtype.is_str_type() and \ + if w_ldtype.is_str() and w_rdtype.is_str() and \ self.comparison_func: pass - elif (w_ldtype.is_str_type() or w_rdtype.is_str_type()) and \ + elif (w_ldtype.is_str() or w_rdtype.is_str()) and \ self.comparison_func and w_out is None: return space.wrap(False) - elif w_ldtype.is_flexible_type() or w_rdtype.is_flexible_type(): + 
elif w_ldtype.is_flexible() or w_rdtype.is_flexible(): if self.comparison_func: if self.name == 'equal' or self.name == 'not_equal': res = w_ldtype.eq(space, w_rdtype) @@ -399,13 +399,13 @@ w_ldtype, w_rdtype, promote_to_float=self.promote_to_float, promote_bools=self.promote_bools) - if (self.int_only and (not w_ldtype.is_int_type() or - not w_rdtype.is_int_type() or - not calc_dtype.is_int_type()) or - not self.allow_bool and (w_ldtype.is_bool_type() or - w_rdtype.is_bool_type()) or - not self.allow_complex and (w_ldtype.is_complex_type() or - w_rdtype.is_complex_type())): + if (self.int_only and (not w_ldtype.is_int() or + not w_rdtype.is_int() or + not calc_dtype.is_int()) or + not self.allow_bool and (w_ldtype.is_bool() or + w_rdtype.is_bool()) or + not self.allow_complex and (w_ldtype.is_complex() or + w_rdtype.is_complex())): raise OperationError(space.w_TypeError, space.wrap( "ufunc '%s' not supported for the input types" % self.name)) if space.is_none(w_out): @@ -467,7 +467,7 @@ return interp_dtype.get_dtype_cache(space).w_int8dtype # Everything numeric promotes to complex - if dt2.is_complex_type() or dt1.is_complex_type(): + if dt2.is_complex() or dt1.is_complex(): if dt2.num == NPY.HALF: dt1, dt2 = dt2, dt1 if dt2.num == NPY.CFLOAT: @@ -488,7 +488,7 @@ if promote_to_float: return find_unaryop_result_dtype(space, dt2, promote_to_float=True) # If they're the same kind, choose the greater one. - if dt1.kind == dt2.kind and not dt2.is_flexible_type(): + if dt1.kind == dt2.kind and not dt2.is_flexible(): if dt2.num == NPY.HALF: return dt1 return dt2 @@ -513,10 +513,10 @@ elif dt2.num == NPY.ULONGLONG or (LONG_BIT == 64 and dt2.num == NPY.ULONG): # UInt64 + signed = Float64 dtypenum = NPY.DOUBLE - elif dt2.is_flexible_type(): + elif dt2.is_flexible(): # For those operations that get here (concatenate, stack), # flexible types take precedence over numeric type - if dt2.is_record_type(): + if dt2.is_record(): return dt2 if dt1.is_str_or_unicode(): if dt2.elsize >= dt1.elsize: diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -69,7 +69,7 @@ return True def find_shape_and_elems(space, w_iterable, dtype): - is_rec_type = dtype is not None and dtype.is_record_type() + is_rec_type = dtype is not None and dtype.is_record() if is_rec_type and is_single_elem(space, w_iterable, is_rec_type): return [], [w_iterable] if isinstance(w_iterable, W_NDimArray) and w_iterable.is_scalar(): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1623,6 +1623,12 @@ def test_realimag_views(self): from numpypy import arange, array + a = array(1.5) + assert a.real == 1.5 + assert a.imag == 0.0 + a = array([1.5, 2.5]) + assert (a.real == [1.5, 2.5]).all() + assert (a.imag == [0.0, 0.0]).all() a = arange(15) b = a.real b[5]=50 diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1961,7 +1961,7 @@ # compute alignment for tp in globals().values(): if isinstance(tp, type) and hasattr(tp, 'T'): - tp.alignment = clibffi.cast_type_to_ffitype(tp.T).c_alignment + tp.alignment = widen(clibffi.cast_type_to_ffitype(tp.T).c_alignment) if issubclass(tp, Float): all_float_types.append((tp, 'float')) if issubclass(tp, Integer): From noreply at 
buildbot.pypy.org Tue Feb 25 03:59:28 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 25 Feb 2014 03:59:28 +0100 (CET) Subject: [pypy-commit] pypy default: test/improve build_scalar function Message-ID: <20140225025928.B14C71D23C7@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69384:b8e50f96bf3c Date: 2014-02-24 18:44 -0500 http://bitbucket.org/pypy/pypy/changeset/b8e50f96bf3c/ Log: test/improve build_scalar function diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1519,19 +1519,25 @@ return W_NDimArray.from_shape(space, w_a.get_shape(), dtype=dtype, w_instance=w_a if subok else None) -def _reconstruct(space, w_subtype, w_shape, w_dtype): - return descr_new_array(space, w_subtype, w_shape, w_dtype) - def build_scalar(space, w_dtype, w_state): from rpython.rtyper.lltypesystem import rffi, lltype - - assert isinstance(w_dtype, interp_dtype.W_Dtype) - + if not isinstance(w_dtype, interp_dtype.W_Dtype): + raise oefmt(space.w_TypeError, + "argument 1 must be numpy.dtype, not %T", w_dtype) + if w_dtype.elsize == 0: + raise oefmt(space.w_ValueError, "itemsize cannot be zero") + if not space.isinstance_w(w_state, space.w_str): + raise oefmt(space.w_TypeError, "initializing object must be a string") + if space.len_w(w_state) != w_dtype.elsize: + raise oefmt(space.w_ValueError, "initialization string is too small") state = rffi.str2charp(space.str_w(w_state)) box = w_dtype.itemtype.box_raw_data(state) lltype.free(state, flavor="raw") return box +def _reconstruct(space, w_subtype, w_shape, w_dtype): + return descr_new_array(space, w_subtype, w_shape, w_dtype) + W_FlatIterator.typedef = TypeDef("flatiter", __module__ = "numpy", diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -773,6 +773,23 @@ a[()] = 4 assert a == 4 + def test_build_scalar(self): + from numpy import dtype + try: + from numpy.core.multiarray import scalar + except ImportError: + from numpy import scalar + exc = raises(TypeError, scalar, int, 2) + assert exc.value[0] == 'argument 1 must be numpy.dtype, not type' + exc = raises(ValueError, scalar, dtype('void'), 'abc') + assert exc.value[0] == 'itemsize cannot be zero' + exc = raises(TypeError, scalar, dtype(float), 2.5) + assert exc.value[0] == 'initializing object must be a string' + exc = raises(ValueError, scalar, dtype(float), 'abc') + assert exc.value[0] == 'initialization string is too small' + a = scalar(dtype(' Author: Brian Kearns Branch: Changeset: r69385:96340d2a63eb Date: 2014-02-24 19:53 -0500 http://bitbucket.org/pypy/pypy/changeset/96340d2a63eb/ Log: fix real/imag on numpy scalars diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -352,6 +352,12 @@ w_meth = space.getattr(self.descr_ravel(space), space.wrap('reshape')) return space.call_args(w_meth, __args__) + def descr_get_real(self, space): + return self.get_dtype(space).itemtype.real(self) + + def descr_get_imag(self, space): + return self.get_dtype(space).itemtype.imag(self) + w_flags = None def descr_get_flags(self, space): if self.w_flags is None: @@ -423,25 +429,13 @@ return space.call_method(self.item(space), 'as_integer_ratio') class 
W_ComplexFloatingBox(W_InexactBox): - def descr_get_real(self, space): - dtype = self._COMPONENTS_BOX._get_dtype(space) - box = self.convert_real_to(dtype) - assert isinstance(box, self._COMPONENTS_BOX) - return space.wrap(box) - - def descr_get_imag(self, space): - dtype = self._COMPONENTS_BOX._get_dtype(space) - box = self.convert_imag_to(dtype) - assert isinstance(box, self._COMPONENTS_BOX) - return space.wrap(box) + pass class W_Complex64Box(ComplexBox, W_ComplexFloatingBox): descr__new__, _get_dtype, descr_reduce = new_dtype_getter("complex64") - _COMPONENTS_BOX = W_Float32Box class W_Complex128Box(ComplexBox, W_ComplexFloatingBox): descr__new__, _get_dtype, descr_reduce = new_dtype_getter("complex128") - _COMPONENTS_BOX = W_Float64Box if long_double_size in (8, 12, 16): class W_FloatLongBox(W_FloatingBox, PrimitiveBox): @@ -449,7 +443,6 @@ class W_ComplexLongBox(ComplexBox, W_ComplexFloatingBox): descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.CLONGDOUBLELTR) - _COMPONENTS_BOX = W_FloatLongBox class W_FlexibleBox(W_GenericBox): _attrs_ = ['arr', 'ofs', 'dtype'] @@ -635,6 +628,8 @@ strides = GetSetProperty(W_GenericBox.descr_get_shape), ndim = GetSetProperty(W_GenericBox.descr_get_ndim), T = GetSetProperty(W_GenericBox.descr_self), + real = GetSetProperty(W_GenericBox.descr_get_real), + imag = GetSetProperty(W_GenericBox.descr_get_imag), flags = GetSetProperty(W_GenericBox.descr_get_flags), ) @@ -768,16 +763,12 @@ __new__ = interp2app(W_Complex64Box.descr__new__.im_func), __reduce__ = interp2app(W_Complex64Box.descr_reduce), __complex__ = interp2app(W_GenericBox.item), - real = GetSetProperty(W_ComplexFloatingBox.descr_get_real), - imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), ) W_Complex128Box.typedef = TypeDef("complex128", (W_ComplexFloatingBox.typedef, complex_typedef), __module__ = "numpy", __new__ = interp2app(W_Complex128Box.descr__new__.im_func), __reduce__ = interp2app(W_Complex128Box.descr_reduce), - real = GetSetProperty(W_ComplexFloatingBox.descr_get_real), - imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), ) if long_double_size in (8, 12, 16): @@ -792,8 +783,6 @@ __new__ = interp2app(W_ComplexLongBox.descr__new__.im_func), __reduce__ = interp2app(W_ComplexLongBox.descr_reduce), __complex__ = interp2app(W_GenericBox.item), - real = GetSetProperty(W_ComplexFloatingBox.descr_get_real), - imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), ) W_FlexibleBox.typedef = TypeDef("flexible", W_GenericBox.typedef, diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -171,6 +171,18 @@ assert b == v raises(IndexError, "v['blah']") + def test_realimag(self): + import numpy as np + a = np.int64(2) + assert a.real == 2 + assert a.imag == 0 + a = np.float64(2.5) + assert a.real == 2.5 + assert a.imag == 0.0 + a = np.complex64(2.5-1.5j) + assert a.real == 2.5 + assert a.imag == -1.5 + def test_view(self): import numpy as np import sys diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -10,7 +10,7 @@ from pypy.module.micronumpy.base import W_NDimArray -class TestNumpyJIt(LLJitMixin): +class TestNumpyJit(LLJitMixin): graph = None interp = None diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py 
+++ b/pypy/module/micronumpy/types.py @@ -1594,7 +1594,7 @@ class ComplexLong(ComplexFloating, BaseType): T = rffi.DOUBLE BoxType = interp_boxes.W_ComplexLongBox - ComponentBoxType = interp_boxes.W_Float64Box + ComponentBoxType = interp_boxes.W_FloatLongBox elif interp_boxes.long_double_size in (12, 16): class FloatLong(BaseType, Float): From noreply at buildbot.pypy.org Tue Feb 25 04:38:56 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 25 Feb 2014 04:38:56 +0100 (CET) Subject: [pypy-commit] pypy default: kill float_type attribute on dtypes Message-ID: <20140225033856.5B7CE1C303A@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69386:d46ae9a6a473 Date: 2014-02-24 19:05 -0500 http://bitbucket.org/pypy/pypy/changeset/d46ae9a6a473/ Log: kill float_type attribute on dtypes diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -38,14 +38,14 @@ class W_Dtype(W_Root): _immutable_fields_ = [ - "num", "kind", "char", "w_box_type", "float_type", + "num", "kind", "char", "w_box_type", "itemtype?", "byteorder?", "names?", "fields?", "elsize?", "alignment?", "shape?", "subdtype?", "base?", "alternate_constructors", "aliases", ] def __init__(self, itemtype, num, kind, char, w_box_type, - float_type=None, byteorder=None, names=[], fields={}, + byteorder=None, names=[], fields={}, elsize=None, shape=[], subdtype=None, alternate_constructors=[], aliases=[]): self.itemtype = itemtype @@ -53,7 +53,6 @@ self.kind = kind self.char = char self.w_box_type = w_box_type - self.float_type = float_type if byteorder is None: if itemtype.get_element_size() == 1: byteorder = NPY.IGNORE @@ -126,9 +125,8 @@ return self.byteorder in (NPY.NATIVE, NPY.NATBYTE) def get_float_dtype(self, space): - assert self.kind == NPY.COMPLEXLTR - assert self.float_type is not None - dtype = get_dtype_cache(space).dtypes_by_name[self.float_type] + assert isinstance(self.itemtype, types.ComplexFloating) + dtype = self.itemtype.ComponentBoxType._get_dtype(space) if self.byteorder == NPY.OPPBYTE: dtype = dtype.descr_newbyteorder(space) return dtype @@ -414,8 +412,7 @@ endian = newendian itemtype = self.itemtype.__class__(endian in (NPY.NATIVE, NPY.NATBYTE)) return W_Dtype(itemtype, self.num, self.kind, self.char, - self.w_box_type, self.float_type, byteorder=endian, - elsize=self.elsize) + self.w_box_type, byteorder=endian, elsize=self.elsize) @specialize.arg(2) @@ -764,7 +761,6 @@ char=NPY.CFLOATLTR, w_box_type=space.gettypefor(interp_boxes.W_Complex64Box), aliases=['csingle'], - float_type=NPY.FLOATLTR, ) self.w_complex128dtype = W_Dtype( types.Complex128(), @@ -775,7 +771,6 @@ alternate_constructors=[space.w_complex, space.gettypefor(interp_boxes.W_ComplexFloatingBox)], aliases=["complex", 'cfloat', 'cdouble'], - float_type=NPY.DOUBLELTR, ) self.w_complexlongdtype = W_Dtype( types.ComplexLong(), @@ -784,7 +779,6 @@ char=NPY.CLONGDOUBLELTR, w_box_type=space.gettypefor(interp_boxes.W_ComplexLongBox), aliases=["clongdouble", "clongfloat"], - float_type=NPY.LONGDOUBLELTR, ) self.w_stringdtype = W_Dtype( types.StringType(), From noreply at buildbot.pypy.org Tue Feb 25 04:45:58 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 04:45:58 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Move floatobject's float2string to module level again (for numpy). 
Message-ID: <20140225034558.CA5B51C08B9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69387:499401528ec4 Date: 2014-02-25 04:33 +0100 http://bitbucket.org/pypy/pypy/changeset/499401528ec4/ Log: Move floatobject's float2string to module level again (for numpy). diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -21,6 +21,20 @@ from rpython.rlib.unroll import unrolling_iterable +def float2string(x, code, precision): + # we special-case explicitly inf and nan here + if isfinite(x): + s = formatd(x, code, precision, DTSF_ADD_DOT_0) + elif isinf(x): + if x > 0.0: + s = "inf" + else: + s = "-inf" + else: # isnan(x): + s = "nan" + return s + + def detect_floatformat(): from rpython.rtyper.lltypesystem import rffi, lltype buf = lltype.malloc(rffi.CCHARP.TO, 8, flavor='raw') @@ -356,25 +370,11 @@ if space.isinstance_w(w_obj, space.w_long): return W_FloatObject(space.float_w(w_obj)) - def _float2string(self, x, code, precision): - # we special-case explicitly inf and nan here - if isfinite(x): - s = formatd(x, code, precision, DTSF_ADD_DOT_0) - elif isinf(x): - if x > 0.0: - s = "inf" - else: - s = "-inf" - else: # isnan(x): - s = "nan" - return s - def descr_repr(self, space): - return space.wrap(self._float2string(self.floatval, 'r', 0)) + return space.wrap(float2string(self.floatval, 'r', 0)) def descr_str(self, space): - return space.wrap(self._float2string(self.floatval, 'g', - DTSF_STR_PRECISION)) + return space.wrap(float2string(self.floatval, 'g', DTSF_STR_PRECISION)) def descr_hash(self, space): return space.wrap(_hash_float(space, self.floatval)) From noreply at buildbot.pypy.org Tue Feb 25 04:46:00 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 04:46:00 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Move complexobject's str_format to module level again (for numpy). Message-ID: <20140225034600.0BBE81C08B9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69388:273232704a76 Date: 2014-02-25 04:37 +0100 http://bitbucket.org/pypy/pypy/changeset/273232704a76/ Log: Move complexobject's str_format to module level again (for numpy). 
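The move below makes the complex formatting helpers plain module-level functions, so code outside W_ComplexObject (micronumpy's complex-to-string conversion is the intended user) can call them without holding a wrapped complex object. A minimal sketch of that call pattern, assuming the one-argument str_format() the helper settles on after the follow-up "Fix." changeset; the complex_to_str() wrapper itself is hypothetical and not part of this commit:

    # Illustrative only: reuse the module-level helper introduced below.
    # str_format() formats a single float component the way str(complex) does.
    from pypy.objspace.std.complexobject import str_format

    def complex_to_str(real, imag):
        # simplified '(real+imagj)' layout; the real descr_str() also
        # handles the copysign and NaN corner cases
        sign = '+' if imag >= 0.0 else ''
        return '(' + str_format(real) + sign + str_format(imag) + 'j)'
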
diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -115,6 +115,25 @@ return realpart, imagpart +def format_float(x, code, precision): + # like float2string, except that the ".0" is not necessary + if isinf(x): + if x > 0.0: + return "inf" + else: + return "-inf" + elif isnan(x): + return "nan" + else: + return formatd(x, code, precision) + +def repr_format(self, x): + return format_float(x, 'r', 0) + +def str_format(self, x): + return format_float(x, 'g', DTSF_STR_PRECISION) + + def unpackcomplex(space, w_complex, strict_typing=True): """ convert w_complex into a complex and return the unwrapped (real, imag) @@ -335,39 +354,21 @@ return space.newtuple([space.newfloat(self.realval), space.newfloat(self.imagval)]) - def _format_float(self, x, code, precision): - # like float2string, except that the ".0" is not necessary - if isinf(x): - if x > 0.0: - return "inf" - else: - return "-inf" - elif isnan(x): - return "nan" - else: - return formatd(x, code, precision) - - def _repr_format(self, x): - return self._format_float(x, 'r', 0) - - def _str_format(self, x): - return self._format_float(x, 'g', DTSF_STR_PRECISION) - def descr_repr(self, space): if self.realval == 0 and copysign(1., self.realval) == 1.: - return space.wrap(self._repr_format(self.imagval) + 'j') + return space.wrap(repr_format(self.imagval) + 'j') sign = (copysign(1., self.imagval) == 1. or isnan(self.imagval)) and '+' or '' - return space.wrap('(' + self._repr_format(self.realval) - + sign + self._repr_format(self.imagval) + 'j)') + return space.wrap('(' + repr_format(self.realval) + + sign + repr_format(self.imagval) + 'j)') def descr_str(self, space): if self.realval == 0 and copysign(1., self.realval) == 1.: - return space.wrap(self._str_format(self.imagval) + 'j') + return space.wrap(str_format(self.imagval) + 'j') sign = (copysign(1., self.imagval) == 1. or isnan(self.imagval)) and '+' or '' - return space.wrap('(' + self._str_format(self.realval) - + sign + self._str_format(self.imagval) + 'j)') + return space.wrap('(' + str_format(self.realval) + + sign + str_format(self.imagval) + 'j)') def descr_hash(self, space): hashreal = _hash_float(space, self.realval) From noreply at buildbot.pypy.org Tue Feb 25 04:46:01 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 04:46:01 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Fix. Message-ID: <20140225034601.308401C08B9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69389:28453212be9c Date: 2014-02-25 04:41 +0100 http://bitbucket.org/pypy/pypy/changeset/28453212be9c/ Log: Fix. diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -127,10 +127,10 @@ else: return formatd(x, code, precision) -def repr_format(self, x): +def repr_format(x): return format_float(x, 'r', 0) -def str_format(self, x): +def str_format(x): return format_float(x, 'g', DTSF_STR_PRECISION) From noreply at buildbot.pypy.org Tue Feb 25 04:46:02 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 04:46:02 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Use space.int_w because numpy's int boxes doesn't seem to support space.float_w. 
Message-ID: <20140225034602.5CCB81C08B9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69390:81595295624b Date: 2014-02-25 04:42 +0100 http://bitbucket.org/pypy/pypy/changeset/81595295624b/ Log: Use space.int_w because numpy's int boxes doesn't seem to support space.float_w. diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -366,7 +366,7 @@ if isinstance(w_obj, W_FloatObject): return w_obj if space.isinstance_w(w_obj, space.w_int): - return W_FloatObject(space.float_w(w_obj)) + return W_FloatObject(space.int_w(w_obj)) if space.isinstance_w(w_obj, space.w_long): return W_FloatObject(space.float_w(w_obj)) From noreply at buildbot.pypy.org Tue Feb 25 05:18:11 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 25 Feb 2014 05:18:11 +0100 (CET) Subject: [pypy-commit] pypy default: enable dtype(np.generic) creation Message-ID: <20140225041811.B0B6B1C0907@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69393:dd3bfb14bdc7 Date: 2014-02-24 23:15 -0500 http://bitbucket.org/pypy/pypy/changeset/dd3bfb14bdc7/ Log: enable dtype(np.generic) creation diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -827,10 +827,8 @@ NPY.STRING: [space.w_str, space.gettypefor(interp_boxes.W_CharacterBox)], NPY.UNICODE: [space.w_unicode], - NPY.VOID: [], #space.w_buffer, - # XXX no buffer in space - #space.gettypefor(interp_boxes.W_GenericBox), - # XXX fix, leads to _coerce error + NPY.VOID: [space.gettypefor(interp_boxes.W_GenericBox)], + #space.w_buffer, # XXX no buffer in space } float_dtypes = [self.w_float16dtype, self.w_float32dtype, self.w_float64dtype, self.w_floatlongdtype] diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -860,6 +860,7 @@ raises(TypeError, lambda: float64(3) & 1) def test_alternate_constructs(self): + import numpy as np from numpypy import dtype nnp = self.non_native_prefix byteorder = self.native_prefix @@ -875,6 +876,7 @@ assert dtype(' Author: Brian Kearns Branch: Changeset: r69392:f0b1c9109172 Date: 2014-02-24 23:10 -0500 http://bitbucket.org/pypy/pypy/changeset/f0b1c9109172/ Log: kill aliases attribute on dtypes diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -41,13 +41,11 @@ "num", "kind", "char", "w_box_type", "itemtype?", "byteorder?", "names?", "fields?", "elsize?", "alignment?", "shape?", "subdtype?", "base?", - "aliases", ] def __init__(self, itemtype, num, kind, char, w_box_type, byteorder=None, names=[], fields={}, - elsize=None, shape=[], subdtype=None, - aliases=[]): + elsize=None, shape=[], subdtype=None): self.itemtype = itemtype self.num = num self.kind = kind @@ -71,7 +69,6 @@ self.base = self else: self.base = subdtype.base - self.aliases = aliases def __repr__(self): if self.fields is not None: @@ -638,7 +635,6 @@ kind=NPY.GENBOOLLTR, char=NPY.BOOLLTR, w_box_type=space.gettypefor(interp_boxes.W_BoolBox), - aliases=['bool', 'bool8'], ) self.w_int8dtype = W_Dtype( types.Int8(), @@ -646,7 +642,6 @@ kind=NPY.SIGNEDLTR, char=NPY.BYTELTR, 
w_box_type=space.gettypefor(interp_boxes.W_Int8Box), - aliases=['byte'], ) self.w_uint8dtype = W_Dtype( types.UInt8(), @@ -654,7 +649,6 @@ kind=NPY.UNSIGNEDLTR, char=NPY.UBYTELTR, w_box_type=space.gettypefor(interp_boxes.W_UInt8Box), - aliases=['ubyte'], ) self.w_int16dtype = W_Dtype( types.Int16(), @@ -662,7 +656,6 @@ kind=NPY.SIGNEDLTR, char=NPY.SHORTLTR, w_box_type=space.gettypefor(interp_boxes.W_Int16Box), - aliases=['short'], ) self.w_uint16dtype = W_Dtype( types.UInt16(), @@ -670,7 +663,6 @@ kind=NPY.UNSIGNEDLTR, char=NPY.USHORTLTR, w_box_type=space.gettypefor(interp_boxes.W_UInt16Box), - aliases=['ushort'], ) self.w_int32dtype = W_Dtype( types.Int32(), @@ -692,7 +684,6 @@ kind=NPY.SIGNEDLTR, char=NPY.LONGLTR, w_box_type=space.gettypefor(interp_boxes.W_LongBox), - aliases=['int', 'intp', 'p'], ) self.w_ulongdtype = W_Dtype( types.ULong(), @@ -700,7 +691,6 @@ kind=NPY.UNSIGNEDLTR, char=NPY.ULONGLTR, w_box_type=space.gettypefor(interp_boxes.W_ULongBox), - aliases=['uint', 'uintp', 'P'], ) self.w_int64dtype = W_Dtype( types.Int64(), @@ -708,7 +698,6 @@ kind=NPY.SIGNEDLTR, char=NPY.LONGLONGLTR, w_box_type=space.gettypefor(interp_boxes.W_Int64Box), - aliases=['longlong'], ) self.w_uint64dtype = W_Dtype( types.UInt64(), @@ -716,7 +705,6 @@ kind=NPY.UNSIGNEDLTR, char=NPY.ULONGLONGLTR, w_box_type=space.gettypefor(interp_boxes.W_UInt64Box), - aliases=['ulonglong'], ) self.w_float32dtype = W_Dtype( types.Float32(), @@ -724,7 +712,6 @@ kind=NPY.FLOATINGLTR, char=NPY.FLOATLTR, w_box_type=space.gettypefor(interp_boxes.W_Float32Box), - aliases=['single'] ) self.w_float64dtype = W_Dtype( types.Float64(), @@ -732,7 +719,6 @@ kind=NPY.FLOATINGLTR, char=NPY.DOUBLELTR, w_box_type=space.gettypefor(interp_boxes.W_Float64Box), - aliases=["float", "double"], ) self.w_floatlongdtype = W_Dtype( types.FloatLong(), @@ -740,7 +726,6 @@ kind=NPY.FLOATINGLTR, char=NPY.LONGDOUBLELTR, w_box_type=space.gettypefor(interp_boxes.W_FloatLongBox), - aliases=["longdouble", "longfloat"], ) self.w_complex64dtype = W_Dtype( types.Complex64(), @@ -748,7 +733,6 @@ kind=NPY.COMPLEXLTR, char=NPY.CFLOATLTR, w_box_type=space.gettypefor(interp_boxes.W_Complex64Box), - aliases=['csingle'], ) self.w_complex128dtype = W_Dtype( types.Complex128(), @@ -756,7 +740,6 @@ kind=NPY.COMPLEXLTR, char=NPY.CDOUBLELTR, w_box_type=space.gettypefor(interp_boxes.W_Complex128Box), - aliases=["complex", 'cfloat', 'cdouble'], ) self.w_complexlongdtype = W_Dtype( types.ComplexLong(), @@ -764,7 +747,6 @@ kind=NPY.COMPLEXLTR, char=NPY.CLONGDOUBLELTR, w_box_type=space.gettypefor(interp_boxes.W_ComplexLongBox), - aliases=["clongdouble", "clongfloat"], ) self.w_stringdtype = W_Dtype( types.StringType(), @@ -773,7 +755,6 @@ kind=NPY.STRINGLTR, char=NPY.STRINGLTR, w_box_type=space.gettypefor(interp_boxes.W_StringBox), - aliases=['string', "str"], ) self.w_unicodedtype = W_Dtype( types.UnicodeType(), @@ -782,7 +763,6 @@ kind=NPY.UNICODELTR, char=NPY.UNICODELTR, w_box_type=space.gettypefor(interp_boxes.W_UnicodeBox), - aliases=['unicode'], ) self.w_voiddtype = W_Dtype( types.VoidType(), @@ -813,6 +793,25 @@ char=NPY.UINTPLTR, w_box_type=space.gettypefor(interp_boxes.W_ULongBox), ) + self.aliases = { + NPY.BOOL: ['bool', 'bool8'], + NPY.BYTE: ['byte'], + NPY.UBYTE: ['ubyte'], + NPY.SHORT: ['short'], + NPY.USHORT: ['ushort'], + NPY.LONG: ['int', 'intp', 'p'], + NPY.ULONG: ['uint', 'uintp', 'P'], + NPY.LONGLONG: ['longlong'], + NPY.ULONGLONG: ['ulonglong'], + NPY.FLOAT: ['single'], + NPY.DOUBLE: ['float', 'double'], + NPY.LONGDOUBLE: ['longdouble', 'longfloat'], + 
NPY.CFLOAT: ['csingle'], + NPY.CDOUBLE: ['complex', 'cfloat', 'cdouble'], + NPY.CLONGDOUBLE: ['clongdouble', 'clongfloat'], + NPY.STRING: ['string', 'str'], + NPY.UNICODE: ['unicode'], + } self.alternate_constructors = { NPY.BOOL: [space.w_bool], NPY.LONG: [space.w_int, @@ -866,8 +865,9 @@ self.dtypes_by_name[NPY.NATBYTE + can_name] = dtype self.dtypes_by_name[NPY.NATIVE + can_name] = dtype self.dtypes_by_name[NPY.IGNORE + can_name] = dtype - for alias in dtype.aliases: - self.dtypes_by_name[alias] = dtype + if dtype.num in self.aliases: + for alias in self.aliases[dtype.num]: + self.dtypes_by_name[alias] = dtype typeinfo_full = { 'LONGLONG': self.w_int64dtype, From noreply at buildbot.pypy.org Tue Feb 25 05:18:09 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 25 Feb 2014 05:18:09 +0100 (CET) Subject: [pypy-commit] pypy default: kill alternate_constructors attr on dtypes Message-ID: <20140225041809.52E661C0907@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69391:0f2dba1d7aea Date: 2014-02-24 23:00 -0500 http://bitbucket.org/pypy/pypy/changeset/0f2dba1d7aea/ Log: kill alternate_constructors attr on dtypes diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -41,13 +41,13 @@ "num", "kind", "char", "w_box_type", "itemtype?", "byteorder?", "names?", "fields?", "elsize?", "alignment?", "shape?", "subdtype?", "base?", - "alternate_constructors", "aliases", + "aliases", ] def __init__(self, itemtype, num, kind, char, w_box_type, byteorder=None, names=[], fields={}, elsize=None, shape=[], subdtype=None, - alternate_constructors=[], aliases=[]): + aliases=[]): self.itemtype = itemtype self.num = num self.kind = kind @@ -71,7 +71,6 @@ self.base = self else: self.base = subdtype.base - self.alternate_constructors = alternate_constructors self.aliases = aliases def __repr__(self): @@ -524,7 +523,8 @@ elif space.isinstance_w(w_dtype, space.w_dict): return dtype_from_dict(space, w_dtype) for dtype in cache.builtin_dtypes: - if w_dtype in dtype.alternate_constructors: + if dtype.num in cache.alternate_constructors and \ + w_dtype in cache.alternate_constructors[dtype.num]: return dtype if w_dtype is dtype.w_box_type: return dtype @@ -638,7 +638,6 @@ kind=NPY.GENBOOLLTR, char=NPY.BOOLLTR, w_box_type=space.gettypefor(interp_boxes.W_BoolBox), - alternate_constructors=[space.w_bool], aliases=['bool', 'bool8'], ) self.w_int8dtype = W_Dtype( @@ -693,10 +692,6 @@ kind=NPY.SIGNEDLTR, char=NPY.LONGLTR, w_box_type=space.gettypefor(interp_boxes.W_LongBox), - alternate_constructors=[space.w_int, - space.gettypefor(interp_boxes.W_IntegerBox), - space.gettypefor(interp_boxes.W_SignedIntegerBox), - ], aliases=['int', 'intp', 'p'], ) self.w_ulongdtype = W_Dtype( @@ -705,8 +700,6 @@ kind=NPY.UNSIGNEDLTR, char=NPY.ULONGLTR, w_box_type=space.gettypefor(interp_boxes.W_ULongBox), - alternate_constructors=[space.gettypefor(interp_boxes.W_UnsignedIntegerBox), - ], aliases=['uint', 'uintp', 'P'], ) self.w_int64dtype = W_Dtype( @@ -715,7 +708,6 @@ kind=NPY.SIGNEDLTR, char=NPY.LONGLONGLTR, w_box_type=space.gettypefor(interp_boxes.W_Int64Box), - alternate_constructors=[space.w_long], aliases=['longlong'], ) self.w_uint64dtype = W_Dtype( @@ -740,10 +732,6 @@ kind=NPY.FLOATINGLTR, char=NPY.DOUBLELTR, w_box_type=space.gettypefor(interp_boxes.W_Float64Box), - alternate_constructors=[space.w_float, - space.gettypefor(interp_boxes.W_NumberBox), - 
space.gettypefor(interp_boxes.W_FloatingBox), - ], aliases=["float", "double"], ) self.w_floatlongdtype = W_Dtype( @@ -768,8 +756,6 @@ kind=NPY.COMPLEXLTR, char=NPY.CDOUBLELTR, w_box_type=space.gettypefor(interp_boxes.W_Complex128Box), - alternate_constructors=[space.w_complex, - space.gettypefor(interp_boxes.W_ComplexFloatingBox)], aliases=["complex", 'cfloat', 'cdouble'], ) self.w_complexlongdtype = W_Dtype( @@ -787,8 +773,6 @@ kind=NPY.STRINGLTR, char=NPY.STRINGLTR, w_box_type=space.gettypefor(interp_boxes.W_StringBox), - alternate_constructors=[space.w_str, - space.gettypefor(interp_boxes.W_CharacterBox)], aliases=['string', "str"], ) self.w_unicodedtype = W_Dtype( @@ -798,7 +782,6 @@ kind=NPY.UNICODELTR, char=NPY.UNICODELTR, w_box_type=space.gettypefor(interp_boxes.W_UnicodeBox), - alternate_constructors=[space.w_unicode], aliases=['unicode'], ) self.w_voiddtype = W_Dtype( @@ -808,10 +791,6 @@ kind=NPY.VOIDLTR, char=NPY.VOIDLTR, w_box_type=space.gettypefor(interp_boxes.W_VoidBox), - #alternate_constructors=[space.w_buffer], - # XXX no buffer in space - #alternate_constructors=[space.gettypefor(interp_boxes.W_GenericBox)], - # XXX fix, leads to _coerce error ) self.w_float16dtype = W_Dtype( types.Float16(), @@ -834,6 +813,26 @@ char=NPY.UINTPLTR, w_box_type=space.gettypefor(interp_boxes.W_ULongBox), ) + self.alternate_constructors = { + NPY.BOOL: [space.w_bool], + NPY.LONG: [space.w_int, + space.gettypefor(interp_boxes.W_IntegerBox), + space.gettypefor(interp_boxes.W_SignedIntegerBox)], + NPY.ULONG: [space.gettypefor(interp_boxes.W_UnsignedIntegerBox)], + NPY.LONGLONG: [space.w_long], + NPY.DOUBLE: [space.w_float, + space.gettypefor(interp_boxes.W_NumberBox), + space.gettypefor(interp_boxes.W_FloatingBox)], + NPY.CDOUBLE: [space.w_complex, + space.gettypefor(interp_boxes.W_ComplexFloatingBox)], + NPY.STRING: [space.w_str, + space.gettypefor(interp_boxes.W_CharacterBox)], + NPY.UNICODE: [space.w_unicode], + NPY.VOID: [], #space.w_buffer, + # XXX no buffer in space + #space.gettypefor(interp_boxes.W_GenericBox), + # XXX fix, leads to _coerce error + } float_dtypes = [self.w_float16dtype, self.w_float32dtype, self.w_float64dtype, self.w_floatlongdtype] complex_dtypes = [self.w_complex64dtype, self.w_complex128dtype, From noreply at buildbot.pypy.org Tue Feb 25 05:24:30 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 25 Feb 2014 05:24:30 +0100 (CET) Subject: [pypy-commit] pypy default: small cleanups Message-ID: <20140225042430.D1E6D1C10A8@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69394:c7fe32c610e0 Date: 2014-02-24 23:21 -0500 http://bitbucket.org/pypy/pypy/changeset/c7fe32c610e0/ Log: small cleanups diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -38,8 +38,8 @@ class W_Dtype(W_Root): _immutable_fields_ = [ - "num", "kind", "char", "w_box_type", - "itemtype?", "byteorder?", "names?", "fields?", "elsize?", "alignment?", + "itemtype?", "num", "kind", "char", "w_box_type", + "byteorder?", "names?", "fields?", "elsize?", "alignment?", "shape?", "subdtype?", "base?", ] @@ -793,7 +793,7 @@ char=NPY.UINTPLTR, w_box_type=space.gettypefor(interp_boxes.W_ULongBox), ) - self.aliases = { + aliases = { NPY.BOOL: ['bool', 'bool8'], NPY.BYTE: ['byte'], NPY.UBYTE: ['ubyte'], @@ -863,8 +863,8 @@ self.dtypes_by_name[NPY.NATBYTE + can_name] = dtype self.dtypes_by_name[NPY.NATIVE + can_name] = dtype self.dtypes_by_name[NPY.IGNORE + 
can_name] = dtype - if dtype.num in self.aliases: - for alias in self.aliases[dtype.num]: + if dtype.num in aliases: + for alias in aliases[dtype.num]: self.dtypes_by_name[alias] = dtype typeinfo_full = { From noreply at buildbot.pypy.org Tue Feb 25 05:32:21 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 05:32:21 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Fix. Message-ID: <20140225043221.83ACF1C303A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69395:a62416f087b4 Date: 2014-02-25 05:29 +0100 http://bitbucket.org/pypy/pypy/changeset/a62416f087b4/ Log: Fix. diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -288,7 +288,7 @@ if isinstance(w_obj, W_ComplexObject): return w_obj if space.isinstance_w(w_obj, space.w_int): - return W_ComplexObject(space.int_w(w_obj), 0.0) + return W_ComplexObject(float(space.int_w(w_obj)), 0.0) if space.isinstance_w(w_obj, space.w_long): return W_ComplexObject(space.float_w(w_obj), 0.0) if space.isinstance_w(w_obj, space.w_float): diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -366,7 +366,7 @@ if isinstance(w_obj, W_FloatObject): return w_obj if space.isinstance_w(w_obj, space.w_int): - return W_FloatObject(space.int_w(w_obj)) + return W_FloatObject(float(space.int_w(w_obj))) if space.isinstance_w(w_obj, space.w_long): return W_FloatObject(space.float_w(w_obj)) From noreply at buildbot.pypy.org Tue Feb 25 05:32:22 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 05:32:22 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: Start a branch to kill the multimethod machinery. It's separate from remove-remaining-smm to ease reviewing. Message-ID: <20140225043222.A8F261C303A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69396:edfc1ea4a4d8 Date: 2014-02-25 04:59 +0100 http://bitbucket.org/pypy/pypy/changeset/edfc1ea4a4d8/ Log: Start a branch to kill the multimethod machinery. It's separate from remove-remaining-smm to ease reviewing. From noreply at buildbot.pypy.org Tue Feb 25 05:32:23 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 05:32:23 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: Kill register_all(). Message-ID: <20140225043223.D2E3A1C303A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69397:7e3079dd2c8d Date: 2014-02-25 05:01 +0100 http://bitbucket.org/pypy/pypy/changeset/7e3079dd2c8d/ Log: Kill register_all(). diff --git a/pypy/objspace/std/register_all.py b/pypy/objspace/std/register_all.py deleted file mode 100644 --- a/pypy/objspace/std/register_all.py +++ /dev/null @@ -1,59 +0,0 @@ -from pypy.objspace.std import model, stdtypedef - -_name_mappings = { - 'and': 'and_', - 'or': 'or_', - } - -def register_all(module_dict, *alt_ns): - """register implementations for multimethods. - - By default a (name, object) pair of the given module dictionary - is registered on the multimethod 'name' of StdObjSpace. - If the name doesn't exist then the alternative namespace is tried - for registration. 
- """ - namespaces = list(alt_ns) + [model.MM] - - for name, obj in module_dict.items(): - if name.startswith('app_'): - print "%s: direct app definitions deprecated" % name - if name.find('__')<1 or name.startswith('app_') or name.startswith('descr_'): - continue - funcname, sig = name.split('__') - l = [] - for i in sig.split('_'): - if i == 'ANY': # just in case W_ANY is not in module_dict - icls = model.W_ANY - elif i == 'Object': # just in case W_Object is not in module_dict - icls = model.W_Object - else: - icls = (module_dict.get('W_%s' % i) or - module_dict.get('W_%sObject' % i)) - if icls is None: - x = module_dict.get(i) - if isinstance(x, stdtypedef.StdTypeDef): - icls = x.any - if icls is None: - raise ValueError, \ - "no W_%s or W_%sObject for the definition of %s" % ( - i, i, name) - l.append(icls) - funcname = _name_mappings.get(funcname, funcname) - - func = hack_func_by_name(funcname, namespaces) - func.register(obj, *l) - - model.add_extra_comparisons() - - -def hack_func_by_name(funcname, namespaces): - for ns in namespaces: - if isinstance(ns, dict): - if funcname in ns: - return ns[funcname] - else: - if hasattr(ns, funcname): - return getattr(ns, funcname) - raise NameError, ("trying hard but not finding a multimethod named %s" % - funcname) From noreply at buildbot.pypy.org Tue Feb 25 05:32:24 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 05:32:24 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: hg merge remove-remaining-smm Message-ID: <20140225043224.EFF521C303A@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69398:d30533ef4715 Date: 2014-02-25 05:31 +0100 http://bitbucket.org/pypy/pypy/changeset/d30533ef4715/ Log: hg merge remove-remaining-smm diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -288,7 +288,7 @@ if isinstance(w_obj, W_ComplexObject): return w_obj if space.isinstance_w(w_obj, space.w_int): - return W_ComplexObject(space.int_w(w_obj), 0.0) + return W_ComplexObject(float(space.int_w(w_obj)), 0.0) if space.isinstance_w(w_obj, space.w_long): return W_ComplexObject(space.float_w(w_obj), 0.0) if space.isinstance_w(w_obj, space.w_float): diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -366,7 +366,7 @@ if isinstance(w_obj, W_FloatObject): return w_obj if space.isinstance_w(w_obj, space.w_int): - return W_FloatObject(space.int_w(w_obj)) + return W_FloatObject(float(space.int_w(w_obj))) if space.isinstance_w(w_obj, space.w_long): return W_FloatObject(space.float_w(w_obj)) From noreply at buildbot.pypy.org Tue Feb 25 06:00:26 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 06:00:26 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: Fix comment in floatobject.py. Message-ID: <20140225050026.4EDB51C3599@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69399:78d85c08ebe1 Date: 2014-02-25 05:37 +0100 http://bitbucket.org/pypy/pypy/changeset/78d85c08ebe1/ Log: Fix comment in floatobject.py. 
diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -547,7 +547,7 @@ @unwrap_spec(w_third_arg=WrappedDefault(None)) def descr_pow(self, space, w_rhs, w_third_arg): - # This raises FailedToImplement in cases like overflow where a + # This returns space.w_NotImplemented in cases like overflow where a # (purely theoretical) big-precision float implementation would have # a chance to give a result, and directly OperationError for errors # that we want to force to be reported to the user. From noreply at buildbot.pypy.org Tue Feb 25 06:00:27 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 06:00:27 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: Remove pypy.objspace.std.multimethod and make tests pass again. Message-ID: <20140225050027.9699A1C3599@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69400:0f3b67d218ae Date: 2014-02-25 05:38 +0100 http://bitbucket.org/pypy/pypy/changeset/0f3b67d218ae/ Log: Remove pypy.objspace.std.multimethod and make tests pass again. diff --git a/pypy/objspace/std/builtinshortcut.py b/pypy/objspace/std/builtinshortcut.py --- a/pypy/objspace/std/builtinshortcut.py +++ b/pypy/objspace/std/builtinshortcut.py @@ -1,7 +1,6 @@ from pypy.interpreter.baseobjspace import ObjSpace from pypy.interpreter.error import OperationError from pypy.objspace.descroperation import DescrOperation -from pypy.objspace.std.multimethod import FailedToImplement from pypy.objspace.std.boolobject import W_BoolObject from rpython.tool.sourcetools import func_with_new_name diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -3,7 +3,6 @@ to provide in this version of PyPy, along with conversion rules. """ -from pypy.objspace.std.multimethod import MultiMethodTable, FailedToImplement from pypy.interpreter.baseobjspace import W_Root, ObjSpace import pypy.interpreter.pycode import pypy.interpreter.special @@ -266,88 +265,3 @@ class UnwrapError(Exception): pass - - -class StdObjSpaceMultiMethod(MultiMethodTable): - - def __init__(self, operatorsymbol, arity, specialnames=None, **extras): - """NOT_RPYTHON: cannot create new multimethods dynamically. - """ - MultiMethodTable.__init__(self, arity, W_ANY, - argnames_before = ['space']) - self.operatorsymbol = operatorsymbol - if specialnames is None: - specialnames = [operatorsymbol] - assert isinstance(specialnames, list) - self.specialnames = specialnames # e.g. ['__xxx__', '__rxxx__'] - self.extras = extras - # transform '+' => 'add' etc. - for line in ObjSpace.MethodTable: - realname, symbolname = line[:2] - if symbolname == operatorsymbol: - self.name = realname - break - else: - self.name = operatorsymbol - - if extras.get('general__args__', False): - self.argnames_after = ['__args__'] - if extras.get('varargs_w', False): - self.argnames_after = ['args_w'] - self.argnames_after += extras.get('extra_args', []) - - def install_not_sliced(self, typeorder, baked_perform_call=True): - return self.install(prefix = '__mm_' + self.name, - list_of_typeorders = [typeorder]*self.arity, - baked_perform_call=baked_perform_call) - - def merge_with(self, other): - # Make a new 'merged' multimethod including the union of the two - # tables. In case of conflict, pick the entry from 'self'. 
- if self.arity != other.arity: - return self # XXX that's the case of '**' - operatorsymbol = '%s_merge_%s' % (self.name, other.name) - assert self.extras == other.extras - mm = StdObjSpaceMultiMethod(operatorsymbol, self.arity, **self.extras) - # - def merge(node1, node2): - assert type(node1) is type(node2) - if isinstance(node1, dict): - d = node1.copy() - d.update(node2) - for key in node1: - if key in node2: - d[key] = merge(node1[key], node2[key]) - return d - else: - assert isinstance(node1, list) - assert node1 - return node1 # pick the entry from 'self' - # - mm.dispatch_tree = merge(self.dispatch_tree, other.dispatch_tree) - return mm - -NOT_MULTIMETHODS = set( - ['delattr', 'delete', 'get', 'id', 'inplace_div', 'inplace_floordiv', - 'inplace_lshift', 'inplace_mod', 'inplace_pow', 'inplace_rshift', - 'inplace_truediv', 'is_', 'set', 'setattr', 'type', 'userdel', - 'isinstance', 'issubtype', 'int', 'ord']) -# XXX should we just remove those from the method table or we're happy -# with just not having multimethods? - -class MM: - """StdObjSpace multimethods""" - - call = StdObjSpaceMultiMethod('call', 1, ['__call__'], - general__args__=True) - init = StdObjSpaceMultiMethod('__init__', 1, general__args__=True) - getnewargs = StdObjSpaceMultiMethod('__getnewargs__', 1) - - # add all regular multimethods here - for _name, _symbol, _arity, _specialnames in ObjSpace.MethodTable: - if _name not in locals() and _name not in NOT_MULTIMETHODS: - mm = StdObjSpaceMultiMethod(_symbol, _arity, _specialnames) - locals()[_name] = mm - del mm - - pow.extras['defaults'] = (None,) diff --git a/pypy/objspace/std/multimethod.py b/pypy/objspace/std/multimethod.py deleted file mode 100644 --- a/pypy/objspace/std/multimethod.py +++ /dev/null @@ -1,973 +0,0 @@ - -from rpython.tool.sourcetools import compile2 - -# This provide two compatible implementations of "multimethods". A -# multimethod is a callable object which chooses and calls a real -# function from a table of pre-registered functions. The choice depends -# on the '__class__' of all arguments. For example usages see -# test_multimethod. - -# These multimethods support delegation: for each class A we must -# provide a "typeorder", which is list of pairs (B, converter) where B -# is a class and 'converter' is a function that can convert from an -# instance of A to an instance of B. If 'converter' is None it is -# assumed that the instance needs no conversion. The first entry in the -# typeorder of a class A must almost always be (A, None). - -# A slightly non-standard feature of PyPy's multimethods is the way in -# which they interact with normal subclassing. Basically, they don't. -# Suppose that A is a parent class of B. Then a function registered for -# an argument class A only accepts an instance whose __class__ is A, not -# B. To make it accept an instance of B, the typeorder for B must -# contain (A, None). An exception to this strict rule is if C is -# another subclass of A which is not mentioned at all in the typeorder; -# in this case C is considered to be equivalent to A. - - -class FailedToImplement(Exception): - def __new__(cls, *args): - if cls is FailedToImplement: - assert not args, "use FailedToImplementArgs!" 
- return Exception.__new__(cls, *args) - - def get_w_value(self, space): - return None - - def get_w_type(self, space): - return None - - def __str__(self): - return '' - -class FailedToImplementArgs(FailedToImplement): - def __init__(self, w_type=None, w_value=None): - self.w_type = w_type - self.w_value = w_value - - def get_w_value(self, space): - # convenience: same semantics as with OperationError - return self.w_value - - def get_w_type(self, space): - return self.w_type - - def __str__(self): - return '' % (self.w_type, self.w_value) - - - -def raiseFailedToImplement(): - raise FailedToImplement - - -class MultiMethodTable: - - def __init__(self, arity, root_class, argnames_before=[], argnames_after=[]): - """NOT_RPYTHON: cannot create new multimethods dynamically. - MultiMethod-maker dispatching on exactly 'arity' arguments. - """ - if arity < 1: - raise ValueError, "multimethods cannot dispatch on nothing" - self.arity = arity - self.root_class = root_class - self.dispatch_tree = {} - self.argnames_before = list(argnames_before) - self.argnames_after = list(argnames_after) - - def register(self, function, *types, **kwds): - assert len(types) == self.arity - assert kwds.keys() == [] or kwds.keys() == ['order'] - order = kwds.get('order', 0) - node = self.dispatch_tree - for type in types[:-1]: - node = node.setdefault(type, {}) - lst = node.setdefault(types[-1], []) - if order >= len(lst): - lst += [None] * (order+1 - len(lst)) - assert lst[order] is None, "duplicate function for %r@%d" % ( - types, order) - lst[order] = function - - def install(self, prefix, list_of_typeorders, baked_perform_call=True, - base_typeorder=None, installercls=None): - "NOT_RPYTHON: initialization-time only" - assert len(list_of_typeorders) == self.arity - installercls = installercls or Installer - installer = installercls(self, prefix, list_of_typeorders, - baked_perform_call=baked_perform_call, - base_typeorder=base_typeorder) - return installer.install() - - def install_if_not_empty(self, prefix, list_of_typeorders, - base_typeorder=None, installercls=None): - "NOT_RPYTHON: initialization-time only" - assert len(list_of_typeorders) == self.arity - installercls = installercls or Installer - installer = installercls(self, prefix, list_of_typeorders, - base_typeorder=base_typeorder) - if installer.is_empty(): - return None - else: - return installer.install() - - - - # ____________________________________________________________ - # limited dict-like interface to the dispatch table - - def getfunctions(self, types): - assert len(types) == self.arity - node = self.dispatch_tree - for type in types: - node = node[type] - return [fn for fn in node if fn is not None] - - def has_signature(self, types): - try: - self.getfunctions(types) - except KeyError: - return False - else: - return True - - def signatures(self): - """NOT_RPYTHON""" - result = [] - def enum_keys(types_so_far, node): - for type, subnode in node.items(): - next_types = types_so_far+(type,) - if isinstance(subnode, dict): - enum_keys(next_types, subnode) - else: - assert len(next_types) == self.arity - result.append(next_types) - enum_keys((), self.dispatch_tree) - return result - -# ____________________________________________________________ -# Installer version 1 - -class InstallerVersion1: - """NOT_RPYTHON""" - - instance_counter = 0 - - mmfunccache = {} - - prefix_memo = {} - - def __init__(self, multimethod, prefix, list_of_typeorders, - baked_perform_call=True, base_typeorder=None): - self.__class__.instance_counter += 1 - 
self.multimethod = multimethod - # avoid prefix clashes, user code should supply different prefixes - # itself for nice names in tracebacks - base_prefix = prefix - n = 1 - while prefix in self.prefix_memo: - n += 1 - prefix = "%s%d" % (base_prefix, n) - self.prefix = prefix - self.prefix_memo[prefix] = 1 - self.list_of_typeorders = list_of_typeorders - self.check_typeorders() - self.subtree_cache = {} - self.to_install = [] - self.non_empty = self.build_tree([], multimethod.dispatch_tree) - - self.baked_perform_call = baked_perform_call - - if self.non_empty: - perform = [(None, prefix, 0)] - else: - perform = [] - - self.perform_call = self.build_function(None, prefix+'_perform_call', - None, perform) - - def check_typeorders(self): - # xxx we use a '__'-separated list of the '__name__' of the types - # in build_single_method(), so types with the same __name__ or - # with '__' in them would obscurely break this logic - for typeorder in self.list_of_typeorders: - for type in typeorder: - assert '__' not in type.__name__, ( - "avoid '__' in the name of %r" % (type,)) - names = dict.fromkeys([type.__name__ for type in typeorder]) - assert len(names) == len(typeorder), ( - "duplicate type.__name__ in %r" % (typeorder,)) - - def is_empty(self): - return not self.non_empty - - def install(self): - #f = open('LOGFILE', 'a') - #print >> f, '_'*60 - #import pprint - #pprint.pprint(self.list_of_typeorders, f) - - def class_key(cls): - "Returns an object such that class_key(subcls) > class_key(cls)." - return len(cls.__mro__) - - # Sort 'to_install' so that base classes come first, which is - # necessary for the 'parentfunc' logic in the loop below to work. - # Moreover, 'to_install' can contain two functions with the same - # name for the root class: the default fallback one and the real - # one. So we have to sort the real one just after the default one - # so that the default one gets overridden. - def key(target, funcname, func, source, fallback): - if target is None: - return () - return (class_key(target), not fallback) - self.to_install.sort(lambda a, b: cmp(key(*a), key(*b))) - - for target, funcname, func, source, fallback in self.to_install: - if target is not None: - # If the parent class provides a method of the same - # name which is actually the same 'func', we don't need - # to install it again. Useful with fallback functions. - parentfunc = getattr(target, funcname, None) - parentfunc = getattr(parentfunc, 'im_func', None) - if parentfunc is func: - continue - #print >> f, target.__name__, funcname - #if source: - # print >> f, source - #else: - # print >> f, '*\n' - setattr(target, funcname, func) - #f.close() - return self.perform_call - - def build_tree(self, types_so_far, dispatch_node): - key = tuple(types_so_far) - if key in self.subtree_cache: - return self.subtree_cache[key] - non_empty = False - typeorder = self.list_of_typeorders[len(types_so_far)] - for next_type in typeorder: - if self.build_single_method(typeorder, types_so_far, next_type, - dispatch_node): - non_empty = True - self.subtree_cache[key] = non_empty - return non_empty - - def build_single_method(self, typeorder, types_so_far, next_type, - dispatch_node): - funcname = '__'.join([self.prefix] + [t.__name__ for t in types_so_far]) - - order = typeorder[next_type] - #order = [(next_type, None)] + order - - things_to_call = [] - for type, conversion in order: - if type not in dispatch_node: - # there is no possible completion of types_so_far+[type] - # that could lead to a registered function. 
- continue - match = dispatch_node[type] - if isinstance(match, dict): - if self.build_tree(types_so_far+[type], match): - call = funcname + '__' + type.__name__ - call_selfarg_index = len(types_so_far) + 1 - things_to_call.append((conversion, call, - call_selfarg_index)) - else: - for func in match: # list of functions - if func is not None: - things_to_call.append((conversion, func, None)) - - funcname = intern(funcname) - self.build_function(next_type, funcname, len(types_so_far), - things_to_call) - return bool(things_to_call) - - def build_function(self, target, funcname, func_selfarg_index, - things_to_call): - # support for inventing names for the entries in things_to_call - # which are real function objects instead of strings - miniglobals = {'FailedToImplement': FailedToImplement, '__name__': __name__} - def invent_name(obj): - if isinstance(obj, str): - return obj - name = obj.__name__ - n = 1 - while name in miniglobals: - n += 1 - name = '%s%d' % (obj.__name__, n) - miniglobals[name] = obj - return name - - funcargs = ['arg%d' % i for i in range(self.multimethod.arity)] - - bodylines = [] - for conversion, call, call_selfarg_index in things_to_call: - callargs = funcargs[:] - if conversion is not None: - to_convert = func_selfarg_index - convert_callargs = (self.multimethod.argnames_before + - [callargs[to_convert]]) - callargs[to_convert] = '%s(%s)' % ( - invent_name(conversion), ', '.join(convert_callargs)) - callname = invent_name(call) - if call_selfarg_index is not None: - # fallback on root_class - self.build_function(self.multimethod.root_class, - callname, call_selfarg_index, []) - callname = '%s.%s' % (callargs.pop(call_selfarg_index), callname) - callargs = (self.multimethod.argnames_before + - callargs + self.multimethod.argnames_after) - bodylines.append('return %s(%s)' % (callname, ', '.join(callargs))) - - fallback = False - if not bodylines: - miniglobals['raiseFailedToImplement'] = raiseFailedToImplement - bodylines = ['return raiseFailedToImplement()'] - fallback = True - # NB. make sure that there is only one fallback function object, - # i.e. the key used in the mmfunccache below is always the same - # for all functions with the same name and an empty bodylines. 
- - # protect all lines apart from the last one by a try:except: - for i in range(len(bodylines)-2, -1, -1): - bodylines[i:i+1] = ['try:', - ' ' + bodylines[i], - 'except FailedToImplement:', - ' pass'] - - if func_selfarg_index is not None: - selfargs = [funcargs.pop(func_selfarg_index)] - else: - selfargs = [] - funcargs = (selfargs + self.multimethod.argnames_before + - funcargs + self.multimethod.argnames_after) - - if target is None and not self.baked_perform_call: - return funcargs, bodylines[0][len('return '):], miniglobals, fallback - - # indent mode - bodylines = [' ' + line for line in bodylines] - - bodylines.insert(0, 'def %s(%s):' % (funcname, ', '.join(funcargs))) - bodylines.append('') - source = '\n'.join(bodylines) - - # XXX find a better place (or way) to avoid duplicate functions - l = miniglobals.items() - l.sort() - l = tuple(l) - key = (source, l) - try: - func = self.mmfunccache[key] - except KeyError: - exec compile2(source) in miniglobals - func = miniglobals[funcname] - self.mmfunccache[key] = func - #else: - # print "avoided duplicate function", func - self.to_install.append((target, funcname, func, source, fallback)) - return func - -# ____________________________________________________________ -# Installer version 2 - -class MMDispatcher(object): - """NOT_RPYTHON - Explicit dispatcher class. The __call__ and dispatch() methods - are only present for documentation purposes. The InstallerVersion2 - uses the expressions() method to precompute fast RPython-friendly - dispatch tables. - """ - _revcache = None - - def __init__(self, multimethod, list_of_typeorders): - self.multimethod = multimethod - self.list_of_typeorders = list_of_typeorders - - def __call__(self, *args): - # for testing only: this is slow - i = len(self.multimethod.argnames_before) - j = i + self.multimethod.arity - k = j + len(self.multimethod.argnames_after) - assert len(args) == k - prefixargs = args[:i] - dispatchargs = args[i:j] - suffixargs = args[j:] - return self.dispatch([x.__class__ for x in dispatchargs], - prefixargs, - dispatchargs, - suffixargs) - - def dispatch(self, argtypes, prefixargs, args, suffixargs): - # for testing only: this is slow - def expr(v): - if isinstance(v, Call): - return v.function(*[expr(w) for w in v.arguments]) - else: - return v - # XXX this is incomplete: for each type in argtypes but not - # in the typeorder, we should look for the first base class - # that is in the typeorder. - e = None - for v in self.expressions(argtypes, prefixargs, args, suffixargs): - try: - return expr(v) - except FailedToImplement, e: - pass - else: - raise e or FailedToImplement() - - def expressions(self, argtypes, prefixargs, args, suffixargs): - """Lists the possible expressions that call the appropriate - function for the given argument types. Each expression is a Call - object. The intent is that at run-time the first Call that doesn't - cause FailedToImplement to be raised is the good one. 
- """ - prefixargs = tuple(prefixargs) - suffixargs = tuple(suffixargs) - - def walktree(node, args_so_far): - if isinstance(node, list): - for func in node: - if func is not None: - result.append(Call(func, prefixargs + - args_so_far + - suffixargs)) - else: - index = len(args_so_far) - typeorder = self.list_of_typeorders[index] - next_type = argtypes[index] - for target_type, converter in typeorder[next_type]: - if target_type not in node: - continue - next_arg = args[index] - if converter: - next_arg = Call(converter, prefixargs + (next_arg,)) - walktree(node[target_type], args_so_far + (next_arg,)) - - result = [] - walktree(self.multimethod.dispatch_tree, ()) - return result - - def anychance(self, typesprefix): - # is there any chance that a list of types starting with typesprefix - # could lead to a successful dispatch? - # (START-UP TIME OPTIMIZATION ONLY) - if self._revcache is None: - - def build_tree(types_so_far, dispatch_node): - non_empty = False - typeorder = self.list_of_typeorders[len(types_so_far)] - for next_type in typeorder: - if build_single_method(typeorder, types_so_far, next_type, - dispatch_node): - non_empty = True - if non_empty: - self._revcache[types_so_far] = True - return non_empty - - def build_single_method(typeorder, types_so_far, next_type, - dispatch_node): - order = typeorder[next_type] - things_to_call = False - for type, conversion in order: - if type not in dispatch_node: - # there is no possible completion of - # types_so_far+[type] that could lead to a - # registered function. - continue - match = dispatch_node[type] - if isinstance(match, dict): - if build_tree(types_so_far+(next_type,), match): - things_to_call = True - elif match: - things_to_call = True - return things_to_call - - self._revcache = {} - build_tree((), self.multimethod.dispatch_tree) - return tuple(typesprefix) in self._revcache - - -class Call(object): - """ Represents a call expression. - The arguments may themselves be Call objects. 
- """ - def __init__(self, function, arguments): - self.function = function - self.arguments = arguments - - -class CompressedArray(object): - def __init__(self, null_value): - self.null_value = null_value - self.items = [null_value] - - def ensure_length(self, newlen): - if newlen > len(self.items): - self.items.extend([self.null_value] * (newlen - len(self.items))) - - def insert_subarray(self, array): - # insert the given array of numbers into the indexlist, - # allowing null values to become non-null - if array.count(self.null_value) == len(array): - return 0 - test = 1 - while True: - self.ensure_length(test+len(array)) - for i in xrange(len(array)): - if not (array[i] == self.items[test+i] or - array[i] == self.null_value or - self.items[test+i] == self.null_value): - break - else: - # success - for i in range(len(array)): - if array[i] != self.null_value: - self.items[test+i] = array[i] - return test - test += 1 - - def _freeze_(self): - return True - - -class MRDTable(object): - # Multi-Method Dispatch Using Multiple Row Displacement, - # Candy Pang, Wade Holst, Yuri Leontiev, and Duane Szafron - # University of Alberta, Edmonton AB T6G 2H1 Canada - # can be found on http://web.cs.ualberta.ca/~yuri/publ.htm - - Counter = 0 - - def __init__(self, list_of_types): - self.id = MRDTable.Counter - MRDTable.Counter += 1 - self.list_of_types = list_of_types - self.typenum = dict(zip(list_of_types, range(len(list_of_types)))) - self.attrname = '__mrd%d_typenum' % self.id - for t1, num in self.typenum.items(): - setattr(t1, self.attrname, num) - self.indexarray = CompressedArray(0) - - def get_typenum(self, cls): - return self.typenum[cls] - - def is_anti_range(self, typenums): - # NB. typenums should be sorted. Returns (a, b) if typenums contains - # at least half of all typenums and its complement is range(a, b). - # Returns (None, None) otherwise. Returns (0, 0) if typenums contains - # everything. 
- n = len(self.list_of_types) - if len(typenums) <= n // 2: - return (None, None) - typenums = dict.fromkeys(typenums) - complement = [typenum for typenum in range(n) - if typenum not in typenums] - if not complement: - return (0, 0) - a = min(complement) - b = max(complement) + 1 - if complement == range(a, b): - return (a, b) - else: - return (None, None) - - def normalize_length(self, next_array): - # make sure that the indexarray is not smaller than any funcarray - self.indexarray.ensure_length(len(next_array.items)) - - -def invent_name(miniglobals, obj): - if isinstance(obj, str): - return obj - name = obj.__name__ - n = 1 - while name in miniglobals: - n += 1 - name = '%s%d' % (obj.__name__, n) - miniglobals[name] = obj - return name - - -class FuncEntry(object): - - def __init__(self, bodylines, miniglobals, fallback): - self.body = '\n '.join(bodylines) - self.miniglobals = miniglobals - self.fallback = fallback - self.possiblenames = [] - self.typetree = {} - self._function = None - - def key(self): - lst = self.miniglobals.items() - lst.sort() - return self.body, tuple(lst) - - def get_function_name(self): - # pick a name consistently based on self.possiblenames - length = min([len(parts) for parts in self.possiblenames]) - result = [] - for i in range(length): - choices = {} - for parts in self.possiblenames: - choices[parts[i]] = True - parts = choices.keys() - res = str(len(parts)) - for part in parts: - if type(part) is str: # there is a string at this pos - if '0_fail' in choices: - res = '0_fail' - elif len(parts) == 1: - res = part - break - else: - # only types at this location, try to find a common base - basecls = parts[0] - for cls in parts[1:]: - if issubclass(basecls, cls): - basecls = cls - for cls in parts[1:]: - if not issubclass(cls, basecls): - break # no common base - else: - res = basecls.__name__ - result.append(res) - return '_'.join(result) - - def make_function(self, fnargs, nbargs_before, mrdtable): - if self._function is not None: - return self._function - name = self.get_function_name() - self.compress_typechecks(mrdtable) - checklines = self.generate_typechecks(mrdtable, fnargs[nbargs_before:]) - if not checklines: - body = self.body - else: - checklines.append(self.body) - body = '\n '.join(checklines) - source = 'def %s(%s):\n %s\n' % (name, ', '.join(fnargs), body) - self.debug_dump(source) - exec compile2(source) in self.miniglobals - self._function = self.miniglobals[name] - return self._function - - def debug_dump(self, source): - if 0: # for debugging the generated mm sources - name = self.get_function_name() - f = open('/tmp/mm-source/%s' % name, 'a') - for possiblename in self.possiblenames: - print >> f, '#', - for part in possiblename: - print >> f, getattr(part, '__name__', part), - print >> f - print >> f - print >> f, source - f.close() - - def register_valid_types(self, types): - node = self.typetree - for t1 in types[:-1]: - if node is True: - return - node = node.setdefault(t1, {}) - if node is True: - return - node[types[-1]] = True - - def no_typecheck(self): - self.typetree = True - - def compress_typechecks(self, mrdtable): - def full(node): - if node is True: - return 1 - fulls = 0 - for key, subnode in node.items(): - if full(subnode): - node[key] = True - fulls += 1 - if fulls == types_total: - return 1 - return 0 - - types_total = len(mrdtable.list_of_types) - if full(self.typetree): - self.typetree = True - - def generate_typechecks(self, mrdtable, args): - attrname = mrdtable.attrname - possibletypes = [{} for _ in args] - 
any_type_is_ok = [False for _ in args] - - def generate(node, level=0): - # this generates type-checking code like the following: - # - # _argtypenum = arg1.__typenum - # if _argtypenum == 5: - # ... - # elif _argtypenum == 6 or _argtypenum == 8: - # ... - # else: - # _failedtoimplement = True - # - # or, in the common particular case of an "anti-range", we optimize it to: - # - # _argtypenum = arg1.__typenum - # if _argtypenum < 5 or _argtypenum >= 10: - # ... - # else: - # _failedtoimplement = True - # - result = [] - indent = ' '*level - if node is True: - for i in range(level, len(args)): - any_type_is_ok[i] = True - result.append('%s_failedtoimplement = False' % (indent,)) - return result - if not node: - result.append('%s_failedtoimplement = True' % (indent,)) - return result - result.append('%s_argtypenum = %s.%s' % (indent, args[level], - attrname)) - cases = {} - for key, subnode in node.items(): - possibletypes[level][key] = True - casebody = tuple(generate(subnode, level+1)) - typenum = mrdtable.get_typenum(key) - cases.setdefault(casebody, []).append(typenum) - for casebody, typenums in cases.items(): - typenums.sort() - cases = [(typenums, casebody) - for (casebody, typenums) in cases.items()] - cases.sort() - if len(cases) == 1: - typenums, casebody = cases[0] - a, b = mrdtable.is_anti_range(typenums) - else: - a, b = None, None - keyword = 'if' - for typenums, casebody in cases: - if a is not None: - if b - a == 1: - condition = '_argtypenum != %d' % a - elif b == a: - condition = 'True' - else: - condition = '_argtypenum < %d or _argtypenum >= %d' % ( - a, b) - else: - conditions = ['_argtypenum == %d' % typenum - for typenum in typenums] - condition = ' or '.join(conditions) - result.append('%s%s %s:' % (indent, keyword, condition)) - result.extend(casebody) - keyword = 'elif' - result.append('%selse:' % (indent,)) - result.append('%s _failedtoimplement = True' % (indent,)) - return result - - result = [] - if self.typetree is not True: - result.extend(generate(self.typetree)) - result.append('if _failedtoimplement:') - result.append(' raise FailedToImplement') - for level in range(len(args)): - if not any_type_is_ok[level]: - cls = commonbase(possibletypes[level].keys()) - clsname = invent_name(self.miniglobals, cls) - result.append('assert isinstance(%s, %s)' % (args[level], - clsname)) - return result - - -def commonbase(classlist): - def baseclasses(cls): - result = set([cls]) - for base in cls.__bases__: - if '_mixin_' not in base.__dict__: - result |= baseclasses(base) - return result - - bag = baseclasses(classlist[0]) - for cls in classlist[1:]: - bag &= baseclasses(cls) - _, candidate = max([(len(cls.__mro__), cls) for cls in bag]) - for cls in bag: - assert issubclass(candidate, cls) - return candidate - - -class InstallerVersion2(object): - """NOT_RPYTHON""" - - instance_counter = 0 - mrdtables = {} - - def __init__(self, multimethod, prefix, list_of_typeorders, - baked_perform_call=True, base_typeorder=None): - #print 'InstallerVersion2:', prefix - self.__class__.instance_counter += 1 - self.multimethod = multimethod - self.prefix = prefix - self.list_of_typeorders = list_of_typeorders - self.baked_perform_call = baked_perform_call - self.mmfunccache = {} - args = ['arg%d' % i for i in range(multimethod.arity)] - self.fnargs = (multimethod.argnames_before + args + - multimethod.argnames_after) - - # compute the complete table - base_typeorder = base_typeorder or list_of_typeorders[0] - for typeorder in list_of_typeorders: - for t1 in typeorder: - assert t1 in 
base_typeorder - - lst = list(base_typeorder) - def clskey(cls): - return cls.__mro__[::-1] - lst.sort(lambda cls1, cls2: cmp(clskey(cls1), clskey(cls2))) - key = tuple(lst) - try: - self.mrdtable = self.mrdtables[key] - except KeyError: - self.mrdtable = self.mrdtables[key] = MRDTable(key) - - dispatcher = MMDispatcher(multimethod, list_of_typeorders) - self.table = {} - def buildtable(prefixtypes): - if len(prefixtypes) == multimethod.arity: - calllist = dispatcher.expressions(prefixtypes, - multimethod.argnames_before, - args, - multimethod.argnames_after) - if calllist: - self.table[prefixtypes] = calllist - elif dispatcher.anychance(prefixtypes): - typeorder = list_of_typeorders[len(prefixtypes)] - for t1 in typeorder: - buildtable(prefixtypes + (t1,)) - buildtable(()) - self.dispatcher = dispatcher - - def is_empty(self): - return len(self.table) == 0 - - def install(self): - nskip = len(self.multimethod.argnames_before) - null_entry = self.build_funcentry([self.prefix, '0_fail'], []) - null_entry.no_typecheck() - if self.is_empty(): - return self.answer(null_entry) - - entryarray = CompressedArray(null_entry) - indexarray = self.mrdtable.indexarray - lst = self.mrdtable.list_of_types - indexline = [] - - def compress(typesprefix, typesnum): - if len(typesprefix) == self.multimethod.arity: - calllist = self.table.get(typesprefix, []) - funcname = [self.prefix] - funcname.extend(typesprefix) - entry = self.build_funcentry(funcname, calllist) - entry.register_valid_types(typesprefix) - return entry - elif self.dispatcher.anychance(typesprefix): - flatline = [] - for num1, t1 in enumerate(lst): - item = compress(typesprefix + (t1,), typesnum + (num1,)) - flatline.append(item) - if len(typesprefix) == self.multimethod.arity - 1: - array = entryarray - else: - array = indexarray - return array.insert_subarray(flatline) - else: - return 0 - - master_index = compress((), ()) - - null_func = null_entry.make_function(self.fnargs, nskip, self.mrdtable) - funcarray = CompressedArray(null_func) - # round up the length to a power of 2 - N = 1 - while N < len(entryarray.items): - N *= 2 - funcarray.ensure_length(N) - for i, entry in enumerate(entryarray.items): - func = entry.make_function(self.fnargs, nskip, self.mrdtable) - funcarray.items[i] = func - self.mrdtable.normalize_length(funcarray) - - #print master_index - #print indexarray.items - #print funcarray.items - - attrname = self.mrdtable.attrname - exprfn = "%d" % master_index - for n in range(self.multimethod.arity-1): - exprfn = "indexarray.items[%s + arg%d.%s]" % (exprfn, n, attrname) - n = self.multimethod.arity-1 - exprfn = "funcarray.items[(%s + arg%d.%s) & mmmask]" % (exprfn, n, - attrname) - expr = Call(exprfn, self.fnargs) - entry = self.build_funcentry([self.prefix, '0_perform_call'], - [expr], - indexarray = indexarray, - funcarray = funcarray, - mmmask = N-1) - entry.no_typecheck() - return self.answer(entry) - - def answer(self, entry): - if self.baked_perform_call: - nskip = len(self.multimethod.argnames_before) - return entry.make_function(self.fnargs, nskip, self.mrdtable) - else: - assert entry.body.startswith('return ') - expr = entry.body[len('return '):] - entry.debug_dump(entry.body) - return self.fnargs, expr, entry.miniglobals, entry.fallback - - def build_funcentry(self, funcnameparts, calllist, **extranames): - def expr(v): - if isinstance(v, Call): - return '%s(%s)' % (invent_name(miniglobals, v.function), - ', '.join([expr(w) for w in v.arguments])) - else: - return v - - fallback = len(calllist) == 0 - if 
fallback: - miniglobals = {'raiseFailedToImplement': raiseFailedToImplement} - bodylines = ['return raiseFailedToImplement()'] - else: - miniglobals = {'FailedToImplement': FailedToImplement} - miniglobals.update(extranames) - bodylines = [] - for v in calllist[:-1]: - bodylines.append('try:') - bodylines.append(' return %s' % expr(v)) - bodylines.append('except FailedToImplement:') - bodylines.append(' pass') - bodylines.append('return %s' % expr(calllist[-1])) - - miniglobals['__name__'] = __name__ - entry = FuncEntry(bodylines, miniglobals, fallback) - key = entry.key() - - try: - entry = self.mmfunccache[key] - except KeyError: - self.mmfunccache[key] = entry - entry.possiblenames.append(funcnameparts) - return entry - -# ____________________________________________________________ -# Selection of the version to use - -Installer = InstallerVersion1 # modified by translate.py targetpypystandalone diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -52,8 +52,6 @@ self.UnicodeObjectCls = W_UnicodeObject - self._install_multimethods() - # singletons self.w_None = W_NoneObject.w_None self.w_False = W_BoolObject.w_False @@ -87,39 +85,6 @@ def get_builtin_types(self): return self.builtin_types - def _install_multimethods(self): - """Install all the MultiMethods into the space instance.""" - for name, mm in model.MM.__dict__.items(): - if not isinstance(mm, model.StdObjSpaceMultiMethod): - continue - if not hasattr(self, name): - # int_w, str_w...: these do not return a wrapped object - if name.endswith('_w'): - func = mm.install_not_sliced(self.model.typeorder, - baked_perform_call=True) - else: - unsliced = mm.install_not_sliced(self.model.typeorder, - baked_perform_call=False) - exprargs, expr, miniglobals, fallback = unsliced - func = stdtypedef.make_perform_trampoline('__mm_'+name, - exprargs, expr, - miniglobals, mm) - - boundmethod = types.MethodType(func, self, self.__class__) - setattr(self, name, boundmethod) # store into 'space' instance - elif self.config.objspace.std.builtinshortcut: - if name.startswith('inplace_'): - fallback_name = name[len('inplace_'):] - if fallback_name in ('or', 'and'): - fallback_name += '_' - fallback_mm = model.MM.__dict__[fallback_name] - else: - fallback_mm = None - builtinshortcut.install(self, mm, fallback_mm) - if self.config.objspace.std.builtinshortcut: - builtinshortcut.install_is_true(self, model.MM.nonzero, - model.MM.len) - def createexecutioncontext(self): # add space specific fields to execution context # note that this method must not call space methods that might need an diff --git a/pypy/objspace/std/stdtypedef.py b/pypy/objspace/std/stdtypedef.py --- a/pypy/objspace/std/stdtypedef.py +++ b/pypy/objspace/std/stdtypedef.py @@ -1,14 +1,9 @@ -from pypy.interpreter import gateway, baseobjspace, argument -from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter import baseobjspace from pypy.interpreter.typedef import TypeDef, GetSetProperty, Member from pypy.interpreter.typedef import descr_get_dict, descr_set_dict from pypy.interpreter.typedef import descr_del_dict from pypy.interpreter.baseobjspace import SpaceCache -from pypy.objspace.std import model -from pypy.objspace.std.model import StdObjSpaceMultiMethod -from pypy.objspace.std.multimethod import FailedToImplement from rpython.rlib import jit -from rpython.tool.sourcetools import compile2 __all__ = ['StdTypeDef'] @@ -19,11 +14,6 @@ "NOT_RPYTHON: initialization-time 
only." TypeDef.__init__(self, __name, __base, **rawdict) self.any = type("W_Any"+__name.title(), (baseobjspace.W_Root,), {'typedef': self}) - self.local_multimethods = [] - - def registermethods(self, namespace): - "NOT_RPYTHON: initialization-time only." - self.local_multimethods += hack_out_multimethods(namespace) @jit.unroll_safe def issubtypedef(a, b): @@ -61,17 +51,6 @@ rawdict = typedef.rawdict lazyloaders = {} - if isinstance(typedef, StdTypeDef): - # get all the sliced multimethods - multimethods = slicemultimethods(space, typedef) - for name, loader in multimethods.items(): - if name in rawdict: - # the name specified in the rawdict has priority - continue - assert name not in lazyloaders, ( - 'name clash: %s in %s.lazyloaders' % (name, typedef.name)) - lazyloaders[name] = loader - # compute the bases if typedef is W_ObjectObject.typedef: bases_w = [] @@ -97,190 +76,3 @@ def ready(self, w_type): w_type.ready() - -def hack_out_multimethods(ns): - "NOT_RPYTHON: initialization-time only." - result = [] - seen = {} - for value in ns.itervalues(): - if isinstance(value, StdObjSpaceMultiMethod): - if value.name in seen: - raise Exception("duplicate multimethod name %r" % - (value.name,)) - seen[value.name] = True - result.append(value) - return result - -def is_relevant_for_slice(target_type, typedef): - targettypedef = getattr(target_type, 'typedef', None) - if targettypedef == typedef: - return True - method = getattr(target_type, "is_implementation_for", lambda t: False) - return method(typedef) - -def sliced_typeorders(typeorder, multimethod, typedef, i, local=False): - """NOT_RPYTHON""" - list_of_typeorders = [typeorder] * multimethod.arity - prefix = '_mm_' + multimethod.name - if not local: - # slice - sliced_typeorder = {} - for type, order in typeorder.items(): - thistypedef = getattr(type, 'typedef', None) - if issubtypedef(thistypedef, typedef): - lst = [] - for target_type, conversion in order: - if is_relevant_for_slice(target_type, typedef): - lst.append((target_type, conversion)) - sliced_typeorder[type] = lst - list_of_typeorders[i] = sliced_typeorder - prefix += '_%sS%d' % (typedef.name, i) - else: - prefix = typedef.name +'_mth'+prefix - return prefix, list_of_typeorders - -def _gettypeerrormsg(nbargs): - if nbargs > 1: - plural = 's' - else: - plural = '' - return "unsupported operand type%s for %%s: %s" % ( - plural, ', '.join(["'%T'"] * nbargs)) -_gettypeerrormsg._annspecialcase_ = 'specialize:memo' - -def gettypeerror(space, operatorsymbol, *args_w): - return oefmt(space.w_TypeError, _gettypeerrormsg(len(args_w)), - operatorsymbol, *args_w) - -def make_perform_trampoline(prefix, exprargs, expr, miniglobals, multimethod, selfindex=0, - allow_NotImplemented_results=False): - """NOT_RPYTHON""" - # mess to figure out how to put a gateway around executing expr - argnames = ['_%d'%(i+1) for i in range(multimethod.arity)] - explicit_argnames = multimethod.extras.get('argnames', []) - argnames[len(argnames)-len(explicit_argnames):] = explicit_argnames - solid_arglist = ['w_'+name for name in argnames] - wrapper_arglist = solid_arglist[:] - if multimethod.extras.get('varargs_w', False): - wrapper_arglist.append('args_w') - if multimethod.extras.get('keywords', False): - raise Exception, "no longer supported, use __args__" - if multimethod.extras.get('general__args__', False): - wrapper_arglist.append('__args__') - wrapper_arglist += multimethod.extras.get('extra_args', ()) - - miniglobals.update({ 'OperationError': OperationError, - 'gettypeerror': gettypeerror}) - - 
app_defaults = multimethod.extras.get('defaults', ()) - i = len(argnames) - len(app_defaults) - wrapper_signature = wrapper_arglist[:] - unwrap_spec_kwds = {} - for app_default in app_defaults: - name = wrapper_signature[i] - unwrap_spec_kwds[name] = gateway.WrappedDefault(app_default) - i += 1 - - wrapper_signature.insert(0, wrapper_signature.pop(selfindex)) - wrapper_sig = ', '.join(wrapper_signature) - - src = [] - dest = [] - for wrapper_arg,expr_arg in zip(['space']+wrapper_arglist, exprargs): - if wrapper_arg != expr_arg: - src.append(wrapper_arg) - dest.append(expr_arg) - renaming = ', '.join(dest) +" = "+', '.join(src) - - if allow_NotImplemented_results and (len(multimethod.specialnames) > 1 or - multimethod.name.startswith('inplace_')): - # turn FailedToImplement into NotImplemented - code = """def %s_perform_call(space, %s): - %s - try: - return %s - except FailedToImplement, e: - if e.get_w_type(space) is not None: - raise OperationError(e.w_type, e.get_w_value(space)) - else: - return space.w_NotImplemented -""" % (prefix, wrapper_sig, renaming, expr) - else: - # turn FailedToImplement into nice TypeErrors - code = """def %s_perform_call(space, %s): - %s - try: - w_res = %s - except FailedToImplement, e: - if e.get_w_type(space) is not None: - raise OperationError(e.w_type, e.get_w_value(space)) - else: - raise gettypeerror(space, %r, %s) - if w_res is None: - w_res = space.w_None - return w_res -""" % (prefix, wrapper_sig, renaming, expr, - multimethod.operatorsymbol, ', '.join(solid_arglist)) - exec compile2(code, '', 'exec') in miniglobals - func = miniglobals["%s_perform_call" % prefix] - if unwrap_spec_kwds: - func = gateway.unwrap_spec(**unwrap_spec_kwds)(func) - return func - -def wrap_trampoline_in_gateway(func, methname, multimethod): - """NOT_RPYTHON""" - if 'doc' in multimethod.extras: - func.__doc__ = multimethod.extras['doc'] - return gateway.interp2app(func, app_name=methname) - -def slicemultimethod(space, multimethod, typedef, result, local=False): - """NOT_RPYTHON""" - for i in range(len(multimethod.specialnames)): - methname = multimethod.specialnames[i] - if methname in result: - # conflict between e.g. 
__lt__ and - # __lt__-as-reversed-version-of-__gt__ - loader = result[methname] - if loader.bound_position < i: - continue - - def multimethod_loader(i=i, methname=methname): - """NOT_RPYTHON""" - prefix, list_of_typeorders = sliced_typeorders( - space.model.typeorder, multimethod, typedef, i, local=local) - exprargs, expr, miniglobals, fallback = multimethod.install(prefix, list_of_typeorders, - baked_perform_call=False, - base_typeorder=space.model.typeorder) - if fallback: - return None # skip empty multimethods - trampoline = make_perform_trampoline(prefix, exprargs, expr, miniglobals, - multimethod, i, - allow_NotImplemented_results=True) - gw = wrap_trampoline_in_gateway(trampoline, methname, multimethod) - return space.wrap(gw) - - multimethod_loader.bound_position = i # for the check above - result[methname] = multimethod_loader - -def slicemultimethods(space, typedef): - """NOT_RPYTHON""" - result = {} - # import and slice all multimethods of the MM container - for multimethod in hack_out_multimethods(model.MM.__dict__): - slicemultimethod(space, multimethod, typedef, result) - # import all multimethods defined directly on the type without slicing - for multimethod in typedef.local_multimethods: - slicemultimethod(space, multimethod, typedef, result, local=True) - return result - -def multimethods_defined_on(cls): - """NOT_RPYTHON: enumerate the (multimethod, local_flag) for all the - multimethods that have an implementation whose first typed argument - is 'cls'. - """ - typedef = cls.typedef - for multimethod in hack_out_multimethods(model.MM.__dict__): - if cls in multimethod.dispatch_tree: - yield multimethod, False - for multimethod in typedef.local_multimethods: - yield multimethod, True diff --git a/pypy/objspace/std/test/test_annmm.py b/pypy/objspace/std/test/test_annmm.py deleted file mode 100644 --- a/pypy/objspace/std/test/test_annmm.py +++ /dev/null @@ -1,56 +0,0 @@ -from pypy.objspace.std.multimethod import * -from rpython.annotator.annrpython import RPythonAnnotator - -class W_Root(object): - pass - -class W_Int(W_Root): - pass - -class W_Str(W_Root): - pass - - -str_w = MultiMethodTable(1, root_class=W_Root, argnames_before=['space']) -int_w = MultiMethodTable(1, root_class=W_Root, argnames_before=['space']) - - -def int_w__Int(space, w_x): - assert space == 'space' - assert isinstance(w_x, W_Int) - return 1 - -def str_w__Str(space, w_x): - assert space == 'space' - assert isinstance(w_x, W_Str) - return "string" - -int_w.register(int_w__Int, W_Int) -str_w.register(str_w__Str, W_Str) - - -def setup_module(mod): - typeorder = { - W_Int: [(W_Int, None)], - W_Str: [(W_Str, None)], - } - mod.typeorder = typeorder - mod.str_w1 = str_w.install('__str_w', [typeorder]) - mod.int_w1 = int_w.install('__int_w', [typeorder]) - - -def test_str_w_ann(): - a = RPythonAnnotator() - s1 = a.build_types(str_w1,[str, W_Str]) - s2 = a.build_types(str_w1,[str, W_Root]) - assert s1.knowntype == str - assert s2.knowntype == str - -def test_int_w_ann(): - a = RPythonAnnotator() - s1 = a.build_types(int_w1,[str, W_Int]) - s2 = a.build_types(int_w1,[str, W_Str]) - assert s1.knowntype == int - assert s2.knowntype == int - - diff --git a/pypy/objspace/std/test/test_builtinshortcut.py b/pypy/objspace/std/test/test_builtinshortcut.py --- a/pypy/objspace/std/test/test_builtinshortcut.py +++ b/pypy/objspace/std/test/test_builtinshortcut.py @@ -7,9 +7,6 @@ class AppTestUserObject(test_userobject.AppTestUserObject): spaceconfig = WITH_BUILTINSHORTCUT -class 
AppTestWithMultiMethodVersion2(test_userobject.AppTestWithMultiMethodVersion2): - spaceconfig = WITH_BUILTINSHORTCUT - class AppTestBug: spaceconfig = WITH_BUILTINSHORTCUT diff --git a/pypy/objspace/std/test/test_complexobject.py b/pypy/objspace/std/test/test_complexobject.py --- a/pypy/objspace/std/test/test_complexobject.py +++ b/pypy/objspace/std/test/test_complexobject.py @@ -1,6 +1,5 @@ import py from pypy.objspace.std.complexobject import W_ComplexObject, _split_complex -from pypy.objspace.std.multimethod import FailedToImplement from pypy.objspace.std import StdObjSpace EPS = 1e-9 diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -2,7 +2,6 @@ import py import sys from pypy.objspace.std import intobject as iobj -from pypy.objspace.std.multimethod import FailedToImplement from rpython.rlib.rarithmetic import r_uint, is_valid_int from rpython.rlib.rbigint import rbigint diff --git a/pypy/objspace/std/test/test_multimethod.py b/pypy/objspace/std/test/test_multimethod.py deleted file mode 100644 --- a/pypy/objspace/std/test/test_multimethod.py +++ /dev/null @@ -1,264 +0,0 @@ -from py.test import raises - -from pypy.objspace.std import multimethod -from pypy.objspace.std.multimethod import FailedToImplement -from pypy.objspace.std.multimethod import FailedToImplementArgs - - -class W_Root(object): - pass - -class W_IntObject(W_Root): - pass - -class W_BoolObject(W_Root): - pass - -class W_StringObject(W_Root): - pass - -def delegate_b2i(space, w_x): - assert isinstance(w_x, W_BoolObject) - return W_IntObject() - -def add__Int_Int(space, w_x, w_y): - assert space == 'space' - assert isinstance(w_x, W_IntObject) - assert isinstance(w_y, W_IntObject) - return 'fine' - - -def test_failedtoimplement(): - f = FailedToImplement() - assert f.get_w_type("space") is None - assert f.get_w_value("space") is None - f = FailedToImplementArgs("ab", "cd") - assert f.get_w_type("space") == "ab" - assert f.get_w_value("space") == "cd" - # for testing it's good to get the following behavior: - raises(AssertionError, FailedToImplement, "ab", "cd") - # but the class FailedToImplement should have no __init__ for translation: - assert '__init__' not in FailedToImplement.__dict__ - - -class TestMultiMethod1: - Installer = multimethod.InstallerVersion1 - - def setup_class(cls): - cls.prev_installer = multimethod.Installer - multimethod.Installer = cls.Installer - add = multimethod.MultiMethodTable(2, root_class=W_Root, - argnames_before=['space']) - add.register(add__Int_Int, W_IntObject, W_IntObject) - typeorder = { - W_IntObject: [(W_IntObject, None), (W_Root, None)], - W_BoolObject: [(W_BoolObject, None), (W_IntObject, delegate_b2i), - (W_Root, None)], - W_StringObject: [(W_StringObject, None), (W_Root, None)], - } - cls.typeorder = typeorder - cls.add = add - cls.add1 = staticmethod(add.install('__add', [typeorder, typeorder])) - - def teardown_class(cls): - multimethod.Installer = cls.prev_installer - - def test_simple(self): - space = 'space' - w_x = W_IntObject() - w_y = W_IntObject() - assert self.add1(space, w_x, w_y) == 'fine' - - def test_failtoimplement(self): - space = 'space' - w_x = W_IntObject() - w_s = W_StringObject() - raises(FailedToImplement, "self.add1(space, w_x, w_s)") - raises(FailedToImplement, "self.add1(space, w_s, w_x)") - - def test_delegate(self): - space = 'space' - w_x = W_IntObject() - w_s = W_StringObject() - w_b = W_BoolObject() - 
assert self.add1(space, w_x, w_b) == 'fine' - assert self.add1(space, w_b, w_x) == 'fine' - assert self.add1(space, w_b, w_b) == 'fine' - raises(FailedToImplement, "self.add1(space, w_b, w_s)") - raises(FailedToImplement, "self.add1(space, w_s, w_b)") - - def test_not_baked(self): - typeorder = self.typeorder - add2 = self.add.install('__add2', [typeorder, typeorder], - baked_perform_call=False) - assert add2[0] == ['space', 'arg0', 'arg1'] - if multimethod.Installer is multimethod.InstallerVersion1: - assert add2[1] == 'arg0.__add2(space, arg1)' - assert isinstance(add2[2], dict) - assert not add2[3] - - def test_empty(self): - add3_installer = multimethod.Installer(self.add, '__add3', [{},{}]) - assert add3_installer.is_empty() - if multimethod.Installer is multimethod.InstallerVersion1: - assert len(add3_installer.to_install) == 1 - assert add3_installer.to_install[0][0] is None - - def test_empty_direct(self): - assert not self.add.install_if_not_empty('__add4', [{},{}]) - - def test_empty_not_baked(self): - add5_installer = multimethod.Installer(self.add, '__add5', [{},{}], - baked_perform_call=False) - assert add5_installer.is_empty() - if multimethod.Installer is multimethod.InstallerVersion1: - assert len(add5_installer.to_install) == 0 - add5 = add5_installer.install() - assert add5[0] == ['space', 'arg0', 'arg1'] - assert add5[1] == 'raiseFailedToImplement()' - assert isinstance(add5[2], dict) - assert add5[3] - - def test_mmdispatcher(self): - typeorder = self.typeorder - add2 = multimethod.MMDispatcher(self.add, [typeorder, typeorder]) - space = 'space' - w_x = W_IntObject() - w_s = W_StringObject() - w_b1 = W_BoolObject() - w_b2 = W_BoolObject() - assert add2(space, w_x, w_b1) == 'fine' - assert add2(space, w_b2, w_x) == 'fine' - assert add2(space, w_b1, w_b2) == 'fine' - raises(FailedToImplement, "add2(space, w_b2, w_s)") - raises(FailedToImplement, "add2(space, w_s, w_b1)") - - def test_forbidden_subclasses(self): - mul = multimethod.MultiMethodTable(2, root_class=W_Root, - argnames_before=['space']) - class UserW_StringObject(W_StringObject): - pass - def mul__Int_String(space, w_x, w_y): - assert space == 'space' - assert isinstance(w_x, W_IntObject) - assert isinstance(w_y, W_StringObject) - return 'fine' - mul.register(mul__Int_String, W_IntObject, W_StringObject) - - mul1 = mul.install('__mul1', [self.typeorder, self.typeorder]) - assert mul1('space', W_IntObject(), W_StringObject()) == 'fine' - assert mul1('space', W_IntObject(), UserW_StringObject()) == 'fine' - - ext_typeorder = self.typeorder.copy() - ext_typeorder[UserW_StringObject] = [] - mul2 = mul.install('__mul2', [ext_typeorder, ext_typeorder]) - assert mul2('space', W_IntObject(), W_StringObject()) == 'fine' - raises(FailedToImplement, - mul2, 'baz', W_IntObject(), UserW_StringObject()) - - def test_more_forbidden_subclasses(self): - mul = multimethod.MultiMethodTable(2, root_class=W_Root, - argnames_before=['space']) - class UserW_StringObject(W_StringObject): - pass - def mul__String_String(space, w_x, w_y): - assert space == 'space' - assert isinstance(w_x, W_StringObject) - assert isinstance(w_y, W_StringObject) - return 'fine' - mul.register(mul__String_String, W_StringObject, W_StringObject) - - ext_typeorder = {W_StringObject: [(W_StringObject, None)], - UserW_StringObject: []} - mul2 = mul.install('__mul2', [ext_typeorder, ext_typeorder]) - assert mul2('space', W_StringObject(), W_StringObject()) == 'fine' - raises(FailedToImplement, - mul2, 'baz', W_StringObject(), UserW_StringObject()) - 
raises(FailedToImplement, - mul2, 'baz', UserW_StringObject(), W_StringObject()) - raises(FailedToImplement, - mul2, 'baz', UserW_StringObject(), UserW_StringObject()) - - def test_ANY(self): - setattr = multimethod.MultiMethodTable(3, root_class=W_Root, - argnames_before=['space']) - def setattr__Int_ANY_ANY(space, w_x, w_y, w_z): - assert space == 'space' - assert isinstance(w_x, W_IntObject) - assert isinstance(w_y, W_Root) - assert isinstance(w_z, W_Root) - return w_y.__class__.__name__ + w_z.__class__.__name__ - setattr.register(setattr__Int_ANY_ANY, W_IntObject, W_Root, W_Root) - setattr1 = setattr.install('__setattr1', [self.typeorder]*3) - for cls1 in self.typeorder: - for cls2 in self.typeorder: - assert setattr1('space', W_IntObject(), cls1(), cls2()) == ( - cls1.__name__ + cls2.__name__) - - def test_all_cases(self): - import random - space = 'space' - w_x = W_IntObject() - w_x.expected = [W_IntObject, W_Root] - w_s = W_StringObject() - w_s.expected = [W_StringObject, W_Root] - w_b = W_BoolObject() - w_b.expected = [W_BoolObject, W_IntObject, W_Root] - - def test(indices): - sub = multimethod.MultiMethodTable(2, root_class=W_Root, - argnames_before=['space']) - def addimpl(cls1, cls2): - token = random.random() - def sub__cls1_cls2(space, w_x, w_y): - assert space == 'space' - assert isinstance(w_x, cls1) - assert isinstance(w_y, cls2) - return token - sub.register(sub__cls1_cls2, cls1, cls2) - return token - - def check(w1, w2): - try: - res = sub1(space, w1, w2) - except FailedToImplement: - res = FailedToImplement - for cls1 in w1.expected: - for cls2 in w2.expected: - if (cls1, cls2) in expected: - assert res == expected[cls1, cls2] - return - else: - assert res is FailedToImplement - - random.shuffle(indices) - expected = {} - for index in indices: - cls1, cls2 = choices[index] - token = addimpl(cls1, cls2) - expected[cls1, cls2] = token - - typeorder = self.typeorder - sub1 = sub.install('__sub', [typeorder, typeorder]) - for w1 in [w_x, w_s, w_b]: - for w2 in [w_x, w_s, w_b]: - check(w1, w2) - - classes = [W_Root, W_StringObject, W_IntObject, W_BoolObject] - choices = [(cls1, cls2) for cls1 in classes - for cls2 in classes] - # each choice is a pair of classes which can be implemented or - # not by the multimethod 'sub'. Test all combinations that - # involve at most three implemented choices. 
- for i in range(len(choices)): - test([i]) - for j in range(i+1, len(choices)): - test([i, j]) - for k in range(j+1, len(choices)): - test([i, j, k]) - #for l in range(k+1, len(choices)): -- for a 4th choice - # test([i, j, k, l]) -- (takes a while) - - -class TestMultiMethod2(TestMultiMethod1): - Installer = multimethod.InstallerVersion2 diff --git a/pypy/objspace/std/test/test_userobject.py b/pypy/objspace/std/test/test_userobject.py --- a/pypy/objspace/std/test/test_userobject.py +++ b/pypy/objspace/std/test/test_userobject.py @@ -273,22 +273,6 @@ i += 1 -class AppTestWithMultiMethodVersion2(AppTestUserObject): - spaceconfig = {} - - def setup_class(cls): - from pypy.objspace.std import multimethod - - cls.prev_installer = multimethod.Installer - multimethod.Installer = multimethod.InstallerVersion2 - if cls.runappdirect: - py.test.skip("Cannot run different installers when runappdirect") - - def teardown_class(cls): - from pypy.objspace.std import multimethod - multimethod.Installer = cls.prev_installer - - class AppTestWithGetAttributeShortcut(AppTestUserObject): spaceconfig = {"objspace.std.getattributeshortcut": True} From noreply at buildbot.pypy.org Tue Feb 25 06:00:28 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 06:00:28 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: I *think* it's safe to remove builtinshortcut because it seems that it only speeds up multimethods. Message-ID: <20140225050028.CCE301C3599@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69401:fa0cc2a86b87 Date: 2014-02-25 05:59 +0100 http://bitbucket.org/pypy/pypy/changeset/fa0cc2a86b87/ Log: I *think* it's safe to remove builtinshortcut because it seems that it only speeds up multimethods. diff --git a/pypy/objspace/std/builtinshortcut.py b/pypy/objspace/std/builtinshortcut.py deleted file mode 100644 --- a/pypy/objspace/std/builtinshortcut.py +++ /dev/null @@ -1,136 +0,0 @@ -from pypy.interpreter.baseobjspace import ObjSpace -from pypy.interpreter.error import OperationError -from pypy.objspace.descroperation import DescrOperation -from pypy.objspace.std.boolobject import W_BoolObject -from rpython.tool.sourcetools import func_with_new_name - -# ____________________________________________________________ -# -# The sole purpose of this file is performance. -# It speeds up the dispatch of operations between -# built-in objects. -# - -# this is a selection... a few operations are missing because they are -# thought to be very rare or most commonly used with non-builtin types -METHODS_WITH_SHORTCUT = dict.fromkeys( - ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', - 'mod', 'lshift', 'rshift', 'and_', 'or_', 'xor', 'pow', - 'lt', 'le', 'eq', 'ne', 'gt', 'ge', 'contains', - # unary - 'len', 'nonzero', 'repr', 'str', 'hash', - 'neg', 'invert', 'index', 'iter', 'next', 'buffer', - 'getitem', 'setitem', 'int', - # in-place - 'inplace_add', 'inplace_sub', 'inplace_mul', 'inplace_truediv', - 'inplace_floordiv', 'inplace_div', 'inplace_mod', 'inplace_pow', - 'inplace_lshift', 'inplace_rshift', 'inplace_and', 'inplace_or', - 'inplace_xor', - # other - 'format', - ]) - -KNOWN_MISSING = ['getattr', # mostly non-builtins or optimized by CALL_METHOD - 'setattr', 'delattr', 'userdel', # mostly for non-builtins - 'get', 'set', 'delete', # uncommon (except on functions) - 'getslice', 'setslice', 'delslice', # see below - 'delitem', 'trunc', # rare stuff? - 'abs', 'hex', 'oct', # rare stuff? - 'pos', 'divmod', 'cmp', # rare stuff? 
- 'float', 'long', 'coerce', # rare stuff? - 'isinstance', 'issubtype', - ] -# We cannot support {get,set,del}slice right now because -# DescrOperation.{get,set,del}slice do a bit more work than just call -# the special methods: they call old_slice_range(). See e.g. -# test_builtinshortcut.AppTestString. - -for _name, _, _, _specialmethods in ObjSpace.MethodTable: - if _specialmethods: - assert _name in METHODS_WITH_SHORTCUT or _name in KNOWN_MISSING, ( - "operation %r should be in METHODS_WITH_SHORTCUT or KNOWN_MISSING" - % (_name,)) - - -def filter_out_conversions(typeorder): - res = {} - for cls, order in typeorder.iteritems(): - res[cls] = [(target_type, converter) for (target_type, converter) in - order if converter is None] - return res - - -def install(space, mm, fallback_mm=None): - """Install a function () on the space instance which invokes - a shortcut for built-in types. Returns the shortcutting multimethod - object or None. - """ - name = mm.name - if name not in METHODS_WITH_SHORTCUT: - return None - - # can be called multiple times without re-installing - if name in space.__dict__: - mm1, shortcut_method = space.__dict__[name].builtinshortcut - assert mm1 is mm - return shortcut_method - - #print 'installing shortcut for:', name - assert hasattr(DescrOperation, name) - - base_method = getattr(space.__class__, name) - - # Basic idea: we first try to dispatch the operation using purely - # the multimethod. If this is done naively, subclassing a built-in - # type like 'int' and overriding a special method like '__add__' - # doesn't work any more, because the multimethod will accept the int - # subclass and compute the result in the built-in way. To avoid - # this issue, we tweak the shortcut multimethods so that these ones - # (and only these ones) never match the interp-level subclasses - # built in pypy.interpreter.typedef.get_unique_interplevel_subclass. - expanded_order = space.model.get_typeorder_with_empty_usersubcls() - if fallback_mm: - mm = mm.merge_with(fallback_mm) - shortcut_method = mm.install_not_sliced(filter_out_conversions(expanded_order)) - - def operate(*args_w): - try: - return shortcut_method(space, *args_w) - except FailedToImplement: - pass - return base_method(space, *args_w) - - operate = func_with_new_name(operate, name) - operate.builtinshortcut = (mm, shortcut_method) - setattr(space, name, operate) - return shortcut_method - - -def install_is_true(space, mm_nonzero, mm_len): - shortcut = install(space, mm_nonzero, fallback_mm = mm_len) - assert 'is_true' not in space.__dict__ - - def is_true(w_obj): - # a bit of duplication of the logic from DescrOperation.is_true... 
- try: - w_res = shortcut(space, w_obj) - except FailedToImplement: - pass - else: - # the __nonzero__ method of built-in objects should - # always directly return a Bool; however, the __len__ method - # of built-in objects typically returns an unwrappable integer - if isinstance(w_res, W_BoolObject): - return bool(w_res.intval) - try: - return space.int_w(w_res) != 0 - except OperationError: - # I think no OperationError other than w_OverflowError - # could occur here - w_obj = w_res - - # general case fallback - return _DescrOperation_is_true(space, w_obj) - - _DescrOperation_is_true = DescrOperation.is_true.im_func - space.is_true = is_true diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -4,7 +4,7 @@ from pypy.interpreter.baseobjspace import ObjSpace, W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.typedef import get_unique_interplevel_subclass -from pypy.objspace.std import (builtinshortcut, stdtypedef, frame, model, +from pypy.objspace.std import (stdtypedef, frame, model, transparent, callmethod) from pypy.objspace.descroperation import DescrOperation, raiseattrerror from rpython.rlib.objectmodel import instantiate, specialize, is_annotation_constant @@ -485,7 +485,6 @@ def is_true(self, w_obj): # a shortcut for performance - # NOTE! this method is typically overridden by builtinshortcut.py. if type(w_obj) is W_BoolObject: return bool(w_obj.intval) return self._DescrOperation_is_true(w_obj) diff --git a/pypy/objspace/std/test/test_builtinshortcut.py b/pypy/objspace/std/test/test_builtinshortcut.py deleted file mode 100644 --- a/pypy/objspace/std/test/test_builtinshortcut.py +++ /dev/null @@ -1,98 +0,0 @@ -from pypy.objspace.std.test import test_userobject -from pypy.objspace.std.test import test_setobject -from pypy.objspace.std.test import test_bytesobject - -WITH_BUILTINSHORTCUT = {'objspace.std.builtinshortcut': True} - -class AppTestUserObject(test_userobject.AppTestUserObject): - spaceconfig = WITH_BUILTINSHORTCUT - -class AppTestBug: - spaceconfig = WITH_BUILTINSHORTCUT - - def test_frozen_subtype(self): - class S(set): pass - assert set("abc") == S("abc") - assert S("abc") == set("abc") - class F(frozenset): pass - assert frozenset("abc") == F("abc") - assert F("abc") == frozenset("abc") - - assert S("abc") in set([frozenset("abc")]) - assert F("abc") in set([frozenset("abc")]) - - s = set([frozenset("abc")]) - s.discard(S("abc")) - assert not s - - s = set([frozenset("abc")]) - s.discard(F("abc")) - assert not s - - def test_inplace_methods(self): - assert '__iadd__' not in int.__dict__ - assert '__iadd__' not in float.__dict__ - x = 5 - x += 6.5 - assert x == 11.5 - - def test_inplace_user_subclasses(self): - class I(int): pass - class F(float): pass - x = I(5) - x += F(6.5) - assert x == 11.5 - assert type(x) is float - - def test_inplace_override(self): - class I(int): - def __iadd__(self, other): - return 'foo' - x = I(5) - x += 6 - assert x == 'foo' - x = I(5) - x += 6.5 - assert x == 'foo' - assert 5 + 6.5 == 11.5 - - def test_unicode_string_compares(self): - assert u'a' == 'a' - assert 'a' == u'a' - assert not u'a' == 'b' - assert not 'a' == u'b' - assert u'a' != 'b' - assert 'a' != u'b' - assert not (u'a' == 5) - assert u'a' != 5 - assert u'a' < 5 or u'a' > 5 - - s = chr(128) - u = unichr(128) - assert not s == u # UnicodeWarning - assert s != u - assert not u == s - assert u != s - - -class 
AppTestSet(test_setobject.AppTestAppSetTest): - spaceconfig = WITH_BUILTINSHORTCUT - # this tests tons of funny comparison combinations that can easily go wrong - def setup_class(cls): - w_fakeint = cls.space.appexec([], """(): - class FakeInt(object): - def __init__(self, value): - self.value = value - def __hash__(self): - return hash(self.value) - - def __eq__(self, other): - if other == self.value: - return True - return False - return FakeInt - """) - cls.w_FakeInt = w_fakeint - -class AppTestString(test_bytesobject.AppTestBytesObject): - spaceconfig = WITH_BUILTINSHORTCUT diff --git a/pypy/objspace/test/test_descroperation.py b/pypy/objspace/test/test_descroperation.py --- a/pypy/objspace/test/test_descroperation.py +++ b/pypy/objspace/test/test_descroperation.py @@ -721,8 +721,3 @@ return CannotConvertToBool() x = X() raises(MyError, "'foo' in x") - - - -class AppTestWithBuiltinShortcut(AppTest_Descroperation): - spaceconfig = {'objspace.std.builtinshortcut': True} From noreply at buildbot.pypy.org Tue Feb 25 06:26:03 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 25 Feb 2014 06:26:03 +0100 (CET) Subject: [pypy-commit] pypy default: use constants for new_dtype_getter Message-ID: <20140225052603.64B621C02EA@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69402:3506835be9eb Date: 2014-02-25 00:22 -0500 http://bitbucket.org/pypy/pypy/changeset/3506835be9eb/ Log: use constants for new_dtype_getter diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -33,13 +33,13 @@ long_double_size = 8 -def new_dtype_getter(name): +def new_dtype_getter(num): @jit.elidable def _get_dtype(space): from pypy.module.micronumpy.interp_dtype import get_dtype_cache - return get_dtype_cache(space).dtypes_by_name[name] + return get_dtype_cache(space).dtypes_by_num[num] - def new(space, w_subtype, w_value=None): + def descr__new__(space, w_subtype, w_value=None): from pypy.module.micronumpy.interp_numarray import array dtype = _get_dtype(space) if not space.is_none(w_value): @@ -52,7 +52,7 @@ def descr_reduce(self, space): return self.reduce(space) - return func_with_new_name(new, name + "_box_new"), staticmethod(_get_dtype), func_with_new_name(descr_reduce, "descr_reduce") + return descr__new__, staticmethod(_get_dtype), descr_reduce class Box(object): @@ -365,7 +365,7 @@ return self.w_flags class W_BoolBox(W_GenericBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("bool") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.BOOL) class W_NumberBox(W_GenericBox): pass @@ -381,34 +381,34 @@ pass class W_Int8Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("int8") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.BYTE) class W_UInt8Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("uint8") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.UBYTE) class W_Int16Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("int16") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.SHORT) class W_UInt16Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("uint16") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.USHORT) class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): - 
descr__new__, _get_dtype, descr_reduce = new_dtype_getter("i") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.INT) class W_UInt32Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("I") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.UINT) + +class W_LongBox(W_SignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.LONG) + +class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.ULONG) class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("q") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.LONGLONG) class W_UInt64Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("Q") - -class W_LongBox(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("l") - -class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("L") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.ULONGLONG) class W_InexactBox(W_NumberBox): pass @@ -417,13 +417,13 @@ pass class W_Float16Box(W_FloatingBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float16") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.HALF) class W_Float32Box(W_FloatingBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float32") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.FLOAT) class W_Float64Box(W_FloatingBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float64") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.DOUBLE) def descr_as_integer_ratio(self, space): return space.call_method(self.item(space), 'as_integer_ratio') @@ -432,17 +432,17 @@ pass class W_Complex64Box(ComplexBox, W_ComplexFloatingBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("complex64") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.CFLOAT) class W_Complex128Box(ComplexBox, W_ComplexFloatingBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("complex128") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.CDOUBLE) if long_double_size in (8, 12, 16): class W_FloatLongBox(W_FloatingBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.LONGDOUBLELTR) + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.LONGDOUBLE) class W_ComplexLongBox(ComplexBox, W_ComplexFloatingBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.CLONGDOUBLELTR) + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.CLONGDOUBLE) class W_FlexibleBox(W_GenericBox): _attrs_ = ['arr', 'ofs', 'dtype'] diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -855,6 +855,7 @@ # higher numbers for dtype in reversed(self.builtin_dtypes): dtype.fields = None # mark these as builtin + assert dtype.num not in self.dtypes_by_num self.dtypes_by_num[dtype.num] = dtype self.dtypes_by_name[dtype.get_name()] = dtype for can_name in [dtype.kind + str(dtype.elsize), From noreply at buildbot.pypy.org Tue Feb 25 06:28:47 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 06:28:47 +0100 (CET) Subject: 
[pypy-commit] pypy kill-multimethod: Kill registerimplementation. Message-ID: <20140225052847.547651C02EA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69403:017ee17ccbf2 Date: 2014-02-25 06:09 +0100 http://bitbucket.org/pypy/pypy/changeset/017ee17ccbf2/ Log: Kill registerimplementation. diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -7,12 +7,6 @@ import pypy.interpreter.pycode import pypy.interpreter.special -_registered_implementations = set() -def registerimplementation(implcls): - """Hint to objspace.std.model to register the implementation class.""" - assert issubclass(implcls, W_Object) - _registered_implementations.add(implcls) - option_to_typename = { "withsmalllong" : ["smalllongobject.W_SmallLongObject"], "withstrbuf" : ["strbufobject.W_StringBufferObject"], @@ -110,14 +104,6 @@ else: self.imported_but_not_registered[implcls] = True - # check if we missed implementations - for implcls in _registered_implementations: - if hasattr(implcls, 'register'): - implcls.register(self.typeorder) - assert (implcls in self.typeorder or - implcls in self.imported_but_not_registered), ( - "please add %r in StdTypeModel.typeorder" % (implcls,)) - for type in self.typeorder: self.typeorder[type].append((type, None)) From noreply at buildbot.pypy.org Tue Feb 25 06:29:23 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 25 Feb 2014 06:29:23 +0100 (CET) Subject: [pypy-commit] pypy default: oops this assert isnt valid for intp Message-ID: <20140225052923.A7A191C02EA@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69404:ed7ff32d337d Date: 2014-02-25 00:28 -0500 http://bitbucket.org/pypy/pypy/changeset/ed7ff32d337d/ Log: oops this assert isnt valid for intp diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -855,7 +855,6 @@ # higher numbers for dtype in reversed(self.builtin_dtypes): dtype.fields = None # mark these as builtin - assert dtype.num not in self.dtypes_by_num self.dtypes_by_num[dtype.num] = dtype self.dtypes_by_name[dtype.get_name()] = dtype for can_name in [dtype.kind + str(dtype.elsize), From noreply at buildbot.pypy.org Tue Feb 25 07:05:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 07:05:01 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: All of test_random.py passes Message-ID: <20140225060501.300201C35F7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r849:0a60d4ce2f84 Date: 2014-02-25 07:04 +0100 http://bitbucket.org/pypy/stmgc/changeset/0a60d4ce2f84/ Log: All of test_random.py passes diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -486,7 +486,7 @@ else: saved = [m for m in modified if m in thread_state.saved_roots or m in global_state.prebuilt_roots] - ex.do("assert {%s}.issubset(set(modified))" % ( + ex.do("assert set([%s]).issubset(set(modified))" % ( ", ".join(saved) )) From noreply at buildbot.pypy.org Tue Feb 25 07:10:56 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 25 Feb 2014 07:10:56 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20140225061056.D2BE01C0132@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69405:3b2b6edf5c23 Date: 2014-02-25 00:56 -0500 
http://bitbucket.org/pypy/pypy/changeset/3b2b6edf5c23/ Log: cleanup diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -52,7 +52,9 @@ def descr_reduce(self, space): return self.reduce(space) - return descr__new__, staticmethod(_get_dtype), descr_reduce + return (func_with_new_name(descr__new__, 'box_descr__new__%d' % num), + staticmethod(_get_dtype), + descr_reduce) class Box(object): diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -31,11 +31,10 @@ specialize.argtype(1)(func) @functools.wraps(func) def dispatcher(self, v): - raw = self.unbox(v) return self.box( func( self, - self.for_computation(raw), + self.for_computation(self.unbox(v)), ) ) return dispatcher @@ -145,7 +144,6 @@ array = rffi.cast(rffi.CArrayPtr(self.T), data) return self.box(array[0]) - @specialize.argtype(1) def unbox(self, box): assert isinstance(box, self.BoxType) return box.value @@ -1111,11 +1109,16 @@ array = rffi.cast(rffi.CArrayPtr(self.T), data) return self.box_complex(array[0], array[1]) + def composite(self, v1, v2): + assert isinstance(v1, self.ComponentBoxType) + assert isinstance(v2, self.ComponentBoxType) + real = v1.value + imag = v2.value + return self.box_complex(real, imag) + def unbox(self, box): assert isinstance(box, self.BoxType) - # do this in two stages since real, imag are read only - real, imag = box.real, box.imag - return real, imag + return box.real, box.imag def _read(self, storage, i, offset): real = raw_storage_getitem_unaligned(self.T, storage, i + offset) @@ -1167,14 +1170,6 @@ return rfloat.NAN, rfloat.NAN return rfloat.INFINITY, rfloat.INFINITY - @specialize.argtype(1) - def composite(self, v1, v2): - assert isinstance(v1, self.ComponentBoxType) - assert isinstance(v2, self.ComponentBoxType) - real = v1.value - imag = v2.value - return self.box_complex(real, imag) - @complex_unary_op def pos(self, v): return v From noreply at buildbot.pypy.org Tue Feb 25 07:39:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 07:39:54 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Fix test_largemalloc. Message-ID: <20140225063954.8C0851C08F3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r850:2cda4cd67403 Date: 2014-02-25 07:39 +0100 http://bitbucket.org/pypy/stmgc/changeset/2cda4cd67403/ Log: Fix test_largemalloc. 
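(A short usage sketch, not part of the changeset below. The diff renames the previously static largemalloc entry points to exported functions with an _stm_ prefix and adds _stm_largemalloc_data_start(), apparently so that the tests behind test_largemalloc can call them directly; _stm_large_malloc() now returns NULL on out-of-memory, and its caller in gcpage.c reports the failure through stm_fatalerror() instead of aborting inside the allocator. The signatures are taken from the largemalloc.h hunk; the standalone buffer and the include path are assumptions made only for illustration.)

    /* sketch only -- not from the changeset */
    #include <string.h>
    #include "largemalloc.h"                 /* include path assumed */

    /* a plain, suitably aligned buffer standing in for the real arena */
    static char arena[1 << 20] __attribute__((aligned(16)));

    void demo_largemalloc(void)
    {
        _stm_largemalloc_init_arena(arena, sizeof(arena));

        char *p = _stm_large_malloc(256);    /* request must be word-aligned */
        if (p == NULL)
            return;                          /* out of memory is now reported,
                                                not abort()ed inside the call */
        memset(p, 0, 256);
        _stm_large_free(p);

        /* the arena can be grown or shrunk later; returns 0 on failure */
        _stm_largemalloc_resize_arena(sizeof(arena) / 2);
    }
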
diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -9,7 +9,7 @@ reset_all_creation_markers() */ char *base = stm_object_pages + END_NURSERY_PAGE * 4096UL; uintptr_t length = (NB_PAGES - END_NURSERY_PAGE - 1) * 4096UL; - largemalloc_init_arena(base, length); + _stm_largemalloc_init_arena(base, length); uninitialized_page_start = stm_object_pages + END_NURSERY_PAGE * 4096UL; uninitialized_page_stop = stm_object_pages + NB_PAGES * 4096UL; @@ -40,8 +40,8 @@ uninitialized_page_stop -= decrease_by; - if (!largemalloc_resize_arena(uninitialized_page_stop - - uninitialized_page_start)) + if (!_stm_largemalloc_resize_arena(uninitialized_page_stop - + uninitialized_page_start)) goto out_of_memory; setup_N_pages(uninitialized_page_start, GCPAGE_NUM_PAGES); @@ -70,14 +70,16 @@ } -static object_t *allocate_outside_nursery_large(uint64_t size) +static char *allocate_outside_nursery_large(uint64_t size) { /* thread-safe: use the lock of pages.c to prevent any remapping from occurring under our feet */ mutex_pages_lock(); /* Allocate the object with largemalloc.c from the lower addresses. */ - char *addr = large_malloc(size); + char *addr = _stm_large_malloc(size); + if (addr == NULL) + stm_fatalerror("not enough memory!\n"); if (addr + size > uninitialized_page_start) { uintptr_t npages; @@ -93,14 +95,16 @@ mutex_pages_unlock(); - return (object_t *)(addr - stm_object_pages); + return addr; } object_t *_stm_allocate_old(ssize_t size_rounded_up) { /* only for tests */ - object_t *o = allocate_outside_nursery_large(size_rounded_up); - memset(REAL_ADDRESS(stm_object_pages, o), 0, size_rounded_up); + char *p = allocate_outside_nursery_large(size_rounded_up); + memset(p, 0, size_rounded_up); + + object_t *o = (object_t *)(p - stm_object_pages); o->stm_flags = STM_FLAGS_PREBUILT; return o; } diff --git a/c7/stm/gcpage.h b/c7/stm/gcpage.h --- a/c7/stm/gcpage.h +++ b/c7/stm/gcpage.h @@ -27,7 +27,7 @@ static void setup_gcpage(void); static void teardown_gcpage(void); -static object_t *allocate_outside_nursery_large(uint64_t size); +static char *allocate_outside_nursery_large(uint64_t size); static char *_allocate_small_slowpath(uint64_t size); diff --git a/c7/stm/largemalloc.c b/c7/stm/largemalloc.c --- a/c7/stm/largemalloc.c +++ b/c7/stm/largemalloc.c @@ -170,7 +170,7 @@ really_sort_bin(index); } -static char *large_malloc(size_t request_size) +char *_stm_large_malloc(size_t request_size) { /* 'request_size' should already be a multiple of the word size here */ assert((request_size & (sizeof(char *)-1)) == 0); @@ -208,8 +208,6 @@ } /* not enough memory. 
*/ - fprintf(stderr, "not enough memory!\n"); - abort(); return NULL; found: @@ -243,7 +241,7 @@ return (char *)&mscan->d; } -static void large_free(char *data) +void _stm_large_free(char *data) { mchunk_t *chunk = data2chunk(data); assert((chunk->size & (sizeof(char *) - 1)) == 0); @@ -341,7 +339,12 @@ assert(data - 16 == (char *)last_chunk); } -static void largemalloc_init_arena(char *data_start, size_t data_size) +char *_stm_largemalloc_data_start(void) +{ + return (char *)first_chunk; +} + +void _stm_largemalloc_init_arena(char *data_start, size_t data_size) { int i; for (i = 0; i < N_BINS; i++) { @@ -362,7 +365,7 @@ insert_unsorted(first_chunk); } -static int largemalloc_resize_arena(size_t new_size) +int _stm_largemalloc_resize_arena(size_t new_size) { if (new_size < 2 * sizeof(struct malloc_chunk)) return 0; @@ -413,7 +416,7 @@ assert(last_chunk == next_chunk_u(old_last_chunk)); /* then free the last_chunk (turn it from "used" to "free) */ - large_free((char *)&old_last_chunk->d); + _stm_large_free((char *)&old_last_chunk->d); } return 1; } diff --git a/c7/stm/largemalloc.h b/c7/stm/largemalloc.h --- a/c7/stm/largemalloc.h +++ b/c7/stm/largemalloc.h @@ -1,11 +1,14 @@ /* all addresses passed to this interface should be "char *" pointers in the segment 0. */ -static void largemalloc_init_arena(char *data_start, size_t data_size); -static int largemalloc_resize_arena(size_t new_size); +void _stm_largemalloc_init_arena(char *data_start, size_t data_size); +int _stm_largemalloc_resize_arena(size_t new_size); +char *_stm_largemalloc_data_start(void); /* large_malloc() and large_free() are not thread-safe. This is due to the fact that they should be mostly called during minor or major collections, which have their own synchronization mecanisms. */ -static char *large_malloc(size_t request_size); -static void large_free(char *data) __attribute__((unused)); +char *_stm_large_malloc(size_t request_size); +void _stm_large_free(char *data); + +void _stm_large_dump(void); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -87,7 +87,8 @@ /* case 1: object is not small enough. Ask gcpage.c for an allocation via largemalloc. 
*/ - nobj = allocate_outside_nursery_large(size); + char *allocated = allocate_outside_nursery_large(size); + nobj = (object_t *)(allocated - stm_object_pages); nobj_sync_now = (uintptr_t)nobj; /* Copy the object */ diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -76,6 +76,11 @@ char *_stm_get_segment_base(long index); void _stm_test_switch(stm_thread_local_t *tl); object_t *_stm_allocate_old(ssize_t size_rounded_up); +void _stm_largemalloc_init_arena(char *data_start, size_t data_size); +int _stm_largemalloc_resize_arena(size_t new_size); +char *_stm_largemalloc_data_start(void); +char *_stm_large_malloc(size_t request_size); +void _stm_large_free(char *data); void _stm_large_dump(void); void _stm_start_safe_point(void); void _stm_stop_safe_point(void); diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -74,6 +74,13 @@ bool _check_stop_safe_point(void); void _stm_set_nursery_free_count(uint64_t free_count); +void _stm_largemalloc_init_arena(char *data_start, size_t data_size); +int _stm_largemalloc_resize_arena(size_t new_size); +char *_stm_largemalloc_data_start(void); +char *_stm_large_malloc(size_t request_size); +void _stm_large_free(char *data); +void _stm_large_dump(void); +void *memset(void *s, int c, size_t n); ssize_t stmcb_size_rounded_up(struct object_s *obj); diff --git a/c7/test/test_largemalloc.py b/c7/test/test_largemalloc.py --- a/c7/test/test_largemalloc.py +++ b/c7/test/test_largemalloc.py @@ -1,7 +1,7 @@ from support import * import sys, random -ra = stm_get_real_address +ra = lambda x: x # backward compat. class TestLargeMalloc(BaseTest): def setup_method(self, meth): @@ -13,85 +13,85 @@ self.rawmem = lib._stm_largemalloc_data_start() lib.memset(self.rawmem, 0xcd, self.size) - lib.stm_largemalloc_init(self.rawmem, self.size) + lib._stm_largemalloc_init_arena(self.rawmem, self.size) def test_simple(self): - d1 = lib.stm_large_malloc(7000) - d2 = lib.stm_large_malloc(8000) + d1 = lib._stm_large_malloc(7000) + d2 = lib._stm_large_malloc(8000) print d1 print d2 assert ra(d2) - ra(d1) == 7016 - d3 = lib.stm_large_malloc(9000) + d3 = lib._stm_large_malloc(9000) assert ra(d3) - ra(d2) == 8016 # - lib.stm_large_free(d1) - lib.stm_large_free(d2) + lib._stm_large_free(d1) + lib._stm_large_free(d2) # - d4 = lib.stm_large_malloc(600) + d4 = lib._stm_large_malloc(600) assert d4 == d1 - d5 = lib.stm_large_malloc(600) + d5 = lib._stm_large_malloc(600) assert ra(d5) == ra(d4) + 616 # - lib.stm_large_free(d5) + lib._stm_large_free(d5) # - d6 = lib.stm_large_malloc(600) + d6 = lib._stm_large_malloc(600) assert d6 == d5 # - lib.stm_large_free(d4) + lib._stm_large_free(d4) # - d7 = lib.stm_large_malloc(608) + d7 = lib._stm_large_malloc(608) assert ra(d7) == ra(d6) + 616 - d8 = lib.stm_large_malloc(600) + d8 = lib._stm_large_malloc(600) assert d8 == d4 # lib._stm_large_dump() def test_overflow_1(self): - d = lib.stm_large_malloc(self.size - 32) + d = lib._stm_large_malloc(self.size - 32) assert ra(d) == self.rawmem + 16 lib._stm_large_dump() def test_overflow_2(self): - d = lib.stm_large_malloc(self.size - 16) + d = lib._stm_large_malloc(self.size - 16) assert d == ffi.NULL lib._stm_large_dump() def test_overflow_3(self): - d = lib.stm_large_malloc(sys.maxint & ~7) + d = lib._stm_large_malloc(sys.maxint & ~7) assert d == ffi.NULL lib._stm_large_dump() def test_resize_arena_reduce_1(self): - r = lib.stm_largemalloc_resize_arena(self.size - 32) + r = lib._stm_largemalloc_resize_arena(self.size - 32) assert r == 
1 - d = lib.stm_large_malloc(self.size - 32) + d = lib._stm_large_malloc(self.size - 32) assert d == ffi.NULL lib._stm_large_dump() def test_resize_arena_reduce_2(self): - lib.stm_large_malloc(self.size // 2 - 64) - r = lib.stm_largemalloc_resize_arena(self.size // 2) + lib._stm_large_malloc(self.size // 2 - 64) + r = lib._stm_largemalloc_resize_arena(self.size // 2) assert r == 1 lib._stm_large_dump() def test_resize_arena_reduce_3(self): - d1 = lib.stm_large_malloc(128) - r = lib.stm_largemalloc_resize_arena(self.size // 2) + d1 = lib._stm_large_malloc(128) + r = lib._stm_largemalloc_resize_arena(self.size // 2) assert r == 1 - d2 = lib.stm_large_malloc(128) + d2 = lib._stm_large_malloc(128) assert ra(d1) == self.rawmem + 16 assert ra(d2) == ra(d1) + 128 + 16 lib._stm_large_dump() def test_resize_arena_cannot_reduce_1(self): - lib.stm_large_malloc(self.size // 2) - r = lib.stm_largemalloc_resize_arena(self.size // 2) + lib._stm_large_malloc(self.size // 2) + r = lib._stm_largemalloc_resize_arena(self.size // 2) assert r == 0 lib._stm_large_dump() def test_resize_arena_cannot_reduce_2(self): - lib.stm_large_malloc(self.size // 2 - 56) - r = lib.stm_largemalloc_resize_arena(self.size // 2) + lib._stm_large_malloc(self.size // 2 - 56) + r = lib._stm_largemalloc_resize_arena(self.size // 2) assert r == 0 lib._stm_large_dump() @@ -105,10 +105,10 @@ print ' free %5d (%s)' % (length, d) assert ra(d)[0] == content1 assert ra(d)[length - 1] == content2 - lib.stm_large_free(d) + lib._stm_large_free(d) else: sz = r.randrange(8, 160) * 8 - d = lib.stm_large_malloc(sz) + d = lib._stm_large_malloc(sz) print 'alloc %5d (%s)' % (sz, d) assert d != ffi.NULL lib.memset(ra(d), 0xdd, sz) From noreply at buildbot.pypy.org Tue Feb 25 07:45:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 07:45:02 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Use stm_fatalerror() here too Message-ID: <20140225064502.4E69F1C08F3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r851:3cf16bb8ac89 Date: 2014-02-25 07:44 +0100 http://bitbucket.org/pypy/stmgc/changeset/3cf16bb8ac89/ Log: Use stm_fatalerror() here too diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -139,7 +139,7 @@ MAP_FIXED | MAP_PAGES_FLAGS, -1, 0) != readmarkers) { /* fall-back */ #if STM_TESTS - abort(); + stm_fatalerror("reset_transaction_read_version: %m\n"); #endif memset(readmarkers, 0, NB_READMARKER_PAGES * 4096UL); } From noreply at buildbot.pypy.org Tue Feb 25 08:37:13 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 25 Feb 2014 08:37:13 +0100 (CET) Subject: [pypy-commit] pypy default: fix this assert Message-ID: <20140225073713.B8A0D1C02EA@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69406:66521897312a Date: 2014-02-25 02:13 -0500 http://bitbucket.org/pypy/pypy/changeset/66521897312a/ Log: fix this assert diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -52,7 +52,7 @@ def descr_reduce(self, space): return self.reduce(space) - return (func_with_new_name(descr__new__, 'box_descr__new__%d' % num), + return (func_with_new_name(descr__new__, 'descr__new__%d' % num), staticmethod(_get_dtype), descr_reduce) diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ 
-121,10 +121,11 @@ return self.byteorder in (NPY.NATIVE, NPY.NATBYTE) def get_float_dtype(self, space): - assert isinstance(self.itemtype, types.ComplexFloating) - dtype = self.itemtype.ComponentBoxType._get_dtype(space) + assert self.is_complex() + dtype = get_dtype_cache(space).component_dtypes[self.num] if self.byteorder == NPY.OPPBYTE: dtype = dtype.descr_newbyteorder(space) + assert dtype.is_float() return dtype def get_name(self): @@ -834,6 +835,11 @@ self.w_float64dtype, self.w_floatlongdtype] complex_dtypes = [self.w_complex64dtype, self.w_complex128dtype, self.w_complexlongdtype] + self.component_dtypes = { + NPY.CFLOAT: self.w_float32dtype, + NPY.CDOUBLE: self.w_float64dtype, + NPY.CLONGDOUBLE: self.w_floatlongdtype, + } self.builtin_dtypes = [ self.w_booldtype, self.w_int8dtype, self.w_uint8dtype, From noreply at buildbot.pypy.org Tue Feb 25 09:23:03 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 25 Feb 2014 09:23:03 +0100 (CET) Subject: [pypy-commit] pypy default: fixes for newbyteorder on flexible dtypes Message-ID: <20140225082303.8CCBE1C0150@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69407:6928506f34e4 Date: 2014-02-25 03:22 -0500 http://bitbucket.org/pypy/pypy/changeset/6928506f34e4/ Log: fixes for newbyteorder on flexible dtypes diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -409,7 +409,9 @@ endian = newendian itemtype = self.itemtype.__class__(endian in (NPY.NATIVE, NPY.NATBYTE)) return W_Dtype(itemtype, self.num, self.kind, self.char, - self.w_box_type, byteorder=endian, elsize=self.elsize) + self.w_box_type, byteorder=endian, elsize=self.elsize, + names=self.names, fields=self.fields, + shape=self.shape, subdtype=self.subdtype) @specialize.arg(2) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -428,6 +428,20 @@ s2 = np.array(123, dtype=dt2).byteswap().tostring() assert s1 == s2 + d = np.dtype([('', 'i8', 0) + assert d.subdtype is None + #assert d.descr == [('f0', '>i8')] + #assert str(d) == "[('f0', '>i8')]" + d = np.dtype(('i8', (2,))" + def test_object(self): import numpy as np import sys From noreply at buildbot.pypy.org Tue Feb 25 09:27:55 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 25 Feb 2014 09:27:55 +0100 (CET) Subject: [pypy-commit] pypy default: another fix Message-ID: <20140225082755.988C21C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69408:3bb2f09c6ca4 Date: 2014-02-25 03:27 -0500 http://bitbucket.org/pypy/pypy/changeset/3bb2f09c6ca4/ Log: another fix diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -408,9 +408,12 @@ elif newendian != NPY.IGNORE: endian = newendian itemtype = self.itemtype.__class__(endian in (NPY.NATIVE, NPY.NATBYTE)) + fields = self.fields + if fields is None: + fields = {} return W_Dtype(itemtype, self.num, self.kind, self.char, self.w_box_type, byteorder=endian, elsize=self.elsize, - names=self.names, fields=self.fields, + names=self.names, fields=fields, shape=self.shape, subdtype=self.subdtype) From noreply at buildbot.pypy.org Tue Feb 25 09:50:22 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 09:50:22 
+0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Copy the dictionary-like trees from "c4". Message-ID: <20140225085022.E5BCA1C08F3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r852:85c1e725fba0 Date: 2014-02-25 09:19 +0100 http://bitbucket.org/pypy/stmgc/changeset/85c1e725fba0/ Log: Copy the dictionary-like trees from "c4". diff --git a/c7/stm/list.c b/c7/stm/list.c --- a/c7/stm/list.c +++ b/c7/stm/list.c @@ -29,3 +29,150 @@ lst->last_allocated = nalloc - 1; return lst; } + + +/************************************************************/ + +static void _tree_clear_node(wlog_node_t *node) +{ + memset(node, 0, sizeof(wlog_node_t)); +} + +static void tree_clear(struct tree_s *tree) +{ + if (tree->raw_current != tree->raw_start) { + _tree_clear_node(&tree->toplevel); + tree->raw_current = tree->raw_start; + } +} + +static struct tree_s *tree_create(void) +{ + return (struct tree_s *)calloc(1, sizeof(struct tree_s)); +} + +static void tree_free(struct tree_s *tree) +{ + free(tree->raw_start); + free(tree); +} + +static void _tree_compress(struct tree_s *tree) +{ + wlog_t *item; + struct tree_s tree_copy; + memset(&tree_copy, 0, sizeof(struct tree_s)); + + TREE_LOOP_FORWARD(*tree, item) + { + tree_insert(&tree_copy, item->addr, item->val); + + } TREE_LOOP_END; + + free(tree->raw_start); + *tree = tree_copy; +} + +static wlog_t *_tree_find(char *entry, uintptr_t addr) +{ + uintptr_t key = addr; + while (((long)entry) & 1) { + /* points to a further level */ + key >>= TREE_BITS; + entry = *(char **)((entry - 1) + (key & TREE_MASK)); + } + return (wlog_t *)entry; /* may be NULL */ +} + +static void _tree_grow(struct tree_s *tree, long extra) +{ + struct tree_s newtree; + wlog_t *item; + long alloc = tree->raw_end - tree->raw_start; + long newalloc = (alloc + extra + (alloc >> 2) + 31) & ~15; + //fprintf(stderr, "growth: %ld\n", newalloc); + char *newitems = malloc(newalloc); + if (newitems == NULL) { + stm_fatalerror("out of memory!\n"); /* XXX */ + } + newtree.raw_start = newitems; + newtree.raw_current = newitems; + newtree.raw_end = newitems + newalloc; + _tree_clear_node(&newtree.toplevel); + TREE_LOOP_FORWARD(*tree, item) + { + tree_insert(&newtree, item->addr, item->val); + } TREE_LOOP_END; + free(tree->raw_start); + *tree = newtree; +} + +static char *_tree_grab(struct tree_s *tree, long size) +{ + char *result; + result = tree->raw_current; + tree->raw_current += size; + if (tree->raw_current > tree->raw_end) { + _tree_grow(tree, size); + return NULL; + } + return result; +} + +static void tree_insert(struct tree_s *tree, uintptr_t addr, uintptr_t val) +{ + retry:; + wlog_t *wlog; + uintptr_t key = addr; + int shift = 0; + char *p = (char *)(tree->toplevel.items); + char *entry; + while (1) { + p += (key >> shift) & TREE_MASK; + shift += TREE_BITS; + entry = *(char **)p; + if (entry == NULL) + break; + else if (((long)entry) & 1) { + /* points to a further level */ + p = entry - 1; + } + else { + wlog_t *wlog1 = (wlog_t *)entry; + if (wlog1->addr == 0) { + /* reuse the deleted entry and that's it */ + wlog1->addr = addr; + wlog1->val = val; + return; + } + /* the key must not already be present */ + assert(wlog1->addr != addr); + /* collision: there is already a different wlog here */ + wlog_node_t *node = (wlog_node_t *) + _tree_grab(tree, sizeof(wlog_node_t)); + if (node == NULL) goto retry; + _tree_clear_node(node); + uintptr_t key1 = wlog1->addr; + char *p1 = (char *)(node->items); + *(wlog_t **)(p1 + ((key1 >> shift) & TREE_MASK)) = wlog1; + 
*(char **)p = ((char *)node) + 1; + p = p1; + } + } + wlog = (wlog_t *)_tree_grab(tree, sizeof(wlog_t)); + if (wlog == NULL) goto retry; + wlog->addr = addr; + wlog->val = val; + *(char **)p = (char *)wlog; +} + +static bool tree_delete_item(struct tree_s *tree, uintptr_t addr) +{ + wlog_t *entry; + TREE_FIND(*tree, addr, entry, goto missing); + entry->addr = 0; + return true; + + missing: + return false; +} diff --git a/c7/stm/list.h b/c7/stm/list.h --- a/c7/stm/list.h +++ b/c7/stm/list.h @@ -1,5 +1,7 @@ #include +/************************************************************/ + struct list_s { uintptr_t count; uintptr_t last_allocated; @@ -65,3 +67,121 @@ CODE; \ } \ } while (0) + +/************************************************************/ + +/* The tree_xx functions are, like the name hints, implemented as a tree, + supporting very high performance in TREE_FIND in the common case where + there are no or few elements in the tree, but scaling correctly + if the number of items becomes large. */ + +#define TREE_BITS 4 +#define TREE_ARITY (1 << TREE_BITS) + +#define TREE_DEPTH_MAX ((sizeof(void*)*8 - 2 + TREE_BITS-1) / TREE_BITS) +/* sizeof(void*) = total number of bits + 2 = bits that we ignore anyway (2 or 3, conservatively 2) + (x + TREE_BITS-1) / TREE_BITS = divide by TREE_BITS, rounding up +*/ + +#define TREE_MASK ((TREE_ARITY - 1) * sizeof(void*)) + +typedef struct { + uintptr_t addr; + uintptr_t val; +} wlog_t; + +typedef struct { + char *items[TREE_ARITY]; +} wlog_node_t; + +struct tree_s { + char *raw_start, *raw_current, *raw_end; + wlog_node_t toplevel; +}; + +static struct tree_s *tree_create(void); +static void tree_free(struct tree_s *tree); +static void tree_clear(struct tree_s *tree); +//static inline void tree_delete_not_used_any_more(struct tree_s *tree)... 
+ +static inline bool tree_any_entry(struct tree_s *tree) { + return tree->raw_current != tree->raw_start; +} + +#define _TREE_LOOP(tree, item, INITIAL, _PLUS_) \ +{ \ + struct { char **next; char **end; } _stack[TREE_DEPTH_MAX], *_stackp; \ + char **_next, **_end, *_entry; \ + long _deleted_factor = 0; \ + struct tree_s *_tree = &(tree); \ + /* initialization */ \ + _stackp = _stack; /* empty stack */ \ + _next = _tree->toplevel.items + INITIAL; \ + _end = _next _PLUS_ TREE_ARITY; \ + /* loop */ \ + while (1) \ + { \ + if (_next == _end) \ + { \ + if (_stackp == _stack) \ + break; /* done */ \ + /* finished with this level, go to the next one */ \ + _stackp--; \ + _next = _stackp->next; \ + _end = _stackp->end; \ + continue; \ + } \ + _entry = *_next; \ + _next = _next _PLUS_ 1; \ + if (_entry == NULL) /* empty entry */ \ + continue; \ + if (((long)_entry) & 1) \ + { /* points to a further level: enter it */ \ + _stackp->next = _next; \ + _stackp->end = _end; \ + _stackp++; \ + _next = ((wlog_node_t *)(_entry - 1))->items + INITIAL; \ + _end = _next _PLUS_ TREE_ARITY; \ + continue; \ + } \ + /* points to a wlog_t item */ \ + if (((wlog_t *)_entry)->addr == 0) { /* deleted entry */ \ + _deleted_factor += 3; \ + continue; \ + } \ + _deleted_factor -= 4; \ + item = (wlog_t *)_entry; + +#define TREE_LOOP_FORWARD(tree, item) \ + _TREE_LOOP(tree, item, 0, +) +#define TREE_LOOP_BACKWARD(tree, item) \ + _TREE_LOOP(tree, item, (TREE_ARITY-1), -) +#define TREE_LOOP_END } } +#define TREE_LOOP_END_AND_COMPRESS \ + } if (_deleted_factor > 9) _tree_compress(_tree); } +#define TREE_LOOP_DELETE(item) { (item)->addr = NULL; _deleted_factor += 6; } + +#define TREE_FIND(tree, addr1, result, goto_not_found) \ +{ \ + uintptr_t _key = (addr1); \ + char *_p = (char *)((tree).toplevel.items); \ + char *_entry = *(char **)(_p + (_key & TREE_MASK)); \ + if (_entry == NULL) \ + goto_not_found; /* common case, hopefully */ \ + result = _tree_find(_entry, addr1); \ + if (result == NULL || result->addr != (addr1)) \ + goto_not_found; \ +} + +static wlog_t *_tree_find(char *entry, uintptr_t addr); +static void _tree_compress(struct tree_s *tree) __attribute__((unused)); +static void tree_insert(struct tree_s *tree, uintptr_t addr, uintptr_t val); +static bool tree_delete_item(struct tree_s *tree, uintptr_t addr); + +static inline bool tree_contains(struct tree_s *tree, uintptr_t addr) +{ + wlog_t *result; + TREE_FIND(*tree, addr, result, return false); + return true; +} diff --git a/c7/test/common.py b/c7/test/common.py new file mode 100644 --- /dev/null +++ b/c7/test/common.py @@ -0,0 +1,8 @@ +import os +import sys +assert sys.maxint == 9223372036854775807, "requires a 64-bit environment" + +# ---------- +os.environ['CC'] = 'clang' + +parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -1,12 +1,8 @@ import os import cffi, weakref -import sys -assert sys.maxint == 9223372036854775807, "requires a 64-bit environment" +from common import parent_dir # ---------- -os.environ['CC'] = 'clang' - -parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) source_files = [os.path.join(parent_dir, "stmgc.c")] all_files = [os.path.join(parent_dir, "stmgc.h"), diff --git a/c7/test/test_list.py b/c7/test/test_list.py new file mode 100644 --- /dev/null +++ b/c7/test/test_list.py @@ -0,0 +1,49 @@ +import cffi +from common import parent_dir + + +ffi = cffi.FFI() +ffi.cdef(""" +struct list_s 
*list_create(void); + +struct tree_s *tree_create(void); +void tree_free(struct tree_s *tree); +void tree_clear(struct tree_s *tree); +bool tree_contains(struct tree_s *tree, uintptr_t addr); +void tree_insert(struct tree_s *tree, uintptr_t addr, uintptr_t val); +bool tree_delete_item(struct tree_s *tree, uintptr_t addr); +""") + +lib = ffi.verify(''' +#include +#include +#include + +#define LIKELY(x) (x) +#define UNLIKELY(x) (x) +#define stm_fatalerror(x) abort() + +#include "stm/list.h" + +#define _STM_CORE_H_ +#include "stm/list.c" +''', define_macros=[('STM_TESTS', '1')], + undef_macros=['NDEBUG'], + include_dirs=[parent_dir], + extra_compile_args=['-g', '-O0', '-Werror', '-ferror-limit=1'], + force_generic_engine=True) + +# ____________________________________________________________ + +def test_tree_empty(): + t = lib.tree_create() + for i in range(100): + assert lib.tree_contains(t, i) == False + lib.tree_free(t) + +def test_tree_add(): + t = lib.tree_create() + lib.tree_insert(t, 23, 456) + for i in range(100): + assert lib.tree_contains(t, i) == (i == 23) + lib.tree_free(t) From noreply at buildbot.pypy.org Tue Feb 25 09:50:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 09:50:24 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Use the trees to implement young_outside_nursery, step 1. Message-ID: <20140225085024.18F991C08F3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r853:9fb1364ed14f Date: 2014-02-25 09:38 +0100 http://bitbucket.org/pypy/stmgc/changeset/9fb1364ed14f/ Log: Use the trees to implement young_outside_nursery, step 1. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -179,6 +179,7 @@ } assert(list_is_empty(STM_PSEGMENT->modified_old_objects)); + assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery)); assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL); assert(STM_PSEGMENT->large_overflow_objects == NULL); diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -81,6 +81,11 @@ current transaction spanned a minor collection. */ struct list_s *large_overflow_objects; + /* List of all young objects outside the nursery ("young" in the + sense that they should be in the nursery, but were too big for + that). */ + struct tree_s *young_outside_nursery; + /* Start time: to know approximately for how long a transaction has been running, in contention management */ uint64_t start_time; diff --git a/c7/stm/list.h b/c7/stm/list.h --- a/c7/stm/list.h +++ b/c7/stm/list.h @@ -105,8 +105,8 @@ static void tree_clear(struct tree_s *tree); //static inline void tree_delete_not_used_any_more(struct tree_s *tree)... 
-static inline bool tree_any_entry(struct tree_s *tree) { - return tree->raw_current != tree->raw_start; +static inline bool tree_is_cleared(struct tree_s *tree) { + return tree->raw_current == tree->raw_start; } #define _TREE_LOOP(tree, item, INITIAL, _PLUS_) \ @@ -177,7 +177,8 @@ static wlog_t *_tree_find(char *entry, uintptr_t addr); static void _tree_compress(struct tree_s *tree) __attribute__((unused)); static void tree_insert(struct tree_s *tree, uintptr_t addr, uintptr_t val); -static bool tree_delete_item(struct tree_s *tree, uintptr_t addr); +static bool tree_delete_item(struct tree_s *tree, uintptr_t addr) + __attribute__((unused)); static inline bool tree_contains(struct tree_s *tree, uintptr_t addr) { diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -182,6 +182,19 @@ memset(realnursery, 0, size); STM_SEGMENT->nursery_current = (stm_char *)_stm_nursery_start; + + /* free any object left from 'young_outside_nursery' */ + if (!tree_is_cleared(STM_PSEGMENT->young_outside_nursery)) { + mutex_pages_lock(); + + wlog_t *item; + TREE_LOOP_FORWARD(*STM_PSEGMENT->young_outside_nursery, item) { + _stm_large_free(stm_object_pages + item->addr); + } TREE_LOOP_END; + + tree_clear(STM_PSEGMENT->young_outside_nursery); + mutex_pages_unlock(); + } } static void minor_collection(bool commit) @@ -268,7 +281,14 @@ object_t *_stm_allocate_external(ssize_t size_rounded_up) { - abort();//... + /* XXX force a minor/major collection if needed */ + + char *result = allocate_outside_nursery_large(size_rounded_up); + memset(result, 0, size_rounded_up); + + object_t *o = (object_t *)(result - stm_object_pages); + tree_insert(STM_PSEGMENT->young_outside_nursery, (intptr_t)o, 0); + return o; } #ifdef STM_TESTS diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -56,6 +56,7 @@ pr->objects_pointing_to_nursery = NULL; pr->large_overflow_objects = NULL; pr->modified_old_objects = list_create(); + pr->young_outside_nursery = tree_create(); pr->overflow_number = GCFLAG_OVERFLOW_NUMBER_bit0 * (i + 1); highest_overflow_number = pr->overflow_number; } @@ -88,6 +89,7 @@ assert(pr->objects_pointing_to_nursery == NULL); assert(pr->large_overflow_objects == NULL); list_free(pr->modified_old_objects); + tree_free(pr->young_outside_nursery); } munmap(stm_object_pages, TOTAL_MEMORY); diff --git a/c7/test/test_list.py b/c7/test/test_list.py --- a/c7/test/test_list.py +++ b/c7/test/test_list.py @@ -1,3 +1,4 @@ +import random import cffi from common import parent_dir @@ -9,9 +10,11 @@ struct tree_s *tree_create(void); void tree_free(struct tree_s *tree); void tree_clear(struct tree_s *tree); +bool tree_is_cleared(struct tree_s *tree); bool tree_contains(struct tree_s *tree, uintptr_t addr); void tree_insert(struct tree_s *tree, uintptr_t addr, uintptr_t val); bool tree_delete_item(struct tree_s *tree, uintptr_t addr); +int test_tree_walk(struct tree_s *tree, uintptr_t addrs[]); """) lib = ffi.verify(''' @@ -27,6 +30,23 @@ #define _STM_CORE_H_ #include "stm/list.c" + +int test_tree_walk(struct tree_s *tree, uintptr_t addrs[]) +{ + int result = 0; + wlog_t *item; + TREE_LOOP_FORWARD(*tree, item) { + addrs[result++] = item->addr; + } TREE_LOOP_END; + int i = result; + TREE_LOOP_BACKWARD(*tree, item) { + assert(i > 0); + i--; + assert(addrs[i] == item->addr); + } TREE_LOOP_END; + assert(i == 0); + return result; +} ''', define_macros=[('STM_TESTS', '1')], undef_macros=['NDEBUG'], include_dirs=[parent_dir], @@ -35,6 +55,8 @@ # 
____________________________________________________________ +# XXX need tests for list_xxx too + def test_tree_empty(): t = lib.tree_create() for i in range(100): @@ -47,3 +69,57 @@ for i in range(100): assert lib.tree_contains(t, i) == (i == 23) lib.tree_free(t) + +def test_tree_is_cleared(): + t = lib.tree_create() + assert lib.tree_is_cleared(t) + lib.tree_insert(t, 23, 456) + assert not lib.tree_is_cleared(t) + lib.tree_free(t) + +def test_tree_delete_item(): + t = lib.tree_create() + lib.tree_insert(t, 23, 456) + lib.tree_insert(t, 42, 34289) + assert not lib.tree_is_cleared(t) + assert lib.tree_contains(t, 23) + res = lib.tree_delete_item(t, 23) + assert res + assert not lib.tree_contains(t, 23) + res = lib.tree_delete_item(t, 23) + assert not res + res = lib.tree_delete_item(t, 21) + assert not res + assert not lib.tree_is_cleared(t) + assert lib.tree_contains(t, 42) + res = lib.tree_delete_item(t, 42) + assert res + assert not lib.tree_is_cleared(t) # not cleared, but still empty + for i in range(100): + assert not lib.tree_contains(t, i) + lib.tree_free(t) + +def test_tree_walk(): + t = lib.tree_create() + lib.tree_insert(t, 23, 456) + lib.tree_insert(t, 42, 34289) + a = ffi.new("uintptr_t[10]") + res = lib.test_tree_walk(t, a) + assert res == 2 + assert a[0] == 23 + assert a[1] == 42 + lib.tree_free(t) + +def test_tree_walk_big(): + t = lib.tree_create() + values = [random.randrange(0, 1000000) for i in range(300)] + for x in values: + lib.tree_insert(t, x, x) + a = ffi.new("uintptr_t[1000]") + res = lib.test_tree_walk(t, a) + assert res == 300 + found = set() + for i in range(res): + found.add(a[i]) + assert found == set(values) + lib.tree_free(t) diff --git a/c7/test/test_nursery.py b/c7/test/test_nursery.py --- a/c7/test/test_nursery.py +++ b/c7/test/test_nursery.py @@ -3,23 +3,6 @@ class TestBasic(BaseTest): - def test_nursery_large(self): - py.test.skip("XXX later") - self.start_transaction() - lp1 = stm_allocate(SOME_LARGE_SIZE) - lp2 = stm_allocate(SOME_LARGE_SIZE) - - u1 = int(ffi.cast("uintptr_t", lp1)) - u2 = int(ffi.cast("uintptr_t", lp2)) - assert (u1 & 255) == 0 - assert (u2 & 255) == 0 - assert stm_creation_marker(lp1) == 0xff - assert stm_creation_marker(lp2) == 0xff - - self.commit_transaction() - assert stm_creation_marker(lp1) == 0 - assert stm_creation_marker(lp2) == 0 - def test_nursery_full(self): lib._stm_set_nursery_free_count(2048) self.start_transaction() @@ -99,7 +82,6 @@ assert young def test_larger_than_limit_for_nursery(self): - py.test.skip("XXX later") obj_size = lib._STM_FAST_ALLOC + 16 self.start_transaction() From noreply at buildbot.pypy.org Tue Feb 25 09:50:25 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 09:50:25 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Actually implement surviving young_outside_nursery Message-ID: <20140225085025.317111C08F3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r854:4302246a0ad0 Date: 2014-02-25 09:50 +0100 http://bitbucket.org/pypy/stmgc/changeset/4302246a0ad0/ Log: Actually implement surviving young_outside_nursery diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -12,7 +12,7 @@ void _stm_write_slowpath(object_t *obj) { assert(_running_transaction()); - assert(!_is_in_nursery(obj)); + assert(!_is_young(obj)); /* is this an object from the same transaction, outside the nursery? 
*/ if ((obj->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) == @@ -225,7 +225,7 @@ static void synchronize_overflow_object_now(object_t *obj) { - assert(!_is_in_nursery(obj)); + assert(!_is_young(obj)); assert((obj->stm_flags & GCFLAG_SMALL_UNIFORM) == 0); char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -42,6 +42,12 @@ return (uintptr_t)obj < NURSERY_END; } +static inline bool _is_young(object_t *obj) +{ + return (_is_in_nursery(obj) || + tree_contains(STM_PSEGMENT->young_outside_nursery, (uintptr_t)obj)); +} + bool _stm_in_nursery(object_t *obj) { return _is_in_nursery(obj); @@ -54,6 +60,19 @@ #define FLAG_SYNC_LARGE_NOW 0x01 +static void minor_young_outside_nursery(object_t *obj) +{ + tree_delete_item(STM_PSEGMENT->young_outside_nursery, (uintptr_t)obj); + + uintptr_t nobj_sync_now = (uintptr_t)obj; + if (STM_PSEGMENT->minor_collect_will_commit_now) + nobj_sync_now |= FLAG_SYNC_LARGE_NOW; + else + LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); + + LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, nobj_sync_now); +} + static void minor_trace_if_young(object_t **pobj) { /* takes a normal pointer to a thread-local pointer to an object */ @@ -61,8 +80,13 @@ if (obj == NULL) return; assert((uintptr_t)obj < NB_PAGES * 4096UL); - if (!_is_in_nursery(obj)) - return; + if (!_is_in_nursery(obj)) { + if (UNLIKELY(tree_contains(STM_PSEGMENT->young_outside_nursery, + (uintptr_t)obj))) { + minor_young_outside_nursery(obj); + } + return; /* else old object, nothing to do */ + } /* If the object was already seen here, its first word was set to GCWORD_MOVED. In that case, the forwarding location, i.e. @@ -133,7 +157,7 @@ static inline void _collect_now(object_t *obj) { - assert(!_is_in_nursery(obj)); + assert(!_is_young(obj)); /* We must not have GCFLAG_WRITE_BARRIER so far. Add it now. 
*/ assert(!(obj->stm_flags & GCFLAG_WRITE_BARRIER)); diff --git a/c7/test/test_nursery.py b/c7/test/test_nursery.py --- a/c7/test/test_nursery.py +++ b/c7/test/test_nursery.py @@ -81,7 +81,7 @@ assert old assert young - def test_larger_than_limit_for_nursery(self): + def test_larger_than_limit_for_nursery_die(self): obj_size = lib._STM_FAST_ALLOC + 16 self.start_transaction() @@ -93,6 +93,28 @@ seen.add(new) assert len(seen) < 5 # addresses are reused + def test_larger_than_limit_for_nursery_dont_die(self): + obj_nrefs = (lib._STM_FAST_ALLOC + 16) // 8 + + self.start_transaction() + lp1 = ffi.cast("object_t *", 0) + seen = set() + for i in range(100): + self.push_root(lp1) + stm_minor_collect() + lp1 = self.pop_root() + new = stm_allocate_refs(obj_nrefs) + assert not is_in_nursery(new) + seen.add(new) + stm_set_ref(new, i, lp1) + lp1 = new + assert len(seen) == 100 # addresses are not reused + + for i in reversed(range(100)): + assert lp1 + lp1 = stm_get_ref(lp1, i) + assert not lp1 + def test_reset_partial_alloc_pages(self): py.test.skip("a would-be-nice feature, but not actually needed: " "the next major GC will take care of it") From noreply at buildbot.pypy.org Tue Feb 25 09:52:23 2014 From: noreply at buildbot.pypy.org (fijal) Date: Tue, 25 Feb 2014 09:52:23 +0100 (CET) Subject: [pypy-commit] pypy default: Replace @jit.elidable with @specialize.memo Message-ID: <20140225085223.230441C08F3@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69409:3dbefef835d5 Date: 2014-02-25 10:51 +0200 http://bitbucket.org/pypy/pypy/changeset/3dbefef835d5/ Log: Replace @jit.elidable with @specialize.memo diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -16,7 +16,7 @@ from pypy.interpreter.mixedmodule import MixedModule from rpython.rtyper.lltypesystem import lltype from rpython.rlib.rstring import StringBuilder -from rpython.rlib import jit +from rpython.rlib.objectmodel import specialize from pypy.module.micronumpy import constants as NPY @@ -34,7 +34,7 @@ def new_dtype_getter(num): - @jit.elidable + @specialize.memo() def _get_dtype(space): from pypy.module.micronumpy.interp_dtype import get_dtype_cache return get_dtype_cache(space).dtypes_by_num[num] From noreply at buildbot.pypy.org Tue Feb 25 09:58:39 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 25 Feb 2014 09:58:39 +0100 (CET) Subject: [pypy-commit] pypy default: fix a char dtype case Message-ID: <20140225085839.E99D61C08F3@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69410:89a5865820d1 Date: 2014-02-25 03:52 -0500 http://bitbucket.org/pypy/pypy/changeset/89a5865820d1/ Log: fix a char dtype case diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -587,10 +587,8 @@ except ValueError: raise oefmt(space.w_TypeError, "data type not understood") if char == NPY.CHARLTR: - char = NPY.STRINGLTR - size = 1 - - if char == NPY.STRINGLTR: + return new_string_dtype(space, 1, NPY.CHARLTR) + elif char == NPY.STRINGLTR: return new_string_dtype(space, size) elif char == NPY.UNICODELTR: return new_unicode_dtype(space, size) @@ -599,13 +597,13 @@ assert False -def new_string_dtype(space, size): +def new_string_dtype(space, size, char=NPY.STRINGLTR): return W_Dtype( types.StringType(), elsize=size, num=NPY.STRING, kind=NPY.STRINGLTR, 
- char=NPY.STRINGLTR, + char=char, w_box_type=space.gettypefor(interp_boxes.W_StringBox), ) diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -891,6 +891,11 @@ assert dtype('void').byteorder == '|' assert dtype((int, 2)).byteorder == '|' assert dtype(np.generic).str == '|V0' + d = dtype(np.character) + assert d.num == 18 + assert d.char == 'S' + assert d.kind == 'S' + assert d.str == '|S0' def test_dtype_str(self): from numpypy import dtype @@ -1055,9 +1060,15 @@ assert isinstance(u, unicode) def test_character_dtype(self): + import numpy as np from numpypy import array, character x = array([["A", "B"], ["C", "D"]], character) assert (x == [["A", "B"], ["C", "D"]]).all() + d = np.dtype('c') + assert d.num == 18 + assert d.char == 'c' + assert d.kind == 'S' + assert d.str == '|S1' class AppTestRecordDtypes(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) From noreply at buildbot.pypy.org Tue Feb 25 10:06:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 10:06:08 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Refactoring and small fix Message-ID: <20140225090608.77EFC1C35DF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r855:76937b4f5d6e Date: 2014-02-25 10:05 +0100 http://bitbucket.org/pypy/stmgc/changeset/76937b4f5d6e/ Log: Refactoring and small fix diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -60,86 +60,88 @@ #define FLAG_SYNC_LARGE_NOW 0x01 -static void minor_young_outside_nursery(object_t *obj) +static uintptr_t minor_record_large_overflow_object(object_t *nobj) { - tree_delete_item(STM_PSEGMENT->young_outside_nursery, (uintptr_t)obj); - - uintptr_t nobj_sync_now = (uintptr_t)obj; + uintptr_t nobj_sync_now = (uintptr_t)nobj; if (STM_PSEGMENT->minor_collect_will_commit_now) nobj_sync_now |= FLAG_SYNC_LARGE_NOW; else - LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); - - LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, nobj_sync_now); + LIST_APPEND(STM_PSEGMENT->large_overflow_objects, nobj); + return nobj_sync_now; } static void minor_trace_if_young(object_t **pobj) { /* takes a normal pointer to a thread-local pointer to an object */ object_t *obj = *pobj; + object_t *nobj; + uintptr_t nobj_sync_now; + if (obj == NULL) return; assert((uintptr_t)obj < NB_PAGES * 4096UL); - if (!_is_in_nursery(obj)) { - if (UNLIKELY(tree_contains(STM_PSEGMENT->young_outside_nursery, - (uintptr_t)obj))) { - minor_young_outside_nursery(obj); + + if (_is_in_nursery(obj)) { + /* If the object was already seen here, its first word was set + to GCWORD_MOVED. In that case, the forwarding location, i.e. + where the object moved to, is stored in the second word in 'obj'. */ + object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)obj; + + if (pforwarded_array[0] == GCWORD_MOVED) { + *pobj = pforwarded_array[1]; /* already moved */ + return; } - return; /* else old object, nothing to do */ + + /* We need to make a copy of this object. It goes either in + a largemalloc.c-managed area, or if it's small enough, in + one of the small uniform pages from gcpage.c. + */ + char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + size_t size = stmcb_size_rounded_up((struct object_s *)realobj); + + if (1 /*size >= GC_MEDIUM_REQUEST*/) { + + /* case 1: object is not small enough. 
+ Ask gcpage.c for an allocation via largemalloc. */ + char *allocated = allocate_outside_nursery_large(size); + nobj = (object_t *)(allocated - stm_object_pages); + + /* Copy the object */ + char *realnobj = REAL_ADDRESS(STM_SEGMENT->segment_base, nobj); + memcpy(realnobj, realobj, size); + + nobj_sync_now = minor_record_large_overflow_object(nobj); + } + else { + /* case "small enough" */ + abort(); //... + } + + /* Done copying the object. */ + //dprintf(("\t\t\t\t\t%p -> %p\n", obj, nobj)); + pforwarded_array[0] = GCWORD_MOVED; + pforwarded_array[1] = nobj; + *pobj = nobj; } - /* If the object was already seen here, its first word was set - to GCWORD_MOVED. In that case, the forwarding location, i.e. - where the object moved to, is stored in the second word in 'obj'. */ - object_t *TLPREFIX *pforwarded_array = (object_t *TLPREFIX *)obj; + else { + /* The object was not in the nursery at all */ + if (LIKELY(!tree_contains(STM_PSEGMENT->young_outside_nursery, + (uintptr_t)obj))) + return; /* common case: it was an old object, nothing to do */ - if (pforwarded_array[0] == GCWORD_MOVED) { - *pobj = pforwarded_array[1]; /* already moved */ - return; + /* a young object outside the nursery */ + nobj = obj; + tree_delete_item(STM_PSEGMENT->young_outside_nursery, (uintptr_t)nobj); + nobj_sync_now = minor_record_large_overflow_object(nobj); } - /* We need to make a copy of this object. It goes either in - a largemalloc.c-managed area, or if it's small enough, in - one of the small uniform pages from gcpage.c. - */ - char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); - size_t size = stmcb_size_rounded_up((struct object_s *)realobj); - object_t *nobj; - uintptr_t nobj_sync_now; - - if (1 /*size >= GC_MEDIUM_REQUEST*/) { - - /* case 1: object is not small enough. - Ask gcpage.c for an allocation via largemalloc. */ - char *allocated = allocate_outside_nursery_large(size); - nobj = (object_t *)(allocated - stm_object_pages); - nobj_sync_now = (uintptr_t)nobj; - - /* Copy the object */ - char *realnobj = REAL_ADDRESS(STM_SEGMENT->segment_base, nobj); - memcpy(realnobj, realobj, size); - - if (STM_PSEGMENT->minor_collect_will_commit_now) - nobj_sync_now |= FLAG_SYNC_LARGE_NOW; - else - LIST_APPEND(STM_PSEGMENT->large_overflow_objects, nobj); - } - else { - /* case "small enough" */ - abort(); //... - } - + /* Set the overflow_number if nedeed */ assert((nobj->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) == 0); if (!STM_PSEGMENT->minor_collect_will_commit_now) { nobj->stm_flags |= STM_PSEGMENT->overflow_number; } - /* Done copying the object. 
*/ - //dprintf(("\t\t\t\t\t%p -> %p\n", obj, nobj)); - pforwarded_array[0] = GCWORD_MOVED; - pforwarded_array[1] = nobj; - *pobj = nobj; - /* Must trace the object later */ LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, nobj_sync_now); } From noreply at buildbot.pypy.org Tue Feb 25 10:06:26 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 25 Feb 2014 10:06:26 +0100 (CET) Subject: [pypy-commit] pypy default: fix array init with char dtype Message-ID: <20140225090626.411131C35DF@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69411:0f6a27d08000 Date: 2014-02-25 04:05 -0500 http://bitbucket.org/pypy/pypy/changeset/0f6a27d08000/ Log: fix array init with char dtype diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1448,9 +1448,10 @@ # scalars and strings w/o __array__ method isstr = space.isinstance_w(w_object, space.w_str) if not issequence_w(space, w_object) or isstr: - if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): - dtype = interp_ufuncs.find_dtype_for_scalar(space, w_object) - return W_NDimArray.new_scalar(space, dtype, w_object) + if dtype is None or dtype.char != NPY.CHARLTR: + if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): + dtype = interp_ufuncs.find_dtype_for_scalar(space, w_object) + return W_NDimArray.new_scalar(space, dtype, w_object) if space.is_none(w_order): order = 'C' diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1697,16 +1697,12 @@ assert exc.value[0] == "data-type must not be 0-sized" assert a.view('S4') == '\x03' a = array('abc1', dtype='c') - import sys - if '__pypy__' in sys.builtin_module_names: - raises(ValueError, a.view, 'S4') - raises(ValueError, a.view, [('a', 'i2'), ('b', 'i2')]) - else: - assert a.view('S4') == 'abc1' - b = a.view([('a', 'i2'), ('b', 'i2')]) - assert b.shape == (1,) - assert b[0][0] == 25185 - assert b[0][1] == 12643 + assert (a == ['a', 'b', 'c', '1']).all() + assert a.view('S4') == 'abc1' + b = a.view([('a', 'i2'), ('b', 'i2')]) + assert b.shape == (1,) + assert b[0][0] == 25185 + assert b[0][1] == 12643 a = array([(1, 2)], dtype=[('a', 'int64'), ('b', 'int64')])[0] assert a.shape == () assert a.view('S16') == '\x01' + '\x00' * 7 + '\x02' From noreply at buildbot.pypy.org Tue Feb 25 10:07:56 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 10:07:56 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: In trees, the NULL key is reserved Message-ID: <20140225090756.859C91C35DF@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r856:e47cc9d404c2 Date: 2014-02-25 10:07 +0100 http://bitbucket.org/pypy/stmgc/changeset/e47cc9d404c2/ Log: In trees, the NULL key is reserved diff --git a/c7/stm/list.c b/c7/stm/list.c --- a/c7/stm/list.c +++ b/c7/stm/list.c @@ -121,6 +121,7 @@ static void tree_insert(struct tree_s *tree, uintptr_t addr, uintptr_t val) { + assert(addr != 0); /* the NULL key is reserved */ retry:; wlog_t *wlog; uintptr_t key = addr; From noreply at buildbot.pypy.org Tue Feb 25 10:20:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 10:20:01 +0100 (CET) Subject: [pypy-commit] pypy default: Baaah. _py3k_acquire() will raise OverflowError. Work around. 
Message-ID: <20140225092001.8B86C1C0132@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69412:570e6baf64ae Date: 2014-02-25 10:19 +0100 http://bitbucket.org/pypy/pypy/changeset/570e6baf64ae/ Log: Baaah. _py3k_acquire() will raise OverflowError. Work around. diff --git a/lib-python/2.7/threading.py b/lib-python/2.7/threading.py --- a/lib-python/2.7/threading.py +++ b/lib-python/2.7/threading.py @@ -246,7 +246,14 @@ else: # PyPy patch: use _py3k_acquire() if timeout > 0: - gotit = waiter._py3k_acquire(True, timeout) + try: + gotit = waiter._py3k_acquire(True, timeout) + except OverflowError: + # bah, in Python 3, acquire(True, timeout) raises + # OverflowError if the timeout is too huge. For + # forward-compatibility reasons we do the same. + waiter.acquire() + gotit = True else: gotit = waiter.acquire(False) if not gotit: From noreply at buildbot.pypy.org Tue Feb 25 11:17:00 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 25 Feb 2014 11:17:00 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: add very large objects Message-ID: <20140225101700.A8F801C0132@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r857:d3ae1ec8e5b4 Date: 2014-02-25 11:17 +0100 http://bitbucket.org/pypy/stmgc/changeset/d3ae1ec8e5b4/ Log: add very large objects diff --git a/c7/test/test_random.py b/c7/test/test_random.py --- a/c7/test/test_random.py +++ b/c7/test/test_random.py @@ -353,6 +353,7 @@ size = global_state.rnd.choice([ "16", str(4096+16), + str(80*1024+16), #"SOME_MEDIUM_SIZE+16", #"SOME_LARGE_SIZE+16", ]) From noreply at buildbot.pypy.org Tue Feb 25 11:17:48 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Tue, 25 Feb 2014 11:17:48 +0100 (CET) Subject: [pypy-commit] pypy default: more informative error when refusing to create dtypes Message-ID: <20140225101748.2FE601C0132@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69413:9aaa71abdd13 Date: 2014-02-25 04:45 -0500 http://bitbucket.org/pypy/pypy/changeset/9aaa71abdd13/ Log: more informative error when refusing to create dtypes diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -532,7 +532,8 @@ if w_dtype is dtype.w_box_type: return dtype if space.isinstance_w(w_dtype, space.w_type): - raise oefmt(space.w_NotImplementedError, "object dtype not implemented") + raise oefmt(space.w_NotImplementedError, + "cannot create dtype with type '%N'", w_dtype) raise oefmt(space.w_TypeError, "data type not understood") W_Dtype.typedef = TypeDef("dtype", diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -452,7 +452,7 @@ assert np.dtype(o).str == '|O8' else: exc = raises(NotImplementedError, "np.dtype(o)") - assert exc.value[0] == 'object dtype not implemented' + assert exc.value[0] == "cannot create dtype with type '%s'" % o.__name__ class AppTestTypes(BaseAppTestDtypes): def test_abstract_types(self): From noreply at buildbot.pypy.org Tue Feb 25 11:23:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 11:23:45 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Fix: memset the right segment Message-ID: <20140225102345.D2B391C0132@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r858:fb13763ca5b2 Date: 2014-02-25 11:23 +0100 
http://bitbucket.org/pypy/stmgc/changeset/fb13763ca5b2/ Log: Fix: memset the right segment diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -310,10 +310,10 @@ /* XXX force a minor/major collection if needed */ char *result = allocate_outside_nursery_large(size_rounded_up); - memset(result, 0, size_rounded_up); - object_t *o = (object_t *)(result - stm_object_pages); tree_insert(STM_PSEGMENT->young_outside_nursery, (intptr_t)o, 0); + + memset(REAL_ADDRESS(STM_SEGMENT->segment_base, o), 0, size_rounded_up); return o; } From noreply at buildbot.pypy.org Tue Feb 25 12:18:03 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 12:18:03 +0100 (CET) Subject: [pypy-commit] pypy default: Move the partial lib_pypy/disassembler.py to rpython/tool/. Message-ID: <20140225111803.D21E41C08B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69414:6749a8734e6c Date: 2014-02-25 12:17 +0100 http://bitbucket.org/pypy/pypy/changeset/6749a8734e6c/ Log: Move the partial lib_pypy/disassembler.py to rpython/tool/. Move pypy/tool/jitlogparser to rpython/root/. diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -6,8 +6,9 @@ from _pytest.assertion import newinterpret except ImportError: # e.g. Python 2.5 newinterpret = None -from pypy.tool.jitlogparser.parser import SimpleParser, Function, TraceForOpcode -from pypy.tool.jitlogparser.storage import LoopStorage +from rpython.tool.jitlogparser.parser import (SimpleParser, Function, + TraceForOpcode) +from rpython.tool.jitlogparser.storage import LoopStorage def find_ids_range(code): diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -3,7 +3,7 @@ import types import subprocess import py -from lib_pypy import disassembler +from rpython.tool import disassembler from rpython.tool.udir import udir from rpython.tool import logparser from rpython.jit.tool.jitoutput import parse_prof @@ -129,7 +129,7 @@ class TestOpMatcher_(object): def match(self, src1, src2, **kwds): - from pypy.tool.jitlogparser.parser import SimpleParser + from rpython.tool.jitlogparser.parser import SimpleParser loop = SimpleParser.parse_from_input(src1) matcher = OpMatcher(loop.operations) try: diff --git a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py --- a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py +++ b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py @@ -4,7 +4,7 @@ from rpython.tool.logparser import extract_category from rpython.jit.backend.tool.viewcode import ObjdumpNotFound -from pypy.tool.jitlogparser.parser import (import_log, parse_log_counts, +from rpython.tool.jitlogparser.parser import (import_log, parse_log_counts, mangle_descr) from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC diff --git a/lib_pypy/disassembler.py b/rpython/tool/disassembler.py rename from lib_pypy/disassembler.py rename to rpython/tool/disassembler.py diff --git a/pypy/tool/jitlogparser/__init__.py b/rpython/tool/jitlogparser/__init__.py rename from pypy/tool/jitlogparser/__init__.py rename to rpython/tool/jitlogparser/__init__.py diff --git a/pypy/tool/jitlogparser/module_finder.py b/rpython/tool/jitlogparser/module_finder.py 
rename from pypy/tool/jitlogparser/module_finder.py rename to rpython/tool/jitlogparser/module_finder.py diff --git a/pypy/tool/jitlogparser/parser.py b/rpython/tool/jitlogparser/parser.py rename from pypy/tool/jitlogparser/parser.py rename to rpython/tool/jitlogparser/parser.py diff --git a/pypy/tool/jitlogparser/storage.py b/rpython/tool/jitlogparser/storage.py rename from pypy/tool/jitlogparser/storage.py rename to rpython/tool/jitlogparser/storage.py --- a/pypy/tool/jitlogparser/storage.py +++ b/rpython/tool/jitlogparser/storage.py @@ -5,8 +5,8 @@ import py import os -from lib_pypy.disassembler import dis -from pypy.tool.jitlogparser.module_finder import gather_all_code_objs +from rpython.tool.disassembler import dis +from rpython.tool.jitlogparser.module_finder import gather_all_code_objs class LoopStorage(object): def __init__(self, extrapath=None): diff --git a/pypy/tool/jitlogparser/test/__init__.py b/rpython/tool/jitlogparser/test/__init__.py rename from pypy/tool/jitlogparser/test/__init__.py rename to rpython/tool/jitlogparser/test/__init__.py diff --git a/pypy/tool/jitlogparser/test/logtest.log b/rpython/tool/jitlogparser/test/logtest.log rename from pypy/tool/jitlogparser/test/logtest.log rename to rpython/tool/jitlogparser/test/logtest.log diff --git a/pypy/tool/jitlogparser/test/logtest2.log b/rpython/tool/jitlogparser/test/logtest2.log rename from pypy/tool/jitlogparser/test/logtest2.log rename to rpython/tool/jitlogparser/test/logtest2.log diff --git a/pypy/tool/jitlogparser/test/test_modulefinder.py b/rpython/tool/jitlogparser/test/test_modulefinder.py rename from pypy/tool/jitlogparser/test/test_modulefinder.py rename to rpython/tool/jitlogparser/test/test_modulefinder.py --- a/pypy/tool/jitlogparser/test/test_modulefinder.py +++ b/rpython/tool/jitlogparser/test/test_modulefinder.py @@ -1,5 +1,5 @@ import py -from pypy.tool.jitlogparser.module_finder import gather_all_code_objs +from rpython.tool.jitlogparser.module_finder import gather_all_code_objs import re, sys def setup_module(mod): diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/rpython/tool/jitlogparser/test/test_parser.py rename from pypy/tool/jitlogparser/test/test_parser.py rename to rpython/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/rpython/tool/jitlogparser/test/test_parser.py @@ -1,8 +1,8 @@ -from pypy.tool.jitlogparser.parser import (SimpleParser, TraceForOpcode, - Function, adjust_bridges, - import_log, split_trace, Op, - parse_log_counts) -from pypy.tool.jitlogparser.storage import LoopStorage +from rpython.tool.jitlogparser.parser import (SimpleParser, TraceForOpcode, + Function, adjust_bridges, + import_log, split_trace, Op, + parse_log_counts) +from rpython.tool.jitlogparser.storage import LoopStorage import py, sys from rpython.jit.backend.detect_cpu import autodetect diff --git a/pypy/tool/jitlogparser/test/test_storage.py b/rpython/tool/jitlogparser/test/test_storage.py rename from pypy/tool/jitlogparser/test/test_storage.py rename to rpython/tool/jitlogparser/test/test_storage.py --- a/pypy/tool/jitlogparser/test/test_storage.py +++ b/rpython/tool/jitlogparser/test/test_storage.py @@ -1,5 +1,5 @@ import py -from pypy.tool.jitlogparser.storage import LoopStorage +from rpython.tool.jitlogparser.storage import LoopStorage def test_load_codes(): tmppath = py.test.ensuretemp('load_codes') diff --git a/pypy/tool/jitlogparser/test/x.py b/rpython/tool/jitlogparser/test/x.py rename from pypy/tool/jitlogparser/test/x.py rename to 
rpython/tool/jitlogparser/test/x.py From noreply at buildbot.pypy.org Tue Feb 25 12:23:10 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 12:23:10 +0100 (CET) Subject: [pypy-commit] jitviewer default: Import everything needed from 'rpython', not from 'pypy'. Message-ID: <20140225112310.DDC531C08F3@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r255:41c224f97da8 Date: 2014-02-25 12:19 +0100 http://bitbucket.org/pypy/jitviewer/changeset/41c224f97da8/ Log: Import everything needed from 'rpython', not from 'pypy'. diff --git a/_jitviewer/app.py b/_jitviewer/app.py --- a/_jitviewer/app.py +++ b/_jitviewer/app.py @@ -29,12 +29,12 @@ import argparse try: - import pypy + import rpython except ImportError: import __pypy__ sys.path.append(os.path.join(__pypy__.__file__, '..', '..', '..')) try: - import pypy + import rpython except ImportError: failout('Could not import pypy module, make sure to ' 'add the pypy module to PYTHONPATH') @@ -47,13 +47,21 @@ import inspect import threading import time + try: from rpython.tool.logparser import extract_category except ImportError: from pypy.tool.logparser import extract_category -from pypy.tool.jitlogparser.storage import LoopStorage -from pypy.tool.jitlogparser.parser import adjust_bridges, import_log,\ - parse_log_counts +try: + from rpython.tool.jitlogparser.storage import LoopStorage +except ImportError: + from pypy.tool.jitlogparser.storage import LoopStorage +try: + from rpython.tool.jitlogparser.parser import adjust_bridges, import_log,\ + parse_log_counts +except ImportError: + from pypy.tool.jitlogparser.parser import adjust_bridges, import_log,\ + parse_log_counts # from _jitviewer.parser import ParserWithHtmlRepr, FunctionHtml from _jitviewer.display import CodeRepr, CodeReprNoFile diff --git a/_jitviewer/parser.py b/_jitviewer/parser.py --- a/_jitviewer/parser.py +++ b/_jitviewer/parser.py @@ -1,6 +1,9 @@ import re import cgi -from pypy.tool.jitlogparser import parser +try: + from rpython.tool.jitlogparser import parser +except ImportError: + from pypy.tool.jitlogparser import parser def cssclass(cls, s, **kwds): cls = re.sub("[^\w]", "_", cls) From noreply at buildbot.pypy.org Tue Feb 25 14:17:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 14:17:15 +0100 (CET) Subject: [pypy-commit] pypy default: Complain for now when a function is both elidable (or loopinvariant), Message-ID: <20140225131715.645041C0150@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69415:6e5573ced07d Date: 2014-02-25 12:37 +0100 http://bitbucket.org/pypy/pypy/changeset/6e5573ced07d/ Log: Complain for now when a function is both elidable (or loopinvariant), and can have random effects. 
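A rough sketch (not part of this changeset; the helper names are invented) of the conflict the new check detects: @jit.elidable promises that the result depends only on the arguments, which contradicts a body whose calls the analyzer classifies as having random effects.

    import os
    from rpython.rlib import jit

    @jit.elidable
    def pure_mix(x):
        # fine: the result depends only on 'x', no external calls
        return (x * 2654435761) & 0xffffffff

    @jit.elidable
    def suspicious_size(path):
        # the call analysis presumably sees an external OS call here
        # ("random effects"), contradicting the elidable hint; this is
        # the situation the assertion below complains about
        return os.stat(path).st_size

A follow-up changeset further below turns the crash into a warning for now.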
diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -240,6 +240,13 @@ extraeffect = EffectInfo.EF_CAN_RAISE else: extraeffect = EffectInfo.EF_CANNOT_RAISE + else: + assert not loopinvariant, ( + "in operation %r: this calls a _jit_loop_invariant_ function," + " but it can have random effects") + assert not elidable, ( + "in operation %r: this calls an _elidable_function_," + " but it can have random effects") # effectinfo = effectinfo_from_writeanalyze( self.readwrite_analyzer.analyze(op, self.seen), self.cpu, From noreply at buildbot.pypy.org Tue Feb 25 14:17:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 14:17:16 +0100 (CET) Subject: [pypy-commit] pypy default: More precise crash, better explanation Message-ID: <20140225131716.9FC291C0150@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69416:8ccaadbf0ecf Date: 2014-02-25 12:58 +0100 http://bitbucket.org/pypy/pypy/changeset/8ccaadbf0ecf/ Log: More precise crash, better explanation diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -240,13 +240,19 @@ extraeffect = EffectInfo.EF_CAN_RAISE else: extraeffect = EffectInfo.EF_CANNOT_RAISE - else: - assert not loopinvariant, ( + # + # check that the result is really as expected + if loopinvariant: + assert extraeffect == EffectInfo.EF_LOOPINVARIANT, ( "in operation %r: this calls a _jit_loop_invariant_ function," - " but it can have random effects") - assert not elidable, ( + " but this contradicts other sources (e.g. it can have random" + " effects)" % (op,)) + if elidable: + assert extraeffect in (EffectInfo.EF_ELIDABLE_CANNOT_RAISE, + EffectInfo.EF_ELIDABLE_CAN_RAISE), ( "in operation %r: this calls an _elidable_function_," - " but it can have random effects") + " but this contradicts other sources (e.g. it can have random" + " effects)" % (op,)) # effectinfo = effectinfo_from_writeanalyze( self.readwrite_analyzer.analyze(op, self.seen), self.cpu, From noreply at buildbot.pypy.org Tue Feb 25 14:17:17 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 14:17:17 +0100 (CET) Subject: [pypy-commit] pypy default: Test and fix: the _nowrapper C functions couldn't be elided before. Message-ID: <20140225131717.DF1E51C0150@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69417:3c477e9543db Date: 2014-02-25 13:16 +0100 http://bitbucket.org/pypy/pypy/changeset/3c477e9543db/ Log: Test and fix: the _nowrapper C functions couldn't be elided before. 
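As a hedged sketch of the kind of declaration this is about (the C symbol below is invented for illustration): with _nowrapper=True the call cannot release the GIL, so after this fix it no longer gets random effects attached and an elidable hint on it is honoured.

    from rpython.rtyper.lltypesystem import rffi

    # hypothetical external function, for illustration only
    c_rotate_left = rffi.llexternal('illustrative_rotate_left',
                                    [rffi.ULONG, rffi.ULONG], rffi.ULONG,
                                    _nowrapper=True,
                                    elidable_function=True)

The new test below exercises the same path through rmd5's _rotateLeft helper.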
diff --git a/rpython/jit/codewriter/test/test_call.py b/rpython/jit/codewriter/test/test_call.py --- a/rpython/jit/codewriter/test/test_call.py +++ b/rpython/jit/codewriter/test/test_call.py @@ -248,3 +248,26 @@ op = block.operations[-1] call_descr = cc.getcalldescr(op) assert call_descr.extrainfo.has_random_effects() + +def test_no_random_effects_for_rotateLeft(): + from rpython.jit.backend.llgraph.runner import LLGraphCPU + from rpython.rlib.rarithmetic import r_uint + + if r_uint.BITS == 32: + py.test.skip("64-bit only") + + from rpython.rlib.rmd5 import _rotateLeft + def f(n, m): + return _rotateLeft(r_uint(n), m) + + rtyper = support.annotate(f, [7, 9]) + jitdriver_sd = FakeJitDriverSD(rtyper.annotator.translator.graphs[0]) + cc = CallControl(LLGraphCPU(rtyper), jitdrivers_sd=[jitdriver_sd]) + res = cc.find_all_graphs(FakePolicy()) + + [f_graph] = [x for x in res if x.func is f] + [block, _] = list(f_graph.iterblocks()) + op = block.operations[-1] + call_descr = cc.getcalldescr(op) + assert not call_descr.extrainfo.has_random_effects() + assert call_descr.extrainfo.check_is_elidable() diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -116,12 +116,14 @@ # default case: # invoke the around-handlers only for "not too small" external calls; # sandboxsafe is a hint for "too-small-ness" (e.g. math functions). - invoke_around_handlers = not sandboxsafe + # Also, _nowrapper functions cannot release the GIL, by default. + invoke_around_handlers = not sandboxsafe and not _nowrapper if random_effects_on_gcobjs not in (False, True): random_effects_on_gcobjs = ( invoke_around_handlers or # because it can release the GIL has_callback) # because the callback can do it + assert not (elidable_function and random_effects_on_gcobjs) funcptr = lltype.functionptr(ext_type, name, external='C', compilation_info=compilation_info, From noreply at buildbot.pypy.org Tue Feb 25 14:17:19 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 14:17:19 +0100 (CET) Subject: [pypy-commit] pypy default: Replace the crash with a warning for now Message-ID: <20140225131719.17B501C0150@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69418:040f3ac28afb Date: 2014-02-25 14:02 +0100 http://bitbucket.org/pypy/pypy/changeset/040f3ac28afb/ Log: Replace the crash with a warning for now diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -243,13 +243,15 @@ # # check that the result is really as expected if loopinvariant: - assert extraeffect == EffectInfo.EF_LOOPINVARIANT, ( + if extraeffect != EffectInfo.EF_LOOPINVARIANT: + from rpython.jit.codewriter.policy import log; log.WARNING( "in operation %r: this calls a _jit_loop_invariant_ function," " but this contradicts other sources (e.g. it can have random" " effects)" % (op,)) if elidable: - assert extraeffect in (EffectInfo.EF_ELIDABLE_CANNOT_RAISE, - EffectInfo.EF_ELIDABLE_CAN_RAISE), ( + if extraeffect not in (EffectInfo.EF_ELIDABLE_CANNOT_RAISE, + EffectInfo.EF_ELIDABLE_CAN_RAISE): + from rpython.jit.codewriter.policy import log; log.WARNING( "in operation %r: this calls an _elidable_function_," " but this contradicts other sources (e.g. 
it can have random" " effects)" % (op,)) From noreply at buildbot.pypy.org Tue Feb 25 14:17:20 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 14:17:20 +0100 (CET) Subject: [pypy-commit] pypy default: Write down a good idea found in module/_rawffi/alt/interp_ffitype.py. Message-ID: <20140225131720.2FB4C1C0150@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69419:1a6a0914772c Date: 2014-02-25 14:16 +0100 http://bitbucket.org/pypy/pypy/changeset/1a6a0914772c/ Log: Write down a good idea found in module/_rawffi/alt/interp_ffitype.py. diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -14,6 +14,8 @@ _immutable_fields_ = ['size?', 'name', 'name_position'] # note that 'size' is not strictly immutable, because it can change # from -1 to the real value in the W_CTypeStruct subclass. + # XXX this could be improved with an elidable method get_size() + # that raises in case it's still -1... cast_anything = False is_primitive_integer = False From noreply at buildbot.pypy.org Tue Feb 25 14:23:51 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 14:23:51 +0100 (CET) Subject: [pypy-commit] pypy default: Fix for @jit.elidable. Previously, it would elidably return None, even Message-ID: <20140225132351.49D9D1C08B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69420:35e7f867cc40 Date: 2014-02-25 14:23 +0100 http://bitbucket.org/pypy/pypy/changeset/35e7f867cc40/ Log: Fix for @jit.elidable. Previously, it would elidably return None, even though it doesn't mean that it will stay None forever. diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -159,7 +159,7 @@ subentry = ProfilerSubEntry(entry.frame) self.calls[entry] = subentry return subentry - return None + raise class ProfilerContext(object): def __init__(self, profobj, entry): @@ -181,8 +181,11 @@ entry._stop(tt, it) if profobj.subcalls and self.previous: caller = jit.promote(self.previous.entry) - subentry = caller._get_or_make_subentry(entry, False) - if subentry is not None: + try: + subentry = caller._get_or_make_subentry(entry, False) + except KeyError: + pass + else: subentry._stop(tt, it) @@ -308,7 +311,7 @@ entry = ProfilerEntry(f_code) self.data[f_code] = entry return entry - return None + raise @jit.elidable def _get_or_make_builtin_entry(self, key, make=True): @@ -319,7 +322,7 @@ entry = ProfilerEntry(self.space.wrap(key)) self.builtin_data[key] = entry return entry - return None + raise def _enter_call(self, f_code): # we have a superb gc, no point in freelist :) @@ -332,8 +335,11 @@ if context is None: return self = jit.promote(self) - entry = self._get_or_make_entry(f_code, False) - if entry is not None: + try: + entry = self._get_or_make_entry(f_code, False) + except KeyError: + pass + else: context._stop(self, entry) self.current_context = context.previous @@ -347,8 +353,11 @@ if context is None: return self = jit.promote(self) - entry = self._get_or_make_builtin_entry(key, False) - if entry is not None: + try: + entry = self._get_or_make_builtin_entry(key, False) + except KeyError: + pass + else: context._stop(self, entry) self.current_context = context.previous From noreply at buildbot.pypy.org Tue Feb 25 14:58:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 14:58:01 +0100 
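The idea being written down, reduced to a self-contained sketch (class and field names simplified here, not the real W_CTypeStruct): a lazily-filled field can still be read from an elidable helper if the helper raises while the value is unknown, so the JIT never constant-folds a placeholder that will change later.

    from rpython.rlib import jit

    class LazySized(object):
        _immutable_fields_ = ['size?']    # quasi-immutable: filled in once, later
        size = -1

        @jit.elidable
        def get_size(self):
            if self.size == -1:
                raise ValueError          # not known yet: caller takes a slow path
            return self.size

This is the same shape as the _lsprof fix a little further below, where elidable helpers were changed to raise instead of elidably returning None.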
(CET) Subject: [pypy-commit] pypy default: Fix (probably) this jit.elidable here Message-ID: <20140225135801.D9D9D1C3973@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69421:f3e717c94913 Date: 2014-02-25 14:57 +0100 http://bitbucket.org/pypy/pypy/changeset/f3e717c94913/ Log: Fix (probably) this jit.elidable here diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -622,7 +622,6 @@ sys.maxint == 2147483647) - at jit.elidable def _string_to_int_or_long(space, w_source, string, base=10): w_longval = None value = 0 diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py --- a/rpython/rlib/rarithmetic.py +++ b/rpython/rlib/rarithmetic.py @@ -709,5 +709,4 @@ result = ovfcheck(result + digit) except OverflowError: raise ParseStringOverflowError(p) - - +string_to_int._elidable_function_ = True From noreply at buildbot.pypy.org Tue Feb 25 15:23:18 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 25 Feb 2014 15:23:18 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: add demo_random.c Message-ID: <20140225142318.CC8601C0132@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r859:2028836db0fa Date: 2014-02-25 15:22 +0100 http://bitbucket.org/pypy/stmgc/changeset/2028836db0fa/ Log: add demo_random.c diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c new file mode 100644 --- /dev/null +++ b/c7/demo/demo_random.c @@ -0,0 +1,391 @@ +#include +#include +#include +#include +#include +#include + +#include "stmgc.h" + +#define NUMTHREADS 2 +#define STEPS_PER_THREAD 5000 +#define THREAD_STARTS 100 // how many restarts of threads +#define SHARED_ROOTS 3 +#define MAXROOTS 1000 + + +// SUPPORT +struct node_s; +typedef TLPREFIX struct node_s node_t; +typedef node_t* nodeptr_t; +typedef object_t* objptr_t; + +struct node_s { + struct object_s hdr; + long value; + nodeptr_t next; +}; + + +static sem_t done; +__thread stm_thread_local_t stm_thread_local; + +// global and per-thread-data +time_t default_seed; +objptr_t shared_roots[SHARED_ROOTS]; + +struct thread_data { + unsigned int thread_seed; + objptr_t roots[MAXROOTS]; + int num_roots; + int num_roots_at_transaction_start; + int steps_left; +}; +__thread struct thread_data td; + + +#define PUSH_ROOT(p) (*(stm_thread_local.shadowstack++) = (object_t *)(p)) +#define POP_ROOT(p) ((p) = (typeof(p))*(--stm_thread_local.shadowstack)) + +void init_shadow_stack(void) +{ + object_t **s = (object_t **)malloc(1000 * sizeof(object_t *)); + assert(s); + stm_thread_local.shadowstack = s; + stm_thread_local.shadowstack_base = s; +} + +void done_shadow_stack(void) +{ + free(stm_thread_local.shadowstack_base); + stm_thread_local.shadowstack = NULL; + stm_thread_local.shadowstack_base = NULL; +} + + +ssize_t stmcb_size_rounded_up(struct object_s *ob) +{ + return sizeof(struct node_s); +} + +void stmcb_trace(struct object_s *obj, void visit(object_t **)) +{ + struct node_s *n; + n = (struct node_s*)obj; + visit((object_t **)&n->next); +} + +void _push_shared_roots() +{ + int i; + for (i = 0; i < SHARED_ROOTS; i++) { + PUSH_ROOT(shared_roots[i]); + } +} + +void _pop_shared_roots() +{ + int i; + for (i = 0; i < SHARED_ROOTS; i++) { + POP_ROOT(shared_roots[SHARED_ROOTS - i - 1]); + } +} + +int get_rand(int max) +{ + if (max == 0) + return 0; + return (int)(rand_r(&td.thread_seed) % (unsigned int)max); +} + +objptr_t get_random_root() +{ + int num = get_rand(2); + if (num == 0 && td.num_roots > 
0) { + num = get_rand(td.num_roots); + return td.roots[num]; + } + else { + num = get_rand(SHARED_ROOTS); + return shared_roots[num]; + } +} + +void reload_roots() +{ + int i; + assert(td.num_roots == td.num_roots_at_transaction_start); + for (i = td.num_roots_at_transaction_start - 1; i >= 0; i--) { + if (td.roots[i]) + POP_ROOT(td.roots[i]); + } + + for (i = 0; i < td.num_roots_at_transaction_start; i++) { + if (td.roots[i]) + PUSH_ROOT(td.roots[i]); + } +} + +void push_roots() +{ + int i; + for (i = td.num_roots_at_transaction_start; i < td.num_roots; i++) { + if (td.roots[i]) + PUSH_ROOT(td.roots[i]); + } +} + +void pop_roots() +{ + int i; + for (i = td.num_roots - 1; i >= td.num_roots_at_transaction_start; i--) { + if (td.roots[i]) + POP_ROOT(td.roots[i]); + } +} + +void del_root(int idx) +{ + int i; + assert(idx >= td.num_roots_at_transaction_start); + + for (i = idx; i < td.num_roots - 1; i++) + td.roots[i] = td.roots[i + 1]; + td.num_roots--; +} + +void add_root(objptr_t r) +{ + if (r && td.num_roots < MAXROOTS) { + td.roots[td.num_roots++] = r; + } +} + + +void read_barrier(objptr_t p) +{ + if (p != NULL) { + stm_read(p); + } +} + +void write_barrier(objptr_t p) +{ + if (p != NULL) { + stm_write(p); + } +} + + + +objptr_t simple_events(objptr_t p, objptr_t _r) +{ + nodeptr_t w_r; + int k = get_rand(8); + int num; + + switch (k) { + case 0: // remove a root + if (td.num_roots > td.num_roots_at_transaction_start) { + num = td.num_roots_at_transaction_start + + get_rand(td.num_roots - td.num_roots_at_transaction_start); + del_root(num); + } + break; + case 1: // add 'p' to roots + add_root(p); + break; + case 2: // set 'p' to point to a root + if (_r) + p = _r; + break; + case 3: // allocate fresh 'p' + push_roots(); + p = stm_allocate(sizeof(struct node_s)); + pop_roots(); + /* reload_roots not necessary, all are old after start_transaction */ + break; + case 4: // read and validate 'p' + read_barrier(p); + break; + case 5: // only do a stm_write_barrier + write_barrier(p); + break; + case 6: // follow p->next + if (p) { + read_barrier(p); + p = (objptr_t)(((nodeptr_t)(p))->next); + } + break; + case 7: // set 'p' as *next in one of the roots + write_barrier(_r); + w_r = (nodeptr_t)_r; + w_r->next = (nodeptr_t)p; + break; + } + return p; +} + + +objptr_t do_step(objptr_t p) +{ + objptr_t _r; + int k; + + _r = get_random_root(); + k = get_rand(11); + + if (k < 10) + p = simple_events(p, _r); + else if (get_rand(20) == 1) { + return (objptr_t)-1; // break current + } + return p; +} + + + +void setup_thread() +{ + memset(&td, 0, sizeof(struct thread_data)); + + /* stupid check because gdb shows garbage + in td.roots: */ + int i; + for (i = 0; i < MAXROOTS; i++) + assert(td.roots[i] == NULL); + + td.thread_seed = default_seed++; + td.steps_left = STEPS_PER_THREAD; + td.num_roots = 0; + td.num_roots_at_transaction_start = 0; +} + + + +void *demo_random(void *arg) +{ + int status; + stm_register_thread_local(&stm_thread_local); + init_shadow_stack(); + + /* forever on the shadowstack: */ + _push_shared_roots(); + + setup_thread(); + + objptr_t p = NULL; + stm_jmpbuf_t here; + + STM_START_TRANSACTION(&stm_thread_local, here); + assert(td.num_roots >= td.num_roots_at_transaction_start); + td.num_roots = td.num_roots_at_transaction_start; + p = NULL; + pop_roots(); /* does nothing.. 
*/ + reload_roots(); + + while (td.steps_left-->0) { + if (td.steps_left % 8 == 0) + fprintf(stdout, "#"); + + p = do_step(p); + + if (p == (objptr_t)-1) { + push_roots(); + stm_commit_transaction(); + + td.num_roots_at_transaction_start = td.num_roots; + + STM_START_TRANSACTION(&stm_thread_local, here); + td.num_roots = td.num_roots_at_transaction_start; + p = NULL; + pop_roots(); + reload_roots(); + } + } + stm_commit_transaction(); + + done_shadow_stack(); + stm_unregister_thread_local(&stm_thread_local); + + status = sem_post(&done); assert(status == 0); + return NULL; +} + +void newthread(void*(*func)(void*), void *arg) +{ + pthread_t th; + int status = pthread_create(&th, NULL, func, arg); + if (status != 0) + abort(); + pthread_detach(th); + printf("started new thread\n"); +} + + +void setup_globals() +{ + int i; + + stm_start_inevitable_transaction(&stm_thread_local); + for (i = 0; i < SHARED_ROOTS; i++) { + shared_roots[i] = stm_allocate(sizeof(struct node_s)); + PUSH_ROOT(shared_roots[i]); + } + stm_commit_transaction(); + + /* make them OLD */ + + stm_start_inevitable_transaction(&stm_thread_local); + /* update now old references: */ + _pop_shared_roots(); + _push_shared_roots(); + stm_commit_transaction(); + /* leave them on this shadow stack forever for major collections */ +} + +int main(void) +{ + int i, status; + + /* pick a random seed from the time in seconds. + A bit pointless for now... because the interleaving of the + threads is really random. */ + default_seed = time(NULL); + printf("running with seed=%lld\n", (long long)default_seed); + + status = sem_init(&done, 0, 0); + assert(status == 0); + + + stm_setup(); + stm_register_thread_local(&stm_thread_local); + init_shadow_stack(); + + setup_globals(); + + int thread_starts = NUMTHREADS * THREAD_STARTS; + for (i = 0; i < NUMTHREADS; i++) { + newthread(demo_random, NULL); + thread_starts--; + } + + for (i=0; i < NUMTHREADS * THREAD_STARTS; i++) { + status = sem_wait(&done); + assert(status == 0); + printf("thread finished\n"); + if (thread_starts) { + thread_starts--; + newthread(demo_random, NULL); + } + } + + printf("Test OK!\n"); + + _pop_shared_roots(); + done_shadow_stack(); + stm_unregister_thread_local(&stm_thread_local); + stm_teardown(); + + return 0; +} From noreply at buildbot.pypy.org Tue Feb 25 15:23:20 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 25 Feb 2014 15:23:20 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: fix abort_if_needed Message-ID: <20140225142320.051011C0132@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r860:a689a41e2fcd Date: 2014-02-25 15:22 +0100 http://bitbucket.org/pypy/stmgc/changeset/a689a41e2fcd/ Log: fix abort_if_needed diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -191,7 +191,7 @@ break; case TS_MUST_ABORT: - abort_with_mutex(); + stm_abort_transaction(); default: assert(!"commit: bad transaction_state"); From noreply at buildbot.pypy.org Tue Feb 25 15:28:04 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 15:28:04 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Add the WTM abstract. Message-ID: <20140225142804.5FC3B1C0150@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5161:ffa0a239d1a2 Date: 2014-02-25 15:27 +0100 http://bitbucket.org/pypy/extradoc/changeset/ffa0a239d1a2/ Log: Add the WTM abstract. 
diff --git a/talk/wtm2014/abstract.rst b/talk/wtm2014/abstract.rst new file mode 100644 --- /dev/null +++ b/talk/wtm2014/abstract.rst @@ -0,0 +1,47 @@ +Abstract +-------- + +As part of the PyPy project, we explore the usage of transactional +memory (TM) to enable parallelism for high-level, dynamic languages like +Python or Ruby. + +Most current software TM (STM) systems suffer from a big overhead when +they run on a single thread only (usually between 2-5x slowdown). They +try to scale to a large number of CPUs for the benefit of +parallelization to be greater than the penalty of the overhead. On the +other hand, while also software-based, the system presented here +initially focuses on a low CPU count (< 8). It uses an approach that can +keep the single-thread overhead very low (initial experiments with a +simple lisp interpreter suggest around 15%). As a consequence we already +see great speed-ups over single-threaded, non-STM execution by only +using 2 CPU cores. We achieve this with very-low-overhead read barriers +and very-low-overhead fast paths of write barriers. The enabling +mechanism, the Linux-only system call "remap_file_pages", allows for +creating several "views" on partially shared memory; every thread sees +one of these views. + +Our goal is to support a mixture of short to very long transactions. We +have an object-based STM system with an integrated GC handling the +typical high allocation rates of dynamic languages; in particular, it is +a generational GC, and the write barrier combines GC and STM roles, +always taking the fast path for recently-allocated objects. + +The next step is to finish integrating this system with PyPy, the Python +interpreter in Python, and its Just-In-Time compiler. This is +relatively straightforward and partly done already. We believe that the +result has got the potential to give good enough performance to rival or +exceed the HTM experiments which have been done before on Ruby [1]. +Future considerations also include optionally adding a hybrid (HyTM) +component to our system. + + +-------- + +[1] Eliminating Global Interpreter Locks in Ruby through Hardware +Transactional Memory. + +Rei Odaira, Jose G. Castanos and Hisanobu Tomari. + +PPoPP '14 Proceedings of the 19th ACM SIGPLAN symposium on Principles and practice of parallel programming + +Pages 131-142 From noreply at buildbot.pypy.org Tue Feb 25 15:28:05 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 15:28:05 +0100 (CET) Subject: [pypy-commit] extradoc extradoc: Add a bug here (bug tracker is read-only!) Message-ID: <20140225142805.8350E1C0150@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: extradoc Changeset: r5162:279c414f8907 Date: 2014-02-25 15:27 +0100 http://bitbucket.org/pypy/extradoc/changeset/279c414f8907/ Log: Add a bug here (bug tracker is read-only!) diff --git a/planning/jit.txt b/planning/jit.txt --- a/planning/jit.txt +++ b/planning/jit.txt @@ -201,3 +201,14 @@ - Movinging loop-invariant setitems out of the loops entierly. + +Bugs (bug tracker is down right now) +------------------------------------ + + at jit.elidable annotations are completely ignored if the function has +"random side-effects". In 040f3ac28afb we display a warning, at least. +In order to turn the warning into an error, we need to review the +numerous places in PyPy where the warning occurs. 
The initial list: + + http://bpaste.net/show/182628/ + From noreply at buildbot.pypy.org Tue Feb 25 15:28:20 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 15:28:20 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Fix complex <=> float comparison. Message-ID: <20140225142820.3DB4D1C0150@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69422:bc9db28fcce0 Date: 2014-02-25 15:06 +0100 http://bitbucket.org/pypy/pypy/changeset/bc9db28fcce0/ Log: Fix complex <=> float comparison. diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -410,7 +410,8 @@ return space.newbool((self.realval == w_other.realval) and (self.imagval == w_other.imagval)) if (space.isinstance_w(w_other, space.w_int) or - space.isinstance_w(w_other, space.w_long)): + space.isinstance_w(w_other, space.w_long) or + space.isinstance_w(w_other, space.w_float)): if self.imagval: return space.w_False return space.eq(space.newfloat(self.realval), w_other) From noreply at buildbot.pypy.org Tue Feb 25 15:28:21 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 15:28:21 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Remove AppTestKeywordsToBuiltinSanity. I don't think it makes any sense nowadays. Message-ID: <20140225142821.70BE31C0150@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69423:eb5377110c02 Date: 2014-02-25 15:27 +0100 http://bitbucket.org/pypy/pypy/changeset/eb5377110c02/ Log: Remove AppTestKeywordsToBuiltinSanity. I don't think it makes any sense nowadays. diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -823,46 +823,3 @@ assert space.is_true(w_res) assert len(called) == 1 assert isinstance(called[0], argument.Arguments) - - -class AppTestKeywordsToBuiltinSanity(object): - - def test_type(self): - class X(object): - def __init__(self, **kw): - pass - clash = type.__call__.func_code.co_varnames[0] - - X(**{clash: 33}) - type.__call__(X, **{clash: 33}) - - def test_object_new(self): - class X(object): - def __init__(self, **kw): - pass - clash = object.__new__.func_code.co_varnames[0] - - X(**{clash: 33}) - object.__new__(X, **{clash: 33}) - - - def test_dict_new(self): - clash = dict.__new__.func_code.co_varnames[0] - - dict(**{clash: 33}) - dict.__new__(dict, **{clash: 33}) - - def test_dict_init(self): - d = {} - clash = dict.__init__.func_code.co_varnames[0] - - d.__init__(**{clash: 33}) - dict.__init__(d, **{clash: 33}) - - def test_dict_update(self): - d = {} - clash = dict.update.func_code.co_varnames[0] - - d.update(**{clash: 33}) - dict.update(d, **{clash: 33}) - From noreply at buildbot.pypy.org Tue Feb 25 15:59:57 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 15:59:57 +0100 (CET) Subject: [pypy-commit] jitviewer default: This runs fine in CPython too, now. Message-ID: <20140225145957.A65FF1C03FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r256:e0b82d426d01 Date: 2014-02-25 15:59 +0100 http://bitbucket.org/pypy/jitviewer/changeset/e0b82d426d01/ Log: This runs fine in CPython too, now. 
diff --git a/bin/jitviewer.py b/bin/jitviewer.py --- a/bin/jitviewer.py +++ b/bin/jitviewer.py @@ -6,10 +6,5 @@ pythonpath = os.path.dirname(os.path.dirname(script_path)) sys.path.append(pythonpath) -# Check we are running with PyPy first. -if not '__pypy__' in sys.builtin_module_names: - from _jitviewer.misc import failout - failout("jitviewer must be run with PyPy") - from _jitviewer.app import main main(sys.argv) From noreply at buildbot.pypy.org Tue Feb 25 16:00:52 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Tue, 25 Feb 2014 16:00:52 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: fix for race condition (see comment in pages.h for PRIVATE_PAGE) Message-ID: <20140225150052.824231C03FC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r861:4915e227b68f Date: 2014-02-25 16:00 +0100 http://bitbucket.org/pypy/stmgc/changeset/4915e227b68f/ Log: fix for race condition (see comment in pages.h for PRIVATE_PAGE) diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -110,6 +110,7 @@ static void _pages_privatize(uintptr_t pagenum, uintptr_t count, bool full) { + /* narrow the range of pages to privatize from the end: */ while (flag_page_private[pagenum + count - 1] == PRIVATE_PAGE) { if (!--count) return; diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -19,17 +19,21 @@ static void pages_initialize_shared(uintptr_t pagenum, uintptr_t count); //static void pages_make_shared_again(uintptr_t pagenum, uintptr_t count); +static void mutex_pages_lock(void); +static void mutex_pages_unlock(void); + inline static void pages_privatize(uintptr_t pagenum, uintptr_t count, bool full) { + mutex_pages_lock(); while (flag_page_private[pagenum] == PRIVATE_PAGE) { - if (!--count) + if (!--count) { + mutex_pages_unlock(); return; + } pagenum++; } + mutex_pages_unlock(); _pages_privatize(pagenum, count, full); } -static void mutex_pages_lock(void); -static void mutex_pages_unlock(void); - //static bool is_fully_in_shared_pages(object_t *obj); -- not needed? From noreply at buildbot.pypy.org Tue Feb 25 16:19:44 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 16:19:44 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Back out changeset eb5377110c02. Message-ID: <20140225151944.50CD31C0150@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69424:2282f75f89c8 Date: 2014-02-25 16:09 +0100 http://bitbucket.org/pypy/pypy/changeset/2282f75f89c8/ Log: Back out changeset eb5377110c02. 
diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -823,3 +823,46 @@ assert space.is_true(w_res) assert len(called) == 1 assert isinstance(called[0], argument.Arguments) + + +class AppTestKeywordsToBuiltinSanity(object): + + def test_type(self): + class X(object): + def __init__(self, **kw): + pass + clash = type.__call__.func_code.co_varnames[0] + + X(**{clash: 33}) + type.__call__(X, **{clash: 33}) + + def test_object_new(self): + class X(object): + def __init__(self, **kw): + pass + clash = object.__new__.func_code.co_varnames[0] + + X(**{clash: 33}) + object.__new__(X, **{clash: 33}) + + + def test_dict_new(self): + clash = dict.__new__.func_code.co_varnames[0] + + dict(**{clash: 33}) + dict.__new__(dict, **{clash: 33}) + + def test_dict_init(self): + d = {} + clash = dict.__init__.func_code.co_varnames[0] + + d.__init__(**{clash: 33}) + dict.__init__(d, **{clash: 33}) + + def test_dict_update(self): + d = {} + clash = dict.update.func_code.co_varnames[0] + + d.update(**{clash: 33}) + dict.update(d, **{clash: 33}) + From noreply at buildbot.pypy.org Tue Feb 25 16:19:45 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 16:19:45 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: I think this is what we actually want to test: normally providing a 'self' keyword when calling a type fails, but not if the first keyword of the type's __init__ method is renamed to something else than 'self'. Message-ID: <20140225151945.776A21C0150@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69425:c890c07b80a9 Date: 2014-02-25 16:18 +0100 http://bitbucket.org/pypy/pypy/changeset/c890c07b80a9/ Log: I think this is what we actually want to test: normally providing a 'self' keyword when calling a type fails, but not if the first keyword of the type's __init__ method is renamed to something else than 'self'. diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -826,10 +826,9 @@ class AppTestKeywordsToBuiltinSanity(object): - def test_type(self): class X(object): - def __init__(self, **kw): + def __init__(myself, **kw): pass clash = type.__call__.func_code.co_varnames[0] @@ -845,7 +844,6 @@ X(**{clash: 33}) object.__new__(X, **{clash: 33}) - def test_dict_new(self): clash = dict.__new__.func_code.co_varnames[0] @@ -865,4 +863,3 @@ d.update(**{clash: 33}) dict.update(d, **{clash: 33}) - From noreply at buildbot.pypy.org Tue Feb 25 16:21:36 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 16:21:36 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: I think that with a REMAPPING_PAGE intermediate value it becomes Message-ID: <20140225152136.954DC1C0132@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r862:a396ed3087b8 Date: 2014-02-25 16:21 +0100 http://bitbucket.org/pypy/stmgc/changeset/a396ed3087b8/ Log: I think that with a REMAPPING_PAGE intermediate value it becomes simply this. 
diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -93,7 +93,7 @@ void *localpg = stm_object_pages + localpgoff * 4096UL; void *otherpg = stm_object_pages + otherpgoff * 4096UL; - memset(flag_page_private + pagenum, PRIVATE_PAGE, count); + memset(flag_page_private + pagenum, REMAPPING_PAGE, count); d_remap_file_pages(localpg, count * 4096, pgoff2); uintptr_t i; if (full) { @@ -106,6 +106,8 @@ if (count > 1) pagecopy(localpg + 4096 * (count-1), otherpg + 4096 * (count-1)); } + write_fence(); + memset(flag_page_private + pagenum, PRIVATE_PAGE, count); } static void _pages_privatize(uintptr_t pagenum, uintptr_t count, bool full) diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -7,9 +7,11 @@ physical page (the one that is within the segment 0 mmap address). */ SHARED_PAGE, - /* Page is private for each segment. If we obtain this value outside - a mutex_pages_lock(), there might be a race: the value can say - PRIVATE_PAGE before the page is really un-shared. */ + /* For only one range of pages at a time, around the call to + remap_file_pages() that un-shares the pages (SHARED -> PRIVATE). */ + REMAPPING_PAGE, + + /* Page is private for each segment. */ PRIVATE_PAGE, }; @@ -24,15 +26,14 @@ inline static void pages_privatize(uintptr_t pagenum, uintptr_t count, bool full) { - mutex_pages_lock(); + /* This is written a bit carefully so that a call with a constant + count == 1 will turn this loop into just one "if". */ while (flag_page_private[pagenum] == PRIVATE_PAGE) { if (!--count) { - mutex_pages_unlock(); return; } pagenum++; } - mutex_pages_unlock(); _pages_privatize(pagenum, count, full); } From noreply at buildbot.pypy.org Tue Feb 25 16:53:25 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 16:53:25 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Add a debugging check that fails right now if we run more than 2 threads Message-ID: <20140225155325.45BE81C03FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r863:a1bc43587591 Date: 2014-02-25 16:53 +0100 http://bitbucket.org/pypy/stmgc/changeset/a1bc43587591/ Log: Add a debugging check that fails right now if we run more than 2 threads diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -160,6 +160,9 @@ STM_PSEGMENT->transaction_state = (jmpbuf != NULL ? TS_REGULAR : TS_INEVITABLE); STM_SEGMENT->jmpbuf_ptr = jmpbuf; +#ifndef NDEBUG + STM_PSEGMENT->running_pthread = pthread_self(); +#endif STM_PSEGMENT->shadowstack_at_start_of_transaction = tl->shadowstack; STM_SEGMENT->nursery_end = NURSERY_END; @@ -340,6 +343,7 @@ { assert(!_has_mutex()); assert(STM_PSEGMENT->safe_point == SP_RUNNING); + assert(STM_PSEGMENT->running_pthread == pthread_self()); bool has_any_overflow_object = (STM_PSEGMENT->objects_pointing_to_nursery != NULL); @@ -449,6 +453,7 @@ default: assert(!"abort: bad transaction_state"); } + assert(STM_PSEGMENT->running_pthread == pthread_self()); /* throw away the content of the nursery */ throw_away_nursery(); diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -5,6 +5,7 @@ #include #include #include +#include /************************************************************/ @@ -117,6 +118,11 @@ /* In case of abort, we restore the 'shadowstack' field. 
*/ object_t **shadowstack_at_start_of_transaction; + + /* For debugging */ +#ifndef NDEBUG + pthread_t running_pthread; +#endif }; enum /* safe_point */ { diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -1,4 +1,3 @@ -#include #include #include #include From noreply at buildbot.pypy.org Tue Feb 25 17:16:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Tue, 25 Feb 2014 17:16:29 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Add *_no_abort() versions for cond_wait() and mutex_lock(). Needed Message-ID: <20140225161629.3454E1C0132@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r864:30bde5ed0833 Date: 2014-02-25 17:16 +0100 http://bitbucket.org/pypy/stmgc/changeset/30bde5ed0833/ Log: Add *_no_abort() versions for cond_wait() and mutex_lock(). Needed if we don't have our own segment so far. diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -7,7 +7,7 @@ #include "stmgc.h" -#define NUMTHREADS 2 +#define NUMTHREADS 3 #define STEPS_PER_THREAD 5000 #define THREAD_STARTS 100 // how many restarts of threads #define SHARED_ROOTS 3 diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -149,7 +149,7 @@ void _stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf) { - mutex_lock(); + mutex_lock_no_abort(); /* GS invalid before this point! */ acquire_thread_segment(tl); diff --git a/c7/stm/fprintcolor.c b/c7/stm/fprintcolor.c --- a/c7/stm/fprintcolor.c +++ b/c7/stm/fprintcolor.c @@ -13,7 +13,8 @@ char buffer[2048]; va_list ap; int result; - int size = (int)sprintf(buffer, "\033[%dm", dprintfcolor()); + int size = (int)sprintf(buffer, "\033[%dm[%lx]", dprintfcolor(), + (long)pthread_self()); assert(size >= 0); va_start(ap, format); diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -65,13 +65,17 @@ stm_fatalerror("syscall(arch_prctl, ARCH_SET_GS): %m\n"); } -static inline void mutex_lock(void) +static inline void mutex_lock_no_abort(void) { assert(!_has_mutex_here); if (UNLIKELY(pthread_mutex_lock(&sync_ctl.global_mutex) != 0)) stm_fatalerror("pthread_mutex_lock: %m\n"); assert((_has_mutex_here = true, 1)); +} +static inline void mutex_lock(void) +{ + mutex_lock_no_abort(); if (STM_PSEGMENT->transaction_state == TS_MUST_ABORT) abort_with_mutex(); } @@ -87,7 +91,7 @@ assert((_has_mutex_here = false, 1)); } -static inline void cond_wait(enum cond_type_e ctype) +static inline void cond_wait_no_abort(enum cond_type_e ctype) { #ifdef STM_NO_COND_WAIT stm_fatalerror("*** cond_wait/%d called!\n", (int)ctype); @@ -97,7 +101,11 @@ if (UNLIKELY(pthread_cond_wait(&sync_ctl.cond[ctype], &sync_ctl.global_mutex) != 0)) stm_fatalerror("pthread_cond_wait/%d: %m\n", (int)ctype); +} +static inline void cond_wait(enum cond_type_e ctype) +{ + cond_wait_no_abort(ctype); if (STM_PSEGMENT->transaction_state == TS_MUST_ABORT) abort_with_mutex(); } @@ -148,7 +156,7 @@ /* Wait and retry. It is guaranteed that any thread releasing its segment will do so by acquiring the mutex and calling cond_signal(C_RELEASE_THREAD_SEGMENT). 
*/ - cond_wait(C_RELEASE_THREAD_SEGMENT); + cond_wait_no_abort(C_RELEASE_THREAD_SEGMENT); goto retry; got_num: From noreply at buildbot.pypy.org Tue Feb 25 17:57:56 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 17:57:56 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: Kill most of pypy.objspace.std.model, move the type registration to pypy.objspace.std.objspace. Message-ID: <20140225165757.014231C0150@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69426:c13b140abc63 Date: 2014-02-25 17:56 +0100 http://bitbucket.org/pypy/pypy/changeset/c13b140abc63/ Log: Kill most of pypy.objspace.std.model, move the type registration to pypy.objspace.std.objspace. diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -584,6 +584,16 @@ _divmod, ovf2small=_divmod_ovf2small) +def setup_prebuilt(space): + if space.config.objspace.std.withprebuiltint: + W_IntObject.PREBUILT = [] + for i in range(space.config.objspace.std.prebuiltintfrom, + space.config.objspace.std.prebuiltintto): + W_IntObject.PREBUILT.append(W_IntObject(i)) + else: + W_IntObject.PREBUILT = None + + def wrapint(space, x): if not space.config.objspace.std.withprebuiltint: return W_IntObject(x) diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -1,232 +1,19 @@ -""" -The full list of which Python types and which implementation we want -to provide in this version of PyPy, along with conversion rules. -""" +from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.baseobjspace import W_Root, ObjSpace -import pypy.interpreter.pycode -import pypy.interpreter.special - -option_to_typename = { - "withsmalllong" : ["smalllongobject.W_SmallLongObject"], - "withstrbuf" : ["strbufobject.W_StringBufferObject"], -} IDTAG_INT = 1 IDTAG_LONG = 3 IDTAG_FLOAT = 5 IDTAG_COMPLEX = 7 -class StdTypeModel: - - def __init__(self, config): - """NOT_RPYTHON: inititialization only""" - self.config = config - # All the Python types that we want to provide in this StdObjSpace - - # The object implementations that we want to 'link' into PyPy must be - # imported here. This registers them into the multimethod tables, - # *before* the type objects are built from these multimethod tables. 
- from pypy.objspace.std import objectobject - from pypy.objspace.std import boolobject - from pypy.objspace.std import intobject - from pypy.objspace.std import floatobject - from pypy.objspace.std import complexobject - from pypy.objspace.std import tupleobject - from pypy.objspace.std import listobject - from pypy.objspace.std import dictmultiobject - from pypy.objspace.std import setobject - from pypy.objspace.std import basestringtype - from pypy.objspace.std import bytesobject - from pypy.objspace.std import bytearrayobject - from pypy.objspace.std import typeobject - from pypy.objspace.std import sliceobject - from pypy.objspace.std import longobject - from pypy.objspace.std import noneobject - from pypy.objspace.std import iterobject - from pypy.objspace.std import unicodeobject - from pypy.objspace.std import dictproxyobject - from pypy.objspace.std import proxyobject - - - self.pythontypes = [] - self.pythontypes.append(objectobject.W_ObjectObject.typedef) - self.pythontypes.append(typeobject.W_TypeObject.typedef) - self.pythontypes.append(noneobject.W_NoneObject.typedef) - self.pythontypes.append(tupleobject.W_TupleObject.typedef) - self.pythontypes.append(listobject.W_ListObject.typedef) - self.pythontypes.append(dictmultiobject.W_DictMultiObject.typedef) - self.pythontypes.append(setobject.W_SetObject.typedef) - self.pythontypes.append(setobject.W_FrozensetObject.typedef) - self.pythontypes.append(iterobject.W_AbstractSeqIterObject.typedef) - self.pythontypes.append(basestringtype.basestring_typedef) - self.pythontypes.append(bytesobject.W_BytesObject.typedef) - self.pythontypes.append(bytearrayobject.W_BytearrayObject.typedef) - self.pythontypes.append(unicodeobject.W_UnicodeObject.typedef) - self.pythontypes.append(intobject.W_IntObject.typedef) - self.pythontypes.append(boolobject.W_BoolObject.typedef) - self.pythontypes.append(longobject.W_LongObject.typedef) - self.pythontypes.append(floatobject.W_FloatObject.typedef) - self.pythontypes.append(complexobject.W_ComplexObject.typedef) - self.pythontypes.append(sliceobject.W_SliceObject.typedef) - - # the set of implementation types - self.typeorder = { - objectobject.W_ObjectObject: [], - # XXX: Bool/Int/Long are pythontypes but still included here - # for delegation to Float/Complex - boolobject.W_BoolObject: [], - intobject.W_IntObject: [], - floatobject.W_FloatObject: [], - typeobject.W_TypeObject: [], - sliceobject.W_SliceObject: [], - longobject.W_LongObject: [], - noneobject.W_NoneObject: [], - complexobject.W_ComplexObject: [], - pypy.interpreter.pycode.PyCode: [], - pypy.interpreter.special.Ellipsis: [], - } - - self.imported_but_not_registered = { - bytesobject.W_BytesObject: True, - } - for option, value in config.objspace.std: - if option.startswith("with") and option in option_to_typename: - for classname in option_to_typename[option]: - modname = classname[:classname.index('.')] - classname = classname[classname.index('.')+1:] - d = {} - exec "from pypy.objspace.std.%s import %s" % ( - modname, classname) in d - implcls = d[classname] - if value: - self.typeorder[implcls] = [] - else: - self.imported_but_not_registered[implcls] = True - - - for type in self.typeorder: - self.typeorder[type].append((type, None)) - - # register the order in which types are converted into each others - # when trying to dispatch multimethods. 
- # XXX build these lists a bit more automatically later - - if config.objspace.std.withsmalllong: - from pypy.objspace.std import smalllongobject - self.typeorder[smalllongobject.W_SmallLongObject] += [ - (floatobject.W_FloatObject, smalllongobject.delegate_SmallLong2Float), - (complexobject.W_ComplexObject, smalllongobject.delegate_SmallLong2Complex), - ] - - if config.objspace.std.withstrbuf: - from pypy.objspace.std import strbufobject - - # put W_Root everywhere - self.typeorder[W_Root] = [] - for type in self.typeorder: - from pypy.objspace.std import stdtypedef - if type is not W_Root and isinstance(type.typedef, stdtypedef.StdTypeDef): - self.typeorder[type].append((type.typedef.any, None)) - self.typeorder[type].append((W_Root, None)) - - self._typeorder_with_empty_usersubcls = None - - # ____________________________________________________________ - # Prebuilt common integer values - - if config.objspace.std.withprebuiltint: - intobject.W_IntObject.PREBUILT = [] - for i in range(config.objspace.std.prebuiltintfrom, - config.objspace.std.prebuiltintto): - intobject.W_IntObject.PREBUILT.append(intobject.W_IntObject(i)) - del i - else: - intobject.W_IntObject.PREBUILT = None - - # ____________________________________________________________ - - def get_typeorder_with_empty_usersubcls(self): - if self._typeorder_with_empty_usersubcls is None: - from pypy.interpreter.typedef import enum_interplevel_subclasses - from pypy.objspace.std import stdtypedef - result = self.typeorder.copy() - for cls in self.typeorder: - if (hasattr(cls, 'typedef') and cls.typedef is not None and - cls.typedef.acceptable_as_base_class): - subclslist = enum_interplevel_subclasses(self.config, cls) - for subcls in subclslist: - if cls in subcls.__bases__: # only direct subclasses - # for user subclasses we only accept "generic" - # matches: "typedef.any" is the applevel-type-based - # matching, and "W_Root" is ANY. - matches = [] - if isinstance(cls.typedef, stdtypedef.StdTypeDef): - matches.append((cls.typedef.any, None)) - matches.append((W_Root, None)) - result[subcls] = matches - self._typeorder_with_empty_usersubcls = result - return self._typeorder_with_empty_usersubcls - -def _op_negated(function): - def op(space, w_1, w_2): - return space.not_(function(space, w_1, w_2)) - return op - -def _op_swapped(function): - def op(space, w_1, w_2): - return function(space, w_2, w_1) - return op - -def _op_swapped_negated(function): - def op(space, w_1, w_2): - return space.not_(function(space, w_2, w_1)) - return op - CMP_OPS = dict(lt='<', le='<=', eq='==', ne='!=', gt='>', ge='>=') -CMP_CORRESPONDANCES = [ - ('eq', 'ne', _op_negated), - ('lt', 'gt', _op_swapped), - ('le', 'ge', _op_swapped), - ('lt', 'ge', _op_negated), - ('le', 'gt', _op_negated), - ('lt', 'le', _op_swapped_negated), - ('gt', 'ge', _op_swapped_negated), - ] -for op1, op2, value in CMP_CORRESPONDANCES[:]: - i = CMP_CORRESPONDANCES.index((op1, op2, value)) - CMP_CORRESPONDANCES.insert(i+1, (op2, op1, value)) BINARY_BITWISE_OPS = {'and': '&', 'lshift': '<<', 'or': '|', 'rshift': '>>', 'xor': '^'} BINARY_OPS = dict(add='+', div='/', floordiv='//', mod='%', mul='*', sub='-', truediv='/', **BINARY_BITWISE_OPS) COMMUTATIVE_OPS = ('add', 'mul', 'and', 'or', 'xor') -def add_extra_comparisons(): - """ - Add the missing comparison operators if they were not explicitly - defined: eq <-> ne and lt <-> le <-> gt <-> ge. 
- We try to add them in the order defined by the CMP_CORRESPONDANCES - table, thus favouring swapping the arguments over negating the result. - """ - originalentries = {} - for op in CMP_OPS.iterkeys(): - originalentries[op] = getattr(MM, op).signatures() - - for op1, op2, correspondance in CMP_CORRESPONDANCES: - mirrorfunc = getattr(MM, op2) - for types in originalentries[op1]: - t1, t2 = types - if t1 is t2: - if not mirrorfunc.has_signature(types): - functions = getattr(MM, op1).getfunctions(types) - assert len(functions) == 1, ('Automatic' - ' registration of comparison functions' - ' only work when there is a single method for' - ' the operation.') - mirrorfunc.register(correspondance(functions[0]), *types) - # ____________________________________________________________ diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -1,5 +1,4 @@ import __builtin__ -import types from pypy.interpreter import special from pypy.interpreter.baseobjspace import ObjSpace, W_Root from pypy.interpreter.error import OperationError, oefmt @@ -10,32 +9,29 @@ from rpython.rlib.objectmodel import instantiate, specialize, is_annotation_constant from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.rarithmetic import base_int, widen, is_valid_int -from rpython.rlib.objectmodel import we_are_translated, import_from_mixin +from rpython.rlib.objectmodel import import_from_mixin from rpython.rlib import jit # Object imports +from pypy.objspace.std.basestringtype import basestring_typedef from pypy.objspace.std.boolobject import W_BoolObject +from pypy.objspace.std.bytearrayobject import W_BytearrayObject from pypy.objspace.std.bytesobject import W_AbstractBytesObject, W_BytesObject, wrapstr -from pypy.objspace.std.bytearrayobject import W_BytearrayObject from pypy.objspace.std.complexobject import W_ComplexObject from pypy.objspace.std.dictmultiobject import W_DictMultiObject from pypy.objspace.std.floatobject import W_FloatObject -from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.iterobject import W_AbstractSeqIterObject +from pypy.objspace.std.intobject import W_IntObject, setup_prebuilt, wrapint +from pypy.objspace.std.iterobject import W_AbstractSeqIterObject, W_SeqIterObject from pypy.objspace.std.listobject import W_ListObject from pypy.objspace.std.longobject import W_LongObject, newlong from pypy.objspace.std.noneobject import W_NoneObject from pypy.objspace.std.objectobject import W_ObjectObject -from pypy.objspace.std.iterobject import W_SeqIterObject from pypy.objspace.std.setobject import W_SetObject, W_FrozensetObject from pypy.objspace.std.sliceobject import W_SliceObject -from pypy.objspace.std.unicodeobject import W_UnicodeObject -from pypy.objspace.std.tupleobject import W_AbstractTupleObject +from pypy.objspace.std.tupleobject import W_AbstractTupleObject, W_TupleObject from pypy.objspace.std.typeobject import W_TypeObject +from pypy.objspace.std.unicodeobject import W_UnicodeObject, wrapunicode -# types -from pypy.objspace.std.intobject import wrapint -from pypy.objspace.std.unicodeobject import wrapunicode class StdObjSpace(ObjSpace): """The standard object space, implementing a general-purpose object @@ -43,13 +39,14 @@ import_from_mixin(DescrOperation) def initialize(self): - "NOT_RPYTHON: only for initializing the space." 
- # setup all the object types and implementations - self.model = model.StdTypeModel(self.config) + """NOT_RPYTHON: only for initializing the space + Setup all the object types and implementations. + """ + + setup_prebuilt(self) self.FrameClass = frame.build_frame(self) self.StringObjectCls = W_BytesObject - self.UnicodeObjectCls = W_UnicodeObject # singletons @@ -60,13 +57,40 @@ self.w_Ellipsis = self.wrap(special.Ellipsis(self)) # types + builtin_type_classes = { + W_BoolObject.typedef: W_BoolObject, + W_BytearrayObject.typedef: W_BytearrayObject, + W_BytesObject.typedef: W_BytesObject, + W_ComplexObject.typedef: W_ComplexObject, + W_DictMultiObject.typedef: W_DictMultiObject, + W_FloatObject.typedef: W_FloatObject, + W_IntObject.typedef: W_IntObject, + W_AbstractSeqIterObject.typedef: W_AbstractSeqIterObject, + W_ListObject.typedef: W_ListObject, + W_LongObject.typedef: W_LongObject, + W_NoneObject.typedef: W_NoneObject, + W_ObjectObject.typedef: W_ObjectObject, + W_SetObject.typedef: W_SetObject, + W_FrozensetObject.typedef: W_FrozensetObject, + W_SliceObject.typedef: W_SliceObject, + W_TupleObject.typedef: W_TupleObject, + W_TypeObject.typedef: W_TypeObject, + W_UnicodeObject.typedef: W_UnicodeObject, + } + if self.config.objspace.std.withstrbuf: + builtin_type_classes[W_BytesObject.typedef] = W_AbstractBytesObject + self.builtin_types = {} - for typedef in self.model.pythontypes: + self._interplevel_classes = {} + for typedef, cls in builtin_type_classes.items(): w_type = self.gettypeobject(typedef) self.builtin_types[typedef.name] = w_type setattr(self, 'w_' + typedef.name, w_type) + self._interplevel_classes[w_type] = cls self.builtin_types["NotImplemented"] = self.w_NotImplemented self.builtin_types["Ellipsis"] = self.w_Ellipsis + self.w_basestring = self.builtin_types['basestring'] = \ + self.gettypeobject(basestring_typedef) # exceptions & builtins self.make_builtins() @@ -80,8 +104,6 @@ if self.config.objspace.std.withtproxy: transparent.setup(self) - self.setup_isinstance_cache() - def get_builtin_types(self): return self.builtin_types @@ -319,13 +341,6 @@ if cls.typedef.applevel_subclasses_base is not None: cls = cls.typedef.applevel_subclasses_base # - if not we_are_translated(): - if issubclass(cls, model.W_Object): - # If cls is missing from model.typeorder, then you - # need to add it there (including the inheritance - # relationship, if any) - assert cls in self.model.typeorder, repr(cls) - # if (self.config.objspace.std.withmapdict and cls is W_ObjectObject and not w_subtype.needsdel): from pypy.objspace.std.mapdict import get_subclass_of_correct_size @@ -601,78 +616,6 @@ return True return self.type(w_inst).issubtype(w_type) - def setup_isinstance_cache(self): - # This assumes that all classes in the stdobjspace implementing a - # particular app-level type are distinguished by a common base class. - # Alternatively, you can turn off the cache on specific classes, - # like e.g. proxyobject. It is just a bit less performant but - # should not have any bad effect. - from pypy.objspace.std.model import W_Root, W_Object - # - # Build a dict {class: w_typeobject-or-None}. The value None is used - # on classes that are known to be abstract base classes. 
- class2type = {} - class2type[W_Root] = None - class2type[W_Object] = None - for cls in self.model.typeorder.keys(): - if getattr(cls, 'typedef', None) is None: - continue - if getattr(cls, 'ignore_for_isinstance_cache', False): - continue - w_type = self.gettypefor(cls) - w_oldtype = class2type.setdefault(cls, w_type) - assert w_oldtype is w_type - # - # Build the real dict {w_typeobject: class-or-base-class}. For every - # w_typeobject we look for the most precise common base class of all - # the registered classes. If no such class is found, we will find - # W_Object or W_Root, and complain. Then you must either add an - # artificial common base class, or disable caching on one of the - # two classes with ignore_for_isinstance_cache. - def getmro(cls): - while True: - yield cls - if cls is W_Root: - break - cls = cls.__bases__[0] - self._interplevel_classes = {} - for cls, w_type in class2type.items(): - if w_type is None: - continue - if w_type not in self._interplevel_classes: - self._interplevel_classes[w_type] = cls - else: - cls1 = self._interplevel_classes[w_type] - mro1 = list(getmro(cls1)) - for base in getmro(cls): - if base in mro1: - break - if base in class2type and class2type[base] is not w_type: - if class2type.get(base) is None: - msg = ("cannot find a common interp-level base class" - " between %r and %r" % (cls1, cls)) - else: - msg = ("%s is a base class of both %r and %r" % ( - class2type[base], cls1, cls)) - raise AssertionError("%r: %s" % (w_type, msg)) - class2type[base] = w_type - self._interplevel_classes[w_type] = base - - # register other things - # XXX: fix automatic registration - self._interplevel_classes[self.w_dict] = W_DictMultiObject - self._interplevel_classes[self.w_list] = W_ListObject - self._interplevel_classes[self.w_set] = W_SetObject - self._interplevel_classes[self.w_tuple] = W_AbstractTupleObject - self._interplevel_classes[self.w_sequenceiterator] = \ - W_AbstractSeqIterObject - if self.config.objspace.std.withstrbuf: - self._interplevel_classes[self.w_str] = W_AbstractBytesObject - else: - self._interplevel_classes[self.w_str] = W_BytesObject - self._interplevel_classes[self.w_bytearray] = W_BytearrayObject - self._interplevel_classes[self.w_unicode] = W_UnicodeObject - @specialize.memo() def _get_interplevel_cls(self, w_type): if not hasattr(self, "_interplevel_classes"): From noreply at buildbot.pypy.org Tue Feb 25 18:05:58 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 25 Feb 2014 18:05:58 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: a branch where to refactor the semantics of space.int_w and space.float_w Message-ID: <20140225170558.248D81C0150@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69427:6bb8ab7e58a7 Date: 2014-02-24 11:46 +0100 http://bitbucket.org/pypy/pypy/changeset/6bb8ab7e58a7/ Log: a branch where to refactor the semantics of space.int_w and space.float_w From noreply at buildbot.pypy.org Tue Feb 25 18:05:59 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 25 Feb 2014 18:05:59 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: add a failing test which should pass at the end of this branch Message-ID: <20140225170559.5C68D1C0150@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69428:ce4d4ceb14e2 Date: 2014-02-24 11:50 +0100 http://bitbucket.org/pypy/pypy/changeset/ce4d4ceb14e2/ Log: add a failing test which should pass at the end of this branch diff --git a/pypy/module/struct/test/test_struct.py 
b/pypy/module/struct/test/test_struct.py --- a/pypy/module/struct/test/test_struct.py +++ b/pypy/module/struct/test/test_struct.py @@ -7,7 +7,7 @@ class AppTestStruct(object): - spaceconfig = dict(usemodules=['struct']) + spaceconfig = dict(usemodules=['struct', 'micronumpy']) def setup_class(cls): """ @@ -19,7 +19,7 @@ return struct """) cls.w_native_is_bigendian = cls.space.wrap(native_is_bigendian) - + cls.w_runappdirect = cls.space.wrap(cls.runappdirect) def test_error(self): """ @@ -384,6 +384,18 @@ assert self.struct.unpack("ii", b) == (62, 12) raises(self.struct.error, self.struct.unpack, "i", b) + def test_numpy_dtypes(self): + if self.runappdirect: + from numpy.core.multiarray import typeinfo + else: + from _numpypy.multiarray import typeinfo + float64 = typeinfo['DOUBLE'][4] + obj = float64(42.3) + data = self.struct.pack('d', obj) + obj2, = self.struct.unpack('d', data) + assert type(obj2) is float + assert obj2 == 42.3 + class AppTestStructBuffer(object): spaceconfig = dict(usemodules=['struct', '__pypy__']) From noreply at buildbot.pypy.org Tue Feb 25 18:06:00 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 25 Feb 2014 18:06:00 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: first step: change the meaning of space.int_w: now by default it also accepts objects which implements __int__, except floats. You can trigger the old behavior by passing allow_conversion=False Message-ID: <20140225170600.884621C0150@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69429:7e1beef017c9 Date: 2014-02-25 10:49 +0100 http://bitbucket.org/pypy/pypy/changeset/7e1beef017c9/ Log: first step: change the meaning of space.int_w: now by default it also accepts objects which implements __int__, except floats. You can trigger the old behavior by passing allow_conversion=False diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -201,7 +201,15 @@ def unicode_w(self, space): self._typed_unwrap_error(space, "unicode") - def int_w(self, space): + def int_w(self, space, allow_conversion=True): + # note that W_IntObject.int_w has a fast path and W_FloatObject.int_w + # raises + w_obj = self + if allow_conversion: + w_obj = space.int(self) + return w_obj._int_w(space) + + def _int_w(self, space): self._typed_unwrap_error(space, "integer") def float_w(self, space): @@ -220,8 +228,7 @@ def int(self, space): w_impl = space.lookup(self, '__int__') if w_impl is None: - raise oefmt(space.w_TypeError, - "unsupported operand type for int(): '%T'", self) + self._typed_unwrap_error(space, "integer") w_result = space.get_and_call_function(w_impl, self) if (space.isinstance_w(w_result, space.w_int) or @@ -1348,8 +1355,19 @@ 'argument must be a string without NUL characters')) return rstring.assert_str0(result) - def int_w(self, w_obj): - return w_obj.int_w(self) + def int_w(self, w_obj, allow_conversion=True): + """ + Unwrap an app-level int object into an interpret-level int. + + If allow_conversion==True, w_obj might be of any type which implements + __int__, *except* floats which are explicitly rejected. This is the + same logic as CPython's PyArg_ParseTuple. If you want to also allow + floats, you can call space.int_w(space.int(w_obj)). + + If allow_conversion=False, w_obj needs to be an app-level int or a + subclass. 
+ """ + return w_obj.int_w(self, allow_conversion) def int(self, w_obj): return w_obj.int(self) diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py --- a/pypy/interpreter/test/test_argument.py +++ b/pypy/interpreter/test/test_argument.py @@ -106,7 +106,7 @@ def len(self, x): return len(x) - def int_w(self, x): + def int_w(self, x, allow_conversion=True): return x def eq_w(self, x, y): diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -167,6 +167,40 @@ self.space.setattr(w_oldstyle, self.space.wrap("__call__"), w_func) assert is_callable(w_oldstyle) + def test_int_w(self): + space = self.space + w_x = space.wrap(42) + assert space.int_w(w_x) == 42 + assert space.int_w(w_x, allow_conversion=False) == 42 + # + w_x = space.wrap(44.0) + space.raises_w(space.w_TypeError, space.int_w, w_x) + space.raises_w(space.w_TypeError, space.int_w, w_x, allow_conversion=False) + # + w_instance = self.space.appexec([], """(): + class MyInt(object): + def __int__(self): + return 43 + return MyInt() + """) + assert space.int_w(w_instance) == 43 + space.raises_w(space.w_TypeError, space.int_w, w_instance, allow_conversion=False) + # + w_instance = self.space.appexec([], """(): + class MyInt(object): + def __int__(self): + return 43 + + class AnotherInt(object): + def __int__(self): + return MyInt() + + return AnotherInt() + """) + space.raises_w(space.w_TypeError, space.int_w, w_instance) + space.raises_w(space.w_TypeError, space.int_w, w_instance, allow_conversion=False) + + def test_interp_w(self): w = self.space.wrap w_bltinfunction = self.space.builtin.get('len') diff --git a/pypy/module/cppyy/test/test_zjit.py b/pypy/module/cppyy/test/test_zjit.py --- a/pypy/module/cppyy/test/test_zjit.py +++ b/pypy/module/cppyy/test/test_zjit.py @@ -141,7 +141,7 @@ def is_w(self, w_one, w_two): return w_one is w_two - def int_w(self, w_obj): + def int_w(self, w_obj, allow_conversion=True): assert isinstance(w_obj, FakeInt) return w_obj.val diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -157,6 +157,7 @@ return w_obj.floatval def int_w(self, w_obj): + XXX # fix this if isinstance(w_obj, IntObject): return w_obj.intval elif isinstance(w_obj, FloatObject): diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -366,6 +366,7 @@ class W_IntegerBox(W_NumberBox): def int_w(self, space): + XXX # fix this return space.int_w(self.descr_int(space)) class W_SignedIntegerBox(W_IntegerBox): diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -45,7 +45,7 @@ def unicode_w(self, space): return NonConstant(u"foobar") - def int_w(self, space): + def int_w(self, space, allow_conversion=True): return NonConstant(-42) def uint_w(self, space): diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -34,6 +34,9 @@ def unwrap(self, space): return self.floatval + def int_w(self, space, allow_conversion=True): + self._typed_unwrap_error(space, "integer") + def float_w(self, space): return self.floatval diff --git 
a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -309,9 +309,13 @@ """representation for debugging purposes""" return "%s(%d)" % (self.__class__.__name__, self.intval) - def int_w(self, space): + def int_w(self, space, allow_conversion=True): return int(self.intval) - unwrap = int_w + + def _int_w(self, space): + return int(self.intval) + + unwrap = _int_w def uint_w(self, space): intval = self.intval diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -244,7 +244,7 @@ def fromrarith_int(i): return W_LongObject(rbigint.fromrarith_int(i)) - def int_w(self, space): + def _int_w(self, space): try: return self.num.toint() except OverflowError: diff --git a/pypy/objspace/std/smalllongobject.py b/pypy/objspace/std/smalllongobject.py --- a/pypy/objspace/std/smalllongobject.py +++ b/pypy/objspace/std/smalllongobject.py @@ -44,7 +44,7 @@ def __repr__(self): return '' % self.longlong - def int_w(self, space): + def _int_w(self, space): a = self.longlong b = intmask(a) if b == a: diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1046,7 +1046,7 @@ assert isinstance(string, str) return string - def int_w(self, integer): + def int_w(self, integer, allow_conversion=True): assert isinstance(integer, int) return integer From noreply at buildbot.pypy.org Tue Feb 25 18:06:01 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 25 Feb 2014 18:06:01 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: start reviewing all usages of int_w(): we cannot use fake ints as indexes Message-ID: <20140225170601.A25D41C0150@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69430:ea3291c41c47 Date: 2014-02-25 17:30 +0100 http://bitbucket.org/pypy/pypy/changeset/ea3291c41c47/ Log: start reviewing all usages of int_w(): we cannot use fake ints as indexes diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -92,7 +92,7 @@ i = 2 * HUGEVAL_BYTES addrstring = [' '] * i while True: - n = space.int_w(space.and_(w_id, w_0x0F)) + n = space.int_w(space.and_(w_id, w_0x0F), allow_conversion=False) n += ord('0') if n > ord('9'): n += (ord('a') - ord('9') - 1) @@ -1238,7 +1238,7 @@ start, stop, step, length = w_index_or_slice.indices4(self, seqlength) else: - start = self.int_w(w_index_or_slice) + start = self.int_w(w_index_or_slice, allow_conversion=False) if start < 0: start += seqlength if not (0 <= start < seqlength): diff --git a/pypy/module/__builtin__/test/test_buffer.py b/pypy/module/__builtin__/test/test_buffer.py --- a/pypy/module/__builtin__/test/test_buffer.py +++ b/pypy/module/__builtin__/test/test_buffer.py @@ -170,6 +170,18 @@ for step in indices[1:]: assert b[start:stop:step] == s[start:stop:step] + def test_getitem_only_ints(self): + class MyInt(object): + def __init__(self, x): + self.x = x + + def __int__(self): + return self.x + + buf = buffer('hello world') + raises(TypeError, "buf[MyInt(0)]") + raises(TypeError, "buf[MyInt(0):MyInt(5)]") + class AppTestMemoryView: def test_basic(self): v = memoryview("abc") From noreply at buildbot.pypy.org Tue Feb 25 18:06:02 2014 From: noreply at buildbot.pypy.org 
(antocuni) Date: Tue, 25 Feb 2014 18:06:02 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: more reviewing of int_w(): conversions are not allowed here Message-ID: <20140225170602.B89421C0150@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69431:8a8a32a743e3 Date: 2014-02-25 17:41 +0100 http://bitbucket.org/pypy/pypy/changeset/8a8a32a743e3/ Log: more reviewing of int_w(): conversions are not allowed here diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1217,7 +1217,7 @@ assert isinstance(w_index_or_slice, W_SliceObject) start, stop, step = w_index_or_slice.indices3(self, seqlength) else: - start = self.int_w(w_index_or_slice) + start = self.int_w(w_index_or_slice, allow_conversion=False) if start < 0: start += seqlength if not (0 <= start < seqlength): diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -1034,6 +1034,18 @@ assert len(b) == 13 assert str(b[12]) == "-0.0" + def test_getitem_only_ints(self): + class MyInt(object): + def __init__(self, x): + self.x = x + + def __int__(self): + return self.x + + a = self.array('i', [1, 2, 3, 4, 5, 6]) + raises(TypeError, "a[MyInt(0)]") + raises(TypeError, "a[MyInt(0):MyInt(5)]") + class AppTestArrayBuiltinShortcut(AppTestArray): spaceconfig = AppTestArray.spaceconfig.copy() From noreply at buildbot.pypy.org Tue Feb 25 18:06:03 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 25 Feb 2014 18:06:03 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: add a passing test and a comment explaining why the test was not failing Message-ID: <20140225170603.DFFE71C0150@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69432:dc1874f92984 Date: 2014-02-25 17:54 +0100 http://bitbucket.org/pypy/pypy/changeset/dc1874f92984/ Log: add a passing test and a comment explaining why the test was not failing diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1262,7 +1262,10 @@ raise oefmt(self.w_TypeError, "%s must be an integer, not %T", objdescr, w_obj) try: - index = self.int_w(w_index) + # allow_conversion=False it's not really necessary because the + # return type of __index__ is already checked by space.index(), + # but there is no reason to allow conversions anyway + index = self.int_w(w_index, allow_conversion=False) except OperationError, err: if not err.match(self, self.w_OverflowError): raise diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -842,6 +842,26 @@ except TypeError: pass + def test_mul___index__(self): + class MyInt(object): + def __init__(self, x): + self.x = x + + def __int__(self): + return self.x + + class MyIndex(object): + def __init__(self, x): + self.x = x + + def __index__(self): + return self.x + + assert [0] * MyIndex(3) == [0, 0, 0] + raises(TypeError, "[0]*MyInt(3)") + raises(TypeError, "[0]*MyIndex(MyInt(3))") + + def test_index(self): c = range(10) assert c.index(0) == 0 From noreply at buildbot.pypy.org Tue Feb 25 18:06:05 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 25 Feb 2014 18:06:05 +0100 (CET) Subject: [pypy-commit] pypy 
int_w-refactor: gateway_int_w now has the very same semantics as int_w Message-ID: <20140225170605.0846A1C0150@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69433:970e75bb2953 Date: 2014-02-25 17:56 +0100 http://bitbucket.org/pypy/pypy/changeset/970e75bb2953/ Log: gateway_int_w now has the very same semantics as int_w diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1420,11 +1420,7 @@ return w_obj.ord(self) # This is all interface for gateway.py. - def gateway_int_w(self, w_obj): - if self.isinstance_w(w_obj, self.w_float): - raise OperationError(self.w_TypeError, - self.wrap("integer argument expected, got float")) - return self.int_w(self.int(w_obj)) + gateway_int_w = int_w def gateway_float_w(self, w_obj): return self.float_w(self.float(w_obj)) From noreply at buildbot.pypy.org Tue Feb 25 18:06:06 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Tue, 25 Feb 2014 18:06:06 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: this is hard to test, but CPython accepts only actual integers in sys.exit() Message-ID: <20140225170606.151A51C0150@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69434:8854c318cb66 Date: 2014-02-25 18:00 +0100 http://bitbucket.org/pypy/pypy/changeset/8854c318cb66/ Log: this is hard to test, but CPython accepts only actual integers in sys.exit() diff --git a/pypy/interpreter/main.py b/pypy/interpreter/main.py --- a/pypy/interpreter/main.py +++ b/pypy/interpreter/main.py @@ -134,7 +134,7 @@ exitcode = 0 else: try: - exitcode = space.int_w(w_exitcode) + exitcode = space.int_w(w_exitcode, allow_conversion=False) except OperationError: # not an integer: print it to stderr msg = space.str_w(space.str(w_exitcode)) From noreply at buildbot.pypy.org Tue Feb 25 18:22:50 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 18:22:50 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: Kill model.UnwrapError. Message-ID: <20140225172250.632971C02EA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69435:646434ae7b6d Date: 2014-02-25 18:08 +0100 http://bitbucket.org/pypy/pypy/changeset/646434ae7b6d/ Log: Kill model.UnwrapError. 
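A minimal sketch of the effect of this changeset (not part of the diff below; it assumes a PyPy source checkout on sys.path and that the test-suite helper gettestobjspace is available under pypy.tool.pytest.objspace):

    from pypy.tool.pytest.objspace import gettestobjspace

    space = gettestobjspace()
    assert space.unwrap(space.wrap(5)) == 5   # wrapped objects unwrap as before
    try:
        space.unwrap(object())                # not a W_Root instance
    except TypeError:                         # previously raised model.UnwrapError
        pass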
diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -34,7 +34,3 @@ if w_cls is not None and w_cls is not self: s += ' instance of %s' % self.w__class__ return '<%s>' % s - - -class UnwrapError(Exception): - pass diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -3,8 +3,7 @@ from pypy.interpreter.baseobjspace import ObjSpace, W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.typedef import get_unique_interplevel_subclass -from pypy.objspace.std import (stdtypedef, frame, model, - transparent, callmethod) +from pypy.objspace.std import stdtypedef, frame, transparent, callmethod from pypy.objspace.descroperation import DescrOperation, raiseattrerror from rpython.rlib.objectmodel import instantiate, specialize, is_annotation_constant from rpython.rlib.debug import make_sure_not_resized @@ -245,7 +244,7 @@ # _____ this code is here to support testing only _____ if isinstance(w_obj, W_Root): return w_obj.unwrap(self) - raise model.UnwrapError("cannot unwrap: %r" % w_obj) + raise TypeError("cannot unwrap: %r" % w_obj) def newint(self, intval): return wrapint(self, intval) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -439,10 +439,6 @@ storage = strategy.erase(w_self) return W_DictMultiObject(space, strategy, storage) - def unwrap(w_self, space): - from pypy.objspace.std.model import UnwrapError - raise UnwrapError(w_self) - def is_heaptype(w_self): return w_self.flag_heaptype From noreply at buildbot.pypy.org Tue Feb 25 18:22:51 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 18:22:51 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: Kill W_ANY. Message-ID: <20140225172251.9CD551C02EA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69436:e0681470078b Date: 2014-02-25 18:10 +0100 http://bitbucket.org/pypy/pypy/changeset/e0681470078b/ Log: Kill W_ANY. diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -17,8 +17,6 @@ # ____________________________________________________________ -W_ANY = W_Root - class W_Object(W_Root): "Parent base class for wrapped objects provided by the StdObjSpace." 
# Note that not all wrapped objects in the interpreter inherit from diff --git a/pypy/objspace/std/proxy_helpers.py b/pypy/objspace/std/proxy_helpers.py --- a/pypy/objspace/std/proxy_helpers.py +++ b/pypy/objspace/std/proxy_helpers.py @@ -3,9 +3,8 @@ of cyclic imports """ -from pypy.objspace.std.model import W_ANY, W_Object -from pypy.interpreter import baseobjspace from pypy.interpreter.argument import Arguments +from pypy.interpreter.baseobjspace import W_Root from rpython.tool.sourcetools import func_with_new_name def create_mm_names(classname, mm, is_local): @@ -34,7 +33,7 @@ return space.call_args(w_transparent_list.w_controller, args) function = func_with_new_name(function, mm.name) - mm.register(function, type_, *([W_ANY] * (mm.arity - 1))) + mm.register(function, type_, *([W_Root] * (mm.arity - 1))) def install_mm_trampoline(type_, mm, is_local): classname = type_.__name__[2:] @@ -50,7 +49,7 @@ return space.call_function(w_transparent_list.w_controller, space.wrap\ (op_name), *args_w) function = func_with_new_name(function, mm_name) - mm.register(function, type_, *([W_ANY] * (mm.arity - 1))) + mm.register(function, type_, *([W_Root] * (mm.arity - 1))) def is_special_doublearg(mm, type_): """ We specialcase when we've got two argument method for which From noreply at buildbot.pypy.org Tue Feb 25 18:22:52 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 18:22:52 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: Kill W_Object. Message-ID: <20140225172252.BCFCA1C02EA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69437:39f12449bcb7 Date: 2014-02-25 18:21 +0100 http://bitbucket.org/pypy/pypy/changeset/39f12449bcb7/ Log: Kill W_Object. diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py --- a/pypy/objspace/std/model.py +++ b/pypy/objspace/std/model.py @@ -13,22 +13,3 @@ BINARY_OPS = dict(add='+', div='/', floordiv='//', mod='%', mul='*', sub='-', truediv='/', **BINARY_BITWISE_OPS) COMMUTATIVE_OPS = ('add', 'mul', 'and', 'or', 'xor') - - -# ____________________________________________________________ - -class W_Object(W_Root): - "Parent base class for wrapped objects provided by the StdObjSpace." - # Note that not all wrapped objects in the interpreter inherit from - # W_Object. (They inherit from W_Root.) 
- __slots__ = () - - def __repr__(self): - name = getattr(self, 'name', '') - if not isinstance(name, str): - name = '' - s = '%s(%s)' % (self.__class__.__name__, name) - w_cls = getattr(self, 'w__class__', None) - if w_cls is not None and w_cls is not self: - s += ' instance of %s' % self.w__class__ - return '<%s>' % s diff --git a/pypy/objspace/std/proxyobject.py b/pypy/objspace/std/proxyobject.py --- a/pypy/objspace/std/proxyobject.py +++ b/pypy/objspace/std/proxyobject.py @@ -2,13 +2,8 @@ """ transparent list implementation """ -from pypy.objspace.std.model import W_Object +from pypy.interpreter import baseobjspace from pypy.interpreter.error import OperationError -from pypy.interpreter import baseobjspace - -#class W_Transparent(W_Object): -# def __init__(self, w_controller): -# self.controller = w_controller def transparent_class(name, BaseCls): @@ -72,7 +67,6 @@ return W_Transparent W_Transparent = transparent_class('W_Transparent', baseobjspace.W_Root) -#W_TransparentObject = transparent_class('W_TransparentObject', W_Object) #from pypy.objspace.std.objecttype import object_typedef #W_TransparentObject.typedef = object_typedef diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -1,13 +1,12 @@ -from pypy.objspace.std.model import W_Object +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.gateway import interp2app from pypy.objspace.std.stdtypedef import StdTypeDef -from pypy.interpreter.gateway import interp2app - class TestTypeObject: def test_not_acceptable_as_base_class(self): space = self.space - class W_Stuff(W_Object): + class W_Stuff(W_Root): pass def descr__new__(space, w_subtype): return space.allocate_instance(W_Stuff, w_subtype) diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -5,7 +5,6 @@ from pypy.interpreter.typedef import weakref_descr, GetSetProperty,\ descr_get_dict from pypy.interpreter.astcompiler.misc import mangle -from pypy.objspace.std.model import W_Object from pypy.objspace.std.stdtypedef import std_dict_descr, issubtypedef, Member from pypy.objspace.std.stdtypedef import StdTypeDef @@ -56,7 +55,7 @@ COMPARES_BY_IDENTITY = 1 OVERRIDES_EQ_CMP_OR_HASH = 2 -class W_TypeObject(W_Object): +class W_TypeObject(W_Root): lazyloaders = {} # can be overridden by specific instances # the version_tag changes if the dict or the inheritance hierarchy changes From noreply at buildbot.pypy.org Tue Feb 25 18:49:40 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 18:49:40 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: Move rest of pypy.objspace.std.model into pypy.objspace.std.util. Message-ID: <20140225174940.18BA21C08F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69438:c7fe33d31de3 Date: 2014-02-25 18:30 +0100 http://bitbucket.org/pypy/pypy/changeset/c7fe33d31de3/ Log: Move rest of pypy.objspace.std.model into pypy.objspace.std.util. 
diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -272,7 +272,7 @@ if self.user_overridden_class: return None from rpython.rlib.longlong2float import float2longlong - from pypy.objspace.std.model import IDTAG_COMPLEX as tag + from pypy.objspace.std.util import IDTAG_COMPLEX as tag real = space.float_w(space.getattr(self, space.wrap("real"))) imag = space.float_w(space.getattr(self, space.wrap("imag"))) real_b = rbigint.fromrarith_int(float2longlong(real)) diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -172,7 +172,7 @@ if self.user_overridden_class: return None from rpython.rlib.longlong2float import float2longlong - from pypy.objspace.std.model import IDTAG_FLOAT as tag + from pypy.objspace.std.util import IDTAG_FLOAT as tag val = float2longlong(space.float_w(self)) b = rbigint.fromrarith_int(val) b = b.lshift(3).or_(rbigint.fromint(tag)) diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -9,7 +9,7 @@ import sys from rpython.rlib import jit -from rpython.rlib.objectmodel import instantiate, import_from_mixin, specialize +from rpython.rlib.objectmodel import instantiate from rpython.rlib.rarithmetic import ( LONG_BIT, is_valid_int, ovfcheck, r_longlong, r_uint, string_to_int) from rpython.rlib.rbigint import rbigint @@ -22,10 +22,9 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from pypy.objspace.std import newformat -from pypy.objspace.std.model import ( - BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_INT) from pypy.objspace.std.stdtypedef import StdTypeDef -from pypy.objspace.std.util import wrap_parsestringerror +from pypy.objspace.std.util import ( + BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_INT, wrap_parsestringerror) SENTINEL = object() @@ -689,7 +688,6 @@ buf = space.interp_w(Buffer, w_buffer) value, w_longval = _string_to_int_or_long(space, w_value, buf.as_str()) - ok = True else: base = space.int_w(w_base) diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -15,10 +15,9 @@ WrappedDefault, interp2app, interpindirect2app, unwrap_spec) from pypy.objspace.std import newformat from pypy.objspace.std.intobject import W_AbstractIntObject -from pypy.objspace.std.model import ( - BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_LONG) from pypy.objspace.std.stdtypedef import StdTypeDef -from pypy.objspace.std.util import wrap_parsestringerror +from pypy.objspace.std.util import ( + BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_LONG, wrap_parsestringerror) def delegate_other(func): diff --git a/pypy/objspace/std/model.py b/pypy/objspace/std/model.py deleted file mode 100644 --- a/pypy/objspace/std/model.py +++ /dev/null @@ -1,15 +0,0 @@ -from pypy.interpreter.baseobjspace import W_Root - - -IDTAG_INT = 1 -IDTAG_LONG = 3 -IDTAG_FLOAT = 5 -IDTAG_COMPLEX = 7 - - -CMP_OPS = dict(lt='<', le='<=', eq='==', ne='!=', gt='>', ge='>=') -BINARY_BITWISE_OPS = {'and': '&', 'lshift': '<<', 'or': '|', 'rshift': '>>', - 'xor': '^'} -BINARY_OPS = dict(add='+', div='/', floordiv='//', mod='%', mul='*', sub='-', - truediv='/', **BINARY_BITWISE_OPS) -COMMUTATIVE_OPS = ('add', 'mul', 'and', 'or', 
'xor') diff --git a/pypy/objspace/std/smalllongobject.py b/pypy/objspace/std/smalllongobject.py --- a/pypy/objspace/std/smalllongobject.py +++ b/pypy/objspace/std/smalllongobject.py @@ -13,7 +13,7 @@ from pypy.interpreter.gateway import WrappedDefault, unwrap_spec from pypy.objspace.std.intobject import W_AbstractIntObject from pypy.objspace.std.longobject import W_AbstractLongObject, W_LongObject -from pypy.objspace.std.model import COMMUTATIVE_OPS +from pypy.objspace.std.util import COMMUTATIVE_OPS # XXX: breaks translation #LONGLONG_MIN = r_longlong(-1 << (LONGLONG_BIT - 1)) diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -2,6 +2,19 @@ from rpython.rlib.rstring import InvalidBaseError +IDTAG_INT = 1 +IDTAG_LONG = 3 +IDTAG_FLOAT = 5 +IDTAG_COMPLEX = 7 + +CMP_OPS = dict(lt='<', le='<=', eq='==', ne='!=', gt='>', ge='>=') +BINARY_BITWISE_OPS = {'and': '&', 'lshift': '<<', 'or': '|', 'rshift': '>>', + 'xor': '^'} +BINARY_OPS = dict(add='+', div='/', floordiv='//', mod='%', mul='*', sub='-', + truediv='/', **BINARY_BITWISE_OPS) +COMMUTATIVE_OPS = ('add', 'mul', 'and', 'or', 'xor') + + def negate(f): """Create a function which calls `f` and negates its result. When the result is ``space.w_NotImplemented``, ``space.w_NotImplemented`` is From noreply at buildbot.pypy.org Tue Feb 25 18:49:41 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 18:49:41 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: Remove multimethods sections from documentation. Message-ID: <20140225174941.4E1E91C08F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69439:0d2003298f8b Date: 2014-02-25 18:34 +0100 http://bitbucket.org/pypy/pypy/changeset/0d2003298f8b/ Log: Remove multimethods sections from documentation. diff --git a/pypy/doc/objspace.rst b/pypy/doc/objspace.rst --- a/pypy/doc/objspace.rst +++ b/pypy/doc/objspace.rst @@ -384,105 +384,6 @@ .. _`Standard Interpreter Optimizations`: interpreter-optimizations.html -Multimethods ------------- - -The Standard Object Space allows multiple object implementations per -Python type - this is based on multimethods_. For a description of the -multimethod variant that we implemented and which features it supports, -see the comment at the start of `pypy/objspace/std/multimethod.py`_. However, multimethods -alone are not enough for the Standard Object Space: the complete picture -spans several levels in order to emulate the exact Python semantics. - -Consider the example of the ``space.getitem(w_a, w_b)`` operation, -corresponding to the application-level syntax ``a[b]``. The Standard -Object Space contains a corresponding ``getitem`` multimethod and a -family of functions that implement the multimethod for various -combination of argument classes - more precisely, for various -combinations of the *interpreter-level* classes of the arguments. Here -are some examples of functions implementing the ``getitem`` -multimethod: - -* ``getitem__Tuple_ANY``: called when the first argument is a - W_TupleObject, this function converts its second argument to an - integer and performs tuple indexing. - -* ``getitem__Tuple_Slice``: called when the first argument is a - W_TupleObject and the second argument is a W_SliceObject. This - version takes precedence over the previous one if the indexing is - done with a slice object, and performs tuple slicing instead. 
- -* ``getitem__String_Slice``: called when the first argument is a - W_StringObject and the second argument is a slice object. - -Note how the multimethod dispatch logic helps writing new object -implementations without having to insert hooks into existing code. Note -first how we could have defined a regular method-based API that new -object implementations must provide, and call these methods from the -space operations. The problem with this approach is that some Python -operators are naturally binary or N-ary. Consider for example the -addition operation: for the basic string implementation it is a simple -concatenation-by-copy, but it can have a rather more subtle -implementation for strings done as ropes. It is also likely that -concatenating a basic string with a rope string could have its own -dedicated implementation - and yet another implementation for a rope -string with a basic string. With multimethods, we can have an -orthogonally-defined implementation for each combination. - -The multimethods mechanism also supports delegate functions, which are -converters between two object implementations. The dispatch logic knows -how to insert calls to delegates if it encounters combinations of -interp-level classes which is not directly implemented. For example, we -have no specific implementation for the concatenation of a basic string -and a StringSlice object; when the user adds two such strings, then the -StringSlice object is converted to a basic string (that is, a -temporarily copy is built), and the concatenation is performed on the -resulting pair of basic strings. This is similar to the C++ method -overloading resolution mechanism (but occurs at runtime). - -.. _multimethods: http://en.wikipedia.org/wiki/Multimethods - - -Multimethod slicing -------------------- - -The complete picture is more complicated because the Python object model -is based on *descriptors*: the types ``int``, ``str``, etc. must have -methods ``__add__``, ``__mul__``, etc. that take two arguments including -the ``self``. These methods must perform the operation or return -``NotImplemented`` if the second argument is not of a type that it -doesn't know how to handle. - -The Standard Object Space creates these methods by *slicing* the -multimethod tables. Each method is automatically generated from a -subset of the registered implementations of the corresponding -multimethod. This slicing is performed on the first argument, in order -to keep only the implementations whose first argument's -interpreter-level class matches the declared Python-level type. - -For example, in a baseline PyPy, ``int.__add__`` is just calling the -function ``add__Int_Int``, which is the only registered implementation -for ``add`` whose first argument is an implementation of the ``int`` -Python type. On the other hand, if we enable integers implemented as -tagged pointers, then there is another matching implementation: -``add__SmallInt_SmallInt``. In this case, the Python-level method -``int.__add__`` is implemented by trying to dispatch between these two -functions based on the interp-level type of the two arguments. - -Similarly, the reverse methods (``__radd__`` and others) are obtained by -slicing the multimethod tables to keep only the functions whose *second* -argument has the correct Python-level type. 
- -Slicing is actually a good way to reproduce the details of the object -model as seen in CPython: slicing is attempted for every Python types -for every multimethod, but the ``__xyz__`` Python methods are only put -into the Python type when the resulting slices are not empty. This is -how our ``int`` type has no ``__getitem__`` method, for example. -Additionally, slicing ensures that ``5 .__add__(6L)`` correctly returns -``NotImplemented`` (because this particular slice does not include -``add__Long_Long`` and there is no ``add__Int_Long``), which leads to -``6L.__radd__(5)`` being called, as in CPython. - .. _`Flow Object Space`: The Flow Object Space From noreply at buildbot.pypy.org Tue Feb 25 18:49:42 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 18:49:42 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: Rename this test and uncomment additional asserts. Message-ID: <20140225174942.6AB521C08F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69440:745447db5195 Date: 2014-02-25 18:40 +0100 http://bitbucket.org/pypy/pypy/changeset/745447db5195/ Log: Rename this test and uncomment additional asserts. diff --git a/pypy/objspace/std/test/test_floatobject.py b/pypy/objspace/std/test/test_floatobject.py --- a/pypy/objspace/std/test/test_floatobject.py +++ b/pypy/objspace/std/test/test_floatobject.py @@ -420,16 +420,15 @@ raises(OverflowError, math.trunc, float("inf")) - def test_multimethod_slice(self): + def test_call_special(self): assert 5 .__add__(3.14) is NotImplemented assert 3.25 .__add__(5) == 8.25 - # xxx we are also a bit inconsistent about the following - #if hasattr(int, '__eq__'): # for py.test -A: CPython is inconsistent - # assert 5 .__eq__(3.14) is NotImplemented - # assert 3.14 .__eq__(5) is False - #if hasattr(long, '__eq__'): # for py.test -A: CPython is inconsistent - # assert 5L .__eq__(3.14) is NotImplemented - # assert 3.14 .__eq__(5L) is False + + assert 5 .__eq__(3.14) is NotImplemented + assert 3.14 .__eq__(5) is False + + assert 5L .__eq__(3.14) is NotImplemented + assert 3.14 .__eq__(5L) is False def test_from_string(self): raises(ValueError, float, "\0") From noreply at buildbot.pypy.org Tue Feb 25 18:49:43 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 18:49:43 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: Kill unused proxy_helpers.py. Make StdTypeDef an alias to TypeDef. Message-ID: <20140225174943.844C21C08F3@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69441:6d0d93cd5c53 Date: 2014-02-25 18:47 +0100 http://bitbucket.org/pypy/pypy/changeset/6d0d93cd5c53/ Log: Kill unused proxy_helpers.py. Make StdTypeDef an alias to TypeDef. 
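A short sketch of what the alias means for client code (illustration only, assuming a PyPy source checkout on sys.path):

    from pypy.interpreter.typedef import TypeDef
    from pypy.objspace.std.stdtypedef import StdTypeDef

    assert StdTypeDef is TypeDef          # plain alias after this change
    td = StdTypeDef("Spam")               # behaves exactly like TypeDef("Spam")
    assert isinstance(td, TypeDef)
    # the extra W_Any* helper class is no longer attached as td.any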
diff --git a/pypy/objspace/std/proxy_helpers.py b/pypy/objspace/std/proxy_helpers.py deleted file mode 100644 --- a/pypy/objspace/std/proxy_helpers.py +++ /dev/null @@ -1,90 +0,0 @@ - -""" Some transparent helpers, put here because -of cyclic imports -""" - -from pypy.interpreter.argument import Arguments -from pypy.interpreter.baseobjspace import W_Root -from rpython.tool.sourcetools import func_with_new_name - -def create_mm_names(classname, mm, is_local): - s = "" - if is_local: - s += "list_" - s += mm.name + "__" - s += "_".join([classname] + ["ANY"] * (mm.arity - 1)) - #if '__' + mm.name + '__' in mm.specialnames: - # return s, '__' + mm.name + '__' - if mm.specialnames: - return s, mm.specialnames[0] - return s, mm.name - -def install_general_args_trampoline(type_, mm, is_local, op_name): - def function(space, w_transparent_list, __args__): - args = __args__.prepend(space.wrap(op_name)) - return space.call_args(w_transparent_list.w_controller, args) - - function = func_with_new_name(function, mm.name) - mm.register(function, type_) - -def install_args_w_trampoline(type_, mm, is_local, op_name): - def function(space, w_transparent_list, *args_w): - args = Arguments(space, [space.wrap(op_name)] + list(args_w[:-1]) + args_w[-1]) - return space.call_args(w_transparent_list.w_controller, args) - - function = func_with_new_name(function, mm.name) - mm.register(function, type_, *([W_Root] * (mm.arity - 1))) - -def install_mm_trampoline(type_, mm, is_local): - classname = type_.__name__[2:] - mm_name, op_name = create_mm_names(classname, mm, is_local) - - if ['__args__'] == mm.argnames_after: - return install_general_args_trampoline(type_, mm, is_local, op_name) - if ['args_w'] == mm.argnames_after: - return install_args_w_trampoline(type_, mm, is_local, op_name) - assert not mm.argnames_after - # we search here for special-cased stuff - def function(space, w_transparent_list, *args_w): - return space.call_function(w_transparent_list.w_controller, space.wrap\ - (op_name), *args_w) - function = func_with_new_name(function, mm_name) - mm.register(function, type_, *([W_Root] * (mm.arity - 1))) - -def is_special_doublearg(mm, type_): - """ We specialcase when we've got two argument method for which - there exist reverse operation - """ - if mm.arity != 2: - return False - - if len(mm.specialnames) != 2: - return False - - # search over the signatures - for signature in mm.signatures(): - if signature == (type_.original, type_.original): - return True - return False - -def install_mm_special(type_, mm, is_local): - classname = type_.__name__[2:] - #mm_name, op_name = create_mm_names(classname, mm, is_local) - - def function(space, w_any, w_transparent_list): - retval = space.call_function(w_transparent_list.w_controller, space.wrap(mm.specialnames[1]), - w_any) - return retval - - function = func_with_new_name(function, mm.specialnames[0]) - - mm.register(function, type_.typedef.any, type_) - -def register_type(type_): - from pypy.objspace.std.stdtypedef import multimethods_defined_on - - for mm, is_local in multimethods_defined_on(type_.original): - if not mm.name.startswith('__'): - install_mm_trampoline(type_, mm, is_local) - if is_special_doublearg(mm, type_): - install_mm_special(type_, mm, is_local) diff --git a/pypy/objspace/std/stdtypedef.py b/pypy/objspace/std/stdtypedef.py --- a/pypy/objspace/std/stdtypedef.py +++ b/pypy/objspace/std/stdtypedef.py @@ -8,12 +8,8 @@ __all__ = ['StdTypeDef'] -class StdTypeDef(TypeDef): +StdTypeDef = TypeDef - def __init__(self, __name, __base=None, 
**rawdict): - "NOT_RPYTHON: initialization-time only." - TypeDef.__init__(self, __name, __base, **rawdict) - self.any = type("W_Any"+__name.title(), (baseobjspace.W_Root,), {'typedef': self}) @jit.unroll_safe def issubtypedef(a, b): From noreply at buildbot.pypy.org Tue Feb 25 19:22:42 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 19:22:42 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: Move issubtypedef into typeobject.py. Message-ID: <20140225182242.04BF11C02EA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69442:cb191419517d Date: 2014-02-25 18:56 +0100 http://bitbucket.org/pypy/pypy/changeset/cb191419517d/ Log: Move issubtypedef into typeobject.py. diff --git a/pypy/objspace/std/stdtypedef.py b/pypy/objspace/std/stdtypedef.py --- a/pypy/objspace/std/stdtypedef.py +++ b/pypy/objspace/std/stdtypedef.py @@ -1,9 +1,7 @@ -from pypy.interpreter import baseobjspace from pypy.interpreter.typedef import TypeDef, GetSetProperty, Member from pypy.interpreter.typedef import descr_get_dict, descr_set_dict from pypy.interpreter.typedef import descr_del_dict from pypy.interpreter.baseobjspace import SpaceCache -from rpython.rlib import jit __all__ = ['StdTypeDef'] @@ -11,29 +9,10 @@ StdTypeDef = TypeDef - at jit.unroll_safe -def issubtypedef(a, b): - from pypy.objspace.std.objectobject import W_ObjectObject - if b is W_ObjectObject.typedef: - return True - if a is None: - return False - if a is b: - return True - for a1 in a.bases: - if issubtypedef(a1, b): - return True - return False - std_dict_descr = GetSetProperty(descr_get_dict, descr_set_dict, descr_del_dict, doc="dictionary for instance variables (if defined)") std_dict_descr.name = '__dict__' -# ____________________________________________________________ -# -# All the code below fishes from the multimethod registration tables -# the descriptors to put into the W_TypeObjects. -# class TypeCache(SpaceCache): def build(cache, typedef): diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -5,7 +5,7 @@ from pypy.interpreter.typedef import weakref_descr, GetSetProperty,\ descr_get_dict from pypy.interpreter.astcompiler.misc import mangle -from pypy.objspace.std.stdtypedef import std_dict_descr, issubtypedef, Member +from pypy.objspace.std.stdtypedef import std_dict_descr, Member from pypy.objspace.std.stdtypedef import StdTypeDef from rpython.rlib.jit import (promote, elidable_promote, we_are_jitted, @@ -907,6 +907,20 @@ w_layout1 = w_layout1.w_same_layout_as or w_layout1 return True + at unroll_safe +def issubtypedef(a, b): + from pypy.objspace.std.objectobject import W_ObjectObject + if b is W_ObjectObject.typedef: + return True + if a is None: + return False + if a is b: + return True + for a1 in a.bases: + if issubtypedef(a1, b): + return True + return False + def find_best_base(space, bases_w): """The best base is one of the bases in the given list: the one whose layout a new type should use as a starting point. From noreply at buildbot.pypy.org Tue Feb 25 19:22:43 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 19:22:43 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: Move std_dict_descr to pypy.interpreter.typedef. 
Message-ID: <20140225182243.24BC61C02EA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69443:738f370ede8a Date: 2014-02-25 19:02 +0100 http://bitbucket.org/pypy/pypy/changeset/738f370ede8a/ Log: Move std_dict_descr to pypy.interpreter.typedef. diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -617,6 +617,7 @@ from pypy.interpreter.nestedscope import Cell from pypy.interpreter.special import NotImplemented, Ellipsis + def descr_get_dict(space, w_obj): w_dict = w_obj.getdict(space) if w_dict is None: @@ -637,6 +638,11 @@ return space.w_None return lifeline.get_any_weakref(space) +dict_descr = GetSetProperty(descr_get_dict, descr_set_dict, descr_del_dict, + doc="dictionary for instance variables (if defined)") +dict_descr.name = '__dict__' + + def generic_ne(space, w_obj1, w_obj2): if space.eq_w(w_obj1, w_obj2): return space.w_False diff --git a/pypy/objspace/std/stdtypedef.py b/pypy/objspace/std/stdtypedef.py --- a/pypy/objspace/std/stdtypedef.py +++ b/pypy/objspace/std/stdtypedef.py @@ -1,6 +1,4 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty, Member -from pypy.interpreter.typedef import descr_get_dict, descr_set_dict -from pypy.interpreter.typedef import descr_del_dict from pypy.interpreter.baseobjspace import SpaceCache __all__ = ['StdTypeDef'] @@ -9,11 +7,6 @@ StdTypeDef = TypeDef -std_dict_descr = GetSetProperty(descr_get_dict, descr_set_dict, descr_del_dict, - doc="dictionary for instance variables (if defined)") -std_dict_descr.name = '__dict__' - - class TypeCache(SpaceCache): def build(cache, typedef): "NOT_RPYTHON: initialization-time only." diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -3,9 +3,9 @@ from pypy.interpreter.error import oefmt, OperationError from pypy.interpreter.function import Function, StaticMethod from pypy.interpreter.typedef import weakref_descr, GetSetProperty,\ - descr_get_dict + descr_get_dict, dict_descr from pypy.interpreter.astcompiler.misc import mangle -from pypy.objspace.std.stdtypedef import std_dict_descr, Member +from pypy.objspace.std.stdtypedef import Member from pypy.objspace.std.stdtypedef import StdTypeDef from rpython.rlib.jit import (promote, elidable_promote, we_are_jitted, @@ -1032,7 +1032,7 @@ def create_dict_slot(w_self): if not w_self.hasdict: w_self.dict_w.setdefault('__dict__', - w_self.space.wrap(std_dict_descr)) + w_self.space.wrap(dict_descr)) w_self.hasdict = True def create_weakref_slot(w_self): From noreply at buildbot.pypy.org Tue Feb 25 19:22:44 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 19:22:44 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: Remove StdTypeDef alias and change all references. Message-ID: <20140225182244.666DB1C02EA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69444:5a5429019418 Date: 2014-02-25 19:21 +0100 http://bitbucket.org/pypy/pypy/changeset/5a5429019418/ Log: Remove StdTypeDef alias and change all references. 
diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -2,7 +2,6 @@ from pypy.interpreter import argument, gateway from pypy.interpreter.baseobjspace import W_Root, ObjSpace, SpaceCache from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.sliceobject import W_SliceObject from rpython.rlib.objectmodel import instantiate, we_are_translated, specialize from rpython.rlib.nonconst import NonConstant @@ -365,9 +364,8 @@ @specialize.memo() def see_typedef(space, typedef): assert isinstance(typedef, TypeDef) - if not isinstance(typedef, StdTypeDef): - for name, value in typedef.rawdict.items(): - space.wrap(value) + for name, value in typedef.rawdict.items(): + space.wrap(value) class FakeCompiler(object): pass diff --git a/pypy/objspace/std/basestringtype.py b/pypy/objspace/std/basestringtype.py --- a/pypy/objspace/std/basestringtype.py +++ b/pypy/objspace/std/basestringtype.py @@ -1,7 +1,7 @@ -from pypy.objspace.std.stdtypedef import StdTypeDef +from pypy.interpreter.typedef import TypeDef -basestring_typedef = StdTypeDef("basestring", +basestring_typedef = TypeDef("basestring", __doc__ = ("basestring cannot be instantiated; " "it is the base for str and unicode.") ) diff --git a/pypy/objspace/std/boolobject.py b/pypy/objspace/std/boolobject.py --- a/pypy/objspace/std/boolobject.py +++ b/pypy/objspace/std/boolobject.py @@ -6,8 +6,8 @@ from rpython.tool.sourcetools import func_renamer, func_with_new_name from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec +from pypy.interpreter.typedef import TypeDef from pypy.objspace.std.intobject import W_AbstractIntObject, W_IntObject -from pypy.objspace.std.stdtypedef import StdTypeDef class W_BoolObject(W_IntObject): @@ -80,7 +80,7 @@ W_BoolObject.w_True = W_BoolObject(True) -W_BoolObject.typedef = StdTypeDef("bool", W_IntObject.typedef, +W_BoolObject.typedef = TypeDef("bool", W_IntObject.typedef, __doc__ = """bool(x) -> bool Returns True when the argument x is true, False otherwise. 
diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py --- a/pypy/objspace/std/bytearrayobject.py +++ b/pypy/objspace/std/bytearrayobject.py @@ -9,8 +9,8 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec from pypy.interpreter.signature import Signature +from pypy.interpreter.typedef import TypeDef from pypy.objspace.std.sliceobject import W_SliceObject -from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods from pypy.objspace.std.util import get_positive_index @@ -891,7 +891,7 @@ """ -W_BytearrayObject.typedef = StdTypeDef( +W_BytearrayObject.typedef = TypeDef( "bytearray", __doc__ = BytearrayDocstrings.__doc__, __new__ = interp2app(W_BytearrayObject.descr_new), diff --git a/pypy/objspace/std/bytesobject.py b/pypy/objspace/std/bytesobject.py --- a/pypy/objspace/std/bytesobject.py +++ b/pypy/objspace/std/bytesobject.py @@ -10,10 +10,10 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import ( WrappedDefault, interp2app, interpindirect2app, unwrap_spec) +from pypy.interpreter.typedef import TypeDef from pypy.objspace.std import newformat from pypy.objspace.std.basestringtype import basestring_typedef from pypy.objspace.std.formatting import mod_format -from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods from pypy.objspace.std.unicodeobject import ( _get_encoding_and_errors, decode_object, unicode_from_encoded_object, @@ -782,7 +782,7 @@ return W_BytesObject(c) -W_BytesObject.typedef = StdTypeDef( +W_BytesObject.typedef = TypeDef( "str", basestring_typedef, __new__ = interp2app(W_BytesObject.descr_new), __doc__ = """str(object='') -> string diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -3,9 +3,9 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault +from pypy.interpreter.typedef import GetSetProperty, TypeDef from pypy.objspace.std import newformat from pypy.objspace.std.floatobject import _hash_float -from pypy.objspace.std.stdtypedef import GetSetProperty, StdTypeDef from rpython.rlib import jit, rcomplex from rpython.rlib.rarithmetic import intmask, r_ulonglong from rpython.rlib.rbigint import rbigint @@ -593,7 +593,7 @@ return space.newfloat(getattr(w_obj, name)) return GetSetProperty(fget) -W_ComplexObject.typedef = StdTypeDef("complex", +W_ComplexObject.typedef = TypeDef("complex", __doc__ = """complex(real[, imag]) -> complex number Create a complex number from a real part and an optional imaginary part. 
diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -11,7 +11,7 @@ WrappedDefault, applevel, interp2app, unwrap_spec) from pypy.interpreter.mixedmodule import MixedModule from pypy.interpreter.signature import Signature -from pypy.objspace.std.stdtypedef import StdTypeDef +from pypy.interpreter.typedef import TypeDef from pypy.objspace.std.util import negate @@ -372,7 +372,7 @@ dictrepr = app.interphook("dictrepr") -W_DictMultiObject.typedef = StdTypeDef("dict", +W_DictMultiObject.typedef = TypeDef("dict", __doc__ = '''dict() -> new empty dictionary. dict(mapping) -> new dictionary initialized from a mapping object\'s (key, value) pairs. @@ -1216,7 +1216,7 @@ return space.newtuple([w_key, w_value]) raise OperationError(space.w_StopIteration, space.w_None) -W_DictMultiIterItemsObject.typedef = StdTypeDef( +W_DictMultiIterItemsObject.typedef = TypeDef( "dict_iteritems", __iter__ = interp2app(W_DictMultiIterItemsObject.descr_iter), next = interp2app(W_DictMultiIterItemsObject.descr_next), @@ -1224,7 +1224,7 @@ __reduce__ = interp2app(W_BaseDictMultiIterObject.descr_reduce), ) -W_DictMultiIterKeysObject.typedef = StdTypeDef( +W_DictMultiIterKeysObject.typedef = TypeDef( "dict_iterkeys", __iter__ = interp2app(W_DictMultiIterKeysObject.descr_iter), next = interp2app(W_DictMultiIterKeysObject.descr_next), @@ -1232,7 +1232,7 @@ __reduce__ = interp2app(W_BaseDictMultiIterObject.descr_reduce), ) -W_DictMultiIterValuesObject.typedef = StdTypeDef( +W_DictMultiIterValuesObject.typedef = TypeDef( "dict_itervalues", __iter__ = interp2app(W_DictMultiIterValuesObject.descr_iter), next = interp2app(W_DictMultiIterValuesObject.descr_next), @@ -1340,7 +1340,7 @@ def descr_iter(self, space): return W_DictMultiIterValuesObject(space, self.w_dict.itervalues()) -W_DictViewItemsObject.typedef = StdTypeDef( +W_DictViewItemsObject.typedef = TypeDef( "dict_items", __repr__ = interp2app(W_DictViewItemsObject.descr_repr), __len__ = interp2app(W_DictViewItemsObject.descr_len), @@ -1363,7 +1363,7 @@ __rxor__ = interp2app(W_DictViewItemsObject.descr_rxor), ) -W_DictViewKeysObject.typedef = StdTypeDef( +W_DictViewKeysObject.typedef = TypeDef( "dict_keys", __repr__ = interp2app(W_DictViewKeysObject.descr_repr), __len__ = interp2app(W_DictViewKeysObject.descr_len), @@ -1386,7 +1386,7 @@ __rxor__ = interp2app(W_DictViewKeysObject.descr_rxor), ) -W_DictViewValuesObject.typedef = StdTypeDef( +W_DictViewValuesObject.typedef = TypeDef( "dict_values", __repr__ = interp2app(W_DictViewValuesObject.descr_repr), __len__ = interp2app(W_DictViewValuesObject.descr_len), diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -5,10 +5,9 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec, WrappedDefault -from pypy.interpreter.typedef import GetSetProperty +from pypy.interpreter.typedef import GetSetProperty, TypeDef from pypy.objspace.std import newformat from pypy.objspace.std.longobject import W_LongObject -from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.util import wrap_parsestringerror from rpython.rlib import rarithmetic, rfloat from rpython.rlib.rarithmetic import ovfcheck_float_to_int, intmask, LONG_BIT @@ -641,7 +640,7 @@ return space.wrap("0x%sp%s%d" % (s, sign, exp)) 
-W_FloatObject.typedef = StdTypeDef("float", +W_FloatObject.typedef = TypeDef("float", __doc__ = '''float(x) -> floating point number Convert a string or number to a floating point number, if possible.''', diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -21,8 +21,8 @@ from pypy.interpreter.buffer import Buffer from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec +from pypy.interpreter.typedef import TypeDef from pypy.objspace.std import newformat -from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.util import ( BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_INT, wrap_parsestringerror) @@ -718,7 +718,7 @@ return w_obj -W_IntObject.typedef = StdTypeDef("int", +W_IntObject.typedef = TypeDef("int", __doc__ = """int(x=0) -> int or long int(x, base=10) -> int or long diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py --- a/pypy/objspace/std/iterobject.py +++ b/pypy/objspace/std/iterobject.py @@ -3,7 +3,7 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app, interpindirect2app from pypy.interpreter.error import OperationError -from pypy.objspace.std.stdtypedef import StdTypeDef +from pypy.interpreter.typedef import TypeDef class W_AbstractSeqIterObject(W_Root): @@ -44,7 +44,7 @@ def descr_length_hint(self, space): return self.getlength(space) -W_AbstractSeqIterObject.typedef = StdTypeDef( +W_AbstractSeqIterObject.typedef = TypeDef( "sequenceiterator", __doc__ = '''iter(collection) -> iterator iter(callable, sentinel) -> iterator @@ -167,7 +167,7 @@ raise OperationError(space.w_StopIteration, space.w_None) return w_item -W_ReverseSeqIterObject.typedef = StdTypeDef( +W_ReverseSeqIterObject.typedef = TypeDef( "reversesequenceiterator", __iter__ = interp2app(W_ReverseSeqIterObject.descr_iter), next = interp2app(W_ReverseSeqIterObject.descr_next), diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py --- a/pypy/objspace/std/listobject.py +++ b/pypy/objspace/std/listobject.py @@ -16,6 +16,7 @@ interp2app) from pypy.interpreter.generator import GeneratorIterator from pypy.interpreter.signature import Signature +from pypy.interpreter.typedef import TypeDef from pypy.objspace.std.bytesobject import W_BytesObject from pypy.objspace.std.floatobject import W_FloatObject from pypy.objspace.std.intobject import W_IntObject @@ -23,7 +24,6 @@ W_ReverseSeqIterObject) from pypy.objspace.std.sliceobject import (W_SliceObject, unwrap_start_stop, normalize_simple_slice) -from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.tupleobject import W_AbstractTupleObject from pypy.objspace.std.unicodeobject import W_UnicodeObject from pypy.objspace.std.util import get_positive_index, negate @@ -1758,7 +1758,7 @@ return CustomCompareSort.lt(self, a.w_key, b.w_key) -W_ListObject.typedef = StdTypeDef("list", +W_ListObject.typedef = TypeDef("list", __doc__ = """list() -> new list list(sequence) -> new list initialized from sequence's items""", __new__ = interp2app(W_ListObject.descr_new), diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -13,9 +13,9 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import ( WrappedDefault, interp2app, interpindirect2app, 
unwrap_spec) +from pypy.interpreter.typedef import TypeDef from pypy.objspace.std import newformat from pypy.objspace.std.intobject import W_AbstractIntObject -from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.util import ( BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_LONG, wrap_parsestringerror) @@ -563,7 +563,7 @@ return w_obj -W_AbstractLongObject.typedef = StdTypeDef("long", +W_AbstractLongObject.typedef = TypeDef("long", __doc__ = """long(x=0) -> long long(x, base=10) -> long diff --git a/pypy/objspace/std/noneobject.py b/pypy/objspace/std/noneobject.py --- a/pypy/objspace/std/noneobject.py +++ b/pypy/objspace/std/noneobject.py @@ -1,6 +1,6 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app -from pypy.objspace.std.stdtypedef import StdTypeDef +from pypy.interpreter.typedef import TypeDef class W_NoneObject(W_Root): @@ -16,7 +16,7 @@ W_NoneObject.w_None = W_NoneObject() -W_NoneObject.typedef = StdTypeDef("NoneType", +W_NoneObject.typedef = TypeDef("NoneType", __nonzero__ = interp2app(W_NoneObject.descr_nonzero), __repr__ = interp2app(W_NoneObject.descr_repr), ) diff --git a/pypy/objspace/std/objectobject.py b/pypy/objspace/std/objectobject.py --- a/pypy/objspace/std/objectobject.py +++ b/pypy/objspace/std/objectobject.py @@ -1,9 +1,8 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import applevel, interp2app, unwrap_spec -from pypy.interpreter.typedef import GetSetProperty, default_identity_hash +from pypy.interpreter.typedef import GetSetProperty, default_identity_hash, TypeDef from pypy.objspace.descroperation import Object -from pypy.objspace.std.stdtypedef import StdTypeDef app = applevel(r''' @@ -210,7 +209,7 @@ return space.format(w_as_str, w_format_spec) -W_ObjectObject.typedef = StdTypeDef("object", +W_ObjectObject.typedef = TypeDef("object", __doc__ = "The most base type", __new__ = interp2app(descr__new__), __subclasshook__ = interp2app(descr___subclasshook__, as_classmethod=True), diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -1,10 +1,10 @@ from pypy.interpreter import gateway +from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError from pypy.interpreter.signature import Signature -from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.typedef import TypeDef from pypy.objspace.std.bytesobject import W_BytesObject from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.unicodeobject import W_UnicodeObject from rpython.rlib.objectmodel import r_dict @@ -515,7 +515,7 @@ W_SetObject.__init__(w_obj, space) return w_obj -W_SetObject.typedef = StdTypeDef("set", +W_SetObject.typedef = TypeDef("set", __doc__ = """set(iterable) --> set object Build an unordered collection.""", @@ -616,7 +616,7 @@ return space.wrap(hash) -W_FrozensetObject.typedef = StdTypeDef("frozenset", +W_FrozensetObject.typedef = TypeDef("frozenset", __doc__ = """frozenset(iterable) --> frozenset object Build an immutable unordered collection.""", @@ -1521,7 +1521,7 @@ return w_key raise OperationError(space.w_StopIteration, space.w_None) -W_SetIterObject.typedef = StdTypeDef("setiterator", +W_SetIterObject.typedef = TypeDef("setiterator", __length_hint__ = gateway.interp2app(W_SetIterObject.descr_length_hint), 
__iter__ = gateway.interp2app(W_SetIterObject.descr_iter), next = gateway.interp2app(W_SetIterObject.descr_next) diff --git a/pypy/objspace/std/sliceobject.py b/pypy/objspace/std/sliceobject.py --- a/pypy/objspace/std/sliceobject.py +++ b/pypy/objspace/std/sliceobject.py @@ -3,8 +3,7 @@ from pypy.interpreter import gateway from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError -from pypy.interpreter.typedef import GetSetProperty -from pypy.objspace.std.stdtypedef import StdTypeDef +from pypy.interpreter.typedef import GetSetProperty, TypeDef from rpython.rlib.objectmodel import specialize @@ -170,7 +169,7 @@ return getattr(w_obj, name) return GetSetProperty(fget) -W_SliceObject.typedef = StdTypeDef("slice", +W_SliceObject.typedef = TypeDef("slice", __doc__ = '''slice([start,] stop[, step]) Create a slice object. This is used for extended slicing (e.g. a[0:10:2]).''', diff --git a/pypy/objspace/std/stdtypedef.py b/pypy/objspace/std/stdtypedef.py --- a/pypy/objspace/std/stdtypedef.py +++ b/pypy/objspace/std/stdtypedef.py @@ -1,11 +1,5 @@ -from pypy.interpreter.typedef import TypeDef, GetSetProperty, Member from pypy.interpreter.baseobjspace import SpaceCache -__all__ = ['StdTypeDef'] - - -StdTypeDef = TypeDef - class TypeCache(SpaceCache): def build(cache, typedef): diff --git a/pypy/objspace/std/strbufobject.py b/pypy/objspace/std/strbufobject.py --- a/pypy/objspace/std/strbufobject.py +++ b/pypy/objspace/std/strbufobject.py @@ -2,9 +2,7 @@ import py -from pypy.objspace.std.basestringtype import basestring_typedef from pypy.objspace.std.bytesobject import W_AbstractBytesObject, W_BytesObject -from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.interpreter.gateway import interp2app, unwrap_spec from rpython.rlib.rstring import StringBuilder diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py --- a/pypy/objspace/std/test/test_typeobject.py +++ b/pypy/objspace/std/test/test_typeobject.py @@ -1,6 +1,6 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.gateway import interp2app -from pypy.objspace.std.stdtypedef import StdTypeDef +from pypy.interpreter.typedef import TypeDef class TestTypeObject: @@ -10,7 +10,7 @@ pass def descr__new__(space, w_subtype): return space.allocate_instance(W_Stuff, w_subtype) - W_Stuff.typedef = StdTypeDef("stuff", + W_Stuff.typedef = TypeDef("stuff", __new__ = interp2app(descr__new__)) W_Stuff.typedef.acceptable_as_base_class = False w_stufftype = space.gettypeobject(W_Stuff.typedef) diff --git a/pypy/objspace/std/tupleobject.py b/pypy/objspace/std/tupleobject.py --- a/pypy/objspace/std/tupleobject.py +++ b/pypy/objspace/std/tupleobject.py @@ -6,9 +6,9 @@ from pypy.interpreter.error import OperationError from pypy.interpreter.gateway import ( WrappedDefault, interp2app, interpindirect2app, unwrap_spec) +from pypy.interpreter.typedef import TypeDef from pypy.objspace.std.sliceobject import (W_SliceObject, unwrap_start_stop, normalize_simple_slice) -from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.util import negate from rpython.rlib import jit from rpython.rlib.debug import make_sure_not_resized @@ -210,7 +210,7 @@ raise OperationError(space.w_ValueError, space.wrap("tuple.index(x): x not in tuple")) -W_AbstractTupleObject.typedef = StdTypeDef( +W_AbstractTupleObject.typedef = TypeDef( "tuple", __doc__ = """tuple() -> an empty tuple tuple(sequence) -> tuple initialized from sequence's items diff --git 
a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -3,10 +3,8 @@ from pypy.interpreter.error import oefmt, OperationError from pypy.interpreter.function import Function, StaticMethod from pypy.interpreter.typedef import weakref_descr, GetSetProperty,\ - descr_get_dict, dict_descr + descr_get_dict, dict_descr, Member, TypeDef from pypy.interpreter.astcompiler.misc import mangle -from pypy.objspace.std.stdtypedef import Member -from pypy.objspace.std.stdtypedef import StdTypeDef from rpython.rlib.jit import (promote, elidable_promote, we_are_jitted, promote_string, elidable, dont_look_inside, unroll_safe) @@ -850,7 +848,7 @@ def type_isinstance(w_obj, space, w_inst): return space.newbool(space.type(w_inst).issubtype(w_obj)) -W_TypeObject.typedef = StdTypeDef("type", +W_TypeObject.typedef = TypeDef("type", __new__ = gateway.interp2app(descr__new__), __name__ = GetSetProperty(descr_get__name__, descr_set__name__), __bases__ = GetSetProperty(descr_get__bases__, descr_set__bases__), diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py --- a/pypy/objspace/std/unicodeobject.py +++ b/pypy/objspace/std/unicodeobject.py @@ -11,11 +11,11 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import WrappedDefault, interp2app, unwrap_spec +from pypy.interpreter.typedef import TypeDef from pypy.module.unicodedata import unicodedb from pypy.objspace.std import newformat from pypy.objspace.std.basestringtype import basestring_typedef from pypy.objspace.std.formatting import mod_format -from pypy.objspace.std.stdtypedef import StdTypeDef from pypy.objspace.std.stringmethods import StringMethods __all__ = ['W_UnicodeObject', 'wrapunicode', 'plain_str2unicode', @@ -915,7 +915,7 @@ """ -W_UnicodeObject.typedef = StdTypeDef( +W_UnicodeObject.typedef = TypeDef( "unicode", basestring_typedef, __new__ = interp2app(W_UnicodeObject.descr_new), __doc__ = UnicodeDocstrings.__doc__, From noreply at buildbot.pypy.org Tue Feb 25 19:55:54 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 19:55:54 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: hg merge default Message-ID: <20140225185554.9F92F1C08B9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69445:61ebdb4e0083 Date: 2014-02-25 19:50 +0100 http://bitbucket.org/pypy/pypy/changeset/61ebdb4e0083/ Log: hg merge default diff too long, truncating to 2000 out of 2424 lines diff --git a/lib-python/2.7/threading.py b/lib-python/2.7/threading.py --- a/lib-python/2.7/threading.py +++ b/lib-python/2.7/threading.py @@ -246,7 +246,14 @@ else: # PyPy patch: use _py3k_acquire() if timeout > 0: - gotit = waiter._py3k_acquire(True, timeout) + try: + gotit = waiter._py3k_acquire(True, timeout) + except OverflowError: + # bah, in Python 3, acquire(True, timeout) raises + # OverflowError if the timeout is too huge. For + # forward-compatibility reasons we do the same. 
+ waiter.acquire() + gotit = True else: gotit = waiter.acquire(False) if not gotit: diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -14,6 +14,8 @@ _immutable_fields_ = ['size?', 'name', 'name_position'] # note that 'size' is not strictly immutable, because it can change # from -1 to the real value in the W_CTypeStruct subclass. + # XXX this could be improved with an elidable method get_size() + # that raises in case it's still -1... cast_anything = False is_primitive_integer = False diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -159,7 +159,7 @@ subentry = ProfilerSubEntry(entry.frame) self.calls[entry] = subentry return subentry - return None + raise class ProfilerContext(object): def __init__(self, profobj, entry): @@ -181,8 +181,11 @@ entry._stop(tt, it) if profobj.subcalls and self.previous: caller = jit.promote(self.previous.entry) - subentry = caller._get_or_make_subentry(entry, False) - if subentry is not None: + try: + subentry = caller._get_or_make_subentry(entry, False) + except KeyError: + pass + else: subentry._stop(tt, it) @@ -308,7 +311,7 @@ entry = ProfilerEntry(f_code) self.data[f_code] = entry return entry - return None + raise @jit.elidable def _get_or_make_builtin_entry(self, key, make=True): @@ -319,7 +322,7 @@ entry = ProfilerEntry(self.space.wrap(key)) self.builtin_data[key] = entry return entry - return None + raise def _enter_call(self, f_code): # we have a superb gc, no point in freelist :) @@ -332,8 +335,11 @@ if context is None: return self = jit.promote(self) - entry = self._get_or_make_entry(f_code, False) - if entry is not None: + try: + entry = self._get_or_make_entry(f_code, False) + except KeyError: + pass + else: context._stop(self, entry) self.current_context = context.previous @@ -347,8 +353,11 @@ if context is None: return self = jit.promote(self) - entry = self._get_or_make_builtin_entry(key, False) - if entry is not None: + try: + entry = self._get_or_make_builtin_entry(key, False) + except KeyError: + pass + else: context._stop(self, entry) self.current_context = context.previous diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -94,12 +94,12 @@ @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def _PyArray_ITEMSIZE(space, w_array): assert isinstance(w_array, W_NDimArray) - return w_array.get_dtype().get_size() + return w_array.get_dtype().elsize @cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) def _PyArray_NBYTES(space, w_array): assert isinstance(w_array, W_NDimArray) - return w_array.get_size() * w_array.get_dtype().get_size() + return w_array.get_size() * w_array.get_dtype().elsize @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def _PyArray_TYPE(space, w_array): diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -55,7 +55,7 @@ loop.setslice(space, shape, self, impl) def get_size(self): - return self.size // self.dtype.get_size() + return self.size // self.dtype.elsize def get_storage_size(self): return self.size @@ -89,7 +89,7 @@ def get_real(self, space, orig_array): strides = 
self.get_strides() backstrides = self.get_backstrides() - if self.dtype.is_complex_type(): + if self.dtype.is_complex(): dtype = self.dtype.get_float_dtype(space) return SliceArray(self.start, strides, backstrides, self.get_shape(), self, orig_array, dtype=dtype) @@ -103,13 +103,13 @@ def get_imag(self, space, orig_array): strides = self.get_strides() backstrides = self.get_backstrides() - if self.dtype.is_complex_type(): + if self.dtype.is_complex(): dtype = self.dtype.get_float_dtype(space) - return SliceArray(self.start + dtype.get_size(), strides, + return SliceArray(self.start + dtype.elsize, strides, backstrides, self.get_shape(), self, orig_array, dtype=dtype) impl = NonWritableArray(self.get_shape(), self.dtype, self.order, strides, backstrides) - if not self.dtype.is_flexible_type(): + if not self.dtype.is_flexible(): impl.fill(space, self.dtype.box(0)) return impl @@ -204,7 +204,7 @@ if space.isinstance_w(w_idx, space.w_str): idx = space.str_w(w_idx) dtype = self.dtype - if not dtype.is_record_type() or idx not in dtype.fields: + if not dtype.is_record() or idx not in dtype.fields: raise OperationError(space.w_ValueError, space.wrap( "field named %s not found" % idx)) return RecordChunk(idx) @@ -324,7 +324,7 @@ make_sure_not_resized(strides) make_sure_not_resized(backstrides) self.shape = shape - self.size = support.product(shape) * dtype.get_size() + self.size = support.product(shape) * dtype.elsize self.order = order self.dtype = dtype self.strides = strides @@ -352,7 +352,7 @@ self.get_shape()) def fill(self, space, box): - self.dtype.itemtype.fill(self.storage, self.dtype.get_size(), + self.dtype.itemtype.fill(self.storage, self.dtype.elsize, box, 0, self.size, 0) def set_shape(self, space, orig_array, new_shape): @@ -425,7 +425,7 @@ self.storage = parent.storage self.order = parent.order self.dtype = dtype - self.size = support.product(shape) * self.dtype.get_size() + self.size = support.product(shape) * self.dtype.elsize self.start = start self.orig_arr = orig_arr @@ -460,12 +460,12 @@ strides = [] backstrides = [] dtype = self.dtype - s = self.get_strides()[0] // dtype.get_size() + s = self.get_strides()[0] // dtype.elsize if self.order == 'C': new_shape.reverse() for sh in new_shape: - strides.append(s * dtype.get_size()) - backstrides.append(s * (sh - 1) * dtype.get_size()) + strides.append(s * dtype.elsize) + backstrides.append(s * (sh - 1) * dtype.elsize) s *= max(1, sh) if self.order == 'C': strides.reverse() diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -70,7 +70,7 @@ scalar = Scalar(dtype) if dtype.is_str_or_unicode(): scalar.value = dtype.coerce(space, space.wrap(self.value.raw_str())) - elif dtype.is_record_type(): + elif dtype.is_record(): raise OperationError(space.w_NotImplementedError, space.wrap( "viewing scalar as record not implemented")) else: @@ -78,7 +78,7 @@ return scalar def get_real(self, space, orig_array): - if self.dtype.is_complex_type(): + if self.dtype.is_complex(): scalar = Scalar(self.dtype.get_float_dtype(space)) scalar.value = self.value.convert_real_to(scalar.dtype) return scalar @@ -91,7 +91,7 @@ "could not broadcast input array from shape " + "(%s) into shape ()" % ( ','.join([str(x) for x in w_arr.get_shape()],)))) - if self.dtype.is_complex_type(): + if self.dtype.is_complex(): dtype = self.dtype.get_float_dtype(space) self.value = self.dtype.itemtype.composite( 
w_arr.get_scalar_value().convert_to(space, dtype), @@ -100,7 +100,7 @@ self.value = w_arr.get_scalar_value() def get_imag(self, space, orig_array): - if self.dtype.is_complex_type(): + if self.dtype.is_complex(): scalar = Scalar(self.dtype.get_float_dtype(space)) scalar.value = self.value.convert_imag_to(scalar.dtype) return scalar @@ -110,7 +110,7 @@ def set_imag(self, space, orig_array, w_val): #Only called on complex dtype - assert self.dtype.is_complex_type() + assert self.dtype.is_complex() w_arr = convert_to_array(space, w_val) if len(w_arr.get_shape()) > 0: raise OperationError(space.w_ValueError, space.wrap( @@ -127,7 +127,7 @@ if space.len_w(w_idx) == 0: return self.get_scalar_value() elif space.isinstance_w(w_idx, space.w_str): - if self.dtype.is_record_type(): + if self.dtype.is_record(): w_val = self.value.descr_getitem(space, w_idx) return convert_to_array(space, w_val) elif space.is_none(w_idx): @@ -148,7 +148,7 @@ if space.len_w(w_idx) == 0: return self.set_scalar_value(self.dtype.coerce(space, w_val)) elif space.isinstance_w(w_idx, space.w_str): - if self.dtype.is_record_type(): + if self.dtype.is_record(): return self.value.descr_setitem(space, w_idx, w_val) raise OperationError(space.w_IndexError, space.wrap("0-d arrays can't be indexed")) diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/arrayimpl/sort.py @@ -71,10 +71,10 @@ def __init__(self, index_stride_size, stride_size, size): start = 0 dtype = interp_dtype.get_dtype_cache(space).w_longdtype - indexes = dtype.itemtype.malloc(size*dtype.get_size()) + indexes = dtype.itemtype.malloc(size * dtype.elsize) values = alloc_raw_storage(size * stride_size, track_allocation=False) - Repr.__init__(self, dtype.get_size(), stride_size, + Repr.__init__(self, dtype.elsize, stride_size, size, values, indexes, start, start) def __del__(self): diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -137,14 +137,14 @@ "all the input array dimensions except for the " "concatenation axis must match exactly")) a_dt = arr.get_dtype() - if dtype.is_record_type() and a_dt.is_record_type(): + if dtype.is_record() and a_dt.is_record(): # Record types must match for f in dtype.fields: if f not in a_dt.fields or \ dtype.fields[f] != a_dt.fields[f]: raise OperationError(space.w_TypeError, space.wrap("invalid type promotion")) - elif dtype.is_record_type() or a_dt.is_record_type(): + elif dtype.is_record() or a_dt.is_record(): raise OperationError(space.w_TypeError, space.wrap("invalid type promotion")) dtype = interp_ufuncs.find_binop_result_dtype(space, dtype, diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -16,7 +16,7 @@ from pypy.interpreter.mixedmodule import MixedModule from rpython.rtyper.lltypesystem import lltype from rpython.rlib.rstring import StringBuilder -from rpython.rlib import jit +from rpython.rlib.objectmodel import specialize from pypy.module.micronumpy import constants as NPY @@ -33,13 +33,13 @@ long_double_size = 8 -def new_dtype_getter(name): - @jit.elidable +def new_dtype_getter(num): + @specialize.memo() def _get_dtype(space): from pypy.module.micronumpy.interp_dtype import get_dtype_cache - return 
get_dtype_cache(space).dtypes_by_name[name] + return get_dtype_cache(space).dtypes_by_num[num] - def new(space, w_subtype, w_value=None): + def descr__new__(space, w_subtype, w_value=None): from pypy.module.micronumpy.interp_numarray import array dtype = _get_dtype(space) if not space.is_none(w_value): @@ -52,7 +52,9 @@ def descr_reduce(self, space): return self.reduce(space) - return func_with_new_name(new, name + "_box_new"), staticmethod(_get_dtype), func_with_new_name(descr_reduce, "descr_reduce") + return (func_with_new_name(descr__new__, 'descr__new__%d' % num), + staticmethod(_get_dtype), + descr_reduce) class Box(object): @@ -303,15 +305,15 @@ else: dtype = space.interp_w(W_Dtype, space.call_function(space.gettypefor(W_Dtype), w_dtype)) - if dtype.get_size() == 0: + if dtype.elsize == 0: raise OperationError(space.w_TypeError, space.wrap( "data-type must not be 0-sized")) - if dtype.get_size() != self.get_dtype(space).get_size(): + if dtype.elsize != self.get_dtype(space).elsize: raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) if dtype.is_str_or_unicode(): return dtype.coerce(space, space.wrap(self.raw_str())) - elif dtype.is_record_type(): + elif dtype.is_record(): raise OperationError(space.w_NotImplementedError, space.wrap( "viewing scalar as record not implemented")) else: @@ -327,7 +329,7 @@ return space.wrap(1) def descr_get_itemsize(self, space): - return self.get_dtype(space).descr_get_itemsize(space) + return space.wrap(self.get_dtype(space).elsize) def descr_get_shape(self, space): return space.newtuple([]) @@ -352,6 +354,12 @@ w_meth = space.getattr(self.descr_ravel(space), space.wrap('reshape')) return space.call_args(w_meth, __args__) + def descr_get_real(self, space): + return self.get_dtype(space).itemtype.real(self) + + def descr_get_imag(self, space): + return self.get_dtype(space).itemtype.imag(self) + w_flags = None def descr_get_flags(self, space): if self.w_flags is None: @@ -359,7 +367,7 @@ return self.w_flags class W_BoolBox(W_GenericBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("bool") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.BOOL) class W_NumberBox(W_GenericBox): pass @@ -375,34 +383,34 @@ pass class W_Int8Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("int8") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.BYTE) class W_UInt8Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("uint8") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.UBYTE) class W_Int16Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("int16") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.SHORT) class W_UInt16Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("uint16") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.USHORT) class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("i") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.INT) class W_UInt32Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("I") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.UINT) + +class W_LongBox(W_SignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.LONG) + +class 
W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.ULONG) class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("q") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.LONGLONG) class W_UInt64Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("Q") - -class W_LongBox(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("l") - -class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("L") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.ULONGLONG) class W_InexactBox(W_NumberBox): pass @@ -411,45 +419,32 @@ pass class W_Float16Box(W_FloatingBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float16") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.HALF) class W_Float32Box(W_FloatingBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float32") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.FLOAT) class W_Float64Box(W_FloatingBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float64") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.DOUBLE) def descr_as_integer_ratio(self, space): return space.call_method(self.item(space), 'as_integer_ratio') class W_ComplexFloatingBox(W_InexactBox): - def descr_get_real(self, space): - dtype = self._COMPONENTS_BOX._get_dtype(space) - box = self.convert_real_to(dtype) - assert isinstance(box, self._COMPONENTS_BOX) - return space.wrap(box) - - def descr_get_imag(self, space): - dtype = self._COMPONENTS_BOX._get_dtype(space) - box = self.convert_imag_to(dtype) - assert isinstance(box, self._COMPONENTS_BOX) - return space.wrap(box) + pass class W_Complex64Box(ComplexBox, W_ComplexFloatingBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("complex64") - _COMPONENTS_BOX = W_Float32Box + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.CFLOAT) class W_Complex128Box(ComplexBox, W_ComplexFloatingBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("complex128") - _COMPONENTS_BOX = W_Float64Box + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.CDOUBLE) if long_double_size in (8, 12, 16): class W_FloatLongBox(W_FloatingBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.LONGDOUBLELTR) + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.LONGDOUBLE) class W_ComplexLongBox(ComplexBox, W_ComplexFloatingBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.CLONGDOUBLELTR) - _COMPONENTS_BOX = W_FloatLongBox + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.CLONGDOUBLE) class W_FlexibleBox(W_GenericBox): _attrs_ = ['arr', 'ofs', 'dtype'] @@ -635,6 +630,8 @@ strides = GetSetProperty(W_GenericBox.descr_get_shape), ndim = GetSetProperty(W_GenericBox.descr_get_ndim), T = GetSetProperty(W_GenericBox.descr_self), + real = GetSetProperty(W_GenericBox.descr_get_real), + imag = GetSetProperty(W_GenericBox.descr_get_imag), flags = GetSetProperty(W_GenericBox.descr_get_flags), ) @@ -768,16 +765,12 @@ __new__ = interp2app(W_Complex64Box.descr__new__.im_func), __reduce__ = interp2app(W_Complex64Box.descr_reduce), __complex__ = interp2app(W_GenericBox.item), - real = GetSetProperty(W_ComplexFloatingBox.descr_get_real), - imag = 
GetSetProperty(W_ComplexFloatingBox.descr_get_imag), ) W_Complex128Box.typedef = TypeDef("complex128", (W_ComplexFloatingBox.typedef, W_ComplexObject.typedef), __module__ = "numpy", __new__ = interp2app(W_Complex128Box.descr__new__.im_func), __reduce__ = interp2app(W_Complex128Box.descr_reduce), - real = GetSetProperty(W_ComplexFloatingBox.descr_get_real), - imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), ) if long_double_size in (8, 12, 16): @@ -792,8 +785,6 @@ __new__ = interp2app(W_ComplexLongBox.descr__new__.im_func), __reduce__ = interp2app(W_ComplexLongBox.descr_reduce), __complex__ = interp2app(W_GenericBox.item), - real = GetSetProperty(W_ComplexFloatingBox.descr_get_real), - imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), ) W_FlexibleBox.typedef = TypeDef("flexible", W_GenericBox.typedef, diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -6,7 +6,7 @@ interp_attrproperty, interp_attrproperty_w) from pypy.module.micronumpy import types, interp_boxes, base from rpython.rlib.objectmodel import specialize -from rpython.rlib.rarithmetic import LONG_BIT, r_longlong, r_ulonglong +from rpython.rlib.rarithmetic import r_longlong, r_ulonglong from rpython.rlib import jit from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy.conversion_utils import byteorder_converter @@ -38,22 +38,19 @@ class W_Dtype(W_Root): _immutable_fields_ = [ - "num", "kind", "char", "w_box_type", "float_type", - "itemtype?", "byteorder?", "names?", "fields?", "size?", + "itemtype?", "num", "kind", "char", "w_box_type", + "byteorder?", "names?", "fields?", "elsize?", "alignment?", "shape?", "subdtype?", "base?", - "alternate_constructors", "aliases", ] def __init__(self, itemtype, num, kind, char, w_box_type, - float_type=None, byteorder=None, names=[], fields={}, - size=1, shape=[], subdtype=None, - alternate_constructors=[], aliases=[]): + byteorder=None, names=[], fields={}, + elsize=None, shape=[], subdtype=None): self.itemtype = itemtype self.num = num self.kind = kind self.char = char self.w_box_type = w_box_type - self.float_type = float_type if byteorder is None: if itemtype.get_element_size() == 1: byteorder = NPY.IGNORE @@ -62,15 +59,16 @@ self.byteorder = byteorder self.names = names self.fields = fields - self.size = size + if elsize is None: + elsize = itemtype.get_element_size() + self.elsize = elsize + self.alignment = itemtype.alignment self.shape = shape self.subdtype = subdtype if not subdtype: self.base = self else: self.base = subdtype.base - self.alternate_constructors = alternate_constructors - self.aliases = aliases def __repr__(self): if self.fields is not None: @@ -85,100 +83,51 @@ def box_complex(self, real, imag): return self.itemtype.box_complex(real, imag) - def build_and_convert(self, space, box): - return self.itemtype.build_and_convert(space, self, box) - def coerce(self, space, w_item): return self.itemtype.coerce(space, self, w_item) - def is_int_type(self): - return (self.kind == NPY.SIGNEDLTR or self.kind == NPY.UNSIGNEDLTR or - self.kind == NPY.GENBOOLLTR) + def is_bool(self): + return self.kind == NPY.GENBOOLLTR def is_signed(self): return self.kind == NPY.SIGNEDLTR - def is_complex_type(self): + def is_unsigned(self): + return self.kind == NPY.UNSIGNEDLTR + + def is_int(self): + return (self.kind == NPY.SIGNEDLTR or self.kind == NPY.UNSIGNEDLTR or + self.kind == NPY.GENBOOLLTR) + + def 
is_float(self): + return self.kind == NPY.FLOATINGLTR + + def is_complex(self): return self.kind == NPY.COMPLEXLTR - def is_float_type(self): - return self.kind == NPY.FLOATINGLTR or self.kind == NPY.COMPLEXLTR - - def is_bool_type(self): - return self.kind == NPY.GENBOOLLTR - - def is_record_type(self): - return bool(self.fields) - - def is_str_type(self): + def is_str(self): return self.num == NPY.STRING def is_str_or_unicode(self): return self.num == NPY.STRING or self.num == NPY.UNICODE - def is_flexible_type(self): + def is_flexible(self): return self.is_str_or_unicode() or self.num == NPY.VOID + def is_record(self): + return bool(self.fields) + def is_native(self): return self.byteorder in (NPY.NATIVE, NPY.NATBYTE) - def get_size(self): - return self.size * self.itemtype.get_element_size() - def get_float_dtype(self, space): - assert self.kind == NPY.COMPLEXLTR - assert self.float_type is not None - dtype = get_dtype_cache(space).dtypes_by_name[self.float_type] + assert self.is_complex() + dtype = get_dtype_cache(space).component_dtypes[self.num] if self.byteorder == NPY.OPPBYTE: dtype = dtype.descr_newbyteorder(space) + assert dtype.is_float() return dtype - def descr_str(self, space): - if self.fields: - return space.str(self.descr_get_descr(space)) - elif self.subdtype is not None: - return space.str(space.newtuple([ - self.subdtype.descr_get_str(space), - self.descr_get_shape(space)])) - else: - if self.is_flexible_type(): - return self.descr_get_str(space) - else: - return self.descr_get_name(space) - - def descr_repr(self, space): - if self.fields: - r = self.descr_get_descr(space) - elif self.subdtype is not None: - r = space.newtuple([self.subdtype.descr_get_str(space), - self.descr_get_shape(space)]) - else: - if self.is_flexible_type(): - if self.byteorder != NPY.IGNORE: - byteorder = NPY.NATBYTE if self.is_native() else NPY.OPPBYTE - else: - byteorder = '' - r = space.wrap(byteorder + self.char + str(self.size)) - else: - r = self.descr_get_name(space) - return space.wrap("dtype(%s)" % space.str_w(space.repr(r))) - - def descr_get_itemsize(self, space): - return space.wrap(self.get_size()) - - def descr_get_alignment(self, space): - return space.wrap(self.itemtype.alignment) - - def descr_get_isbuiltin(self, space): - if self.fields is None: - return space.wrap(1) - return space.wrap(0) - - def descr_get_subdtype(self, space): - if self.subdtype is None: - return space.w_None - return space.newtuple([space.wrap(self.subdtype), self.descr_get_shape(space)]) - def get_name(self): return self.w_box_type.name @@ -186,26 +135,22 @@ name = self.get_name() if name[-1] == '_': name = name[:-1] - if self.is_flexible_type(): - return space.wrap(name + str(self.get_size() * 8)) + if self.is_flexible() and self.elsize != 0: + return space.wrap(name + str(self.elsize * 8)) return space.wrap(name) def descr_get_str(self, space): - size = self.get_size() basic = self.kind - if basic == NPY.UNICODELTR: + endian = self.byteorder + size = self.elsize + if endian == NPY.NATIVE: + endian = NPY.NATBYTE + if self.num == NPY.UNICODE: size >>= 2 - endian = NPY.NATBYTE - elif size // (self.size or 1) <= 1: - endian = NPY.IGNORE - else: - endian = self.byteorder - if endian == NPY.NATIVE: - endian = NPY.NATBYTE return space.wrap("%s%s%s" % (endian, basic, size)) def descr_get_descr(self, space): - if not self.is_record_type(): + if not self.is_record(): return space.newlist([space.newtuple([space.wrap(""), self.descr_get_str(space)])]) else: @@ -213,7 +158,7 @@ for name in self.names: subdtype = 
self.fields[name][1] subdescr = [space.wrap(name)] - if subdtype.is_record_type(): + if subdtype.is_record(): subdescr.append(subdtype.descr_get_descr(space)) elif subdtype.subdtype is not None: subdescr.append(subdtype.subdtype.descr_get_str(space)) @@ -224,38 +169,37 @@ descr.append(space.newtuple(subdescr[:])) return space.newlist(descr) - def descr_get_base(self, space): - return space.wrap(self.base) + def descr_get_hasobject(self, space): + return space.w_False + + def descr_get_isbuiltin(self, space): + if self.fields is None: + return space.wrap(1) + return space.wrap(0) def descr_get_isnative(self, space): return space.wrap(self.is_native()) + def descr_get_base(self, space): + return space.wrap(self.base) + + def descr_get_subdtype(self, space): + if self.subdtype is None: + return space.w_None + return space.newtuple([space.wrap(self.subdtype), + self.descr_get_shape(space)]) + def descr_get_shape(self, space): - w_shape = [space.wrap(dim) for dim in self.shape] - return space.newtuple(w_shape) - - def eq(self, space, w_other): - w_other = space.call_function(space.gettypefor(W_Dtype), w_other) - if space.is_w(self, w_other): - return True - if isinstance(w_other, W_Dtype): - return space.eq_w(self.descr_reduce(space), w_other.descr_reduce(space)) - return False - - def descr_eq(self, space, w_other): - return space.wrap(self.eq(space, w_other)) - - def descr_ne(self, space, w_other): - return space.wrap(not self.eq(space, w_other)) + return space.newtuple([space.wrap(dim) for dim in self.shape]) def descr_get_fields(self, space): if not self.fields: return space.w_None - w_d = space.newdict() + w_fields = space.newdict() for name, (offset, subdtype) in self.fields.iteritems(): - space.setitem(w_d, space.wrap(name), + space.setitem(w_fields, space.wrap(name), space.newtuple([subdtype, space.wrap(offset)])) - return w_d + return w_fields def descr_get_names(self, space): if not self.fields: @@ -290,8 +234,56 @@ raise OperationError(space.w_AttributeError, space.wrap( "Cannot delete dtype names attribute")) - def descr_get_hasobject(self, space): - return space.w_False + def eq(self, space, w_other): + w_other = space.call_function(space.gettypefor(W_Dtype), w_other) + if space.is_w(self, w_other): + return True + if isinstance(w_other, W_Dtype): + return space.eq_w(self.descr_reduce(space), + w_other.descr_reduce(space)) + return False + + def descr_eq(self, space, w_other): + return space.wrap(self.eq(space, w_other)) + + def descr_ne(self, space, w_other): + return space.wrap(not self.eq(space, w_other)) + + def descr_hash(self, space): + return space.hash(self.descr_reduce(space)) + + def descr_str(self, space): + if self.fields: + return space.str(self.descr_get_descr(space)) + elif self.subdtype is not None: + return space.str(space.newtuple([ + self.subdtype.descr_get_str(space), + self.descr_get_shape(space)])) + else: + if self.is_flexible(): + return self.descr_get_str(space) + else: + return self.descr_get_name(space) + + def descr_repr(self, space): + if self.fields: + r = self.descr_get_descr(space) + elif self.subdtype is not None: + r = space.newtuple([self.subdtype.descr_get_str(space), + self.descr_get_shape(space)]) + else: + if self.is_flexible(): + if self.byteorder != NPY.IGNORE: + byteorder = NPY.NATBYTE if self.is_native() else NPY.OPPBYTE + else: + byteorder = '' + size = self.elsize + if self.num == NPY.UNICODE: + size >>= 2 + r = space.wrap(byteorder + self.char + str(size)) + else: + r = self.descr_get_name(space) + return space.wrap("dtype(%s)" % 
space.str_w(space.repr(r))) def descr_getitem(self, space, w_item): if not self.fields: @@ -320,41 +312,29 @@ return space.wrap(0) return space.wrap(len(self.fields)) - def descr_hash(self, space): - return space.hash(self.descr_reduce(space)) - def descr_reduce(self, space): w_class = space.type(self) - - kind = self.kind - elemsize = self.get_size() - builder_args = space.newtuple([space.wrap("%s%d" % (kind, elemsize)), space.wrap(0), space.wrap(1)]) + builder_args = space.newtuple([ + space.wrap("%s%d" % (self.kind, self.elsize)), + space.wrap(0), space.wrap(1)]) version = space.wrap(3) + endian = self.byteorder + if endian == NPY.NATIVE: + endian = NPY.NATBYTE + subdescr = self.descr_get_subdtype(space) names = self.descr_get_names(space) values = self.descr_get_fields(space) - if self.fields: - endian = NPY.IGNORE - #TODO: Implement this when subarrays are implemented - subdescr = space.w_None - size = 0 - for key in self.fields: - dtype = self.fields[key][1] - assert isinstance(dtype, W_Dtype) - size += dtype.get_size() - w_size = space.wrap(size) - #TODO: Change this when alignment is implemented - alignment = space.wrap(1) + if self.is_flexible(): + w_size = space.wrap(self.elsize) + alignment = space.wrap(self.alignment) else: - endian = self.byteorder - if endian == NPY.NATIVE: - endian = NPY.NATBYTE - subdescr = space.w_None w_size = space.wrap(-1) alignment = space.wrap(-1) flags = space.wrap(0) - data = space.newtuple([version, space.wrap(endian), subdescr, names, values, w_size, alignment, flags]) + data = space.newtuple([version, space.wrap(endian), subdescr, + names, values, w_size, alignment, flags]) return space.newtuple([w_class, builder_args, data]) def descr_setstate(self, space, w_data): @@ -375,6 +355,7 @@ w_names = space.getitem(w_data, space.wrap(3)) w_fields = space.getitem(w_data, space.wrap(4)) size = space.int_w(space.getitem(w_data, space.wrap(5))) + alignment = space.int_w(space.getitem(w_data, space.wrap(6))) if (w_names == space.w_None) != (w_fields == space.w_None): raise oefmt(space.w_ValueError, "inconsistent fields and names") @@ -413,8 +394,9 @@ self.fields[name] = offset, dtype self.itemtype = types.RecordType() - if self.is_flexible_type(): - self.size = size + if self.is_flexible(): + self.elsize = size + self.alignment = alignment @unwrap_spec(new_order=str) def descr_newbyteorder(self, space, new_order=NPY.SWAP): @@ -426,9 +408,13 @@ elif newendian != NPY.IGNORE: endian = newendian itemtype = self.itemtype.__class__(endian in (NPY.NATIVE, NPY.NATBYTE)) + fields = self.fields + if fields is None: + fields = {} return W_Dtype(itemtype, self.num, self.kind, self.char, - self.w_box_type, self.float_type, byteorder=endian, - size=self.size) + self.w_box_type, byteorder=endian, elsize=self.elsize, + names=self.names, fields=fields, + shape=self.shape, subdtype=self.subdtype) @specialize.arg(2) @@ -458,11 +444,11 @@ raise oefmt(space.w_ValueError, "two fields with the same name") assert isinstance(subdtype, W_Dtype) fields[fldname] = (offset, subdtype) - offset += subdtype.get_size() + offset += subdtype.elsize names.append(fldname) return W_Dtype(types.RecordType(), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), - names=names, fields=fields, size=offset) + names=names, fields=fields, elsize=offset) def dtype_from_dict(space, w_dict): @@ -501,10 +487,10 @@ size *= dim if size == 1: return subdtype - size *= subdtype.get_size() + size *= subdtype.elsize return W_Dtype(types.VoidType(), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, 
space.gettypefor(interp_boxes.W_VoidBox), - shape=shape, subdtype=subdtype, size=size) + shape=shape, subdtype=subdtype, elsize=size) if space.is_none(w_dtype): return cache.w_float64dtype @@ -533,58 +519,59 @@ w_dtype1 = space.getitem(w_dtype, space.wrap(1)) subdtype = descr__new__(space, w_subtype, w_dtype0, w_align, w_copy) assert isinstance(subdtype, W_Dtype) - if subdtype.get_size() == 0: + if subdtype.elsize == 0: name = "%s%d" % (subdtype.kind, space.int_w(w_dtype1)) return descr__new__(space, w_subtype, space.wrap(name), w_align, w_copy) return descr__new__(space, w_subtype, w_dtype0, w_align, w_copy, w_shape=w_dtype1) elif space.isinstance_w(w_dtype, space.w_dict): return dtype_from_dict(space, w_dtype) for dtype in cache.builtin_dtypes: - if w_dtype in dtype.alternate_constructors: + if dtype.num in cache.alternate_constructors and \ + w_dtype in cache.alternate_constructors[dtype.num]: return dtype if w_dtype is dtype.w_box_type: return dtype if space.isinstance_w(w_dtype, space.w_type): - raise oefmt(space.w_NotImplementedError, "object dtype not implemented") + raise oefmt(space.w_NotImplementedError, + "cannot create dtype with type '%N'", w_dtype) raise oefmt(space.w_TypeError, "data type not understood") W_Dtype.typedef = TypeDef("dtype", __module__ = "numpy", __new__ = interp2app(descr__new__), - __str__= interp2app(W_Dtype.descr_str), - __repr__ = interp2app(W_Dtype.descr_repr), - __eq__ = interp2app(W_Dtype.descr_eq), - __ne__ = interp2app(W_Dtype.descr_ne), - __getitem__ = interp2app(W_Dtype.descr_getitem), - __len__ = interp2app(W_Dtype.descr_len), - - __hash__ = interp2app(W_Dtype.descr_hash), - __reduce__ = interp2app(W_Dtype.descr_reduce), - __setstate__ = interp2app(W_Dtype.descr_setstate), - newbyteorder = interp2app(W_Dtype.descr_newbyteorder), - type = interp_attrproperty_w("w_box_type", cls=W_Dtype), kind = interp_attrproperty("kind", cls=W_Dtype), char = interp_attrproperty("char", cls=W_Dtype), num = interp_attrproperty("num", cls=W_Dtype), byteorder = interp_attrproperty("byteorder", cls=W_Dtype), - itemsize = GetSetProperty(W_Dtype.descr_get_itemsize), - alignment = GetSetProperty(W_Dtype.descr_get_alignment), + itemsize = interp_attrproperty("elsize", cls=W_Dtype), + alignment = interp_attrproperty("alignment", cls=W_Dtype), + + name = GetSetProperty(W_Dtype.descr_get_name), + str = GetSetProperty(W_Dtype.descr_get_str), + descr = GetSetProperty(W_Dtype.descr_get_descr), + hasobject = GetSetProperty(W_Dtype.descr_get_hasobject), isbuiltin = GetSetProperty(W_Dtype.descr_get_isbuiltin), - + isnative = GetSetProperty(W_Dtype.descr_get_isnative), + base = GetSetProperty(W_Dtype.descr_get_base), subdtype = GetSetProperty(W_Dtype.descr_get_subdtype), - str = GetSetProperty(W_Dtype.descr_get_str), - name = GetSetProperty(W_Dtype.descr_get_name), - base = GetSetProperty(W_Dtype.descr_get_base), shape = GetSetProperty(W_Dtype.descr_get_shape), - isnative = GetSetProperty(W_Dtype.descr_get_isnative), fields = GetSetProperty(W_Dtype.descr_get_fields), names = GetSetProperty(W_Dtype.descr_get_names, W_Dtype.descr_set_names, W_Dtype.descr_del_names), - hasobject = GetSetProperty(W_Dtype.descr_get_hasobject), - descr = GetSetProperty(W_Dtype.descr_get_descr), + + __eq__ = interp2app(W_Dtype.descr_eq), + __ne__ = interp2app(W_Dtype.descr_ne), + __hash__ = interp2app(W_Dtype.descr_hash), + __str__= interp2app(W_Dtype.descr_str), + __repr__ = interp2app(W_Dtype.descr_repr), + __getitem__ = interp2app(W_Dtype.descr_getitem), + __len__ = interp2app(W_Dtype.descr_len), + 
__reduce__ = interp2app(W_Dtype.descr_reduce), + __setstate__ = interp2app(W_Dtype.descr_setstate), + newbyteorder = interp2app(W_Dtype.descr_newbyteorder), ) W_Dtype.typedef.acceptable_as_base_class = False @@ -601,10 +588,8 @@ except ValueError: raise oefmt(space.w_TypeError, "data type not understood") if char == NPY.CHARLTR: - char = NPY.STRINGLTR - size = 1 - - if char == NPY.STRINGLTR: + return new_string_dtype(space, 1, NPY.CHARLTR) + elif char == NPY.STRINGLTR: return new_string_dtype(space, size) elif char == NPY.UNICODELTR: return new_unicode_dtype(space, size) @@ -613,21 +598,22 @@ assert False -def new_string_dtype(space, size): +def new_string_dtype(space, size, char=NPY.STRINGLTR): return W_Dtype( types.StringType(), - size=size, + elsize=size, num=NPY.STRING, kind=NPY.STRINGLTR, - char=NPY.STRINGLTR, + char=char, w_box_type=space.gettypefor(interp_boxes.W_StringBox), ) def new_unicode_dtype(space, size): + itemtype = types.UnicodeType() return W_Dtype( - types.UnicodeType(), - size=size, + itemtype, + elsize=size * itemtype.get_element_size(), num=NPY.UNICODE, kind=NPY.UNICODELTR, char=NPY.UNICODELTR, @@ -638,7 +624,7 @@ def new_void_dtype(space, size): return W_Dtype( types.VoidType(), - size=size, + elsize=size, num=NPY.VOID, kind=NPY.VOIDLTR, char=NPY.VOIDLTR, @@ -654,8 +640,6 @@ kind=NPY.GENBOOLLTR, char=NPY.BOOLLTR, w_box_type=space.gettypefor(interp_boxes.W_BoolBox), - alternate_constructors=[space.w_bool], - aliases=['bool', 'bool8'], ) self.w_int8dtype = W_Dtype( types.Int8(), @@ -663,7 +647,6 @@ kind=NPY.SIGNEDLTR, char=NPY.BYTELTR, w_box_type=space.gettypefor(interp_boxes.W_Int8Box), - aliases=['byte'], ) self.w_uint8dtype = W_Dtype( types.UInt8(), @@ -671,7 +654,6 @@ kind=NPY.UNSIGNEDLTR, char=NPY.UBYTELTR, w_box_type=space.gettypefor(interp_boxes.W_UInt8Box), - aliases=['ubyte'], ) self.w_int16dtype = W_Dtype( types.Int16(), @@ -679,7 +661,6 @@ kind=NPY.SIGNEDLTR, char=NPY.SHORTLTR, w_box_type=space.gettypefor(interp_boxes.W_Int16Box), - aliases=['short'], ) self.w_uint16dtype = W_Dtype( types.UInt16(), @@ -687,7 +668,6 @@ kind=NPY.UNSIGNEDLTR, char=NPY.USHORTLTR, w_box_type=space.gettypefor(interp_boxes.W_UInt16Box), - aliases=['ushort'], ) self.w_int32dtype = W_Dtype( types.Int32(), @@ -709,11 +689,6 @@ kind=NPY.SIGNEDLTR, char=NPY.LONGLTR, w_box_type=space.gettypefor(interp_boxes.W_LongBox), - alternate_constructors=[space.w_int, - space.gettypefor(interp_boxes.W_IntegerBox), - space.gettypefor(interp_boxes.W_SignedIntegerBox), - ], - aliases=['int', 'intp', 'p'], ) self.w_ulongdtype = W_Dtype( types.ULong(), @@ -721,9 +696,6 @@ kind=NPY.UNSIGNEDLTR, char=NPY.ULONGLTR, w_box_type=space.gettypefor(interp_boxes.W_ULongBox), - alternate_constructors=[space.gettypefor(interp_boxes.W_UnsignedIntegerBox), - ], - aliases=['uint', 'uintp', 'P'], ) self.w_int64dtype = W_Dtype( types.Int64(), @@ -731,8 +703,6 @@ kind=NPY.SIGNEDLTR, char=NPY.LONGLONGLTR, w_box_type=space.gettypefor(interp_boxes.W_Int64Box), - alternate_constructors=[space.w_long], - aliases=['longlong'], ) self.w_uint64dtype = W_Dtype( types.UInt64(), @@ -740,7 +710,6 @@ kind=NPY.UNSIGNEDLTR, char=NPY.ULONGLONGLTR, w_box_type=space.gettypefor(interp_boxes.W_UInt64Box), - aliases=['ulonglong'], ) self.w_float32dtype = W_Dtype( types.Float32(), @@ -748,7 +717,6 @@ kind=NPY.FLOATINGLTR, char=NPY.FLOATLTR, w_box_type=space.gettypefor(interp_boxes.W_Float32Box), - aliases=['single'] ) self.w_float64dtype = W_Dtype( types.Float64(), @@ -756,11 +724,6 @@ kind=NPY.FLOATINGLTR, char=NPY.DOUBLELTR, 
w_box_type=space.gettypefor(interp_boxes.W_Float64Box), - alternate_constructors=[space.w_float, - space.gettypefor(interp_boxes.W_NumberBox), - space.gettypefor(interp_boxes.W_FloatingBox), - ], - aliases=["float", "double"], ) self.w_floatlongdtype = W_Dtype( types.FloatLong(), @@ -768,7 +731,6 @@ kind=NPY.FLOATINGLTR, char=NPY.LONGDOUBLELTR, w_box_type=space.gettypefor(interp_boxes.W_FloatLongBox), - aliases=["longdouble", "longfloat"], ) self.w_complex64dtype = W_Dtype( types.Complex64(), @@ -776,8 +738,6 @@ kind=NPY.COMPLEXLTR, char=NPY.CFLOATLTR, w_box_type=space.gettypefor(interp_boxes.W_Complex64Box), - aliases=['csingle'], - float_type=NPY.FLOATLTR, ) self.w_complex128dtype = W_Dtype( types.Complex128(), @@ -785,10 +745,6 @@ kind=NPY.COMPLEXLTR, char=NPY.CDOUBLELTR, w_box_type=space.gettypefor(interp_boxes.W_Complex128Box), - alternate_constructors=[space.w_complex, - space.gettypefor(interp_boxes.W_ComplexFloatingBox)], - aliases=["complex", 'cfloat', 'cdouble'], - float_type=NPY.DOUBLELTR, ) self.w_complexlongdtype = W_Dtype( types.ComplexLong(), @@ -796,41 +752,30 @@ kind=NPY.COMPLEXLTR, char=NPY.CLONGDOUBLELTR, w_box_type=space.gettypefor(interp_boxes.W_ComplexLongBox), - aliases=["clongdouble", "clongfloat"], - float_type=NPY.LONGDOUBLELTR, ) self.w_stringdtype = W_Dtype( types.StringType(), - size=0, + elsize=0, num=NPY.STRING, kind=NPY.STRINGLTR, char=NPY.STRINGLTR, w_box_type=space.gettypefor(interp_boxes.W_StringBox), - alternate_constructors=[space.w_str, - space.gettypefor(interp_boxes.W_CharacterBox)], - aliases=['string', "str"], ) self.w_unicodedtype = W_Dtype( types.UnicodeType(), - size=0, + elsize=0, num=NPY.UNICODE, kind=NPY.UNICODELTR, char=NPY.UNICODELTR, w_box_type=space.gettypefor(interp_boxes.W_UnicodeBox), - alternate_constructors=[space.w_unicode], - aliases=['unicode'], ) self.w_voiddtype = W_Dtype( types.VoidType(), - size=0, + elsize=0, num=NPY.VOID, kind=NPY.VOIDLTR, char=NPY.VOIDLTR, w_box_type=space.gettypefor(interp_boxes.W_VoidBox), - #alternate_constructors=[space.w_buffer], - # XXX no buffer in space - #alternate_constructors=[space.gettypefor(interp_boxes.W_GenericBox)], - # XXX fix, leads to _coerce error ) self.w_float16dtype = W_Dtype( types.Float16(), @@ -853,10 +798,52 @@ char=NPY.UINTPLTR, w_box_type=space.gettypefor(interp_boxes.W_ULongBox), ) + aliases = { + NPY.BOOL: ['bool', 'bool8'], + NPY.BYTE: ['byte'], + NPY.UBYTE: ['ubyte'], + NPY.SHORT: ['short'], + NPY.USHORT: ['ushort'], + NPY.LONG: ['int', 'intp', 'p'], + NPY.ULONG: ['uint', 'uintp', 'P'], + NPY.LONGLONG: ['longlong'], + NPY.ULONGLONG: ['ulonglong'], + NPY.FLOAT: ['single'], + NPY.DOUBLE: ['float', 'double'], + NPY.LONGDOUBLE: ['longdouble', 'longfloat'], + NPY.CFLOAT: ['csingle'], + NPY.CDOUBLE: ['complex', 'cfloat', 'cdouble'], + NPY.CLONGDOUBLE: ['clongdouble', 'clongfloat'], + NPY.STRING: ['string', 'str'], + NPY.UNICODE: ['unicode'], + } + self.alternate_constructors = { + NPY.BOOL: [space.w_bool], + NPY.LONG: [space.w_int, + space.gettypefor(interp_boxes.W_IntegerBox), + space.gettypefor(interp_boxes.W_SignedIntegerBox)], + NPY.ULONG: [space.gettypefor(interp_boxes.W_UnsignedIntegerBox)], + NPY.LONGLONG: [space.w_long], + NPY.DOUBLE: [space.w_float, + space.gettypefor(interp_boxes.W_NumberBox), + space.gettypefor(interp_boxes.W_FloatingBox)], + NPY.CDOUBLE: [space.w_complex, + space.gettypefor(interp_boxes.W_ComplexFloatingBox)], + NPY.STRING: [space.w_str, + space.gettypefor(interp_boxes.W_CharacterBox)], + NPY.UNICODE: [space.w_unicode], + NPY.VOID: 
[space.gettypefor(interp_boxes.W_GenericBox)], + #space.w_buffer, # XXX no buffer in space + } float_dtypes = [self.w_float16dtype, self.w_float32dtype, self.w_float64dtype, self.w_floatlongdtype] complex_dtypes = [self.w_complex64dtype, self.w_complex128dtype, self.w_complexlongdtype] + self.component_dtypes = { + NPY.CFLOAT: self.w_float32dtype, + NPY.CDOUBLE: self.w_float64dtype, + NPY.CLONGDOUBLE: self.w_floatlongdtype, + } self.builtin_dtypes = [ self.w_booldtype, self.w_int8dtype, self.w_uint8dtype, @@ -869,7 +856,7 @@ self.w_intpdtype, self.w_uintpdtype, ] self.float_dtypes_by_num_bytes = sorted( - (dtype.get_size(), dtype) + (dtype.elsize, dtype) for dtype in float_dtypes ) self.dtypes_by_num = {} @@ -880,14 +867,15 @@ dtype.fields = None # mark these as builtin self.dtypes_by_num[dtype.num] = dtype self.dtypes_by_name[dtype.get_name()] = dtype - for can_name in [dtype.kind + str(dtype.get_size()), + for can_name in [dtype.kind + str(dtype.elsize), dtype.char]: self.dtypes_by_name[can_name] = dtype self.dtypes_by_name[NPY.NATBYTE + can_name] = dtype self.dtypes_by_name[NPY.NATIVE + can_name] = dtype self.dtypes_by_name[NPY.IGNORE + can_name] = dtype - for alias in dtype.aliases: - self.dtypes_by_name[alias] = dtype + if dtype.num in aliases: + for alias in aliases[dtype.num]: + self.dtypes_by_name[alias] = dtype typeinfo_full = { 'LONGLONG': self.w_int64dtype, @@ -935,13 +923,13 @@ for k, v in typeinfo_partial.iteritems(): space.setitem(w_typeinfo, space.wrap(k), space.gettypefor(v)) for k, dtype in typeinfo_full.iteritems(): - itembits = dtype.get_size() * 8 + itembits = dtype.elsize * 8 items_w = [space.wrap(dtype.char), space.wrap(dtype.num), space.wrap(itembits), space.wrap(dtype.itemtype.get_element_size())] - if dtype.is_int_type(): - if dtype.kind == NPY.GENBOOLLTR: + if dtype.is_int(): + if dtype.is_bool(): w_maxobj = space.wrap(1) w_minobj = space.wrap(0) elif dtype.is_signed(): diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -87,8 +87,8 @@ def descr_set_dtype(self, space, w_dtype): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) - if (dtype.get_size() != self.get_dtype().get_size() or - dtype.is_flexible_type() or self.get_dtype().is_flexible_type()): + if (dtype.elsize != self.get_dtype().elsize or + dtype.is_flexible() or self.get_dtype().is_flexible()): raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) self.implementation.set_dtype(space, dtype) @@ -101,10 +101,10 @@ return space.wrap(len(self.get_shape())) def descr_get_itemsize(self, space): - return space.wrap(self.get_dtype().get_size()) + return space.wrap(self.get_dtype().elsize) def descr_get_nbytes(self, space): - return space.wrap(self.get_size() * self.get_dtype().get_size()) + return space.wrap(self.get_size() * self.get_dtype().elsize) def descr_fill(self, space, w_value): self.fill(space, self.get_dtype().coerce(space, w_value)) @@ -220,7 +220,7 @@ def descr_getitem(self, space, w_idx): if space.is_w(w_idx, space.w_Ellipsis): return self - elif isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type() \ + elif isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool() \ and len(w_idx.get_shape()) > 0: return self.getitem_filter(space, w_idx) try: @@ -235,7 +235,7 @@ self.implementation.setitem_index(space, index_list, w_value) def 
descr_setitem(self, space, w_idx, w_value): - if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type() \ + if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool() \ and len(w_idx.get_shape()) > 0: self.setitem_filter(space, w_idx, convert_to_array(space, w_value)) return @@ -281,7 +281,7 @@ else: s.append(separator) s.append(' ') - if self.is_scalar() and dtype.is_str_type(): + if self.is_scalar() and dtype.is_str(): s.append(dtype.itemtype.to_str(i.getitem())) else: s.append(dtype.itemtype.str_format(i.getitem())) @@ -344,7 +344,7 @@ def descr_set_imag(self, space, w_value): # if possible, copy (broadcast) values into self - if not self.get_dtype().is_complex_type(): + if not self.get_dtype().is_complex(): raise OperationError(space.w_TypeError, space.wrap('array does not have imaginary part to set')) self.implementation.set_imag(space, self, w_value) @@ -575,10 +575,10 @@ raise oefmt(space.w_NotImplementedError, "astype(%s) not implemented yet", new_dtype.get_name()) - if new_dtype.num == NPY.STRING and new_dtype.size == 0: + if new_dtype.num == NPY.STRING and new_dtype.elsize == 0: if cur_dtype.num == NPY.STRING: new_dtype = interp_dtype.variable_dtype(space, - 'S' + str(cur_dtype.size)) + 'S' + str(cur_dtype.elsize)) impl = self.implementation if isinstance(impl, scalar.Scalar): return W_NDimArray.new_scalar(space, new_dtype, impl.value) @@ -689,7 +689,7 @@ @unwrap_spec(decimals=int) def descr_round(self, space, decimals=0, w_out=None): if space.is_none(w_out): - if self.get_dtype().is_bool_type(): + if self.get_dtype().is_bool(): #numpy promotes bool.round() to float16. Go figure. w_out = W_NDimArray.from_shape(space, self.get_shape(), interp_dtype.get_dtype_cache(space).w_float16dtype) @@ -700,7 +700,7 @@ "return arrays must be of ArrayType")) out = interp_dtype.dtype_agreement(space, [self], self.get_shape(), w_out) - if out.get_dtype().is_bool_type() and self.get_dtype().is_bool_type(): + if out.get_dtype().is_bool() and self.get_dtype().is_bool(): calc_dtype = interp_dtype.get_dtype_cache(space).w_longdtype else: calc_dtype = out.get_dtype() @@ -781,8 +781,8 @@ w_dtype)) else: dtype = self.get_dtype() - old_itemsize = self.get_dtype().get_size() - new_itemsize = dtype.get_size() + old_itemsize = self.get_dtype().elsize + new_itemsize = dtype.elsize impl = self.implementation if new_itemsize == 0: raise OperationError(space.w_TypeError, space.wrap( @@ -1093,7 +1093,7 @@ raise OperationError(space.w_TypeError, space.wrap( "only integer arrays with one element " "can be converted to an index")) - if not self.get_dtype().is_int_type() or self.get_dtype().is_bool_type(): + if not self.get_dtype().is_int() or self.get_dtype().is_bool(): raise OperationError(space.w_TypeError, space.wrap( "only integer arrays with one element " "can be converted to an index")) @@ -1188,7 +1188,7 @@ if not shape: raise OperationError(space.w_TypeError, space.wrap( "numpy scalars from buffers not supported yet")) - totalsize = support.product(shape) * dtype.get_size() + totalsize = support.product(shape) * dtype.elsize if totalsize+offset > buf.getlength(): raise OperationError(space.w_TypeError, space.wrap( "buffer is too small for requested array")) @@ -1448,9 +1448,10 @@ # scalars and strings w/o __array__ method isstr = space.isinstance_w(w_object, space.w_str) if not issequence_w(space, w_object) or isstr: - if dtype is None or (dtype.is_str_or_unicode() and dtype.get_size() < 1): - dtype = interp_ufuncs.find_dtype_for_scalar(space, w_object) - return 
W_NDimArray.new_scalar(space, dtype, w_object) + if dtype is None or dtype.char != NPY.CHARLTR: + if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): + dtype = interp_ufuncs.find_dtype_for_scalar(space, w_object) + return W_NDimArray.new_scalar(space, dtype, w_object) if space.is_none(w_order): order = 'C' @@ -1478,14 +1479,14 @@ # not an array or incorrect dtype shape, elems_w = find_shape_and_elems(space, w_object, dtype) - if dtype is None or (dtype.is_str_or_unicode() and dtype.get_size() < 1): + if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): for w_elem in elems_w: if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar(): w_elem = w_elem.get_scalar_value() dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, dtype) if dtype is None: dtype = interp_dtype.get_dtype_cache(space).w_float64dtype - elif dtype.is_str_or_unicode() and dtype.get_size() < 1: + elif dtype.is_str_or_unicode() and dtype.elsize < 1: # promote S0 -> S1, U0 -> U1 dtype = interp_dtype.variable_dtype(space, dtype.char + '1') @@ -1501,7 +1502,7 @@ def zeros(space, w_shape, w_dtype=None, w_order=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) - if dtype.is_str_or_unicode() and dtype.get_size() < 1: + if dtype.is_str_or_unicode() and dtype.elsize < 1: dtype = interp_dtype.variable_dtype(space, dtype.char + '1') shape = _find_shape(space, w_shape, dtype) return W_NDimArray.from_shape(space, shape, dtype=dtype) @@ -1514,24 +1515,30 @@ else: dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) - if dtype.is_str_or_unicode() and dtype.get_size() < 1: + if dtype.is_str_or_unicode() and dtype.elsize < 1: dtype = interp_dtype.variable_dtype(space, dtype.char + '1') return W_NDimArray.from_shape(space, w_a.get_shape(), dtype=dtype, w_instance=w_a if subok else None) -def _reconstruct(space, w_subtype, w_shape, w_dtype): - return descr_new_array(space, w_subtype, w_shape, w_dtype) - def build_scalar(space, w_dtype, w_state): from rpython.rtyper.lltypesystem import rffi, lltype - - assert isinstance(w_dtype, interp_dtype.W_Dtype) - + if not isinstance(w_dtype, interp_dtype.W_Dtype): + raise oefmt(space.w_TypeError, + "argument 1 must be numpy.dtype, not %T", w_dtype) + if w_dtype.elsize == 0: + raise oefmt(space.w_ValueError, "itemsize cannot be zero") + if not space.isinstance_w(w_state, space.w_str): + raise oefmt(space.w_TypeError, "initializing object must be a string") + if space.len_w(w_state) != w_dtype.elsize: + raise oefmt(space.w_ValueError, "initialization string is too small") state = rffi.str2charp(space.str_w(w_state)) box = w_dtype.itemtype.box_raw_data(state) lltype.free(state, flavor="raw") return box +def _reconstruct(space, w_subtype, w_shape, w_dtype): + return descr_new_array(space, w_subtype, w_shape, w_dtype) + W_FlatIterator.typedef = TypeDef("flatiter", __module__ = "numpy", diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -59,7 +59,7 @@ return space.wrap(a) def _fromstring_bin(space, s, count, length, dtype): - itemsize = dtype.get_size() + itemsize = dtype.elsize assert itemsize >= 0 if count == -1: count = length / itemsize diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ 
b/pypy/module/micronumpy/interp_ufuncs.py @@ -168,7 +168,7 @@ "supported for binary functions")) assert isinstance(self, W_Ufunc2) obj = convert_to_array(space, w_obj) - if obj.get_dtype().is_flexible_type(): + if obj.get_dtype().is_flexible(): raise OperationError(space.w_TypeError, space.wrap('cannot perform reduce with flexible type')) obj_shape = obj.get_shape() @@ -287,12 +287,12 @@ out = None w_obj = convert_to_array(space, w_obj) dtype = w_obj.get_dtype() - if dtype.is_flexible_type(): + if dtype.is_flexible(): raise OperationError(space.w_TypeError, space.wrap('Not implemented for this type')) - if (self.int_only and not dtype.is_int_type() or - not self.allow_bool and dtype.is_bool_type() or - not self.allow_complex and dtype.is_complex_type()): + if (self.int_only and not dtype.is_int() or + not self.allow_bool and dtype.is_bool() or + not self.allow_complex and dtype.is_complex()): raise OperationError(space.w_TypeError, space.wrap( "ufunc %s not supported for the input type" % self.name)) calc_dtype = find_unaryop_result_dtype(space, @@ -311,7 +311,7 @@ res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype - if self.complex_to_float and calc_dtype.is_complex_type(): + if self.complex_to_float and calc_dtype.is_complex(): if calc_dtype.num == NPY.CFLOAT: res_dtype = interp_dtype.get_dtype_cache(space).w_float32dtype else: @@ -351,11 +351,11 @@ self.done_func = None def are_common_types(self, dtype1, dtype2): - if dtype1.is_complex_type() and dtype2.is_complex_type(): - return True - elif not (dtype1.is_complex_type() or dtype2.is_complex_type()) and \ - (dtype1.is_int_type() and dtype2.is_int_type() or dtype1.is_float_type() and dtype2.is_float_type()) and \ - not (dtype1.is_bool_type() or dtype2.is_bool_type()): + if dtype1.is_bool() or dtype2.is_bool(): + return False + if (dtype1.is_int() and dtype2.is_int() or + dtype1.is_float() and dtype2.is_float() or + dtype1.is_complex() and dtype2.is_complex()): return True return False @@ -370,13 +370,13 @@ w_rhs = convert_to_array(space, w_rhs) w_ldtype = w_lhs.get_dtype() w_rdtype = w_rhs.get_dtype() - if w_ldtype.is_str_type() and w_rdtype.is_str_type() and \ + if w_ldtype.is_str() and w_rdtype.is_str() and \ self.comparison_func: pass - elif (w_ldtype.is_str_type() or w_rdtype.is_str_type()) and \ + elif (w_ldtype.is_str() or w_rdtype.is_str()) and \ self.comparison_func and w_out is None: return space.wrap(False) - elif w_ldtype.is_flexible_type() or w_rdtype.is_flexible_type(): + elif w_ldtype.is_flexible() or w_rdtype.is_flexible(): if self.comparison_func: if self.name == 'equal' or self.name == 'not_equal': res = w_ldtype.eq(space, w_rdtype) @@ -399,13 +399,13 @@ w_ldtype, w_rdtype, promote_to_float=self.promote_to_float, promote_bools=self.promote_bools) - if (self.int_only and (not w_ldtype.is_int_type() or - not w_rdtype.is_int_type() or - not calc_dtype.is_int_type()) or - not self.allow_bool and (w_ldtype.is_bool_type() or - w_rdtype.is_bool_type()) or - not self.allow_complex and (w_ldtype.is_complex_type() or - w_rdtype.is_complex_type())): + if (self.int_only and (not w_ldtype.is_int() or + not w_rdtype.is_int() or + not calc_dtype.is_int()) or + not self.allow_bool and (w_ldtype.is_bool() or + w_rdtype.is_bool()) or + not self.allow_complex and (w_ldtype.is_complex() or + w_rdtype.is_complex())): raise OperationError(space.w_TypeError, space.wrap( "ufunc '%s' not supported for the input types" % self.name)) if space.is_none(w_out): @@ -467,7 +467,7 @@ return 
interp_dtype.get_dtype_cache(space).w_int8dtype # Everything numeric promotes to complex - if dt2.is_complex_type() or dt1.is_complex_type(): + if dt2.is_complex() or dt1.is_complex(): if dt2.num == NPY.HALF: dt1, dt2 = dt2, dt1 if dt2.num == NPY.CFLOAT: @@ -488,7 +488,7 @@ if promote_to_float: return find_unaryop_result_dtype(space, dt2, promote_to_float=True) # If they're the same kind, choose the greater one. - if dt1.kind == dt2.kind and not dt2.is_flexible_type(): + if dt1.kind == dt2.kind and not dt2.is_flexible(): if dt2.num == NPY.HALF: return dt1 return dt2 @@ -513,13 +513,13 @@ elif dt2.num == NPY.ULONGLONG or (LONG_BIT == 64 and dt2.num == NPY.ULONG): # UInt64 + signed = Float64 dtypenum = NPY.DOUBLE - elif dt2.is_flexible_type(): + elif dt2.is_flexible(): # For those operations that get here (concatenate, stack), # flexible types take precedence over numeric type - if dt2.is_record_type(): + if dt2.is_record(): return dt2 if dt1.is_str_or_unicode(): - if dt2.get_size() >= dt1.get_size(): + if dt2.elsize >= dt1.elsize: return dt2 return dt1 return dt2 @@ -542,10 +542,10 @@ promote_bools=False, promote_to_largest=False): if promote_to_largest: if dt.kind == NPY.GENBOOLLTR or dt.kind == NPY.SIGNEDLTR: - if dt.get_size() * 8 < LONG_BIT: + if dt.elsize * 8 < LONG_BIT: return interp_dtype.get_dtype_cache(space).w_longdtype elif dt.kind == NPY.UNSIGNEDLTR: - if dt.get_size() * 8 < LONG_BIT: + if dt.elsize * 8 < LONG_BIT: return interp_dtype.get_dtype_cache(space).w_ulongdtype else: assert dt.kind == NPY.FLOATINGLTR or dt.kind == NPY.COMPLEXLTR @@ -596,7 +596,7 @@ return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) elif current_guess.num == NPY.STRING: - if current_guess.get_size() < space.len_w(w_obj): + if current_guess.elsize < space.len_w(w_obj): return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) return current_guess diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -174,7 +174,7 @@ def __init__(self, array): self.array = array self.offset = 0 - self.skip = array.dtype.get_size() + self.skip = array.dtype.elsize self.size = array.size def setitem(self, elem): diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -459,7 +459,7 @@ builder = StringBuilder() iter = arr.create_iter() w_res_str = W_NDimArray.from_shape(space, [1], arr.get_dtype(), order='C') - itemsize = arr.get_dtype().get_size() + itemsize = arr.get_dtype().elsize res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), w_res_str.implementation.get_storage_as_int(space)) while not iter.done(): diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -69,7 +69,7 @@ return True def find_shape_and_elems(space, w_iterable, dtype): - is_rec_type = dtype is not None and dtype.is_record_type() + is_rec_type = dtype is not None and dtype.is_record() if is_rec_type and is_single_elem(space, w_iterable, is_rec_type): return [], [w_iterable] if isinstance(w_iterable, W_NDimArray) and w_iterable.is_scalar(): diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -28,8 +28,8 @@ shape_rev.reverse() for sh in shape_rev: slimit = max(sh, 1) - strides.append(s * dtype.get_size()) - 
backstrides.append(s * (slimit - 1) * dtype.get_size()) + strides.append(s * dtype.elsize) + backstrides.append(s * (slimit - 1) * dtype.elsize) s *= slimit if order == 'C': strides.reverse() diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -58,6 +58,7 @@ assert dtype('int8').num == 1 assert dtype('int8').name == 'int8' + assert dtype('void').name == 'void' assert dtype(int).fields is None assert dtype(int).names is None assert dtype(int).hasobject is False @@ -371,6 +372,7 @@ raises(TypeError, hash, d) def test_pickle(self): + import numpy as np from numpypy import array, dtype from cPickle import loads, dumps a = array([1,2,3]) @@ -379,6 +381,9 @@ else: assert a.dtype.__reduce__() == (dtype, ('i4', 0, 1), (3, '<', None, None, None, -1, -1, 0)) assert loads(dumps(a.dtype)) == a.dtype + assert np.dtype('bool').__reduce__() == (dtype, ('b1', 0, 1), (3, '|', None, None, None, -1, -1, 0)) + assert np.dtype('|V16').__reduce__() == (dtype, ('V16', 0, 1), (3, '|', None, None, None, 16, 1, 0)) + assert np.dtype(('i8', 0) + assert d.subdtype is None + #assert d.descr == [('f0', '>i8')] + #assert str(d) == "[('f0', '>i8')]" + d = np.dtype(('i8', (2,))" + def test_object(self): import numpy as np import sys @@ -433,7 +452,7 @@ assert np.dtype(o).str == '|O8' else: exc = raises(NotImplementedError, "np.dtype(o)") - assert exc.value[0] == 'object dtype not implemented' + assert exc.value[0] == "cannot create dtype with type '%s'" % o.__name__ class AppTestTypes(BaseAppTestDtypes): def test_abstract_types(self): @@ -855,6 +874,7 @@ raises(TypeError, lambda: float64(3) & 1) def test_alternate_constructs(self): + import numpy as np from numpypy import dtype nnp = self.non_native_prefix byteorder = self.native_prefix @@ -870,6 +890,12 @@ assert dtype(' Author: Manuel Jacob Branch: kill-multimethod Changeset: r69446:99b795fd4044 Date: 2014-02-25 19:50 +0100 http://bitbucket.org/pypy/pypy/changeset/99b795fd4044/ Log: hg merge remove-remaining-smm diff too long, truncating to 2000 out of 2465 lines diff --git a/lib-python/2.7/threading.py b/lib-python/2.7/threading.py --- a/lib-python/2.7/threading.py +++ b/lib-python/2.7/threading.py @@ -246,7 +246,14 @@ else: # PyPy patch: use _py3k_acquire() if timeout > 0: - gotit = waiter._py3k_acquire(True, timeout) + try: + gotit = waiter._py3k_acquire(True, timeout) + except OverflowError: + # bah, in Python 3, acquire(True, timeout) raises + # OverflowError if the timeout is too huge. For + # forward-compatibility reasons we do the same. 
+ waiter.acquire() + gotit = True else: gotit = waiter.acquire(False) if not gotit: diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -826,10 +826,9 @@ class AppTestKeywordsToBuiltinSanity(object): - def test_type(self): class X(object): - def __init__(self, **kw): + def __init__(myself, **kw): pass clash = type.__call__.func_code.co_varnames[0] @@ -845,7 +844,6 @@ X(**{clash: 33}) object.__new__(X, **{clash: 33}) - def test_dict_new(self): clash = dict.__new__.func_code.co_varnames[0] @@ -865,4 +863,3 @@ d.update(**{clash: 33}) dict.update(d, **{clash: 33}) - diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -14,6 +14,8 @@ _immutable_fields_ = ['size?', 'name', 'name_position'] # note that 'size' is not strictly immutable, because it can change # from -1 to the real value in the W_CTypeStruct subclass. + # XXX this could be improved with an elidable method get_size() + # that raises in case it's still -1... cast_anything = False is_primitive_integer = False diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -159,7 +159,7 @@ subentry = ProfilerSubEntry(entry.frame) self.calls[entry] = subentry return subentry - return None + raise class ProfilerContext(object): def __init__(self, profobj, entry): @@ -181,8 +181,11 @@ entry._stop(tt, it) if profobj.subcalls and self.previous: caller = jit.promote(self.previous.entry) - subentry = caller._get_or_make_subentry(entry, False) - if subentry is not None: + try: + subentry = caller._get_or_make_subentry(entry, False) + except KeyError: + pass + else: subentry._stop(tt, it) @@ -308,7 +311,7 @@ entry = ProfilerEntry(f_code) self.data[f_code] = entry return entry - return None + raise @jit.elidable def _get_or_make_builtin_entry(self, key, make=True): @@ -319,7 +322,7 @@ entry = ProfilerEntry(self.space.wrap(key)) self.builtin_data[key] = entry return entry - return None + raise def _enter_call(self, f_code): # we have a superb gc, no point in freelist :) @@ -332,8 +335,11 @@ if context is None: return self = jit.promote(self) - entry = self._get_or_make_entry(f_code, False) - if entry is not None: + try: + entry = self._get_or_make_entry(f_code, False) + except KeyError: + pass + else: context._stop(self, entry) self.current_context = context.previous @@ -347,8 +353,11 @@ if context is None: return self = jit.promote(self) - entry = self._get_or_make_builtin_entry(key, False) - if entry is not None: + try: + entry = self._get_or_make_builtin_entry(key, False) + except KeyError: + pass + else: context._stop(self, entry) self.current_context = context.previous diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -94,12 +94,12 @@ @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def _PyArray_ITEMSIZE(space, w_array): assert isinstance(w_array, W_NDimArray) - return w_array.get_dtype().get_size() + return w_array.get_dtype().elsize @cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) def _PyArray_NBYTES(space, w_array): assert isinstance(w_array, W_NDimArray) - return w_array.get_size() * w_array.get_dtype().get_size() + return 
w_array.get_size() * w_array.get_dtype().elsize @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def _PyArray_TYPE(space, w_array): diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -55,7 +55,7 @@ loop.setslice(space, shape, self, impl) def get_size(self): - return self.size // self.dtype.get_size() + return self.size // self.dtype.elsize def get_storage_size(self): return self.size @@ -89,7 +89,7 @@ def get_real(self, space, orig_array): strides = self.get_strides() backstrides = self.get_backstrides() - if self.dtype.is_complex_type(): + if self.dtype.is_complex(): dtype = self.dtype.get_float_dtype(space) return SliceArray(self.start, strides, backstrides, self.get_shape(), self, orig_array, dtype=dtype) @@ -103,13 +103,13 @@ def get_imag(self, space, orig_array): strides = self.get_strides() backstrides = self.get_backstrides() - if self.dtype.is_complex_type(): + if self.dtype.is_complex(): dtype = self.dtype.get_float_dtype(space) - return SliceArray(self.start + dtype.get_size(), strides, + return SliceArray(self.start + dtype.elsize, strides, backstrides, self.get_shape(), self, orig_array, dtype=dtype) impl = NonWritableArray(self.get_shape(), self.dtype, self.order, strides, backstrides) - if not self.dtype.is_flexible_type(): + if not self.dtype.is_flexible(): impl.fill(space, self.dtype.box(0)) return impl @@ -204,7 +204,7 @@ if space.isinstance_w(w_idx, space.w_str): idx = space.str_w(w_idx) dtype = self.dtype - if not dtype.is_record_type() or idx not in dtype.fields: + if not dtype.is_record() or idx not in dtype.fields: raise OperationError(space.w_ValueError, space.wrap( "field named %s not found" % idx)) return RecordChunk(idx) @@ -324,7 +324,7 @@ make_sure_not_resized(strides) make_sure_not_resized(backstrides) self.shape = shape - self.size = support.product(shape) * dtype.get_size() + self.size = support.product(shape) * dtype.elsize self.order = order self.dtype = dtype self.strides = strides @@ -352,7 +352,7 @@ self.get_shape()) def fill(self, space, box): - self.dtype.itemtype.fill(self.storage, self.dtype.get_size(), + self.dtype.itemtype.fill(self.storage, self.dtype.elsize, box, 0, self.size, 0) def set_shape(self, space, orig_array, new_shape): @@ -425,7 +425,7 @@ self.storage = parent.storage self.order = parent.order self.dtype = dtype - self.size = support.product(shape) * self.dtype.get_size() + self.size = support.product(shape) * self.dtype.elsize self.start = start self.orig_arr = orig_arr @@ -460,12 +460,12 @@ strides = [] backstrides = [] dtype = self.dtype - s = self.get_strides()[0] // dtype.get_size() + s = self.get_strides()[0] // dtype.elsize if self.order == 'C': new_shape.reverse() for sh in new_shape: - strides.append(s * dtype.get_size()) - backstrides.append(s * (sh - 1) * dtype.get_size()) + strides.append(s * dtype.elsize) + backstrides.append(s * (sh - 1) * dtype.elsize) s *= max(1, sh) if self.order == 'C': strides.reverse() diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -70,7 +70,7 @@ scalar = Scalar(dtype) if dtype.is_str_or_unicode(): scalar.value = dtype.coerce(space, space.wrap(self.value.raw_str())) - elif dtype.is_record_type(): + elif dtype.is_record(): raise OperationError(space.w_NotImplementedError, 
space.wrap( "viewing scalar as record not implemented")) else: @@ -78,7 +78,7 @@ return scalar def get_real(self, space, orig_array): - if self.dtype.is_complex_type(): + if self.dtype.is_complex(): scalar = Scalar(self.dtype.get_float_dtype(space)) scalar.value = self.value.convert_real_to(scalar.dtype) return scalar @@ -91,7 +91,7 @@ "could not broadcast input array from shape " + "(%s) into shape ()" % ( ','.join([str(x) for x in w_arr.get_shape()],)))) - if self.dtype.is_complex_type(): + if self.dtype.is_complex(): dtype = self.dtype.get_float_dtype(space) self.value = self.dtype.itemtype.composite( w_arr.get_scalar_value().convert_to(space, dtype), @@ -100,7 +100,7 @@ self.value = w_arr.get_scalar_value() def get_imag(self, space, orig_array): - if self.dtype.is_complex_type(): + if self.dtype.is_complex(): scalar = Scalar(self.dtype.get_float_dtype(space)) scalar.value = self.value.convert_imag_to(scalar.dtype) return scalar @@ -110,7 +110,7 @@ def set_imag(self, space, orig_array, w_val): #Only called on complex dtype - assert self.dtype.is_complex_type() + assert self.dtype.is_complex() w_arr = convert_to_array(space, w_val) if len(w_arr.get_shape()) > 0: raise OperationError(space.w_ValueError, space.wrap( @@ -127,7 +127,7 @@ if space.len_w(w_idx) == 0: return self.get_scalar_value() elif space.isinstance_w(w_idx, space.w_str): - if self.dtype.is_record_type(): + if self.dtype.is_record(): w_val = self.value.descr_getitem(space, w_idx) return convert_to_array(space, w_val) elif space.is_none(w_idx): @@ -148,7 +148,7 @@ if space.len_w(w_idx) == 0: return self.set_scalar_value(self.dtype.coerce(space, w_val)) elif space.isinstance_w(w_idx, space.w_str): - if self.dtype.is_record_type(): + if self.dtype.is_record(): return self.value.descr_setitem(space, w_idx, w_val) raise OperationError(space.w_IndexError, space.wrap("0-d arrays can't be indexed")) diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/arrayimpl/sort.py @@ -71,10 +71,10 @@ def __init__(self, index_stride_size, stride_size, size): start = 0 dtype = interp_dtype.get_dtype_cache(space).w_longdtype - indexes = dtype.itemtype.malloc(size*dtype.get_size()) + indexes = dtype.itemtype.malloc(size * dtype.elsize) values = alloc_raw_storage(size * stride_size, track_allocation=False) - Repr.__init__(self, dtype.get_size(), stride_size, + Repr.__init__(self, dtype.elsize, stride_size, size, values, indexes, start, start) def __del__(self): diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -137,14 +137,14 @@ "all the input array dimensions except for the " "concatenation axis must match exactly")) a_dt = arr.get_dtype() - if dtype.is_record_type() and a_dt.is_record_type(): + if dtype.is_record() and a_dt.is_record(): # Record types must match for f in dtype.fields: if f not in a_dt.fields or \ dtype.fields[f] != a_dt.fields[f]: raise OperationError(space.w_TypeError, space.wrap("invalid type promotion")) - elif dtype.is_record_type() or a_dt.is_record_type(): + elif dtype.is_record() or a_dt.is_record(): raise OperationError(space.w_TypeError, space.wrap("invalid type promotion")) dtype = interp_ufuncs.find_binop_result_dtype(space, dtype, diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- 
a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -16,7 +16,7 @@ from pypy.interpreter.mixedmodule import MixedModule from rpython.rtyper.lltypesystem import lltype from rpython.rlib.rstring import StringBuilder -from rpython.rlib import jit +from rpython.rlib.objectmodel import specialize from pypy.module.micronumpy import constants as NPY @@ -33,13 +33,13 @@ long_double_size = 8 -def new_dtype_getter(name): - @jit.elidable +def new_dtype_getter(num): + @specialize.memo() def _get_dtype(space): from pypy.module.micronumpy.interp_dtype import get_dtype_cache - return get_dtype_cache(space).dtypes_by_name[name] + return get_dtype_cache(space).dtypes_by_num[num] - def new(space, w_subtype, w_value=None): + def descr__new__(space, w_subtype, w_value=None): from pypy.module.micronumpy.interp_numarray import array dtype = _get_dtype(space) if not space.is_none(w_value): @@ -52,7 +52,9 @@ def descr_reduce(self, space): return self.reduce(space) - return func_with_new_name(new, name + "_box_new"), staticmethod(_get_dtype), func_with_new_name(descr_reduce, "descr_reduce") + return (func_with_new_name(descr__new__, 'descr__new__%d' % num), + staticmethod(_get_dtype), + descr_reduce) class Box(object): @@ -303,15 +305,15 @@ else: dtype = space.interp_w(W_Dtype, space.call_function(space.gettypefor(W_Dtype), w_dtype)) - if dtype.get_size() == 0: + if dtype.elsize == 0: raise OperationError(space.w_TypeError, space.wrap( "data-type must not be 0-sized")) - if dtype.get_size() != self.get_dtype(space).get_size(): + if dtype.elsize != self.get_dtype(space).elsize: raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) if dtype.is_str_or_unicode(): return dtype.coerce(space, space.wrap(self.raw_str())) - elif dtype.is_record_type(): + elif dtype.is_record(): raise OperationError(space.w_NotImplementedError, space.wrap( "viewing scalar as record not implemented")) else: @@ -327,7 +329,7 @@ return space.wrap(1) def descr_get_itemsize(self, space): - return self.get_dtype(space).descr_get_itemsize(space) + return space.wrap(self.get_dtype(space).elsize) def descr_get_shape(self, space): return space.newtuple([]) @@ -352,6 +354,12 @@ w_meth = space.getattr(self.descr_ravel(space), space.wrap('reshape')) return space.call_args(w_meth, __args__) + def descr_get_real(self, space): + return self.get_dtype(space).itemtype.real(self) + + def descr_get_imag(self, space): + return self.get_dtype(space).itemtype.imag(self) + w_flags = None def descr_get_flags(self, space): if self.w_flags is None: @@ -359,7 +367,7 @@ return self.w_flags class W_BoolBox(W_GenericBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("bool") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.BOOL) class W_NumberBox(W_GenericBox): pass @@ -375,34 +383,34 @@ pass class W_Int8Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("int8") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.BYTE) class W_UInt8Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("uint8") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.UBYTE) class W_Int16Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("int16") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.SHORT) class W_UInt16Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = 
new_dtype_getter("uint16") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.USHORT) class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("i") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.INT) class W_UInt32Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("I") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.UINT) + +class W_LongBox(W_SignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.LONG) + +class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.ULONG) class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("q") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.LONGLONG) class W_UInt64Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("Q") - -class W_LongBox(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("l") - -class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("L") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.ULONGLONG) class W_InexactBox(W_NumberBox): pass @@ -411,45 +419,32 @@ pass class W_Float16Box(W_FloatingBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float16") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.HALF) class W_Float32Box(W_FloatingBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float32") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.FLOAT) class W_Float64Box(W_FloatingBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float64") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.DOUBLE) def descr_as_integer_ratio(self, space): return space.call_method(self.item(space), 'as_integer_ratio') class W_ComplexFloatingBox(W_InexactBox): - def descr_get_real(self, space): - dtype = self._COMPONENTS_BOX._get_dtype(space) - box = self.convert_real_to(dtype) - assert isinstance(box, self._COMPONENTS_BOX) - return space.wrap(box) - - def descr_get_imag(self, space): - dtype = self._COMPONENTS_BOX._get_dtype(space) - box = self.convert_imag_to(dtype) - assert isinstance(box, self._COMPONENTS_BOX) - return space.wrap(box) + pass class W_Complex64Box(ComplexBox, W_ComplexFloatingBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("complex64") - _COMPONENTS_BOX = W_Float32Box + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.CFLOAT) class W_Complex128Box(ComplexBox, W_ComplexFloatingBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("complex128") - _COMPONENTS_BOX = W_Float64Box + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.CDOUBLE) if long_double_size in (8, 12, 16): class W_FloatLongBox(W_FloatingBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.LONGDOUBLELTR) + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.LONGDOUBLE) class W_ComplexLongBox(ComplexBox, W_ComplexFloatingBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.CLONGDOUBLELTR) - _COMPONENTS_BOX = W_FloatLongBox + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.CLONGDOUBLE) class W_FlexibleBox(W_GenericBox): _attrs_ = ['arr', 'ofs', 
'dtype'] @@ -635,6 +630,8 @@ strides = GetSetProperty(W_GenericBox.descr_get_shape), ndim = GetSetProperty(W_GenericBox.descr_get_ndim), T = GetSetProperty(W_GenericBox.descr_self), + real = GetSetProperty(W_GenericBox.descr_get_real), + imag = GetSetProperty(W_GenericBox.descr_get_imag), flags = GetSetProperty(W_GenericBox.descr_get_flags), ) @@ -768,16 +765,12 @@ __new__ = interp2app(W_Complex64Box.descr__new__.im_func), __reduce__ = interp2app(W_Complex64Box.descr_reduce), __complex__ = interp2app(W_GenericBox.item), - real = GetSetProperty(W_ComplexFloatingBox.descr_get_real), - imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), ) W_Complex128Box.typedef = TypeDef("complex128", (W_ComplexFloatingBox.typedef, W_ComplexObject.typedef), __module__ = "numpy", __new__ = interp2app(W_Complex128Box.descr__new__.im_func), __reduce__ = interp2app(W_Complex128Box.descr_reduce), - real = GetSetProperty(W_ComplexFloatingBox.descr_get_real), - imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), ) if long_double_size in (8, 12, 16): @@ -792,8 +785,6 @@ __new__ = interp2app(W_ComplexLongBox.descr__new__.im_func), __reduce__ = interp2app(W_ComplexLongBox.descr_reduce), __complex__ = interp2app(W_GenericBox.item), - real = GetSetProperty(W_ComplexFloatingBox.descr_get_real), - imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), ) W_FlexibleBox.typedef = TypeDef("flexible", W_GenericBox.typedef, diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -6,7 +6,7 @@ interp_attrproperty, interp_attrproperty_w) from pypy.module.micronumpy import types, interp_boxes, base from rpython.rlib.objectmodel import specialize -from rpython.rlib.rarithmetic import LONG_BIT, r_longlong, r_ulonglong +from rpython.rlib.rarithmetic import r_longlong, r_ulonglong from rpython.rlib import jit from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy.conversion_utils import byteorder_converter @@ -38,22 +38,19 @@ class W_Dtype(W_Root): _immutable_fields_ = [ - "num", "kind", "char", "w_box_type", "float_type", - "itemtype?", "byteorder?", "names?", "fields?", "size?", + "itemtype?", "num", "kind", "char", "w_box_type", + "byteorder?", "names?", "fields?", "elsize?", "alignment?", "shape?", "subdtype?", "base?", - "alternate_constructors", "aliases", ] def __init__(self, itemtype, num, kind, char, w_box_type, - float_type=None, byteorder=None, names=[], fields={}, - size=1, shape=[], subdtype=None, - alternate_constructors=[], aliases=[]): + byteorder=None, names=[], fields={}, + elsize=None, shape=[], subdtype=None): self.itemtype = itemtype self.num = num self.kind = kind self.char = char self.w_box_type = w_box_type - self.float_type = float_type if byteorder is None: if itemtype.get_element_size() == 1: byteorder = NPY.IGNORE @@ -62,15 +59,16 @@ self.byteorder = byteorder self.names = names self.fields = fields - self.size = size + if elsize is None: + elsize = itemtype.get_element_size() + self.elsize = elsize + self.alignment = itemtype.alignment self.shape = shape self.subdtype = subdtype if not subdtype: self.base = self else: self.base = subdtype.base - self.alternate_constructors = alternate_constructors - self.aliases = aliases def __repr__(self): if self.fields is not None: @@ -85,100 +83,51 @@ def box_complex(self, real, imag): return self.itemtype.box_complex(real, imag) - def build_and_convert(self, space, box): - return 
self.itemtype.build_and_convert(space, self, box) - def coerce(self, space, w_item): return self.itemtype.coerce(space, self, w_item) - def is_int_type(self): - return (self.kind == NPY.SIGNEDLTR or self.kind == NPY.UNSIGNEDLTR or - self.kind == NPY.GENBOOLLTR) + def is_bool(self): + return self.kind == NPY.GENBOOLLTR def is_signed(self): return self.kind == NPY.SIGNEDLTR - def is_complex_type(self): + def is_unsigned(self): + return self.kind == NPY.UNSIGNEDLTR + + def is_int(self): + return (self.kind == NPY.SIGNEDLTR or self.kind == NPY.UNSIGNEDLTR or + self.kind == NPY.GENBOOLLTR) + + def is_float(self): + return self.kind == NPY.FLOATINGLTR + + def is_complex(self): return self.kind == NPY.COMPLEXLTR - def is_float_type(self): - return self.kind == NPY.FLOATINGLTR or self.kind == NPY.COMPLEXLTR - - def is_bool_type(self): - return self.kind == NPY.GENBOOLLTR - - def is_record_type(self): - return bool(self.fields) - - def is_str_type(self): + def is_str(self): return self.num == NPY.STRING def is_str_or_unicode(self): return self.num == NPY.STRING or self.num == NPY.UNICODE - def is_flexible_type(self): + def is_flexible(self): return self.is_str_or_unicode() or self.num == NPY.VOID + def is_record(self): + return bool(self.fields) + def is_native(self): return self.byteorder in (NPY.NATIVE, NPY.NATBYTE) - def get_size(self): - return self.size * self.itemtype.get_element_size() - def get_float_dtype(self, space): - assert self.kind == NPY.COMPLEXLTR - assert self.float_type is not None - dtype = get_dtype_cache(space).dtypes_by_name[self.float_type] + assert self.is_complex() + dtype = get_dtype_cache(space).component_dtypes[self.num] if self.byteorder == NPY.OPPBYTE: dtype = dtype.descr_newbyteorder(space) + assert dtype.is_float() return dtype - def descr_str(self, space): - if self.fields: - return space.str(self.descr_get_descr(space)) - elif self.subdtype is not None: - return space.str(space.newtuple([ - self.subdtype.descr_get_str(space), - self.descr_get_shape(space)])) - else: - if self.is_flexible_type(): - return self.descr_get_str(space) - else: - return self.descr_get_name(space) - - def descr_repr(self, space): - if self.fields: - r = self.descr_get_descr(space) - elif self.subdtype is not None: - r = space.newtuple([self.subdtype.descr_get_str(space), - self.descr_get_shape(space)]) - else: - if self.is_flexible_type(): - if self.byteorder != NPY.IGNORE: - byteorder = NPY.NATBYTE if self.is_native() else NPY.OPPBYTE - else: - byteorder = '' - r = space.wrap(byteorder + self.char + str(self.size)) - else: - r = self.descr_get_name(space) - return space.wrap("dtype(%s)" % space.str_w(space.repr(r))) - - def descr_get_itemsize(self, space): - return space.wrap(self.get_size()) - - def descr_get_alignment(self, space): - return space.wrap(self.itemtype.alignment) - - def descr_get_isbuiltin(self, space): - if self.fields is None: - return space.wrap(1) - return space.wrap(0) - - def descr_get_subdtype(self, space): - if self.subdtype is None: - return space.w_None - return space.newtuple([space.wrap(self.subdtype), self.descr_get_shape(space)]) - def get_name(self): return self.w_box_type.name @@ -186,26 +135,22 @@ name = self.get_name() if name[-1] == '_': name = name[:-1] - if self.is_flexible_type(): - return space.wrap(name + str(self.get_size() * 8)) + if self.is_flexible() and self.elsize != 0: + return space.wrap(name + str(self.elsize * 8)) return space.wrap(name) def descr_get_str(self, space): - size = self.get_size() basic = self.kind - if basic == 
NPY.UNICODELTR: + endian = self.byteorder + size = self.elsize + if endian == NPY.NATIVE: + endian = NPY.NATBYTE + if self.num == NPY.UNICODE: size >>= 2 - endian = NPY.NATBYTE - elif size // (self.size or 1) <= 1: - endian = NPY.IGNORE - else: - endian = self.byteorder - if endian == NPY.NATIVE: - endian = NPY.NATBYTE return space.wrap("%s%s%s" % (endian, basic, size)) def descr_get_descr(self, space): - if not self.is_record_type(): + if not self.is_record(): return space.newlist([space.newtuple([space.wrap(""), self.descr_get_str(space)])]) else: @@ -213,7 +158,7 @@ for name in self.names: subdtype = self.fields[name][1] subdescr = [space.wrap(name)] - if subdtype.is_record_type(): + if subdtype.is_record(): subdescr.append(subdtype.descr_get_descr(space)) elif subdtype.subdtype is not None: subdescr.append(subdtype.subdtype.descr_get_str(space)) @@ -224,38 +169,37 @@ descr.append(space.newtuple(subdescr[:])) return space.newlist(descr) - def descr_get_base(self, space): - return space.wrap(self.base) + def descr_get_hasobject(self, space): + return space.w_False + + def descr_get_isbuiltin(self, space): + if self.fields is None: + return space.wrap(1) + return space.wrap(0) def descr_get_isnative(self, space): return space.wrap(self.is_native()) + def descr_get_base(self, space): + return space.wrap(self.base) + + def descr_get_subdtype(self, space): + if self.subdtype is None: + return space.w_None + return space.newtuple([space.wrap(self.subdtype), + self.descr_get_shape(space)]) + def descr_get_shape(self, space): - w_shape = [space.wrap(dim) for dim in self.shape] - return space.newtuple(w_shape) - - def eq(self, space, w_other): - w_other = space.call_function(space.gettypefor(W_Dtype), w_other) - if space.is_w(self, w_other): - return True - if isinstance(w_other, W_Dtype): - return space.eq_w(self.descr_reduce(space), w_other.descr_reduce(space)) - return False - - def descr_eq(self, space, w_other): - return space.wrap(self.eq(space, w_other)) - - def descr_ne(self, space, w_other): - return space.wrap(not self.eq(space, w_other)) + return space.newtuple([space.wrap(dim) for dim in self.shape]) def descr_get_fields(self, space): if not self.fields: return space.w_None - w_d = space.newdict() + w_fields = space.newdict() for name, (offset, subdtype) in self.fields.iteritems(): - space.setitem(w_d, space.wrap(name), + space.setitem(w_fields, space.wrap(name), space.newtuple([subdtype, space.wrap(offset)])) - return w_d + return w_fields def descr_get_names(self, space): if not self.fields: @@ -290,8 +234,56 @@ raise OperationError(space.w_AttributeError, space.wrap( "Cannot delete dtype names attribute")) - def descr_get_hasobject(self, space): - return space.w_False + def eq(self, space, w_other): + w_other = space.call_function(space.gettypefor(W_Dtype), w_other) + if space.is_w(self, w_other): + return True + if isinstance(w_other, W_Dtype): + return space.eq_w(self.descr_reduce(space), + w_other.descr_reduce(space)) + return False + + def descr_eq(self, space, w_other): + return space.wrap(self.eq(space, w_other)) + + def descr_ne(self, space, w_other): + return space.wrap(not self.eq(space, w_other)) + + def descr_hash(self, space): + return space.hash(self.descr_reduce(space)) + + def descr_str(self, space): + if self.fields: + return space.str(self.descr_get_descr(space)) + elif self.subdtype is not None: + return space.str(space.newtuple([ + self.subdtype.descr_get_str(space), + self.descr_get_shape(space)])) + else: + if self.is_flexible(): + return 
self.descr_get_str(space) + else: + return self.descr_get_name(space) + + def descr_repr(self, space): + if self.fields: + r = self.descr_get_descr(space) + elif self.subdtype is not None: + r = space.newtuple([self.subdtype.descr_get_str(space), + self.descr_get_shape(space)]) + else: + if self.is_flexible(): + if self.byteorder != NPY.IGNORE: + byteorder = NPY.NATBYTE if self.is_native() else NPY.OPPBYTE + else: + byteorder = '' + size = self.elsize + if self.num == NPY.UNICODE: + size >>= 2 + r = space.wrap(byteorder + self.char + str(size)) + else: + r = self.descr_get_name(space) + return space.wrap("dtype(%s)" % space.str_w(space.repr(r))) def descr_getitem(self, space, w_item): if not self.fields: @@ -320,41 +312,29 @@ return space.wrap(0) return space.wrap(len(self.fields)) - def descr_hash(self, space): - return space.hash(self.descr_reduce(space)) - def descr_reduce(self, space): w_class = space.type(self) - - kind = self.kind - elemsize = self.get_size() - builder_args = space.newtuple([space.wrap("%s%d" % (kind, elemsize)), space.wrap(0), space.wrap(1)]) + builder_args = space.newtuple([ + space.wrap("%s%d" % (self.kind, self.elsize)), + space.wrap(0), space.wrap(1)]) version = space.wrap(3) + endian = self.byteorder + if endian == NPY.NATIVE: + endian = NPY.NATBYTE + subdescr = self.descr_get_subdtype(space) names = self.descr_get_names(space) values = self.descr_get_fields(space) - if self.fields: - endian = NPY.IGNORE - #TODO: Implement this when subarrays are implemented - subdescr = space.w_None - size = 0 - for key in self.fields: - dtype = self.fields[key][1] - assert isinstance(dtype, W_Dtype) - size += dtype.get_size() - w_size = space.wrap(size) - #TODO: Change this when alignment is implemented - alignment = space.wrap(1) + if self.is_flexible(): + w_size = space.wrap(self.elsize) + alignment = space.wrap(self.alignment) else: - endian = self.byteorder - if endian == NPY.NATIVE: - endian = NPY.NATBYTE - subdescr = space.w_None w_size = space.wrap(-1) alignment = space.wrap(-1) flags = space.wrap(0) - data = space.newtuple([version, space.wrap(endian), subdescr, names, values, w_size, alignment, flags]) + data = space.newtuple([version, space.wrap(endian), subdescr, + names, values, w_size, alignment, flags]) return space.newtuple([w_class, builder_args, data]) def descr_setstate(self, space, w_data): @@ -375,6 +355,7 @@ w_names = space.getitem(w_data, space.wrap(3)) w_fields = space.getitem(w_data, space.wrap(4)) size = space.int_w(space.getitem(w_data, space.wrap(5))) + alignment = space.int_w(space.getitem(w_data, space.wrap(6))) if (w_names == space.w_None) != (w_fields == space.w_None): raise oefmt(space.w_ValueError, "inconsistent fields and names") @@ -413,8 +394,9 @@ self.fields[name] = offset, dtype self.itemtype = types.RecordType() - if self.is_flexible_type(): - self.size = size + if self.is_flexible(): + self.elsize = size + self.alignment = alignment @unwrap_spec(new_order=str) def descr_newbyteorder(self, space, new_order=NPY.SWAP): @@ -426,9 +408,13 @@ elif newendian != NPY.IGNORE: endian = newendian itemtype = self.itemtype.__class__(endian in (NPY.NATIVE, NPY.NATBYTE)) + fields = self.fields + if fields is None: + fields = {} return W_Dtype(itemtype, self.num, self.kind, self.char, - self.w_box_type, self.float_type, byteorder=endian, - size=self.size) + self.w_box_type, byteorder=endian, elsize=self.elsize, + names=self.names, fields=fields, + shape=self.shape, subdtype=self.subdtype) @specialize.arg(2) @@ -458,11 +444,11 @@ raise 
oefmt(space.w_ValueError, "two fields with the same name") assert isinstance(subdtype, W_Dtype) fields[fldname] = (offset, subdtype) - offset += subdtype.get_size() + offset += subdtype.elsize names.append(fldname) return W_Dtype(types.RecordType(), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), - names=names, fields=fields, size=offset) + names=names, fields=fields, elsize=offset) def dtype_from_dict(space, w_dict): @@ -501,10 +487,10 @@ size *= dim if size == 1: return subdtype - size *= subdtype.get_size() + size *= subdtype.elsize return W_Dtype(types.VoidType(), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), - shape=shape, subdtype=subdtype, size=size) + shape=shape, subdtype=subdtype, elsize=size) if space.is_none(w_dtype): return cache.w_float64dtype @@ -533,58 +519,59 @@ w_dtype1 = space.getitem(w_dtype, space.wrap(1)) subdtype = descr__new__(space, w_subtype, w_dtype0, w_align, w_copy) assert isinstance(subdtype, W_Dtype) - if subdtype.get_size() == 0: + if subdtype.elsize == 0: name = "%s%d" % (subdtype.kind, space.int_w(w_dtype1)) return descr__new__(space, w_subtype, space.wrap(name), w_align, w_copy) return descr__new__(space, w_subtype, w_dtype0, w_align, w_copy, w_shape=w_dtype1) elif space.isinstance_w(w_dtype, space.w_dict): return dtype_from_dict(space, w_dtype) for dtype in cache.builtin_dtypes: - if w_dtype in dtype.alternate_constructors: + if dtype.num in cache.alternate_constructors and \ + w_dtype in cache.alternate_constructors[dtype.num]: return dtype if w_dtype is dtype.w_box_type: return dtype if space.isinstance_w(w_dtype, space.w_type): - raise oefmt(space.w_NotImplementedError, "object dtype not implemented") + raise oefmt(space.w_NotImplementedError, + "cannot create dtype with type '%N'", w_dtype) raise oefmt(space.w_TypeError, "data type not understood") W_Dtype.typedef = TypeDef("dtype", __module__ = "numpy", __new__ = interp2app(descr__new__), - __str__= interp2app(W_Dtype.descr_str), - __repr__ = interp2app(W_Dtype.descr_repr), - __eq__ = interp2app(W_Dtype.descr_eq), - __ne__ = interp2app(W_Dtype.descr_ne), - __getitem__ = interp2app(W_Dtype.descr_getitem), - __len__ = interp2app(W_Dtype.descr_len), - - __hash__ = interp2app(W_Dtype.descr_hash), - __reduce__ = interp2app(W_Dtype.descr_reduce), - __setstate__ = interp2app(W_Dtype.descr_setstate), - newbyteorder = interp2app(W_Dtype.descr_newbyteorder), - type = interp_attrproperty_w("w_box_type", cls=W_Dtype), kind = interp_attrproperty("kind", cls=W_Dtype), char = interp_attrproperty("char", cls=W_Dtype), num = interp_attrproperty("num", cls=W_Dtype), byteorder = interp_attrproperty("byteorder", cls=W_Dtype), - itemsize = GetSetProperty(W_Dtype.descr_get_itemsize), - alignment = GetSetProperty(W_Dtype.descr_get_alignment), + itemsize = interp_attrproperty("elsize", cls=W_Dtype), + alignment = interp_attrproperty("alignment", cls=W_Dtype), + + name = GetSetProperty(W_Dtype.descr_get_name), + str = GetSetProperty(W_Dtype.descr_get_str), + descr = GetSetProperty(W_Dtype.descr_get_descr), + hasobject = GetSetProperty(W_Dtype.descr_get_hasobject), isbuiltin = GetSetProperty(W_Dtype.descr_get_isbuiltin), - + isnative = GetSetProperty(W_Dtype.descr_get_isnative), + base = GetSetProperty(W_Dtype.descr_get_base), subdtype = GetSetProperty(W_Dtype.descr_get_subdtype), - str = GetSetProperty(W_Dtype.descr_get_str), - name = GetSetProperty(W_Dtype.descr_get_name), - base = GetSetProperty(W_Dtype.descr_get_base), shape = 
GetSetProperty(W_Dtype.descr_get_shape), - isnative = GetSetProperty(W_Dtype.descr_get_isnative), fields = GetSetProperty(W_Dtype.descr_get_fields), names = GetSetProperty(W_Dtype.descr_get_names, W_Dtype.descr_set_names, W_Dtype.descr_del_names), - hasobject = GetSetProperty(W_Dtype.descr_get_hasobject), - descr = GetSetProperty(W_Dtype.descr_get_descr), + + __eq__ = interp2app(W_Dtype.descr_eq), + __ne__ = interp2app(W_Dtype.descr_ne), + __hash__ = interp2app(W_Dtype.descr_hash), + __str__= interp2app(W_Dtype.descr_str), + __repr__ = interp2app(W_Dtype.descr_repr), + __getitem__ = interp2app(W_Dtype.descr_getitem), + __len__ = interp2app(W_Dtype.descr_len), + __reduce__ = interp2app(W_Dtype.descr_reduce), + __setstate__ = interp2app(W_Dtype.descr_setstate), + newbyteorder = interp2app(W_Dtype.descr_newbyteorder), ) W_Dtype.typedef.acceptable_as_base_class = False @@ -601,10 +588,8 @@ except ValueError: raise oefmt(space.w_TypeError, "data type not understood") if char == NPY.CHARLTR: - char = NPY.STRINGLTR - size = 1 - - if char == NPY.STRINGLTR: + return new_string_dtype(space, 1, NPY.CHARLTR) + elif char == NPY.STRINGLTR: return new_string_dtype(space, size) elif char == NPY.UNICODELTR: return new_unicode_dtype(space, size) @@ -613,21 +598,22 @@ assert False -def new_string_dtype(space, size): +def new_string_dtype(space, size, char=NPY.STRINGLTR): return W_Dtype( types.StringType(), - size=size, + elsize=size, num=NPY.STRING, kind=NPY.STRINGLTR, - char=NPY.STRINGLTR, + char=char, w_box_type=space.gettypefor(interp_boxes.W_StringBox), ) def new_unicode_dtype(space, size): + itemtype = types.UnicodeType() return W_Dtype( - types.UnicodeType(), - size=size, + itemtype, + elsize=size * itemtype.get_element_size(), num=NPY.UNICODE, kind=NPY.UNICODELTR, char=NPY.UNICODELTR, @@ -638,7 +624,7 @@ def new_void_dtype(space, size): return W_Dtype( types.VoidType(), - size=size, + elsize=size, num=NPY.VOID, kind=NPY.VOIDLTR, char=NPY.VOIDLTR, @@ -654,8 +640,6 @@ kind=NPY.GENBOOLLTR, char=NPY.BOOLLTR, w_box_type=space.gettypefor(interp_boxes.W_BoolBox), - alternate_constructors=[space.w_bool], - aliases=['bool', 'bool8'], ) self.w_int8dtype = W_Dtype( types.Int8(), @@ -663,7 +647,6 @@ kind=NPY.SIGNEDLTR, char=NPY.BYTELTR, w_box_type=space.gettypefor(interp_boxes.W_Int8Box), - aliases=['byte'], ) self.w_uint8dtype = W_Dtype( types.UInt8(), @@ -671,7 +654,6 @@ kind=NPY.UNSIGNEDLTR, char=NPY.UBYTELTR, w_box_type=space.gettypefor(interp_boxes.W_UInt8Box), - aliases=['ubyte'], ) self.w_int16dtype = W_Dtype( types.Int16(), @@ -679,7 +661,6 @@ kind=NPY.SIGNEDLTR, char=NPY.SHORTLTR, w_box_type=space.gettypefor(interp_boxes.W_Int16Box), - aliases=['short'], ) self.w_uint16dtype = W_Dtype( types.UInt16(), @@ -687,7 +668,6 @@ kind=NPY.UNSIGNEDLTR, char=NPY.USHORTLTR, w_box_type=space.gettypefor(interp_boxes.W_UInt16Box), - aliases=['ushort'], ) self.w_int32dtype = W_Dtype( types.Int32(), @@ -709,11 +689,6 @@ kind=NPY.SIGNEDLTR, char=NPY.LONGLTR, w_box_type=space.gettypefor(interp_boxes.W_LongBox), - alternate_constructors=[space.w_int, - space.gettypefor(interp_boxes.W_IntegerBox), - space.gettypefor(interp_boxes.W_SignedIntegerBox), - ], - aliases=['int', 'intp', 'p'], ) self.w_ulongdtype = W_Dtype( types.ULong(), @@ -721,9 +696,6 @@ kind=NPY.UNSIGNEDLTR, char=NPY.ULONGLTR, w_box_type=space.gettypefor(interp_boxes.W_ULongBox), - alternate_constructors=[space.gettypefor(interp_boxes.W_UnsignedIntegerBox), - ], - aliases=['uint', 'uintp', 'P'], ) self.w_int64dtype = W_Dtype( types.Int64(), @@ -731,8 +703,6 @@ 
kind=NPY.SIGNEDLTR, char=NPY.LONGLONGLTR, w_box_type=space.gettypefor(interp_boxes.W_Int64Box), - alternate_constructors=[space.w_long], - aliases=['longlong'], ) self.w_uint64dtype = W_Dtype( types.UInt64(), @@ -740,7 +710,6 @@ kind=NPY.UNSIGNEDLTR, char=NPY.ULONGLONGLTR, w_box_type=space.gettypefor(interp_boxes.W_UInt64Box), - aliases=['ulonglong'], ) self.w_float32dtype = W_Dtype( types.Float32(), @@ -748,7 +717,6 @@ kind=NPY.FLOATINGLTR, char=NPY.FLOATLTR, w_box_type=space.gettypefor(interp_boxes.W_Float32Box), - aliases=['single'] ) self.w_float64dtype = W_Dtype( types.Float64(), @@ -756,11 +724,6 @@ kind=NPY.FLOATINGLTR, char=NPY.DOUBLELTR, w_box_type=space.gettypefor(interp_boxes.W_Float64Box), - alternate_constructors=[space.w_float, - space.gettypefor(interp_boxes.W_NumberBox), - space.gettypefor(interp_boxes.W_FloatingBox), - ], - aliases=["float", "double"], ) self.w_floatlongdtype = W_Dtype( types.FloatLong(), @@ -768,7 +731,6 @@ kind=NPY.FLOATINGLTR, char=NPY.LONGDOUBLELTR, w_box_type=space.gettypefor(interp_boxes.W_FloatLongBox), - aliases=["longdouble", "longfloat"], ) self.w_complex64dtype = W_Dtype( types.Complex64(), @@ -776,8 +738,6 @@ kind=NPY.COMPLEXLTR, char=NPY.CFLOATLTR, w_box_type=space.gettypefor(interp_boxes.W_Complex64Box), - aliases=['csingle'], - float_type=NPY.FLOATLTR, ) self.w_complex128dtype = W_Dtype( types.Complex128(), @@ -785,10 +745,6 @@ kind=NPY.COMPLEXLTR, char=NPY.CDOUBLELTR, w_box_type=space.gettypefor(interp_boxes.W_Complex128Box), - alternate_constructors=[space.w_complex, - space.gettypefor(interp_boxes.W_ComplexFloatingBox)], - aliases=["complex", 'cfloat', 'cdouble'], - float_type=NPY.DOUBLELTR, ) self.w_complexlongdtype = W_Dtype( types.ComplexLong(), @@ -796,41 +752,30 @@ kind=NPY.COMPLEXLTR, char=NPY.CLONGDOUBLELTR, w_box_type=space.gettypefor(interp_boxes.W_ComplexLongBox), - aliases=["clongdouble", "clongfloat"], - float_type=NPY.LONGDOUBLELTR, ) self.w_stringdtype = W_Dtype( types.StringType(), - size=0, + elsize=0, num=NPY.STRING, kind=NPY.STRINGLTR, char=NPY.STRINGLTR, w_box_type=space.gettypefor(interp_boxes.W_StringBox), - alternate_constructors=[space.w_str, - space.gettypefor(interp_boxes.W_CharacterBox)], - aliases=['string', "str"], ) self.w_unicodedtype = W_Dtype( types.UnicodeType(), - size=0, + elsize=0, num=NPY.UNICODE, kind=NPY.UNICODELTR, char=NPY.UNICODELTR, w_box_type=space.gettypefor(interp_boxes.W_UnicodeBox), - alternate_constructors=[space.w_unicode], - aliases=['unicode'], ) self.w_voiddtype = W_Dtype( types.VoidType(), - size=0, + elsize=0, num=NPY.VOID, kind=NPY.VOIDLTR, char=NPY.VOIDLTR, w_box_type=space.gettypefor(interp_boxes.W_VoidBox), - #alternate_constructors=[space.w_buffer], - # XXX no buffer in space - #alternate_constructors=[space.gettypefor(interp_boxes.W_GenericBox)], - # XXX fix, leads to _coerce error ) self.w_float16dtype = W_Dtype( types.Float16(), @@ -853,10 +798,52 @@ char=NPY.UINTPLTR, w_box_type=space.gettypefor(interp_boxes.W_ULongBox), ) + aliases = { + NPY.BOOL: ['bool', 'bool8'], + NPY.BYTE: ['byte'], + NPY.UBYTE: ['ubyte'], + NPY.SHORT: ['short'], + NPY.USHORT: ['ushort'], + NPY.LONG: ['int', 'intp', 'p'], + NPY.ULONG: ['uint', 'uintp', 'P'], + NPY.LONGLONG: ['longlong'], + NPY.ULONGLONG: ['ulonglong'], + NPY.FLOAT: ['single'], + NPY.DOUBLE: ['float', 'double'], + NPY.LONGDOUBLE: ['longdouble', 'longfloat'], + NPY.CFLOAT: ['csingle'], + NPY.CDOUBLE: ['complex', 'cfloat', 'cdouble'], + NPY.CLONGDOUBLE: ['clongdouble', 'clongfloat'], + NPY.STRING: ['string', 'str'], + NPY.UNICODE: 
['unicode'], + } + self.alternate_constructors = { + NPY.BOOL: [space.w_bool], + NPY.LONG: [space.w_int, + space.gettypefor(interp_boxes.W_IntegerBox), + space.gettypefor(interp_boxes.W_SignedIntegerBox)], + NPY.ULONG: [space.gettypefor(interp_boxes.W_UnsignedIntegerBox)], + NPY.LONGLONG: [space.w_long], + NPY.DOUBLE: [space.w_float, + space.gettypefor(interp_boxes.W_NumberBox), + space.gettypefor(interp_boxes.W_FloatingBox)], + NPY.CDOUBLE: [space.w_complex, + space.gettypefor(interp_boxes.W_ComplexFloatingBox)], + NPY.STRING: [space.w_str, + space.gettypefor(interp_boxes.W_CharacterBox)], + NPY.UNICODE: [space.w_unicode], + NPY.VOID: [space.gettypefor(interp_boxes.W_GenericBox)], + #space.w_buffer, # XXX no buffer in space + } float_dtypes = [self.w_float16dtype, self.w_float32dtype, self.w_float64dtype, self.w_floatlongdtype] complex_dtypes = [self.w_complex64dtype, self.w_complex128dtype, self.w_complexlongdtype] + self.component_dtypes = { + NPY.CFLOAT: self.w_float32dtype, + NPY.CDOUBLE: self.w_float64dtype, + NPY.CLONGDOUBLE: self.w_floatlongdtype, + } self.builtin_dtypes = [ self.w_booldtype, self.w_int8dtype, self.w_uint8dtype, @@ -869,7 +856,7 @@ self.w_intpdtype, self.w_uintpdtype, ] self.float_dtypes_by_num_bytes = sorted( - (dtype.get_size(), dtype) + (dtype.elsize, dtype) for dtype in float_dtypes ) self.dtypes_by_num = {} @@ -880,14 +867,15 @@ dtype.fields = None # mark these as builtin self.dtypes_by_num[dtype.num] = dtype self.dtypes_by_name[dtype.get_name()] = dtype - for can_name in [dtype.kind + str(dtype.get_size()), + for can_name in [dtype.kind + str(dtype.elsize), dtype.char]: self.dtypes_by_name[can_name] = dtype self.dtypes_by_name[NPY.NATBYTE + can_name] = dtype self.dtypes_by_name[NPY.NATIVE + can_name] = dtype self.dtypes_by_name[NPY.IGNORE + can_name] = dtype - for alias in dtype.aliases: - self.dtypes_by_name[alias] = dtype + if dtype.num in aliases: + for alias in aliases[dtype.num]: + self.dtypes_by_name[alias] = dtype typeinfo_full = { 'LONGLONG': self.w_int64dtype, @@ -935,13 +923,13 @@ for k, v in typeinfo_partial.iteritems(): space.setitem(w_typeinfo, space.wrap(k), space.gettypefor(v)) for k, dtype in typeinfo_full.iteritems(): - itembits = dtype.get_size() * 8 + itembits = dtype.elsize * 8 items_w = [space.wrap(dtype.char), space.wrap(dtype.num), space.wrap(itembits), space.wrap(dtype.itemtype.get_element_size())] - if dtype.is_int_type(): - if dtype.kind == NPY.GENBOOLLTR: + if dtype.is_int(): + if dtype.is_bool(): w_maxobj = space.wrap(1) w_minobj = space.wrap(0) elif dtype.is_signed(): diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -87,8 +87,8 @@ def descr_set_dtype(self, space, w_dtype): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) - if (dtype.get_size() != self.get_dtype().get_size() or - dtype.is_flexible_type() or self.get_dtype().is_flexible_type()): + if (dtype.elsize != self.get_dtype().elsize or + dtype.is_flexible() or self.get_dtype().is_flexible()): raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) self.implementation.set_dtype(space, dtype) @@ -101,10 +101,10 @@ return space.wrap(len(self.get_shape())) def descr_get_itemsize(self, space): - return space.wrap(self.get_dtype().get_size()) + return space.wrap(self.get_dtype().elsize) def descr_get_nbytes(self, space): - return 
space.wrap(self.get_size() * self.get_dtype().get_size()) + return space.wrap(self.get_size() * self.get_dtype().elsize) def descr_fill(self, space, w_value): self.fill(space, self.get_dtype().coerce(space, w_value)) @@ -220,7 +220,7 @@ def descr_getitem(self, space, w_idx): if space.is_w(w_idx, space.w_Ellipsis): return self - elif isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type() \ + elif isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool() \ and len(w_idx.get_shape()) > 0: return self.getitem_filter(space, w_idx) try: @@ -235,7 +235,7 @@ self.implementation.setitem_index(space, index_list, w_value) def descr_setitem(self, space, w_idx, w_value): - if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type() \ + if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool() \ and len(w_idx.get_shape()) > 0: self.setitem_filter(space, w_idx, convert_to_array(space, w_value)) return @@ -281,7 +281,7 @@ else: s.append(separator) s.append(' ') - if self.is_scalar() and dtype.is_str_type(): + if self.is_scalar() and dtype.is_str(): s.append(dtype.itemtype.to_str(i.getitem())) else: s.append(dtype.itemtype.str_format(i.getitem())) @@ -344,7 +344,7 @@ def descr_set_imag(self, space, w_value): # if possible, copy (broadcast) values into self - if not self.get_dtype().is_complex_type(): + if not self.get_dtype().is_complex(): raise OperationError(space.w_TypeError, space.wrap('array does not have imaginary part to set')) self.implementation.set_imag(space, self, w_value) @@ -575,10 +575,10 @@ raise oefmt(space.w_NotImplementedError, "astype(%s) not implemented yet", new_dtype.get_name()) - if new_dtype.num == NPY.STRING and new_dtype.size == 0: + if new_dtype.num == NPY.STRING and new_dtype.elsize == 0: if cur_dtype.num == NPY.STRING: new_dtype = interp_dtype.variable_dtype(space, - 'S' + str(cur_dtype.size)) + 'S' + str(cur_dtype.elsize)) impl = self.implementation if isinstance(impl, scalar.Scalar): return W_NDimArray.new_scalar(space, new_dtype, impl.value) @@ -689,7 +689,7 @@ @unwrap_spec(decimals=int) def descr_round(self, space, decimals=0, w_out=None): if space.is_none(w_out): - if self.get_dtype().is_bool_type(): + if self.get_dtype().is_bool(): #numpy promotes bool.round() to float16. Go figure. 
w_out = W_NDimArray.from_shape(space, self.get_shape(), interp_dtype.get_dtype_cache(space).w_float16dtype) @@ -700,7 +700,7 @@ "return arrays must be of ArrayType")) out = interp_dtype.dtype_agreement(space, [self], self.get_shape(), w_out) - if out.get_dtype().is_bool_type() and self.get_dtype().is_bool_type(): + if out.get_dtype().is_bool() and self.get_dtype().is_bool(): calc_dtype = interp_dtype.get_dtype_cache(space).w_longdtype else: calc_dtype = out.get_dtype() @@ -781,8 +781,8 @@ w_dtype)) else: dtype = self.get_dtype() - old_itemsize = self.get_dtype().get_size() - new_itemsize = dtype.get_size() + old_itemsize = self.get_dtype().elsize + new_itemsize = dtype.elsize impl = self.implementation if new_itemsize == 0: raise OperationError(space.w_TypeError, space.wrap( @@ -1093,7 +1093,7 @@ raise OperationError(space.w_TypeError, space.wrap( "only integer arrays with one element " "can be converted to an index")) - if not self.get_dtype().is_int_type() or self.get_dtype().is_bool_type(): + if not self.get_dtype().is_int() or self.get_dtype().is_bool(): raise OperationError(space.w_TypeError, space.wrap( "only integer arrays with one element " "can be converted to an index")) @@ -1188,7 +1188,7 @@ if not shape: raise OperationError(space.w_TypeError, space.wrap( "numpy scalars from buffers not supported yet")) - totalsize = support.product(shape) * dtype.get_size() + totalsize = support.product(shape) * dtype.elsize if totalsize+offset > buf.getlength(): raise OperationError(space.w_TypeError, space.wrap( "buffer is too small for requested array")) @@ -1448,9 +1448,10 @@ # scalars and strings w/o __array__ method isstr = space.isinstance_w(w_object, space.w_str) if not issequence_w(space, w_object) or isstr: - if dtype is None or (dtype.is_str_or_unicode() and dtype.get_size() < 1): - dtype = interp_ufuncs.find_dtype_for_scalar(space, w_object) - return W_NDimArray.new_scalar(space, dtype, w_object) + if dtype is None or dtype.char != NPY.CHARLTR: + if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): + dtype = interp_ufuncs.find_dtype_for_scalar(space, w_object) + return W_NDimArray.new_scalar(space, dtype, w_object) if space.is_none(w_order): order = 'C' @@ -1478,14 +1479,14 @@ # not an array or incorrect dtype shape, elems_w = find_shape_and_elems(space, w_object, dtype) - if dtype is None or (dtype.is_str_or_unicode() and dtype.get_size() < 1): + if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): for w_elem in elems_w: if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar(): w_elem = w_elem.get_scalar_value() dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, dtype) if dtype is None: dtype = interp_dtype.get_dtype_cache(space).w_float64dtype - elif dtype.is_str_or_unicode() and dtype.get_size() < 1: + elif dtype.is_str_or_unicode() and dtype.elsize < 1: # promote S0 -> S1, U0 -> U1 dtype = interp_dtype.variable_dtype(space, dtype.char + '1') @@ -1501,7 +1502,7 @@ def zeros(space, w_shape, w_dtype=None, w_order=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) - if dtype.is_str_or_unicode() and dtype.get_size() < 1: + if dtype.is_str_or_unicode() and dtype.elsize < 1: dtype = interp_dtype.variable_dtype(space, dtype.char + '1') shape = _find_shape(space, w_shape, dtype) return W_NDimArray.from_shape(space, shape, dtype=dtype) @@ -1514,24 +1515,30 @@ else: dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), 
w_dtype)) - if dtype.is_str_or_unicode() and dtype.get_size() < 1: + if dtype.is_str_or_unicode() and dtype.elsize < 1: dtype = interp_dtype.variable_dtype(space, dtype.char + '1') return W_NDimArray.from_shape(space, w_a.get_shape(), dtype=dtype, w_instance=w_a if subok else None) -def _reconstruct(space, w_subtype, w_shape, w_dtype): - return descr_new_array(space, w_subtype, w_shape, w_dtype) - def build_scalar(space, w_dtype, w_state): from rpython.rtyper.lltypesystem import rffi, lltype - - assert isinstance(w_dtype, interp_dtype.W_Dtype) - + if not isinstance(w_dtype, interp_dtype.W_Dtype): + raise oefmt(space.w_TypeError, + "argument 1 must be numpy.dtype, not %T", w_dtype) + if w_dtype.elsize == 0: + raise oefmt(space.w_ValueError, "itemsize cannot be zero") + if not space.isinstance_w(w_state, space.w_str): + raise oefmt(space.w_TypeError, "initializing object must be a string") + if space.len_w(w_state) != w_dtype.elsize: + raise oefmt(space.w_ValueError, "initialization string is too small") state = rffi.str2charp(space.str_w(w_state)) box = w_dtype.itemtype.box_raw_data(state) lltype.free(state, flavor="raw") return box +def _reconstruct(space, w_subtype, w_shape, w_dtype): + return descr_new_array(space, w_subtype, w_shape, w_dtype) + W_FlatIterator.typedef = TypeDef("flatiter", __module__ = "numpy", diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -59,7 +59,7 @@ return space.wrap(a) def _fromstring_bin(space, s, count, length, dtype): - itemsize = dtype.get_size() + itemsize = dtype.elsize assert itemsize >= 0 if count == -1: count = length / itemsize diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -168,7 +168,7 @@ "supported for binary functions")) assert isinstance(self, W_Ufunc2) obj = convert_to_array(space, w_obj) - if obj.get_dtype().is_flexible_type(): + if obj.get_dtype().is_flexible(): raise OperationError(space.w_TypeError, space.wrap('cannot perform reduce with flexible type')) obj_shape = obj.get_shape() @@ -287,12 +287,12 @@ out = None w_obj = convert_to_array(space, w_obj) dtype = w_obj.get_dtype() - if dtype.is_flexible_type(): + if dtype.is_flexible(): raise OperationError(space.w_TypeError, space.wrap('Not implemented for this type')) - if (self.int_only and not dtype.is_int_type() or - not self.allow_bool and dtype.is_bool_type() or - not self.allow_complex and dtype.is_complex_type()): + if (self.int_only and not dtype.is_int() or + not self.allow_bool and dtype.is_bool() or + not self.allow_complex and dtype.is_complex()): raise OperationError(space.w_TypeError, space.wrap( "ufunc %s not supported for the input type" % self.name)) calc_dtype = find_unaryop_result_dtype(space, @@ -311,7 +311,7 @@ res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype - if self.complex_to_float and calc_dtype.is_complex_type(): + if self.complex_to_float and calc_dtype.is_complex(): if calc_dtype.num == NPY.CFLOAT: res_dtype = interp_dtype.get_dtype_cache(space).w_float32dtype else: @@ -351,11 +351,11 @@ self.done_func = None def are_common_types(self, dtype1, dtype2): - if dtype1.is_complex_type() and dtype2.is_complex_type(): - return True - elif not (dtype1.is_complex_type() or dtype2.is_complex_type()) and \ - (dtype1.is_int_type() and dtype2.is_int_type() 
or dtype1.is_float_type() and dtype2.is_float_type()) and \ - not (dtype1.is_bool_type() or dtype2.is_bool_type()): + if dtype1.is_bool() or dtype2.is_bool(): + return False + if (dtype1.is_int() and dtype2.is_int() or + dtype1.is_float() and dtype2.is_float() or + dtype1.is_complex() and dtype2.is_complex()): return True return False @@ -370,13 +370,13 @@ w_rhs = convert_to_array(space, w_rhs) w_ldtype = w_lhs.get_dtype() w_rdtype = w_rhs.get_dtype() - if w_ldtype.is_str_type() and w_rdtype.is_str_type() and \ + if w_ldtype.is_str() and w_rdtype.is_str() and \ self.comparison_func: pass - elif (w_ldtype.is_str_type() or w_rdtype.is_str_type()) and \ + elif (w_ldtype.is_str() or w_rdtype.is_str()) and \ self.comparison_func and w_out is None: return space.wrap(False) - elif w_ldtype.is_flexible_type() or w_rdtype.is_flexible_type(): + elif w_ldtype.is_flexible() or w_rdtype.is_flexible(): if self.comparison_func: if self.name == 'equal' or self.name == 'not_equal': res = w_ldtype.eq(space, w_rdtype) @@ -399,13 +399,13 @@ w_ldtype, w_rdtype, promote_to_float=self.promote_to_float, promote_bools=self.promote_bools) - if (self.int_only and (not w_ldtype.is_int_type() or - not w_rdtype.is_int_type() or - not calc_dtype.is_int_type()) or - not self.allow_bool and (w_ldtype.is_bool_type() or - w_rdtype.is_bool_type()) or - not self.allow_complex and (w_ldtype.is_complex_type() or - w_rdtype.is_complex_type())): + if (self.int_only and (not w_ldtype.is_int() or + not w_rdtype.is_int() or + not calc_dtype.is_int()) or + not self.allow_bool and (w_ldtype.is_bool() or + w_rdtype.is_bool()) or + not self.allow_complex and (w_ldtype.is_complex() or + w_rdtype.is_complex())): raise OperationError(space.w_TypeError, space.wrap( "ufunc '%s' not supported for the input types" % self.name)) if space.is_none(w_out): @@ -467,7 +467,7 @@ return interp_dtype.get_dtype_cache(space).w_int8dtype # Everything numeric promotes to complex - if dt2.is_complex_type() or dt1.is_complex_type(): + if dt2.is_complex() or dt1.is_complex(): if dt2.num == NPY.HALF: dt1, dt2 = dt2, dt1 if dt2.num == NPY.CFLOAT: @@ -488,7 +488,7 @@ if promote_to_float: return find_unaryop_result_dtype(space, dt2, promote_to_float=True) # If they're the same kind, choose the greater one. 
- if dt1.kind == dt2.kind and not dt2.is_flexible_type(): + if dt1.kind == dt2.kind and not dt2.is_flexible(): if dt2.num == NPY.HALF: return dt1 return dt2 @@ -513,13 +513,13 @@ elif dt2.num == NPY.ULONGLONG or (LONG_BIT == 64 and dt2.num == NPY.ULONG): # UInt64 + signed = Float64 dtypenum = NPY.DOUBLE - elif dt2.is_flexible_type(): + elif dt2.is_flexible(): # For those operations that get here (concatenate, stack), # flexible types take precedence over numeric type - if dt2.is_record_type(): + if dt2.is_record(): return dt2 if dt1.is_str_or_unicode(): - if dt2.get_size() >= dt1.get_size(): + if dt2.elsize >= dt1.elsize: return dt2 return dt1 return dt2 @@ -542,10 +542,10 @@ promote_bools=False, promote_to_largest=False): if promote_to_largest: if dt.kind == NPY.GENBOOLLTR or dt.kind == NPY.SIGNEDLTR: - if dt.get_size() * 8 < LONG_BIT: + if dt.elsize * 8 < LONG_BIT: return interp_dtype.get_dtype_cache(space).w_longdtype elif dt.kind == NPY.UNSIGNEDLTR: - if dt.get_size() * 8 < LONG_BIT: + if dt.elsize * 8 < LONG_BIT: return interp_dtype.get_dtype_cache(space).w_ulongdtype else: assert dt.kind == NPY.FLOATINGLTR or dt.kind == NPY.COMPLEXLTR @@ -596,7 +596,7 @@ return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) elif current_guess.num == NPY.STRING: - if current_guess.get_size() < space.len_w(w_obj): + if current_guess.elsize < space.len_w(w_obj): return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) return current_guess diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -174,7 +174,7 @@ def __init__(self, array): self.array = array self.offset = 0 - self.skip = array.dtype.get_size() + self.skip = array.dtype.elsize self.size = array.size def setitem(self, elem): diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -459,7 +459,7 @@ builder = StringBuilder() iter = arr.create_iter() w_res_str = W_NDimArray.from_shape(space, [1], arr.get_dtype(), order='C') - itemsize = arr.get_dtype().get_size() + itemsize = arr.get_dtype().elsize res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), w_res_str.implementation.get_storage_as_int(space)) while not iter.done(): diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -69,7 +69,7 @@ return True def find_shape_and_elems(space, w_iterable, dtype): - is_rec_type = dtype is not None and dtype.is_record_type() + is_rec_type = dtype is not None and dtype.is_record() if is_rec_type and is_single_elem(space, w_iterable, is_rec_type): return [], [w_iterable] if isinstance(w_iterable, W_NDimArray) and w_iterable.is_scalar(): diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -28,8 +28,8 @@ shape_rev.reverse() for sh in shape_rev: slimit = max(sh, 1) - strides.append(s * dtype.get_size()) - backstrides.append(s * (slimit - 1) * dtype.get_size()) + strides.append(s * dtype.elsize) + backstrides.append(s * (slimit - 1) * dtype.elsize) s *= slimit if order == 'C': strides.reverse() diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -58,6 +58,7 @@ assert 
dtype('int8').num == 1 assert dtype('int8').name == 'int8' + assert dtype('void').name == 'void' assert dtype(int).fields is None assert dtype(int).names is None assert dtype(int).hasobject is False @@ -371,6 +372,7 @@ raises(TypeError, hash, d) def test_pickle(self): + import numpy as np from numpypy import array, dtype from cPickle import loads, dumps a = array([1,2,3]) @@ -379,6 +381,9 @@ else: assert a.dtype.__reduce__() == (dtype, ('i4', 0, 1), (3, '<', None, None, None, -1, -1, 0)) assert loads(dumps(a.dtype)) == a.dtype + assert np.dtype('bool').__reduce__() == (dtype, ('b1', 0, 1), (3, '|', None, None, None, -1, -1, 0)) + assert np.dtype('|V16').__reduce__() == (dtype, ('V16', 0, 1), (3, '|', None, None, None, 16, 1, 0)) + assert np.dtype(('i8', 0) + assert d.subdtype is None + #assert d.descr == [('f0', '>i8')] + #assert str(d) == "[('f0', '>i8')]" + d = np.dtype(('i8', (2,))" + def test_object(self): import numpy as np import sys @@ -433,7 +452,7 @@ assert np.dtype(o).str == '|O8' else: exc = raises(NotImplementedError, "np.dtype(o)") - assert exc.value[0] == 'object dtype not implemented' + assert exc.value[0] == "cannot create dtype with type '%s'" % o.__name__ class AppTestTypes(BaseAppTestDtypes): def test_abstract_types(self): @@ -855,6 +874,7 @@ raises(TypeError, lambda: float64(3) & 1) def test_alternate_constructs(self): + import numpy as np from numpypy import dtype nnp = self.non_native_prefix byteorder = self.native_prefix @@ -870,6 +890,12 @@ assert dtype(' Author: Manuel Jacob Branch: kill-multimethod Changeset: r69447:044f6359573b Date: 2014-02-25 19:55 +0100 http://bitbucket.org/pypy/pypy/changeset/044f6359573b/ Log: Remove multimethod option and references to pypy.objspace.std.multimethod. diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -272,9 +272,6 @@ # weakrefs needed, because of get_subclasses() requires=[("translation.rweakref", True)]), - ChoiceOption("multimethods", "the multimethod implementation to use", - ["doubledispatch", "mrd"], - default="mrd"), BoolOption("withidentitydict", "track types that override __hash__, __eq__ or __cmp__ and use a special dict strategy for those which do not", default=False, diff --git a/pypy/doc/_ref.txt b/pypy/doc/_ref.txt --- a/pypy/doc/_ref.txt +++ b/pypy/doc/_ref.txt @@ -54,7 +54,6 @@ .. _`pypy/objspace/std`: .. _`pypy/objspace/std/`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/ .. _`pypy/objspace/std/listtype.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/listtype.py -.. _`pypy/objspace/std/multimethod.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/multimethod.py .. _`pypy/objspace/std/objspace.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/objspace.py .. _`pypy/objspace/std/proxy_helpers.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxy_helpers.py .. _`pypy/objspace/std/proxyobject.py`: https://bitbucket.org/pypy/pypy/src/default/pypy/objspace/std/proxyobject.py diff --git a/pypy/doc/config/objspace.std.multimethods.txt b/pypy/doc/config/objspace.std.multimethods.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.multimethods.txt +++ /dev/null @@ -1,8 +0,0 @@ -Choose the multimethod implementation. - -* ``doubledispatch`` turns - a multimethod call into a sequence of normal method calls. 
- -* ``mrd`` uses a technique known as Multiple Row Displacement - which precomputes a few compact tables of numbers and - function pointers. diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -194,23 +194,6 @@ from pypy.config.pypyoption import set_pypy_opt_level set_pypy_opt_level(config, translateconfig.opt) - # as of revision 27081, multimethod.py uses the InstallerVersion1 by default - # because it is much faster both to initialize and run on top of CPython. - # The InstallerVersion2 is optimized for making a translator-friendly - # structure for low level backends. However, InstallerVersion1 is still - # preferable for high level backends, so we patch here. - - from pypy.objspace.std import multimethod - if config.objspace.std.multimethods == 'mrd': - assert multimethod.InstallerVersion1.instance_counter == 0,\ - 'The wrong Installer version has already been instatiated' - multimethod.Installer = multimethod.InstallerVersion2 - elif config.objspace.std.multimethods == 'doubledispatch': - # don't rely on the default, set again here - assert multimethod.InstallerVersion2.instance_counter == 0,\ - 'The wrong Installer version has already been instatiated' - multimethod.Installer = multimethod.InstallerVersion1 - def print_help(self, config): self.opt_parser(config).print_help() diff --git a/pypy/tool/pypyjit.py b/pypy/tool/pypyjit.py --- a/pypy/tool/pypyjit.py +++ b/pypy/tool/pypyjit.py @@ -17,7 +17,6 @@ from pypy.objspace.std import Space from rpython.config.translationoption import set_opt_level from pypy.config.pypyoption import get_pypy_config, set_pypy_opt_level -from pypy.objspace.std import multimethod from rpython.rtyper.annlowlevel import llhelper, llstr, hlstr from rpython.rtyper.lltypesystem.rstr import STR from rpython.rtyper.lltypesystem import lltype @@ -42,8 +41,6 @@ # set_pypy_opt_level(config, level='jit') -config.objspace.std.multimethods = 'mrd' -multimethod.Installer = multimethod.InstallerVersion2 print config import sys, pdb From noreply at buildbot.pypy.org Tue Feb 25 20:36:18 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 20:36:18 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: Remove ignore_for_isinstance_cache attributes. The isinstance cache is built explicitly now. Message-ID: <20140225193618.402241C02EA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69448:6c758cd93812 Date: 2014-02-25 20:19 +0100 http://bitbucket.org/pypy/pypy/changeset/6c758cd93812/ Log: Remove ignore_for_isinstance_cache attributes. The isinstance cache is built explicitly now. 
diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py --- a/pypy/objspace/std/dictmultiobject.py +++ b/pypy/objspace/std/dictmultiobject.py @@ -1128,8 +1128,6 @@ class W_BaseDictMultiIterObject(W_Root): _immutable_fields_ = ["iteratorimplementation"] - ignore_for_isinstance_cache = True - def __init__(self, space, iteratorimplementation): self.space = space self.iteratorimplementation = iteratorimplementation diff --git a/pypy/objspace/std/proxyobject.py b/pypy/objspace/std/proxyobject.py --- a/pypy/objspace/std/proxyobject.py +++ b/pypy/objspace/std/proxyobject.py @@ -8,8 +8,6 @@ def transparent_class(name, BaseCls): class W_Transparent(BaseCls): - ignore_for_isinstance_cache = True - def __init__(self, space, w_type, w_controller): self.w_type = w_type self.w_controller = w_controller From noreply at buildbot.pypy.org Tue Feb 25 20:36:19 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 20:36:19 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: Remove unnecessary delegation methods. Message-ID: <20140225193619.805661C02EA@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69449:aa67858b4e18 Date: 2014-02-25 20:26 +0100 http://bitbucket.org/pypy/pypy/changeset/aa67858b4e18/ Log: Remove unnecessary delegation methods. diff --git a/pypy/objspace/std/smalllongobject.py b/pypy/objspace/std/smalllongobject.py --- a/pypy/objspace/std/smalllongobject.py +++ b/pypy/objspace/std/smalllongobject.py @@ -376,14 +376,6 @@ raise OverflowError("integer multiplication") -def delegate_SmallLong2Float(space, w_small): - return space.newfloat(float(w_small.longlong)) - - -def delegate_SmallLong2Complex(space, w_small): - return space.newcomplex(float(w_small.longlong), 0.0) - - def _int2small(space, w_int): # XXX: W_IntObject.descr_long should probably return W_SmallLongs return W_SmallLongObject.fromint(w_int.int_w(space)) From noreply at buildbot.pypy.org Tue Feb 25 20:43:47 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Tue, 25 Feb 2014 20:43:47 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: Fix cpyext. Message-ID: <20140225194347.20D291C0150@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69450:fd070a9318e5 Date: 2014-02-25 20:42 +0100 http://bitbucket.org/pypy/pypy/changeset/fd070a9318e5/ Log: Fix cpyext. 
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -312,11 +312,6 @@ @bootstrap_function def init_typeobject(space): - # Probably a hack - space.model.typeorder[W_PyCTypeObject] = [(W_PyCTypeObject, None), - (W_TypeObject, None), - (W_Root, None)] - make_typedescr(space.w_type.instancetypedef, basestruct=PyTypeObject, alloc=type_alloc, From noreply at buildbot.pypy.org Wed Feb 26 01:52:14 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Wed, 26 Feb 2014 01:52:14 +0100 (CET) Subject: [pypy-commit] buildbot default: link the osx64 test results in listing Message-ID: <20140226005214.3FDF71C02EA@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r907:8550f84840ee Date: 2014-02-25 19:51 -0500 http://bitbucket.org/pypy/buildbot/changeset/8550f84840ee/ Log: link the osx64 test results in listing diff --git a/bot2/pypybuildbot/pypylist.py b/bot2/pypybuildbot/pypylist.py --- a/bot2/pypybuildbot/pypylist.py +++ b/bot2/pypybuildbot/pypylist.py @@ -38,6 +38,7 @@ 'linux': 'linux-x86-32', 'linux64': 'linux-x86-64', 'osx': 'macosx-x86-32', + 'osx64': 'macosx-x86-64', 'win32': 'win-x86-32', } From noreply at buildbot.pypy.org Wed Feb 26 05:21:17 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 26 Feb 2014 05:21:17 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: Kill pypy.objspace.std.stdtypedef and move TypeCache to pypy.objspace.std.typeobject. Message-ID: <20140226042117.C83B91C0150@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69451:750998b0ec09 Date: 2014-02-26 04:52 +0100 http://bitbucket.org/pypy/pypy/changeset/750998b0ec09/ Log: Kill pypy.objspace.std.stdtypedef and move TypeCache to pypy.objspace.std.typeobject. diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -3,7 +3,7 @@ from pypy.interpreter.baseobjspace import ObjSpace, W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.typedef import get_unique_interplevel_subclass -from pypy.objspace.std import stdtypedef, frame, transparent, callmethod +from pypy.objspace.std import frame, transparent, callmethod from pypy.objspace.descroperation import DescrOperation, raiseattrerror from rpython.rlib.objectmodel import instantiate, specialize, is_annotation_constant from rpython.rlib.debug import make_sure_not_resized @@ -28,7 +28,7 @@ from pypy.objspace.std.setobject import W_SetObject, W_FrozensetObject from pypy.objspace.std.sliceobject import W_SliceObject from pypy.objspace.std.tupleobject import W_AbstractTupleObject, W_TupleObject -from pypy.objspace.std.typeobject import W_TypeObject +from pypy.objspace.std.typeobject import W_TypeObject, TypeCache from pypy.objspace.std.unicodeobject import W_UnicodeObject, wrapunicode @@ -118,10 +118,10 @@ return self.gettypeobject(cls.typedef) def gettypeobject(self, typedef): - # stdtypedef.TypeCache maps each StdTypeDef instance to its + # typeobject.TypeCache maps a TypeDef instance to its # unique-for-this-space W_TypeObject instance assert typedef is not None - return self.fromcache(stdtypedef.TypeCache).getorbuild(typedef) + return self.fromcache(TypeCache).getorbuild(typedef) def wrap(self, x): "Wraps the Python value 'x' into one of the wrapper classes." 
diff --git a/pypy/objspace/std/stdtypedef.py b/pypy/objspace/std/stdtypedef.py deleted file mode 100644 --- a/pypy/objspace/std/stdtypedef.py +++ /dev/null @@ -1,40 +0,0 @@ -from pypy.interpreter.baseobjspace import SpaceCache - - -class TypeCache(SpaceCache): - def build(cache, typedef): - "NOT_RPYTHON: initialization-time only." - # build a W_TypeObject from this StdTypeDef - from pypy.objspace.std.typeobject import W_TypeObject - from pypy.objspace.std.objectobject import W_ObjectObject - - space = cache.space - w = space.wrap - rawdict = typedef.rawdict - lazyloaders = {} - - # compute the bases - if typedef is W_ObjectObject.typedef: - bases_w = [] - else: - bases = typedef.bases or [W_ObjectObject.typedef] - bases_w = [space.gettypeobject(base) for base in bases] - - # wrap everything - dict_w = {} - for descrname, descrvalue in rawdict.items(): - dict_w[descrname] = w(descrvalue) - - if typedef.applevel_subclasses_base is not None: - overridetypedef = typedef.applevel_subclasses_base.typedef - else: - overridetypedef = typedef - w_type = W_TypeObject(space, typedef.name, bases_w, dict_w, - overridetypedef=overridetypedef) - if typedef is not overridetypedef: - w_type.w_doc = space.wrap(typedef.doc) - w_type.lazyloaders = lazyloaders - return w_type - - def ready(self, w_type): - w_type.ready() diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -1,5 +1,5 @@ from pypy.interpreter import gateway -from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.baseobjspace import W_Root, SpaceCache from pypy.interpreter.error import oefmt, OperationError from pypy.interpreter.function import Function, StaticMethod from pypy.interpreter.typedef import weakref_descr, GetSetProperty,\ @@ -1203,3 +1203,39 @@ names = [cls.getname(space) for cls in cycle] raise OperationError(space.w_TypeError, space.wrap( "cycle among base classes: " + ' < '.join(names))) + + +class TypeCache(SpaceCache): + def build(self, typedef): + "NOT_RPYTHON: initialization-time only." + from pypy.objspace.std.objectobject import W_ObjectObject + + space = self.space + rawdict = typedef.rawdict + lazyloaders = {} + + # compute the bases + if typedef is W_ObjectObject.typedef: + bases_w = [] + else: + bases = typedef.bases or [W_ObjectObject.typedef] + bases_w = [space.gettypeobject(base) for base in bases] + + # wrap everything + dict_w = {} + for descrname, descrvalue in rawdict.items(): + dict_w[descrname] = space.wrap(descrvalue) + + if typedef.applevel_subclasses_base is not None: + overridetypedef = typedef.applevel_subclasses_base.typedef + else: + overridetypedef = typedef + w_type = W_TypeObject(space, typedef.name, bases_w, dict_w, + overridetypedef=overridetypedef) + if typedef is not overridetypedef: + w_type.w_doc = space.wrap(typedef.doc) + w_type.lazyloaders = lazyloaders + return w_type + + def ready(self, w_type): + w_type.ready() From noreply at buildbot.pypy.org Wed Feb 26 09:17:35 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 26 Feb 2014 09:17:35 +0100 (CET) Subject: [pypy-commit] pypy default: Test and fix for "[None] * some_r_uint_number". Message-ID: <20140226081735.4FA541C3299@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69452:1e265793f593 Date: 2014-02-26 09:16 +0100 http://bitbucket.org/pypy/pypy/changeset/1e265793f593/ Log: Test and fix for "[None] * some_r_uint_number". 
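A minimal sketch of the RPython pattern this changeset is about (the function below is purely illustrative and not part of the diff): a one-element list multiplied by an r_uint count is now turned into a single alloc_and_set operation by the translator, exactly as it already was for a plain int count.

    from rpython.rlib.rarithmetic import r_uint

    def make_buffer(n):
        # before this fix the transformation only matched an int multiplier,
        # so the r_uint form went through the generic list-repeat code path
        return [None] * r_uint(n)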
diff --git a/rpython/rtyper/test/test_rlist.py b/rpython/rtyper/test/test_rlist.py --- a/rpython/rtyper/test/test_rlist.py +++ b/rpython/rtyper/test/test_rlist.py @@ -1619,3 +1619,17 @@ rgc.ll_arraycopy = old_arraycopy # assert 2 <= res <= 10 + + def test_alloc_and_set(self): + def fn(i): + lst = [0] * r_uint(i) + return lst + t, rtyper, graph = self.gengraph(fn, [int]) + block = graph.startblock + seen = 0 + for op in block.operations: + if op.opname in ['cast_int_to_uint', 'cast_uint_to_int']: + continue + assert op.opname == 'direct_call' + seen += 1 + assert seen == 1 diff --git a/rpython/translator/transform.py b/rpython/translator/transform.py --- a/rpython/translator/transform.py +++ b/rpython/translator/transform.py @@ -30,7 +30,7 @@ # [a] * b # --> # c = newlist(a) -# d = mul(c, int b) +# d = mul(c, b) # --> # d = alloc_and_set(b, a) @@ -44,8 +44,7 @@ len(op.args) == 1): length1_lists[op.result] = op.args[0] elif (op.opname == 'mul' and - op.args[0] in length1_lists and - self.gettype(op.args[1]) is int): + op.args[0] in length1_lists): new_op = SpaceOperation('alloc_and_set', (op.args[1], length1_lists[op.args[0]]), op.result) From noreply at buildbot.pypy.org Wed Feb 26 09:25:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 26 Feb 2014 09:25:07 +0100 (CET) Subject: [pypy-commit] pypy default: Two new tests that pass after 1e265793f593. Message-ID: <20140226082507.36D8A1C3299@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69453:0e9883e7f2f9 Date: 2014-02-26 09:24 +0100 http://bitbucket.org/pypy/pypy/changeset/0e9883e7f2f9/ Log: Two new tests that pass after 1e265793f593. diff --git a/rpython/jit/metainterp/test/test_list.py b/rpython/jit/metainterp/test/test_list.py --- a/rpython/jit/metainterp/test/test_list.py +++ b/rpython/jit/metainterp/test/test_list.py @@ -287,6 +287,48 @@ assert res == 5 self.check_resops(call=0) + def test_list_mul_virtual(self): + class Foo: + def __init__(self, l): + self.l = l + l[0] = self + + myjitdriver = JitDriver(greens = [], reds = ['y']) + def f(y): + while y > 0: + myjitdriver.jit_merge_point(y=y) + Foo([None] * 5) + y -= 1 + return 42 + + self.meta_interp(f, [5]) + self.check_resops({'int_sub': 2, + 'int_gt': 2, + 'guard_true': 2, + 'jump': 1}) + + def test_list_mul_unsigned_virtual(self): + from rpython.rlib.rarithmetic import r_uint + + class Foo: + def __init__(self, l): + self.l = l + l[0] = self + + myjitdriver = JitDriver(greens = [], reds = ['y']) + def f(y): + while y > 0: + myjitdriver.jit_merge_point(y=y) + Foo([None] * r_uint(5)) + y -= 1 + return 42 + + self.meta_interp(f, [5]) + self.check_resops({'int_sub': 2, + 'int_gt': 2, + 'guard_true': 2, + 'jump': 1}) + class TestLLtype(ListTests, LLJitMixin): def test_listops_dont_invalidate_caches(self): From noreply at buildbot.pypy.org Wed Feb 26 09:34:59 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 26 Feb 2014 09:34:59 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: extend demo_random with some objects of different sizes Message-ID: <20140226083459.1A6C91C3299@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r865:ca0644f22aa0 Date: 2014-02-26 09:35 +0100 http://bitbucket.org/pypy/stmgc/changeset/ca0644f22aa0/ Log: extend demo_random with some objects of different sizes diff --git a/c7/demo/Makefile b/c7/demo/Makefile --- a/c7/demo/Makefile +++ b/c7/demo/Makefile @@ -20,7 +20,7 @@ # note that 'build' is optimized but still contains all asserts debug-%: %.c ${H_FILES} ${C_FILES} - clang -I.. 
-pthread -DSTM_DEBUGPRINT -g $< -o debug-$* \ + clang -I.. -pthread -DSTM_DEBUGPRINT -g -O0 $< -o debug-$* \ -Wall -Werror ../stmgc.c build-%: %.c ${H_FILES} ${C_FILES} diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -22,7 +22,7 @@ struct node_s { struct object_s hdr; - long value; + long my_size; nodeptr_t next; }; @@ -65,14 +65,24 @@ ssize_t stmcb_size_rounded_up(struct object_s *ob) { - return sizeof(struct node_s); + return ((struct node_s*)ob)->my_size; } void stmcb_trace(struct object_s *obj, void visit(object_t **)) { struct node_s *n; n = (struct node_s*)obj; + + /* and the same value at the end: */ + /* note, ->next may be the same as last_next */ + nodeptr_t *last_next = (nodeptr_t*)((char*)n + n->my_size - sizeof(void*)); + + assert(n->next == *last_next); + visit((object_t **)&n->next); + visit((object_t **)last_next); + + assert(n->next == *last_next); } void _push_shared_roots() @@ -176,11 +186,33 @@ } } +void set_next(objptr_t p, objptr_t v) +{ + if (p != NULL) { + nodeptr_t n = (nodeptr_t)p; + + /* and the same value at the end: */ + nodeptr_t TLPREFIX *last_next = (nodeptr_t TLPREFIX *)((stm_char*)n + n->my_size - sizeof(void*)); + assert(n->next == *last_next); + n->next = (nodeptr_t)v; + *last_next = (nodeptr_t)v; + } +} + +nodeptr_t get_next(objptr_t p) +{ + nodeptr_t n = (nodeptr_t)p; + + /* and the same value at the end: */ + nodeptr_t TLPREFIX *last_next = (nodeptr_t TLPREFIX *)((stm_char*)n + n->my_size - sizeof(void*)); + assert(n->next == *last_next); + + return n->next; +} objptr_t simple_events(objptr_t p, objptr_t _r) { - nodeptr_t w_r; int k = get_rand(8); int num; @@ -201,7 +233,13 @@ break; case 3: // allocate fresh 'p' push_roots(); - p = stm_allocate(sizeof(struct node_s)); + size_t sizes[4] = {sizeof(struct node_s), + sizeof(struct node_s) + 48, + sizeof(struct node_s) + 4096, + sizeof(struct node_s) + 4096*70}; + size_t size = sizes[get_rand(4)]; + p = stm_allocate(size); + ((nodeptr_t)p)->my_size = size; pop_roots(); /* reload_roots not necessary, all are old after start_transaction */ break; @@ -214,13 +252,12 @@ case 6: // follow p->next if (p) { read_barrier(p); - p = (objptr_t)(((nodeptr_t)(p))->next); + p = (objptr_t)(get_next(p)); } break; case 7: // set 'p' as *next in one of the roots write_barrier(_r); - w_r = (nodeptr_t)_r; - w_r->next = (nodeptr_t)p; + set_next(_r, p); break; } return p; @@ -330,6 +367,7 @@ stm_start_inevitable_transaction(&stm_thread_local); for (i = 0; i < SHARED_ROOTS; i++) { shared_roots[i] = stm_allocate(sizeof(struct node_s)); + ((nodeptr_t)shared_roots[i])->my_size = sizeof(struct node_s); PUSH_ROOT(shared_roots[i]); } stm_commit_transaction(); From noreply at buildbot.pypy.org Wed Feb 26 09:35:17 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 26 Feb 2014 09:35:17 +0100 (CET) Subject: [pypy-commit] pypy default: (arigo, fijal) add GIL handling around pypy_execute_source Message-ID: <20140226083517.5A8171C3299@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69454:7c361c775aa8 Date: 2014-02-26 10:33 +0200 http://bitbucket.org/pypy/pypy/changeset/7c361c775aa8/ Log: (arigo, fijal) add GIL handling around pypy_execute_source diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -120,8 +120,10 @@ @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): + 
rffi.aroundstate.after() source = rffi.charp2str(ll_source) res = _pypy_execute_source(source) + rffi.aroundstate.before() return rffi.cast(rffi.INT, res) @entrypoint('main', [], c_name='pypy_init_threads') From noreply at buildbot.pypy.org Wed Feb 26 09:35:19 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 26 Feb 2014 09:35:19 +0100 (CET) Subject: [pypy-commit] pypy default: merge Message-ID: <20140226083519.AC8CA1C3299@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69455:0746a182b067 Date: 2014-02-26 10:34 +0200 http://bitbucket.org/pypy/pypy/changeset/0746a182b067/ Log: merge diff --git a/lib-python/2.7/threading.py b/lib-python/2.7/threading.py --- a/lib-python/2.7/threading.py +++ b/lib-python/2.7/threading.py @@ -246,7 +246,14 @@ else: # PyPy patch: use _py3k_acquire() if timeout > 0: - gotit = waiter._py3k_acquire(True, timeout) + try: + gotit = waiter._py3k_acquire(True, timeout) + except OverflowError: + # bah, in Python 3, acquire(True, timeout) raises + # OverflowError if the timeout is too huge. For + # forward-compatibility reasons we do the same. + waiter.acquire() + gotit = True else: gotit = waiter.acquire(False) if not gotit: diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -14,6 +14,8 @@ _immutable_fields_ = ['size?', 'name', 'name_position'] # note that 'size' is not strictly immutable, because it can change # from -1 to the real value in the W_CTypeStruct subclass. + # XXX this could be improved with an elidable method get_size() + # that raises in case it's still -1... cast_anything = False is_primitive_integer = False diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -159,7 +159,7 @@ subentry = ProfilerSubEntry(entry.frame) self.calls[entry] = subentry return subentry - return None + raise class ProfilerContext(object): def __init__(self, profobj, entry): @@ -181,8 +181,11 @@ entry._stop(tt, it) if profobj.subcalls and self.previous: caller = jit.promote(self.previous.entry) - subentry = caller._get_or_make_subentry(entry, False) - if subentry is not None: + try: + subentry = caller._get_or_make_subentry(entry, False) + except KeyError: + pass + else: subentry._stop(tt, it) @@ -308,7 +311,7 @@ entry = ProfilerEntry(f_code) self.data[f_code] = entry return entry - return None + raise @jit.elidable def _get_or_make_builtin_entry(self, key, make=True): @@ -319,7 +322,7 @@ entry = ProfilerEntry(self.space.wrap(key)) self.builtin_data[key] = entry return entry - return None + raise def _enter_call(self, f_code): # we have a superb gc, no point in freelist :) @@ -332,8 +335,11 @@ if context is None: return self = jit.promote(self) - entry = self._get_or_make_entry(f_code, False) - if entry is not None: + try: + entry = self._get_or_make_entry(f_code, False) + except KeyError: + pass + else: context._stop(self, entry) self.current_context = context.previous @@ -347,8 +353,11 @@ if context is None: return self = jit.promote(self) - entry = self._get_or_make_builtin_entry(key, False) - if entry is not None: + try: + entry = self._get_or_make_builtin_entry(key, False) + except KeyError: + pass + else: context._stop(self, entry) self.current_context = context.previous diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- 
a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -532,7 +532,8 @@ if w_dtype is dtype.w_box_type: return dtype if space.isinstance_w(w_dtype, space.w_type): - raise oefmt(space.w_NotImplementedError, "object dtype not implemented") + raise oefmt(space.w_NotImplementedError, + "cannot create dtype with type '%N'", w_dtype) raise oefmt(space.w_TypeError, "data type not understood") W_Dtype.typedef = TypeDef("dtype", @@ -587,10 +588,8 @@ except ValueError: raise oefmt(space.w_TypeError, "data type not understood") if char == NPY.CHARLTR: - char = NPY.STRINGLTR - size = 1 - - if char == NPY.STRINGLTR: + return new_string_dtype(space, 1, NPY.CHARLTR) + elif char == NPY.STRINGLTR: return new_string_dtype(space, size) elif char == NPY.UNICODELTR: return new_unicode_dtype(space, size) @@ -599,13 +598,13 @@ assert False -def new_string_dtype(space, size): +def new_string_dtype(space, size, char=NPY.STRINGLTR): return W_Dtype( types.StringType(), elsize=size, num=NPY.STRING, kind=NPY.STRINGLTR, - char=NPY.STRINGLTR, + char=char, w_box_type=space.gettypefor(interp_boxes.W_StringBox), ) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -1448,9 +1448,10 @@ # scalars and strings w/o __array__ method isstr = space.isinstance_w(w_object, space.w_str) if not issequence_w(space, w_object) or isstr: - if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): - dtype = interp_ufuncs.find_dtype_for_scalar(space, w_object) - return W_NDimArray.new_scalar(space, dtype, w_object) + if dtype is None or dtype.char != NPY.CHARLTR: + if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): + dtype = interp_ufuncs.find_dtype_for_scalar(space, w_object) + return W_NDimArray.new_scalar(space, dtype, w_object) if space.is_none(w_order): order = 'C' diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -452,7 +452,7 @@ assert np.dtype(o).str == '|O8' else: exc = raises(NotImplementedError, "np.dtype(o)") - assert exc.value[0] == 'object dtype not implemented' + assert exc.value[0] == "cannot create dtype with type '%s'" % o.__name__ class AppTestTypes(BaseAppTestDtypes): def test_abstract_types(self): @@ -891,6 +891,11 @@ assert dtype('void').byteorder == '|' assert dtype((int, 2)).byteorder == '|' assert dtype(np.generic).str == '|V0' + d = dtype(np.character) + assert d.num == 18 + assert d.char == 'S' + assert d.kind == 'S' + assert d.str == '|S0' def test_dtype_str(self): from numpypy import dtype @@ -1055,9 +1060,15 @@ assert isinstance(u, unicode) def test_character_dtype(self): + import numpy as np from numpypy import array, character x = array([["A", "B"], ["C", "D"]], character) assert (x == [["A", "B"], ["C", "D"]]).all() + d = np.dtype('c') + assert d.num == 18 + assert d.char == 'c' + assert d.kind == 'S' + assert d.str == '|S1' class AppTestRecordDtypes(BaseNumpyAppTest): spaceconfig = dict(usemodules=["micronumpy", "struct", "binascii"]) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1697,16 +1697,12 @@ assert exc.value[0] == "data-type must not be 0-sized" assert a.view('S4') == '\x03' a = 
array('abc1', dtype='c') - import sys - if '__pypy__' in sys.builtin_module_names: - raises(ValueError, a.view, 'S4') - raises(ValueError, a.view, [('a', 'i2'), ('b', 'i2')]) - else: - assert a.view('S4') == 'abc1' - b = a.view([('a', 'i2'), ('b', 'i2')]) - assert b.shape == (1,) - assert b[0][0] == 25185 - assert b[0][1] == 12643 + assert (a == ['a', 'b', 'c', '1']).all() + assert a.view('S4') == 'abc1' + b = a.view([('a', 'i2'), ('b', 'i2')]) + assert b.shape == (1,) + assert b[0][0] == 25185 + assert b[0][1] == 12643 a = array([(1, 2)], dtype=[('a', 'int64'), ('b', 'int64')])[0] assert a.shape == () assert a.view('S16') == '\x01' + '\x00' * 7 + '\x02' diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py --- a/pypy/module/pypyjit/test_pypy_c/model.py +++ b/pypy/module/pypyjit/test_pypy_c/model.py @@ -6,8 +6,9 @@ from _pytest.assertion import newinterpret except ImportError: # e.g. Python 2.5 newinterpret = None -from pypy.tool.jitlogparser.parser import SimpleParser, Function, TraceForOpcode -from pypy.tool.jitlogparser.storage import LoopStorage +from rpython.tool.jitlogparser.parser import (SimpleParser, Function, + TraceForOpcode) +from rpython.tool.jitlogparser.storage import LoopStorage def find_ids_range(code): diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -3,7 +3,7 @@ import types import subprocess import py -from lib_pypy import disassembler +from rpython.tool import disassembler from rpython.tool.udir import udir from rpython.tool import logparser from rpython.jit.tool.jitoutput import parse_prof @@ -129,7 +129,7 @@ class TestOpMatcher_(object): def match(self, src1, src2, **kwds): - from pypy.tool.jitlogparser.parser import SimpleParser + from rpython.tool.jitlogparser.parser import SimpleParser loop = SimpleParser.parse_from_input(src1) matcher = OpMatcher(loop.operations) try: diff --git a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py --- a/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py +++ b/pypy/module/pypyjit/test_pypy_c/test_jitlogparser.py @@ -4,7 +4,7 @@ from rpython.tool.logparser import extract_category from rpython.jit.backend.tool.viewcode import ObjdumpNotFound -from pypy.tool.jitlogparser.parser import (import_log, parse_log_counts, +from rpython.tool.jitlogparser.parser import (import_log, parse_log_counts, mangle_descr) from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -622,7 +622,6 @@ sys.maxint == 2147483647) - at jit.elidable def _string_to_int_or_long(space, w_source, string, base=10): w_longval = None value = 0 diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -241,6 +241,21 @@ else: extraeffect = EffectInfo.EF_CANNOT_RAISE # + # check that the result is really as expected + if loopinvariant: + if extraeffect != EffectInfo.EF_LOOPINVARIANT: + from rpython.jit.codewriter.policy import log; log.WARNING( + "in operation %r: this calls a _jit_loop_invariant_ function," + " but this contradicts other sources (e.g. 
it can have random" + " effects)" % (op,)) + if elidable: + if extraeffect not in (EffectInfo.EF_ELIDABLE_CANNOT_RAISE, + EffectInfo.EF_ELIDABLE_CAN_RAISE): + from rpython.jit.codewriter.policy import log; log.WARNING( + "in operation %r: this calls an _elidable_function_," + " but this contradicts other sources (e.g. it can have random" + " effects)" % (op,)) + # effectinfo = effectinfo_from_writeanalyze( self.readwrite_analyzer.analyze(op, self.seen), self.cpu, extraeffect, oopspecindex, can_invalidate, call_release_gil_target, diff --git a/rpython/jit/codewriter/test/test_call.py b/rpython/jit/codewriter/test/test_call.py --- a/rpython/jit/codewriter/test/test_call.py +++ b/rpython/jit/codewriter/test/test_call.py @@ -248,3 +248,26 @@ op = block.operations[-1] call_descr = cc.getcalldescr(op) assert call_descr.extrainfo.has_random_effects() + +def test_no_random_effects_for_rotateLeft(): + from rpython.jit.backend.llgraph.runner import LLGraphCPU + from rpython.rlib.rarithmetic import r_uint + + if r_uint.BITS == 32: + py.test.skip("64-bit only") + + from rpython.rlib.rmd5 import _rotateLeft + def f(n, m): + return _rotateLeft(r_uint(n), m) + + rtyper = support.annotate(f, [7, 9]) + jitdriver_sd = FakeJitDriverSD(rtyper.annotator.translator.graphs[0]) + cc = CallControl(LLGraphCPU(rtyper), jitdrivers_sd=[jitdriver_sd]) + res = cc.find_all_graphs(FakePolicy()) + + [f_graph] = [x for x in res if x.func is f] + [block, _] = list(f_graph.iterblocks()) + op = block.operations[-1] + call_descr = cc.getcalldescr(op) + assert not call_descr.extrainfo.has_random_effects() + assert call_descr.extrainfo.check_is_elidable() diff --git a/rpython/jit/metainterp/test/test_list.py b/rpython/jit/metainterp/test/test_list.py --- a/rpython/jit/metainterp/test/test_list.py +++ b/rpython/jit/metainterp/test/test_list.py @@ -287,6 +287,48 @@ assert res == 5 self.check_resops(call=0) + def test_list_mul_virtual(self): + class Foo: + def __init__(self, l): + self.l = l + l[0] = self + + myjitdriver = JitDriver(greens = [], reds = ['y']) + def f(y): + while y > 0: + myjitdriver.jit_merge_point(y=y) + Foo([None] * 5) + y -= 1 + return 42 + + self.meta_interp(f, [5]) + self.check_resops({'int_sub': 2, + 'int_gt': 2, + 'guard_true': 2, + 'jump': 1}) + + def test_list_mul_unsigned_virtual(self): + from rpython.rlib.rarithmetic import r_uint + + class Foo: + def __init__(self, l): + self.l = l + l[0] = self + + myjitdriver = JitDriver(greens = [], reds = ['y']) + def f(y): + while y > 0: + myjitdriver.jit_merge_point(y=y) + Foo([None] * r_uint(5)) + y -= 1 + return 42 + + self.meta_interp(f, [5]) + self.check_resops({'int_sub': 2, + 'int_gt': 2, + 'guard_true': 2, + 'jump': 1}) + class TestLLtype(ListTests, LLJitMixin): def test_listops_dont_invalidate_caches(self): diff --git a/rpython/rlib/rarithmetic.py b/rpython/rlib/rarithmetic.py --- a/rpython/rlib/rarithmetic.py +++ b/rpython/rlib/rarithmetic.py @@ -709,5 +709,4 @@ result = ovfcheck(result + digit) except OverflowError: raise ParseStringOverflowError(p) - - +string_to_int._elidable_function_ = True diff --git a/rpython/rtyper/lltypesystem/rffi.py b/rpython/rtyper/lltypesystem/rffi.py --- a/rpython/rtyper/lltypesystem/rffi.py +++ b/rpython/rtyper/lltypesystem/rffi.py @@ -116,12 +116,14 @@ # default case: # invoke the around-handlers only for "not too small" external calls; # sandboxsafe is a hint for "too-small-ness" (e.g. math functions). - invoke_around_handlers = not sandboxsafe + # Also, _nowrapper functions cannot release the GIL, by default. 
+ invoke_around_handlers = not sandboxsafe and not _nowrapper if random_effects_on_gcobjs not in (False, True): random_effects_on_gcobjs = ( invoke_around_handlers or # because it can release the GIL has_callback) # because the callback can do it + assert not (elidable_function and random_effects_on_gcobjs) funcptr = lltype.functionptr(ext_type, name, external='C', compilation_info=compilation_info, diff --git a/rpython/rtyper/test/test_rlist.py b/rpython/rtyper/test/test_rlist.py --- a/rpython/rtyper/test/test_rlist.py +++ b/rpython/rtyper/test/test_rlist.py @@ -1619,3 +1619,17 @@ rgc.ll_arraycopy = old_arraycopy # assert 2 <= res <= 10 + + def test_alloc_and_set(self): + def fn(i): + lst = [0] * r_uint(i) + return lst + t, rtyper, graph = self.gengraph(fn, [int]) + block = graph.startblock + seen = 0 + for op in block.operations: + if op.opname in ['cast_int_to_uint', 'cast_uint_to_int']: + continue + assert op.opname == 'direct_call' + seen += 1 + assert seen == 1 diff --git a/lib_pypy/disassembler.py b/rpython/tool/disassembler.py rename from lib_pypy/disassembler.py rename to rpython/tool/disassembler.py diff --git a/pypy/tool/jitlogparser/__init__.py b/rpython/tool/jitlogparser/__init__.py rename from pypy/tool/jitlogparser/__init__.py rename to rpython/tool/jitlogparser/__init__.py diff --git a/pypy/tool/jitlogparser/module_finder.py b/rpython/tool/jitlogparser/module_finder.py rename from pypy/tool/jitlogparser/module_finder.py rename to rpython/tool/jitlogparser/module_finder.py diff --git a/pypy/tool/jitlogparser/parser.py b/rpython/tool/jitlogparser/parser.py rename from pypy/tool/jitlogparser/parser.py rename to rpython/tool/jitlogparser/parser.py diff --git a/pypy/tool/jitlogparser/storage.py b/rpython/tool/jitlogparser/storage.py rename from pypy/tool/jitlogparser/storage.py rename to rpython/tool/jitlogparser/storage.py --- a/pypy/tool/jitlogparser/storage.py +++ b/rpython/tool/jitlogparser/storage.py @@ -5,8 +5,8 @@ import py import os -from lib_pypy.disassembler import dis -from pypy.tool.jitlogparser.module_finder import gather_all_code_objs +from rpython.tool.disassembler import dis +from rpython.tool.jitlogparser.module_finder import gather_all_code_objs class LoopStorage(object): def __init__(self, extrapath=None): diff --git a/pypy/tool/jitlogparser/test/__init__.py b/rpython/tool/jitlogparser/test/__init__.py rename from pypy/tool/jitlogparser/test/__init__.py rename to rpython/tool/jitlogparser/test/__init__.py diff --git a/pypy/tool/jitlogparser/test/logtest.log b/rpython/tool/jitlogparser/test/logtest.log rename from pypy/tool/jitlogparser/test/logtest.log rename to rpython/tool/jitlogparser/test/logtest.log diff --git a/pypy/tool/jitlogparser/test/logtest2.log b/rpython/tool/jitlogparser/test/logtest2.log rename from pypy/tool/jitlogparser/test/logtest2.log rename to rpython/tool/jitlogparser/test/logtest2.log diff --git a/pypy/tool/jitlogparser/test/test_modulefinder.py b/rpython/tool/jitlogparser/test/test_modulefinder.py rename from pypy/tool/jitlogparser/test/test_modulefinder.py rename to rpython/tool/jitlogparser/test/test_modulefinder.py --- a/pypy/tool/jitlogparser/test/test_modulefinder.py +++ b/rpython/tool/jitlogparser/test/test_modulefinder.py @@ -1,5 +1,5 @@ import py -from pypy.tool.jitlogparser.module_finder import gather_all_code_objs +from rpython.tool.jitlogparser.module_finder import gather_all_code_objs import re, sys def setup_module(mod): diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/rpython/tool/jitlogparser/test/test_parser.py 
rename from pypy/tool/jitlogparser/test/test_parser.py rename to rpython/tool/jitlogparser/test/test_parser.py --- a/pypy/tool/jitlogparser/test/test_parser.py +++ b/rpython/tool/jitlogparser/test/test_parser.py @@ -1,8 +1,8 @@ -from pypy.tool.jitlogparser.parser import (SimpleParser, TraceForOpcode, - Function, adjust_bridges, - import_log, split_trace, Op, - parse_log_counts) -from pypy.tool.jitlogparser.storage import LoopStorage +from rpython.tool.jitlogparser.parser import (SimpleParser, TraceForOpcode, + Function, adjust_bridges, + import_log, split_trace, Op, + parse_log_counts) +from rpython.tool.jitlogparser.storage import LoopStorage import py, sys from rpython.jit.backend.detect_cpu import autodetect diff --git a/pypy/tool/jitlogparser/test/test_storage.py b/rpython/tool/jitlogparser/test/test_storage.py rename from pypy/tool/jitlogparser/test/test_storage.py rename to rpython/tool/jitlogparser/test/test_storage.py --- a/pypy/tool/jitlogparser/test/test_storage.py +++ b/rpython/tool/jitlogparser/test/test_storage.py @@ -1,5 +1,5 @@ import py -from pypy.tool.jitlogparser.storage import LoopStorage +from rpython.tool.jitlogparser.storage import LoopStorage def test_load_codes(): tmppath = py.test.ensuretemp('load_codes') diff --git a/pypy/tool/jitlogparser/test/x.py b/rpython/tool/jitlogparser/test/x.py rename from pypy/tool/jitlogparser/test/x.py rename to rpython/tool/jitlogparser/test/x.py diff --git a/rpython/translator/transform.py b/rpython/translator/transform.py --- a/rpython/translator/transform.py +++ b/rpython/translator/transform.py @@ -30,7 +30,7 @@ # [a] * b # --> # c = newlist(a) -# d = mul(c, int b) +# d = mul(c, b) # --> # d = alloc_and_set(b, a) @@ -44,8 +44,7 @@ len(op.args) == 1): length1_lists[op.result] = op.args[0] elif (op.opname == 'mul' and - op.args[0] in length1_lists and - self.gettype(op.args[1]) is int): + op.args[0] in length1_lists): new_op = SpaceOperation('alloc_and_set', (op.args[1], length1_lists[op.args[0]]), op.result) From noreply at buildbot.pypy.org Wed Feb 26 09:36:29 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 26 Feb 2014 09:36:29 +0100 (CET) Subject: [pypy-commit] pypy default: in-progress document about embedding Message-ID: <20140226083629.AB2B11C3299@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69456:7f6f07e39700 Date: 2014-02-26 10:35 +0200 http://bitbucket.org/pypy/pypy/changeset/7f6f07e39700/ Log: in-progress document about embedding diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/embedding.rst @@ -0,0 +1,66 @@ + +PyPy has a very minimal and a very strange embedding interface, based on +the usage of `cffi`_ and the philosophy that Python is a better language in C. +It was developed in collaboration with Roberto De Ioris from the `uwsgi`_ +project. The `PyPy uwsgi plugin`_ is a good example of usage of such interface. + +The first thing that you need, that we plan to change in the future, is to +compile PyPy yourself with an option ``--shared``. Consult the +`how to compile PyPy`_ doc for details. That should result in ``libpypy.so`` +or ``pypy.dll`` file or something similar, depending on your platform. Consult +your platform specification for details. + +The resulting shared library has very few functions that are however enough +to make a full API working, provided you'll follow a few principles. The API +is: + +.. 
function:: void rpython_startup_code(void); + + This is a function that you have to call (once) before calling anything. + It initializes the RPython/PyPy GC and does a bunch of necessary startup + code. This function cannot fail. + +.. function:: void pypy_init_threads(void); + + Initialize threads. Only need to be called if there are any threads involved + XXXX double check + +.. function:: long pypy_setup_home(char* home, int verbose); + + This is another function that you have to call at some point, without + it you would not be able to find the standard library (and run pretty much + nothing). Arguments: + + * ``home``: null terminated path + + * ``verbose``: if non-zero, would print error messages to stderr + + Function returns 0 on success or 1 on failure, can be called multiple times + until the library is found. + +.. function:: int pypy_execute_source(char* source); + + Execute the source code given in the ``source`` argument. Will print + the error message to stderr upon failure and return 1, otherwise returns 0. + You should really do your own error handling in the source. It'll acquire + + +.. function:: void pypy_thread_attach(void); + + In case your application uses threads that are initialized outside of PyPy, + you need to call this function to tell the PyPy GC to track this thread. + Note that this function is not thread-safe itself, so you need to guard it + with a mutex. + +Simple example +-------------- + + +Threading +--------- + +XXXX I don't understand what's going on, discuss with unbit + +.. _`cffi`: xxx +.. _`uwsgi`: xxx +.. _`PyPy uwsgi plugin`: xxx diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -145,11 +145,13 @@ After you successfully manage to get PyPy's source you can read more about: - `Building and using PyPy's Python interpreter`_ + - `Embedding PyPy`_ - `Learning more about the RPython toolchain and how to develop (with) PyPy`_ - `Tutorial for how to write an interpreter with the RPython toolchain and make it fast`_ - `Look at our benchmark results`_ .. _`Building and using PyPy's Python interpreter`: getting-started-python.html +.. _`Embedding PyPy`: embedding.html .. _`Learning more about the RPython toolchain and how to develop (with) PyPy`: getting-started-dev.html .. _`Tutorial for how to write an interpreter with the RPython toolchain and make it fast`: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html .. 
_`Look at our benchmark results`: http://speed.pypy.org From noreply at buildbot.pypy.org Wed Feb 26 11:25:59 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 26 Feb 2014 11:25:59 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: add a test which is now passing, but failed before this branch Message-ID: <20140226102559.DCC011C3D68@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69457:de5ea5e1d969 Date: 2014-02-26 10:30 +0100 http://bitbucket.org/pypy/pypy/changeset/de5ea5e1d969/ Log: add a test which is now passing, but failed before this branch diff --git a/pypy/objspace/test/test_descroperation.py b/pypy/objspace/test/test_descroperation.py --- a/pypy/objspace/test/test_descroperation.py +++ b/pypy/objspace/test/test_descroperation.py @@ -721,8 +721,19 @@ return CannotConvertToBool() x = X() raises(MyError, "'foo' in x") - - + + def test___cmp___fake_int(self): + class MyInt(object): + def __init__(self, x): + self.x = x + def __int__(self): + return self.x + class X(object): + def __cmp__(self, other): + return MyInt(0) + + assert X() == 'hello' + class AppTestWithBuiltinShortcut(AppTest_Descroperation): spaceconfig = {'objspace.std.builtinshortcut': True} From noreply at buildbot.pypy.org Wed Feb 26 11:26:01 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 26 Feb 2014 11:26:01 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: another passing test which used to fail Message-ID: <20140226102601.1690C1C3D68@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69458:b401ef2acac2 Date: 2014-02-26 11:08 +0100 http://bitbucket.org/pypy/pypy/changeset/b401ef2acac2/ Log: another passing test which used to fail diff --git a/pypy/objspace/std/test/test_stringformat.py b/pypy/objspace/std/test/test_stringformat.py --- a/pypy/objspace/std/test/test_stringformat.py +++ b/pypy/objspace/std/test/test_stringformat.py @@ -186,6 +186,22 @@ def test_broken_unicode(self): raises(UnicodeDecodeError, 'Názov: %s'.__mod__, u'Jerry') + def test___int__(self): + class MyInt(object): + def __init__(self, x): + self.x = x + def __int__(self): + return self.x + # + x = MyInt(65) + assert '%c' % x == 'A' + + +class Foo(object): + def __cmp__(self, other): + return MyInt(0) + + class AppTestWidthPrec: def test_width(self): a = 'a' From noreply at buildbot.pypy.org Wed Feb 26 11:26:02 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 26 Feb 2014 11:26:02 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: given the previous ifs this is not strictily necessary, however I think that putting an explicit allow_conversion=False makes things clearer Message-ID: <20140226102602.3FE2B1C3D68@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69459:d40701cd789e Date: 2014-02-26 11:12 +0100 http://bitbucket.org/pypy/pypy/changeset/d40701cd789e/ Log: given the previous ifs this is not strictily necessary, however I think that putting an explicit allow_conversion=False makes things clearer diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -42,7 +42,8 @@ return False if self.user_overridden_class or w_other.user_overridden_class: return self is w_other - return space.int_w(self) == space.int_w(w_other) + return (space.int_w(self, allow_conversion=False) == + space.int_w(w_other, allow_conversion=False)) def immutable_unique_id(self, space): if self.user_overridden_class: From 
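
The "more advanced example" is still left as ``xxx`` in the embedding document above; the sketch below is only a guess at its shape, written against the public cffi API and not taken from the uwsgi plugin. The idea stated in the document is that the C side stays minimal and the string handed to ``pypy_execute_source()`` builds the real logic in Python, for example as a cffi callback; how the resulting function pointer is handed back to the embedding C code is application-specific and not shown here::

    import cffi

    ffi = cffi.FFI()

    @ffi.callback("int(char *)")
    def handle_request(raw):
        # all of the application logic lives here, in Python
        print "handling", ffi.string(raw)
        return 0

    # an integer address that the embedding C code could pick up through
    # whatever channel the application defines (a known C global, a file, ...)
    handle_request_address = int(ffi.cast("intptr_t", handle_request))
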
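A small app-level sketch collecting in one place the behaviour that the int_w-refactor tests above assert on that branch (it is not part of any changeset, and plain CPython 2 rejects some of these conversions): wherever the interpreter now goes through ``space.int_w(w_obj, allow_conversion=True)``, an object that only defines ``__int__`` is accepted::

    class MyInt(object):
        def __init__(self, x):
            self.x = x
        def __int__(self):
            return self.x

    # string formatting with %c converts its argument via __int__
    assert '%c' % MyInt(65) == 'A'
    # so does the 'base' argument of int(): '10' in base 24 is 24
    assert int('10', MyInt(24)) == 24
    # and a __cmp__ returning such an object is converted as well
    class X(object):
        def __cmp__(self, other):
            return MyInt(0)
    assert X() == 'hello'
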
noreply at buildbot.pypy.org Wed Feb 26 11:26:03 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 26 Feb 2014 11:26:03 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: another case in which allow_conversion=False is semantically right, although the non-int cases are already caught before Message-ID: <20140226102603.6C8371C3D68@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69460:464d1fe86d91 Date: 2014-02-26 11:23 +0100 http://bitbucket.org/pypy/pypy/changeset/464d1fe86d91/ Log: another case in which allow_conversion=False is semantically right, although the non-int cases are already caught before diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -671,7 +671,7 @@ # int_w is effectively what we want in this case, # we cannot construct a subclass of int instance with an # an overflowing long - value = space.int_w(w_obj) + value = space.int_w(w_obj, allow_conversion=False) elif space.isinstance_w(w_value, space.w_str): value, w_longval = _string_to_int_or_long(space, w_value, space.str_w(w_value)) From noreply at buildbot.pypy.org Wed Feb 26 11:26:04 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 26 Feb 2014 11:26:04 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: another test which is now passing Message-ID: <20140226102604.876811C3D68@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69461:aeda53c595a1 Date: 2014-02-26 11:23 +0100 http://bitbucket.org/pypy/pypy/changeset/aeda53c595a1/ Log: another test which is now passing diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -553,6 +553,15 @@ assert 3 .__coerce__(4) == (3, 4) assert 3 .__coerce__(4L) == NotImplemented + def test_fake_int_as_base(self): + class MyInt(object): + def __init__(self, x): + self.x = x + def __int__(self): + return self.x + + base = MyInt(24) + assert int('10', base) == 24 class AppTestIntShortcut(AppTestInt): spaceconfig = {"objspace.std.intshortcut": True} From noreply at buildbot.pypy.org Wed Feb 26 12:38:00 2014 From: noreply at buildbot.pypy.org (cfbolz) Date: Wed, 26 Feb 2014 12:38:00 +0100 (CET) Subject: [pypy-commit] pypy default: another test for [non-None-ptr] * n being virtual, although only if n < 15 Message-ID: <20140226113800.D9E5D1C15A7@cobra.cs.uni-duesseldorf.de> Author: Carl Friedrich Bolz Branch: Changeset: r69462:2c8e18a5330a Date: 2014-02-26 12:37 +0100 http://bitbucket.org/pypy/pypy/changeset/2c8e18a5330a/ Log: another test for [non-None-ptr] * n being virtual, although only if n < 15 diff --git a/rpython/jit/metainterp/test/test_list.py b/rpython/jit/metainterp/test/test_list.py --- a/rpython/jit/metainterp/test/test_list.py +++ b/rpython/jit/metainterp/test/test_list.py @@ -98,8 +98,8 @@ self.check_resops(setarrayitem_gc=0, call=0, getarrayitem_gc=0) def test_vlist_alloc_and_set(self): - # the check_loops fails, because [non-null] * n is not supported yet - # (it is implemented as a residual call) + # the check_loops fails, because [non-null] * n is only supported + # if n < 15 (otherwise it is implemented as a residual call) jitdriver = JitDriver(greens = [], reds = ['n']) def f(n): l = [1] * 20 @@ -116,7 +116,7 @@ res = self.meta_interp(f, [10], listops=True) assert res == f(10) - py.test.skip("'[non-null] * n' gives a residual call 
so far") + py.test.skip("'[non-null] * n' for n >= 15 gives a residual call so far") self.check_loops(setarrayitem_gc=0, getarrayitem_gc=0, call=0) def test_arraycopy_simpleoptimize(self): @@ -307,6 +307,32 @@ 'guard_true': 2, 'jump': 1}) + def test_list_mul_virtual_nonzero(self): + class base: + pass + class Foo(base): + def __init__(self, l): + self.l = l + l[0] = self + class nil(base): + pass + + nil = nil() + + myjitdriver = JitDriver(greens = [], reds = ['y']) + def f(y): + while y > 0: + myjitdriver.jit_merge_point(y=y) + Foo([nil] * 5) + y -= 1 + return 42 + + self.meta_interp(f, [5]) + self.check_resops({'int_sub': 2, + 'int_gt': 2, + 'guard_true': 2, + 'jump': 1}) + def test_list_mul_unsigned_virtual(self): from rpython.rlib.rarithmetic import r_uint diff --git a/rpython/rtyper/test/test_generator.py b/rpython/rtyper/test/test_generator.py --- a/rpython/rtyper/test/test_generator.py +++ b/rpython/rtyper/test/test_generator.py @@ -88,3 +88,16 @@ return s res = self.interpret(g, []) assert res == 6 + + def test_send(self): + def f(): + yield (yield 1) + 1 + def g(): + gen = f() + res = f.send(2) + assert res == 1 + res = f.next() + assert res == 3 + + res = self.interpret(g, []) + From noreply at buildbot.pypy.org Wed Feb 26 13:40:44 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 26 Feb 2014 13:40:44 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: move shadow stack init/done to setup.c Message-ID: <20140226124044.AB2051C3369@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r866:22f8313b374c Date: 2014-02-26 13:40 +0100 http://bitbucket.org/pypy/stmgc/changeset/22f8313b374c/ Log: move shadow stack init/done to setup.c diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c --- a/c7/demo/demo2.c +++ b/c7/demo/demo2.c @@ -21,24 +21,6 @@ __thread stm_thread_local_t stm_thread_local; -#define PUSH_ROOT(p) (*stm_thread_local.shadowstack++ = (object_t *)(p)) -#define POP_ROOT(p) ((p) = (typeof(p))*--stm_thread_local.shadowstack) - -void init_shadow_stack(void) -{ - object_t **s = (object_t **)malloc(1000 * sizeof(object_t *)); - assert(s); - stm_thread_local.shadowstack = s; - stm_thread_local.shadowstack_base = s; -} - -void done_shadow_stack(void) -{ - free(stm_thread_local.shadowstack_base); - stm_thread_local.shadowstack = NULL; - stm_thread_local.shadowstack_base = NULL; -} - ssize_t stmcb_size_rounded_up(struct object_s *ob) { @@ -157,14 +139,14 @@ global_chained_list->value = -1; global_chained_list->next = NULL; - PUSH_ROOT(global_chained_list); + STM_PUSH_ROOT(stm_thread_local, global_chained_list); w_prev = global_chained_list; for (i = 0; i < LIST_LENGTH; i++) { - PUSH_ROOT(w_prev); + STM_PUSH_ROOT(stm_thread_local, w_prev); w_newnode = (nodeptr_t)stm_allocate(sizeof(struct node_s)); - POP_ROOT(w_prev); + STM_POP_ROOT(stm_thread_local, w_prev); w_newnode->value = LIST_LENGTH - i; w_newnode->next = NULL; @@ -173,16 +155,16 @@ w_prev = w_newnode; } - POP_ROOT(global_chained_list); /* update value */ + STM_POP_ROOT(stm_thread_local, global_chained_list); /* update value */ assert(global_chained_list->value == -1); - PUSH_ROOT(global_chained_list); + STM_PUSH_ROOT(stm_thread_local, global_chained_list); stm_commit_transaction(); stm_start_inevitable_transaction(&stm_thread_local); - POP_ROOT(global_chained_list); /* update value */ + STM_POP_ROOT(stm_thread_local, global_chained_list); /* update value */ assert(global_chained_list->value == -1); - PUSH_ROOT(global_chained_list); /* remains forever in the shadow stack */ + 
STM_PUSH_ROOT(stm_thread_local, global_chained_list); /* remains forever in the shadow stack */ stm_commit_transaction(); printf("setup ok\n"); @@ -196,16 +178,16 @@ { int status; stm_register_thread_local(&stm_thread_local); - init_shadow_stack(); - PUSH_ROOT(global_chained_list); /* remains forever in the shadow stack */ + + STM_PUSH_ROOT(stm_thread_local, global_chained_list); /* remains forever in the shadow stack */ while (check_sorted() == -1) { bubble_run(); } - POP_ROOT(global_chained_list); + STM_POP_ROOT(stm_thread_local, global_chained_list); assert(stm_thread_local.shadowstack == stm_thread_local.shadowstack_base); - done_shadow_stack(); + stm_unregister_thread_local(&stm_thread_local); status = sem_post(&done); assert(status == 0); return NULL; @@ -216,9 +198,9 @@ long sum; printf("final check\n"); - + sum = check_sorted(); - + // little Gauss: if (sum == (1 + LIST_LENGTH) * (LIST_LENGTH / 2)) printf("check ok\n"); @@ -247,7 +229,7 @@ stm_setup(); stm_register_thread_local(&stm_thread_local); - init_shadow_stack(); + setup_list(); @@ -259,7 +241,7 @@ final_check(); - done_shadow_stack(); + stm_unregister_thread_local(&stm_thread_local); stm_teardown(); diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c --- a/c7/demo/demo_random.c +++ b/c7/demo/demo_random.c @@ -44,25 +44,6 @@ __thread struct thread_data td; -#define PUSH_ROOT(p) (*(stm_thread_local.shadowstack++) = (object_t *)(p)) -#define POP_ROOT(p) ((p) = (typeof(p))*(--stm_thread_local.shadowstack)) - -void init_shadow_stack(void) -{ - object_t **s = (object_t **)malloc(1000 * sizeof(object_t *)); - assert(s); - stm_thread_local.shadowstack = s; - stm_thread_local.shadowstack_base = s; -} - -void done_shadow_stack(void) -{ - free(stm_thread_local.shadowstack_base); - stm_thread_local.shadowstack = NULL; - stm_thread_local.shadowstack_base = NULL; -} - - ssize_t stmcb_size_rounded_up(struct object_s *ob) { return ((struct node_s*)ob)->my_size; @@ -89,7 +70,7 @@ { int i; for (i = 0; i < SHARED_ROOTS; i++) { - PUSH_ROOT(shared_roots[i]); + STM_PUSH_ROOT(stm_thread_local, shared_roots[i]); } } @@ -97,7 +78,7 @@ { int i; for (i = 0; i < SHARED_ROOTS; i++) { - POP_ROOT(shared_roots[SHARED_ROOTS - i - 1]); + STM_POP_ROOT(stm_thread_local, shared_roots[SHARED_ROOTS - i - 1]); } } @@ -127,12 +108,12 @@ assert(td.num_roots == td.num_roots_at_transaction_start); for (i = td.num_roots_at_transaction_start - 1; i >= 0; i--) { if (td.roots[i]) - POP_ROOT(td.roots[i]); + STM_POP_ROOT(stm_thread_local, td.roots[i]); } for (i = 0; i < td.num_roots_at_transaction_start; i++) { if (td.roots[i]) - PUSH_ROOT(td.roots[i]); + STM_PUSH_ROOT(stm_thread_local, td.roots[i]); } } @@ -141,7 +122,7 @@ int i; for (i = td.num_roots_at_transaction_start; i < td.num_roots; i++) { if (td.roots[i]) - PUSH_ROOT(td.roots[i]); + STM_PUSH_ROOT(stm_thread_local, td.roots[i]); } } @@ -150,7 +131,7 @@ int i; for (i = td.num_roots - 1; i >= td.num_roots_at_transaction_start; i--) { if (td.roots[i]) - POP_ROOT(td.roots[i]); + STM_POP_ROOT(stm_thread_local, td.roots[i]); } } @@ -304,7 +285,6 @@ { int status; stm_register_thread_local(&stm_thread_local); - init_shadow_stack(); /* forever on the shadowstack: */ _push_shared_roots(); @@ -342,7 +322,6 @@ } stm_commit_transaction(); - done_shadow_stack(); stm_unregister_thread_local(&stm_thread_local); status = sem_post(&done); assert(status == 0); @@ -368,7 +347,7 @@ for (i = 0; i < SHARED_ROOTS; i++) { shared_roots[i] = stm_allocate(sizeof(struct node_s)); ((nodeptr_t)shared_roots[i])->my_size = sizeof(struct 
node_s); - PUSH_ROOT(shared_roots[i]); + STM_PUSH_ROOT(stm_thread_local, shared_roots[i]); } stm_commit_transaction(); @@ -398,7 +377,6 @@ stm_setup(); stm_register_thread_local(&stm_thread_local); - init_shadow_stack(); setup_globals(); @@ -421,7 +399,6 @@ printf("Test OK!\n"); _pop_shared_roots(); - done_shadow_stack(); stm_unregister_thread_local(&stm_thread_local); stm_teardown(); diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -29,6 +29,7 @@ #define WRITELOCK_START ((END_NURSERY_PAGE * 4096UL) >> 4) #define WRITELOCK_END READMARKER_END +#define SHADOW_STACK_SIZE 1000 enum /* stm_flags */ { /* This flag is set on non-nursery objects. It forces stm_write() diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -103,6 +103,22 @@ teardown_nursery(); } +void _init_shadow_stack(stm_thread_local_t *tl) +{ + object_t **s = (object_t **)malloc(SHADOW_STACK_SIZE * sizeof(object_t *)); + assert(s); + tl->shadowstack = s; + tl->shadowstack_base = s; +} + +void _done_shadow_stack(stm_thread_local_t *tl) +{ + free(tl->shadowstack_base); + tl->shadowstack = NULL; + tl->shadowstack_base = NULL; +} + + void stm_register_thread_local(stm_thread_local_t *tl) { int num; @@ -123,12 +139,14 @@ numbers automatically. */ num = num % NB_SEGMENTS; tl->associated_segment_num = num; + _init_shadow_stack(tl); set_gs_register(get_segment_base(num)); } void stm_unregister_thread_local(stm_thread_local_t *tl) { assert(tl->next != NULL); + _done_shadow_stack(tl); if (tl == stm_all_thread_locals) { stm_all_thread_locals = stm_all_thread_locals->next; if (tl == stm_all_thread_locals) { diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -191,6 +191,12 @@ void stm_setup(void); void stm_teardown(void); +/* Push and pop roots from/to the shadow stack. Only allowed inside + transaction. */ +#define STM_PUSH_ROOT(tl, p) (*((tl).shadowstack++) = (object_t *)(p)) +#define STM_POP_ROOT(tl, p) ((p) = (typeof(p))*(--(tl).shadowstack)) + + /* Every thread needs to have a corresponding stm_thread_local_t structure. It may be a "__thread" global variable or something else. Use the following functions at the start and at the end of a thread. From noreply at buildbot.pypy.org Wed Feb 26 14:13:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 26 Feb 2014 14:13:54 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: The tree data structure can explode when fed non-aligned addresses. Message-ID: <20140226131354.59D5C1C15A7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r867:e6d52f7b3ef1 Date: 2014-02-26 14:13 +0100 http://bitbucket.org/pypy/stmgc/changeset/e6d52f7b3ef1/ Log: The tree data structure can explode when fed non-aligned addresses. Assert that they are aligned and fix the tests. 
diff --git a/c7/stm/list.c b/c7/stm/list.c --- a/c7/stm/list.c +++ b/c7/stm/list.c @@ -54,23 +54,23 @@ static void tree_free(struct tree_s *tree) { free(tree->raw_start); + assert(memset(tree, 0xDD, sizeof(struct tree_s))); free(tree); } static void _tree_compress(struct tree_s *tree) { - wlog_t *item; - struct tree_s tree_copy; - memset(&tree_copy, 0, sizeof(struct tree_s)); + wlog_t *item; + struct tree_s tree_copy; + memset(&tree_copy, 0, sizeof(struct tree_s)); - TREE_LOOP_FORWARD(*tree, item) - { - tree_insert(&tree_copy, item->addr, item->val); + TREE_LOOP_FORWARD(*tree, item) { + tree_insert(&tree_copy, item->addr, item->val); } TREE_LOOP_END; - free(tree->raw_start); - *tree = tree_copy; + free(tree->raw_start); + *tree = tree_copy; } static wlog_t *_tree_find(char *entry, uintptr_t addr) @@ -122,6 +122,7 @@ static void tree_insert(struct tree_s *tree, uintptr_t addr, uintptr_t val) { assert(addr != 0); /* the NULL key is reserved */ + assert(!(addr & (sizeof(void *) - 1))); /* the key must be aligned */ retry:; wlog_t *wlog; uintptr_t key = addr; @@ -129,6 +130,7 @@ char *p = (char *)(tree->toplevel.items); char *entry; while (1) { + assert(shift < TREE_DEPTH_MAX * TREE_BITS); p += (key >> shift) & TREE_MASK; shift += TREE_BITS; entry = *(char **)p; diff --git a/c7/stm/list.h b/c7/stm/list.h --- a/c7/stm/list.h +++ b/c7/stm/list.h @@ -138,6 +138,7 @@ continue; \ if (((long)_entry) & 1) \ { /* points to a further level: enter it */ \ + assert(_stackp - _stack < TREE_DEPTH_MAX); \ _stackp->next = _next; \ _stackp->end = _end; \ _stackp++; \ diff --git a/c7/test/test_list.py b/c7/test/test_list.py --- a/c7/test/test_list.py +++ b/c7/test/test_list.py @@ -65,34 +65,34 @@ def test_tree_add(): t = lib.tree_create() - lib.tree_insert(t, 23, 456) - for i in range(100): - assert lib.tree_contains(t, i) == (i == 23) + lib.tree_insert(t, 23000, 456) + for i in range(0, 100000, 1000): + assert lib.tree_contains(t, i) == (i == 23000) lib.tree_free(t) def test_tree_is_cleared(): t = lib.tree_create() assert lib.tree_is_cleared(t) - lib.tree_insert(t, 23, 456) + lib.tree_insert(t, 23000, 456) assert not lib.tree_is_cleared(t) lib.tree_free(t) def test_tree_delete_item(): t = lib.tree_create() - lib.tree_insert(t, 23, 456) - lib.tree_insert(t, 42, 34289) + lib.tree_insert(t, 23000, 456) + lib.tree_insert(t, 42000, 34289) assert not lib.tree_is_cleared(t) - assert lib.tree_contains(t, 23) - res = lib.tree_delete_item(t, 23) + assert lib.tree_contains(t, 23000) + res = lib.tree_delete_item(t, 23000) assert res - assert not lib.tree_contains(t, 23) - res = lib.tree_delete_item(t, 23) + assert not lib.tree_contains(t, 23000) + res = lib.tree_delete_item(t, 23000) assert not res - res = lib.tree_delete_item(t, 21) + res = lib.tree_delete_item(t, 21000) assert not res assert not lib.tree_is_cleared(t) - assert lib.tree_contains(t, 42) - res = lib.tree_delete_item(t, 42) + assert lib.tree_contains(t, 42000) + res = lib.tree_delete_item(t, 42000) assert res assert not lib.tree_is_cleared(t) # not cleared, but still empty for i in range(100): @@ -101,18 +101,18 @@ def test_tree_walk(): t = lib.tree_create() - lib.tree_insert(t, 23, 456) - lib.tree_insert(t, 42, 34289) + lib.tree_insert(t, 23000, 456) + lib.tree_insert(t, 42000, 34289) a = ffi.new("uintptr_t[10]") res = lib.test_tree_walk(t, a) assert res == 2 - assert a[0] == 23 - assert a[1] == 42 + assert ((a[0] == 23000 and a[1] == 42000) or + (a[0] == 42000 and a[1] == 23000)) lib.tree_free(t) def test_tree_walk_big(): t = lib.tree_create() - values = 
[random.randrange(0, 1000000) for i in range(300)] + values = random.sample(xrange(0, 1000000, 8), 300) for x in values: lib.tree_insert(t, x, x) a = ffi.new("uintptr_t[1000]") From noreply at buildbot.pypy.org Wed Feb 26 14:18:27 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 26 Feb 2014 14:18:27 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: start adapting duhton Message-ID: <20140226131827.AC0DF1C15A7@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r868:a39bcb6b49c1 Date: 2014-02-26 14:18 +0100 http://bitbucket.org/pypy/stmgc/changeset/a39bcb6b49c1/ Log: start adapting duhton diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -66,16 +66,17 @@ void _stm_start_transaction(stm_thread_local_t *, stm_jmpbuf_t *); void _stm_collectable_safe_point(void); +/* for tests, but also used in duhton: */ +object_t *_stm_allocate_old(ssize_t size_rounded_up); +char *_stm_real_address(object_t *o); #ifdef STM_TESTS bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); uint8_t _stm_creation_marker(object_t *obj); bool _stm_in_nursery(object_t *obj); bool _stm_in_transaction(stm_thread_local_t *tl); -char *_stm_real_address(object_t *o); char *_stm_get_segment_base(long index); void _stm_test_switch(stm_thread_local_t *tl); -object_t *_stm_allocate_old(ssize_t size_rounded_up); void _stm_largemalloc_init_arena(char *data_start, size_t data_size); int _stm_largemalloc_resize_arena(size_t new_size); char *_stm_largemalloc_data_start(void); diff --git a/duhton/Makefile b/duhton/Makefile --- a/duhton/Makefile +++ b/duhton/Makefile @@ -14,7 +14,7 @@ duhton_debug: *.c *.h $(C7SOURCES) $(C7HEADERS) - clang -pthread -g -DDu_DEBUG -o duhton_debug *.c ../c7/stmgc.c -Wall + clang -DSTM_DEBUGPRINT -pthread -g -DDu_DEBUG -o duhton_debug *.c ../c7/stmgc.c -Wall clean: - rm -f duhton duhton_debug + rm -f duhton duhton_debug duhton_release diff --git a/duhton/duhton.c b/duhton/duhton.c --- a/duhton/duhton.c +++ b/duhton/duhton.c @@ -41,24 +41,24 @@ printf("))) "); fflush(stdout); } - stm_start_inevitable_transaction(); + stm_start_inevitable_transaction(&stm_thread_local); DuObject *code = Du_Compile(filename, interactive); if (code == NULL) { printf("\n"); break; } - + DuObject *res = Du_Eval(code, Du_Globals); if (interactive) { Du_Print(res, 1); } _du_save1(stm_thread_local_obj); - _stm_minor_collect(); /* hack... */ + stm_collect(0); /* hack... 
*/ _du_restore1(stm_thread_local_obj); - - stm_stop_transaction(); + + stm_commit_transaction(); Du_TransactionRun(); if (!interactive) diff --git a/duhton/duhton.h b/duhton/duhton.h --- a/duhton/duhton.h +++ b/duhton/duhton.h @@ -11,6 +11,7 @@ the program */ #define DEFAULT_NUM_THREADS 2 +extern __thread stm_thread_local_t stm_thread_local; struct DuObject_s { struct object_s header; @@ -191,23 +192,26 @@ #endif -#ifdef NDEBUG -# define _push_root(ob) stm_push_root((object_t *)ob) -# define _pop_root() stm_pop_root() -#else +#ifndef NDEBUG # define _check_not_free(ob) \ assert(_DuObject_TypeNum(ob) > DUTYPE_INVALID && \ _DuObject_TypeNum(ob) < _DUTYPE_TOTAL) +#endif + static inline void _push_root(DuObject *ob) { + #ifndef NDEBUG if (ob) _check_not_free(ob); - stm_push_root((object_t *)ob); + #endif + STM_PUSH_ROOT(stm_thread_local, ob); } static inline object_t *_pop_root(void) { - object_t *ob = stm_pop_root(); + object_t *ob; + STM_POP_ROOT(stm_thread_local, ob); + #ifndef NDEBUG if (ob) _check_not_free(ob); + #endif return ob; } -#endif extern pthread_t *all_threads; extern int all_threads_count; diff --git a/duhton/frame.c b/duhton/frame.c --- a/duhton/frame.c +++ b/duhton/frame.c @@ -48,15 +48,17 @@ void init_prebuilt_frame_objects(void) { du_empty_framenode = (DuFrameNodeObject *) - stm_allocate_prebuilt(sizeof(DuFrameNodeObject)); + _stm_allocate_old(sizeof(DuFrameNodeObject)); du_empty_framenode->ob_base.type_id = DUTYPE_FRAMENODE; du_empty_framenode->ob_count = 0; DuFrameObject *g = (DuFrameObject *) - stm_allocate_prebuilt(sizeof(DuFrameObject)); + _stm_allocate_old(sizeof(DuFrameObject)); g->ob_base.type_id = DUTYPE_FRAME; g->ob_nodes = du_empty_framenode; Du_Globals = (DuObject *)g; + + _du_save2(du_empty_framenode, Du_Globals); } DuObject *DuFrame_New() @@ -203,7 +205,7 @@ _du_save1(frame); dictentry_t *e = find_entry((DuFrameObject *)frame, sym, 1); _du_restore1(frame); - + _du_write1(frame); /* e is part of frame or a new object */ e->builtin_macro = func; } diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -147,12 +147,12 @@ /* _du_read1(cons); IMMUTABLE */ DuObject *expr = _DuCons_CAR(cons); DuObject *next = _DuCons_NEXT(cons); - + _du_save2(next, locals); DuObject *obj = Du_Eval(expr, locals); result = DuInt_AsInt(obj); _du_restore2(next, locals); - + cons = next; while (cons != Du_None) { @@ -167,7 +167,7 @@ cons = next; } - + return DuInt_FromInt(result); } @@ -177,12 +177,12 @@ /* _du_read1(cons); IMMUTABLE */ DuObject *expr = _DuCons_CAR(cons); DuObject *next = _DuCons_NEXT(cons); - + _du_save2(next, locals); DuObject *obj = Du_Eval(expr, locals); result = DuInt_AsInt(obj); _du_restore2(next, locals); - + cons = next; while (cons != Du_None) { @@ -197,7 +197,7 @@ cons = next; } - + return DuInt_FromInt(result); } @@ -207,12 +207,12 @@ /* _du_read1(cons); IMMUTABLE */ DuObject *expr = _DuCons_CAR(cons); DuObject *next = _DuCons_NEXT(cons); - + _du_save2(next, locals); DuObject *obj = Du_Eval(expr, locals); result = DuInt_AsInt(obj); _du_restore2(next, locals); - + cons = next; while (cons != Du_None) { @@ -227,7 +227,7 @@ cons = next; } - + return DuInt_FromInt(result); } @@ -686,14 +686,14 @@ Du_FatalError("run-transactions: expected no argument"); _du_save1(stm_thread_local_obj); - _stm_minor_collect(); /* hack... */ + stm_collect(0); /* hack... 
*/ _du_restore1(stm_thread_local_obj); - - stm_stop_transaction(); - + + stm_commit_transaction(); + Du_TransactionRun(); - - stm_start_inevitable_transaction(); + + stm_start_inevitable_transaction(&stm_thread_local); return Du_None; } @@ -718,7 +718,7 @@ long mtime; gettimeofday(¤t, NULL); - + mtime = ((current.tv_sec) * 1000 + current.tv_usec/1000.0) + 0.5; return DuInt_FromInt(mtime & 0x7fffffff); /* make it always positive 32bit */ } @@ -771,15 +771,17 @@ void Du_Initialize(int num_threads) { stm_setup(); - stm_setup_pthread(); + stm_register_thread_local(&stm_thread_local); - stm_start_inevitable_transaction(); + stm_start_inevitable_transaction(&stm_thread_local); + /* allocate old and push on shadowstack: */ init_prebuilt_object_objects(); init_prebuilt_symbol_objects(); init_prebuilt_list_objects(); init_prebuilt_frame_objects(); init_prebuilt_transaction_objects(); + /* prebuilt objs stay on the shadowstack forever */ all_threads_count = num_threads; all_threads = (pthread_t*)malloc(sizeof(pthread_t) * num_threads); @@ -827,11 +829,11 @@ DuFrame_SetBuiltinMacro(Du_Globals, "pair?", du_pair); DuFrame_SetBuiltinMacro(Du_Globals, "assert", du_assert); DuFrame_SetSymbolStr(Du_Globals, "None", Du_None); - stm_stop_transaction(); + stm_commit_transaction(); } void Du_Finalize(void) { - stm_teardown_pthread(); + stm_unregister_thread_local(&stm_thread_local); stm_teardown(); } diff --git a/duhton/listobject.c b/duhton/listobject.c --- a/duhton/listobject.c +++ b/duhton/listobject.c @@ -99,7 +99,7 @@ DuTupleObject *newitems = DuTuple_New(overallocated_size(newcount)); newitems->ob_count = newcount; _du_restore3(ob, x, olditems); - + _du_write1(ob); for (i=0; iob_items[newcount-1] = x; ob->ob_tuple = newitems; - } + } } void DuList_Append(DuObject *ob, DuObject *item) @@ -206,10 +206,11 @@ void init_prebuilt_list_objects(void) { du_empty_tuple = (DuTupleObject *) - stm_allocate_prebuilt(sizeof(DuTupleObject)); + _stm_allocate_old(sizeof(DuTupleObject)); du_empty_tuple->ob_base.type_id = DUTYPE_TUPLE; du_empty_tuple->ob_count = 0; du_empty_tuple->ob_capacity = 0; + _du_save1(du_empty_tuple); } DuObject *DuList_New() diff --git a/duhton/object.c b/duhton/object.c --- a/duhton/object.c +++ b/duhton/object.c @@ -17,7 +17,7 @@ /* callback: get the size of an object */ -size_t stmcb_size(struct object_s *obj) +ssize_t stmcb_size_rounded_up(struct object_s *obj) { DuType *tp = Du_Types[((struct DuObject_s *)obj)->type_id]; size_t result = tp->dt_size; @@ -69,8 +69,9 @@ void init_prebuilt_object_objects(void) { - Du_None = (DuObject *)stm_allocate_prebuilt(sizeof(DuObject)); + Du_None = (DuObject *)_stm_allocate_old(sizeof(DuObject)); Du_None->type_id = DUTYPE_NONE; + _du_save1(Du_None); } void Du_FatalError(char *msg, ...) 
diff --git a/duhton/symbol.c b/duhton/symbol.c --- a/duhton/symbol.c +++ b/duhton/symbol.c @@ -56,11 +56,12 @@ void init_prebuilt_symbol_objects(void) { _Du_AllSymbols = (DuSymbolObject *) - stm_allocate_prebuilt(sizeof(DuSymbolObject)); + _stm_allocate_old(sizeof(DuSymbolObject)); _Du_AllSymbols->ob_base.type_id = DUTYPE_SYMBOL; _Du_AllSymbols->myid = 0; _Du_AllSymbols->name = ""; _Du_AllSymbols->next = NULL; + _du_save1(_Du_AllSymbols); } DuObject *DuSymbol_FromString(const char *name) diff --git a/duhton/transaction.c b/duhton/transaction.c --- a/duhton/transaction.c +++ b/duhton/transaction.c @@ -2,7 +2,7 @@ #include #include - +__thread stm_thread_local_t stm_thread_local; static DuConsObject *du_pending_transactions; void init_prebuilt_transaction_objects(void) @@ -10,10 +10,12 @@ assert(Du_None); /* already created */ du_pending_transactions = (DuConsObject *) - stm_allocate_prebuilt(sizeof(DuConsObject)); + _stm_allocate_old(sizeof(DuConsObject)); du_pending_transactions->ob_base.type_id = DUTYPE_CONS; du_pending_transactions->car = NULL; du_pending_transactions->cdr = Du_None; + + _du_save1(du_pending_transactions); }; static pthread_mutex_t mutex_sleep = PTHREAD_MUTEX_INITIALIZER; @@ -61,14 +63,14 @@ if (stm_thread_local_obj == NULL) return; - stm_start_inevitable_transaction(); - + stm_start_inevitable_transaction(&stm_thread_local); + DuConsObject *root = du_pending_transactions; _du_write1(root); root->cdr = stm_thread_local_obj; - - stm_stop_transaction(); - + + stm_commit_transaction(); + stm_thread_local_obj = NULL; run_all_threads(); @@ -79,18 +81,15 @@ static DuObject *next_cell(void) { DuObject *pending = stm_thread_local_obj; - jmpbufptr_t here; if (pending == NULL) { /* fish from the global list of pending transactions */ DuConsObject *root; - while (__builtin_setjmp(here) == 1) { } restart: - // stm_start_transaction(&here); /* this code is critical enough so that we want it to be serialized perfectly using inevitable transactions */ - stm_start_inevitable_transaction(); + stm_start_inevitable_transaction(&stm_thread_local); root = du_pending_transactions; _du_read1(root); /* not immutable... */ @@ -103,12 +102,12 @@ DuObject *result = _DuCons_CAR(cell); root->cdr = _DuCons_NEXT(cell); - stm_stop_transaction(); + stm_commit_transaction(); return result; } else { - stm_stop_transaction(); + stm_commit_transaction(); /* nothing to do, wait */ int ts = __sync_add_and_fetch(&thread_sleeping, 1); @@ -134,10 +133,8 @@ /* we have at least one thread-local transaction pending */ stm_thread_local_obj = NULL; - while (__builtin_setjmp(here) == 1) { } - //stm_start_transaction(&here); - stm_start_inevitable_transaction(); - + stm_start_inevitable_transaction(&stm_thread_local); + /* _du_read1(pending); IMMUTABLE */ DuObject *result = _DuCons_CAR(pending); DuObject *next = _DuCons_NEXT(pending); @@ -161,7 +158,7 @@ root->cdr = next; } - stm_stop_transaction(); + stm_commit_transaction(); return result; } @@ -175,8 +172,8 @@ void *run_thread(void *thread_id) { - jmpbufptr_t here; - stm_setup_pthread(); + stm_jmpbuf_t here; + stm_register_thread_local(&stm_thread_local); stm_thread_local_obj = NULL; @@ -186,20 +183,19 @@ break; assert(stm_thread_local_obj == NULL); - while (__builtin_setjmp(here) == 1) { } - stm_start_transaction(&here); - + STM_START_TRANSACTION(&stm_thread_local, here); + run_transaction(cell); - + _du_save1(stm_thread_local_obj); - _stm_minor_collect(); /* hack.. */ + stm_collect(0); /* hack.. 
*/ _du_restore1(stm_thread_local_obj); - - stm_stop_transaction(); + + stm_commit_transaction(); } - stm_teardown_pthread(); + stm_unregister_thread_local(&stm_thread_local); return NULL; } From noreply at buildbot.pypy.org Wed Feb 26 14:18:28 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 26 Feb 2014 14:18:28 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Merge Message-ID: <20140226131828.A28201C15A7@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r869:4964c4924c65 Date: 2014-02-26 14:18 +0100 http://bitbucket.org/pypy/stmgc/changeset/4964c4924c65/ Log: Merge diff --git a/c7/stm/list.c b/c7/stm/list.c --- a/c7/stm/list.c +++ b/c7/stm/list.c @@ -54,23 +54,23 @@ static void tree_free(struct tree_s *tree) { free(tree->raw_start); + assert(memset(tree, 0xDD, sizeof(struct tree_s))); free(tree); } static void _tree_compress(struct tree_s *tree) { - wlog_t *item; - struct tree_s tree_copy; - memset(&tree_copy, 0, sizeof(struct tree_s)); + wlog_t *item; + struct tree_s tree_copy; + memset(&tree_copy, 0, sizeof(struct tree_s)); - TREE_LOOP_FORWARD(*tree, item) - { - tree_insert(&tree_copy, item->addr, item->val); + TREE_LOOP_FORWARD(*tree, item) { + tree_insert(&tree_copy, item->addr, item->val); } TREE_LOOP_END; - free(tree->raw_start); - *tree = tree_copy; + free(tree->raw_start); + *tree = tree_copy; } static wlog_t *_tree_find(char *entry, uintptr_t addr) @@ -122,6 +122,7 @@ static void tree_insert(struct tree_s *tree, uintptr_t addr, uintptr_t val) { assert(addr != 0); /* the NULL key is reserved */ + assert(!(addr & (sizeof(void *) - 1))); /* the key must be aligned */ retry:; wlog_t *wlog; uintptr_t key = addr; @@ -129,6 +130,7 @@ char *p = (char *)(tree->toplevel.items); char *entry; while (1) { + assert(shift < TREE_DEPTH_MAX * TREE_BITS); p += (key >> shift) & TREE_MASK; shift += TREE_BITS; entry = *(char **)p; diff --git a/c7/stm/list.h b/c7/stm/list.h --- a/c7/stm/list.h +++ b/c7/stm/list.h @@ -138,6 +138,7 @@ continue; \ if (((long)_entry) & 1) \ { /* points to a further level: enter it */ \ + assert(_stackp - _stack < TREE_DEPTH_MAX); \ _stackp->next = _next; \ _stackp->end = _end; \ _stackp++; \ diff --git a/c7/test/test_list.py b/c7/test/test_list.py --- a/c7/test/test_list.py +++ b/c7/test/test_list.py @@ -65,34 +65,34 @@ def test_tree_add(): t = lib.tree_create() - lib.tree_insert(t, 23, 456) - for i in range(100): - assert lib.tree_contains(t, i) == (i == 23) + lib.tree_insert(t, 23000, 456) + for i in range(0, 100000, 1000): + assert lib.tree_contains(t, i) == (i == 23000) lib.tree_free(t) def test_tree_is_cleared(): t = lib.tree_create() assert lib.tree_is_cleared(t) - lib.tree_insert(t, 23, 456) + lib.tree_insert(t, 23000, 456) assert not lib.tree_is_cleared(t) lib.tree_free(t) def test_tree_delete_item(): t = lib.tree_create() - lib.tree_insert(t, 23, 456) - lib.tree_insert(t, 42, 34289) + lib.tree_insert(t, 23000, 456) + lib.tree_insert(t, 42000, 34289) assert not lib.tree_is_cleared(t) - assert lib.tree_contains(t, 23) - res = lib.tree_delete_item(t, 23) + assert lib.tree_contains(t, 23000) + res = lib.tree_delete_item(t, 23000) assert res - assert not lib.tree_contains(t, 23) - res = lib.tree_delete_item(t, 23) + assert not lib.tree_contains(t, 23000) + res = lib.tree_delete_item(t, 23000) assert not res - res = lib.tree_delete_item(t, 21) + res = lib.tree_delete_item(t, 21000) assert not res assert not lib.tree_is_cleared(t) - assert lib.tree_contains(t, 42) - res = lib.tree_delete_item(t, 42) + assert 
lib.tree_contains(t, 42000) + res = lib.tree_delete_item(t, 42000) assert res assert not lib.tree_is_cleared(t) # not cleared, but still empty for i in range(100): @@ -101,18 +101,18 @@ def test_tree_walk(): t = lib.tree_create() - lib.tree_insert(t, 23, 456) - lib.tree_insert(t, 42, 34289) + lib.tree_insert(t, 23000, 456) + lib.tree_insert(t, 42000, 34289) a = ffi.new("uintptr_t[10]") res = lib.test_tree_walk(t, a) assert res == 2 - assert a[0] == 23 - assert a[1] == 42 + assert ((a[0] == 23000 and a[1] == 42000) or + (a[0] == 42000 and a[1] == 23000)) lib.tree_free(t) def test_tree_walk_big(): t = lib.tree_create() - values = [random.randrange(0, 1000000) for i in range(300)] + values = random.sample(xrange(0, 1000000, 8), 300) for x in values: lib.tree_insert(t, x, x) a = ffi.new("uintptr_t[1000]") From noreply at buildbot.pypy.org Wed Feb 26 14:24:28 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 26 Feb 2014 14:24:28 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Move stuff around: this removes one check on each object. Message-ID: <20140226132428.107081C15A7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r870:cdd10e0a9914 Date: 2014-02-26 14:24 +0100 http://bitbucket.org/pypy/stmgc/changeset/cdd10e0a9914/ Log: Move stuff around: this removes one check on each object. diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -57,19 +57,9 @@ /************************************************************/ #define GCWORD_MOVED ((object_t *) -42) -#define FLAG_SYNC_LARGE_NOW 0x01 +#define FLAG_SYNC_LARGE 0x01 -static uintptr_t minor_record_large_overflow_object(object_t *nobj) -{ - uintptr_t nobj_sync_now = (uintptr_t)nobj; - if (STM_PSEGMENT->minor_collect_will_commit_now) - nobj_sync_now |= FLAG_SYNC_LARGE_NOW; - else - LIST_APPEND(STM_PSEGMENT->large_overflow_objects, nobj); - return nobj_sync_now; -} - static void minor_trace_if_young(object_t **pobj) { /* takes a normal pointer to a thread-local pointer to an object */ @@ -110,7 +100,7 @@ char *realnobj = REAL_ADDRESS(STM_SEGMENT->segment_base, nobj); memcpy(realnobj, realobj, size); - nobj_sync_now = minor_record_large_overflow_object(nobj); + nobj_sync_now = ((uintptr_t)nobj) | FLAG_SYNC_LARGE; } else { /* case "small enough" */ @@ -133,7 +123,7 @@ /* a young object outside the nursery */ nobj = obj; tree_delete_item(STM_PSEGMENT->young_outside_nursery, (uintptr_t)nobj); - nobj_sync_now = minor_record_large_overflow_object(nobj); + nobj_sync_now = ((uintptr_t)nobj) | FLAG_SYNC_LARGE; } /* Set the overflow_number if nedeed */ @@ -178,15 +168,20 @@ while (!list_is_empty(lst)) { uintptr_t obj_sync_now = list_pop_item(lst); - object_t *obj = (object_t *)(obj_sync_now & ~FLAG_SYNC_LARGE_NOW); + object_t *obj = (object_t *)(obj_sync_now & ~FLAG_SYNC_LARGE); _collect_now(obj); - if (obj_sync_now & FLAG_SYNC_LARGE_NOW) { - /* synchronize the object to other segments *now* -- which - means, just after we added the WRITE_BARRIER flag and - traced into it, because tracing might change it again. */ - synchronize_overflow_object_now(obj); + if (obj_sync_now & FLAG_SYNC_LARGE) { + /* this was a large object. We must either synchronize the + object to other segments now (after we added the + WRITE_BARRIER flag and traced into it to fix its + content); or add the object to 'large_overflow_objects'. 
+ */ + if (STM_PSEGMENT->minor_collect_will_commit_now) + synchronize_overflow_object_now(obj); + else + LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); } } } From noreply at buildbot.pypy.org Wed Feb 26 15:08:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 26 Feb 2014 15:08:29 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Workaround for another llvm bug Message-ID: <20140226140829.5402E1D23BE@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r871:35baad3a75f6 Date: 2014-02-26 15:08 +0100 http://bitbucket.org/pypy/stmgc/changeset/35baad3a75f6/ Log: Workaround for another llvm bug diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -213,11 +213,15 @@ Use the macro STM_START_TRANSACTION() to start a transaction that can be restarted using the 'jmpbuf' (a local variable of type stm_jmpbuf_t). */ -#define STM_START_TRANSACTION(tl, jmpbuf) ({ \ - int _restart = __builtin_setjmp(jmpbuf); \ - _stm_start_transaction(tl, &jmpbuf); \ - _restart; \ +#define STM_START_TRANSACTION(tl, jmpbuf) ({ \ + int _restart = __builtin_setjmp(jmpbuf) ? _stm_duck() : 0; \ + _stm_start_transaction(tl, &jmpbuf); \ + _restart; \ }) +static inline int _stm_duck(void) { + asm("/* workaround for a llvm bug */"); + return 1; +} /* Start an inevitable transaction, if it's going to return from the current function immediately. */ From noreply at buildbot.pypy.org Wed Feb 26 15:11:39 2014 From: noreply at buildbot.pypy.org (Remi Meier) Date: Wed, 26 Feb 2014 15:11:39 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: lists can move Message-ID: <20140226141139.B93A01C03FC@cobra.cs.uni-duesseldorf.de> Author: Remi Meier Branch: c7-refactor Changeset: r872:335565ce1aae Date: 2014-02-26 15:11 +0100 http://bitbucket.org/pypy/stmgc/changeset/335565ce1aae/ Log: lists can move diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -183,6 +183,9 @@ else LIST_APPEND(STM_PSEGMENT->large_overflow_objects, obj); } + + /* the list could have moved while appending */ + lst = STM_PSEGMENT->objects_pointing_to_nursery; } } From noreply at buildbot.pypy.org Wed Feb 26 15:18:33 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 26 Feb 2014 15:18:33 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Remove this gross hack, and reintroduce the loop: if longjmp() Message-ID: <20140226141833.B6DDD1C03FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r873:c43f36e38277 Date: 2014-02-26 15:18 +0100 http://bitbucket.org/pypy/stmgc/changeset/c43f36e38277/ Log: Remove this gross hack, and reintroduce the loop: if longjmp() is called, make sure we redo the setjmp() diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -214,14 +214,9 @@ can be restarted using the 'jmpbuf' (a local variable of type stm_jmpbuf_t). */ #define STM_START_TRANSACTION(tl, jmpbuf) ({ \ - int _restart = __builtin_setjmp(jmpbuf) ? _stm_duck() : 0; \ + while (__builtin_setjmp(jmpbuf) == 1) { /*redo setjmp*/ } \ _stm_start_transaction(tl, &jmpbuf); \ - _restart; \ }) -static inline int _stm_duck(void) { - asm("/* workaround for a llvm bug */"); - return 1; -} /* Start an inevitable transaction, if it's going to return from the current function immediately. 
*/ From noreply at buildbot.pypy.org Wed Feb 26 15:30:10 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 26 Feb 2014 15:30:10 +0100 (CET) Subject: [pypy-commit] pypy default: improve the embedding interface a little Message-ID: <20140226143010.F19651C15A7@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69463:849a6171a311 Date: 2014-02-26 16:29 +0200 http://bitbucket.org/pypy/pypy/changeset/849a6171a311/ Log: improve the embedding interface a little diff --git a/include/PyPy.h b/include/PyPy.h --- a/include/PyPy.h +++ b/include/PyPy.h @@ -8,9 +8,14 @@ extern "C" { #endif +/* You should call this first once. */ +#define pypy_init(need_threads) do { pypy_asm_stack_bottom(); \ +rpython_startup_code();\ + if (need_threads) pypy_init_threads(); } while (0) -/* You should call this first once. */ +// deprecated interface void rpython_startup_code(void); +void pypy_init_threads(void); /* Initialize the home directory of PyPy. It is necessary to call this. @@ -26,11 +31,10 @@ /* If your program has multiple threads, then you need to call - pypy_init_threads() once at init time, and then pypy_thread_attach() - once in each other thread that just started and in which you want to - run Python code (including via callbacks, see below). + pypy_thread_attach() once in each other thread that just started + and in which you want to run Python code (including via callbacks, + see below). DO NOT CALL IT IN THE MAIN THREAD */ -void pypy_init_threads(void); void pypy_thread_attach(void); diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -23,7 +23,6 @@ .. function:: void pypy_init_threads(void); Initialize threads. Only need to be called if there are any threads involved - XXXX double check .. function:: long pypy_setup_home(char* home, int verbose); @@ -43,7 +42,7 @@ Execute the source code given in the ``source`` argument. Will print the error message to stderr upon failure and return 1, otherwise returns 0. You should really do your own error handling in the source. It'll acquire - + the GIL. .. function:: void pypy_thread_attach(void); @@ -55,12 +54,51 @@ Simple example -------------- +Note that this API is a lot more minimal than say CPython C API, so at first +it's obvious to think that you can't do much. However, the trick is to do +all the logic in Python and expose it via `cffi`_ callbacks. Let's assume +we're on linux and pypy is put in ``/opt/pypy`` (a source checkout) and +library is in ``/opt/pypy/libpypy-c.so``. We write a little C program +(for simplicity assuming that all operations will be performed:: + + #include "include/PyPy.h" + #include + + const char source[] = "print 'hello from pypy'"; + + int main() + { + int res; + + rpython_startup_code(); + res = pypy_execute_source((char*)source); + if (res) { + printf("Error calling pypy_execute_source!\n"); + } + return res; + } + +If we save it as ``x.c`` now, compile it and run it with:: + + fijal at hermann:/opt/pypy$ gcc -o x x.c -lpypy-c -L. + fijal at hermann:~/src/pypy$ LD_LIBRARY_PATH=. ./x + hello from pypy + +Worked! + +More advanced example +--------------------- + +Typically we need something more to do than simply execute source. The following +is a fully fledged example, please consult cffi documentation for details. + +xxx Threading --------- XXXX I don't understand what's going on, discuss with unbit -.. _`cffi`: xxx -.. _`uwsgi`: xxx -.. _`PyPy uwsgi plugin`: xxx +.. _`cffi`: http://cffi.readthedocs.org/ +.. 
_`uwsgi`: http://uwsgi-docs.readthedocs.org/en/latest/ +.. _`PyPy uwsgi plugin`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -82,6 +82,7 @@ from rpython.rlib.entrypoint import entrypoint from rpython.rtyper.lltypesystem import rffi, lltype + from rpython.rtyper.lltypesystem.lloperation import llop w_pathsetter = space.appexec([], """(): def f(path): @@ -93,6 +94,7 @@ @entrypoint('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): from pypy.module.sys.initpath import pypy_find_stdlib + llop.gc_stack_bottom(lltype.Void) verbose = rffi.cast(lltype.Signed, verbose) if ll_home: home = rffi.charp2str(ll_home) @@ -121,6 +123,7 @@ @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): rffi.aroundstate.after() + llop.gc_stack_bottom(lltype.Void) source = rffi.charp2str(ll_source) res = _pypy_execute_source(source) rffi.aroundstate.before() From noreply at buildbot.pypy.org Wed Feb 26 15:31:00 2014 From: noreply at buildbot.pypy.org (fijal) Date: Wed, 26 Feb 2014 15:31:00 +0100 (CET) Subject: [pypy-commit] pypy default: update this example Message-ID: <20140226143100.8A6D01C15A7@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69464:62604e2af5d2 Date: 2014-02-26 16:30 +0200 http://bitbucket.org/pypy/pypy/changeset/62604e2af5d2/ Log: update this example diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -14,15 +14,12 @@ to make a full API working, provided you'll follow a few principles. The API is: -.. function:: void rpython_startup_code(void); +.. function:: void pypy_init(int need_threads); This is a function that you have to call (once) before calling anything. It initializes the RPython/PyPy GC and does a bunch of necessary startup - code. This function cannot fail. - -.. function:: void pypy_init_threads(void); - - Initialize threads. Only need to be called if there are any threads involved + code. This function cannot fail. Pass 1 in case you need thread support, 0 + otherwise. .. function:: long pypy_setup_home(char* home, int verbose); @@ -49,7 +46,7 @@ In case your application uses threads that are initialized outside of PyPy, you need to call this function to tell the PyPy GC to track this thread. Note that this function is not thread-safe itself, so you need to guard it - with a mutex. + with a mutex. Do not call it from the main thread. 
Simple example -------------- From noreply at buildbot.pypy.org Wed Feb 26 16:18:07 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 26 Feb 2014 16:18:07 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Move this logic in common.py, otherwise when running only test_list.py Message-ID: <20140226151807.820EB1C02FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r874:00e254f45cf1 Date: 2014-02-26 16:17 +0100 http://bitbucket.org/pypy/stmgc/changeset/00e254f45cf1/ Log: Move this logic in common.py, otherwise when running only test_list.py the C files are not rebuilt diff --git a/c7/test/common.py b/c7/test/common.py --- a/c7/test/common.py +++ b/c7/test/common.py @@ -6,3 +6,22 @@ os.environ['CC'] = 'clang' parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +# ---------- + +source_files = [os.path.join(parent_dir, "stmgc.c")] +all_files = [os.path.join(parent_dir, "stmgc.h"), + os.path.join(parent_dir, "stmgc.c")] + [ + os.path.join(parent_dir, 'stm', _n) + for _n in os.listdir(os.path.join(parent_dir, 'stm')) + if _n.endswith('.h') or _n.endswith('.c')] + +_pycache_ = os.path.join(parent_dir, 'test', '__pycache__') +if os.path.exists(_pycache_): + _fs = [_f for _f in os.listdir(_pycache_) if _f.startswith('_cffi_')] + if _fs: + _fsmtime = min(os.stat(os.path.join(_pycache_, _f)).st_mtime + for _f in _fs) + if any(os.stat(src).st_mtime >= _fsmtime for src in all_files): + import shutil + shutil.rmtree(_pycache_) diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -1,25 +1,6 @@ import os import cffi, weakref -from common import parent_dir - -# ---------- - -source_files = [os.path.join(parent_dir, "stmgc.c")] -all_files = [os.path.join(parent_dir, "stmgc.h"), - os.path.join(parent_dir, "stmgc.c")] + [ - os.path.join(parent_dir, 'stm', _n) - for _n in os.listdir(os.path.join(parent_dir, 'stm')) - if _n.endswith('.h') or _n.endswith('.c')] - -_pycache_ = os.path.join(parent_dir, 'test', '__pycache__') -if os.path.exists(_pycache_): - _fs = [_f for _f in os.listdir(_pycache_) if _f.startswith('_cffi_')] - if _fs: - _fsmtime = min(os.stat(os.path.join(_pycache_, _f)).st_mtime - for _f in _fs) - if any(os.stat(src).st_mtime >= _fsmtime for src in all_files): - import shutil - shutil.rmtree(_pycache_) +from common import parent_dir, source_files # ---------- From noreply at buildbot.pypy.org Wed Feb 26 16:23:54 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 26 Feb 2014 16:23:54 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: fix numpypy for the new int_w behavior Message-ID: <20140226152354.C52151C02FC@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69465:0cb2938c3b1b Date: 2014-02-26 16:22 +0100 http://bitbucket.org/pypy/pypy/changeset/0cb2938c3b1b/ Log: fix numpypy for the new int_w behavior diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -156,8 +156,7 @@ assert isinstance(w_obj, FloatObject) return w_obj.floatval - def int_w(self, w_obj): - XXX # fix this + def int_w(self, w_obj, allow_conversion=True): if isinstance(w_obj, IntObject): return w_obj.intval elif isinstance(w_obj, FloatObject): diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -365,8 +365,7 @@ pass class 
W_IntegerBox(W_NumberBox): - def int_w(self, space): - XXX # fix this + def _int_w(self, space): return space.int_w(self.descr_int(space)) class W_SignedIntegerBox(W_IntegerBox): From noreply at buildbot.pypy.org Wed Feb 26 16:29:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 26 Feb 2014 16:29:11 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: Kill this logic and comment; no longer applies. Message-ID: <20140226152911.EF47C1C02FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r875:6da8d8bef599 Date: 2014-02-26 16:28 +0100 http://bitbucket.org/pypy/stmgc/changeset/6da8d8bef599/ Log: Kill this logic and comment; no longer applies. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -172,12 +172,7 @@ uint8_t old_rv = STM_SEGMENT->transaction_read_version; STM_SEGMENT->transaction_read_version = old_rv + 1; - if (UNLIKELY(old_rv >= 0xfe)) { - /* reset if transaction_read_version was 0xfe or 0xff. If it's - 0xff, then we need it because the new value would overflow to - 0. But resetting it already from 0xfe is better for short - or medium transactions: at the next minor collection we'll - still have one free number to increase to. */ + if (UNLIKELY(old_rv == 0xff)) { reset_transaction_read_version(); } From noreply at buildbot.pypy.org Wed Feb 26 17:12:16 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 26 Feb 2014 17:12:16 +0100 (CET) Subject: [pypy-commit] stmgc c7-refactor: (arigo, Remi) missing: preventing a transaction from _starting_ Message-ID: <20140226161216.214071C03FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c7-refactor Changeset: r876:d6cbf69ae9ed Date: 2014-02-26 17:12 +0100 http://bitbucket.org/pypy/stmgc/changeset/d6cbf69ae9ed/ Log: (arigo, Remi) missing: preventing a transaction from _starting_ in inevitable mode if there is already an inevitable transaction running diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -151,8 +151,14 @@ { mutex_lock_no_abort(); + retry: + if (jmpbuf == NULL) { + wait_for_end_of_inevitable_transaction(false); + } + + if (!acquire_thread_segment(tl)) + goto retry; /* GS invalid before this point! */ - acquire_thread_segment(tl); assert(STM_PSEGMENT->safe_point == SP_NO_TRANSACTION); assert(STM_PSEGMENT->transaction_state == TS_NONE); @@ -376,6 +382,10 @@ STM_PSEGMENT->overflow_number = highest_overflow_number; } + /* if we were inevitable, signal */ + if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) + cond_signal(C_INEVITABLE_DONE); + /* done */ _finish_transaction(); @@ -479,8 +489,6 @@ void _stm_become_inevitable(char *msg) { - long i; - mutex_lock(); switch (STM_PSEGMENT->transaction_state) { @@ -489,11 +497,7 @@ case TS_REGULAR: /* become inevitable */ - for (i = 0; i < NB_SEGMENTS; i++) { - if (get_priv_segment(i)->transaction_state == TS_INEVITABLE) { - abort_with_mutex(); - } - } + wait_for_end_of_inevitable_transaction(true); STM_PSEGMENT->transaction_state = TS_INEVITABLE; break; diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -122,14 +122,13 @@ stm_fatalerror("pthread_cond_signal/%d: %m\n", (int)ctype); } -static void acquire_thread_segment(stm_thread_local_t *tl) +static bool acquire_thread_segment(stm_thread_local_t *tl) { /* This function acquires a segment for the currently running thread, and set up the GS register if it changed. 
*/ assert(_has_mutex()); assert(_is_tl_registered(tl)); - retry:; int num = tl->associated_segment_num; if (sync_ctl.in_use[num] == 0) { /* fast-path: we can get the same segment number than the one @@ -157,7 +156,9 @@ segment will do so by acquiring the mutex and calling cond_signal(C_RELEASE_THREAD_SEGMENT). */ cond_wait_no_abort(C_RELEASE_THREAD_SEGMENT); - goto retry; + + /* Return false to the caller, which will call us again */ + return false; got_num: sync_ctl.in_use[num] = 1; @@ -165,6 +166,7 @@ assert(STM_SEGMENT->running_thread == NULL); STM_SEGMENT->running_thread = tl; STM_PSEGMENT->start_time = ++sync_ctl.global_time; + return true; } static void release_thread_segment(stm_thread_local_t *tl) @@ -178,6 +180,28 @@ sync_ctl.in_use[tl->associated_segment_num] = 0; } +static void wait_for_end_of_inevitable_transaction(bool can_abort) +{ + assert(_has_mutex()); + + long i; + restart: + for (i = 0; i < NB_SEGMENTS; i++) { + if (get_priv_segment(i)->transaction_state == TS_INEVITABLE) { + if (can_abort) { + /* XXX should we wait here? or abort? or a mix? + for now, always abort */ + abort_with_mutex(); + //cond_wait(C_INEVITABLE_DONE); + } + else { + cond_wait_no_abort(C_INEVITABLE_DONE); + } + goto restart; + } + } +} + static bool _running_transaction(void) __attribute__((unused)); static bool _running_transaction(void) { diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -8,6 +8,7 @@ C_RELEASE_THREAD_SEGMENT, C_SAFE_POINT, C_RESUME, + C_INEVITABLE_DONE, _C_TOTAL }; static void mutex_lock(void); @@ -21,8 +22,9 @@ /* acquire and release one of the segments for running the given thread (must have the mutex acquired!) */ -static void acquire_thread_segment(stm_thread_local_t *tl); +static bool acquire_thread_segment(stm_thread_local_t *tl); static void release_thread_segment(stm_thread_local_t *tl); +static void wait_for_end_of_inevitable_transaction(bool can_abort); /* see the source for an exact description */ static void wait_for_other_safe_points(int requested_safe_point_kind); From noreply at buildbot.pypy.org Wed Feb 26 17:24:52 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 26 Feb 2014 17:24:52 +0100 (CET) Subject: [pypy-commit] stmgc c5: close branch Message-ID: <20140226162452.16EFA1C03FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c5 Changeset: r877:c80a6198d3ca Date: 2014-02-26 17:21 +0100 http://bitbucket.org/pypy/stmgc/changeset/c80a6198d3ca/ Log: close branch From noreply at buildbot.pypy.org Wed Feb 26 17:24:53 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 26 Feb 2014 17:24:53 +0100 (CET) Subject: [pypy-commit] stmgc c6: merge Message-ID: <20140226162453.1CDC01C03FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c6 Changeset: r878:bb06ec59eece Date: 2014-02-26 17:21 +0100 http://bitbucket.org/pypy/stmgc/changeset/bb06ec59eece/ Log: merge From noreply at buildbot.pypy.org Wed Feb 26 17:24:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 26 Feb 2014 17:24:54 +0100 (CET) Subject: [pypy-commit] stmgc c6: close branch Message-ID: <20140226162454.1D0B41C03FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: c6 Changeset: r879:d165713156ae Date: 2014-02-26 17:21 +0100 http://bitbucket.org/pypy/stmgc/changeset/d165713156ae/ Log: close branch From noreply at buildbot.pypy.org Wed Feb 26 17:24:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 26 Feb 2014 17:24:55 +0100 (CET) Subject: [pypy-commit] stmgc c7: close branch Message-ID: 
<20140226162455.4F4D41C03FC@cobra.cs.uni-duesseldorf.de>

Author: Armin Rigo
Branch: c7
Changeset: r880:8fe174ce6f5e
Date: 2014-02-26 17:22 +0100
http://bitbucket.org/pypy/stmgc/changeset/8fe174ce6f5e/

Log: close branch

From noreply at buildbot.pypy.org Wed Feb 26 17:24:56 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Wed, 26 Feb 2014 17:24:56 +0100 (CET)
Subject: [pypy-commit] stmgc c7-refactor: merge
Message-ID: <20140226162456.47E751C03FC@cobra.cs.uni-duesseldorf.de>

Author: Armin Rigo
Branch: c7-refactor
Changeset: r881:7fd8f289709f
Date: 2014-02-26 17:22 +0100
http://bitbucket.org/pypy/stmgc/changeset/7fd8f289709f/

Log: merge

From noreply at buildbot.pypy.org Wed Feb 26 17:24:57 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Wed, 26 Feb 2014 17:24:57 +0100 (CET)
Subject: [pypy-commit] stmgc c7-refactor: Close branch, ready to be merged
Message-ID: <20140226162457.755CD1C03FC@cobra.cs.uni-duesseldorf.de>

Author: Armin Rigo
Branch: c7-refactor
Changeset: r882:8d95741d13ef
Date: 2014-02-26 17:22 +0100
http://bitbucket.org/pypy/stmgc/changeset/8d95741d13ef/

Log: Close branch, ready to be merged

From noreply at buildbot.pypy.org Wed Feb 26 17:24:58 2014
From: noreply at buildbot.pypy.org (arigo)
Date: Wed, 26 Feb 2014 17:24:58 +0100 (CET)
Subject: [pypy-commit] stmgc default: hg merge c7-refactor
Message-ID: <20140226162458.E7D641C03FC@cobra.cs.uni-duesseldorf.de>

Author: Armin Rigo
Branch: 
Changeset: r883:842286dfcf39
Date: 2014-02-26 17:23 +0100
http://bitbucket.org/pypy/stmgc/changeset/842286dfcf39/

Log: hg merge c7-refactor

    This is "stmgc-c7", a new version of stmgc, with very good performance.

diff too long, truncating to 2000 out of 9164 lines

diff --git a/.hgignore b/.hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -1,8 +1,9 @@
 syntax: glob
 *.pyc
 *~
-c4/build-demo*
-c4/debug-demo*
-c4/release-demo*
+*/build-demo*
+*/debug-demo*
+*/release-demo*
 *.orig
-c4/test/__pycache__
+*/__pycache__
+*.out.*
diff --git a/c7/README.txt b/c7/README.txt
new file mode 100644
--- /dev/null
+++ b/c7/README.txt
@@ -0,0 +1,228 @@
+============================================================
+STMGC-C7
+============================================================
+
+
+An STM system, focusing on low numbers of CPUs. It requires Linux
+running 64-bit, and must be compiled with clang.
+
+
+The %gs segment prefix
+----------------------
+
+This is a hack using __attribute__((address_space(256))) on structs, which
+makes clang write all pointer dereferences to them using the "%gs:"
+prefix at the assembler level. This is a rarely-used way to shift all
+memory accesses by some offset stored in the %gs special register. Each
+thread has its own value in %gs. Note that %fs is used in a similar way
+by the pthread library to offset the thread-local variables; what we
+need is similar to thread-local variables, but in large quantity.
+
+I did not find any measurable slow-down from any example using the %gs
+prefix, so I expect the real performance hit to be tiny (along the lines
+of the extra stress on instruction caches caused by the extra byte for
+each load/store instruction).
+
+
+remap_file_pages
+----------------
+
+The Linux-only system call remap_file_pages() allows us to tweak a
+mmap() region of memory. It makes explicit one extra level of the
+memory-mapped management of the CPU. Let us focus on mmaps that are not
+backed by a file. A call to mmap() reserves a number of physical pages
+4096 bytes each, initialized to zero (and actually lazily allocated when
+the process really needs them, rather than all at once). It also
+reserves a range of addresses in the current process, of the same size,
+which correspond to the physical pages. But by using
+remap_file_pages(), we can change the mapping of the addresses to the
+physical pages. The total amount of both quantities is identical, and
+invariable, but we can take any page-sized range of addresses and ask
+that it now maps to a different physical page. Most probably, this
+comes with no overhead once the change is done: neither in terms of
+performance nor in extra memory in the kernel. The trick here is that
+different ranges of addresses can map to the same physical page of
+memory, which gives a zero-cost way to share data at different
+addresses.
+
+NOTE: this functionality is only available on Linux. There are
+potential ideas for other OSes, like a Windows device driver that would
+tweak the OS' page tables. But it would need serious research to know
+if it is feasible.
+
+
+Memory organization
+-------------------
+
+We allocate a big mmap that contains enough addresses for N times M
+bytes, where N is the number of threads and M is an upper bound on the
+total size of the objects. Then we use remap_file_pages() to make these
+N regions all map to the same physical memory. In each thread,
+%gs is made to point to the start of the corresponding region. This
+means that %gs-relative accesses will go to different addresses in
+each thread, but these addresses are then (initially) mapped to the
+same physical memory, so the effect is as if we used neither %gs nor
+remap_file_pages().
+
+The exception comes from pages that contain objects that are already
+committed, but are being modified by the current transaction. Such
+changes must not be visible to other threads before the current
+transaction commits. This is done by using another remap_file_pages()
+to "unshare" the page, i.e. stop the corresponding %gs-relative,
+thread-local page from mapping to the same physical page as others. We
+get a fresh new physical page, and duplicate its content --- much like
+the OS does after a fork() for pages modified by one or the other
+process.
+
+In more detail: the first page of addresses in each thread-local region
+(4096 bytes) is made non-accessible, to detect errors of accessing the
+NULL pointer. The second page is reserved for thread-local data. The
+rest is divided into 1/16 for thread-local read markers, followed by
+15/16 for the real objects. We initially use remap_file_pages() on this
+15/16 range.
+
+Each transaction records the objects that it changed. These are
+necessarily within unshared pages. When other threads are about to
+commit their own transaction, they first copy these objects into their
+version of the page. The point is that, from another thread's point of
+view, the memory didn't appear to change unexpectedly, but only when
+that other thread decides to copy the change explicitly.
+
+Each transaction uses its own (private) read markers to track which
+objects have been read. When a thread "imports" changes done to some
+objects, it can quickly check if these objects have also been read by
+the current transaction, and if so, we know we have a conflict.
+
+
+STM details
+-----------
+
+Here is how the STM works in terms that are hopefully common in STM
+research. The transactions run from a "start time" to a "commit time",
+but these are not explicitly represented numerically. The start time
+defines the initial state of the objects as seen in this thread. We use
+the "extendable timestamps" approach in order to regularly bump the
+start time of running transactions (not only when a potential conflict
+is detected, but more eagerly).
+
+Each thread records privately its read objects (using a byte-map) and
+publicly its written objects (using an array of pointers as well as a
+global flag in the object). Read-write conflicts are detected during
+the start time bumps. Write-write conflicts are detected eagerly ---
+only one transaction can be concurrently running with a given object
+modified. (In the case of write-write conflicts, there are several
+possible contention management policies; for now we always abort the
+transaction that comes later in its attempt to modify the object.)
+
+Special care is taken for objects allocated in the current transaction.
+We expect these objects to be the vast majority of modified objects, and
+also most of them to die quickly. More about it below.
+
+We use what looks like an "undo log" approach, where objects are
+modified in-place and aborts cause them to be copied back from somewhere
+else. However, it is implemented without any explicit undo log, but by
+copying objects between multiple thread-local copies. Memory pages
+containing modified objects are duplicated anyway, and so we already
+have around several copies of the objects at potentially different
+versions.
+
+
+(The rest of this section defines the "leader". It's a complicated way
+to make sure we always have an object to copy back in case this
+transaction is aborted. At first, what will be implemented in core.c
+will simply be waiting if necessary until two threads reach the latest
+version; then each thread can use the other's original object.)
+
+
+At most one thread is called the "leader" (this is new terminology as
+far as I know). The leader is:
+
+- a thread that runs a transaction right now (as opposed to being
+  in some blocking syscall between two transactions, for example);
+
+- not alone: there are other threads running a transaction concurrently
+  (when only one thread is running, there is no leader);
+
+- finally, the start time of this thread's transaction is strictly
+  higher than the start time of any other running transaction. (If there
+  are several threads with the same highest start time, we have no
+  leader.)
+
+Leadership is a temporary condition: it is acquired (typically) by the
+thread whose transaction commits and whose next transaction starts; but
+it is lost again as soon as any other thread updates its transaction's
+start time to match.
+
+The point of the notion of leadership is that when the leader wants to
+modify an object, it must first make sure that the original version is
+also present somewhere else. Only the leader thread, if there is any,
+needs to worry about it. We don't need to remember the original version
+of an older object, because if we need to abort a transaction, we may as
+well update all objects to the latest version. And if there are several
+threads with the same highest start time, we can be sure that the
+original version of the object is somewhere among them --- this is the
+point of detecting write-write conflicts eagerly. Finally, if there is
+only one thread running, as soon as it was updated, it cannot abort any
+more, so we don't need to record the old version of anything.
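[Editor's aside, not part of the merged diff: the byte-map read markers
described above can be illustrated with a small, self-contained C sketch.
The names used here (toy_read, toy_was_read, read_markers, TOY_HEAP_SIZE)
are invented for this illustration only; the real code is the stm_read() /
was_read_remote() pair that appears further down in this changeset.]

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* One marker byte per 16 bytes of address space; a plain array stands in
   for the per-segment read-marker pages of the real implementation. */
#define TOY_HEAP_SIZE (1UL << 20)        /* toy value for this sketch */
static uint8_t read_markers[TOY_HEAP_SIZE >> 4];
static uint8_t transaction_read_version = 1;

/* reading an object: record the current transaction's read version in
   the marker byte that corresponds to the object's address */
static inline void toy_read(uintptr_t obj)
{
    read_markers[obj >> 4] = transaction_read_version;
}

/* conflict detection against another segment: the object counts as read
   there iff its marker byte equals that segment's current read version */
static inline bool toy_was_read(uintptr_t obj, uint8_t other_version)
{
    return read_markers[obj >> 4] == other_version;
}

int main(void)
{
    uintptr_t some_obj = 0x1230;
    toy_read(some_obj);
    printf("read in this transaction: %d\n",
           (int)toy_was_read(some_obj, transaction_read_version));
    return 0;
}

[Bumping transaction_read_version at each transaction start is what makes
"clearing" all markers essentially free; only when the 8-bit version wraps
around does the real code reset the marker pages, see
reset_transaction_read_version() in c7/stm/core.c below.]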
+
+The only remaining case is the one in which there is a leader thread,
+this leader thread has the only latest version of an object, and it
+tries to further modify this object. To handle this precise case, for
+now, we simply wait until another thread updates and we are no longer
+the leader. (*)
+
+(*) the code in core.c contains, or contained, or will again contain, an
+explicit undo log that would be filled in this case only.
+
+
+Object creation and GC
+----------------------
+
+draft:
+
+- pages need to be unshared when they contain already-committed objects
+  that are then modified.
+
+- pages can remain shared if a fraction of (or all) their space was not
+  used previously, but is used by new allocations; any changes to these
+  fresh objects during the same transaction do *not* need to unshare the
+  page. This should ensure that in the common case the majority of pages
+  are not unshared.
+
+- minor collection: occurs regularly, and maybe always at the end of
+  transactions (we'll see). Should work by marking the young objects
+  that survive. Non-marked objects are then swept lazily by the
+  next allocation requests (as in "mark-and-don't-sweep" GCs, here
+  for the minor collection only). Needs a write barrier to detect
+  old-objects-pointing-to-young objects (the old object may be fresh
+  from the same running transaction as well, or be already committed).
+
+- the numbers and flags stored in the objects need to be designed with
+  the above goals in mind.
+
+- unclear yet: the minor collections may be triggered only when the
+  memory is full, or whenever a few MBs of memory was allocated. It is
+  not important for small-to-medium transactions that only allocate a
+  few MBs anyway, but it might be for long-running transactions.
+
+- the major collections walk *all* objects. They'll probably require
+  all threads to be synchronized. Ideally the threads should then proceed
+  to do a parallel GC, but as a first step, blocking all threads but one
+  should be fine.
+
+- the major collections should be triggered by the amount of really-used
+  memory, which means: counting the unshared pages as N pages. Major
+  collection should then re-share the pages as much as possible, after
+  making sure that all threads have their timestamp updated. This is the
+  essential part that guarantees that large, old, no-longer-modified
+  bunches of objects are eventually present in only one copy in memory,
+  in shared pages --- while at the same time bounding the number of
+  calls to remap_file_pages() for each page at 2 per major collection
+  cycle.
+
+
+Misc
+----
+
+Use __builtin_setjmp() and __builtin_longjmp() rather than setjmp()
+and longjmp().
diff --git a/c7/demo/Makefile b/c7/demo/Makefile
new file mode 100644
--- /dev/null
+++ b/c7/demo/Makefile
@@ -0,0 +1,32 @@
+#
+# Makefile for the demos.
+#
+
+DEBUG_EXE = debug-demo2
+BUILD_EXE = build-demo2
+RELEASE_EXE = release-demo2
+
+debug: $(DEBUG_EXE) # with prints and asserts
+build: $(BUILD_EXE) # without prints, but with asserts
+release: $(RELEASE_EXE) # without prints nor asserts
+
+clean:
+	rm -f $(BUILD_EXE) $(DEBUG_EXE) $(RELEASE_EXE)
+
+
+H_FILES = ../stmgc.h ../stm/*.h
+C_FILES = ../stmgc.c ../stm/*.c
+
+
+# note that 'build' is optimized but still contains all asserts
+debug-%: %.c ${H_FILES} ${C_FILES}
+	clang -I.. -pthread -DSTM_DEBUGPRINT -g -O0 $< -o debug-$* \
+	-Wall -Werror ../stmgc.c
+
+build-%: %.c ${H_FILES} ${C_FILES}
+	clang -I..
-pthread -g -O0 $< -o build-$* \ + -Wall -Werror ../stmgc.c + +release-%: %.c ${H_FILES} ${C_FILES} + clang -I.. -pthread -g -DNDEBUG -O2 $< -o release-$* \ + -Wall -Werror ../stmgc.c diff --git a/c7/demo/demo2.c b/c7/demo/demo2.c new file mode 100644 --- /dev/null +++ b/c7/demo/demo2.c @@ -0,0 +1,249 @@ +#include +#include +#include +#include +#include + +#include "stmgc.h" + +#define LIST_LENGTH 2000 +#define BUNCH 100 + +typedef TLPREFIX struct node_s node_t; +typedef node_t* nodeptr_t; +typedef object_t* objptr_t; + +struct node_s { + struct object_s hdr; + long value; + nodeptr_t next; +}; + +__thread stm_thread_local_t stm_thread_local; + + +ssize_t stmcb_size_rounded_up(struct object_s *ob) +{ + return sizeof(struct node_s); +} + +void stmcb_trace(struct object_s *obj, void visit(object_t **)) +{ + struct node_s *n; + n = (struct node_s*)obj; + visit((object_t **)&n->next); +} + + +nodeptr_t global_chained_list; + + +long check_sorted(void) +{ + nodeptr_t r_n; + long prev, sum; + stm_jmpbuf_t here; + + STM_START_TRANSACTION(&stm_thread_local, here); + + stm_read((objptr_t)global_chained_list); + r_n = global_chained_list; + assert(r_n->value == -1); + + prev = -1; + sum = 0; + while (r_n->next) { + r_n = r_n->next; + stm_read((objptr_t)r_n); + sum += r_n->value; + + stm_safe_point(); + if (prev >= r_n->value) { + stm_commit_transaction(); + return -1; + } + + prev = r_n->value; + } + + stm_commit_transaction(); + return sum; +} + +nodeptr_t swap_nodes(nodeptr_t initial) +{ + stm_jmpbuf_t here; + + assert(initial != NULL); + + STM_START_TRANSACTION(&stm_thread_local, here); + + nodeptr_t prev = initial; + stm_read((objptr_t)prev); + + int i; + for (i=0; inext; + if (current == NULL) { + stm_commit_transaction(); + return NULL; + } + stm_read((objptr_t)current); + nodeptr_t next = current->next; + if (next == NULL) { + stm_commit_transaction(); + return NULL; + } + stm_read((objptr_t)next); + + if (next->value < current->value) { + stm_write((objptr_t)prev); + stm_write((objptr_t)current); + stm_write((objptr_t)next); + + prev->next = next; + current->next = next->next; + next->next = current; + + stm_safe_point(); + } + prev = current; + } + + stm_commit_transaction(); + return prev; +} + + + +void bubble_run(void) +{ + nodeptr_t r_current; + + r_current = global_chained_list; + while (r_current) { + r_current = swap_nodes(r_current); + } +} + + +/* initialize list with values in decreasing order */ +void setup_list(void) +{ + int i; + nodeptr_t w_newnode, w_prev; + + stm_start_inevitable_transaction(&stm_thread_local); + + global_chained_list = (nodeptr_t)stm_allocate(sizeof(struct node_s)); + global_chained_list->value = -1; + global_chained_list->next = NULL; + + STM_PUSH_ROOT(stm_thread_local, global_chained_list); + + w_prev = global_chained_list; + for (i = 0; i < LIST_LENGTH; i++) { + STM_PUSH_ROOT(stm_thread_local, w_prev); + w_newnode = (nodeptr_t)stm_allocate(sizeof(struct node_s)); + + STM_POP_ROOT(stm_thread_local, w_prev); + w_newnode->value = LIST_LENGTH - i; + w_newnode->next = NULL; + + stm_write((objptr_t)w_prev); + w_prev->next = w_newnode; + w_prev = w_newnode; + } + + STM_POP_ROOT(stm_thread_local, global_chained_list); /* update value */ + assert(global_chained_list->value == -1); + STM_PUSH_ROOT(stm_thread_local, global_chained_list); + + stm_commit_transaction(); + + stm_start_inevitable_transaction(&stm_thread_local); + STM_POP_ROOT(stm_thread_local, global_chained_list); /* update value */ + assert(global_chained_list->value == -1); + 
STM_PUSH_ROOT(stm_thread_local, global_chained_list); /* remains forever in the shadow stack */ + stm_commit_transaction(); + + printf("setup ok\n"); +} + + +static sem_t done; + + +void *demo2(void *arg) +{ + int status; + stm_register_thread_local(&stm_thread_local); + + STM_PUSH_ROOT(stm_thread_local, global_chained_list); /* remains forever in the shadow stack */ + + while (check_sorted() == -1) { + bubble_run(); + } + + STM_POP_ROOT(stm_thread_local, global_chained_list); + assert(stm_thread_local.shadowstack == stm_thread_local.shadowstack_base); + + stm_unregister_thread_local(&stm_thread_local); + status = sem_post(&done); assert(status == 0); + return NULL; +} + +void final_check(void) +{ + long sum; + + printf("final check\n"); + + sum = check_sorted(); + + // little Gauss: + if (sum == (1 + LIST_LENGTH) * (LIST_LENGTH / 2)) + printf("check ok\n"); + else + printf("check ERROR\n"); +} + + +void newthread(void*(*func)(void*), void *arg) +{ + pthread_t th; + int status = pthread_create(&th, NULL, func, arg); + if (status != 0) + abort(); + pthread_detach(th); + printf("started new thread\n"); +} + + + +int main(void) +{ + int status; + + status = sem_init(&done, 0, 0); assert(status == 0); + + stm_setup(); + stm_register_thread_local(&stm_thread_local); + + + setup_list(); + + newthread(demo2, (void*)1); + newthread(demo2, (void*)2); + + status = sem_wait(&done); assert(status == 0); + status = sem_wait(&done); assert(status == 0); + + final_check(); + + + stm_unregister_thread_local(&stm_thread_local); + stm_teardown(); + + return 0; +} diff --git a/c7/demo/demo_random.c b/c7/demo/demo_random.c new file mode 100644 --- /dev/null +++ b/c7/demo/demo_random.c @@ -0,0 +1,406 @@ +#include +#include +#include +#include +#include +#include + +#include "stmgc.h" + +#define NUMTHREADS 3 +#define STEPS_PER_THREAD 5000 +#define THREAD_STARTS 100 // how many restarts of threads +#define SHARED_ROOTS 3 +#define MAXROOTS 1000 + + +// SUPPORT +struct node_s; +typedef TLPREFIX struct node_s node_t; +typedef node_t* nodeptr_t; +typedef object_t* objptr_t; + +struct node_s { + struct object_s hdr; + long my_size; + nodeptr_t next; +}; + + +static sem_t done; +__thread stm_thread_local_t stm_thread_local; + +// global and per-thread-data +time_t default_seed; +objptr_t shared_roots[SHARED_ROOTS]; + +struct thread_data { + unsigned int thread_seed; + objptr_t roots[MAXROOTS]; + int num_roots; + int num_roots_at_transaction_start; + int steps_left; +}; +__thread struct thread_data td; + + +ssize_t stmcb_size_rounded_up(struct object_s *ob) +{ + return ((struct node_s*)ob)->my_size; +} + +void stmcb_trace(struct object_s *obj, void visit(object_t **)) +{ + struct node_s *n; + n = (struct node_s*)obj; + + /* and the same value at the end: */ + /* note, ->next may be the same as last_next */ + nodeptr_t *last_next = (nodeptr_t*)((char*)n + n->my_size - sizeof(void*)); + + assert(n->next == *last_next); + + visit((object_t **)&n->next); + visit((object_t **)last_next); + + assert(n->next == *last_next); +} + +void _push_shared_roots() +{ + int i; + for (i = 0; i < SHARED_ROOTS; i++) { + STM_PUSH_ROOT(stm_thread_local, shared_roots[i]); + } +} + +void _pop_shared_roots() +{ + int i; + for (i = 0; i < SHARED_ROOTS; i++) { + STM_POP_ROOT(stm_thread_local, shared_roots[SHARED_ROOTS - i - 1]); + } +} + +int get_rand(int max) +{ + if (max == 0) + return 0; + return (int)(rand_r(&td.thread_seed) % (unsigned int)max); +} + +objptr_t get_random_root() +{ + int num = get_rand(2); + if (num == 0 && td.num_roots > 0) 
{ + num = get_rand(td.num_roots); + return td.roots[num]; + } + else { + num = get_rand(SHARED_ROOTS); + return shared_roots[num]; + } +} + +void reload_roots() +{ + int i; + assert(td.num_roots == td.num_roots_at_transaction_start); + for (i = td.num_roots_at_transaction_start - 1; i >= 0; i--) { + if (td.roots[i]) + STM_POP_ROOT(stm_thread_local, td.roots[i]); + } + + for (i = 0; i < td.num_roots_at_transaction_start; i++) { + if (td.roots[i]) + STM_PUSH_ROOT(stm_thread_local, td.roots[i]); + } +} + +void push_roots() +{ + int i; + for (i = td.num_roots_at_transaction_start; i < td.num_roots; i++) { + if (td.roots[i]) + STM_PUSH_ROOT(stm_thread_local, td.roots[i]); + } +} + +void pop_roots() +{ + int i; + for (i = td.num_roots - 1; i >= td.num_roots_at_transaction_start; i--) { + if (td.roots[i]) + STM_POP_ROOT(stm_thread_local, td.roots[i]); + } +} + +void del_root(int idx) +{ + int i; + assert(idx >= td.num_roots_at_transaction_start); + + for (i = idx; i < td.num_roots - 1; i++) + td.roots[i] = td.roots[i + 1]; + td.num_roots--; +} + +void add_root(objptr_t r) +{ + if (r && td.num_roots < MAXROOTS) { + td.roots[td.num_roots++] = r; + } +} + + +void read_barrier(objptr_t p) +{ + if (p != NULL) { + stm_read(p); + } +} + +void write_barrier(objptr_t p) +{ + if (p != NULL) { + stm_write(p); + } +} + +void set_next(objptr_t p, objptr_t v) +{ + if (p != NULL) { + nodeptr_t n = (nodeptr_t)p; + + /* and the same value at the end: */ + nodeptr_t TLPREFIX *last_next = (nodeptr_t TLPREFIX *)((stm_char*)n + n->my_size - sizeof(void*)); + assert(n->next == *last_next); + n->next = (nodeptr_t)v; + *last_next = (nodeptr_t)v; + } +} + +nodeptr_t get_next(objptr_t p) +{ + nodeptr_t n = (nodeptr_t)p; + + /* and the same value at the end: */ + nodeptr_t TLPREFIX *last_next = (nodeptr_t TLPREFIX *)((stm_char*)n + n->my_size - sizeof(void*)); + assert(n->next == *last_next); + + return n->next; +} + + +objptr_t simple_events(objptr_t p, objptr_t _r) +{ + int k = get_rand(8); + int num; + + switch (k) { + case 0: // remove a root + if (td.num_roots > td.num_roots_at_transaction_start) { + num = td.num_roots_at_transaction_start + + get_rand(td.num_roots - td.num_roots_at_transaction_start); + del_root(num); + } + break; + case 1: // add 'p' to roots + add_root(p); + break; + case 2: // set 'p' to point to a root + if (_r) + p = _r; + break; + case 3: // allocate fresh 'p' + push_roots(); + size_t sizes[4] = {sizeof(struct node_s), + sizeof(struct node_s) + 48, + sizeof(struct node_s) + 4096, + sizeof(struct node_s) + 4096*70}; + size_t size = sizes[get_rand(4)]; + p = stm_allocate(size); + ((nodeptr_t)p)->my_size = size; + pop_roots(); + /* reload_roots not necessary, all are old after start_transaction */ + break; + case 4: // read and validate 'p' + read_barrier(p); + break; + case 5: // only do a stm_write_barrier + write_barrier(p); + break; + case 6: // follow p->next + if (p) { + read_barrier(p); + p = (objptr_t)(get_next(p)); + } + break; + case 7: // set 'p' as *next in one of the roots + write_barrier(_r); + set_next(_r, p); + break; + } + return p; +} + + +objptr_t do_step(objptr_t p) +{ + objptr_t _r; + int k; + + _r = get_random_root(); + k = get_rand(11); + + if (k < 10) + p = simple_events(p, _r); + else if (get_rand(20) == 1) { + return (objptr_t)-1; // break current + } + return p; +} + + + +void setup_thread() +{ + memset(&td, 0, sizeof(struct thread_data)); + + /* stupid check because gdb shows garbage + in td.roots: */ + int i; + for (i = 0; i < MAXROOTS; i++) + assert(td.roots[i] == 
NULL); + + td.thread_seed = default_seed++; + td.steps_left = STEPS_PER_THREAD; + td.num_roots = 0; + td.num_roots_at_transaction_start = 0; +} + + + +void *demo_random(void *arg) +{ + int status; + stm_register_thread_local(&stm_thread_local); + + /* forever on the shadowstack: */ + _push_shared_roots(); + + setup_thread(); + + objptr_t p = NULL; + stm_jmpbuf_t here; + + STM_START_TRANSACTION(&stm_thread_local, here); + assert(td.num_roots >= td.num_roots_at_transaction_start); + td.num_roots = td.num_roots_at_transaction_start; + p = NULL; + pop_roots(); /* does nothing.. */ + reload_roots(); + + while (td.steps_left-->0) { + if (td.steps_left % 8 == 0) + fprintf(stdout, "#"); + + p = do_step(p); + + if (p == (objptr_t)-1) { + push_roots(); + stm_commit_transaction(); + + td.num_roots_at_transaction_start = td.num_roots; + + STM_START_TRANSACTION(&stm_thread_local, here); + td.num_roots = td.num_roots_at_transaction_start; + p = NULL; + pop_roots(); + reload_roots(); + } + } + stm_commit_transaction(); + + stm_unregister_thread_local(&stm_thread_local); + + status = sem_post(&done); assert(status == 0); + return NULL; +} + +void newthread(void*(*func)(void*), void *arg) +{ + pthread_t th; + int status = pthread_create(&th, NULL, func, arg); + if (status != 0) + abort(); + pthread_detach(th); + printf("started new thread\n"); +} + + +void setup_globals() +{ + int i; + + stm_start_inevitable_transaction(&stm_thread_local); + for (i = 0; i < SHARED_ROOTS; i++) { + shared_roots[i] = stm_allocate(sizeof(struct node_s)); + ((nodeptr_t)shared_roots[i])->my_size = sizeof(struct node_s); + STM_PUSH_ROOT(stm_thread_local, shared_roots[i]); + } + stm_commit_transaction(); + + /* make them OLD */ + + stm_start_inevitable_transaction(&stm_thread_local); + /* update now old references: */ + _pop_shared_roots(); + _push_shared_roots(); + stm_commit_transaction(); + /* leave them on this shadow stack forever for major collections */ +} + +int main(void) +{ + int i, status; + + /* pick a random seed from the time in seconds. + A bit pointless for now... because the interleaving of the + threads is really random. */ + default_seed = time(NULL); + printf("running with seed=%lld\n", (long long)default_seed); + + status = sem_init(&done, 0, 0); + assert(status == 0); + + + stm_setup(); + stm_register_thread_local(&stm_thread_local); + + setup_globals(); + + int thread_starts = NUMTHREADS * THREAD_STARTS; + for (i = 0; i < NUMTHREADS; i++) { + newthread(demo_random, NULL); + thread_starts--; + } + + for (i=0; i < NUMTHREADS * THREAD_STARTS; i++) { + status = sem_wait(&done); + assert(status == 0); + printf("thread finished\n"); + if (thread_starts) { + thread_starts--; + newthread(demo_random, NULL); + } + } + + printf("Test OK!\n"); + + _pop_shared_roots(); + stm_unregister_thread_local(&stm_thread_local); + stm_teardown(); + + return 0; +} diff --git a/c7/llvmfix/no-memset-creation-with-addrspace.diff b/c7/llvmfix/no-memset-creation-with-addrspace.diff new file mode 100644 --- /dev/null +++ b/c7/llvmfix/no-memset-creation-with-addrspace.diff @@ -0,0 +1,16 @@ +Index: lib/Transforms/Scalar/MemCpyOptimizer.cpp +=================================================================== +--- lib/Transforms/Scalar/MemCpyOptimizer.cpp (revision 201645) ++++ lib/Transforms/Scalar/MemCpyOptimizer.cpp (working copy) +@@ -368,6 +368,11 @@ + Value *StartPtr, Value *ByteVal) { + if (TD == 0) return 0; + ++ // We have to check for address space < 256, since llvm.memset only supports ++ // user defined address spaces. 
++ if (cast(StartPtr->getType())->getAddressSpace() >= 256) ++ return 0; ++ + // Okay, so we now have a single store that can be splatable. Scan to find + // all subsequent stores of the same value to offset from the same pointer. + // Join these together into ranges, so we can decide whether contiguous blocks diff --git a/c7/stm/atomic.h b/c7/stm/atomic.h new file mode 100644 --- /dev/null +++ b/c7/stm/atomic.h @@ -0,0 +1,34 @@ + +/* spin_loop() corresponds to the PAUSE instruction on x86. On + other architectures, we generate no instruction (but still need + the compiler barrier); if on another architecture you find the + corresponding instruction, feel free to add it here. +*/ + +/* write_fence() is a function that inserts a "write fence". The + goal is to make sure that past writes are really pushed to memory + before the future writes. We assume that the corresponding "read + fence" effect is done automatically by a corresponding + __sync_bool_compare_and_swap(). + + On x86, this is done automatically by the CPU; we only need a + compiler barrier (asm("memory")). + + On other architectures, we use __sync_synchronize() as a general + fall-back, but we might have more efficient alternative on some other + platforms too. +*/ + + +#if defined(__i386__) || defined(__amd64__) + +# define HAVE_FULL_EXCHANGE_INSN + static inline void spin_loop(void) { asm("pause" : : : "memory"); } + static inline void write_fence(void) { asm("" : : : "memory"); } + +#else + + static inline void spin_loop(void) { asm("" : : : "memory"); } + static inline void write_fence(void) { __sync_synchronize(); } + +#endif diff --git a/c7/stm/contention.c b/c7/stm/contention.c new file mode 100644 --- /dev/null +++ b/c7/stm/contention.c @@ -0,0 +1,93 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +static void contention_management(uint8_t other_segment_num) +{ + /* A simple contention manager. Called when some other thread + holds the write lock on an object. The current thread tries + to do either a write or a read on it. */ + + assert(_has_mutex()); + assert(other_segment_num != STM_SEGMENT->segment_num); + + /* Who should abort here: this thread, or the other thread? */ + struct stm_priv_segment_info_s* other_pseg; + other_pseg = get_priv_segment(other_segment_num); + + if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) { + /* I'm inevitable, so the other is not. */ + assert(other_pseg->transaction_state != TS_INEVITABLE); + other_pseg->transaction_state = TS_MUST_ABORT; + } + else if (other_pseg->start_time < STM_PSEGMENT->start_time) { + /* The other thread started before us, so I should abort, as I'm + the least long-running transaction. */ + } + else if (other_pseg->transaction_state == TS_REGULAR) { + /* The other thread started strictly after us. We tell it + to abort if we can (e.g. if it's not TS_INEVITABLE). */ + other_pseg->transaction_state = TS_MUST_ABORT; + } + + if (other_pseg->transaction_state != TS_MUST_ABORT) { + /* if the other thread is not in aborting-soon mode, then we must + abort. */ + abort_with_mutex(); + } + else { + /* signal the other thread; it must abort. + + Note that we know that the target thread is running now, and + so it is or will soon be blocked at a mutex_lock() or a + cond_wait(C_SAFE_POINT). Thus broadcasting C_SAFE_POINT is + enough to wake it up in the second case. 
+ */ + cond_broadcast(C_SAFE_POINT); + } +} + +static void write_write_contention_management(uintptr_t lock_idx) +{ + mutex_lock(); + + if (STM_PSEGMENT->transaction_state == TS_MUST_ABORT) + abort_with_mutex(); + + uint8_t prev_owner = ((volatile uint8_t *)write_locks)[lock_idx]; + if (prev_owner != 0 && prev_owner != STM_PSEGMENT->write_lock_num) { + + uint8_t other_segment_num = prev_owner - 1; + contention_management(other_segment_num); + + /* the rest of this code is for the case where we continue to + run, and the other thread is asked to abort */ + +#ifdef STM_TESTS + /* abort anyway for tests. We mustn't call cond_wait() */ + abort_with_mutex(); +#endif + + /* first mark the other thread as "needing a safe-point" */ + struct stm_priv_segment_info_s* other_pseg; + other_pseg = get_priv_segment(other_segment_num); + assert(other_pseg->transaction_state == TS_MUST_ABORT); + other_pseg->pub.nursery_end = NSE_SIGNAL; + + /* we will issue a safe point and wait: */ + STM_PSEGMENT->safe_point = SP_SAFE_POINT_CANNOT_COLLECT; + + /* wait, hopefully until the other thread broadcasts "I'm + done aborting" (spurious wake-ups are ok). */ + cond_wait(C_SAFE_POINT); + + cond_broadcast(C_RESUME); + + /* now we return into _stm_write_slowpath() and will try again + to acquire the write lock on our object. */ + STM_PSEGMENT->safe_point = SP_RUNNING; + } + + mutex_unlock(); +} diff --git a/c7/stm/contention.h b/c7/stm/contention.h new file mode 100644 --- /dev/null +++ b/c7/stm/contention.h @@ -0,0 +1,3 @@ + +static void contention_management(uint8_t other_segment_num); +static void write_write_contention_management(uintptr_t lock_idx); diff --git a/c7/stm/core.c b/c7/stm/core.c new file mode 100644 --- /dev/null +++ b/c7/stm/core.c @@ -0,0 +1,511 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +static void teardown_core(void) +{ + memset(write_locks, 0, sizeof(write_locks)); +} + + +void _stm_write_slowpath(object_t *obj) +{ + assert(_running_transaction()); + assert(!_is_young(obj)); + + /* is this an object from the same transaction, outside the nursery? */ + if ((obj->stm_flags & -GCFLAG_OVERFLOW_NUMBER_bit0) == + STM_PSEGMENT->overflow_number) { + + dprintf_test(("write_slowpath %p -> ovf obj_to_nurs\n", obj)); + obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; + assert(STM_PSEGMENT->objects_pointing_to_nursery != NULL); + LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); + return; + } + + /* do a read-barrier now. Note that this must occur before the + safepoints that may be issued in contention_management(). */ + stm_read(obj); + + /* claim the write-lock for this object. In case we're running the + same transaction since a long while, the object can be already in + 'modified_old_objects' (but, because it had GCFLAG_WRITE_BARRIER, + not in 'objects_pointing_to_nursery'). We'll detect this case + by finding that we already own the write-lock. */ + uintptr_t lock_idx = (((uintptr_t)obj) >> 4) - WRITELOCK_START; + uint8_t lock_num = STM_PSEGMENT->write_lock_num; + assert((intptr_t)lock_idx >= 0); + retry: + if (write_locks[lock_idx] == 0) { + if (UNLIKELY(!__sync_bool_compare_and_swap(&write_locks[lock_idx], + 0, lock_num))) + goto retry; + + dprintf_test(("write_slowpath %p -> mod_old\n", obj)); + + /* First change to this old object from this transaction. + Add it to the list 'modified_old_objects'. */ + LIST_APPEND(STM_PSEGMENT->modified_old_objects, obj); + + /* We need to privatize the pages containing the object, if they + are still SHARED_PAGE. 
The common case is that there is only + one page in total. */ + uintptr_t first_page = ((uintptr_t)obj) / 4096UL; + + /* If the object is in the uniform pages of small objects + (outside the nursery), then it fits into one page. This is + the common case. Otherwise, we need to compute it based on + its location and size. */ + if ((obj->stm_flags & GCFLAG_SMALL_UNIFORM) != 0) { + pages_privatize(first_page, 1, true); + } + else { + char *realobj; + size_t obj_size; + uintptr_t end_page; + + /* get the size of the object */ + realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + obj_size = stmcb_size_rounded_up((struct object_s *)realobj); + + /* that's the page *following* the last page with the object */ + end_page = (((uintptr_t)obj) + obj_size + 4095) / 4096UL; + + pages_privatize(first_page, end_page - first_page, true); + } + } + else if (write_locks[lock_idx] == lock_num) { + OPT_ASSERT(STM_PSEGMENT->objects_pointing_to_nursery != NULL); +#ifdef STM_TESTS + bool found = false; + LIST_FOREACH_R(STM_PSEGMENT->modified_old_objects, object_t *, + ({ if (item == obj) { found = true; break; } })); + assert(found); +#endif + } + else { + /* call the contention manager, and then retry (unless we were + aborted). */ + write_write_contention_management(lock_idx); + goto retry; + } + + /* A common case for write_locks[] that was either 0 or lock_num: + we need to add the object to 'objects_pointing_to_nursery' + if there is such a list. */ + if (STM_PSEGMENT->objects_pointing_to_nursery != NULL) { + dprintf_test(("write_slowpath %p -> old obj_to_nurs\n", obj)); + LIST_APPEND(STM_PSEGMENT->objects_pointing_to_nursery, obj); + } + + /* add the write-barrier-already-called flag ONLY if we succeeded in + getting the write-lock */ + assert(obj->stm_flags & GCFLAG_WRITE_BARRIER); + obj->stm_flags &= ~GCFLAG_WRITE_BARRIER; + + /* for sanity, check that all other segment copies of this object + still have the flag */ + long i; + for (i = 0; i < NB_SEGMENTS; i++) { + assert(i == STM_SEGMENT->segment_num || + (((struct object_s *)REAL_ADDRESS(get_segment_base(i), obj)) + ->stm_flags & GCFLAG_WRITE_BARRIER)); + } +} + +static void reset_transaction_read_version(void) +{ + /* force-reset all read markers to 0 */ + + /* XXX measure the time taken by this madvise() and the following + zeroing of pages done lazily by the kernel; compare it with using + 16-bit read_versions. + */ + /* XXX try to use madvise() on smaller ranges of memory. In my + measures, we could gain a factor 2 --- not really more, even if + the range of virtual addresses below is very large, as long as it + is already mostly non-reserved pages. (The following call keeps + them non-reserved; apparently the kernel just skips them very + quickly.) 
+ */ + char *readmarkers = REAL_ADDRESS(STM_SEGMENT->segment_base, + FIRST_READMARKER_PAGE * 4096UL); + dprintf(("reset_transaction_read_version: %p %ld\n", readmarkers, + (long)(NB_READMARKER_PAGES * 4096UL))); + if (mmap(readmarkers, NB_READMARKER_PAGES * 4096UL, + PROT_READ | PROT_WRITE, + MAP_FIXED | MAP_PAGES_FLAGS, -1, 0) != readmarkers) { + /* fall-back */ +#if STM_TESTS + stm_fatalerror("reset_transaction_read_version: %m\n"); +#endif + memset(readmarkers, 0, NB_READMARKER_PAGES * 4096UL); + } + reset_transaction_read_version_prebuilt(); + STM_SEGMENT->transaction_read_version = 1; +} + +void _stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf) +{ + mutex_lock_no_abort(); + + retry: + if (jmpbuf == NULL) { + wait_for_end_of_inevitable_transaction(false); + } + + if (!acquire_thread_segment(tl)) + goto retry; + /* GS invalid before this point! */ + + assert(STM_PSEGMENT->safe_point == SP_NO_TRANSACTION); + assert(STM_PSEGMENT->transaction_state == TS_NONE); + STM_PSEGMENT->safe_point = SP_RUNNING; + STM_PSEGMENT->transaction_state = (jmpbuf != NULL ? TS_REGULAR + : TS_INEVITABLE); + STM_SEGMENT->jmpbuf_ptr = jmpbuf; +#ifndef NDEBUG + STM_PSEGMENT->running_pthread = pthread_self(); +#endif + STM_PSEGMENT->shadowstack_at_start_of_transaction = tl->shadowstack; + STM_SEGMENT->nursery_end = NURSERY_END; + + dprintf(("start_transaction\n")); + + mutex_unlock(); + + uint8_t old_rv = STM_SEGMENT->transaction_read_version; + STM_SEGMENT->transaction_read_version = old_rv + 1; + if (UNLIKELY(old_rv == 0xff)) { + reset_transaction_read_version(); + } + + assert(list_is_empty(STM_PSEGMENT->modified_old_objects)); + assert(tree_is_cleared(STM_PSEGMENT->young_outside_nursery)); + assert(STM_PSEGMENT->objects_pointing_to_nursery == NULL); + assert(STM_PSEGMENT->large_overflow_objects == NULL); + + check_nursery_at_transaction_start(); +} + + +/************************************************************/ + +#if NB_SEGMENTS != 2 +# error "The logic in the functions below only works with two segments" +#endif + +static void detect_write_read_conflicts(void) +{ + long remote_num = 1 - STM_SEGMENT->segment_num; + char *remote_base = get_segment_base(remote_num); + uint8_t remote_version = get_segment(remote_num)->transaction_read_version; + + switch (get_priv_segment(remote_num)->transaction_state) { + case TS_NONE: + case TS_MUST_ABORT: + return; /* no need to do any check */ + default:; + } + + LIST_FOREACH_R( + STM_PSEGMENT->modified_old_objects, + object_t * /*item*/, + ({ + if (was_read_remote(remote_base, item, remote_version)) { + /* A write-read conflict! */ + contention_management(remote_num); + + /* If we reach this point, it means we aborted the other + thread. We're done here. */ + assert(get_priv_segment(remote_num)->transaction_state == + TS_MUST_ABORT); + return; + } + })); +} + +static void synchronize_overflow_object_now(object_t *obj) +{ + assert(!_is_young(obj)); + assert((obj->stm_flags & GCFLAG_SMALL_UNIFORM) == 0); + + char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); + ssize_t obj_size = stmcb_size_rounded_up((struct object_s *)realobj); + uintptr_t start = (uintptr_t)obj; + uintptr_t end = start + obj_size; + uintptr_t first_page = start / 4096UL; + uintptr_t last_page = (end - 1) / 4096UL; + + do { + if (flag_page_private[first_page] != SHARED_PAGE) { + /* The page is a PRIVATE_PAGE. We need to diffuse this fragment + of our object from our own segment to all other segments. 
*/ + + uintptr_t copy_size; + if (first_page == last_page) { + /* this is the final fragment */ + copy_size = end - start; + } + else { + /* this is a non-final fragment, going up to the page's end */ + copy_size = 4096 - (start & 4095); + } + + /* double-check that the result fits in one page */ + assert(copy_size > 0); + assert(copy_size + (start & 4095) <= 4096); + + long i; + char *src = REAL_ADDRESS(STM_SEGMENT->segment_base, start); + for (i = 0; i < NB_SEGMENTS; i++) { + if (i != STM_SEGMENT->segment_num) { + char *dst = REAL_ADDRESS(get_segment_base(i), start); + memcpy(dst, src, copy_size); + } + } + } + + start = (start + 4096) & ~4095; + } while (first_page++ < last_page); +} + +static void push_overflow_objects_from_privatized_pages(void) +{ + if (STM_PSEGMENT->large_overflow_objects == NULL) + return; + + LIST_FOREACH_R(STM_PSEGMENT->large_overflow_objects, object_t *, + synchronize_overflow_object_now(item)); +} + +static void push_modified_to_other_segments(void) +{ + long remote_num = 1 - STM_SEGMENT->segment_num; + char *local_base = STM_SEGMENT->segment_base; + char *remote_base = get_segment_base(remote_num); + bool remote_active = + (get_priv_segment(remote_num)->transaction_state == TS_REGULAR || + get_priv_segment(remote_num)->transaction_state == TS_INEVITABLE); + + LIST_FOREACH_R( + STM_PSEGMENT->modified_old_objects, + object_t * /*item*/, + ({ + if (remote_active) { + assert(!was_read_remote(remote_base, item, + get_segment(remote_num)->transaction_read_version)); + } + + /* clear the write-lock (note that this runs with all other + threads paused, so no need to be careful about ordering) */ + uintptr_t lock_idx = (((uintptr_t)item) >> 4) - WRITELOCK_START; + assert((intptr_t)lock_idx >= 0); + assert(write_locks[lock_idx] == STM_PSEGMENT->write_lock_num); + write_locks[lock_idx] = 0; + + /* the WRITE_BARRIER flag should have been set again by + minor_collection() */ + assert((item->stm_flags & GCFLAG_WRITE_BARRIER) != 0); + + /* copy the modified object to the other segment */ + char *src = REAL_ADDRESS(local_base, item); + char *dst = REAL_ADDRESS(remote_base, item); + ssize_t size = stmcb_size_rounded_up((struct object_s *)src); + memcpy(dst, src, size); + })); + + list_clear(STM_PSEGMENT->modified_old_objects); +} + +static void _finish_transaction(void) +{ + /* signal all the threads blocked in wait_for_other_safe_points() */ + if (STM_SEGMENT->nursery_end == NSE_SIGNAL) { + STM_SEGMENT->nursery_end = NURSERY_END; + cond_broadcast(C_SAFE_POINT); + } + + STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; + STM_PSEGMENT->transaction_state = TS_NONE; + + /* reset these lists to NULL for the next transaction */ + LIST_FREE(STM_PSEGMENT->objects_pointing_to_nursery); + LIST_FREE(STM_PSEGMENT->large_overflow_objects); + + stm_thread_local_t *tl = STM_SEGMENT->running_thread; + release_thread_segment(tl); + /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ +} + +void stm_commit_transaction(void) +{ + assert(!_has_mutex()); + assert(STM_PSEGMENT->safe_point == SP_RUNNING); + assert(STM_PSEGMENT->running_pthread == pthread_self()); + + bool has_any_overflow_object = + (STM_PSEGMENT->objects_pointing_to_nursery != NULL); + + minor_collection(/*commit=*/ true); + + mutex_lock(); + STM_PSEGMENT->safe_point = SP_SAFE_POINT_CAN_COLLECT; + + /* wait until the other thread is at a safe-point */ + wait_for_other_safe_points(SP_SAFE_POINT_CANNOT_COLLECT); + + /* the rest of this function either runs atomically without + releasing the mutex, or aborts the current thread. 
*/ + + /* detect conflicts */ + detect_write_read_conflicts(); + + /* cannot abort any more from here */ + dprintf(("commit_transaction\n")); + + assert(STM_PSEGMENT->transaction_state != TS_MUST_ABORT); + STM_SEGMENT->jmpbuf_ptr = NULL; + + /* synchronize overflow objects living in privatized pages */ + push_overflow_objects_from_privatized_pages(); + + /* synchronize modified old objects to other threads */ + push_modified_to_other_segments(); + + /* update 'overflow_number' if needed */ + if (has_any_overflow_object) { + highest_overflow_number += GCFLAG_OVERFLOW_NUMBER_bit0; + assert(highest_overflow_number != 0); /* XXX else, overflow! */ + STM_PSEGMENT->overflow_number = highest_overflow_number; + } + + /* if we were inevitable, signal */ + if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) + cond_signal(C_INEVITABLE_DONE); + + /* done */ + _finish_transaction(); + + /* wake up one other thread waiting for a segment. */ + cond_signal(C_RELEASE_THREAD_SEGMENT); + + mutex_unlock(); +} + +void stm_abort_transaction(void) +{ + mutex_lock(); + abort_with_mutex(); +} + +static void reset_modified_from_other_segments(void) +{ + /* pull the right versions from other threads in order + to reset our pages as part of an abort */ + long remote_num = 1 - STM_SEGMENT->segment_num; + char *local_base = STM_SEGMENT->segment_base; + char *remote_base = get_segment_base(remote_num); + + LIST_FOREACH_R( + STM_PSEGMENT->modified_old_objects, + object_t * /*item*/, + ({ + /* memcpy in the opposite direction than + push_modified_to_other_segments() */ + char *src = REAL_ADDRESS(remote_base, item); + char *dst = REAL_ADDRESS(local_base, item); + ssize_t size = stmcb_size_rounded_up((struct object_s *)src); + memcpy(dst, src, size); + + /* objects in 'modified_old_objects' usually have the + WRITE_BARRIER flag, unless they have been modified + recently. Ignore the old flag; after copying from the + other segment, we should have the flag. */ + assert(item->stm_flags & GCFLAG_WRITE_BARRIER); + + /* write all changes to the object before we release the + write lock below. This is needed because we need to + ensure that if the write lock is not set, another thread + can get it and then change 'src' in parallel. The + write_fence() ensures in particular that 'src' has been + fully read before we release the lock: reading it + is necessary to write 'dst'. */ + write_fence(); + + /* clear the write-lock */ + uintptr_t lock_idx = (((uintptr_t)item) >> 4) - WRITELOCK_START; + assert((intptr_t)lock_idx >= 0); + assert(write_locks[lock_idx] == STM_PSEGMENT->write_lock_num); + write_locks[lock_idx] = 0; + })); + + list_clear(STM_PSEGMENT->modified_old_objects); +} + +static void abort_with_mutex(void) +{ + dprintf(("~~~ ABORT\n")); + + switch (STM_PSEGMENT->transaction_state) { + case TS_REGULAR: + case TS_MUST_ABORT: + break; + case TS_INEVITABLE: + assert(!"abort: transaction_state == TS_INEVITABLE"); + default: + assert(!"abort: bad transaction_state"); + } + assert(STM_PSEGMENT->running_pthread == pthread_self()); + + /* throw away the content of the nursery */ + throw_away_nursery(); + + /* reset all the modified objects (incl. re-adding GCFLAG_WRITE_BARRIER) */ + reset_modified_from_other_segments(); + + stm_jmpbuf_t *jmpbuf_ptr = STM_SEGMENT->jmpbuf_ptr; + stm_thread_local_t *tl = STM_SEGMENT->running_thread; + tl->shadowstack = STM_PSEGMENT->shadowstack_at_start_of_transaction; + + _finish_transaction(); + + /* wake up one other thread waiting for a segment. 
In order to support + contention.c, we use a broadcast, to make sure that all threads are + signalled, including the one that requested an abort, if any. + Moreover, we wake up any thread waiting for this one to do a safe + point, if any. + */ + cond_broadcast(C_RELEASE_THREAD_SEGMENT); + + mutex_unlock(); + + assert(jmpbuf_ptr != NULL); + assert(jmpbuf_ptr != (stm_jmpbuf_t *)-1); /* for tests only */ + __builtin_longjmp(*jmpbuf_ptr, 1); +} + +void _stm_become_inevitable(char *msg) +{ + mutex_lock(); + switch (STM_PSEGMENT->transaction_state) { + + case TS_INEVITABLE: + break; /* already good */ + + case TS_REGULAR: + /* become inevitable */ + wait_for_end_of_inevitable_transaction(true); + STM_PSEGMENT->transaction_state = TS_INEVITABLE; + break; + + case TS_MUST_ABORT: + abort_with_mutex(); + + default: + assert(!"invalid transaction_state in become_inevitable"); + } + mutex_unlock(); +} diff --git a/c7/stm/core.h b/c7/stm/core.h new file mode 100644 --- /dev/null +++ b/c7/stm/core.h @@ -0,0 +1,208 @@ +#define _STM_CORE_H_ + +#include +#include +#include +#include +#include +#include + +/************************************************************/ + + +#define NB_PAGES (1500*256) // 1500MB +#define NB_SEGMENTS 2 +#define NB_SEGMENTS_MAX 240 /* don't increase NB_SEGMENTS past this */ +#define MAP_PAGES_FLAGS (MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE) +#define NB_NURSERY_PAGES 1024 // 4MB + +#define TOTAL_MEMORY (NB_PAGES * 4096UL * NB_SEGMENTS) +#define READMARKER_END ((NB_PAGES * 4096UL) >> 4) +#define FIRST_OBJECT_PAGE ((READMARKER_END + 4095) / 4096UL) +#define FIRST_NURSERY_PAGE FIRST_OBJECT_PAGE +#define END_NURSERY_PAGE (FIRST_NURSERY_PAGE + NB_NURSERY_PAGES) + +#define READMARKER_START ((FIRST_OBJECT_PAGE * 4096UL) >> 4) +#define FIRST_READMARKER_PAGE (READMARKER_START / 4096UL) +#define NB_READMARKER_PAGES (FIRST_OBJECT_PAGE - FIRST_READMARKER_PAGE) + +#define WRITELOCK_START ((END_NURSERY_PAGE * 4096UL) >> 4) +#define WRITELOCK_END READMARKER_END + +#define SHADOW_STACK_SIZE 1000 + +enum /* stm_flags */ { + /* This flag is set on non-nursery objects. It forces stm_write() + to call _stm_write_slowpath(). + */ + GCFLAG_WRITE_BARRIER = _STM_GCFLAG_WRITE_BARRIER, + + /* This flag is set by gcpage.c for all objects living in + uniformly-sized pages of small objects. + */ + GCFLAG_SMALL_UNIFORM = 0x02, + + /* All remaining bits of the 32-bit 'stm_flags' field are taken by + the "overflow number". This is a number that identifies the + "overflow objects" from the current transaction among all old + objects. More precisely, overflow objects are objects from the + current transaction that have been flushed out of the nursery, + which occurs if the same transaction allocates too many objects. + */ + GCFLAG_OVERFLOW_NUMBER_bit0 = 0x04 /* must be last */ +}; + + +/************************************************************/ + + +#define STM_PSEGMENT ((stm_priv_segment_info_t *)STM_SEGMENT) + +typedef TLPREFIX struct stm_priv_segment_info_s stm_priv_segment_info_t; + +struct stm_priv_segment_info_s { + struct stm_segment_info_s pub; + + /* List of old objects (older than the current transaction) that the + current transaction attempts to modify. This is used to track + the STM status: they are old objects that where written to and + that need to be copied to other segments upon commit. */ + struct list_s *modified_old_objects; + + /* List of out-of-nursery objects that may contain pointers to + nursery objects. 
This is used to track the GC status: they are + all objects outside the nursery on which an stm_write() occurred + since the last minor collection. This list contains exactly the + objects without GCFLAG_WRITE_BARRIER. If there was no minor + collection yet in the current transaction, this is NULL, + understood as meaning implicitly "this is the same as + 'modified_old_objects'". */ + struct list_s *objects_pointing_to_nursery; + + /* List of all large, overflowed objects. Only non-NULL after the + current transaction spanned a minor collection. */ + struct list_s *large_overflow_objects; + + /* List of all young objects outside the nursery ("young" in the + sense that they should be in the nursery, but were too big for + that). */ + struct tree_s *young_outside_nursery; + + /* Start time: to know approximately for how long a transaction has + been running, in contention management */ + uint64_t start_time; + + /* This is the number stored in the overflowed objects (a multiple of + GCFLAG_OVERFLOW_NUMBER_bit0). It is incremented when the + transaction is done, but only if we actually overflowed any + object; otherwise, no object has got this number. */ + uint32_t overflow_number; + + /* The marker stored in the global 'write_locks' array to mean + "this segment has modified this old object". */ + uint8_t write_lock_num; + + /* The thread's safe-point state, one of the SP_xxx constants. The + thread is in a "safe point" if it is not concurrently doing any + change that might cause race conditions in other threads. A + thread may enter but not *leave* the safe point it is in without + getting hold of the mutex. Broadly speaking, any value other + than SP_RUNNING means a safe point of some kind. */ + uint8_t safe_point; + + /* The transaction status, one of the TS_xxx constants. This is + only accessed when we hold the mutex. */ + uint8_t transaction_state; + + /* Temp for minor collection */ + bool minor_collect_will_commit_now; + + /* In case of abort, we restore the 'shadowstack' field. 
*/ + object_t **shadowstack_at_start_of_transaction; + + /* For debugging */ +#ifndef NDEBUG + pthread_t running_pthread; +#endif +}; + +enum /* safe_point */ { + SP_NO_TRANSACTION=0, + SP_RUNNING, + SP_SAFE_POINT_CANNOT_COLLECT, + SP_SAFE_POINT_CAN_COLLECT, +}; +enum /* transaction_state */ { + TS_NONE=0, + TS_REGULAR, + TS_INEVITABLE, + TS_MUST_ABORT, +}; + +static char *stm_object_pages; +static stm_thread_local_t *stm_all_thread_locals = NULL; + +#ifdef STM_TESTS +static char *stm_other_pages; +#endif + +static uint8_t write_locks[WRITELOCK_END - WRITELOCK_START]; + + +#define REAL_ADDRESS(segment_base, src) ((segment_base) + (uintptr_t)(src)) + +static inline char *get_segment_base(long segment_num) { + return stm_object_pages + segment_num * (NB_PAGES * 4096UL); +} + +static inline +struct stm_segment_info_s *get_segment(long segment_num) { + return (struct stm_segment_info_s *)REAL_ADDRESS( + get_segment_base(segment_num), STM_PSEGMENT); +} + +static inline +struct stm_priv_segment_info_s *get_priv_segment(long segment_num) { + return (struct stm_priv_segment_info_s *)REAL_ADDRESS( + get_segment_base(segment_num), STM_PSEGMENT); +} + +static bool _is_tl_registered(stm_thread_local_t *tl); +static bool _running_transaction(void); + +static void teardown_core(void); +static void abort_with_mutex(void) __attribute__((noreturn)); + +static inline bool was_read_remote(char *base, object_t *obj, + uint8_t other_transaction_read_version) +{ + uint8_t rm = ((struct stm_read_marker_s *) + (base + (((uintptr_t)obj) >> 4)))->rm; + assert(rm <= other_transaction_read_version); + return rm == other_transaction_read_version; +} + +static inline void _duck(void) { + /* put a call to _duck() between two instructions that set 0 into + a %gs-prefixed address and that may otherwise be replaced with + llvm.memset --- it fails later because of the prefix... + This is not needed any more after applying the patch + llvmfix/no-memset-creation-with-addrspace.diff. */ + asm("/* workaround for llvm bug */"); +} + +static inline void abort_if_needed(void) { + switch (STM_PSEGMENT->transaction_state) { + case TS_REGULAR: + case TS_INEVITABLE: + break; + + case TS_MUST_ABORT: + stm_abort_transaction(); + + default: + assert(!"commit: bad transaction_state"); + } +} + +static void synchronize_overflow_object_now(object_t *obj); diff --git a/c7/stm/fprintcolor.c b/c7/stm/fprintcolor.c new file mode 100644 --- /dev/null +++ b/c7/stm/fprintcolor.c @@ -0,0 +1,52 @@ +/* ------------------------------------------------------------ */ +#ifdef STM_DEBUGPRINT +/* ------------------------------------------------------------ */ + + +static int dprintfcolor(void) +{ + return 31 + STM_SEGMENT->segment_num % 6; +} + +static int threadcolor_printf(const char *format, ...) +{ + char buffer[2048]; + va_list ap; + int result; + int size = (int)sprintf(buffer, "\033[%dm[%lx]", dprintfcolor(), + (long)pthread_self()); + assert(size >= 0); + + va_start(ap, format); + result = vsnprintf(buffer + size, 2000, format, ap); + assert(result >= 0); + va_end(ap); + + strcpy(buffer + size + result, "\033[0m"); + fputs(buffer, stderr); + + return result; +} + + +/* ------------------------------------------------------------ */ +#endif +/* ------------------------------------------------------------ */ + + +static void stm_fatalerror(const char *format, ...) 
+{ + va_list ap; + +#ifdef STM_DEBUGPRINT + dprintf(("STM Subsystem: Fatal Error\n")); +#else + fprintf(stderr, "STM Subsystem: Fatal Error\n"); +#endif + + va_start(ap, format); + vfprintf(stderr, format, ap); + va_end(ap); + + abort(); +} diff --git a/c7/stm/fprintcolor.h b/c7/stm/fprintcolor.h new file mode 100644 --- /dev/null +++ b/c7/stm/fprintcolor.h @@ -0,0 +1,38 @@ +/* ------------------------------------------------------------ */ +#ifdef STM_DEBUGPRINT +/* ------------------------------------------------------------ */ + + +#include + + +#define dprintf(args) threadcolor_printf args +static int dprintfcolor(void); + +static int threadcolor_printf(const char *format, ...) + __attribute__((format (printf, 1, 2))); + +#ifdef STM_TESTS +# define dprintf_test(args) dprintf(args) +#else +# define dprintf_test(args) do { } while(0) +#endif + + +/* ------------------------------------------------------------ */ +#else +/* ------------------------------------------------------------ */ + + +#define dprintf(args) do { } while(0) +#define dprintf_test(args) do { } while(0) +#define dprintfcolor() 0 + + +/* ------------------------------------------------------------ */ +#endif +/* ------------------------------------------------------------ */ + + +static void stm_fatalerror(const char *format, ...) + __attribute__((format (printf, 1, 2), noreturn)); diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c new file mode 100644 --- /dev/null +++ b/c7/stm/gcpage.c @@ -0,0 +1,110 @@ +#ifndef _STM_CORE_H_ +# error "must be compiled via stmgc.c" +#endif + + +static void setup_gcpage(void) +{ + /* NB. the very last page is not used, which allows a speed-up in + reset_all_creation_markers() */ + char *base = stm_object_pages + END_NURSERY_PAGE * 4096UL; + uintptr_t length = (NB_PAGES - END_NURSERY_PAGE - 1) * 4096UL; + _stm_largemalloc_init_arena(base, length); + + uninitialized_page_start = stm_object_pages + END_NURSERY_PAGE * 4096UL; + uninitialized_page_stop = stm_object_pages + NB_PAGES * 4096UL; + + assert(GC_MEDIUM_REQUEST >= (1 << 8)); +} + +static void teardown_gcpage(void) +{ + memset(small_alloc, 0, sizeof(small_alloc)); + free_uniform_pages = NULL; +} + + +#define GCPAGE_NUM_PAGES 20 + +static void setup_N_pages(char *pages_addr, uint64_t num) +{ + pages_initialize_shared((pages_addr - stm_object_pages) / 4096UL, num); +} + +static void grab_more_free_pages_for_small_allocations(void) +{ + /* grab N (= GCPAGE_NUM_PAGES) pages out of the top addresses */ + uintptr_t decrease_by = GCPAGE_NUM_PAGES * 4096; + if (uninitialized_page_stop - uninitialized_page_start <= decrease_by) + goto out_of_memory; + + uninitialized_page_stop -= decrease_by; + + if (!_stm_largemalloc_resize_arena(uninitialized_page_stop - + uninitialized_page_start)) + goto out_of_memory; + + setup_N_pages(uninitialized_page_start, GCPAGE_NUM_PAGES); + From noreply at buildbot.pypy.org Wed Feb 26 18:09:42 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 26 Feb 2014 18:09:42 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: add allow_conversion=True also to space.bigint_w, and improve a test Message-ID: <20140226170942.AB2F01D27B9@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69466:796466a94c48 Date: 2014-02-26 17:23 +0100 http://bitbucket.org/pypy/pypy/changeset/796466a94c48/ Log: add allow_conversion=True also to space.bigint_w, and improve a test diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ 
b/pypy/interpreter/baseobjspace.py @@ -203,7 +203,7 @@ def int_w(self, space, allow_conversion=True): # note that W_IntObject.int_w has a fast path and W_FloatObject.int_w - # raises + # raises w_TypeError w_obj = self if allow_conversion: w_obj = space.int(self) @@ -218,7 +218,15 @@ def uint_w(self, space): self._typed_unwrap_error(space, "integer") - def bigint_w(self, space): + def bigint_w(self, space, allow_conversion=True): + # note that W_IntObject and W_LongObject have fast paths, + # W_FloatObject.rbigint_w raises w_TypeError raises + w_obj = self + if allow_conversion: + w_obj = space.long(self) + return w_obj._bigint_w(space) + + def _bigint_w(self, space): self._typed_unwrap_error(space, "integer") def _typed_unwrap_error(self, space, expected): @@ -1378,8 +1386,12 @@ def uint_w(self, w_obj): return w_obj.uint_w(self) - def bigint_w(self, w_obj): - return w_obj.bigint_w(self) + def bigint_w(self, w_obj, allow_conversion=True): + """ + Like int_w, for returns a rlib.rbigint object and call __long__ is + allow_conversion is True. + """ + return w_obj.bigint_w(self, allow_conversion) def float_w(self, w_obj): return w_obj.float_w(self) @@ -1425,11 +1437,8 @@ def gateway_float_w(self, w_obj): return self.float_w(self.float(w_obj)) - def gateway_r_longlong_w(self, w_obj): - if self.isinstance_w(w_obj, self.w_float): - raise OperationError(self.w_TypeError, - self.wrap("integer argument expected, got float")) - return self.r_longlong_w(self.int(w_obj)) + gateway_r_longlong_w = r_longlong_w + gateway_r_ulonglong_w = r_ulonglong_w def gateway_r_uint_w(self, w_obj): if self.isinstance_w(w_obj, self.w_float): @@ -1437,12 +1446,6 @@ self.wrap("integer argument expected, got float")) return self.uint_w(self.int(w_obj)) - def gateway_r_ulonglong_w(self, w_obj): - if self.isinstance_w(w_obj, self.w_float): - raise OperationError(self.w_TypeError, - self.wrap("integer argument expected, got float")) - return self.r_ulonglong_w(self.int(w_obj)) - def gateway_nonnegint_w(self, w_obj): # Like space.gateway_int_w(), but raises an app-level ValueError if # the integer is negative. Here for gateway.py. 
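The dispatch pattern this changeset introduces can be sketched outside the interpreter. The following is a simplified, stand-alone model in plain Python; FakeSpace, W_Int and W_Duration are invented stand-ins, not the real object-space classes. It shows the shape of the new protocol: the public unwrapping method optionally applies the __long__-style conversion and then defers to a private hook that only genuinely integer-like types implement:

    # Minimal, self-contained model of the allow_conversion dispatch used
    # in the diff above.  All names here are illustrative, not the PyPy API.

    class FakeSpace(object):
        def long(self, w_obj):
            # stands in for space.long(w_obj): apply the __long__ equivalent
            return w_obj.descr_long(self)

    class W_Root(object):
        def bigint_w(self, space, allow_conversion=True):
            # public entry point: optionally convert first, then use the hook
            w_obj = space.long(self) if allow_conversion else self
            return w_obj._bigint_w(space)

        def _bigint_w(self, space):
            # default hook: this object is not an integer
            raise TypeError("expected an integer")

    class W_Int(W_Root):
        def __init__(self, intval):
            self.intval = intval

        def descr_long(self, space):
            return self

        def bigint_w(self, space, allow_conversion=True):
            return self.intval          # fast path: no conversion round-trip

        _bigint_w = bigint_w            # the same code serves as the hook

    class W_Duration(W_Root):
        # an app-level object that knows how to turn itself into an integer
        def __init__(self, seconds):
            self.seconds = seconds

        def descr_long(self, space):
            return W_Int(int(self.seconds))

    space = FakeSpace()
    assert W_Int(42).bigint_w(space) == 42
    assert W_Duration(7.5).bigint_w(space) == 7                  # converted
    try:
        W_Duration(7.5).bigint_w(space, allow_conversion=False)  # refused
    except TypeError:
        pass

In the real diff above, the overrides on the concrete integer types play the role of W_Int here: they short-circuit the public method so that objects which are already integers never go through the conversion step.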
diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -457,6 +457,8 @@ space.mul(space.wrap(sys.maxint), space.wrap(-7))) def test_interp2app_unwrap_spec_typechecks(self): + from rpython.rlib.rarithmetic import r_longlong + space = self.space w = space.wrap def g3_id(space, x): @@ -491,6 +493,12 @@ raises(gateway.OperationError,space.call_function,w_app_g3_f,w(None)) raises(gateway.OperationError,space.call_function,w_app_g3_f,w("foo")) + app_g3_r = gateway.interp2app_temp(g3_id, + unwrap_spec=[gateway.ObjSpace, + r_longlong]) + w_app_g3_r = space.wrap(app_g3_r) + raises(gateway.OperationError,space.call_function,w_app_g3_r,w(1.0)) + def test_interp2app_unwrap_spec_unicode(self): space = self.space w = space.wrap diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -51,7 +51,7 @@ def uint_w(self, space): return r_uint(NonConstant(42)) - def bigint_w(self, space): + def bigint_w(self, space, allow_conversion=True): from rpython.rlib.rbigint import rbigint return rbigint.fromint(NonConstant(42)) diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -37,6 +37,9 @@ def int_w(self, space, allow_conversion=True): self._typed_unwrap_error(space, "integer") + def bigint_w(self, space, allow_conversion=True): + self._typed_unwrap_error(space, "integer") + def float_w(self, space): return self.floatval diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -325,7 +325,10 @@ "cannot convert negative integer to unsigned") return r_uint(intval) - def bigint_w(self, space): + def bigint_w(self, space, allow_conversion=True): + return rbigint.fromint(self.intval) + + def _bigint_w(self, space): return rbigint.fromint(self.intval) def float_w(self, space): diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -261,7 +261,10 @@ raise oefmt(space.w_OverflowError, "long int too large to convert to unsigned int") - def bigint_w(self, space): + def bigint_w(self, space, allow_conversion=True): + return self.num + + def _bigint_w(self, space): return self.num def float_w(self, space): diff --git a/pypy/objspace/std/smalllongobject.py b/pypy/objspace/std/smalllongobject.py --- a/pypy/objspace/std/smalllongobject.py +++ b/pypy/objspace/std/smalllongobject.py @@ -63,7 +63,10 @@ raise oefmt(space.w_OverflowError, "long int too large to convert to unsigned int") - def bigint_w(self, space): + def bigint_w(self, space, allow_conversion=True): + return self.asbigint() + + def _bigint_w(self, space): return self.asbigint() def float_w(self, space): From noreply at buildbot.pypy.org Wed Feb 26 18:09:43 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 26 Feb 2014 18:09:43 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: implement allow_conversion=True also for float_w: this finally fixes the struct+numpy failing test which I introduced at the beginning of the branch Message-ID: <20140226170943.D7E021D27B9@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69467:229c615d291d Date: 2014-02-26 18:08 +0100 
http://bitbucket.org/pypy/pypy/changeset/229c615d291d/ Log: implement allow_conversion=True also for float_w: this finally fixes the struct+numpy failing test which I introduced at the beginning of the branch diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -212,7 +212,13 @@ def _int_w(self, space): self._typed_unwrap_error(space, "integer") - def float_w(self, space): + def float_w(self, space, allow_conversion=True): + w_obj = self + if allow_conversion: + w_obj = space.float(self) + return w_obj._float_w(space) + + def _float_w(self, space): self._typed_unwrap_error(space, "float") def uint_w(self, space): @@ -1388,13 +1394,17 @@ def bigint_w(self, w_obj, allow_conversion=True): """ - Like int_w, for returns a rlib.rbigint object and call __long__ is + Like int_w, but return a rlib.rbigint object and call __long__ if allow_conversion is True. """ return w_obj.bigint_w(self, allow_conversion) - def float_w(self, w_obj): - return w_obj.float_w(self) + def float_w(self, w_obj, allow_conversion=True): + """ + Like int_w, but return an interp-level float and call __float__ if + allow_conversion is True. + """ + return w_obj.float_w(self, allow_conversion) def realstr_w(self, w_obj): # Like str_w, but only works if w_obj is really of type 'str'. diff --git a/pypy/module/cppyy/test/test_zjit.py b/pypy/module/cppyy/test/test_zjit.py --- a/pypy/module/cppyy/test/test_zjit.py +++ b/pypy/module/cppyy/test/test_zjit.py @@ -120,7 +120,7 @@ return FakeInt(int(obj)) assert 0 - def float_w(self, w_obj): + def float_w(self, w_obj, allow_conversion=True): assert isinstance(w_obj, FakeFloat) return w_obj.val diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -152,7 +152,7 @@ assert isinstance(w_obj, interp_boxes.W_GenericBox) return self.float(w_obj.descr_float(self)) - def float_w(self, w_obj): + def float_w(self, w_obj, allow_conversion=True): assert isinstance(w_obj, FloatObject) return w_obj.floatval diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -868,8 +868,8 @@ args_w = space.fixedview(w_tuple) if len(args_w) != 2: raise OperationError(space.w_TypeError, space.wrap(msg)) - actime = space.float_w(args_w[0]) - modtime = space.float_w(args_w[1]) + actime = space.float_w(args_w[0], allow_conversion=False) + modtime = space.float_w(args_w[1], allow_conversion=False) dispatch_filename(rposix.utime, 2)(space, w_path, (actime, modtime)) except OSError, e: raise wrap_oserror2(space, e, w_path) diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -115,7 +115,7 @@ def _freeze_(self): return True - def float_w(self, w_obj): + def float_w(self, w_obj, allow_conversion=True): is_root(w_obj) return NonConstant(42.5) diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -40,7 +40,10 @@ def bigint_w(self, space, allow_conversion=True): self._typed_unwrap_error(space, "integer") - def float_w(self, space): + def float_w(self, space, allow_conversion=True): + return self.floatval + + def _float_w(self, space): return self.floatval def int(self, space): diff --git 
a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -331,9 +331,12 @@ def _bigint_w(self, space): return rbigint.fromint(self.intval) - def float_w(self, space): + def float_w(self, space, allow_conversion=True): return float(self.intval) + # note that we do NOT implement _float_w, because __float__ cannot return + # an int + def int(self, space): if type(self) is W_IntObject: return self diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -267,7 +267,10 @@ def _bigint_w(self, space): return self.num - def float_w(self, space): + def float_w(self, space, allow_conversion=True): + return self.tofloat(space) + + def _float_w(self, space): return self.tofloat(space) def int(self, space): diff --git a/pypy/objspace/std/smalllongobject.py b/pypy/objspace/std/smalllongobject.py --- a/pypy/objspace/std/smalllongobject.py +++ b/pypy/objspace/std/smalllongobject.py @@ -69,7 +69,7 @@ def _bigint_w(self, space): return self.asbigint() - def float_w(self, space): + def _float_w(self, space): return float(self.longlong) def int(self, space): From noreply at buildbot.pypy.org Wed Feb 26 18:51:41 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 26 Feb 2014 18:51:41 +0100 (CET) Subject: [pypy-commit] pypy default: Allow @jit.elidable_promote to work also with string arguments. Message-ID: <20140226175141.414161C03D5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69468:0e5972cfb8d4 Date: 2014-02-26 18:35 +0100 http://bitbucket.org/pypy/pypy/changeset/0e5972cfb8d4/ Log: Allow @jit.elidable_promote to work also with string arguments. Done with a hack of saying hint(arg, promote=True, promote_string=True) and letting jtransform.py choose. 
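The trick described in the log message is small enough to model in isolation. Below is a simplified, stand-alone sketch in plain Python (no flow graphs, no lltype; the function name is invented): the decorator attaches both promotion hints to every argument, and a later rewriting step keeps only the hint that matches the argument's static type, which is what the jtransform change in the diff that follows does on real flow-graph operations:

    # Toy model of "emit both hints, let the rewriter pick one".
    # Not the RPython/JIT code; only the hint names mirror the real ones.

    def pick_promote_hint(hints, arg_type):
        """Given the hints attached to a value and the value's static type,
        drop whichever of 'promote'/'promote_string' does not apply."""
        if hints.get('promote_string') and hints.get('promote'):
            hints = hints.copy()
            if arg_type is str:
                del hints['promote']          # string: keep promote_string
            else:
                del hints['promote_string']   # anything else: keep promote
        return hints

    # The decorator can now emit both hints unconditionally...
    emitted = {'promote': True, 'promote_string': True}
    # ...and the rewriting pass resolves them per argument type:
    assert pick_promote_hint(emitted, str) == {'promote_string': True}
    assert pick_promote_hint(emitted, int) == {'promote': True}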
diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -499,6 +499,16 @@ def rewrite_op_hint(self, op): hints = op.args[1].value + + # hack: if there are both 'promote' and 'promote_string', kill + # one of them based on the type of the value + if hints.get('promote_string') and hints.get('promote'): + hints = hints.copy() + if op.args[0].concretetype == lltype.Ptr(rstr.STR): + del hints['promote'] + else: + del hints['promote_string'] + if hints.get('promote') and op.args[0].concretetype is not lltype.Void: assert op.args[0].concretetype != lltype.Ptr(rstr.STR) kind = getkind(op.args[0].concretetype) diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -1050,6 +1050,37 @@ assert op1.result == v2 assert op0.opname == '-live-' +def test_double_promote_str(): + PSTR = lltype.Ptr(rstr.STR) + v1 = varoftype(PSTR) + v2 = varoftype(PSTR) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = SpaceOperation('hint', + [v1, Constant({'promote_string': True}, lltype.Void)], + v2) + op2 = SpaceOperation('hint', + [v1, Constant({'promote_string': True, + 'promote': True}, lltype.Void)], + v2) + lst1 = tr.rewrite_operation(op1) + lst2 = tr.rewrite_operation(op2) + assert lst1 == lst2 + +def test_double_promote_nonstr(): + v1 = varoftype(lltype.Signed) + v2 = varoftype(lltype.Signed) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = SpaceOperation('hint', + [v1, Constant({'promote': True}, lltype.Void)], + v2) + op2 = SpaceOperation('hint', + [v1, Constant({'promote_string': True, + 'promote': True}, lltype.Void)], + v2) + lst1 = tr.rewrite_operation(op1) + lst2 = tr.rewrite_operation(op2) + assert lst1 == lst2 + def test_unicode_concat(): # test that the oopspec is present and correctly transformed PSTR = lltype.Ptr(rstr.UNICODE) diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -130,7 +130,9 @@ if promote_args != 'all': args = [args[int(i)] for i in promote_args.split(",")] for arg in args: - code.append(" %s = hint(%s, promote=True)\n" % (arg, arg)) + code.append( #use both hints, and let jtransform pick the right one + " %s = hint(%s, promote=True, promote_string=True)\n" % + (arg, arg)) code.append(" return _orig_func_unlikely_name(%s)\n" % (argstring, )) d = {"_orig_func_unlikely_name": func, "hint": hint} exec py.code.Source("\n".join(code)).compile() in d From noreply at buildbot.pypy.org Wed Feb 26 19:15:13 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Wed, 26 Feb 2014 19:15:13 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: don't allow unwanted conversions between cdatas Message-ID: <20140226181513.5027F1C03FC@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69469:20289253d2ac Date: 2014-02-26 19:14 +0100 http://bitbucket.org/pypy/pypy/changeset/20289253d2ac/ Log: don't allow unwanted conversions between cdatas diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py --- a/pypy/module/_cffi_backend/misc.py +++ b/pypy/module/_cffi_backend/misc.py @@ -131,13 +131,13 @@ if space.is_w(space.type(w_ob), space.w_int): # shortcut return space.int_w(w_ob) try: - bigint = space.bigint_w(w_ob) + bigint = space.bigint_w(w_ob, allow_conversion=False) except 
OperationError, e: if not e.match(space, space.w_TypeError): raise if _is_a_float(space, w_ob): raise - bigint = space.bigint_w(space.int(w_ob)) + bigint = space.bigint_w(space.int(w_ob), allow_conversion=False) try: return bigint.tolonglong() except OverflowError: @@ -148,13 +148,13 @@ if space.is_w(space.type(w_ob), space.w_int): # shortcut return space.int_w(w_ob) try: - bigint = space.bigint_w(w_ob) + bigint = space.bigint_w(w_ob, allow_conversion=False) except OperationError, e: if not e.match(space, space.w_TypeError): raise if _is_a_float(space, w_ob): raise - bigint = space.bigint_w(space.int(w_ob)) + bigint = space.bigint_w(space.int(w_ob), allow_conversion=False) try: return bigint.toint() except OverflowError: @@ -171,13 +171,13 @@ raise OperationError(space.w_OverflowError, space.wrap(neg_msg)) return r_ulonglong(value) try: - bigint = space.bigint_w(w_ob) + bigint = space.bigint_w(w_ob, allow_conversion=False) except OperationError, e: if not e.match(space, space.w_TypeError): raise if strict and _is_a_float(space, w_ob): raise - bigint = space.bigint_w(space.int(w_ob)) + bigint = space.bigint_w(space.int(w_ob), allow_conversion=False) if strict: try: return bigint.toulonglong() @@ -196,13 +196,13 @@ raise OperationError(space.w_OverflowError, space.wrap(neg_msg)) return r_uint(value) try: - bigint = space.bigint_w(w_ob) + bigint = space.bigint_w(w_ob, allow_conversion=False) except OperationError, e: if not e.match(space, space.w_TypeError): raise if strict and _is_a_float(space, w_ob): raise - bigint = space.bigint_w(space.int(w_ob)) + bigint = space.bigint_w(space.int(w_ob), allow_conversion=False) if strict: try: return bigint.touint() From noreply at buildbot.pypy.org Wed Feb 26 20:22:57 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 26 Feb 2014 20:22:57 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: hg merge default Message-ID: <20140226192257.D6B061C03FC@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69470:94cc167b7402 Date: 2014-02-26 20:22 +0100 http://bitbucket.org/pypy/pypy/changeset/94cc167b7402/ Log: hg merge default diff --git a/include/PyPy.h b/include/PyPy.h --- a/include/PyPy.h +++ b/include/PyPy.h @@ -8,9 +8,14 @@ extern "C" { #endif +/* You should call this first once. */ +#define pypy_init(need_threads) do { pypy_asm_stack_bottom(); \ +rpython_startup_code();\ + if (need_threads) pypy_init_threads(); } while (0) -/* You should call this first once. */ +// deprecated interface void rpython_startup_code(void); +void pypy_init_threads(void); /* Initialize the home directory of PyPy. It is necessary to call this. @@ -26,11 +31,10 @@ /* If your program has multiple threads, then you need to call - pypy_init_threads() once at init time, and then pypy_thread_attach() - once in each other thread that just started and in which you want to - run Python code (including via callbacks, see below). + pypy_thread_attach() once in each other thread that just started + and in which you want to run Python code (including via callbacks, + see below). DO NOT CALL IT IN THE MAIN THREAD */ -void pypy_init_threads(void); void pypy_thread_attach(void); diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/embedding.rst @@ -0,0 +1,101 @@ + +PyPy has a very minimal and a very strange embedding interface, based on +the usage of `cffi`_ and the philosophy that Python is a better language in C. 
+It was developed in collaboration with Roberto De Ioris from the `uwsgi`_ +project. The `PyPy uwsgi plugin`_ is a good example of usage of such interface. + +The first thing that you need, that we plan to change in the future, is to +compile PyPy yourself with an option ``--shared``. Consult the +`how to compile PyPy`_ doc for details. That should result in ``libpypy.so`` +or ``pypy.dll`` file or something similar, depending on your platform. Consult +your platform specification for details. + +The resulting shared library has very few functions that are however enough +to make a full API working, provided you'll follow a few principles. The API +is: + +.. function:: void pypy_init(int need_threads); + + This is a function that you have to call (once) before calling anything. + It initializes the RPython/PyPy GC and does a bunch of necessary startup + code. This function cannot fail. Pass 1 in case you need thread support, 0 + otherwise. + +.. function:: long pypy_setup_home(char* home, int verbose); + + This is another function that you have to call at some point, without + it you would not be able to find the standard library (and run pretty much + nothing). Arguments: + + * ``home``: null terminated path + + * ``verbose``: if non-zero, would print error messages to stderr + + Function returns 0 on success or 1 on failure, can be called multiple times + until the library is found. + +.. function:: int pypy_execute_source(char* source); + + Execute the source code given in the ``source`` argument. Will print + the error message to stderr upon failure and return 1, otherwise returns 0. + You should really do your own error handling in the source. It'll acquire + the GIL. + +.. function:: void pypy_thread_attach(void); + + In case your application uses threads that are initialized outside of PyPy, + you need to call this function to tell the PyPy GC to track this thread. + Note that this function is not thread-safe itself, so you need to guard it + with a mutex. Do not call it from the main thread. + +Simple example +-------------- + +Note that this API is a lot more minimal than say CPython C API, so at first +it's obvious to think that you can't do much. However, the trick is to do +all the logic in Python and expose it via `cffi`_ callbacks. Let's assume +we're on linux and pypy is put in ``/opt/pypy`` (a source checkout) and +library is in ``/opt/pypy/libpypy-c.so``. We write a little C program +(for simplicity assuming that all operations will be performed:: + + #include "include/PyPy.h" + #include + + const char source[] = "print 'hello from pypy'"; + + int main() + { + int res; + + rpython_startup_code(); + res = pypy_execute_source((char*)source); + if (res) { + printf("Error calling pypy_execute_source!\n"); + } + return res; + } + +If we save it as ``x.c`` now, compile it and run it with:: + + fijal at hermann:/opt/pypy$ gcc -o x x.c -lpypy-c -L. + fijal at hermann:~/src/pypy$ LD_LIBRARY_PATH=. ./x + hello from pypy + +Worked! + +More advanced example +--------------------- + +Typically we need something more to do than simply execute source. The following +is a fully fledged example, please consult cffi documentation for details. + +xxx + +Threading +--------- + +XXXX I don't understand what's going on, discuss with unbit + +.. _`cffi`: http://cffi.readthedocs.org/ +.. _`uwsgi`: http://uwsgi-docs.readthedocs.org/en/latest/ +.. 
_`PyPy uwsgi plugin`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -145,11 +145,13 @@ After you successfully manage to get PyPy's source you can read more about: - `Building and using PyPy's Python interpreter`_ + - `Embedding PyPy`_ - `Learning more about the RPython toolchain and how to develop (with) PyPy`_ - `Tutorial for how to write an interpreter with the RPython toolchain and make it fast`_ - `Look at our benchmark results`_ .. _`Building and using PyPy's Python interpreter`: getting-started-python.html +.. _`Embedding PyPy`: embedding.html .. _`Learning more about the RPython toolchain and how to develop (with) PyPy`: getting-started-dev.html .. _`Tutorial for how to write an interpreter with the RPython toolchain and make it fast`: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html .. _`Look at our benchmark results`: http://speed.pypy.org diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -82,6 +82,7 @@ from rpython.rlib.entrypoint import entrypoint from rpython.rtyper.lltypesystem import rffi, lltype + from rpython.rtyper.lltypesystem.lloperation import llop w_pathsetter = space.appexec([], """(): def f(path): @@ -93,6 +94,7 @@ @entrypoint('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): from pypy.module.sys.initpath import pypy_find_stdlib + llop.gc_stack_bottom(lltype.Void) verbose = rffi.cast(lltype.Signed, verbose) if ll_home: home = rffi.charp2str(ll_home) @@ -120,8 +122,11 @@ @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): + rffi.aroundstate.after() + llop.gc_stack_bottom(lltype.Void) source = rffi.charp2str(ll_source) res = _pypy_execute_source(source) + rffi.aroundstate.before() return rffi.cast(rffi.INT, res) @entrypoint('main', [], c_name='pypy_init_threads') diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -499,6 +499,16 @@ def rewrite_op_hint(self, op): hints = op.args[1].value + + # hack: if there are both 'promote' and 'promote_string', kill + # one of them based on the type of the value + if hints.get('promote_string') and hints.get('promote'): + hints = hints.copy() + if op.args[0].concretetype == lltype.Ptr(rstr.STR): + del hints['promote'] + else: + del hints['promote_string'] + if hints.get('promote') and op.args[0].concretetype is not lltype.Void: assert op.args[0].concretetype != lltype.Ptr(rstr.STR) kind = getkind(op.args[0].concretetype) diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -1050,6 +1050,37 @@ assert op1.result == v2 assert op0.opname == '-live-' +def test_double_promote_str(): + PSTR = lltype.Ptr(rstr.STR) + v1 = varoftype(PSTR) + v2 = varoftype(PSTR) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = SpaceOperation('hint', + [v1, Constant({'promote_string': True}, lltype.Void)], + v2) + op2 = SpaceOperation('hint', + [v1, Constant({'promote_string': True, + 'promote': True}, lltype.Void)], + v2) + lst1 = tr.rewrite_operation(op1) + lst2 = 
tr.rewrite_operation(op2) + assert lst1 == lst2 + +def test_double_promote_nonstr(): + v1 = varoftype(lltype.Signed) + v2 = varoftype(lltype.Signed) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = SpaceOperation('hint', + [v1, Constant({'promote': True}, lltype.Void)], + v2) + op2 = SpaceOperation('hint', + [v1, Constant({'promote_string': True, + 'promote': True}, lltype.Void)], + v2) + lst1 = tr.rewrite_operation(op1) + lst2 = tr.rewrite_operation(op2) + assert lst1 == lst2 + def test_unicode_concat(): # test that the oopspec is present and correctly transformed PSTR = lltype.Ptr(rstr.UNICODE) diff --git a/rpython/jit/metainterp/test/test_list.py b/rpython/jit/metainterp/test/test_list.py --- a/rpython/jit/metainterp/test/test_list.py +++ b/rpython/jit/metainterp/test/test_list.py @@ -98,8 +98,8 @@ self.check_resops(setarrayitem_gc=0, call=0, getarrayitem_gc=0) def test_vlist_alloc_and_set(self): - # the check_loops fails, because [non-null] * n is not supported yet - # (it is implemented as a residual call) + # the check_loops fails, because [non-null] * n is only supported + # if n < 15 (otherwise it is implemented as a residual call) jitdriver = JitDriver(greens = [], reds = ['n']) def f(n): l = [1] * 20 @@ -116,7 +116,7 @@ res = self.meta_interp(f, [10], listops=True) assert res == f(10) - py.test.skip("'[non-null] * n' gives a residual call so far") + py.test.skip("'[non-null] * n' for n >= 15 gives a residual call so far") self.check_loops(setarrayitem_gc=0, getarrayitem_gc=0, call=0) def test_arraycopy_simpleoptimize(self): @@ -287,6 +287,74 @@ assert res == 5 self.check_resops(call=0) + def test_list_mul_virtual(self): + class Foo: + def __init__(self, l): + self.l = l + l[0] = self + + myjitdriver = JitDriver(greens = [], reds = ['y']) + def f(y): + while y > 0: + myjitdriver.jit_merge_point(y=y) + Foo([None] * 5) + y -= 1 + return 42 + + self.meta_interp(f, [5]) + self.check_resops({'int_sub': 2, + 'int_gt': 2, + 'guard_true': 2, + 'jump': 1}) + + def test_list_mul_virtual_nonzero(self): + class base: + pass + class Foo(base): + def __init__(self, l): + self.l = l + l[0] = self + class nil(base): + pass + + nil = nil() + + myjitdriver = JitDriver(greens = [], reds = ['y']) + def f(y): + while y > 0: + myjitdriver.jit_merge_point(y=y) + Foo([nil] * 5) + y -= 1 + return 42 + + self.meta_interp(f, [5]) + self.check_resops({'int_sub': 2, + 'int_gt': 2, + 'guard_true': 2, + 'jump': 1}) + + def test_list_mul_unsigned_virtual(self): + from rpython.rlib.rarithmetic import r_uint + + class Foo: + def __init__(self, l): + self.l = l + l[0] = self + + myjitdriver = JitDriver(greens = [], reds = ['y']) + def f(y): + while y > 0: + myjitdriver.jit_merge_point(y=y) + Foo([None] * r_uint(5)) + y -= 1 + return 42 + + self.meta_interp(f, [5]) + self.check_resops({'int_sub': 2, + 'int_gt': 2, + 'guard_true': 2, + 'jump': 1}) + class TestLLtype(ListTests, LLJitMixin): def test_listops_dont_invalidate_caches(self): diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -130,7 +130,9 @@ if promote_args != 'all': args = [args[int(i)] for i in promote_args.split(",")] for arg in args: - code.append(" %s = hint(%s, promote=True)\n" % (arg, arg)) + code.append( #use both hints, and let jtransform pick the right one + " %s = hint(%s, promote=True, promote_string=True)\n" % + (arg, arg)) code.append(" return _orig_func_unlikely_name(%s)\n" % (argstring, )) d = {"_orig_func_unlikely_name": func, "hint": hint} exec 
py.code.Source("\n".join(code)).compile() in d diff --git a/rpython/rtyper/test/test_generator.py b/rpython/rtyper/test/test_generator.py --- a/rpython/rtyper/test/test_generator.py +++ b/rpython/rtyper/test/test_generator.py @@ -88,3 +88,16 @@ return s res = self.interpret(g, []) assert res == 6 + + def test_send(self): + def f(): + yield (yield 1) + 1 + def g(): + gen = f() + res = f.send(2) + assert res == 1 + res = f.next() + assert res == 3 + + res = self.interpret(g, []) + diff --git a/rpython/rtyper/test/test_rlist.py b/rpython/rtyper/test/test_rlist.py --- a/rpython/rtyper/test/test_rlist.py +++ b/rpython/rtyper/test/test_rlist.py @@ -1619,3 +1619,17 @@ rgc.ll_arraycopy = old_arraycopy # assert 2 <= res <= 10 + + def test_alloc_and_set(self): + def fn(i): + lst = [0] * r_uint(i) + return lst + t, rtyper, graph = self.gengraph(fn, [int]) + block = graph.startblock + seen = 0 + for op in block.operations: + if op.opname in ['cast_int_to_uint', 'cast_uint_to_int']: + continue + assert op.opname == 'direct_call' + seen += 1 + assert seen == 1 diff --git a/rpython/translator/transform.py b/rpython/translator/transform.py --- a/rpython/translator/transform.py +++ b/rpython/translator/transform.py @@ -30,7 +30,7 @@ # [a] * b # --> # c = newlist(a) -# d = mul(c, int b) +# d = mul(c, b) # --> # d = alloc_and_set(b, a) @@ -44,8 +44,7 @@ len(op.args) == 1): length1_lists[op.result] = op.args[0] elif (op.opname == 'mul' and - op.args[0] in length1_lists and - self.gettype(op.args[1]) is int): + op.args[0] in length1_lists): new_op = SpaceOperation('alloc_and_set', (op.args[1], length1_lists[op.args[0]]), op.result) From noreply at buildbot.pypy.org Wed Feb 26 22:23:45 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Wed, 26 Feb 2014 22:23:45 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Introduce a fast-path for interp2app when __args__ is passed through and the interp-level function is a method. Message-ID: <20140226212345.4D9FD1C3369@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69471:153f2fa2ab59 Date: 2014-02-26 22:22 +0100 http://bitbucket.org/pypy/pypy/changeset/153f2fa2ab59/ Log: Introduce a fast-path for interp2app when __args__ is passed through and the interp-level function is a method. 
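As a rough illustration of what this fast path buys, the stand-alone sketch below (plain Python, invented names, not the real gateway machinery) contrasts the two call paths: when the interp-level signature already takes the packed __args__ object, the wrapper only has to put the receiver in front and can forward the argument object untouched, instead of unpacking and repacking it:

    # Illustrative sketch of the pass-through-arguments fast path.

    class Arguments(object):
        def __init__(self, args_w):
            self.args_w = args_w

    def make_wrapper(func, takes_packed_args):
        if takes_packed_args:
            # fast path: forward the Arguments object as-is
            def wrapper(space, w_obj, args):
                return func(w_obj, space, args)
        else:
            # generic path: unpack so the callee sees plain arguments
            def wrapper(space, w_obj, args):
                return func(w_obj, space, *args.args_w)
        return wrapper

    class W_Thing(object):
        def f(self, space, __args__):
            return ('f',) + tuple(__args__.args_w)

        def g(self, space, x):
            return ('g', x)

    space = object()
    w_self = W_Thing()
    call_f = make_wrapper(W_Thing.f, takes_packed_args=True)
    assert call_f(space, w_self, Arguments([7])) == ('f', 7)
    call_g = make_wrapper(W_Thing.g, takes_packed_args=False)
    assert call_g(space, w_self, Arguments([7])) == ('g', 7)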
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -592,6 +592,9 @@ elif unwrap_spec == [ObjSpace, W_Root, Arguments]: self.__class__ = BuiltinCodePassThroughArguments1 self.func__args__ = func + elif unwrap_spec == [self_type, ObjSpace, Arguments]: + self.__class__ = BuiltinCodePassThroughArgumentsMethod + self.func__args__ = func else: self.__class__ = globals()['BuiltinCode%d' % arity] setattr(self, 'fastfunc_%d' % arity, fastfunc) @@ -700,6 +703,27 @@ return w_result +class BuiltinCodePassThroughArgumentsMethod(BuiltinCodePassThroughArguments1): + # almost the same as BuiltinCodePassThroughArguments1 but passes w_obj + # first for the case when self.func__args__ is a method + + def funcrun_obj(self, func, w_obj, args): + space = func.space + try: + w_result = self.func__args__(w_obj, space, args) + except DescrMismatch: + return args.firstarg().descr_call_mismatch(space, + self.descrmismatch_op, + self.descr_reqcls, + args.prepend(w_obj)) + except Exception, e: + self.handle_exception(space, e) + w_result = None + if w_result is None: + w_result = space.w_None + return w_result + + class BuiltinCode0(BuiltinCode): _immutable_ = True fast_natural_arity = 0 diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -824,6 +824,28 @@ assert len(called) == 1 assert isinstance(called[0], argument.Arguments) + def test_pass_trough_arguments_method(self): + space = self.space + + called = [] + + class W_Something(W_Root): + def f(self, space, __args__): + called.append(__args__) + a_w, _ = __args__.unpack() + return space.newtuple([space.wrap('f')]+a_w) + + w_f = space.wrap(gateway.interp2app_temp(W_Something.f)) + + w_self = space.wrap(W_Something()) + args = argument.Arguments(space, [space.wrap(7)]) + + w_res = space.call_obj_args(w_f, w_self, args) + assert space.is_true(space.eq(w_res, space.wrap(('f', 7)))) + + # white-box check for opt + assert called[0] is args + class AppTestKeywordsToBuiltinSanity(object): def test_type(self): From noreply at buildbot.pypy.org Wed Feb 26 23:51:22 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 26 Feb 2014 23:51:22 +0100 (CET) Subject: [pypy-commit] stmgc default: Only acquire the mutex_pages_lock if there are really objects to free Message-ID: <20140226225122.020FC1C3973@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r885:39c5f170d4d1 Date: 2014-02-26 23:45 +0100 http://bitbucket.org/pypy/stmgc/changeset/39c5f170d4d1/ Log: Only acquire the mutex_pages_lock if there are really objects to free diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -209,15 +209,20 @@ /* free any object left from 'young_outside_nursery' */ if (!tree_is_cleared(STM_PSEGMENT->young_outside_nursery)) { - mutex_pages_lock(); - + bool locked = false; wlog_t *item; TREE_LOOP_FORWARD(*STM_PSEGMENT->young_outside_nursery, item) { + if (!locked) { + mutex_pages_lock(); + locked = true; + } _stm_large_free(stm_object_pages + item->addr); } TREE_LOOP_END; + if (locked) + mutex_pages_unlock(); + tree_clear(STM_PSEGMENT->young_outside_nursery); - mutex_pages_unlock(); } } From noreply at buildbot.pypy.org Wed Feb 26 23:51:20 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 26 Feb 2014 23:51:20 +0100 (CET) Subject: [pypy-commit] stmgc default: Do we really need this? 
Message-ID: <20140226225120.D4E991C3973@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r884:72584e5eb7df Date: 2014-02-26 23:27 +0100 http://bitbucket.org/pypy/stmgc/changeset/72584e5eb7df/ Log: Do we really need this? diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -77,6 +77,8 @@ /* we will issue a safe point and wait: */ STM_PSEGMENT->safe_point = SP_SAFE_POINT_CANNOT_COLLECT; + // XXX do we really need a safe_point here? It seems we can + // kill it and the whole SP_SAFE_POINT_CANNOT_COLLECT /* wait, hopefully until the other thread broadcasts "I'm done aborting" (spurious wake-ups are ok). */ From noreply at buildbot.pypy.org Wed Feb 26 23:51:23 2014 From: noreply at buildbot.pypy.org (arigo) Date: Wed, 26 Feb 2014 23:51:23 +0100 (CET) Subject: [pypy-commit] stmgc default: Readd logic for a thread-local object. Message-ID: <20140226225123.14A1E1C3973@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r886:f78b189223c1 Date: 2014-02-26 23:51 +0100 http://bitbucket.org/pypy/stmgc/changeset/f78b189223c1/ Log: Readd logic for a thread-local object. Helps "duhton demo/list_transaction.duh", but it still seems to be caught sometimes in infnite loops. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -170,6 +170,7 @@ STM_PSEGMENT->running_pthread = pthread_self(); #endif STM_PSEGMENT->shadowstack_at_start_of_transaction = tl->shadowstack; + STM_PSEGMENT->threadlocal_at_start_of_transaction = tl->thread_local_obj; STM_SEGMENT->nursery_end = NURSERY_END; dprintf(("start_transaction\n")); @@ -469,6 +470,7 @@ stm_jmpbuf_t *jmpbuf_ptr = STM_SEGMENT->jmpbuf_ptr; stm_thread_local_t *tl = STM_SEGMENT->running_thread; tl->shadowstack = STM_PSEGMENT->shadowstack_at_start_of_transaction; + tl->thread_local_obj = STM_PSEGMENT->threadlocal_at_start_of_transaction; _finish_transaction(); diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -117,8 +117,10 @@ /* Temp for minor collection */ bool minor_collect_will_commit_now; - /* In case of abort, we restore the 'shadowstack' field. */ + /* In case of abort, we restore the 'shadowstack' field and the + 'thread_local_obj' field. 
*/ object_t **shadowstack_at_start_of_transaction; + object_t *threadlocal_at_start_of_transaction; /* For debugging */ #ifndef NDEBUG diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -145,6 +145,7 @@ assert(*current != (object_t *)-1); minor_trace_if_young(current); } + minor_trace_if_young(&tl->thread_local_obj); } static inline void _collect_now(object_t *obj) diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -133,6 +133,7 @@ stm_all_thread_locals->prev = tl; num = tl->prev->associated_segment_num + 1; } + tl->thread_local_obj = NULL; /* assign numbers consecutively, but that's for tests; we could also assign the same number to all of them and they would get their own diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -52,6 +52,8 @@ typedef struct stm_thread_local_s { /* every thread should handle the shadow stack itself */ object_t **shadowstack, **shadowstack_base; + /* a generic optional thread-local object */ + object_t *thread_local_obj; /* the next fields are handled automatically by the library */ int associated_segment_num; struct stm_thread_local_s *prev, *next; diff --git a/duhton/duhton.c b/duhton/duhton.c --- a/duhton/duhton.c +++ b/duhton/duhton.c @@ -54,9 +54,9 @@ Du_Print(res, 1); } - _du_save1(stm_thread_local_obj); - stm_collect(0); /* hack... */ - _du_restore1(stm_thread_local_obj); + //_du_save1(stm_thread_local_obj); + //stm_collect(0); /* hack... */ + //_du_restore1(stm_thread_local_obj); stm_commit_transaction(); diff --git a/duhton/duhton.h b/duhton/duhton.h --- a/duhton/duhton.h +++ b/duhton/duhton.h @@ -216,5 +216,5 @@ extern pthread_t *all_threads; extern int all_threads_count; -extern __thread DuObject *stm_thread_local_obj; /* XXX temp */ +//extern __thread DuObject *stm_thread_local_obj; /* XXX temp */ #endif /* _DUHTON_H_ */ diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -685,9 +685,9 @@ if (cons != Du_None) Du_FatalError("run-transactions: expected no argument"); - _du_save1(stm_thread_local_obj); - stm_collect(0); /* hack... */ - _du_restore1(stm_thread_local_obj); + //_du_save1(stm_thread_local_obj); + //stm_collect(0); /* hack... 
*/ + //_du_restore1(stm_thread_local_obj); stm_commit_transaction(); diff --git a/duhton/transaction.c b/duhton/transaction.c --- a/duhton/transaction.c +++ b/duhton/transaction.c @@ -43,35 +43,34 @@ /************************************************************/ -__thread DuObject *stm_thread_local_obj = NULL; /* XXX temp */ - +#define TLOBJ (*((DuObject **)(&stm_thread_local.thread_local_obj))) void Du_TransactionAdd(DuObject *code, DuObject *frame) { DuObject *cell = DuCons_New(code, frame); - DuObject *pending = stm_thread_local_obj; + DuObject *pending = TLOBJ; if (pending == NULL) { pending = Du_None; } pending = DuCons_New(cell, pending); - stm_thread_local_obj = pending; + TLOBJ = pending; } void Du_TransactionRun(void) { - if (stm_thread_local_obj == NULL) + if (TLOBJ == NULL) return; stm_start_inevitable_transaction(&stm_thread_local); DuConsObject *root = du_pending_transactions; _du_write1(root); - root->cdr = stm_thread_local_obj; + root->cdr = TLOBJ; stm_commit_transaction(); - stm_thread_local_obj = NULL; + TLOBJ = NULL; run_all_threads(); } @@ -80,7 +79,7 @@ static DuObject *next_cell(void) { - DuObject *pending = stm_thread_local_obj; + DuObject *pending = TLOBJ; if (pending == NULL) { /* fish from the global list of pending transactions */ @@ -131,7 +130,7 @@ } /* we have at least one thread-local transaction pending */ - stm_thread_local_obj = NULL; + TLOBJ = NULL; stm_start_inevitable_transaction(&stm_thread_local); @@ -175,22 +174,18 @@ stm_jmpbuf_t here; stm_register_thread_local(&stm_thread_local); - stm_thread_local_obj = NULL; + TLOBJ = NULL; while (1) { DuObject *cell = next_cell(); if (cell == NULL) break; - assert(stm_thread_local_obj == NULL); + assert(TLOBJ == NULL); STM_START_TRANSACTION(&stm_thread_local, here); run_transaction(cell); - _du_save1(stm_thread_local_obj); - stm_collect(0); /* hack.. */ - _du_restore1(stm_thread_local_obj); - stm_commit_transaction(); } From noreply at buildbot.pypy.org Thu Feb 27 00:42:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Feb 2014 00:42:42 +0100 (CET) Subject: [pypy-commit] stmgc default: Fix the big slowness that duhton's list_transaction sometimes shows. Message-ID: <20140226234242.4536C1C35CC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r887:e0e14e5a9b5d Date: 2014-02-27 00:42 +0100 http://bitbucket.org/pypy/stmgc/changeset/e0e14e5a9b5d/ Log: Fix the big slowness that duhton's list_transaction sometimes shows. diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -82,7 +82,9 @@ /* wait, hopefully until the other thread broadcasts "I'm done aborting" (spurious wake-ups are ok). */ + dprintf(("contention: wait C_SAFE_POINT...\n")); cond_wait(C_SAFE_POINT); + dprintf(("contention: done\n")); cond_broadcast(C_RESUME); diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -171,7 +171,7 @@ #endif STM_PSEGMENT->shadowstack_at_start_of_transaction = tl->shadowstack; STM_PSEGMENT->threadlocal_at_start_of_transaction = tl->thread_local_obj; - STM_SEGMENT->nursery_end = NURSERY_END; + assert(STM_SEGMENT->nursery_end == NURSERY_END); dprintf(("start_transaction\n")); @@ -478,12 +478,28 @@ contention.c, we use a broadcast, to make sure that all threads are signalled, including the one that requested an abort, if any. Moreover, we wake up any thread waiting for this one to do a safe - point, if any. + point, if any (in _finish_transaction above). 
Finally, it's + possible that we reach this place from the middle of a piece of + code like wait_for_other_safe_points() which ends in broadcasting + C_RESUME; we must make sure to broadcast it. */ cond_broadcast(C_RELEASE_THREAD_SEGMENT); + cond_broadcast(C_RESUME); mutex_unlock(); + /* It seems to be a good idea, at least in some examples, to sleep + one microsecond here before retrying. Otherwise, what was + observed is that the transaction very often restarts too quickly + for contention.c to react, and before it can do anything, we have + again recreated in this thread a similar situation to the one + that caused contention. Anyway, usleep'ing in case of abort + doesn't seem like a very bad idea. If there are more threads + than segments, it should also make sure another thread gets the + segment next. + */ + usleep(1); + assert(jmpbuf_ptr != NULL); assert(jmpbuf_ptr != (stm_jmpbuf_t *)-1); /* for tests only */ __builtin_longjmp(*jmpbuf_ptr, 1); diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -323,6 +323,7 @@ assert(STM_PSEGMENT->safe_point == SP_RUNNING); while (STM_SEGMENT->nursery_end == NSE_SIGNAL) { + dprintf(("collectable_safe_point...\n")); STM_PSEGMENT->safe_point = SP_SAFE_POINT_CAN_COLLECT; STM_SEGMENT->nursery_end = NURSERY_END; @@ -334,4 +335,5 @@ STM_PSEGMENT->safe_point = SP_RUNNING; } + dprintf(("collectable_safe_point done\n")); } diff --git a/duhton/glob.c b/duhton/glob.c --- a/duhton/glob.c +++ b/duhton/glob.c @@ -705,9 +705,11 @@ int ms = DuInt_AsInt(obj); struct timeval t; + fprintf(stderr, "[sleeping %d ms]\n", ms); t.tv_sec = ms / 1000; t.tv_usec = (ms % 1000) * 1000; select(0, (fd_set *)0, (fd_set *)0, (fd_set *)0, &t); + fprintf(stderr, "[slept %d ms]\n", ms); return Du_None; } From noreply at buildbot.pypy.org Thu Feb 27 01:36:23 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 27 Feb 2014 01:36:23 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: Fix translation. Message-ID: <20140227003623.E8D861C3369@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: remove-remaining-smm Changeset: r69472:d5a11afb2206 Date: 2014-02-27 01:35 +0100 http://bitbucket.org/pypy/pypy/changeset/d5a11afb2206/ Log: Fix translation. 
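The shape of the fix is easier to see stripped of the RPython details. The sketch below is illustrative only (standard compile/exec instead of compile2, a plain isinstance check standing in for space.descr_self_interp_w, invented class names): a tiny trampoline is generated with exec so that what gets stored is an ordinary function taking (space, w_obj, args), with the receiver unwrapping folded in, rather than an unbound method that the annotator cannot handle in this position:

    # Simplified model of the exec-generated trampoline used in this fix.

    def make_passthrough_call(func, self_type):
        miniglobals = {'func': func, 'self_type': self_type}
        d = {}
        source = """if 1:
        def _call(space, w_obj, args):
            # stands in for space.descr_self_interp_w(self_type, w_obj)
            assert isinstance(w_obj, self_type)
            return func(w_obj, space, args)
    """
        exec(compile(source, '<generated>', 'exec'), miniglobals, d)
        return d['_call']

    class W_Greeter(object):
        def say(self, space, args):
            return 'hello ' + ' '.join(args)

    call = make_passthrough_call(W_Greeter.say, W_Greeter)
    assert call('fake-space', W_Greeter(), ['world']) == 'hello world'

The generated _call is a plain function whose globals dictionary carries func and self_type, so nothing method-like has to be stored on the code object.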
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -593,8 +593,16 @@ self.__class__ = BuiltinCodePassThroughArguments1 self.func__args__ = func elif unwrap_spec == [self_type, ObjSpace, Arguments]: - self.__class__ = BuiltinCodePassThroughArgumentsMethod - self.func__args__ = func + self.__class__ = BuiltinCodePassThroughArguments1 + miniglobals = {'func': func, 'self_type': self_type} + d = {} + source = """if 1: + def _call(space, w_obj, args): + self = space.descr_self_interp_w(self_type, w_obj) + return func(self, space, args) + \n""" + exec compile2(source) in miniglobals, d + self.func__args__ = d['_call'] else: self.__class__ = globals()['BuiltinCode%d' % arity] setattr(self, 'fastfunc_%d' % arity, fastfunc) @@ -703,27 +711,6 @@ return w_result -class BuiltinCodePassThroughArgumentsMethod(BuiltinCodePassThroughArguments1): - # almost the same as BuiltinCodePassThroughArguments1 but passes w_obj - # first for the case when self.func__args__ is a method - - def funcrun_obj(self, func, w_obj, args): - space = func.space - try: - w_result = self.func__args__(w_obj, space, args) - except DescrMismatch: - return args.firstarg().descr_call_mismatch(space, - self.descrmismatch_op, - self.descr_reqcls, - args.prepend(w_obj)) - except Exception, e: - self.handle_exception(space, e) - w_result = None - if w_result is None: - w_result = space.w_None - return w_result - - class BuiltinCode0(BuiltinCode): _immutable_ = True fast_natural_arity = 0 From noreply at buildbot.pypy.org Thu Feb 27 01:59:45 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 01:59:45 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: remove arrayimpl.Scalar Message-ID: <20140227005945.EB0A61C3973@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69473:41743661d6f2 Date: 2014-02-26 05:00 -0500 http://bitbucket.org/pypy/pypy/changeset/41743661d6f2/ Log: remove arrayimpl.Scalar diff --git a/pypy/module/micronumpy/arrayimpl/base.py b/pypy/module/micronumpy/arrayimpl/base.py deleted file mode 100644 --- a/pypy/module/micronumpy/arrayimpl/base.py +++ /dev/null @@ -1,20 +0,0 @@ - -class BaseArrayImplementation(object): - def is_scalar(self): - return False - - def base(self): - raise NotImplementedError - - def create_iter(self, shape=None, backward_broadcast=False, require_index=False): - raise NotImplementedError - -class BaseArrayIterator(object): - def next(self): - raise NotImplementedError # purely abstract base class - - def setitem(self, elem): - raise NotImplementedError - - def set_scalar_object(self, value): - raise NotImplementedError # works only on scalars diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -1,4 +1,3 @@ -from pypy.module.micronumpy.arrayimpl import base, scalar from pypy.module.micronumpy import support, loop, iter from pypy.module.micronumpy.base import convert_to_array, W_NDimArray,\ ArrayArgumentException @@ -9,12 +8,12 @@ from pypy.interpreter.buffer import RWBuffer from rpython.rlib import jit from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rlib.rawstorage import free_raw_storage, raw_storage_getitem,\ - raw_storage_setitem, RAW_STORAGE +from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage, \ + raw_storage_getitem, raw_storage_setitem, RAW_STORAGE 
from rpython.rlib.debug import make_sure_not_resized -class BaseConcreteArray(base.BaseArrayImplementation): +class BaseConcreteArray(object): start = 0 parent = None @@ -46,9 +45,6 @@ def setslice(self, space, arr): impl = arr.implementation - if impl.is_scalar(): - self.fill(space, impl.get_scalar_value()) - return shape = shape_agreement(space, self.get_shape(), arr) if impl.storage == self.storage: impl = impl.copy(space) @@ -64,9 +60,12 @@ # Since we got to here, prod(new_shape) == self.size new_strides = None if self.size > 0: - new_strides = calc_new_strides(new_shape, self.get_shape(), - self.get_strides(), self.order) - if new_strides: + if len(self.get_shape()) == 0: + new_strides = [self.dtype.elsize] * len(new_shape) + else: + new_strides = calc_new_strides(new_shape, self.get_shape(), + self.get_strides(), self.order) + if new_strides is not None: # We can create a view, strides somehow match up. ndims = len(new_shape) new_backstrides = [0] * ndims @@ -75,10 +74,6 @@ assert isinstance(orig_array, W_NDimArray) or orig_array is None return SliceArray(self.start, new_strides, new_backstrides, new_shape, self, orig_array) - else: - if self.get_size() == 1 and len(new_shape) == 0: - return scalar.Scalar(self.dtype, self.getitem(0)) - return None def get_view(self, space, orig_array, dtype, new_shape): strides, backstrides = support.calc_strides(new_shape, dtype, @@ -92,7 +87,7 @@ if self.dtype.is_complex(): dtype = self.dtype.get_float_dtype(space) return SliceArray(self.start, strides, backstrides, - self.get_shape(), self, orig_array, dtype=dtype) + self.get_shape(), self, orig_array, dtype=dtype) return SliceArray(self.start, strides, backstrides, self.get_shape(), self, orig_array) @@ -105,10 +100,10 @@ backstrides = self.get_backstrides() if self.dtype.is_complex(): dtype = self.dtype.get_float_dtype(space) - return SliceArray(self.start + dtype.elsize, strides, - backstrides, self.get_shape(), self, orig_array, dtype=dtype) - impl = NonWritableArray(self.get_shape(), self.dtype, self.order, strides, - backstrides) + return SliceArray(self.start + dtype.elsize, strides, backstrides, + self.get_shape(), self, orig_array, dtype=dtype) + impl = NonWritableArray(self.get_shape(), self.dtype, self.order, + strides, backstrides) if not self.dtype.is_flexible(): impl.fill(space, self.dtype.box(0)) return impl @@ -167,7 +162,7 @@ space.isinstance_w(w_idx, space.w_slice) or space.is_w(w_idx, space.w_None)): raise IndexError - if isinstance(w_idx, W_NDimArray) and not isinstance(w_idx.implementation, scalar.Scalar): + if isinstance(w_idx, W_NDimArray) and not w_idx.is_scalar(): raise ArrayArgumentException shape = self.get_shape() shape_len = len(shape) @@ -208,11 +203,12 @@ raise OperationError(space.w_ValueError, space.wrap( "field named %s not found" % idx)) return RecordChunk(idx) + if len(self.get_shape()) == 0: + raise oefmt(space.w_IndexError, "0-d arrays can't be indexed") if (space.isinstance_w(w_idx, space.w_int) or space.isinstance_w(w_idx, space.w_slice)): return Chunks([Chunk(*space.decode_index4(w_idx, self.get_shape()[0]))]) - elif isinstance(w_idx, W_NDimArray) and \ - isinstance(w_idx.implementation, scalar.Scalar): + elif isinstance(w_idx, W_NDimArray) and w_idx.is_scalar(): w_idx = w_idx.get_scalar_value().item(space) if not space.isinstance_w(w_idx, space.w_int) and \ not space.isinstance_w(w_idx, space.w_bool): @@ -319,7 +315,6 @@ class ConcreteArrayNotOwning(BaseConcreteArray): def __init__(self, shape, dtype, order, strides, backstrides, storage): - 
make_sure_not_resized(shape) make_sure_not_resized(strides) make_sure_not_resized(backstrides) @@ -389,6 +384,7 @@ def __del__(self): free_raw_storage(self.storage, track_allocation=False) + class ConcreteArrayWithBase(ConcreteArrayNotOwning): def __init__(self, shape, dtype, order, strides, backstrides, storage, orig_base): ConcreteArrayNotOwning.__init__(self, shape, dtype, order, @@ -460,7 +456,10 @@ strides = [] backstrides = [] dtype = self.dtype - s = self.get_strides()[0] // dtype.elsize + try: + s = self.get_strides()[0] // dtype.elsize + except IndexError: + s = 1 if self.order == 'C': new_shape.reverse() for sh in new_shape: @@ -486,6 +485,16 @@ self, orig_array) +class VoidBoxStorage(BaseConcreteArray): + def __init__(self, size, dtype): + self.storage = alloc_raw_storage(size) + self.dtype = dtype + self.size = size + + def __del__(self): + free_raw_storage(self.storage) + + class ArrayBuffer(RWBuffer): def __init__(self, impl): self.impl = impl diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py deleted file mode 100644 --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ /dev/null @@ -1,209 +0,0 @@ -from pypy.module.micronumpy.arrayimpl import base -from pypy.module.micronumpy.base import W_NDimArray, convert_to_array -from pypy.module.micronumpy import support -from pypy.interpreter.error import OperationError - -class ScalarIterator(base.BaseArrayIterator): - def __init__(self, v): - self.v = v - self.called_once = False - - def next(self): - self.called_once = True - - def next_skip_x(self, n): - self.called_once = True - - def getitem(self): - return self.v.get_scalar_value() - - def getitem_bool(self): - return self.v.dtype.itemtype.bool(self.v.value) - - def setitem(self, v): - self.v.set_scalar_value(v) - - def done(self): - return self.called_once - - def reset(self): - pass - -class Scalar(base.BaseArrayImplementation): - def __init__(self, dtype, value=None): - self.dtype = dtype - self.value = value - - def is_scalar(self): - return True - - def get_shape(self): - return [] - - def get_strides(self): - return [] - - def get_backstrides(self): - return [] - - def create_iter(self, shape=None, backward_broadcast=False, require_index=False): - return ScalarIterator(self) - - def get_scalar_value(self): - return self.value - - def set_scalar_value(self, w_val): - self.value = w_val - - def copy(self, space): - scalar = Scalar(self.dtype) - scalar.value = self.value - return scalar - - def get_size(self): - return 1 - - def transpose(self, _): - return self - - def get_view(self, space, orig_array, dtype, new_shape): - scalar = Scalar(dtype) - if dtype.is_str_or_unicode(): - scalar.value = dtype.coerce(space, space.wrap(self.value.raw_str())) - elif dtype.is_record(): - raise OperationError(space.w_NotImplementedError, space.wrap( - "viewing scalar as record not implemented")) - else: - scalar.value = dtype.itemtype.runpack_str(space, self.value.raw_str()) - return scalar - - def get_real(self, space, orig_array): - if self.dtype.is_complex(): - scalar = Scalar(self.dtype.get_float_dtype(space)) - scalar.value = self.value.convert_real_to(scalar.dtype) - return scalar - return self - - def set_real(self, space, orig_array, w_val): - w_arr = convert_to_array(space, w_val) - if len(w_arr.get_shape()) > 0: - raise OperationError(space.w_ValueError, space.wrap( - "could not broadcast input array from shape " + - "(%s) into shape ()" % ( - ','.join([str(x) for x in w_arr.get_shape()],)))) - if self.dtype.is_complex(): - dtype 
= self.dtype.get_float_dtype(space) - self.value = self.dtype.itemtype.composite( - w_arr.get_scalar_value().convert_to(space, dtype), - self.value.convert_imag_to(dtype)) - else: - self.value = w_arr.get_scalar_value() - - def get_imag(self, space, orig_array): - if self.dtype.is_complex(): - scalar = Scalar(self.dtype.get_float_dtype(space)) - scalar.value = self.value.convert_imag_to(scalar.dtype) - return scalar - scalar = Scalar(self.dtype) - scalar.value = scalar.dtype.coerce(space, None) - return scalar - - def set_imag(self, space, orig_array, w_val): - #Only called on complex dtype - assert self.dtype.is_complex() - w_arr = convert_to_array(space, w_val) - if len(w_arr.get_shape()) > 0: - raise OperationError(space.w_ValueError, space.wrap( - "could not broadcast input array from shape " + - "(%s) into shape ()" % ( - ','.join([str(x) for x in w_arr.get_shape()],)))) - dtype = self.dtype.get_float_dtype(space) - self.value = self.dtype.itemtype.composite( - self.value.convert_real_to(dtype), - w_arr.get_scalar_value().convert_to(space, dtype)) - - def descr_getitem(self, space, _, w_idx): - if space.isinstance_w(w_idx, space.w_tuple): - if space.len_w(w_idx) == 0: - return self.get_scalar_value() - elif space.isinstance_w(w_idx, space.w_str): - if self.dtype.is_record(): - w_val = self.value.descr_getitem(space, w_idx) - return convert_to_array(space, w_val) - elif space.is_none(w_idx): - new_shape = [1] - arr = W_NDimArray.from_shape(space, new_shape, self.dtype) - arr_iter = arr.create_iter(new_shape) - arr_iter.setitem(self.value) - return arr - raise OperationError(space.w_IndexError, - space.wrap("0-d arrays can't be indexed")) - - def getitem_index(self, space, idx): - raise OperationError(space.w_IndexError, - space.wrap("0-d arrays can't be indexed")) - - def descr_setitem(self, space, _, w_idx, w_val): - if space.isinstance_w(w_idx, space.w_tuple): - if space.len_w(w_idx) == 0: - return self.set_scalar_value(self.dtype.coerce(space, w_val)) - elif space.isinstance_w(w_idx, space.w_str): - if self.dtype.is_record(): - return self.value.descr_setitem(space, w_idx, w_val) - raise OperationError(space.w_IndexError, - space.wrap("0-d arrays can't be indexed")) - - def setitem_index(self, space, idx, w_val): - raise OperationError(space.w_IndexError, - space.wrap("0-d arrays can't be indexed")) - - def set_shape(self, space, orig_array, new_shape): - if not new_shape: - return self - if support.product(new_shape) == 1: - arr = W_NDimArray.from_shape(space, new_shape, self.dtype) - arr_iter = arr.create_iter(new_shape) - arr_iter.setitem(self.value) - return arr.implementation - raise OperationError(space.w_ValueError, space.wrap( - "total size of the array must be unchanged")) - - def set_dtype(self, space, dtype): - self.value = self.value.convert_to(space, dtype) - self.dtype = dtype - - def reshape(self, space, orig_array, new_shape): - return self.set_shape(space, orig_array, new_shape) - - def create_axis_iter(self, shape, dim, cum): - raise Exception("axis iter should not happen on scalar") - - def swapaxes(self, space, orig_array, axis1, axis2): - raise Exception("should not be called") - - def nonzero(self, space, index_type): - s = self.dtype.itemtype.bool(self.value) - w_res = W_NDimArray.from_shape(space, [s], index_type) - if s == 1: - w_res.implementation.setitem(0, index_type.itemtype.box(0)) - return space.newtuple([w_res]) - - def fill(self, space, w_value): - self.value = w_value - - def get_storage_as_int(self, space): - raise 
OperationError(space.w_ValueError, - space.wrap("scalars have no address")) - - def argsort(self, space, w_axis): - return space.wrap(0) - - def astype(self, space, dtype): - raise Exception("should not be called") - - def base(self): - return None - - def get_buffer(self, space): - raise OperationError(space.w_ValueError, space.wrap( - "cannot point buffer to a scalar")) diff --git a/pypy/module/micronumpy/arrayimpl/voidbox.py b/pypy/module/micronumpy/arrayimpl/voidbox.py deleted file mode 100644 --- a/pypy/module/micronumpy/arrayimpl/voidbox.py +++ /dev/null @@ -1,12 +0,0 @@ - -from pypy.module.micronumpy.arrayimpl.base import BaseArrayImplementation -from rpython.rlib.rawstorage import free_raw_storage, alloc_raw_storage - -class VoidBoxStorage(BaseArrayImplementation): - def __init__(self, size, dtype): - self.storage = alloc_raw_storage(size) - self.dtype = dtype - self.size = size - - def __del__(self): - free_raw_storage(self.storage) diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -1,9 +1,7 @@ - from pypy.interpreter.error import OperationError from pypy.interpreter.baseobjspace import W_Root from rpython.tool.pairtype import extendabletype from pypy.module.micronumpy.support import calc_strides -from pypy.module.micronumpy.arrayimpl.base import BaseArrayImplementation def issequence_w(space, w_obj): @@ -29,21 +27,18 @@ __metaclass__ = extendabletype def __init__(self, implementation): - assert isinstance(implementation, BaseArrayImplementation) + from pypy.module.micronumpy.arrayimpl.concrete import BaseConcreteArray + assert isinstance(implementation, BaseConcreteArray) assert isinstance(self, W_NDimArray) self.implementation = implementation @staticmethod def from_shape(space, shape, dtype, order='C', w_instance=None): - from pypy.module.micronumpy.arrayimpl import concrete, scalar + from pypy.module.micronumpy.arrayimpl import concrete - if not shape: - w_val = dtype.base.coerce(space, None) - impl = scalar.Scalar(dtype.base, w_val) - else: - strides, backstrides = calc_strides(shape, dtype.base, order) - impl = concrete.ConcreteArray(shape, dtype.base, order, strides, - backstrides) + strides, backstrides = calc_strides(shape, dtype.base, order) + impl = concrete.ConcreteArray(shape, dtype.base, order, strides, + backstrides) if w_instance: return wrap_impl(space, space.type(w_instance), w_instance, impl) return W_NDimArray(impl) @@ -90,36 +85,15 @@ @staticmethod def new_scalar(space, dtype, w_val=None): - from pypy.module.micronumpy.arrayimpl import scalar - if w_val is not None: w_val = dtype.coerce(space, w_val) else: w_val = dtype.coerce(space, space.wrap(0)) - return W_NDimArray(scalar.Scalar(dtype, w_val)) + return convert_to_array(space, w_val) def convert_to_array(space, w_obj): - #XXX: This whole routine should very likely simply be array() from pypy.module.micronumpy.interp_numarray import array - from pypy.module.micronumpy import interp_ufuncs - if isinstance(w_obj, W_NDimArray): return w_obj - else: - # Use __array__() method if it exists - w_array = space.lookup(w_obj, "__array__") - if w_array is not None: - w_result = space.get_and_call_function(w_array, w_obj) - if isinstance(w_result, W_NDimArray): - return w_result - else: - raise OperationError(space.w_ValueError, - space.wrap("object __array__ method not producing an array")) - elif issequence_w(space, w_obj): - # Convert to array. 
- return array(space, w_obj, w_order=None) - else: - # If it's a scalar - dtype = interp_ufuncs.find_dtype_for_scalar(space, w_obj) - return W_NDimArray.new_scalar(space, dtype, w_obj) + return array(space, w_obj) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -10,7 +10,7 @@ from rpython.rlib.rarithmetic import LONG_BIT from rpython.rtyper.lltypesystem import rffi from rpython.tool.sourcetools import func_with_new_name -from pypy.module.micronumpy.arrayimpl.voidbox import VoidBoxStorage +from pypy.module.micronumpy.arrayimpl.concrete import VoidBoxStorage from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy.interp_flagsobj import W_FlagsObject from pypy.interpreter.mixedmodule import MixedModule diff --git a/pypy/module/micronumpy/interp_flatiter.py b/pypy/module/micronumpy/interp_flatiter.py --- a/pypy/module/micronumpy/interp_flatiter.py +++ b/pypy/module/micronumpy/interp_flatiter.py @@ -1,10 +1,10 @@ - from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy import loop -from pypy.module.micronumpy.arrayimpl.base import BaseArrayImplementation +from pypy.module.micronumpy.arrayimpl.concrete import BaseConcreteArray from pypy.interpreter.error import OperationError -class FakeArrayImplementation(BaseArrayImplementation): + +class FakeArrayImplementation(BaseConcreteArray): """ The sole purpose of this class is to W_FlatIterator can behave like a real array for descr_eq and friends """ diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -5,7 +5,7 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec, applevel, \ WrappedDefault from pypy.module.micronumpy.base import W_NDimArray, convert_to_array,\ - ArrayArgumentException, issequence_w, wrap_impl + ArrayArgumentException, wrap_impl from pypy.module.micronumpy import interp_dtype, interp_ufuncs, interp_boxes,\ interp_arrayops from pypy.module.micronumpy.strides import find_shape_and_elems,\ @@ -16,11 +16,10 @@ from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy import loop from pypy.module.micronumpy.interp_arrayops import repeat, choose, put -from pypy.module.micronumpy.arrayimpl import scalar from rpython.tool.sourcetools import func_with_new_name from rpython.rlib import jit from rpython.rlib.rstring import StringBuilder -from pypy.module.micronumpy.arrayimpl.base import BaseArrayImplementation +from pypy.module.micronumpy.arrayimpl.concrete import BaseConcreteArray from pypy.module.micronumpy.conversion_utils import order_converter, multi_axis_converter from pypy.module.micronumpy import support from pypy.module.micronumpy import constants as NPY @@ -292,7 +291,7 @@ return s.build() def create_iter(self, shape=None, backward_broadcast=False, require_index=False): - assert isinstance(self.implementation, BaseArrayImplementation) + assert isinstance(self.implementation, BaseConcreteArray) return self.implementation.create_iter(shape=shape, backward_broadcast=backward_broadcast, require_index=require_index) @@ -304,10 +303,10 @@ return self.implementation.create_dot_iter(shape, skip) def is_scalar(self): - return self.implementation.is_scalar() + return len(self.get_shape()) == 0 def set_scalar_value(self, w_val): - 
self.implementation.set_scalar_value(w_val) + return self.implementation.setitem(0, w_val) def fill(self, space, box): self.implementation.fill(space, box) @@ -319,7 +318,8 @@ return self.implementation.get_size() def get_scalar_value(self): - return self.implementation.get_scalar_value() + assert len(self.get_shape()) == 0 + return self.implementation.getitem(0) def descr_copy(self, space, w_order=None): order = order_converter(space, w_order, NPY.KEEPORDER) @@ -580,11 +580,8 @@ new_dtype = interp_dtype.variable_dtype(space, 'S' + str(cur_dtype.elsize)) impl = self.implementation - if isinstance(impl, scalar.Scalar): - return W_NDimArray.new_scalar(space, new_dtype, impl.value) - else: - new_impl = impl.astype(space, new_dtype) - return wrap_impl(space, space.type(self), self, new_impl) + new_impl = impl.astype(space, new_dtype) + return wrap_impl(space, space.type(self), self, new_impl) def descr_get_base(self, space): impl = self.implementation @@ -1038,65 +1035,42 @@ descr_argmin = _reduce_argmax_argmin_impl("argmin") def descr_int(self, space): - shape = self.get_shape() - if len(shape) == 0: - assert isinstance(self.implementation, scalar.Scalar) - value = space.wrap(self.implementation.get_scalar_value()) - elif shape == [1]: - value = self.descr_getitem(space, space.wrap(0)) - else: + if self.get_size() != 1: raise OperationError(space.w_TypeError, space.wrap( "only length-1 arrays can be converted to Python scalars")) if self.get_dtype().is_str_or_unicode(): raise OperationError(space.w_TypeError, space.wrap( "don't know how to convert scalar number to int")) + value = self.implementation.getitem(0) return space.int(value) def descr_long(self, space): - shape = self.get_shape() - if len(shape) == 0: - assert isinstance(self.implementation, scalar.Scalar) - value = space.wrap(self.implementation.get_scalar_value()) - elif shape == [1]: - value = self.descr_getitem(space, space.wrap(0)) - else: + if self.get_size() != 1: raise OperationError(space.w_TypeError, space.wrap( "only length-1 arrays can be converted to Python scalars")) if self.get_dtype().is_str_or_unicode(): raise OperationError(space.w_TypeError, space.wrap( "don't know how to convert scalar number to long")) + value = self.implementation.getitem(0) return space.long(value) def descr_float(self, space): - shape = self.get_shape() - if len(shape) == 0: - assert isinstance(self.implementation, scalar.Scalar) - value = space.wrap(self.implementation.get_scalar_value()) - elif shape == [1]: - value = self.descr_getitem(space, space.wrap(0)) - else: + if self.get_size() != 1: raise OperationError(space.w_TypeError, space.wrap( "only length-1 arrays can be converted to Python scalars")) if self.get_dtype().is_str_or_unicode(): raise OperationError(space.w_TypeError, space.wrap( "don't know how to convert scalar number to float")) + value = self.implementation.getitem(0) return space.float(value) def descr_index(self, space): - shape = self.get_shape() - if len(shape) == 0: - assert isinstance(self.implementation, scalar.Scalar) - value = space.wrap(self.implementation.get_scalar_value()) - elif shape == [1]: - value = self.descr_getitem(space, space.wrap(0)) - else: + if self.get_size() != 1 or \ + not self.get_dtype().is_int() or self.get_dtype().is_bool(): raise OperationError(space.w_TypeError, space.wrap( "only integer arrays with one element " "can be converted to an index")) - if not self.get_dtype().is_int() or self.get_dtype().is_bool(): - raise OperationError(space.w_TypeError, space.wrap( - "only integer arrays 
with one element " - "can be converted to an index")) + value = self.implementation.getitem(0) assert isinstance(value, interp_boxes.W_GenericBox) return value.item(space) @@ -1445,14 +1419,6 @@ dtype = interp_dtype.decode_w_dtype(space, w_dtype) - # scalars and strings w/o __array__ method - isstr = space.isinstance_w(w_object, space.w_str) - if not issequence_w(space, w_object) or isstr: - if dtype is None or dtype.char != NPY.CHARLTR: - if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): - dtype = interp_ufuncs.find_dtype_for_scalar(space, w_object) - return W_NDimArray.new_scalar(space, dtype, w_object) - if space.is_none(w_order): order = 'C' else: diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -45,7 +45,6 @@ from pypy.module.micronumpy.strides import enumerate_chunks,\ calculate_slice_strides from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy.arrayimpl import base from pypy.module.micronumpy.support import product from rpython.rlib import jit @@ -169,7 +168,17 @@ def get_index(self, space, shapelen): return [space.wrap(self.indexes[i]) for i in range(shapelen)] -class ConcreteArrayIterator(base.BaseArrayIterator): +class BaseArrayIterator(object): + def next(self): + raise NotImplementedError # purely abstract base class + + def setitem(self, elem): + raise NotImplementedError + + def set_scalar_object(self, value): + raise NotImplementedError # works only on scalars + +class ConcreteArrayIterator(BaseArrayIterator): _immutable_fields_ = ['array', 'skip', 'size'] def __init__(self, array): self.array = array @@ -275,7 +284,7 @@ def get_index(self, d): return self.indexes[d] -class AxisIterator(base.BaseArrayIterator): +class AxisIterator(BaseArrayIterator): def __init__(self, array, shape, dim, cumulative): self.shape = shape strides = array.get_strides() diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -1,6 +1,7 @@ from rpython.rlib import jit from pypy.interpreter.error import OperationError -from pypy.module.micronumpy.base import W_NDimArray +from pypy.module.micronumpy.base import W_NDimArray, issequence_w +from pypy.module.micronumpy import constants as NPY @jit.look_inside_iff(lambda chunks: jit.isconstant(len(chunks))) def enumerate_chunks(chunks): @@ -69,6 +70,10 @@ return True def find_shape_and_elems(space, w_iterable, dtype): + isstr = space.isinstance_w(w_iterable, space.w_str) + if not issequence_w(space, w_iterable) or isstr: + if dtype is None or dtype.char != NPY.CHARLTR: + return [], [w_iterable] is_rec_type = dtype is not None and dtype.is_record() if is_rec_type and is_single_elem(space, w_iterable, is_rec_type): return [], [w_iterable] diff --git a/pypy/module/micronumpy/test/test_iter.py b/pypy/module/micronumpy/test/test_iter.py --- a/pypy/module/micronumpy/test/test_iter.py +++ b/pypy/module/micronumpy/test/test_iter.py @@ -1,5 +1,4 @@ from pypy.module.micronumpy.iter import MultiDimViewIterator -from pypy.module.micronumpy.arrayimpl.scalar import ScalarIterator class MockArray(object): @@ -92,9 +91,3 @@ assert i.indexes == [0,1] assert i.offset == 3 assert i.done() - - def test_scalar_iter(self): - i = ScalarIterator(MockArray) - i.next() - i.next_skip_x(3) - assert i.done() diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- 
a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2683,6 +2683,11 @@ def test_array_interface(self): from numpypy import array + a = array(2.5) + i = a.__array_interface__ + assert isinstance(i['data'][0], int) + assert i['shape'] == () + assert i['strides'] is None a = array([1, 2, 3]) i = a.__array_interface__ assert isinstance(i['data'][0], int) @@ -3194,6 +3199,7 @@ assert str(array([1, 2, 3])) == '[1 2 3]' assert str(array(['abc'], 'S3')) == "['abc']" assert str(array('abc')) == 'abc' + assert str(array(1.5)) == '1.5' class AppTestRepr(BaseNumpyAppTest): @@ -3211,6 +3217,7 @@ from numpypy import array assert repr(array([1, 2, 3])) == 'array([1, 2, 3])' assert repr(array(['abc'], 'S3')) == "array(['abc'])" + assert repr(array(1.5)) == "array(1.5)" def teardown_class(cls): if option.runappdirect: diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -4,8 +4,7 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.module.micronumpy import interp_boxes from pypy.module.micronumpy import support -from pypy.module.micronumpy.arrayimpl.voidbox import VoidBoxStorage -from pypy.module.micronumpy.arrayimpl.concrete import SliceArray +from pypy.module.micronumpy.arrayimpl.concrete import SliceArray, VoidBoxStorage from pypy.objspace.std.floatobject import float2string from pypy.objspace.std.complexobject import str_format from rpython.rlib import rfloat, clibffi, rcomplex From noreply at buildbot.pypy.org Thu Feb 27 01:59:47 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 01:59:47 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: fix str/repr of scalar views Message-ID: <20140227005947.5865E1C3973@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69474:4a9b7ee9934d Date: 2014-02-26 15:17 -0500 http://bitbucket.org/pypy/pypy/changeset/4a9b7ee9934d/ Log: fix str/repr of scalar views diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -337,7 +337,7 @@ r[0], r[1], shape) if not require_index: return iter.ConcreteArrayIterator(self) - if len(self.get_shape()) == 1: + if len(self.get_shape()) <= 1: return iter.OneDimViewIterator(self, self.start, self.get_strides(), self.get_shape()) @@ -440,7 +440,7 @@ backward_broadcast) return iter.MultiDimViewIterator(self, self.start, r[0], r[1], shape) - if len(self.get_shape()) == 1: + if len(self.get_shape()) <= 1: return iter.OneDimViewIterator(self, self.start, self.get_strides(), self.get_shape()) diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -292,9 +292,9 @@ def create_iter(self, shape=None, backward_broadcast=False, require_index=False): assert isinstance(self.implementation, BaseConcreteArray) - return self.implementation.create_iter(shape=shape, - backward_broadcast=backward_broadcast, - require_index=require_index) + return self.implementation.create_iter( + shape=shape, backward_broadcast=backward_broadcast, + require_index=require_index) def create_axis_iter(self, shape, dim, cum): return self.implementation.create_axis_iter(shape, dim, cum) diff --git a/pypy/module/micronumpy/iter.py 
b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -211,9 +211,15 @@ def __init__(self, array, start, strides, shape): self.array = array self.offset = start - self.skip = strides[0] self.index = 0 - self.size = shape[0] + assert len(strides) == len(shape) + if len(shape) == 0: + self.skip = array.dtype.elsize + self.size = 1 + else: + assert len(shape) == 1 + self.skip = strides[0] + self.size = shape[0] def next(self): self.offset += self.skip diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3200,6 +3200,7 @@ assert str(array(['abc'], 'S3')) == "['abc']" assert str(array('abc')) == 'abc' assert str(array(1.5)) == '1.5' + assert str(array(1.5).real) == '1.5' class AppTestRepr(BaseNumpyAppTest): @@ -3218,6 +3219,7 @@ assert repr(array([1, 2, 3])) == 'array([1, 2, 3])' assert repr(array(['abc'], 'S3')) == "array(['abc'])" assert repr(array(1.5)) == "array(1.5)" + assert repr(array(1.5).real) == "array(1.5)" def teardown_class(cls): if option.runappdirect: From noreply at buildbot.pypy.org Thu Feb 27 01:59:48 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 01:59:48 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: kill arrayimpl subdirectory Message-ID: <20140227005948.A33DF1C3973@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69475:61e06779d82e Date: 2014-02-26 14:59 -0500 http://bitbucket.org/pypy/pypy/changeset/61e06779d82e/ Log: kill arrayimpl subdirectory diff --git a/pypy/module/micronumpy/arrayimpl/__init__.py b/pypy/module/micronumpy/arrayimpl/__init__.py deleted file mode 100644 diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -27,14 +27,14 @@ __metaclass__ = extendabletype def __init__(self, implementation): - from pypy.module.micronumpy.arrayimpl.concrete import BaseConcreteArray + from pypy.module.micronumpy.concrete import BaseConcreteArray assert isinstance(implementation, BaseConcreteArray) assert isinstance(self, W_NDimArray) self.implementation = implementation @staticmethod def from_shape(space, shape, dtype, order='C', w_instance=None): - from pypy.module.micronumpy.arrayimpl import concrete + from pypy.module.micronumpy import concrete strides, backstrides = calc_strides(shape, dtype.base, order) impl = concrete.ConcreteArray(shape, dtype.base, order, strides, @@ -46,7 +46,7 @@ @staticmethod def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, w_subtype=None, w_base=None, writable=True): - from pypy.module.micronumpy.arrayimpl import concrete + from pypy.module.micronumpy import concrete assert shape strides, backstrides = calc_strides(shape, dtype, order) if w_base is not None: @@ -77,7 +77,7 @@ @staticmethod def new_slice(space, offset, strides, backstrides, shape, parent, orig_arr, dtype=None): - from pypy.module.micronumpy.arrayimpl import concrete + from pypy.module.micronumpy import concrete impl = concrete.SliceArray(offset, strides, backstrides, shape, parent, orig_arr, dtype) diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/concrete.py rename from pypy/module/micronumpy/arrayimpl/concrete.py rename to pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ 
b/pypy/module/micronumpy/concrete.py @@ -360,11 +360,11 @@ self.dtype = dtype def argsort(self, space, w_axis): - from pypy.module.micronumpy.arrayimpl.sort import argsort_array + from pypy.module.micronumpy.sort import argsort_array return argsort_array(self, space, w_axis) def sort(self, space, w_axis, w_order): - from pypy.module.micronumpy.arrayimpl.sort import sort_array + from pypy.module.micronumpy.sort import sort_array return sort_array(self, space, w_axis, w_order) def base(self): diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -10,7 +10,7 @@ from rpython.rlib.rarithmetic import LONG_BIT from rpython.rtyper.lltypesystem import rffi from rpython.tool.sourcetools import func_with_new_name -from pypy.module.micronumpy.arrayimpl.concrete import VoidBoxStorage +from pypy.module.micronumpy.concrete import VoidBoxStorage from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy.interp_flagsobj import W_FlagsObject from pypy.interpreter.mixedmodule import MixedModule diff --git a/pypy/module/micronumpy/interp_flatiter.py b/pypy/module/micronumpy/interp_flatiter.py --- a/pypy/module/micronumpy/interp_flatiter.py +++ b/pypy/module/micronumpy/interp_flatiter.py @@ -1,6 +1,6 @@ from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy import loop -from pypy.module.micronumpy.arrayimpl.concrete import BaseConcreteArray +from pypy.module.micronumpy.concrete import BaseConcreteArray from pypy.interpreter.error import OperationError diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -19,7 +19,7 @@ from rpython.tool.sourcetools import func_with_new_name from rpython.rlib import jit from rpython.rlib.rstring import StringBuilder -from pypy.module.micronumpy.arrayimpl.concrete import BaseConcreteArray +from pypy.module.micronumpy.concrete import BaseConcreteArray from pypy.module.micronumpy.conversion_utils import order_converter, multi_axis_converter from pypy.module.micronumpy import support from pypy.module.micronumpy import constants as NPY @@ -1077,7 +1077,7 @@ def descr_reduce(self, space): from rpython.rlib.rstring import StringBuilder from pypy.interpreter.mixedmodule import MixedModule - from pypy.module.micronumpy.arrayimpl.concrete import SliceArray + from pypy.module.micronumpy.concrete import SliceArray numpypy = space.getbuiltinmodule("_numpypy") assert isinstance(numpypy, MixedModule) @@ -1142,7 +1142,7 @@ @unwrap_spec(offset=int) def descr_new_array(space, w_subtype, w_shape, w_dtype=None, w_buffer=None, offset=0, w_strides=None, w_order=None): - from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArray + from pypy.module.micronumpy.concrete import ConcreteArray from pypy.module.micronumpy.support import calc_strides dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/sort.py rename from pypy/module/micronumpy/arrayimpl/sort.py rename to pypy/module/micronumpy/sort.py diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -4,7 +4,7 @@ from pypy.interpreter.error import OperationError, oefmt 
from pypy.module.micronumpy import interp_boxes from pypy.module.micronumpy import support -from pypy.module.micronumpy.arrayimpl.concrete import SliceArray, VoidBoxStorage +from pypy.module.micronumpy.concrete import SliceArray, VoidBoxStorage from pypy.objspace.std.floatobject import float2string from pypy.objspace.std.complexobject import str_format from rpython.rlib import rfloat, clibffi, rcomplex From noreply at buildbot.pypy.org Thu Feb 27 01:59:49 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 01:59:49 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: fix some scalar get/set cases Message-ID: <20140227005949.D09541C3973@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69476:20666c9f42b8 Date: 2014-02-26 16:07 -0500 http://bitbucket.org/pypy/pypy/changeset/20666c9f42b8/ Log: fix some scalar get/set cases diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -306,7 +306,7 @@ return len(self.get_shape()) == 0 def set_scalar_value(self, w_val): - return self.implementation.setitem(0, w_val) + return self.implementation.setitem(self.implementation.start, w_val) def fill(self, space, box): self.implementation.fill(space, box) @@ -318,8 +318,8 @@ return self.implementation.get_size() def get_scalar_value(self): - assert len(self.get_shape()) == 0 - return self.implementation.getitem(0) + assert self.get_size() == 1 + return self.implementation.getitem(self.implementation.start) def descr_copy(self, space, w_order=None): order = order_converter(space, w_order, NPY.KEEPORDER) @@ -490,19 +490,15 @@ def descr_item(self, space, w_arg=None): if space.is_none(w_arg): - if self.is_scalar(): - return self.get_scalar_value().item(space) if self.get_size() == 1: - w_obj = self.getitem(space, - [0] * len(self.get_shape())) + w_obj = self.get_scalar_value() assert isinstance(w_obj, interp_boxes.W_GenericBox) return w_obj.item(space) - raise OperationError(space.w_ValueError, - space.wrap("can only convert an array of size 1 to a Python scalar")) + raise oefmt(space.w_ValueError, + "can only convert an array of size 1 to a Python scalar") if space.isinstance_w(w_arg, space.w_int): if self.is_scalar(): - raise OperationError(space.w_IndexError, - space.wrap("index out of bounds")) + raise oefmt(space.w_IndexError, "index out of bounds") i = self.to_coords(space, w_arg) item = self.getitem(space, i) assert isinstance(item, interp_boxes.W_GenericBox) @@ -1041,7 +1037,7 @@ if self.get_dtype().is_str_or_unicode(): raise OperationError(space.w_TypeError, space.wrap( "don't know how to convert scalar number to int")) - value = self.implementation.getitem(0) + value = self.get_scalar_value() return space.int(value) def descr_long(self, space): @@ -1051,7 +1047,7 @@ if self.get_dtype().is_str_or_unicode(): raise OperationError(space.w_TypeError, space.wrap( "don't know how to convert scalar number to long")) - value = self.implementation.getitem(0) + value = self.get_scalar_value() return space.long(value) def descr_float(self, space): @@ -1061,7 +1057,7 @@ if self.get_dtype().is_str_or_unicode(): raise OperationError(space.w_TypeError, space.wrap( "don't know how to convert scalar number to float")) - value = self.implementation.getitem(0) + value = self.get_scalar_value() return space.float(value) def descr_index(self, space): @@ -1070,7 +1066,7 @@ raise OperationError(space.w_TypeError, space.wrap( 
"only integer arrays with one element " "can be converted to an index")) - value = self.implementation.getitem(0) + value = self.get_scalar_value() assert isinstance(value, interp_boxes.W_GenericBox) return value.item(space) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -1415,6 +1415,12 @@ b = a.sum(out=d) assert b == d assert b is d + c = array(1.5+2.5j) + assert c.real == 1.5 + assert c.imag == 2.5 + a.sum(out=c.imag) + assert c.real == 1.5 + assert c.imag == 5 assert list(zeros((0, 2)).sum(axis=1)) == [] From noreply at buildbot.pypy.org Thu Feb 27 01:59:51 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 01:59:51 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: fix setitem_filter with scalar Message-ID: <20140227005951.15E931C3973@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69477:afe7292cc2d3 Date: 2014-02-26 16:54 -0500 http://bitbucket.org/pypy/pypy/changeset/afe7292cc2d3/ Log: fix setitem_filter with scalar diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -144,7 +144,7 @@ "cannot assign %d input values to " "the %d output values where the mask is true" % (val.get_size(), size))) - loop.setitem_filter(space, self, idx, val, size) + loop.setitem_filter(space, self, idx, val) def _prepare_array_index(self, space, w_index): if isinstance(w_index, W_NDimArray): diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -380,14 +380,17 @@ 'index_dtype'], reds = 'auto') -def setitem_filter(space, arr, index, value, size): +def setitem_filter(space, arr, index, value): arr_iter = arr.create_iter() shapelen = len(arr.get_shape()) if shapelen > 1 and len(index.get_shape()) < 2: index_iter = index.create_iter(arr.get_shape(), backward_broadcast=True) else: index_iter = index.create_iter() - value_iter = value.create_iter([size]) + if value.get_size() == 1: + value_iter = value.create_iter(arr.get_shape()) + else: + value_iter = value.create_iter() index_dtype = index.get_dtype() arr_dtype = arr.get_dtype() while not index_iter.done(): diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2281,6 +2281,12 @@ assert (a[b] == a).all() a[b] = 1. assert (a == [[1., 1., 1.]]).all() + a[b] = np.array(2.) 
+ assert (a == [[2., 2., 2.]]).all() + a[b] = np.array([3.]) + assert (a == [[3., 3., 3.]]).all() + a[b] = np.array([[4.]]) + assert (a == [[4., 4., 4.]]).all() def test_ellipsis_indexing(self): import numpy as np From noreply at buildbot.pypy.org Thu Feb 27 01:59:52 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 01:59:52 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: fix nonzero Message-ID: <20140227005952.435A51C3973@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69478:32523cf4d763 Date: 2014-02-26 17:18 -0500 http://bitbucket.org/pypy/pypy/changeset/32523cf4d763/ Log: fix nonzero diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -288,7 +288,7 @@ def nonzero(self, space, index_type): s = loop.count_all_true_concrete(self) box = index_type.itemtype.box - nd = len(self.get_shape()) + nd = len(self.get_shape()) or 1 w_res = W_NDimArray.from_shape(space, [s, nd], index_type) loop.nonzero(w_res, self, box) w_res = w_res.implementation.swapaxes(space, w_res, 0, 1) From noreply at buildbot.pypy.org Thu Feb 27 01:59:53 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 01:59:53 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: fix slicing message for array scalars Message-ID: <20140227005953.5F28E1C3973@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69479:efbc3fbb15c3 Date: 2014-02-26 17:43 -0500 http://bitbucket.org/pypy/pypy/changeset/efbc3fbb15c3/ Log: fix slicing message for array scalars diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -189,7 +189,9 @@ space.isinstance_w(w_item, space.w_list)): raise ArrayArgumentException return self._lookup_by_index(space, view_w) - if shape_len > 1: + if shape_len == 0: + raise oefmt(space.w_IndexError, "0-d arrays can't be indexed") + elif shape_len > 1: raise IndexError idx = support.index_w(space, w_idx) return self._lookup_by_index(space, [space.wrap(idx)]) @@ -204,7 +206,7 @@ "field named %s not found" % idx)) return RecordChunk(idx) if len(self.get_shape()) == 0: - raise oefmt(space.w_IndexError, "0-d arrays can't be indexed") + raise oefmt(space.w_ValueError, "cannot slice a 0-d array") if (space.isinstance_w(w_idx, space.w_int) or space.isinstance_w(w_idx, space.w_slice)): return Chunks([Chunk(*space.decode_index4(w_idx, self.get_shape()[0]))]) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -657,6 +657,9 @@ def test_setslice_array(self): from numpypy import array + a = array(5) + exc = raises(ValueError, "a[:] = 4") + assert exc.value[0] == "cannot slice a 0-d array" a = array(range(5)) b = array(range(2)) a[1:4:2] = b @@ -1350,6 +1353,9 @@ def test_getslice(self): from numpypy import array + a = array(5) + exc = raises(ValueError, "a[:]") + assert exc.value[0] == "cannot slice a 0-d array" a = array(range(5)) s = a[1:5] assert len(s) == 4 From noreply at buildbot.pypy.org Thu Feb 27 01:59:54 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 01:59:54 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: fix newaxis with scalars Message-ID: 
<20140227005954.811A71C3973@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69480:568a574ba925 Date: 2014-02-26 18:18 -0500 http://bitbucket.org/pypy/pypy/changeset/568a574ba925/ Log: fix newaxis with scalars diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -205,10 +205,10 @@ raise OperationError(space.w_ValueError, space.wrap( "field named %s not found" % idx)) return RecordChunk(idx) - if len(self.get_shape()) == 0: - raise oefmt(space.w_ValueError, "cannot slice a 0-d array") - if (space.isinstance_w(w_idx, space.w_int) or + elif (space.isinstance_w(w_idx, space.w_int) or space.isinstance_w(w_idx, space.w_slice)): + if len(self.get_shape()) == 0: + raise oefmt(space.w_ValueError, "cannot slice a 0-d array") return Chunks([Chunk(*space.decode_index4(w_idx, self.get_shape()[0]))]) elif isinstance(w_idx, W_NDimArray) and w_idx.is_scalar(): w_idx = w_idx.get_scalar_value().item(space) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -27,12 +27,16 @@ i = -1 j = 0 for i, chunk in enumerate_chunks(chunks): + try: + s_i = strides[i] + except IndexError: + continue if chunk.step != 0: - rstrides[j] = strides[i] * chunk.step - rbackstrides[j] = strides[i] * max(0, chunk.lgt - 1) * chunk.step + rstrides[j] = s_i * chunk.step + rbackstrides[j] = s_i * max(0, chunk.lgt - 1) * chunk.step rshape[j] = chunk.lgt j += 1 - rstart += strides[i] * chunk.start + rstart += s_i * chunk.start # add a reminder s = i + 1 assert s >= 0 diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -716,9 +716,14 @@ for y in range(2): expected[x, y] = math.cos(a[x]) * math.cos(b[y]) assert ((cos(a)[:,newaxis] * cos(b).T) == expected).all() - a = array(1)[newaxis] + o = array(1) + a = o[newaxis] assert a == array([1]) assert a.shape == (1,) + o[newaxis, newaxis] = 2 + assert o == 2 + a[:] = 3 + assert o == 3 def test_newaxis_slice(self): from numpypy import array, newaxis From noreply at buildbot.pypy.org Thu Feb 27 01:59:55 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 01:59:55 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: fix setslice on scalars Message-ID: <20140227005955.DA9FB1C3973@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69481:f13a87f568d7 Date: 2014-02-26 18:31 -0500 http://bitbucket.org/pypy/pypy/changeset/f13a87f568d7/ Log: fix setslice on scalars diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -44,8 +44,13 @@ self.dtype.itemtype.store(self, index, 0, value) def setslice(self, space, arr): + if len(arr.get_shape()) > 0 and len(self.get_shape()) == 0: + raise oefmt(space.w_ValueError, + "could not broadcast input array from shape " + "(%s) into shape ()", + ','.join([str(x) for x in arr.get_shape()])) + shape = shape_agreement(space, self.get_shape(), arr) impl = arr.implementation - shape = shape_agreement(space, self.get_shape(), arr) if impl.storage == self.storage: impl = impl.copy(space) loop.setslice(space, shape, self, impl) From noreply at buildbot.pypy.org Thu Feb 27 01:59:57 2014 
From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 01:59:57 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: fix ufunc out arg with scalar Message-ID: <20140227005957.0B5701C3973@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69482:cd1472409d78 Date: 2014-02-26 18:35 -0500 http://bitbucket.org/pypy/pypy/changeset/cd1472409d78/ Log: fix ufunc out arg with scalar diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -321,10 +321,11 @@ w_obj.get_scalar_value().convert_to(space, calc_dtype)) if out is None: return w_val + w_val = res_dtype.coerce(space, w_val) if out.is_scalar(): out.set_scalar_value(w_val) else: - out.fill(space, res_dtype.coerce(space, w_val)) + out.fill(space, w_val) return out shape = shape_agreement(space, w_obj.get_shape(), out, broadcast_down=False) From noreply at buildbot.pypy.org Thu Feb 27 01:59:58 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 01:59:58 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: fix test_compile Message-ID: <20140227005958.335391C3973@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69483:328438b28770 Date: 2014-02-26 18:46 -0500 http://bitbucket.org/pypy/pypy/changeset/328438b28770/ Log: fix test_compile diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -1,4 +1,3 @@ - """ This is a set of tools for standalone compiling of numpy expressions. It should not be imported by the module itself """ @@ -95,7 +94,10 @@ return StringObject(NonConstant('foo')) def isinstance_w(self, w_obj, w_tp): - return w_obj.tp == w_tp + try: + return w_obj.tp == w_tp + except AttributeError: + return False def decode_index4(self, w_idx, size): if isinstance(w_idx, IntObject): @@ -215,7 +217,8 @@ def lookup(self, w_obj, name): w_type = self.type(w_obj) - return w_type.lookup(name) + if not self.is_none(w_type): + return w_type.lookup(name) def gettypefor(self, w_obj): return W_TypeObject(w_obj.typedef.name) From noreply at buildbot.pypy.org Thu Feb 27 01:59:59 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 01:59:59 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: move issequence_w to support, cleanup other support usage Message-ID: <20140227005959.6C13E1C3973@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69484:daf0f2019fee Date: 2014-02-26 19:04 -0500 http://bitbucket.org/pypy/pypy/changeset/daf0f2019fee/ Log: move issequence_w to support, cleanup other support usage diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -4,11 +4,6 @@ from pypy.module.micronumpy.support import calc_strides -def issequence_w(space, w_obj): - return (space.isinstance_w(w_obj, space.w_tuple) or - space.isinstance_w(w_obj, space.w_list) or - isinstance(w_obj, W_NDimArray)) - def wrap_impl(space, w_cls, w_instance, impl): if w_cls is None or space.is_w(w_cls, space.gettypefor(W_NDimArray)): w_ret = W_NDimArray(impl) diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -6,6 +6,7 @@ from 
pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec from pypy.module.micronumpy.conversion_utils import clipmode_converter +from pypy.module.micronumpy import support from pypy.module.micronumpy import constants as NPY def where(space, w_arr, w_x=None, w_y=None): @@ -209,8 +210,6 @@ return out def put(space, w_arr, w_indices, w_values, w_mode): - from pypy.module.micronumpy.support import index_w - arr = convert_to_array(space, w_arr) mode = clipmode_converter(space, w_mode) @@ -235,7 +234,7 @@ v_idx = 0 for idx in indices: - index = index_w(space, idx) + index = support.index_w(space, idx) if index < 0 or index >= arr.get_size(): if mode == NPY.RAISE: diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -10,6 +10,7 @@ from rpython.rlib import jit from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy.conversion_utils import byteorder_converter +from pypy.module.micronumpy import support from pypy.module.micronumpy import constants as NPY @@ -372,7 +373,7 @@ "incorrect subarray in __setstate__") subdtype, w_shape = space.fixedview(w_subarray) assert isinstance(subdtype, W_Dtype) - if not base.issequence_w(space, w_shape): + if not support.issequence_w(space, w_shape): self.shape = [space.int_w(w_shape)] else: self.shape = [space.int_w(w_s) for w_s in space.fixedview(w_shape)] @@ -432,7 +433,7 @@ w_shape = space.newtuple([]) if space.len_w(w_elem) == 3: w_fldname, w_flddesc, w_shape = space.fixedview(w_elem) - if not base.issequence_w(space, w_shape): + if not support.issequence_w(space, w_shape): w_shape = space.newtuple([w_shape]) else: w_fldname, w_flddesc = space.fixedview(w_elem, 2) diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -45,7 +45,7 @@ from pypy.module.micronumpy.strides import enumerate_chunks,\ calculate_slice_strides from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy.support import product +from pypy.module.micronumpy import support from rpython.rlib import jit # structures to describe slicing @@ -245,7 +245,7 @@ self.shape = shape self.offset = start self.shapelen = len(shape) - self._done = self.shapelen == 0 or product(shape) == 0 + self._done = self.shapelen == 0 or support.product(shape) == 0 self.strides = strides self.backstrides = backstrides self.size = array.size diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -10,7 +10,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy.iter import PureShapeIterator -from pypy.module.micronumpy.support import index_w +from pypy.module.micronumpy import support from pypy.module.micronumpy import constants as NPY call2_driver = jit.JitDriver(name='numpy_call2', @@ -557,7 +557,7 @@ while not arr_iter.done(): choose_driver.jit_merge_point(shapelen=shapelen, dtype=dtype, mode=mode) - index = index_w(space, arr_iter.getitem()) + index = support.index_w(space, arr_iter.getitem()) if index < 0 or index >= len(iterators): if mode == NPY.RAISE: raise OperationError(space.w_ValueError, space.wrap( diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- 
a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -1,6 +1,7 @@ from rpython.rlib import jit from pypy.interpreter.error import OperationError -from pypy.module.micronumpy.base import W_NDimArray, issequence_w +from pypy.module.micronumpy.base import W_NDimArray +from pypy.module.micronumpy import support from pypy.module.micronumpy import constants as NPY @jit.look_inside_iff(lambda chunks: jit.isconstant(len(chunks))) @@ -75,7 +76,7 @@ def find_shape_and_elems(space, w_iterable, dtype): isstr = space.isinstance_w(w_iterable, space.w_str) - if not issequence_w(space, w_iterable) or isstr: + if not support.issequence_w(space, w_iterable) or isstr: if dtype is None or dtype.char != NPY.CHARLTR: return [], [w_iterable] is_rec_type = dtype is not None and dtype.is_record() diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -1,6 +1,12 @@ from rpython.rlib import jit from pypy.interpreter.error import OperationError +def issequence_w(space, w_obj): + from pypy.module.micronumpy.base import W_NDimArray + return (space.isinstance_w(w_obj, space.w_tuple) or + space.isinstance_w(w_obj, space.w_list) or + isinstance(w_obj, W_NDimArray)) + def index_w(space, w_obj): try: return space.int_w(space.index(w_obj)) From noreply at buildbot.pypy.org Thu Feb 27 02:00:00 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 02:00:00 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: put Chunks in strides.py Message-ID: <20140227010000.ADEB21C3973@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69485:dd44430725b8 Date: 2014-02-26 19:32 -0500 http://bitbucket.org/pypy/pypy/changeset/dd44430725b8/ Log: put Chunks in strides.py diff --git a/pypy/module/micronumpy/appbridge.py b/pypy/module/micronumpy/appbridge.py --- a/pypy/module/micronumpy/appbridge.py +++ b/pypy/module/micronumpy/appbridge.py @@ -1,5 +1,6 @@ from rpython.rlib.objectmodel import specialize + class AppBridgeCache(object): w__mean = None w__var = None @@ -20,6 +21,7 @@ setattr(self, 'w_' + name, w_method) return space.call_args(w_method, args) + def set_string_function(space, w_f, w_repr): cache = get_appbridge_cache(space) if space.is_true(w_repr): @@ -27,5 +29,6 @@ else: cache.w_array_str = w_f + def get_appbridge_cache(space): return space.fromcache(AppBridgeCache) diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -14,6 +14,7 @@ space.call_method(w_ret, '__array_finalize__', w_instance) return w_ret + class ArrayArgumentException(Exception): pass @@ -46,7 +47,7 @@ strides, backstrides = calc_strides(shape, dtype, order) if w_base is not None: if owning: - raise OperationError(space.w_ValueError, + raise OperationError(space.w_ValueError, space.wrap("Cannot have owning=True when specifying a buffer")) if writable: impl = concrete.ConcreteArrayWithBase(shape, dtype, order, strides, diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -1,9 +1,9 @@ from pypy.module.micronumpy import support, loop, iter from pypy.module.micronumpy.base import convert_to_array, W_NDimArray,\ ArrayArgumentException -from pypy.module.micronumpy.strides import calc_new_strides, shape_agreement,\ - calculate_broadcast_strides, calculate_dot_strides 
-from pypy.module.micronumpy.iter import Chunk, Chunks, NewAxisChunk, RecordChunk +from pypy.module.micronumpy.strides import (Chunk, Chunks, NewAxisChunk, + RecordChunk, calc_new_strides, shape_agreement, calculate_broadcast_strides, + calculate_dot_strides) from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.buffer import RWBuffer from rpython.rlib import jit diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -1,8 +1,7 @@ from pypy.module.micronumpy.base import convert_to_array, W_NDimArray from pypy.module.micronumpy import loop, interp_dtype, interp_ufuncs -from pypy.module.micronumpy.iter import Chunk, Chunks -from pypy.module.micronumpy.strides import shape_agreement,\ - shape_agreement_multiple +from pypy.module.micronumpy.strides import Chunk, Chunks, shape_agreement, \ + shape_agreement_multiple from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec from pypy.module.micronumpy.conversion_utils import clipmode_converter diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -1,4 +1,3 @@ - """ This is a mini-tutorial on iterators, strides, and memory layout. It assumes you are familiar with the terms, see http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html @@ -42,99 +41,10 @@ dimension, perhaps we could overflow times in one big step. """ -from pypy.module.micronumpy.strides import enumerate_chunks,\ - calculate_slice_strides from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy import support from rpython.rlib import jit -# structures to describe slicing - -class BaseChunk(object): - pass - -class RecordChunk(BaseChunk): - def __init__(self, name): - self.name = name - - def apply(self, space, orig_arr): - arr = orig_arr.implementation - ofs, subdtype = arr.dtype.fields[self.name] - # ofs only changes start - # create a view of the original array by extending - # the shape, strides, backstrides of the array - from pypy.module.micronumpy.support import calc_strides - strides, backstrides = calc_strides(subdtype.shape, - subdtype.subdtype, arr.order) - final_shape = arr.shape + subdtype.shape - final_strides = arr.get_strides() + strides - final_backstrides = arr.get_backstrides() + backstrides - final_dtype = subdtype - if subdtype.subdtype: - final_dtype = subdtype.subdtype - return W_NDimArray.new_slice(space, arr.start + ofs, final_strides, - final_backstrides, - final_shape, arr, orig_arr, final_dtype) - -class Chunks(BaseChunk): - def __init__(self, l): - self.l = l - - @jit.unroll_safe - def extend_shape(self, old_shape): - shape = [] - i = -1 - for i, c in enumerate_chunks(self.l): - if c.step != 0: - shape.append(c.lgt) - s = i + 1 - assert s >= 0 - return shape[:] + old_shape[s:] - - def apply(self, space, orig_arr): - arr = orig_arr.implementation - shape = self.extend_shape(arr.shape) - r = calculate_slice_strides(arr.shape, arr.start, arr.get_strides(), - arr.get_backstrides(), self.l) - _, start, strides, backstrides = r - return W_NDimArray.new_slice(space, start, strides[:], backstrides[:], - shape[:], arr, orig_arr) - - -class Chunk(BaseChunk): - axis_step = 1 - - def __init__(self, start, stop, step, lgt): - self.start = start - self.stop = stop - self.step = step - self.lgt = lgt - - def __repr__(self): - return 
'Chunk(%d, %d, %d, %d)' % (self.start, self.stop, self.step, - self.lgt) - -class NewAxisChunk(Chunk): - start = 0 - stop = 1 - step = 1 - lgt = 1 - axis_step = 0 - - def __init__(self): - pass - -class BaseTransform(object): - pass - -class ViewTransform(BaseTransform): - def __init__(self, chunks): - # 4-tuple specifying slicing - self.chunks = chunks - -class BroadcastTransform(BaseTransform): - def __init__(self, res_shape): - self.res_shape = res_shape class PureShapeIterator(object): def __init__(self, shape, idx_w): @@ -168,18 +78,21 @@ def get_index(self, space, shapelen): return [space.wrap(self.indexes[i]) for i in range(shapelen)] + class BaseArrayIterator(object): def next(self): - raise NotImplementedError # purely abstract base class + raise NotImplementedError # purely abstract base class def setitem(self, elem): raise NotImplementedError def set_scalar_object(self, value): - raise NotImplementedError # works only on scalars + raise NotImplementedError # works only on scalars + class ConcreteArrayIterator(BaseArrayIterator): _immutable_fields_ = ['array', 'skip', 'size'] + def __init__(self, array): self.array = array self.offset = 0 @@ -207,6 +120,7 @@ def reset(self): self.offset %= self.size + class OneDimViewIterator(ConcreteArrayIterator): def __init__(self, array, start, strides, shape): self.array = array @@ -238,6 +152,7 @@ def get_index(self, d): return self.index + class MultiDimViewIterator(ConcreteArrayIterator): def __init__(self, array, start, strides, backstrides, shape): self.indexes = [0] * len(shape) @@ -276,7 +191,7 @@ remaining_step = (self.indexes[i] + step) // self.shape[i] this_i_step = step - remaining_step * self.shape[i] self.offset += self.strides[i] * this_i_step - self.indexes[i] = self.indexes[i] + this_i_step + self.indexes[i] = self.indexes[i] + this_i_step step = remaining_step else: self._done = True @@ -290,6 +205,7 @@ def get_index(self, d): return self.indexes[d] + class AxisIterator(BaseArrayIterator): def __init__(self, array, shape, dim, cumulative): self.shape = shape diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -1,4 +1,3 @@ - """ This file is the main run loop as well as evaluation loops for various operations. This is the place to look for all the computations that iterate over all the array elements. 
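The iterator classes kept in iter.py all revolve around the standard strides formula; as a sketch (plain Python, hypothetical function name), this is how a multi-dimensional index maps to a flat byte offset, which is the quantity MultiDimViewIterator maintains incrementally instead of recomputing:

def flat_offset(start, strides, indexes):
    # Byte offset of element `indexes` in an array whose first element
    # sits at byte `start` and whose i-th dimension advances by
    # strides[i] bytes per step.
    offset = start
    for idx, stride in zip(indexes, strides):
        offset += idx * stride
    return offset

# a C-ordered 2x3 array of 8-byte elements has strides (24, 8):
assert flat_offset(0, (24, 8), (1, 2)) == 40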
@@ -660,4 +659,3 @@ out_iter.setitem(arr.getitem_index(space, indexes)) iter.next() out_iter.next() - diff --git a/pypy/module/micronumpy/sort.py b/pypy/module/micronumpy/sort.py --- a/pypy/module/micronumpy/sort.py +++ b/pypy/module/micronumpy/sort.py @@ -17,6 +17,7 @@ INT_SIZE = rffi.sizeof(lltype.Signed) + def make_argsort_function(space, itemtype, comp_type, count=1): TP = itemtype.T step = rffi.sizeof(TP) @@ -167,6 +168,7 @@ return argsort + def argsort_array(arr, space, w_axis): cache = space.fromcache(ArgSortCache) # that populates ArgSortClasses itemtype = arr.dtype.itemtype @@ -179,10 +181,6 @@ "sorting of non-numeric types '%s' is not implemented", arr.dtype.get_name()) -all_types = (types.all_float_types + types.all_complex_types + - types.all_int_types) -all_types = [i for i in all_types if not '_mixin_' in i[0].__dict__] -all_types = unrolling_iterable(all_types) def make_sort_function(space, itemtype, comp_type, count=1): TP = itemtype.T @@ -307,8 +305,9 @@ return sort + def sort_array(arr, space, w_axis, w_order): - cache = space.fromcache(SortCache) # that populates SortClasses + cache = space.fromcache(SortCache) # that populates SortClasses itemtype = arr.dtype.itemtype if arr.dtype.byteorder == NPY.OPPBYTE: raise oefmt(space.w_NotImplementedError, @@ -327,6 +326,7 @@ all_types = [i for i in all_types if not issubclass(i[0], types.Float16)] all_types = unrolling_iterable(all_types) + class ArgSortCache(object): built = False @@ -341,7 +341,7 @@ else: cache[cls] = make_argsort_function(space, cls, it) self.cache = cache - self._lookup = specialize.memo()(lambda tp : cache[tp[0]]) + self._lookup = specialize.memo()(lambda tp: cache[tp[0]]) class SortCache(object): @@ -358,4 +358,4 @@ else: cache[cls] = make_sort_function(space, cls, it) self.cache = cache - self._lookup = specialize.memo()(lambda tp : cache[tp[0]]) + self._lookup = specialize.memo()(lambda tp: cache[tp[0]]) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -1,9 +1,105 @@ from rpython.rlib import jit -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy import support from pypy.module.micronumpy import constants as NPY + +# structures to describe slicing + +class BaseChunk(object): + pass + + +class RecordChunk(BaseChunk): + def __init__(self, name): + self.name = name + + def apply(self, space, orig_arr): + arr = orig_arr.implementation + ofs, subdtype = arr.dtype.fields[self.name] + # ofs only changes start + # create a view of the original array by extending + # the shape, strides, backstrides of the array + from pypy.module.micronumpy.support import calc_strides + strides, backstrides = calc_strides(subdtype.shape, + subdtype.subdtype, arr.order) + final_shape = arr.shape + subdtype.shape + final_strides = arr.get_strides() + strides + final_backstrides = arr.get_backstrides() + backstrides + final_dtype = subdtype + if subdtype.subdtype: + final_dtype = subdtype.subdtype + return W_NDimArray.new_slice(space, arr.start + ofs, final_strides, + final_backstrides, + final_shape, arr, orig_arr, final_dtype) + + +class Chunks(BaseChunk): + def __init__(self, l): + self.l = l + + @jit.unroll_safe + def extend_shape(self, old_shape): + shape = [] + i = -1 + for i, c in enumerate_chunks(self.l): + if c.step != 0: + shape.append(c.lgt) + s = i + 1 + assert s >= 0 + 
return shape[:] + old_shape[s:] + + def apply(self, space, orig_arr): + arr = orig_arr.implementation + shape = self.extend_shape(arr.shape) + r = calculate_slice_strides(arr.shape, arr.start, arr.get_strides(), + arr.get_backstrides(), self.l) + _, start, strides, backstrides = r + return W_NDimArray.new_slice(space, start, strides[:], backstrides[:], + shape[:], arr, orig_arr) + + +class Chunk(BaseChunk): + axis_step = 1 + + def __init__(self, start, stop, step, lgt): + self.start = start + self.stop = stop + self.step = step + self.lgt = lgt + + def __repr__(self): + return 'Chunk(%d, %d, %d, %d)' % (self.start, self.stop, self.step, + self.lgt) + + +class NewAxisChunk(Chunk): + start = 0 + stop = 1 + step = 1 + lgt = 1 + axis_step = 0 + + def __init__(self): + pass + + +class BaseTransform(object): + pass + + +class ViewTransform(BaseTransform): + def __init__(self, chunks): + # 4-tuple specifying slicing + self.chunks = chunks + + +class BroadcastTransform(BaseTransform): + def __init__(self, res_shape): + self.res_shape = res_shape + + @jit.look_inside_iff(lambda chunks: jit.isconstant(len(chunks))) def enumerate_chunks(chunks): result = [] @@ -13,9 +109,9 @@ result.append((i, chunk)) return result + @jit.look_inside_iff(lambda shape, start, strides, backstrides, chunks: - jit.isconstant(len(chunks)) -) + jit.isconstant(len(chunks))) def calculate_slice_strides(shape, start, strides, backstrides, chunks): size = 0 for chunk in chunks: @@ -46,6 +142,7 @@ rshape += shape[s:] return rshape, rstart, rstrides, rbackstrides + def calculate_broadcast_strides(strides, backstrides, orig_shape, res_shape, backwards=False): rstrides = [] rbackstrides = [] @@ -64,16 +161,18 @@ rbackstrides = [0] * (len(res_shape) - len(orig_shape)) + rbackstrides return rstrides, rbackstrides + def is_single_elem(space, w_elem, is_rec_type): if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): return True if (space.isinstance_w(w_elem, space.w_tuple) or - space.isinstance_w(w_elem, space.w_list)): + space.isinstance_w(w_elem, space.w_list)): return False if isinstance(w_elem, W_NDimArray) and not w_elem.is_scalar(): return False return True + def find_shape_and_elems(space, w_iterable, dtype): isstr = space.isinstance_w(w_iterable, space.w_str) if not support.issequence_w(space, w_iterable) or isstr: @@ -99,7 +198,7 @@ size = space.len_w(batch[0]) for w_elem in batch: if (is_single_elem(space, w_elem, is_rec_type) or - space.len_w(w_elem) != size): + space.len_w(w_elem) != size): raise OperationError(space.w_ValueError, space.wrap( "setting an array element with a sequence")) w_array = space.lookup(w_elem, '__array__') @@ -112,12 +211,13 @@ shape.append(size) batch = new_batch + def to_coords(space, shape, size, order, w_item_or_slice): '''Returns a start coord, step, and length. 
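calculate_broadcast_strides above uses the usual zero-stride trick: a broadcast dimension gets stride 0 so iteration keeps revisiting the same element. A simplified sketch of the idea (plain Python, right-aligned broadcasting only; the real helper also handles the backwards case and the backstrides):

def broadcast_strides(strides, orig_shape, res_shape):
    # Align shapes on the right; dimensions that are missing or of
    # length 1 in the original get stride 0 so the element repeats.
    rstrides = []
    offset = len(res_shape) - len(orig_shape)
    for i in range(len(res_shape)):
        j = i - offset
        if j < 0 or orig_shape[j] == 1:
            rstrides.append(0)
        else:
            rstrides.append(strides[j])
    return rstrides

# a (3,) array of 8-byte elements viewed with shape (2, 3):
assert broadcast_strides([8], [3], [2, 3]) == [0, 8]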
''' start = lngth = step = 0 if not (space.isinstance_w(w_item_or_slice, space.w_int) or - space.isinstance_w(w_item_or_slice, space.w_slice)): + space.isinstance_w(w_item_or_slice, space.w_slice)): raise OperationError(space.w_IndexError, space.wrap('unsupported iterator index')) @@ -135,6 +235,7 @@ i //= shape[s] return coords, step, lngth + @jit.unroll_safe def shape_agreement(space, shape1, w_arr2, broadcast_down=True): if w_arr2 is None: @@ -158,6 +259,7 @@ ) return ret + @jit.unroll_safe def shape_agreement_multiple(space, array_list): """ call shape_agreement recursively, allow elements from array_list to @@ -169,6 +271,7 @@ shape = shape_agreement(space, shape, arr) return shape + def _shape_agreement(shape1, shape2): """ Checks agreement about two shapes with respect to broadcasting. Returns the resulting shape. @@ -207,6 +310,7 @@ endshape[i] = remainder[i] return endshape + def get_shape_from_iterable(space, old_size, w_iterable): new_size = 0 new_shape = [] @@ -225,8 +329,8 @@ s = space.int_w(elem) if s < 0: if neg_dim >= 0: - raise OperationError(space.w_ValueError, space.wrap( - "can only specify one unknown dimension")) + raise oefmt(space.w_ValueError, + "can only specify one unknown dimension") s = 1 neg_dim = i new_size *= s @@ -240,6 +344,7 @@ space.wrap("total size of new array must be unchanged")) return new_shape + # Recalculating strides. Find the steps that the iteration does for each # dimension, given the stride and shape. Then try to create a new stride that # fits the new shape, using those steps. If there is a shape/step mismatch diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -1,5 +1,6 @@ from rpython.rlib import jit -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt + def issequence_w(space, w_obj): from pypy.module.micronumpy.base import W_NDimArray @@ -7,6 +8,7 @@ space.isinstance_w(w_obj, space.w_list) or isinstance(w_obj, W_NDimArray)) + def index_w(space, w_obj): try: return space.int_w(space.index(w_obj)) @@ -14,8 +16,8 @@ try: return space.int_w(space.int(w_obj)) except OperationError: - raise OperationError(space.w_IndexError, space.wrap( - "cannot convert index to integer")) + raise oefmt(space.w_IndexError, "cannot convert index to integer") + @jit.unroll_safe def product(s): @@ -24,6 +26,7 @@ i *= x return i + @jit.unroll_safe def calc_strides(shape, dtype, order): strides = [] diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3,7 +3,7 @@ from pypy.conftest import option from pypy.module.micronumpy.appbridge import get_appbridge_cache -from pypy.module.micronumpy.iter import Chunk, Chunks +from pypy.module.micronumpy.strides import Chunk, Chunks from pypy.module.micronumpy.interp_numarray import W_NDimArray from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -11,7 +11,8 @@ from rpython.rlib.rawstorage import (alloc_raw_storage, raw_storage_getitem_unaligned, raw_storage_setitem_unaligned) from rpython.rlib.objectmodel import specialize -from rpython.rlib.rarithmetic import widen, byteswap, r_ulonglong, most_neg_value_of, LONG_BIT +from 
rpython.rlib.rarithmetic import widen, byteswap, r_ulonglong, \ + most_neg_value_of, LONG_BIT from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rstruct.runpack import runpack from rpython.rlib.rstruct.nativefmttable import native_is_bigendian From noreply at buildbot.pypy.org Thu Feb 27 02:00:02 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 02:00:02 +0100 (CET) Subject: [pypy-commit] pypy default: merge numpy-refactor Message-ID: <20140227010002.18EC61C3973@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69486:5c51d9a890ac Date: 2014-02-26 19:57 -0500 http://bitbucket.org/pypy/pypy/changeset/5c51d9a890ac/ Log: merge numpy-refactor diff --git a/pypy/module/micronumpy/appbridge.py b/pypy/module/micronumpy/appbridge.py --- a/pypy/module/micronumpy/appbridge.py +++ b/pypy/module/micronumpy/appbridge.py @@ -1,5 +1,6 @@ from rpython.rlib.objectmodel import specialize + class AppBridgeCache(object): w__mean = None w__var = None @@ -20,6 +21,7 @@ setattr(self, 'w_' + name, w_method) return space.call_args(w_method, args) + def set_string_function(space, w_f, w_repr): cache = get_appbridge_cache(space) if space.is_true(w_repr): @@ -27,5 +29,6 @@ else: cache.w_array_str = w_f + def get_appbridge_cache(space): return space.fromcache(AppBridgeCache) diff --git a/pypy/module/micronumpy/arrayimpl/__init__.py b/pypy/module/micronumpy/arrayimpl/__init__.py deleted file mode 100644 diff --git a/pypy/module/micronumpy/arrayimpl/base.py b/pypy/module/micronumpy/arrayimpl/base.py deleted file mode 100644 --- a/pypy/module/micronumpy/arrayimpl/base.py +++ /dev/null @@ -1,20 +0,0 @@ - -class BaseArrayImplementation(object): - def is_scalar(self): - return False - - def base(self): - raise NotImplementedError - - def create_iter(self, shape=None, backward_broadcast=False, require_index=False): - raise NotImplementedError - -class BaseArrayIterator(object): - def next(self): - raise NotImplementedError # purely abstract base class - - def setitem(self, elem): - raise NotImplementedError - - def set_scalar_object(self, value): - raise NotImplementedError # works only on scalars diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py deleted file mode 100644 --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ /dev/null @@ -1,209 +0,0 @@ -from pypy.module.micronumpy.arrayimpl import base -from pypy.module.micronumpy.base import W_NDimArray, convert_to_array -from pypy.module.micronumpy import support -from pypy.interpreter.error import OperationError - -class ScalarIterator(base.BaseArrayIterator): - def __init__(self, v): - self.v = v - self.called_once = False - - def next(self): - self.called_once = True - - def next_skip_x(self, n): - self.called_once = True - - def getitem(self): - return self.v.get_scalar_value() - - def getitem_bool(self): - return self.v.dtype.itemtype.bool(self.v.value) - - def setitem(self, v): - self.v.set_scalar_value(v) - - def done(self): - return self.called_once - - def reset(self): - pass - -class Scalar(base.BaseArrayImplementation): - def __init__(self, dtype, value=None): - self.dtype = dtype - self.value = value - - def is_scalar(self): - return True - - def get_shape(self): - return [] - - def get_strides(self): - return [] - - def get_backstrides(self): - return [] - - def create_iter(self, shape=None, backward_broadcast=False, require_index=False): - return ScalarIterator(self) - - def get_scalar_value(self): - return self.value - - def set_scalar_value(self, 
w_val): - self.value = w_val - - def copy(self, space): - scalar = Scalar(self.dtype) - scalar.value = self.value - return scalar - - def get_size(self): - return 1 - - def transpose(self, _): - return self - - def get_view(self, space, orig_array, dtype, new_shape): - scalar = Scalar(dtype) - if dtype.is_str_or_unicode(): - scalar.value = dtype.coerce(space, space.wrap(self.value.raw_str())) - elif dtype.is_record(): - raise OperationError(space.w_NotImplementedError, space.wrap( - "viewing scalar as record not implemented")) - else: - scalar.value = dtype.itemtype.runpack_str(space, self.value.raw_str()) - return scalar - - def get_real(self, space, orig_array): - if self.dtype.is_complex(): - scalar = Scalar(self.dtype.get_float_dtype(space)) - scalar.value = self.value.convert_real_to(scalar.dtype) - return scalar - return self - - def set_real(self, space, orig_array, w_val): - w_arr = convert_to_array(space, w_val) - if len(w_arr.get_shape()) > 0: - raise OperationError(space.w_ValueError, space.wrap( - "could not broadcast input array from shape " + - "(%s) into shape ()" % ( - ','.join([str(x) for x in w_arr.get_shape()],)))) - if self.dtype.is_complex(): - dtype = self.dtype.get_float_dtype(space) - self.value = self.dtype.itemtype.composite( - w_arr.get_scalar_value().convert_to(space, dtype), - self.value.convert_imag_to(dtype)) - else: - self.value = w_arr.get_scalar_value() - - def get_imag(self, space, orig_array): - if self.dtype.is_complex(): - scalar = Scalar(self.dtype.get_float_dtype(space)) - scalar.value = self.value.convert_imag_to(scalar.dtype) - return scalar - scalar = Scalar(self.dtype) - scalar.value = scalar.dtype.coerce(space, None) - return scalar - - def set_imag(self, space, orig_array, w_val): - #Only called on complex dtype - assert self.dtype.is_complex() - w_arr = convert_to_array(space, w_val) - if len(w_arr.get_shape()) > 0: - raise OperationError(space.w_ValueError, space.wrap( - "could not broadcast input array from shape " + - "(%s) into shape ()" % ( - ','.join([str(x) for x in w_arr.get_shape()],)))) - dtype = self.dtype.get_float_dtype(space) - self.value = self.dtype.itemtype.composite( - self.value.convert_real_to(dtype), - w_arr.get_scalar_value().convert_to(space, dtype)) - - def descr_getitem(self, space, _, w_idx): - if space.isinstance_w(w_idx, space.w_tuple): - if space.len_w(w_idx) == 0: - return self.get_scalar_value() - elif space.isinstance_w(w_idx, space.w_str): - if self.dtype.is_record(): - w_val = self.value.descr_getitem(space, w_idx) - return convert_to_array(space, w_val) - elif space.is_none(w_idx): - new_shape = [1] - arr = W_NDimArray.from_shape(space, new_shape, self.dtype) - arr_iter = arr.create_iter(new_shape) - arr_iter.setitem(self.value) - return arr - raise OperationError(space.w_IndexError, - space.wrap("0-d arrays can't be indexed")) - - def getitem_index(self, space, idx): - raise OperationError(space.w_IndexError, - space.wrap("0-d arrays can't be indexed")) - - def descr_setitem(self, space, _, w_idx, w_val): - if space.isinstance_w(w_idx, space.w_tuple): - if space.len_w(w_idx) == 0: - return self.set_scalar_value(self.dtype.coerce(space, w_val)) - elif space.isinstance_w(w_idx, space.w_str): - if self.dtype.is_record(): - return self.value.descr_setitem(space, w_idx, w_val) - raise OperationError(space.w_IndexError, - space.wrap("0-d arrays can't be indexed")) - - def setitem_index(self, space, idx, w_val): - raise OperationError(space.w_IndexError, - space.wrap("0-d arrays can't be indexed")) - - def 
set_shape(self, space, orig_array, new_shape): - if not new_shape: - return self - if support.product(new_shape) == 1: - arr = W_NDimArray.from_shape(space, new_shape, self.dtype) - arr_iter = arr.create_iter(new_shape) - arr_iter.setitem(self.value) - return arr.implementation - raise OperationError(space.w_ValueError, space.wrap( - "total size of the array must be unchanged")) - - def set_dtype(self, space, dtype): - self.value = self.value.convert_to(space, dtype) - self.dtype = dtype - - def reshape(self, space, orig_array, new_shape): - return self.set_shape(space, orig_array, new_shape) - - def create_axis_iter(self, shape, dim, cum): - raise Exception("axis iter should not happen on scalar") - - def swapaxes(self, space, orig_array, axis1, axis2): - raise Exception("should not be called") - - def nonzero(self, space, index_type): - s = self.dtype.itemtype.bool(self.value) - w_res = W_NDimArray.from_shape(space, [s], index_type) - if s == 1: - w_res.implementation.setitem(0, index_type.itemtype.box(0)) - return space.newtuple([w_res]) - - def fill(self, space, w_value): - self.value = w_value - - def get_storage_as_int(self, space): - raise OperationError(space.w_ValueError, - space.wrap("scalars have no address")) - - def argsort(self, space, w_axis): - return space.wrap(0) - - def astype(self, space, dtype): - raise Exception("should not be called") - - def base(self): - return None - - def get_buffer(self, space): - raise OperationError(space.w_ValueError, space.wrap( - "cannot point buffer to a scalar")) diff --git a/pypy/module/micronumpy/arrayimpl/voidbox.py b/pypy/module/micronumpy/arrayimpl/voidbox.py deleted file mode 100644 --- a/pypy/module/micronumpy/arrayimpl/voidbox.py +++ /dev/null @@ -1,12 +0,0 @@ - -from pypy.module.micronumpy.arrayimpl.base import BaseArrayImplementation -from rpython.rlib.rawstorage import free_raw_storage, alloc_raw_storage - -class VoidBoxStorage(BaseArrayImplementation): - def __init__(self, size, dtype): - self.storage = alloc_raw_storage(size) - self.dtype = dtype - self.size = size - - def __del__(self): - free_raw_storage(self.storage) diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -1,16 +1,9 @@ - from pypy.interpreter.error import OperationError from pypy.interpreter.baseobjspace import W_Root from rpython.tool.pairtype import extendabletype from pypy.module.micronumpy.support import calc_strides -from pypy.module.micronumpy.arrayimpl.base import BaseArrayImplementation -def issequence_w(space, w_obj): - return (space.isinstance_w(w_obj, space.w_tuple) or - space.isinstance_w(w_obj, space.w_list) or - isinstance(w_obj, W_NDimArray)) - def wrap_impl(space, w_cls, w_instance, impl): if w_cls is None or space.is_w(w_cls, space.gettypefor(W_NDimArray)): w_ret = W_NDimArray(impl) @@ -21,6 +14,7 @@ space.call_method(w_ret, '__array_finalize__', w_instance) return w_ret + class ArrayArgumentException(Exception): pass @@ -29,21 +23,18 @@ __metaclass__ = extendabletype def __init__(self, implementation): - assert isinstance(implementation, BaseArrayImplementation) + from pypy.module.micronumpy.concrete import BaseConcreteArray + assert isinstance(implementation, BaseConcreteArray) assert isinstance(self, W_NDimArray) self.implementation = implementation @staticmethod def from_shape(space, shape, dtype, order='C', w_instance=None): - from pypy.module.micronumpy.arrayimpl import concrete, scalar + from pypy.module.micronumpy import concrete - if 
not shape: - w_val = dtype.base.coerce(space, None) - impl = scalar.Scalar(dtype.base, w_val) - else: - strides, backstrides = calc_strides(shape, dtype.base, order) - impl = concrete.ConcreteArray(shape, dtype.base, order, strides, - backstrides) + strides, backstrides = calc_strides(shape, dtype.base, order) + impl = concrete.ConcreteArray(shape, dtype.base, order, strides, + backstrides) if w_instance: return wrap_impl(space, space.type(w_instance), w_instance, impl) return W_NDimArray(impl) @@ -51,12 +42,12 @@ @staticmethod def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, w_subtype=None, w_base=None, writable=True): - from pypy.module.micronumpy.arrayimpl import concrete + from pypy.module.micronumpy import concrete assert shape strides, backstrides = calc_strides(shape, dtype, order) if w_base is not None: if owning: - raise OperationError(space.w_ValueError, + raise OperationError(space.w_ValueError, space.wrap("Cannot have owning=True when specifying a buffer")) if writable: impl = concrete.ConcreteArrayWithBase(shape, dtype, order, strides, @@ -82,7 +73,7 @@ @staticmethod def new_slice(space, offset, strides, backstrides, shape, parent, orig_arr, dtype=None): - from pypy.module.micronumpy.arrayimpl import concrete + from pypy.module.micronumpy import concrete impl = concrete.SliceArray(offset, strides, backstrides, shape, parent, orig_arr, dtype) @@ -90,36 +81,15 @@ @staticmethod def new_scalar(space, dtype, w_val=None): - from pypy.module.micronumpy.arrayimpl import scalar - if w_val is not None: w_val = dtype.coerce(space, w_val) else: w_val = dtype.coerce(space, space.wrap(0)) - return W_NDimArray(scalar.Scalar(dtype, w_val)) + return convert_to_array(space, w_val) def convert_to_array(space, w_obj): - #XXX: This whole routine should very likely simply be array() from pypy.module.micronumpy.interp_numarray import array - from pypy.module.micronumpy import interp_ufuncs - if isinstance(w_obj, W_NDimArray): return w_obj - else: - # Use __array__() method if it exists - w_array = space.lookup(w_obj, "__array__") - if w_array is not None: - w_result = space.get_and_call_function(w_array, w_obj) - if isinstance(w_result, W_NDimArray): - return w_result - else: - raise OperationError(space.w_ValueError, - space.wrap("object __array__ method not producing an array")) - elif issequence_w(space, w_obj): - # Convert to array. - return array(space, w_obj, w_order=None) - else: - # If it's a scalar - dtype = interp_ufuncs.find_dtype_for_scalar(space, w_obj) - return W_NDimArray.new_scalar(space, dtype, w_obj) + return array(space, w_obj) diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -1,4 +1,3 @@ - """ This is a set of tools for standalone compiling of numpy expressions. 
It should not be imported by the module itself """ @@ -95,7 +94,10 @@ return StringObject(NonConstant('foo')) def isinstance_w(self, w_obj, w_tp): - return w_obj.tp == w_tp + try: + return w_obj.tp == w_tp + except AttributeError: + return False def decode_index4(self, w_idx, size): if isinstance(w_idx, IntObject): @@ -215,7 +217,8 @@ def lookup(self, w_obj, name): w_type = self.type(w_obj) - return w_type.lookup(name) + if not self.is_none(w_type): + return w_type.lookup(name) def gettypefor(self, w_obj): return W_TypeObject(w_obj.typedef.name) diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/concrete.py rename from pypy/module/micronumpy/arrayimpl/concrete.py rename to pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -1,20 +1,19 @@ -from pypy.module.micronumpy.arrayimpl import base, scalar from pypy.module.micronumpy import support, loop, iter from pypy.module.micronumpy.base import convert_to_array, W_NDimArray,\ ArrayArgumentException -from pypy.module.micronumpy.strides import calc_new_strides, shape_agreement,\ - calculate_broadcast_strides, calculate_dot_strides -from pypy.module.micronumpy.iter import Chunk, Chunks, NewAxisChunk, RecordChunk +from pypy.module.micronumpy.strides import (Chunk, Chunks, NewAxisChunk, + RecordChunk, calc_new_strides, shape_agreement, calculate_broadcast_strides, + calculate_dot_strides) from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.buffer import RWBuffer from rpython.rlib import jit from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rlib.rawstorage import free_raw_storage, raw_storage_getitem,\ - raw_storage_setitem, RAW_STORAGE +from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage, \ + raw_storage_getitem, raw_storage_setitem, RAW_STORAGE from rpython.rlib.debug import make_sure_not_resized -class BaseConcreteArray(base.BaseArrayImplementation): +class BaseConcreteArray(object): start = 0 parent = None @@ -45,11 +44,13 @@ self.dtype.itemtype.store(self, index, 0, value) def setslice(self, space, arr): + if len(arr.get_shape()) > 0 and len(self.get_shape()) == 0: + raise oefmt(space.w_ValueError, + "could not broadcast input array from shape " + "(%s) into shape ()", + ','.join([str(x) for x in arr.get_shape()])) + shape = shape_agreement(space, self.get_shape(), arr) impl = arr.implementation - if impl.is_scalar(): - self.fill(space, impl.get_scalar_value()) - return - shape = shape_agreement(space, self.get_shape(), arr) if impl.storage == self.storage: impl = impl.copy(space) loop.setslice(space, shape, self, impl) @@ -64,9 +65,12 @@ # Since we got to here, prod(new_shape) == self.size new_strides = None if self.size > 0: - new_strides = calc_new_strides(new_shape, self.get_shape(), - self.get_strides(), self.order) - if new_strides: + if len(self.get_shape()) == 0: + new_strides = [self.dtype.elsize] * len(new_shape) + else: + new_strides = calc_new_strides(new_shape, self.get_shape(), + self.get_strides(), self.order) + if new_strides is not None: # We can create a view, strides somehow match up. 
ndims = len(new_shape) new_backstrides = [0] * ndims @@ -75,10 +79,6 @@ assert isinstance(orig_array, W_NDimArray) or orig_array is None return SliceArray(self.start, new_strides, new_backstrides, new_shape, self, orig_array) - else: - if self.get_size() == 1 and len(new_shape) == 0: - return scalar.Scalar(self.dtype, self.getitem(0)) - return None def get_view(self, space, orig_array, dtype, new_shape): strides, backstrides = support.calc_strides(new_shape, dtype, @@ -92,7 +92,7 @@ if self.dtype.is_complex(): dtype = self.dtype.get_float_dtype(space) return SliceArray(self.start, strides, backstrides, - self.get_shape(), self, orig_array, dtype=dtype) + self.get_shape(), self, orig_array, dtype=dtype) return SliceArray(self.start, strides, backstrides, self.get_shape(), self, orig_array) @@ -105,10 +105,10 @@ backstrides = self.get_backstrides() if self.dtype.is_complex(): dtype = self.dtype.get_float_dtype(space) - return SliceArray(self.start + dtype.elsize, strides, - backstrides, self.get_shape(), self, orig_array, dtype=dtype) - impl = NonWritableArray(self.get_shape(), self.dtype, self.order, strides, - backstrides) + return SliceArray(self.start + dtype.elsize, strides, backstrides, + self.get_shape(), self, orig_array, dtype=dtype) + impl = NonWritableArray(self.get_shape(), self.dtype, self.order, + strides, backstrides) if not self.dtype.is_flexible(): impl.fill(space, self.dtype.box(0)) return impl @@ -167,7 +167,7 @@ space.isinstance_w(w_idx, space.w_slice) or space.is_w(w_idx, space.w_None)): raise IndexError - if isinstance(w_idx, W_NDimArray) and not isinstance(w_idx.implementation, scalar.Scalar): + if isinstance(w_idx, W_NDimArray) and not w_idx.is_scalar(): raise ArrayArgumentException shape = self.get_shape() shape_len = len(shape) @@ -194,7 +194,9 @@ space.isinstance_w(w_item, space.w_list)): raise ArrayArgumentException return self._lookup_by_index(space, view_w) - if shape_len > 1: + if shape_len == 0: + raise oefmt(space.w_IndexError, "0-d arrays can't be indexed") + elif shape_len > 1: raise IndexError idx = support.index_w(space, w_idx) return self._lookup_by_index(space, [space.wrap(idx)]) @@ -208,11 +210,12 @@ raise OperationError(space.w_ValueError, space.wrap( "field named %s not found" % idx)) return RecordChunk(idx) - if (space.isinstance_w(w_idx, space.w_int) or + elif (space.isinstance_w(w_idx, space.w_int) or space.isinstance_w(w_idx, space.w_slice)): + if len(self.get_shape()) == 0: + raise oefmt(space.w_ValueError, "cannot slice a 0-d array") return Chunks([Chunk(*space.decode_index4(w_idx, self.get_shape()[0]))]) - elif isinstance(w_idx, W_NDimArray) and \ - isinstance(w_idx.implementation, scalar.Scalar): + elif isinstance(w_idx, W_NDimArray) and w_idx.is_scalar(): w_idx = w_idx.get_scalar_value().item(space) if not space.isinstance_w(w_idx, space.w_int) and \ not space.isinstance_w(w_idx, space.w_bool): @@ -292,7 +295,7 @@ def nonzero(self, space, index_type): s = loop.count_all_true_concrete(self) box = index_type.itemtype.box - nd = len(self.get_shape()) + nd = len(self.get_shape()) or 1 w_res = W_NDimArray.from_shape(space, [s, nd], index_type) loop.nonzero(w_res, self, box) w_res = w_res.implementation.swapaxes(space, w_res, 0, 1) @@ -319,7 +322,6 @@ class ConcreteArrayNotOwning(BaseConcreteArray): def __init__(self, shape, dtype, order, strides, backstrides, storage): - make_sure_not_resized(shape) make_sure_not_resized(strides) make_sure_not_resized(backstrides) @@ -342,7 +344,7 @@ r[0], r[1], shape) if not require_index: return 
iter.ConcreteArrayIterator(self) - if len(self.get_shape()) == 1: + if len(self.get_shape()) <= 1: return iter.OneDimViewIterator(self, self.start, self.get_strides(), self.get_shape()) @@ -365,11 +367,11 @@ self.dtype = dtype def argsort(self, space, w_axis): - from pypy.module.micronumpy.arrayimpl.sort import argsort_array + from pypy.module.micronumpy.sort import argsort_array return argsort_array(self, space, w_axis) def sort(self, space, w_axis, w_order): - from pypy.module.micronumpy.arrayimpl.sort import sort_array + from pypy.module.micronumpy.sort import sort_array return sort_array(self, space, w_axis, w_order) def base(self): @@ -389,6 +391,7 @@ def __del__(self): free_raw_storage(self.storage, track_allocation=False) + class ConcreteArrayWithBase(ConcreteArrayNotOwning): def __init__(self, shape, dtype, order, strides, backstrides, storage, orig_base): ConcreteArrayNotOwning.__init__(self, shape, dtype, order, @@ -444,7 +447,7 @@ backward_broadcast) return iter.MultiDimViewIterator(self, self.start, r[0], r[1], shape) - if len(self.get_shape()) == 1: + if len(self.get_shape()) <= 1: return iter.OneDimViewIterator(self, self.start, self.get_strides(), self.get_shape()) @@ -460,7 +463,10 @@ strides = [] backstrides = [] dtype = self.dtype - s = self.get_strides()[0] // dtype.elsize + try: + s = self.get_strides()[0] // dtype.elsize + except IndexError: + s = 1 if self.order == 'C': new_shape.reverse() for sh in new_shape: @@ -486,6 +492,16 @@ self, orig_array) +class VoidBoxStorage(BaseConcreteArray): + def __init__(self, size, dtype): + self.storage = alloc_raw_storage(size) + self.dtype = dtype + self.size = size + + def __del__(self): + free_raw_storage(self.storage) + + class ArrayBuffer(RWBuffer): def __init__(self, impl): self.impl = impl diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -1,11 +1,11 @@ from pypy.module.micronumpy.base import convert_to_array, W_NDimArray from pypy.module.micronumpy import loop, interp_dtype, interp_ufuncs -from pypy.module.micronumpy.iter import Chunk, Chunks -from pypy.module.micronumpy.strides import shape_agreement,\ - shape_agreement_multiple +from pypy.module.micronumpy.strides import Chunk, Chunks, shape_agreement, \ + shape_agreement_multiple from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec from pypy.module.micronumpy.conversion_utils import clipmode_converter +from pypy.module.micronumpy import support from pypy.module.micronumpy import constants as NPY def where(space, w_arr, w_x=None, w_y=None): @@ -209,8 +209,6 @@ return out def put(space, w_arr, w_indices, w_values, w_mode): - from pypy.module.micronumpy.support import index_w - arr = convert_to_array(space, w_arr) mode = clipmode_converter(space, w_mode) @@ -235,7 +233,7 @@ v_idx = 0 for idx in indices: - index = index_w(space, idx) + index = support.index_w(space, idx) if index < 0 or index >= arr.get_size(): if mode == NPY.RAISE: diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -10,7 +10,7 @@ from rpython.rlib.rarithmetic import LONG_BIT from rpython.rtyper.lltypesystem import rffi from rpython.tool.sourcetools import func_with_new_name -from pypy.module.micronumpy.arrayimpl.voidbox import VoidBoxStorage +from 
pypy.module.micronumpy.concrete import VoidBoxStorage from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy.interp_flagsobj import W_FlagsObject from pypy.interpreter.mixedmodule import MixedModule diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -10,6 +10,7 @@ from rpython.rlib import jit from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy.conversion_utils import byteorder_converter +from pypy.module.micronumpy import support from pypy.module.micronumpy import constants as NPY @@ -372,7 +373,7 @@ "incorrect subarray in __setstate__") subdtype, w_shape = space.fixedview(w_subarray) assert isinstance(subdtype, W_Dtype) - if not base.issequence_w(space, w_shape): + if not support.issequence_w(space, w_shape): self.shape = [space.int_w(w_shape)] else: self.shape = [space.int_w(w_s) for w_s in space.fixedview(w_shape)] @@ -432,7 +433,7 @@ w_shape = space.newtuple([]) if space.len_w(w_elem) == 3: w_fldname, w_flddesc, w_shape = space.fixedview(w_elem) - if not base.issequence_w(space, w_shape): + if not support.issequence_w(space, w_shape): w_shape = space.newtuple([w_shape]) else: w_fldname, w_flddesc = space.fixedview(w_elem, 2) diff --git a/pypy/module/micronumpy/interp_flatiter.py b/pypy/module/micronumpy/interp_flatiter.py --- a/pypy/module/micronumpy/interp_flatiter.py +++ b/pypy/module/micronumpy/interp_flatiter.py @@ -1,10 +1,10 @@ - from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy import loop -from pypy.module.micronumpy.arrayimpl.base import BaseArrayImplementation +from pypy.module.micronumpy.concrete import BaseConcreteArray from pypy.interpreter.error import OperationError -class FakeArrayImplementation(BaseArrayImplementation): + +class FakeArrayImplementation(BaseConcreteArray): """ The sole purpose of this class is to W_FlatIterator can behave like a real array for descr_eq and friends """ diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -5,7 +5,7 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec, applevel, \ WrappedDefault from pypy.module.micronumpy.base import W_NDimArray, convert_to_array,\ - ArrayArgumentException, issequence_w, wrap_impl + ArrayArgumentException, wrap_impl from pypy.module.micronumpy import interp_dtype, interp_ufuncs, interp_boxes,\ interp_arrayops from pypy.module.micronumpy.strides import find_shape_and_elems,\ @@ -16,11 +16,10 @@ from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy import loop from pypy.module.micronumpy.interp_arrayops import repeat, choose, put -from pypy.module.micronumpy.arrayimpl import scalar from rpython.tool.sourcetools import func_with_new_name from rpython.rlib import jit from rpython.rlib.rstring import StringBuilder -from pypy.module.micronumpy.arrayimpl.base import BaseArrayImplementation +from pypy.module.micronumpy.concrete import BaseConcreteArray from pypy.module.micronumpy.conversion_utils import order_converter, multi_axis_converter from pypy.module.micronumpy import support from pypy.module.micronumpy import constants as NPY @@ -145,7 +144,7 @@ "cannot assign %d input values to " "the %d output values where the mask is true" % (val.get_size(), size))) - 
loop.setitem_filter(space, self, idx, val, size) + loop.setitem_filter(space, self, idx, val) def _prepare_array_index(self, space, w_index): if isinstance(w_index, W_NDimArray): @@ -292,10 +291,10 @@ return s.build() def create_iter(self, shape=None, backward_broadcast=False, require_index=False): - assert isinstance(self.implementation, BaseArrayImplementation) - return self.implementation.create_iter(shape=shape, - backward_broadcast=backward_broadcast, - require_index=require_index) + assert isinstance(self.implementation, BaseConcreteArray) + return self.implementation.create_iter( + shape=shape, backward_broadcast=backward_broadcast, + require_index=require_index) def create_axis_iter(self, shape, dim, cum): return self.implementation.create_axis_iter(shape, dim, cum) @@ -304,10 +303,10 @@ return self.implementation.create_dot_iter(shape, skip) def is_scalar(self): - return self.implementation.is_scalar() + return len(self.get_shape()) == 0 def set_scalar_value(self, w_val): - self.implementation.set_scalar_value(w_val) + return self.implementation.setitem(self.implementation.start, w_val) def fill(self, space, box): self.implementation.fill(space, box) @@ -319,7 +318,8 @@ return self.implementation.get_size() def get_scalar_value(self): - return self.implementation.get_scalar_value() + assert self.get_size() == 1 + return self.implementation.getitem(self.implementation.start) def descr_copy(self, space, w_order=None): order = order_converter(space, w_order, NPY.KEEPORDER) @@ -490,19 +490,15 @@ def descr_item(self, space, w_arg=None): if space.is_none(w_arg): - if self.is_scalar(): - return self.get_scalar_value().item(space) if self.get_size() == 1: - w_obj = self.getitem(space, - [0] * len(self.get_shape())) + w_obj = self.get_scalar_value() assert isinstance(w_obj, interp_boxes.W_GenericBox) return w_obj.item(space) - raise OperationError(space.w_ValueError, - space.wrap("can only convert an array of size 1 to a Python scalar")) + raise oefmt(space.w_ValueError, + "can only convert an array of size 1 to a Python scalar") if space.isinstance_w(w_arg, space.w_int): if self.is_scalar(): - raise OperationError(space.w_IndexError, - space.wrap("index out of bounds")) + raise oefmt(space.w_IndexError, "index out of bounds") i = self.to_coords(space, w_arg) item = self.getitem(space, i) assert isinstance(item, interp_boxes.W_GenericBox) @@ -580,11 +576,8 @@ new_dtype = interp_dtype.variable_dtype(space, 'S' + str(cur_dtype.elsize)) impl = self.implementation - if isinstance(impl, scalar.Scalar): - return W_NDimArray.new_scalar(space, new_dtype, impl.value) - else: - new_impl = impl.astype(space, new_dtype) - return wrap_impl(space, space.type(self), self, new_impl) + new_impl = impl.astype(space, new_dtype) + return wrap_impl(space, space.type(self), self, new_impl) def descr_get_base(self, space): impl = self.implementation @@ -1038,72 +1031,49 @@ descr_argmin = _reduce_argmax_argmin_impl("argmin") def descr_int(self, space): - shape = self.get_shape() - if len(shape) == 0: - assert isinstance(self.implementation, scalar.Scalar) - value = space.wrap(self.implementation.get_scalar_value()) - elif shape == [1]: - value = self.descr_getitem(space, space.wrap(0)) - else: + if self.get_size() != 1: raise OperationError(space.w_TypeError, space.wrap( "only length-1 arrays can be converted to Python scalars")) if self.get_dtype().is_str_or_unicode(): raise OperationError(space.w_TypeError, space.wrap( "don't know how to convert scalar number to int")) + value = self.get_scalar_value() return 
space.int(value) def descr_long(self, space): - shape = self.get_shape() - if len(shape) == 0: - assert isinstance(self.implementation, scalar.Scalar) - value = space.wrap(self.implementation.get_scalar_value()) - elif shape == [1]: - value = self.descr_getitem(space, space.wrap(0)) - else: + if self.get_size() != 1: raise OperationError(space.w_TypeError, space.wrap( "only length-1 arrays can be converted to Python scalars")) if self.get_dtype().is_str_or_unicode(): raise OperationError(space.w_TypeError, space.wrap( "don't know how to convert scalar number to long")) + value = self.get_scalar_value() return space.long(value) def descr_float(self, space): - shape = self.get_shape() - if len(shape) == 0: - assert isinstance(self.implementation, scalar.Scalar) - value = space.wrap(self.implementation.get_scalar_value()) - elif shape == [1]: - value = self.descr_getitem(space, space.wrap(0)) - else: + if self.get_size() != 1: raise OperationError(space.w_TypeError, space.wrap( "only length-1 arrays can be converted to Python scalars")) if self.get_dtype().is_str_or_unicode(): raise OperationError(space.w_TypeError, space.wrap( "don't know how to convert scalar number to float")) + value = self.get_scalar_value() return space.float(value) def descr_index(self, space): - shape = self.get_shape() - if len(shape) == 0: - assert isinstance(self.implementation, scalar.Scalar) - value = space.wrap(self.implementation.get_scalar_value()) - elif shape == [1]: - value = self.descr_getitem(space, space.wrap(0)) - else: + if self.get_size() != 1 or \ + not self.get_dtype().is_int() or self.get_dtype().is_bool(): raise OperationError(space.w_TypeError, space.wrap( "only integer arrays with one element " "can be converted to an index")) - if not self.get_dtype().is_int() or self.get_dtype().is_bool(): - raise OperationError(space.w_TypeError, space.wrap( - "only integer arrays with one element " - "can be converted to an index")) + value = self.get_scalar_value() assert isinstance(value, interp_boxes.W_GenericBox) return value.item(space) def descr_reduce(self, space): from rpython.rlib.rstring import StringBuilder from pypy.interpreter.mixedmodule import MixedModule - from pypy.module.micronumpy.arrayimpl.concrete import SliceArray + from pypy.module.micronumpy.concrete import SliceArray numpypy = space.getbuiltinmodule("_numpypy") assert isinstance(numpypy, MixedModule) @@ -1168,7 +1138,7 @@ @unwrap_spec(offset=int) def descr_new_array(space, w_subtype, w_shape, w_dtype=None, w_buffer=None, offset=0, w_strides=None, w_order=None): - from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArray + from pypy.module.micronumpy.concrete import ConcreteArray from pypy.module.micronumpy.support import calc_strides dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) @@ -1445,14 +1415,6 @@ dtype = interp_dtype.decode_w_dtype(space, w_dtype) - # scalars and strings w/o __array__ method - isstr = space.isinstance_w(w_object, space.w_str) - if not issequence_w(space, w_object) or isstr: - if dtype is None or dtype.char != NPY.CHARLTR: - if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): - dtype = interp_ufuncs.find_dtype_for_scalar(space, w_object) - return W_NDimArray.new_scalar(space, dtype, w_object) - if space.is_none(w_order): order = 'C' else: diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ 
b/pypy/module/micronumpy/interp_ufuncs.py @@ -321,10 +321,11 @@ w_obj.get_scalar_value().convert_to(space, calc_dtype)) if out is None: return w_val + w_val = res_dtype.coerce(space, w_val) if out.is_scalar(): out.set_scalar_value(w_val) else: - out.fill(space, res_dtype.coerce(space, w_val)) + out.fill(space, w_val) return out shape = shape_agreement(space, w_obj.get_shape(), out, broadcast_down=False) diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -1,4 +1,3 @@ - """ This is a mini-tutorial on iterators, strides, and memory layout. It assumes you are familiar with the terms, see http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html @@ -42,100 +41,10 @@ dimension, perhaps we could overflow times in one big step. """ -from pypy.module.micronumpy.strides import enumerate_chunks,\ - calculate_slice_strides from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy.arrayimpl import base -from pypy.module.micronumpy.support import product +from pypy.module.micronumpy import support from rpython.rlib import jit -# structures to describe slicing - -class BaseChunk(object): - pass - -class RecordChunk(BaseChunk): - def __init__(self, name): - self.name = name - - def apply(self, space, orig_arr): - arr = orig_arr.implementation - ofs, subdtype = arr.dtype.fields[self.name] - # ofs only changes start - # create a view of the original array by extending - # the shape, strides, backstrides of the array - from pypy.module.micronumpy.support import calc_strides - strides, backstrides = calc_strides(subdtype.shape, - subdtype.subdtype, arr.order) - final_shape = arr.shape + subdtype.shape - final_strides = arr.get_strides() + strides - final_backstrides = arr.get_backstrides() + backstrides - final_dtype = subdtype - if subdtype.subdtype: - final_dtype = subdtype.subdtype - return W_NDimArray.new_slice(space, arr.start + ofs, final_strides, - final_backstrides, - final_shape, arr, orig_arr, final_dtype) - -class Chunks(BaseChunk): - def __init__(self, l): - self.l = l - - @jit.unroll_safe - def extend_shape(self, old_shape): - shape = [] - i = -1 - for i, c in enumerate_chunks(self.l): - if c.step != 0: - shape.append(c.lgt) - s = i + 1 - assert s >= 0 - return shape[:] + old_shape[s:] - - def apply(self, space, orig_arr): - arr = orig_arr.implementation - shape = self.extend_shape(arr.shape) - r = calculate_slice_strides(arr.shape, arr.start, arr.get_strides(), - arr.get_backstrides(), self.l) - _, start, strides, backstrides = r - return W_NDimArray.new_slice(space, start, strides[:], backstrides[:], - shape[:], arr, orig_arr) - - -class Chunk(BaseChunk): - axis_step = 1 - - def __init__(self, start, stop, step, lgt): - self.start = start - self.stop = stop - self.step = step - self.lgt = lgt - - def __repr__(self): - return 'Chunk(%d, %d, %d, %d)' % (self.start, self.stop, self.step, - self.lgt) - -class NewAxisChunk(Chunk): - start = 0 - stop = 1 - step = 1 - lgt = 1 - axis_step = 0 - - def __init__(self): - pass - -class BaseTransform(object): - pass - -class ViewTransform(BaseTransform): - def __init__(self, chunks): - # 4-tuple specifying slicing - self.chunks = chunks - -class BroadcastTransform(BaseTransform): - def __init__(self, res_shape): - self.res_shape = res_shape class PureShapeIterator(object): def __init__(self, shape, idx_w): @@ -169,8 +78,21 @@ def get_index(self, space, shapelen): return [space.wrap(self.indexes[i]) for i in range(shapelen)] 
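The iter.py docstring above is about walking an array shape; PureShapeIterator does exactly that, yielding every index tuple of the shape. The same set of tuples can be enumerated in plain Python as follows (illustrative only: the RPython class keeps explicit per-dimension counters and its visiting order may differ):

import itertools

def iter_indexes(shape):
    # Every index tuple of `shape`; C order here, i.e. the last
    # dimension varies fastest.
    return itertools.product(*[range(dim) for dim in shape])

assert list(iter_indexes([2, 3]))[:4] == [(0, 0), (0, 1), (0, 2), (1, 0)]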
-class ConcreteArrayIterator(base.BaseArrayIterator): + +class BaseArrayIterator(object): + def next(self): + raise NotImplementedError # purely abstract base class + + def setitem(self, elem): + raise NotImplementedError + + def set_scalar_object(self, value): + raise NotImplementedError # works only on scalars + + +class ConcreteArrayIterator(BaseArrayIterator): _immutable_fields_ = ['array', 'skip', 'size'] + def __init__(self, array): self.array = array self.offset = 0 @@ -198,13 +120,20 @@ def reset(self): self.offset %= self.size + class OneDimViewIterator(ConcreteArrayIterator): def __init__(self, array, start, strides, shape): self.array = array self.offset = start - self.skip = strides[0] self.index = 0 - self.size = shape[0] + assert len(strides) == len(shape) + if len(shape) == 0: + self.skip = array.dtype.elsize + self.size = 1 + else: + assert len(shape) == 1 + self.skip = strides[0] + self.size = shape[0] def next(self): self.offset += self.skip @@ -223,6 +152,7 @@ def get_index(self, d): return self.index + class MultiDimViewIterator(ConcreteArrayIterator): def __init__(self, array, start, strides, backstrides, shape): self.indexes = [0] * len(shape) @@ -230,7 +160,7 @@ self.shape = shape self.offset = start self.shapelen = len(shape) - self._done = self.shapelen == 0 or product(shape) == 0 + self._done = self.shapelen == 0 or support.product(shape) == 0 self.strides = strides self.backstrides = backstrides self.size = array.size @@ -261,7 +191,7 @@ remaining_step = (self.indexes[i] + step) // self.shape[i] this_i_step = step - remaining_step * self.shape[i] self.offset += self.strides[i] * this_i_step - self.indexes[i] = self.indexes[i] + this_i_step + self.indexes[i] = self.indexes[i] + this_i_step step = remaining_step else: self._done = True @@ -275,7 +205,8 @@ def get_index(self, d): return self.indexes[d] -class AxisIterator(base.BaseArrayIterator): + +class AxisIterator(BaseArrayIterator): def __init__(self, array, shape, dim, cumulative): self.shape = shape strides = array.get_strides() diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -1,4 +1,3 @@ - """ This file is the main run loop as well as evaluation loops for various operations. This is the place to look for all the computations that iterate over all the array elements. 
@@ -10,7 +9,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy.iter import PureShapeIterator -from pypy.module.micronumpy.support import index_w +from pypy.module.micronumpy import support from pypy.module.micronumpy import constants as NPY call2_driver = jit.JitDriver(name='numpy_call2', @@ -380,14 +379,17 @@ 'index_dtype'], reds = 'auto') -def setitem_filter(space, arr, index, value, size): +def setitem_filter(space, arr, index, value): arr_iter = arr.create_iter() shapelen = len(arr.get_shape()) if shapelen > 1 and len(index.get_shape()) < 2: index_iter = index.create_iter(arr.get_shape(), backward_broadcast=True) else: index_iter = index.create_iter() - value_iter = value.create_iter([size]) + if value.get_size() == 1: + value_iter = value.create_iter(arr.get_shape()) + else: + value_iter = value.create_iter() index_dtype = index.get_dtype() arr_dtype = arr.get_dtype() while not index_iter.done(): @@ -554,7 +556,7 @@ while not arr_iter.done(): choose_driver.jit_merge_point(shapelen=shapelen, dtype=dtype, mode=mode) - index = index_w(space, arr_iter.getitem()) + index = support.index_w(space, arr_iter.getitem()) if index < 0 or index >= len(iterators): if mode == NPY.RAISE: raise OperationError(space.w_ValueError, space.wrap( @@ -657,4 +659,3 @@ out_iter.setitem(arr.getitem_index(space, indexes)) iter.next() out_iter.next() - diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/sort.py rename from pypy/module/micronumpy/arrayimpl/sort.py rename to pypy/module/micronumpy/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/sort.py @@ -17,6 +17,7 @@ INT_SIZE = rffi.sizeof(lltype.Signed) + def make_argsort_function(space, itemtype, comp_type, count=1): TP = itemtype.T step = rffi.sizeof(TP) @@ -167,6 +168,7 @@ return argsort + def argsort_array(arr, space, w_axis): cache = space.fromcache(ArgSortCache) # that populates ArgSortClasses itemtype = arr.dtype.itemtype @@ -179,10 +181,6 @@ "sorting of non-numeric types '%s' is not implemented", arr.dtype.get_name()) -all_types = (types.all_float_types + types.all_complex_types + - types.all_int_types) -all_types = [i for i in all_types if not '_mixin_' in i[0].__dict__] -all_types = unrolling_iterable(all_types) def make_sort_function(space, itemtype, comp_type, count=1): TP = itemtype.T @@ -307,8 +305,9 @@ return sort + def sort_array(arr, space, w_axis, w_order): - cache = space.fromcache(SortCache) # that populates SortClasses + cache = space.fromcache(SortCache) # that populates SortClasses itemtype = arr.dtype.itemtype if arr.dtype.byteorder == NPY.OPPBYTE: raise oefmt(space.w_NotImplementedError, @@ -327,6 +326,7 @@ all_types = [i for i in all_types if not issubclass(i[0], types.Float16)] all_types = unrolling_iterable(all_types) + class ArgSortCache(object): built = False @@ -341,7 +341,7 @@ else: cache[cls] = make_argsort_function(space, cls, it) self.cache = cache - self._lookup = specialize.memo()(lambda tp : cache[tp[0]]) + self._lookup = specialize.memo()(lambda tp: cache[tp[0]]) class SortCache(object): @@ -358,4 +358,4 @@ else: cache[cls] = make_sort_function(space, cls, it) self.cache = cache - self._lookup = specialize.memo()(lambda tp : cache[tp[0]]) + self._lookup = specialize.memo()(lambda tp: cache[tp[0]]) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -1,6 +1,104 @@ 
from rpython.rlib import jit -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.module.micronumpy.base import W_NDimArray +from pypy.module.micronumpy import support +from pypy.module.micronumpy import constants as NPY + + +# structures to describe slicing + +class BaseChunk(object): + pass + + +class RecordChunk(BaseChunk): + def __init__(self, name): + self.name = name + + def apply(self, space, orig_arr): + arr = orig_arr.implementation + ofs, subdtype = arr.dtype.fields[self.name] + # ofs only changes start + # create a view of the original array by extending + # the shape, strides, backstrides of the array + from pypy.module.micronumpy.support import calc_strides + strides, backstrides = calc_strides(subdtype.shape, + subdtype.subdtype, arr.order) + final_shape = arr.shape + subdtype.shape + final_strides = arr.get_strides() + strides + final_backstrides = arr.get_backstrides() + backstrides + final_dtype = subdtype + if subdtype.subdtype: + final_dtype = subdtype.subdtype + return W_NDimArray.new_slice(space, arr.start + ofs, final_strides, + final_backstrides, + final_shape, arr, orig_arr, final_dtype) + + +class Chunks(BaseChunk): + def __init__(self, l): + self.l = l + + @jit.unroll_safe + def extend_shape(self, old_shape): + shape = [] + i = -1 + for i, c in enumerate_chunks(self.l): + if c.step != 0: + shape.append(c.lgt) + s = i + 1 + assert s >= 0 + return shape[:] + old_shape[s:] + + def apply(self, space, orig_arr): + arr = orig_arr.implementation + shape = self.extend_shape(arr.shape) + r = calculate_slice_strides(arr.shape, arr.start, arr.get_strides(), + arr.get_backstrides(), self.l) + _, start, strides, backstrides = r + return W_NDimArray.new_slice(space, start, strides[:], backstrides[:], + shape[:], arr, orig_arr) + + +class Chunk(BaseChunk): + axis_step = 1 + + def __init__(self, start, stop, step, lgt): + self.start = start + self.stop = stop + self.step = step + self.lgt = lgt + + def __repr__(self): + return 'Chunk(%d, %d, %d, %d)' % (self.start, self.stop, self.step, + self.lgt) + + +class NewAxisChunk(Chunk): + start = 0 + stop = 1 + step = 1 + lgt = 1 + axis_step = 0 + + def __init__(self): + pass + + +class BaseTransform(object): + pass + + +class ViewTransform(BaseTransform): + def __init__(self, chunks): + # 4-tuple specifying slicing + self.chunks = chunks + + +class BroadcastTransform(BaseTransform): + def __init__(self, res_shape): + self.res_shape = res_shape + @jit.look_inside_iff(lambda chunks: jit.isconstant(len(chunks))) def enumerate_chunks(chunks): @@ -11,9 +109,9 @@ result.append((i, chunk)) return result + @jit.look_inside_iff(lambda shape, start, strides, backstrides, chunks: - jit.isconstant(len(chunks)) -) + jit.isconstant(len(chunks))) def calculate_slice_strides(shape, start, strides, backstrides, chunks): size = 0 for chunk in chunks: @@ -26,12 +124,16 @@ i = -1 j = 0 for i, chunk in enumerate_chunks(chunks): + try: + s_i = strides[i] + except IndexError: + continue if chunk.step != 0: - rstrides[j] = strides[i] * chunk.step - rbackstrides[j] = strides[i] * max(0, chunk.lgt - 1) * chunk.step + rstrides[j] = s_i * chunk.step + rbackstrides[j] = s_i * max(0, chunk.lgt - 1) * chunk.step rshape[j] = chunk.lgt j += 1 - rstart += strides[i] * chunk.start + rstart += s_i * chunk.start # add a reminder s = i + 1 assert s >= 0 @@ -40,6 +142,7 @@ rshape += shape[s:] return rshape, rstart, rstrides, rbackstrides + def calculate_broadcast_strides(strides, backstrides, orig_shape, 
res_shape, backwards=False): rstrides = [] rbackstrides = [] @@ -58,17 +161,23 @@ rbackstrides = [0] * (len(res_shape) - len(orig_shape)) + rbackstrides return rstrides, rbackstrides + def is_single_elem(space, w_elem, is_rec_type): if (is_rec_type and space.isinstance_w(w_elem, space.w_tuple)): return True if (space.isinstance_w(w_elem, space.w_tuple) or - space.isinstance_w(w_elem, space.w_list)): + space.isinstance_w(w_elem, space.w_list)): return False if isinstance(w_elem, W_NDimArray) and not w_elem.is_scalar(): return False return True + def find_shape_and_elems(space, w_iterable, dtype): + isstr = space.isinstance_w(w_iterable, space.w_str) + if not support.issequence_w(space, w_iterable) or isstr: + if dtype is None or dtype.char != NPY.CHARLTR: + return [], [w_iterable] is_rec_type = dtype is not None and dtype.is_record() if is_rec_type and is_single_elem(space, w_iterable, is_rec_type): return [], [w_iterable] @@ -89,7 +198,7 @@ size = space.len_w(batch[0]) for w_elem in batch: if (is_single_elem(space, w_elem, is_rec_type) or - space.len_w(w_elem) != size): + space.len_w(w_elem) != size): raise OperationError(space.w_ValueError, space.wrap( "setting an array element with a sequence")) w_array = space.lookup(w_elem, '__array__') @@ -102,12 +211,13 @@ shape.append(size) batch = new_batch + def to_coords(space, shape, size, order, w_item_or_slice): '''Returns a start coord, step, and length. ''' start = lngth = step = 0 if not (space.isinstance_w(w_item_or_slice, space.w_int) or - space.isinstance_w(w_item_or_slice, space.w_slice)): + space.isinstance_w(w_item_or_slice, space.w_slice)): raise OperationError(space.w_IndexError, space.wrap('unsupported iterator index')) @@ -125,6 +235,7 @@ i //= shape[s] return coords, step, lngth + @jit.unroll_safe def shape_agreement(space, shape1, w_arr2, broadcast_down=True): if w_arr2 is None: @@ -148,6 +259,7 @@ ) return ret + @jit.unroll_safe def shape_agreement_multiple(space, array_list): """ call shape_agreement recursively, allow elements from array_list to @@ -159,6 +271,7 @@ shape = shape_agreement(space, shape, arr) return shape + def _shape_agreement(shape1, shape2): """ Checks agreement about two shapes with respect to broadcasting. Returns the resulting shape. @@ -197,6 +310,7 @@ endshape[i] = remainder[i] return endshape + def get_shape_from_iterable(space, old_size, w_iterable): new_size = 0 new_shape = [] @@ -215,8 +329,8 @@ s = space.int_w(elem) if s < 0: if neg_dim >= 0: - raise OperationError(space.w_ValueError, space.wrap( - "can only specify one unknown dimension")) + raise oefmt(space.w_ValueError, + "can only specify one unknown dimension") s = 1 neg_dim = i new_size *= s @@ -230,6 +344,7 @@ space.wrap("total size of new array must be unchanged")) return new_shape + # Recalculating strides. Find the steps that the iteration does for each # dimension, given the stride and shape. Then try to create a new stride that # fits the new shape, using those steps. 
If there is a shape/step mismatch diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -1,5 +1,13 @@ from rpython.rlib import jit -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt + + +def issequence_w(space, w_obj): + from pypy.module.micronumpy.base import W_NDimArray + return (space.isinstance_w(w_obj, space.w_tuple) or + space.isinstance_w(w_obj, space.w_list) or + isinstance(w_obj, W_NDimArray)) + def index_w(space, w_obj): try: @@ -8,8 +16,8 @@ try: return space.int_w(space.int(w_obj)) except OperationError: - raise OperationError(space.w_IndexError, space.wrap( - "cannot convert index to integer")) + raise oefmt(space.w_IndexError, "cannot convert index to integer") + @jit.unroll_safe def product(s): @@ -18,6 +26,7 @@ i *= x return i + @jit.unroll_safe def calc_strides(shape, dtype, order): strides = [] diff --git a/pypy/module/micronumpy/test/test_iter.py b/pypy/module/micronumpy/test/test_iter.py --- a/pypy/module/micronumpy/test/test_iter.py +++ b/pypy/module/micronumpy/test/test_iter.py @@ -1,5 +1,4 @@ from pypy.module.micronumpy.iter import MultiDimViewIterator -from pypy.module.micronumpy.arrayimpl.scalar import ScalarIterator class MockArray(object): @@ -92,9 +91,3 @@ assert i.indexes == [0,1] assert i.offset == 3 assert i.done() - - def test_scalar_iter(self): - i = ScalarIterator(MockArray) - i.next() - i.next_skip_x(3) - assert i.done() diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -3,7 +3,7 @@ from pypy.conftest import option from pypy.module.micronumpy.appbridge import get_appbridge_cache -from pypy.module.micronumpy.iter import Chunk, Chunks +from pypy.module.micronumpy.strides import Chunk, Chunks from pypy.module.micronumpy.interp_numarray import W_NDimArray from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest @@ -657,6 +657,9 @@ def test_setslice_array(self): from numpypy import array + a = array(5) + exc = raises(ValueError, "a[:] = 4") + assert exc.value[0] == "cannot slice a 0-d array" a = array(range(5)) b = array(range(2)) a[1:4:2] = b @@ -713,9 +716,14 @@ for y in range(2): expected[x, y] = math.cos(a[x]) * math.cos(b[y]) assert ((cos(a)[:,newaxis] * cos(b).T) == expected).all() - a = array(1)[newaxis] + o = array(1) + a = o[newaxis] assert a == array([1]) assert a.shape == (1,) + o[newaxis, newaxis] = 2 + assert o == 2 + a[:] = 3 + assert o == 3 def test_newaxis_slice(self): from numpypy import array, newaxis @@ -1350,6 +1358,9 @@ def test_getslice(self): from numpypy import array + a = array(5) + exc = raises(ValueError, "a[:]") + assert exc.value[0] == "cannot slice a 0-d array" a = array(range(5)) s = a[1:5] assert len(s) == 4 @@ -1415,6 +1426,12 @@ b = a.sum(out=d) assert b == d assert b is d + c = array(1.5+2.5j) + assert c.real == 1.5 + assert c.imag == 2.5 + a.sum(out=c.imag) + assert c.real == 1.5 + assert c.imag == 5 assert list(zeros((0, 2)).sum(axis=1)) == [] @@ -2275,6 +2292,12 @@ assert (a[b] == a).all() a[b] = 1. assert (a == [[1., 1., 1.]]).all() + a[b] = np.array(2.) 
+ assert (a == [[2., 2., 2.]]).all() + a[b] = np.array([3.]) + assert (a == [[3., 3., 3.]]).all() + a[b] = np.array([[4.]]) + assert (a == [[4., 4., 4.]]).all() def test_ellipsis_indexing(self): import numpy as np @@ -2683,6 +2706,11 @@ def test_array_interface(self): from numpypy import array + a = array(2.5) + i = a.__array_interface__ + assert isinstance(i['data'][0], int) + assert i['shape'] == () + assert i['strides'] is None a = array([1, 2, 3]) i = a.__array_interface__ assert isinstance(i['data'][0], int) @@ -3194,6 +3222,8 @@ assert str(array([1, 2, 3])) == '[1 2 3]' assert str(array(['abc'], 'S3')) == "['abc']" assert str(array('abc')) == 'abc' + assert str(array(1.5)) == '1.5' + assert str(array(1.5).real) == '1.5' class AppTestRepr(BaseNumpyAppTest): @@ -3211,6 +3241,8 @@ from numpypy import array assert repr(array([1, 2, 3])) == 'array([1, 2, 3])' assert repr(array(['abc'], 'S3')) == "array(['abc'])" + assert repr(array(1.5)) == "array(1.5)" + assert repr(array(1.5).real) == "array(1.5)" def teardown_class(cls): if option.runappdirect: diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -4,15 +4,15 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.module.micronumpy import interp_boxes from pypy.module.micronumpy import support -from pypy.module.micronumpy.arrayimpl.voidbox import VoidBoxStorage -from pypy.module.micronumpy.arrayimpl.concrete import SliceArray +from pypy.module.micronumpy.concrete import SliceArray, VoidBoxStorage from pypy.objspace.std.floatobject import float2string from pypy.objspace.std.complexobject import str_format from rpython.rlib import rfloat, clibffi, rcomplex from rpython.rlib.rawstorage import (alloc_raw_storage, raw_storage_getitem_unaligned, raw_storage_setitem_unaligned) from rpython.rlib.objectmodel import specialize -from rpython.rlib.rarithmetic import widen, byteswap, r_ulonglong, most_neg_value_of, LONG_BIT +from rpython.rlib.rarithmetic import widen, byteswap, r_ulonglong, \ + most_neg_value_of, LONG_BIT from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rlib.rstruct.runpack import runpack from rpython.rlib.rstruct.nativefmttable import native_is_bigendian From noreply at buildbot.pypy.org Thu Feb 27 02:02:16 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 02:02:16 +0100 (CET) Subject: [pypy-commit] pypy numpypy-remove-scalar: close abandoned branch Message-ID: <20140227010216.7F46D1C3973@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpypy-remove-scalar Changeset: r69487:651f13647de9 Date: 2014-02-26 20:01 -0500 http://bitbucket.org/pypy/pypy/changeset/651f13647de9/ Log: close abandoned branch From noreply at buildbot.pypy.org Thu Feb 27 02:13:25 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 02:13:25 +0100 (CET) Subject: [pypy-commit] pypy default: fixes for cpyext ndarrayobject Message-ID: <20140227011325.092071C03D5@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69488:28a4bfdc3627 Date: 2014-02-26 20:12 -0500 http://bitbucket.org/pypy/pypy/changeset/28a4bfdc3627/ Log: fixes for cpyext ndarrayobject diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -9,8 +9,7 @@ from pypy.module.cpyext.api import PyObject from pypy.module.micronumpy.interp_numarray import W_NDimArray, array from 
pypy.module.micronumpy.interp_dtype import get_dtype_cache, W_Dtype -from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArray -from pypy.module.micronumpy.arrayimpl.scalar import Scalar +from pypy.module.micronumpy.concrete import ConcreteArray from rpython.rlib.rawstorage import RAW_STORAGE_PTR NPY_C_CONTIGUOUS = 0x0001 @@ -167,7 +166,7 @@ # void *data = PyArray_DATA(arr); impl = w_array.implementation w_array = W_NDimArray.from_shape(space, [1], impl.dtype) - w_array.implementation.setitem(0, impl.value) + w_array.implementation.setitem(0, impl.getitem(impl.start + 0)) w_array.implementation.shape = [] return w_array @@ -214,12 +213,8 @@ order='C', owning=False, w_subtype=None): shape, dtype = get_shape_and_dtype(space, nd, dims, typenum) storage = rffi.cast(RAW_STORAGE_PTR, data) - if nd == 0: - w_val = dtype.itemtype.box_raw_data(storage) - return W_NDimArray(Scalar(dtype, w_val)) - else: - return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, - order=order, owning=owning, w_subtype=w_subtype) + return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, + order=order, owning=owning, w_subtype=w_subtype) @cpython_api([Py_ssize_t, rffi.LONGP, Py_ssize_t], PyObject) diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -77,7 +77,7 @@ def test_FromAny_scalar(self, space, api): a0 = scalar(space) - assert a0.implementation.get_scalar_value().value == 10. + assert a0.get_scalar_value().value == 10. a = api._PyArray_FromAny(a0, NULL, 0, 0, 0, NULL) assert api._PyArray_NDIM(a) == 0 diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -31,7 +31,6 @@ @staticmethod def from_shape(space, shape, dtype, order='C', w_instance=None): from pypy.module.micronumpy import concrete - strides, backstrides = calc_strides(shape, dtype.base, order) impl = concrete.ConcreteArray(shape, dtype.base, order, strides, backstrides) @@ -43,7 +42,6 @@ def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, w_subtype=None, w_base=None, writable=True): from pypy.module.micronumpy import concrete - assert shape strides, backstrides = calc_strides(shape, dtype, order) if w_base is not None: if owning: @@ -56,7 +54,6 @@ impl = concrete.ConcreteNonWritableArrayWithBase(shape, dtype, order, strides, backstrides, storage, w_base) - elif owning: # Will free storage when GCd impl = concrete.ConcreteArray(shape, dtype, order, strides, From noreply at buildbot.pypy.org Thu Feb 27 03:00:18 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 27 Feb 2014 03:00:18 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: hg merge remove-remaining-smm Message-ID: <20140227020018.5D4771C03FC@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69489:99fb1bea6848 Date: 2014-02-27 02:59 +0100 http://bitbucket.org/pypy/pypy/changeset/99fb1bea6848/ Log: hg merge remove-remaining-smm diff --git a/include/PyPy.h b/include/PyPy.h --- a/include/PyPy.h +++ b/include/PyPy.h @@ -8,9 +8,14 @@ extern "C" { #endif +/* You should call this first once. */ +#define pypy_init(need_threads) do { pypy_asm_stack_bottom(); \ +rpython_startup_code();\ + if (need_threads) pypy_init_threads(); } while (0) -/* You should call this first once. 
*/ +// deprecated interface void rpython_startup_code(void); +void pypy_init_threads(void); /* Initialize the home directory of PyPy. It is necessary to call this. @@ -26,11 +31,10 @@ /* If your program has multiple threads, then you need to call - pypy_init_threads() once at init time, and then pypy_thread_attach() - once in each other thread that just started and in which you want to - run Python code (including via callbacks, see below). + pypy_thread_attach() once in each other thread that just started + and in which you want to run Python code (including via callbacks, + see below). DO NOT CALL IT IN THE MAIN THREAD */ -void pypy_init_threads(void); void pypy_thread_attach(void); diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/embedding.rst @@ -0,0 +1,101 @@ + +PyPy has a very minimal and a very strange embedding interface, based on +the usage of `cffi`_ and the philosophy that Python is a better language in C. +It was developed in collaboration with Roberto De Ioris from the `uwsgi`_ +project. The `PyPy uwsgi plugin`_ is a good example of usage of such interface. + +The first thing that you need, that we plan to change in the future, is to +compile PyPy yourself with an option ``--shared``. Consult the +`how to compile PyPy`_ doc for details. That should result in ``libpypy.so`` +or ``pypy.dll`` file or something similar, depending on your platform. Consult +your platform specification for details. + +The resulting shared library has very few functions that are however enough +to make a full API working, provided you'll follow a few principles. The API +is: + +.. function:: void pypy_init(int need_threads); + + This is a function that you have to call (once) before calling anything. + It initializes the RPython/PyPy GC and does a bunch of necessary startup + code. This function cannot fail. Pass 1 in case you need thread support, 0 + otherwise. + +.. function:: long pypy_setup_home(char* home, int verbose); + + This is another function that you have to call at some point, without + it you would not be able to find the standard library (and run pretty much + nothing). Arguments: + + * ``home``: null terminated path + + * ``verbose``: if non-zero, would print error messages to stderr + + Function returns 0 on success or 1 on failure, can be called multiple times + until the library is found. + +.. function:: int pypy_execute_source(char* source); + + Execute the source code given in the ``source`` argument. Will print + the error message to stderr upon failure and return 1, otherwise returns 0. + You should really do your own error handling in the source. It'll acquire + the GIL. + +.. function:: void pypy_thread_attach(void); + + In case your application uses threads that are initialized outside of PyPy, + you need to call this function to tell the PyPy GC to track this thread. + Note that this function is not thread-safe itself, so you need to guard it + with a mutex. Do not call it from the main thread. + +Simple example +-------------- + +Note that this API is a lot more minimal than say CPython C API, so at first +it's obvious to think that you can't do much. However, the trick is to do +all the logic in Python and expose it via `cffi`_ callbacks. Let's assume +we're on linux and pypy is put in ``/opt/pypy`` (a source checkout) and +library is in ``/opt/pypy/libpypy-c.so``. 
We write a little C program +(for simplicity assuming that all operations will be performed:: + + #include "include/PyPy.h" + #include + + const char source[] = "print 'hello from pypy'"; + + int main() + { + int res; + + rpython_startup_code(); + res = pypy_execute_source((char*)source); + if (res) { + printf("Error calling pypy_execute_source!\n"); + } + return res; + } + +If we save it as ``x.c`` now, compile it and run it with:: + + fijal at hermann:/opt/pypy$ gcc -o x x.c -lpypy-c -L. + fijal at hermann:~/src/pypy$ LD_LIBRARY_PATH=. ./x + hello from pypy + +Worked! + +More advanced example +--------------------- + +Typically we need something more to do than simply execute source. The following +is a fully fledged example, please consult cffi documentation for details. + +xxx + +Threading +--------- + +XXXX I don't understand what's going on, discuss with unbit + +.. _`cffi`: http://cffi.readthedocs.org/ +.. _`uwsgi`: http://uwsgi-docs.readthedocs.org/en/latest/ +.. _`PyPy uwsgi plugin`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -145,11 +145,13 @@ After you successfully manage to get PyPy's source you can read more about: - `Building and using PyPy's Python interpreter`_ + - `Embedding PyPy`_ - `Learning more about the RPython toolchain and how to develop (with) PyPy`_ - `Tutorial for how to write an interpreter with the RPython toolchain and make it fast`_ - `Look at our benchmark results`_ .. _`Building and using PyPy's Python interpreter`: getting-started-python.html +.. _`Embedding PyPy`: embedding.html .. _`Learning more about the RPython toolchain and how to develop (with) PyPy`: getting-started-dev.html .. _`Tutorial for how to write an interpreter with the RPython toolchain and make it fast`: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html .. 
_`Look at our benchmark results`: http://speed.pypy.org diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -82,6 +82,7 @@ from rpython.rlib.entrypoint import entrypoint from rpython.rtyper.lltypesystem import rffi, lltype + from rpython.rtyper.lltypesystem.lloperation import llop w_pathsetter = space.appexec([], """(): def f(path): @@ -93,6 +94,7 @@ @entrypoint('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): from pypy.module.sys.initpath import pypy_find_stdlib + llop.gc_stack_bottom(lltype.Void) verbose = rffi.cast(lltype.Signed, verbose) if ll_home: home = rffi.charp2str(ll_home) @@ -120,8 +122,11 @@ @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): + rffi.aroundstate.after() + llop.gc_stack_bottom(lltype.Void) source = rffi.charp2str(ll_source) res = _pypy_execute_source(source) + rffi.aroundstate.before() return rffi.cast(rffi.INT, res) @entrypoint('main', [], c_name='pypy_init_threads') diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -592,6 +592,17 @@ elif unwrap_spec == [ObjSpace, W_Root, Arguments]: self.__class__ = BuiltinCodePassThroughArguments1 self.func__args__ = func + elif unwrap_spec == [self_type, ObjSpace, Arguments]: + self.__class__ = BuiltinCodePassThroughArguments1 + miniglobals = {'func': func, 'self_type': self_type} + d = {} + source = """if 1: + def _call(space, w_obj, args): + self = space.descr_self_interp_w(self_type, w_obj) + return func(self, space, args) + \n""" + exec compile2(source) in miniglobals, d + self.func__args__ = d['_call'] else: self.__class__ = globals()['BuiltinCode%d' % arity] setattr(self, 'fastfunc_%d' % arity, fastfunc) diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -824,6 +824,28 @@ assert len(called) == 1 assert isinstance(called[0], argument.Arguments) + def test_pass_trough_arguments_method(self): + space = self.space + + called = [] + + class W_Something(W_Root): + def f(self, space, __args__): + called.append(__args__) + a_w, _ = __args__.unpack() + return space.newtuple([space.wrap('f')]+a_w) + + w_f = space.wrap(gateway.interp2app_temp(W_Something.f)) + + w_self = space.wrap(W_Something()) + args = argument.Arguments(space, [space.wrap(7)]) + + w_res = space.call_obj_args(w_f, w_self, args) + assert space.is_true(space.eq(w_res, space.wrap(('f', 7)))) + + # white-box check for opt + assert called[0] is args + class AppTestKeywordsToBuiltinSanity(object): def test_type(self): diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -499,6 +499,16 @@ def rewrite_op_hint(self, op): hints = op.args[1].value + + # hack: if there are both 'promote' and 'promote_string', kill + # one of them based on the type of the value + if hints.get('promote_string') and hints.get('promote'): + hints = hints.copy() + if op.args[0].concretetype == lltype.Ptr(rstr.STR): + del hints['promote'] + else: + del hints['promote_string'] + if hints.get('promote') and op.args[0].concretetype is not lltype.Void: assert op.args[0].concretetype != lltype.Ptr(rstr.STR) kind = getkind(op.args[0].concretetype) 
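For illustration, a minimal hypothetical sketch of the hint-selection rule that rewrite_op_hint applies above (the helper name and booleans are made up; the real code checks op.args[0].concretetype against lltype.Ptr(rstr.STR)): when a value carries both 'promote' and 'promote_string' hints, only the hint matching the value's low-level type is kept.

    # Simplified, stand-alone sketch of the selection logic shown in the hunk above.
    def pick_promote_hint(is_string_ptr, hints):
        hints = dict(hints)
        if hints.get('promote_string') and hints.get('promote'):
            if is_string_ptr:
                del hints['promote']          # string pointers promote by content
            else:
                del hints['promote_string']   # other values promote by value
        return hints

    assert pick_promote_hint(True,  {'promote': True, 'promote_string': True}) == {'promote_string': True}
    assert pick_promote_hint(False, {'promote': True, 'promote_string': True}) == {'promote': True}

This is what lets elidable_promote emit both hints unconditionally and leave the choice to jtransform.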
diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -1050,6 +1050,37 @@ assert op1.result == v2 assert op0.opname == '-live-' +def test_double_promote_str(): + PSTR = lltype.Ptr(rstr.STR) + v1 = varoftype(PSTR) + v2 = varoftype(PSTR) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = SpaceOperation('hint', + [v1, Constant({'promote_string': True}, lltype.Void)], + v2) + op2 = SpaceOperation('hint', + [v1, Constant({'promote_string': True, + 'promote': True}, lltype.Void)], + v2) + lst1 = tr.rewrite_operation(op1) + lst2 = tr.rewrite_operation(op2) + assert lst1 == lst2 + +def test_double_promote_nonstr(): + v1 = varoftype(lltype.Signed) + v2 = varoftype(lltype.Signed) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = SpaceOperation('hint', + [v1, Constant({'promote': True}, lltype.Void)], + v2) + op2 = SpaceOperation('hint', + [v1, Constant({'promote_string': True, + 'promote': True}, lltype.Void)], + v2) + lst1 = tr.rewrite_operation(op1) + lst2 = tr.rewrite_operation(op2) + assert lst1 == lst2 + def test_unicode_concat(): # test that the oopspec is present and correctly transformed PSTR = lltype.Ptr(rstr.UNICODE) diff --git a/rpython/jit/metainterp/test/test_list.py b/rpython/jit/metainterp/test/test_list.py --- a/rpython/jit/metainterp/test/test_list.py +++ b/rpython/jit/metainterp/test/test_list.py @@ -98,8 +98,8 @@ self.check_resops(setarrayitem_gc=0, call=0, getarrayitem_gc=0) def test_vlist_alloc_and_set(self): - # the check_loops fails, because [non-null] * n is not supported yet - # (it is implemented as a residual call) + # the check_loops fails, because [non-null] * n is only supported + # if n < 15 (otherwise it is implemented as a residual call) jitdriver = JitDriver(greens = [], reds = ['n']) def f(n): l = [1] * 20 @@ -116,7 +116,7 @@ res = self.meta_interp(f, [10], listops=True) assert res == f(10) - py.test.skip("'[non-null] * n' gives a residual call so far") + py.test.skip("'[non-null] * n' for n >= 15 gives a residual call so far") self.check_loops(setarrayitem_gc=0, getarrayitem_gc=0, call=0) def test_arraycopy_simpleoptimize(self): @@ -287,6 +287,74 @@ assert res == 5 self.check_resops(call=0) + def test_list_mul_virtual(self): + class Foo: + def __init__(self, l): + self.l = l + l[0] = self + + myjitdriver = JitDriver(greens = [], reds = ['y']) + def f(y): + while y > 0: + myjitdriver.jit_merge_point(y=y) + Foo([None] * 5) + y -= 1 + return 42 + + self.meta_interp(f, [5]) + self.check_resops({'int_sub': 2, + 'int_gt': 2, + 'guard_true': 2, + 'jump': 1}) + + def test_list_mul_virtual_nonzero(self): + class base: + pass + class Foo(base): + def __init__(self, l): + self.l = l + l[0] = self + class nil(base): + pass + + nil = nil() + + myjitdriver = JitDriver(greens = [], reds = ['y']) + def f(y): + while y > 0: + myjitdriver.jit_merge_point(y=y) + Foo([nil] * 5) + y -= 1 + return 42 + + self.meta_interp(f, [5]) + self.check_resops({'int_sub': 2, + 'int_gt': 2, + 'guard_true': 2, + 'jump': 1}) + + def test_list_mul_unsigned_virtual(self): + from rpython.rlib.rarithmetic import r_uint + + class Foo: + def __init__(self, l): + self.l = l + l[0] = self + + myjitdriver = JitDriver(greens = [], reds = ['y']) + def f(y): + while y > 0: + myjitdriver.jit_merge_point(y=y) + Foo([None] * r_uint(5)) + y -= 1 + return 42 + + self.meta_interp(f, [5]) + self.check_resops({'int_sub': 2, + 
'int_gt': 2, + 'guard_true': 2, + 'jump': 1}) + class TestLLtype(ListTests, LLJitMixin): def test_listops_dont_invalidate_caches(self): diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -130,7 +130,9 @@ if promote_args != 'all': args = [args[int(i)] for i in promote_args.split(",")] for arg in args: - code.append(" %s = hint(%s, promote=True)\n" % (arg, arg)) + code.append( #use both hints, and let jtransform pick the right one + " %s = hint(%s, promote=True, promote_string=True)\n" % + (arg, arg)) code.append(" return _orig_func_unlikely_name(%s)\n" % (argstring, )) d = {"_orig_func_unlikely_name": func, "hint": hint} exec py.code.Source("\n".join(code)).compile() in d diff --git a/rpython/rtyper/test/test_generator.py b/rpython/rtyper/test/test_generator.py --- a/rpython/rtyper/test/test_generator.py +++ b/rpython/rtyper/test/test_generator.py @@ -88,3 +88,16 @@ return s res = self.interpret(g, []) assert res == 6 + + def test_send(self): + def f(): + yield (yield 1) + 1 + def g(): + gen = f() + res = f.send(2) + assert res == 1 + res = f.next() + assert res == 3 + + res = self.interpret(g, []) + diff --git a/rpython/rtyper/test/test_rlist.py b/rpython/rtyper/test/test_rlist.py --- a/rpython/rtyper/test/test_rlist.py +++ b/rpython/rtyper/test/test_rlist.py @@ -1619,3 +1619,17 @@ rgc.ll_arraycopy = old_arraycopy # assert 2 <= res <= 10 + + def test_alloc_and_set(self): + def fn(i): + lst = [0] * r_uint(i) + return lst + t, rtyper, graph = self.gengraph(fn, [int]) + block = graph.startblock + seen = 0 + for op in block.operations: + if op.opname in ['cast_int_to_uint', 'cast_uint_to_int']: + continue + assert op.opname == 'direct_call' + seen += 1 + assert seen == 1 diff --git a/rpython/translator/transform.py b/rpython/translator/transform.py --- a/rpython/translator/transform.py +++ b/rpython/translator/transform.py @@ -30,7 +30,7 @@ # [a] * b # --> # c = newlist(a) -# d = mul(c, int b) +# d = mul(c, b) # --> # d = alloc_and_set(b, a) @@ -44,8 +44,7 @@ len(op.args) == 1): length1_lists[op.result] = op.args[0] elif (op.opname == 'mul' and - op.args[0] in length1_lists and - self.gettype(op.args[1]) is int): + op.args[0] in length1_lists): new_op = SpaceOperation('alloc_and_set', (op.args[1], length1_lists[op.args[0]]), op.result) From noreply at buildbot.pypy.org Thu Feb 27 03:36:01 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 27 Feb 2014 03:36:01 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: (pjenvey) Use space.isinstance_w instead of isinstance here to fix array ztranslation. Message-ID: <20140227023601.0DAAA1C3369@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69490:211aa0409896 Date: 2014-02-27 03:35 +0100 http://bitbucket.org/pypy/pypy/changeset/211aa0409896/ Log: (pjenvey) Use space.isinstance_w instead of isinstance here to fix array ztranslation. 
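For context, a hypothetical, simplified sketch (names invented) of the point the log message makes: checking the wrapped value through the object-space API instead of importing a concrete W_FloatObject class keeps the module usable with any object space, including the fake one used by ztranslation tests.

    # Any object exposing the object-space API works here.
    class _FakeSpace(object):
        w_float = float
        def isinstance_w(self, w_obj, w_type):
            return isinstance(w_obj, w_type)

    def is_wrapped_float(space, w_item):
        # no dependency on pypy.objspace.std.floatobject
        return space.isinstance_w(w_item, space.w_float)

    assert is_wrapped_float(_FakeSpace(), 1.5)
    assert not is_wrapped_float(_FakeSpace(), 3)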
diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -16,7 +16,6 @@ from pypy.interpreter.typedef import ( GetSetProperty, TypeDef, make_weakref_descr) from pypy.module._file.interp_file import W_File -from pypy.objspace.std.floatobject import W_FloatObject @unwrap_spec(typecode=str) @@ -626,7 +625,7 @@ try: item = unwrap(w_item) except OperationError, e: - if isinstance(w_item, W_FloatObject): + if space.isinstance_w(w_item, space.w_float): # Odd special case from cpython raise if mytype.method != '' and e.match(space, space.w_TypeError): From noreply at buildbot.pypy.org Thu Feb 27 07:11:24 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 07:11:24 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: clean up reduce axis arg errors Message-ID: <20140227061124.CB4481C03D5@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69494:6a347d1b5fb7 Date: 2014-02-27 00:45 -0500 http://bitbucket.org/pypy/pypy/changeset/6a347d1b5fb7/ Log: clean up reduce axis arg errors diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -3,7 +3,6 @@ from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module.micronumpy import descriptor, loop from rpython.rlib.rstring import strip_spaces -from rpython.rlib.rarithmetic import maxint from pypy.module.micronumpy.base import W_NDimArray FLOAT_SIZE = rffi.sizeof(lltype.Float) @@ -85,16 +84,3 @@ return _fromstring_bin(space, s, count, length, dtype) else: return _fromstring_text(space, s, count, sep, length, dtype) - -def unwrap_axis_arg(space, shapelen, w_axis): - if space.is_none(w_axis): - axis = maxint - else: - axis = space.int_w(w_axis) - if axis < -shapelen or axis >= shapelen: - raise oefmt(space.w_ValueError, - "axis entry %d is out of bounds [%d, %d)", - axis, -shapelen, shapelen) - if axis < 0: - axis += shapelen - return axis diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -763,8 +763,17 @@ assert add.reduce(1) == 1 assert list(maximum.reduce(zeros((2, 0)), axis=0)) == [] - raises(ValueError, maximum.reduce, zeros((2, 0)), axis=None) - raises(ValueError, maximum.reduce, zeros((2, 0)), axis=1) + exc = raises(ValueError, maximum.reduce, zeros((2, 0)), axis=None) + assert exc.value[0] == ('zero-size array to reduction operation ' + 'maximum which has no identity') + exc = raises(ValueError, maximum.reduce, zeros((2, 0)), axis=1) + assert exc.value[0] == ('zero-size array to reduction operation ' + 'maximum which has no identity') + + a = zeros((2, 2)) + 1 + assert (add.reduce(a, axis=1) == [2, 2]).all() + exc = raises(ValueError, add.reduce, a, axis=2) + assert exc.value[0] == "'axis' entry is out of bounds" def test_reduce_1d(self): from numpypy import array, add, maximum, less, float16, complex64 diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -4,9 +4,8 @@ from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty from pypy.module.micronumpy import boxes, descriptor, loop from rpython.rlib import jit -from rpython.rlib.rarithmetic import LONG_BIT +from 
rpython.rlib.rarithmetic import LONG_BIT, maxint from rpython.tool.sourcetools import func_with_new_name -from pypy.module.micronumpy.interp_support import unwrap_axis_arg from pypy.module.micronumpy.strides import shape_agreement from pypy.module.micronumpy.base import convert_to_array, W_NDimArray from pypy.module.micronumpy import constants as NPY @@ -175,7 +174,14 @@ if obj.is_scalar(): return obj.get_scalar_value() shapelen = len(obj_shape) - axis = unwrap_axis_arg(space, shapelen, w_axis) + if space.is_none(w_axis): + axis = maxint + else: + axis = space.int_w(w_axis) + if axis < -shapelen or axis >= shapelen: + raise oefmt(space.w_ValueError, "'axis' entry is out of bounds") + if axis < 0: + axis += shapelen assert axis >= 0 dtype = descriptor.decode_w_dtype(space, dtype) if dtype is None: @@ -192,8 +198,9 @@ for i in range(shapelen): if space.is_none(w_axis) or i == axis: if obj_shape[i] == 0: - raise oefmt(space.w_ValueError, "zero-size array to " - "%s.reduce without identity", self.name) + raise oefmt(space.w_ValueError, + "zero-size array to reduction operation %s " + "which has no identity", self.name) if shapelen > 1 and axis < shapelen: temp = None if cumulative: From noreply at buildbot.pypy.org Thu Feb 27 07:11:25 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 07:11:25 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: consolidate array creation funcs in ctors.py Message-ID: <20140227061125.E3BBC1C03D5@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69495:9fa7ed888869 Date: 2014-02-27 01:01 -0500 http://bitbucket.org/pypy/pypy/changeset/9fa7ed888869/ Log: consolidate array creation funcs in ctors.py diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -7,7 +7,8 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import cpython_api, Py_ssize_t, CANNOT_FAIL from pypy.module.cpyext.api import PyObject -from pypy.module.micronumpy.ndarray import W_NDimArray, array +from pypy.module.micronumpy.ndarray import W_NDimArray +from pypy.module.micronumpy.ctors import array from pypy.module.micronumpy.descriptor import get_dtype_cache, W_Dtype from pypy.module.micronumpy.concrete import ConcreteArray from rpython.rlib.rawstorage import RAW_STORAGE_PTR diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -7,14 +7,14 @@ 'ndarray': 'ndarray.W_NDimArray', 'dtype': 'descriptor.W_Dtype', - 'array': 'ndarray.array', - 'zeros': 'ndarray.zeros', - 'empty': 'ndarray.zeros', - 'empty_like': 'ndarray.empty_like', + 'array': 'ctors.array', + 'zeros': 'ctors.zeros', + 'empty': 'ctors.zeros', + 'empty_like': 'ctors.empty_like', '_reconstruct' : 'ndarray._reconstruct', - 'scalar' : 'ndarray.build_scalar', + 'scalar' : 'ctors.build_scalar', 'dot': 'arrayops.dot', - 'fromstring': 'interp_support.fromstring', + 'fromstring': 'ctors.fromstring', 'flatiter': 'flatiter.W_FlatIterator', 'concatenate': 'arrayops.concatenate', 'where': 'arrayops.where', diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -86,7 +86,7 @@ def convert_to_array(space, w_obj): - from pypy.module.micronumpy.ndarray import array + from pypy.module.micronumpy.ctors import array if isinstance(w_obj, 
W_NDimArray): return w_obj return array(space, w_obj) diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -40,7 +40,7 @@ return get_dtype_cache(space).dtypes_by_num[num] def descr__new__(space, w_subtype, w_value=None): - from pypy.module.micronumpy.ndarray import array + from pypy.module.micronumpy.ctors import array dtype = _get_dtype(space) if not space.is_none(w_value): w_arr = array(space, w_value, dtype, copy=False) diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -10,7 +10,7 @@ from pypy.module.micronumpy import boxes from pypy.module.micronumpy.descriptor import get_dtype_cache from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy.ndarray import array +from pypy.module.micronumpy.ctors import array from pypy.module.micronumpy.arrayops import where from pypy.module.micronumpy import ufuncs from rpython.rlib.objectmodel import specialize, instantiate diff --git a/pypy/module/micronumpy/converters.py b/pypy/module/micronumpy/converters.py --- a/pypy/module/micronumpy/converters.py +++ b/pypy/module/micronumpy/converters.py @@ -84,3 +84,15 @@ "duplicate value in 'axis'")) out[axis] = True return out + + +def shape_converter(space, w_size, dtype): + if space.is_none(w_size): + return [] + if space.isinstance_w(w_size, space.w_int): + return [space.int_w(w_size)] + shape = [] + for w_item in space.fixedview(w_size): + shape.append(space.int_w(w_item)) + shape += dtype.shape + return shape[:] diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/ctors.py rename from pypy/module/micronumpy/interp_support.py rename to pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/ctors.py @@ -3,9 +3,117 @@ from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module.micronumpy import descriptor, loop from rpython.rlib.rstring import strip_spaces -from pypy.module.micronumpy.base import W_NDimArray +from pypy.module.micronumpy import ufuncs +from pypy.module.micronumpy.base import W_NDimArray, convert_to_array +from pypy.module.micronumpy.converters import shape_converter +from pypy.module.micronumpy.strides import find_shape_and_elems -FLOAT_SIZE = rffi.sizeof(lltype.Float) + +def build_scalar(space, w_dtype, w_state): + from rpython.rtyper.lltypesystem import rffi, lltype + if not isinstance(w_dtype, descriptor.W_Dtype): + raise oefmt(space.w_TypeError, + "argument 1 must be numpy.dtype, not %T", w_dtype) + if w_dtype.elsize == 0: + raise oefmt(space.w_ValueError, "itemsize cannot be zero") + if not space.isinstance_w(w_state, space.w_str): + raise oefmt(space.w_TypeError, "initializing object must be a string") + if space.len_w(w_state) != w_dtype.elsize: + raise oefmt(space.w_ValueError, "initialization string is too small") + state = rffi.str2charp(space.str_w(w_state)) + box = w_dtype.itemtype.box_raw_data(state) + lltype.free(state, flavor="raw") + return box + + + at unwrap_spec(ndmin=int, copy=bool, subok=bool) +def array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False, + ndmin=0): + # for anything that isn't already an array, try __array__ method first + if not isinstance(w_object, W_NDimArray): + w___array__ = space.lookup(w_object, "__array__") + if w___array__ is not None: + if space.is_none(w_dtype): + w_dtype = space.w_None + w_array 
= space.get_and_call_function(w___array__, w_object, w_dtype) + if isinstance(w_array, W_NDimArray): + # feed w_array back into array() for other properties + return array(space, w_array, w_dtype, False, w_order, subok, ndmin) + else: + raise oefmt(space.w_ValueError, + "object __array__ method not producing an array") + + dtype = descriptor.decode_w_dtype(space, w_dtype) + + if space.is_none(w_order): + order = 'C' + else: + order = space.str_w(w_order) + if order != 'C': # or order != 'F': + raise oefmt(space.w_ValueError, "Unknown order: %s", order) + + # arrays with correct dtype + if isinstance(w_object, W_NDimArray) and \ + (space.is_none(w_dtype) or w_object.get_dtype() is dtype): + shape = w_object.get_shape() + if copy: + w_ret = w_object.descr_copy(space) + else: + if ndmin <= len(shape): + return w_object + new_impl = w_object.implementation.set_shape(space, w_object, shape) + w_ret = W_NDimArray(new_impl) + if ndmin > len(shape): + shape = [1] * (ndmin - len(shape)) + shape + w_ret.implementation = w_ret.implementation.set_shape(space, + w_ret, shape) + return w_ret + + # not an array or incorrect dtype + shape, elems_w = find_shape_and_elems(space, w_object, dtype) + if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): + for w_elem in elems_w: + if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar(): + w_elem = w_elem.get_scalar_value() + dtype = ufuncs.find_dtype_for_scalar(space, w_elem, dtype) + if dtype is None: + dtype = descriptor.get_dtype_cache(space).w_float64dtype + elif dtype.is_str_or_unicode() and dtype.elsize < 1: + # promote S0 -> S1, U0 -> U1 + dtype = descriptor.variable_dtype(space, dtype.char + '1') + + if ndmin > len(shape): + shape = [1] * (ndmin - len(shape)) + shape + w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) + arr_iter = w_arr.create_iter() + for w_elem in elems_w: + arr_iter.setitem(dtype.coerce(space, w_elem)) + arr_iter.next() + return w_arr + + +def zeros(space, w_shape, w_dtype=None, w_order=None): + dtype = space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) + if dtype.is_str_or_unicode() and dtype.elsize < 1: + dtype = descriptor.variable_dtype(space, dtype.char + '1') + shape = shape_converter(space, w_shape, dtype) + return W_NDimArray.from_shape(space, shape, dtype=dtype) + + + at unwrap_spec(subok=bool) +def empty_like(space, w_a, w_dtype=None, w_order=None, subok=True): + w_a = convert_to_array(space, w_a) + if w_dtype is None: + dtype = w_a.get_dtype() + else: + dtype = space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) + if dtype.is_str_or_unicode() and dtype.elsize < 1: + dtype = descriptor.variable_dtype(space, dtype.char + '1') + return W_NDimArray.from_shape(space, w_a.get_shape(), dtype=dtype, + w_instance=w_a if subok else None) + def _fromstring_text(space, s, count, sep, length, dtype): sep_stripped = strip_spaces(sep) @@ -57,6 +165,7 @@ return space.wrap(a) + def _fromstring_bin(space, s, count, length, dtype): itemsize = dtype.elsize assert itemsize >= 0 @@ -74,11 +183,11 @@ loop.fromstring_loop(space, a, dtype, itemsize, s) return space.wrap(a) + @unwrap_spec(s=str, count=int, sep=str, w_dtype=WrappedDefault(None)) def fromstring(space, s, w_dtype=None, count=-1, sep=''): dtype = space.interp_w(descriptor.W_Dtype, - space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype) - ) + space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) length = len(s) if sep == 
'': return _fromstring_bin(space, s, count, length, dtype) diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -7,9 +7,8 @@ from pypy.module.micronumpy.base import W_NDimArray, convert_to_array,\ ArrayArgumentException, wrap_impl from pypy.module.micronumpy import descriptor, ufuncs, boxes, arrayops -from pypy.module.micronumpy.strides import find_shape_and_elems,\ - get_shape_from_iterable, to_coords, shape_agreement, \ - shape_agreement_multiple +from pypy.module.micronumpy.strides import get_shape_from_iterable, to_coords, \ + shape_agreement, shape_agreement_multiple from pypy.module.micronumpy.flagsobj import W_FlagsObject from pypy.module.micronumpy.flatiter import W_FlatIterator from pypy.module.micronumpy.appbridge import get_appbridge_cache @@ -19,20 +18,11 @@ from rpython.rlib import jit from rpython.rlib.rstring import StringBuilder from pypy.module.micronumpy.concrete import BaseConcreteArray -from pypy.module.micronumpy.converters import order_converter, multi_axis_converter +from pypy.module.micronumpy.converters import order_converter, shape_converter, \ + multi_axis_converter from pypy.module.micronumpy import support from pypy.module.micronumpy import constants as NPY -def _find_shape(space, w_size, dtype): - if space.is_none(w_size): - return [] - if space.isinstance_w(w_size, space.w_int): - return [space.int_w(w_size)] - shape = [] - for w_item in space.fixedview(w_size): - shape.append(space.int_w(w_item)) - shape += dtype.shape - return shape[:] def _match_dot_shapes(space, left, right): left_shape = left.get_shape() @@ -1141,7 +1131,7 @@ from pypy.module.micronumpy.support import calc_strides dtype = space.interp_w(descriptor.W_Dtype, space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) - shape = _find_shape(space, w_shape, dtype) + shape = shape_converter(space, w_shape, dtype) if not space.is_none(w_buffer): if (not space.is_none(w_strides)): @@ -1194,7 +1184,7 @@ dtype = space.interp_w(descriptor.W_Dtype, space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) - shape = _find_shape(space, w_shape, dtype) + shape = shape_converter(space, w_shape, dtype) if w_subtype: if not space.isinstance_w(w_subtype, space.w_type): raise OperationError(space.w_ValueError, space.wrap( @@ -1395,107 +1385,6 @@ __array__ = interp2app(W_NDimArray.descr___array__), ) - at unwrap_spec(ndmin=int, copy=bool, subok=bool) -def array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False, - ndmin=0): - # for anything that isn't already an array, try __array__ method first - if not isinstance(w_object, W_NDimArray): - w___array__ = space.lookup(w_object, "__array__") - if w___array__ is not None: - if space.is_none(w_dtype): - w_dtype = space.w_None - w_array = space.get_and_call_function(w___array__, w_object, w_dtype) - if isinstance(w_array, W_NDimArray): - # feed w_array back into array() for other properties - return array(space, w_array, w_dtype, False, w_order, subok, ndmin) - else: - raise oefmt(space.w_ValueError, - "object __array__ method not producing an array") - - dtype = descriptor.decode_w_dtype(space, w_dtype) - - if space.is_none(w_order): - order = 'C' - else: - order = space.str_w(w_order) - if order != 'C': # or order != 'F': - raise oefmt(space.w_ValueError, "Unknown order: %s", order) - - # arrays with correct dtype - if isinstance(w_object, W_NDimArray) and \ - (space.is_none(w_dtype) or w_object.get_dtype() is 
dtype): - shape = w_object.get_shape() - if copy: - w_ret = w_object.descr_copy(space) - else: - if ndmin <= len(shape): - return w_object - new_impl = w_object.implementation.set_shape(space, w_object, shape) - w_ret = W_NDimArray(new_impl) - if ndmin > len(shape): - shape = [1] * (ndmin - len(shape)) + shape - w_ret.implementation = w_ret.implementation.set_shape(space, - w_ret, shape) - return w_ret - - # not an array or incorrect dtype - shape, elems_w = find_shape_and_elems(space, w_object, dtype) - if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): - for w_elem in elems_w: - if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar(): - w_elem = w_elem.get_scalar_value() - dtype = ufuncs.find_dtype_for_scalar(space, w_elem, dtype) - if dtype is None: - dtype = descriptor.get_dtype_cache(space).w_float64dtype - elif dtype.is_str_or_unicode() and dtype.elsize < 1: - # promote S0 -> S1, U0 -> U1 - dtype = descriptor.variable_dtype(space, dtype.char + '1') - - if ndmin > len(shape): - shape = [1] * (ndmin - len(shape)) + shape - w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) - arr_iter = w_arr.create_iter() - for w_elem in elems_w: - arr_iter.setitem(dtype.coerce(space, w_elem)) - arr_iter.next() - return w_arr - -def zeros(space, w_shape, w_dtype=None, w_order=None): - dtype = space.interp_w(descriptor.W_Dtype, - space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) - if dtype.is_str_or_unicode() and dtype.elsize < 1: - dtype = descriptor.variable_dtype(space, dtype.char + '1') - shape = _find_shape(space, w_shape, dtype) - return W_NDimArray.from_shape(space, shape, dtype=dtype) - - at unwrap_spec(subok=bool) -def empty_like(space, w_a, w_dtype=None, w_order=None, subok=True): - w_a = convert_to_array(space, w_a) - if w_dtype is None: - dtype = w_a.get_dtype() - else: - dtype = space.interp_w(descriptor.W_Dtype, - space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) - if dtype.is_str_or_unicode() and dtype.elsize < 1: - dtype = descriptor.variable_dtype(space, dtype.char + '1') - return W_NDimArray.from_shape(space, w_a.get_shape(), dtype=dtype, - w_instance=w_a if subok else None) - -def build_scalar(space, w_dtype, w_state): - from rpython.rtyper.lltypesystem import rffi, lltype - if not isinstance(w_dtype, descriptor.W_Dtype): - raise oefmt(space.w_TypeError, - "argument 1 must be numpy.dtype, not %T", w_dtype) - if w_dtype.elsize == 0: - raise oefmt(space.w_ValueError, "itemsize cannot be zero") - if not space.isinstance_w(w_state, space.w_str): - raise oefmt(space.w_TypeError, "initializing object must be a string") - if space.len_w(w_state) != w_dtype.elsize: - raise oefmt(space.w_ValueError, "initialization string is too small") - state = rffi.str2charp(space.str_w(w_state)) - box = w_dtype.itemtype.box_raw_data(state) - lltype.free(state, flavor="raw") - return box def _reconstruct(space, w_subtype, w_shape, w_dtype): return descr_new_array(space, w_subtype, w_shape, w_dtype) From noreply at buildbot.pypy.org Thu Feb 27 07:11:23 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 07:11:23 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: rename some things for sanity Message-ID: <20140227061123.A23791C03D5@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69493:3f2859c313a1 Date: 2014-02-27 00:22 -0500 http://bitbucket.org/pypy/pypy/changeset/3f2859c313a1/ Log: rename some things for sanity diff --git a/pypy/module/cpyext/ndarrayobject.py 
b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -7,8 +7,8 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import cpython_api, Py_ssize_t, CANNOT_FAIL from pypy.module.cpyext.api import PyObject -from pypy.module.micronumpy.interp_numarray import W_NDimArray, array -from pypy.module.micronumpy.interp_dtype import get_dtype_cache, W_Dtype +from pypy.module.micronumpy.ndarray import W_NDimArray, array +from pypy.module.micronumpy.descriptor import get_dtype_cache, W_Dtype from pypy.module.micronumpy.concrete import ConcreteArray from rpython.rlib.rawstorage import RAW_STORAGE_PTR diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -2,8 +2,8 @@ from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.module.micronumpy.interp_numarray import W_NDimArray -from pypy.module.micronumpy.interp_dtype import get_dtype_cache +from pypy.module.micronumpy.ndarray import W_NDimArray +from pypy.module.micronumpy.descriptor import get_dtype_cache def scalar(space): dtype = get_dtype_cache(space).w_float64dtype diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -4,24 +4,24 @@ class MultiArrayModule(MixedModule): appleveldefs = {'arange': 'app_numpy.arange'} interpleveldefs = { - 'ndarray': 'interp_numarray.W_NDimArray', - 'dtype': 'interp_dtype.W_Dtype', + 'ndarray': 'ndarray.W_NDimArray', + 'dtype': 'descriptor.W_Dtype', - 'array': 'interp_numarray.array', - 'zeros': 'interp_numarray.zeros', - 'empty': 'interp_numarray.zeros', - 'empty_like': 'interp_numarray.empty_like', - '_reconstruct' : 'interp_numarray._reconstruct', - 'scalar' : 'interp_numarray.build_scalar', - 'dot': 'interp_arrayops.dot', + 'array': 'ndarray.array', + 'zeros': 'ndarray.zeros', + 'empty': 'ndarray.zeros', + 'empty_like': 'ndarray.empty_like', + '_reconstruct' : 'ndarray._reconstruct', + 'scalar' : 'ndarray.build_scalar', + 'dot': 'arrayops.dot', 'fromstring': 'interp_support.fromstring', - 'flatiter': 'interp_flatiter.W_FlatIterator', - 'concatenate': 'interp_arrayops.concatenate', - 'where': 'interp_arrayops.where', - 'count_nonzero': 'interp_arrayops.count_nonzero', + 'flatiter': 'flatiter.W_FlatIterator', + 'concatenate': 'arrayops.concatenate', + 'where': 'arrayops.where', + 'count_nonzero': 'arrayops.count_nonzero', 'set_string_function': 'appbridge.set_string_function', - 'typeinfo': 'interp_dtype.get_dtype_cache(space).w_typeinfo', + 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', } @@ -107,7 +107,7 @@ ('real', 'real'), ('imag', 'imag'), ]: - interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl + interpleveldefs[exposed] = "ufuncs.get(space).%s" % impl class Module(MixedModule): diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/arrayops.py rename from pypy/module/micronumpy/interp_arrayops.py rename to pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -1,10 +1,10 @@ from pypy.module.micronumpy.base import convert_to_array, W_NDimArray -from pypy.module.micronumpy import loop, interp_dtype, interp_ufuncs +from 
pypy.module.micronumpy import loop, descriptor, ufuncs from pypy.module.micronumpy.strides import Chunk, Chunks, shape_agreement, \ shape_agreement_multiple from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec -from pypy.module.micronumpy.conversion_utils import clipmode_converter +from pypy.module.micronumpy.converters import clipmode_converter from pypy.module.micronumpy import support from pypy.module.micronumpy import constants as NPY @@ -84,7 +84,7 @@ if arr.get_dtype().itemtype.bool(arr.get_scalar_value()): return x return y - dtype = interp_ufuncs.find_binop_result_dtype(space, x.get_dtype(), + dtype = ufuncs.find_binop_result_dtype(space, x.get_dtype(), y.get_dtype()) shape = shape_agreement(space, arr.get_shape(), x) shape = shape_agreement(space, shape, y) @@ -147,7 +147,7 @@ elif dtype.is_record() or a_dt.is_record(): raise OperationError(space.w_TypeError, space.wrap("invalid type promotion")) - dtype = interp_ufuncs.find_binop_result_dtype(space, dtype, + dtype = ufuncs.find_binop_result_dtype(space, dtype, arr.get_dtype()) # concatenate does not handle ndarray subtypes, it always returns a ndarray res = W_NDimArray.from_shape(space, shape, dtype, 'C') @@ -202,7 +202,7 @@ raise OperationError(space.w_TypeError, space.wrap( "return arrays must be of ArrayType")) shape = shape_agreement_multiple(space, choices + [w_out]) - out = interp_dtype.dtype_agreement(space, choices, shape, w_out) + out = descriptor.dtype_agreement(space, choices, shape, w_out) dtype = out.get_dtype() mode = clipmode_converter(space, w_mode) loop.choose(space, arr, choices, shape, dtype, out, mode) diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -86,7 +86,7 @@ def convert_to_array(space, w_obj): - from pypy.module.micronumpy.interp_numarray import array + from pypy.module.micronumpy.ndarray import array if isinstance(w_obj, W_NDimArray): return w_obj return array(space, w_obj) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/boxes.py rename from pypy/module/micronumpy/interp_boxes.py rename to pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -12,7 +12,7 @@ from rpython.tool.sourcetools import func_with_new_name from pypy.module.micronumpy.concrete import VoidBoxStorage from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy.interp_flagsobj import W_FlagsObject +from pypy.module.micronumpy.flagsobj import W_FlagsObject from pypy.interpreter.mixedmodule import MixedModule from rpython.rtyper.lltypesystem import lltype from rpython.rlib.rstring import StringBuilder @@ -36,11 +36,11 @@ def new_dtype_getter(num): @specialize.memo() def _get_dtype(space): - from pypy.module.micronumpy.interp_dtype import get_dtype_cache + from pypy.module.micronumpy.descriptor import get_dtype_cache return get_dtype_cache(space).dtypes_by_num[num] def descr__new__(space, w_subtype, w_value=None): - from pypy.module.micronumpy.interp_numarray import array + from pypy.module.micronumpy.ndarray import array dtype = _get_dtype(space) if not space.is_none(w_value): w_arr = array(space, w_value, dtype, copy=False) @@ -188,22 +188,22 @@ def _binop_impl(ufunc_name): def impl(self, space, w_other, w_out=None): - from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + from pypy.module.micronumpy import 
ufuncs + return getattr(ufuncs.get(space), ufunc_name).call(space, [self, w_other, w_out]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) def _binop_right_impl(ufunc_name): def impl(self, space, w_other, w_out=None): - from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + from pypy.module.micronumpy import ufuncs + return getattr(ufuncs.get(space), ufunc_name).call(space, [w_other, self, w_out]) return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) def _unaryop_impl(ufunc_name): def impl(self, space, w_out=None): - from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + from pypy.module.micronumpy import ufuncs + return getattr(ufuncs.get(space), ufunc_name).call(space, [self, w_out]) return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) @@ -259,17 +259,17 @@ return space.newtuple([w_quotient, w_remainder]) def descr_any(self, space): - from pypy.module.micronumpy.interp_dtype import get_dtype_cache + from pypy.module.micronumpy.descriptor import get_dtype_cache value = space.is_true(self) return get_dtype_cache(space).w_booldtype.box(value) def descr_all(self, space): - from pypy.module.micronumpy.interp_dtype import get_dtype_cache + from pypy.module.micronumpy.descriptor import get_dtype_cache value = space.is_true(self) return get_dtype_cache(space).w_booldtype.box(value) def descr_zero(self, space): - from pypy.module.micronumpy.interp_dtype import get_dtype_cache + from pypy.module.micronumpy.descriptor import get_dtype_cache return get_dtype_cache(space).w_longdtype.box(0) def descr_ravel(self, space): @@ -285,13 +285,13 @@ return self.get_dtype(space).itemtype.round(self, decimals) def descr_astype(self, space, w_dtype): - from pypy.module.micronumpy.interp_dtype import W_Dtype + from pypy.module.micronumpy.descriptor import W_Dtype dtype = space.interp_w(W_Dtype, space.call_function(space.gettypefor(W_Dtype), w_dtype)) return self.convert_to(space, dtype) def descr_view(self, space, w_dtype): - from pypy.module.micronumpy.interp_dtype import W_Dtype + from pypy.module.micronumpy.descriptor import W_Dtype try: subclass = space.is_true(space.issubtype( w_dtype, space.gettypefor(W_NDimArray))) @@ -520,7 +520,7 @@ class W_StringBox(W_CharacterBox): def descr__new__string_box(space, w_subtype, w_arg): - from pypy.module.micronumpy.interp_dtype import new_string_dtype + from pypy.module.micronumpy.descriptor import new_string_dtype arg = space.str_w(space.str(w_arg)) arr = VoidBoxStorage(len(arg), new_string_dtype(space, len(arg))) for i in range(len(arg)): @@ -531,7 +531,7 @@ def descr__new__unicode_box(space, w_subtype, w_arg): raise OperationError(space.w_NotImplementedError, space.wrap("Unicode is not supported yet")) - from pypy.module.micronumpy.interp_dtype import new_unicode_dtype + from pypy.module.micronumpy.descriptor import new_unicode_dtype arg = space.unicode_w(space.unicode_from_object(w_arg)) # XXX size computations, we need tests anyway diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -7,12 +7,12 @@ from pypy.interpreter import special from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root from pypy.interpreter.error import OperationError -from pypy.module.micronumpy import interp_boxes -from pypy.module.micronumpy.interp_dtype import get_dtype_cache +from pypy.module.micronumpy 
import boxes +from pypy.module.micronumpy.descriptor import get_dtype_cache from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy.interp_numarray import array -from pypy.module.micronumpy.interp_arrayops import where -from pypy.module.micronumpy import interp_ufuncs +from pypy.module.micronumpy.ndarray import array +from pypy.module.micronumpy.arrayops import where +from pypy.module.micronumpy import ufuncs from rpython.rlib.objectmodel import specialize, instantiate from rpython.rlib.nonconst import NonConstant @@ -151,7 +151,7 @@ def float(self, w_obj): if isinstance(w_obj, FloatObject): return w_obj - assert isinstance(w_obj, interp_boxes.W_GenericBox) + assert isinstance(w_obj, boxes.W_GenericBox) return self.float(w_obj.descr_float(self)) def float_w(self, w_obj): @@ -183,13 +183,13 @@ def int(self, w_obj): if isinstance(w_obj, IntObject): return w_obj - assert isinstance(w_obj, interp_boxes.W_GenericBox) + assert isinstance(w_obj, boxes.W_GenericBox) return self.int(w_obj.descr_int(self)) def str(self, w_obj): if isinstance(w_obj, StringObject): return w_obj - assert isinstance(w_obj, interp_boxes.W_GenericBox) + assert isinstance(w_obj, boxes.W_GenericBox) return self.str(w_obj.descr_str(self)) def is_true(self, w_obj): @@ -399,7 +399,7 @@ else: raise NotImplementedError if (not isinstance(w_res, W_NDimArray) and - not isinstance(w_res, interp_boxes.W_GenericBox)): + not isinstance(w_res, boxes.W_GenericBox)): dtype = get_dtype_cache(interp.space).w_float64dtype w_res = W_NDimArray.new_scalar(interp.space, dtype, w_res) return w_res @@ -554,10 +554,10 @@ elif self.name == "all": w_res = arr.descr_all(interp.space) elif self.name == "unegative": - neg = interp_ufuncs.get(interp.space).negative + neg = ufuncs.get(interp.space).negative w_res = neg.call(interp.space, [arr]) elif self.name == "cos": - cos = interp_ufuncs.get(interp.space).cos + cos = ufuncs.get(interp.space).cos w_res = cos.call(interp.space, [arr]) elif self.name == "flat": w_res = arr.descr_get_flatiter(interp.space) @@ -611,7 +611,7 @@ dtype = get_dtype_cache(interp.space).w_int64dtype elif isinstance(w_res, BoolObject): dtype = get_dtype_cache(interp.space).w_booldtype - elif isinstance(w_res, interp_boxes.W_GenericBox): + elif isinstance(w_res, boxes.W_GenericBox): dtype = w_res.get_dtype(interp.space) else: dtype = None diff --git a/pypy/module/micronumpy/conversion_utils.py b/pypy/module/micronumpy/converters.py rename from pypy/module/micronumpy/conversion_utils.py rename to pypy/module/micronumpy/converters.py diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/descriptor.py rename from pypy/module/micronumpy/interp_dtype.py rename to pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/descriptor.py @@ -4,12 +4,12 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, interp_attrproperty_w) -from pypy.module.micronumpy import types, interp_boxes, base +from pypy.module.micronumpy import types, boxes, base from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_longlong, r_ulonglong from rpython.rlib import jit from pypy.module.micronumpy.appbridge import get_appbridge_cache -from pypy.module.micronumpy.conversion_utils import byteorder_converter +from pypy.module.micronumpy.converters import byteorder_converter from pypy.module.micronumpy import support from pypy.module.micronumpy 
import constants as NPY @@ -26,7 +26,7 @@ """ agree on dtype from a list of arrays. if out is allocated, use it's dtype, otherwise allocate a new one with agreed dtype """ - from pypy.module.micronumpy.interp_ufuncs import find_binop_result_dtype + from pypy.module.micronumpy.ufuncs import find_binop_result_dtype if not space.is_none(out): return out @@ -448,7 +448,7 @@ offset += subdtype.elsize names.append(fldname) return W_Dtype(types.RecordType(), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, - space.gettypefor(interp_boxes.W_VoidBox), + space.gettypefor(boxes.W_VoidBox), names=names, fields=fields, elsize=offset) @@ -490,7 +490,7 @@ return subdtype size *= subdtype.elsize return W_Dtype(types.VoidType(), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, - space.gettypefor(interp_boxes.W_VoidBox), + space.gettypefor(boxes.W_VoidBox), shape=shape, subdtype=subdtype, elsize=size) if space.is_none(w_dtype): @@ -606,7 +606,7 @@ num=NPY.STRING, kind=NPY.STRINGLTR, char=char, - w_box_type=space.gettypefor(interp_boxes.W_StringBox), + w_box_type=space.gettypefor(boxes.W_StringBox), ) @@ -618,7 +618,7 @@ num=NPY.UNICODE, kind=NPY.UNICODELTR, char=NPY.UNICODELTR, - w_box_type=space.gettypefor(interp_boxes.W_UnicodeBox), + w_box_type=space.gettypefor(boxes.W_UnicodeBox), ) @@ -629,7 +629,7 @@ num=NPY.VOID, kind=NPY.VOIDLTR, char=NPY.VOIDLTR, - w_box_type=space.gettypefor(interp_boxes.W_VoidBox), + w_box_type=space.gettypefor(boxes.W_VoidBox), ) @@ -640,119 +640,119 @@ num=NPY.BOOL, kind=NPY.GENBOOLLTR, char=NPY.BOOLLTR, - w_box_type=space.gettypefor(interp_boxes.W_BoolBox), + w_box_type=space.gettypefor(boxes.W_BoolBox), ) self.w_int8dtype = W_Dtype( types.Int8(), num=NPY.BYTE, kind=NPY.SIGNEDLTR, char=NPY.BYTELTR, - w_box_type=space.gettypefor(interp_boxes.W_Int8Box), + w_box_type=space.gettypefor(boxes.W_Int8Box), ) self.w_uint8dtype = W_Dtype( types.UInt8(), num=NPY.UBYTE, kind=NPY.UNSIGNEDLTR, char=NPY.UBYTELTR, - w_box_type=space.gettypefor(interp_boxes.W_UInt8Box), + w_box_type=space.gettypefor(boxes.W_UInt8Box), ) self.w_int16dtype = W_Dtype( types.Int16(), num=NPY.SHORT, kind=NPY.SIGNEDLTR, char=NPY.SHORTLTR, - w_box_type=space.gettypefor(interp_boxes.W_Int16Box), + w_box_type=space.gettypefor(boxes.W_Int16Box), ) self.w_uint16dtype = W_Dtype( types.UInt16(), num=NPY.USHORT, kind=NPY.UNSIGNEDLTR, char=NPY.USHORTLTR, - w_box_type=space.gettypefor(interp_boxes.W_UInt16Box), + w_box_type=space.gettypefor(boxes.W_UInt16Box), ) self.w_int32dtype = W_Dtype( types.Int32(), num=NPY.INT, kind=NPY.SIGNEDLTR, char=NPY.INTLTR, - w_box_type=space.gettypefor(interp_boxes.W_Int32Box), + w_box_type=space.gettypefor(boxes.W_Int32Box), ) self.w_uint32dtype = W_Dtype( types.UInt32(), num=NPY.UINT, kind=NPY.UNSIGNEDLTR, char=NPY.UINTLTR, - w_box_type=space.gettypefor(interp_boxes.W_UInt32Box), + w_box_type=space.gettypefor(boxes.W_UInt32Box), ) self.w_longdtype = W_Dtype( types.Long(), num=NPY.LONG, kind=NPY.SIGNEDLTR, char=NPY.LONGLTR, - w_box_type=space.gettypefor(interp_boxes.W_LongBox), + w_box_type=space.gettypefor(boxes.W_LongBox), ) self.w_ulongdtype = W_Dtype( types.ULong(), num=NPY.ULONG, kind=NPY.UNSIGNEDLTR, char=NPY.ULONGLTR, - w_box_type=space.gettypefor(interp_boxes.W_ULongBox), + w_box_type=space.gettypefor(boxes.W_ULongBox), ) self.w_int64dtype = W_Dtype( types.Int64(), num=NPY.LONGLONG, kind=NPY.SIGNEDLTR, char=NPY.LONGLONGLTR, - w_box_type=space.gettypefor(interp_boxes.W_Int64Box), + w_box_type=space.gettypefor(boxes.W_Int64Box), ) self.w_uint64dtype = W_Dtype( types.UInt64(), num=NPY.ULONGLONG, 
kind=NPY.UNSIGNEDLTR, char=NPY.ULONGLONGLTR, - w_box_type=space.gettypefor(interp_boxes.W_UInt64Box), + w_box_type=space.gettypefor(boxes.W_UInt64Box), ) self.w_float32dtype = W_Dtype( types.Float32(), num=NPY.FLOAT, kind=NPY.FLOATINGLTR, char=NPY.FLOATLTR, - w_box_type=space.gettypefor(interp_boxes.W_Float32Box), + w_box_type=space.gettypefor(boxes.W_Float32Box), ) self.w_float64dtype = W_Dtype( types.Float64(), num=NPY.DOUBLE, kind=NPY.FLOATINGLTR, char=NPY.DOUBLELTR, - w_box_type=space.gettypefor(interp_boxes.W_Float64Box), + w_box_type=space.gettypefor(boxes.W_Float64Box), ) self.w_floatlongdtype = W_Dtype( types.FloatLong(), num=NPY.LONGDOUBLE, kind=NPY.FLOATINGLTR, char=NPY.LONGDOUBLELTR, - w_box_type=space.gettypefor(interp_boxes.W_FloatLongBox), + w_box_type=space.gettypefor(boxes.W_FloatLongBox), ) self.w_complex64dtype = W_Dtype( types.Complex64(), num=NPY.CFLOAT, kind=NPY.COMPLEXLTR, char=NPY.CFLOATLTR, - w_box_type=space.gettypefor(interp_boxes.W_Complex64Box), + w_box_type=space.gettypefor(boxes.W_Complex64Box), ) self.w_complex128dtype = W_Dtype( types.Complex128(), num=NPY.CDOUBLE, kind=NPY.COMPLEXLTR, char=NPY.CDOUBLELTR, - w_box_type=space.gettypefor(interp_boxes.W_Complex128Box), + w_box_type=space.gettypefor(boxes.W_Complex128Box), ) self.w_complexlongdtype = W_Dtype( types.ComplexLong(), num=NPY.CLONGDOUBLE, kind=NPY.COMPLEXLTR, char=NPY.CLONGDOUBLELTR, - w_box_type=space.gettypefor(interp_boxes.W_ComplexLongBox), + w_box_type=space.gettypefor(boxes.W_ComplexLongBox), ) self.w_stringdtype = W_Dtype( types.StringType(), @@ -760,7 +760,7 @@ num=NPY.STRING, kind=NPY.STRINGLTR, char=NPY.STRINGLTR, - w_box_type=space.gettypefor(interp_boxes.W_StringBox), + w_box_type=space.gettypefor(boxes.W_StringBox), ) self.w_unicodedtype = W_Dtype( types.UnicodeType(), @@ -768,7 +768,7 @@ num=NPY.UNICODE, kind=NPY.UNICODELTR, char=NPY.UNICODELTR, - w_box_type=space.gettypefor(interp_boxes.W_UnicodeBox), + w_box_type=space.gettypefor(boxes.W_UnicodeBox), ) self.w_voiddtype = W_Dtype( types.VoidType(), @@ -776,28 +776,28 @@ num=NPY.VOID, kind=NPY.VOIDLTR, char=NPY.VOIDLTR, - w_box_type=space.gettypefor(interp_boxes.W_VoidBox), + w_box_type=space.gettypefor(boxes.W_VoidBox), ) self.w_float16dtype = W_Dtype( types.Float16(), num=NPY.HALF, kind=NPY.FLOATINGLTR, char=NPY.HALFLTR, - w_box_type=space.gettypefor(interp_boxes.W_Float16Box), + w_box_type=space.gettypefor(boxes.W_Float16Box), ) self.w_intpdtype = W_Dtype( types.Long(), num=NPY.LONG, kind=NPY.SIGNEDLTR, char=NPY.INTPLTR, - w_box_type=space.gettypefor(interp_boxes.W_LongBox), + w_box_type=space.gettypefor(boxes.W_LongBox), ) self.w_uintpdtype = W_Dtype( types.ULong(), num=NPY.ULONG, kind=NPY.UNSIGNEDLTR, char=NPY.UINTPLTR, - w_box_type=space.gettypefor(interp_boxes.W_ULongBox), + w_box_type=space.gettypefor(boxes.W_ULongBox), ) aliases = { NPY.BOOL: ['bool', 'bool8'], @@ -821,19 +821,19 @@ self.alternate_constructors = { NPY.BOOL: [space.w_bool], NPY.LONG: [space.w_int, - space.gettypefor(interp_boxes.W_IntegerBox), - space.gettypefor(interp_boxes.W_SignedIntegerBox)], - NPY.ULONG: [space.gettypefor(interp_boxes.W_UnsignedIntegerBox)], + space.gettypefor(boxes.W_IntegerBox), + space.gettypefor(boxes.W_SignedIntegerBox)], + NPY.ULONG: [space.gettypefor(boxes.W_UnsignedIntegerBox)], NPY.LONGLONG: [space.w_long], NPY.DOUBLE: [space.w_float, - space.gettypefor(interp_boxes.W_NumberBox), - space.gettypefor(interp_boxes.W_FloatingBox)], + space.gettypefor(boxes.W_NumberBox), + space.gettypefor(boxes.W_FloatingBox)], NPY.CDOUBLE: 
[space.w_complex, - space.gettypefor(interp_boxes.W_ComplexFloatingBox)], + space.gettypefor(boxes.W_ComplexFloatingBox)], NPY.STRING: [space.w_str, - space.gettypefor(interp_boxes.W_CharacterBox)], + space.gettypefor(boxes.W_CharacterBox)], NPY.UNICODE: [space.w_unicode], - NPY.VOID: [space.gettypefor(interp_boxes.W_GenericBox)], + NPY.VOID: [space.gettypefor(boxes.W_GenericBox)], #space.w_buffer, # XXX no buffer in space } float_dtypes = [self.w_float16dtype, self.w_float32dtype, @@ -909,16 +909,16 @@ } typeinfo_partial = { - 'Generic': interp_boxes.W_GenericBox, - 'Character': interp_boxes.W_CharacterBox, - 'Flexible': interp_boxes.W_FlexibleBox, - 'Inexact': interp_boxes.W_InexactBox, - 'Integer': interp_boxes.W_IntegerBox, - 'SignedInteger': interp_boxes.W_SignedIntegerBox, - 'UnsignedInteger': interp_boxes.W_UnsignedIntegerBox, - 'ComplexFloating': interp_boxes.W_ComplexFloatingBox, - 'Number': interp_boxes.W_NumberBox, - 'Floating': interp_boxes.W_FloatingBox + 'Generic': boxes.W_GenericBox, + 'Character': boxes.W_CharacterBox, + 'Flexible': boxes.W_FlexibleBox, + 'Inexact': boxes.W_InexactBox, + 'Integer': boxes.W_IntegerBox, + 'SignedInteger': boxes.W_SignedIntegerBox, + 'UnsignedInteger': boxes.W_UnsignedIntegerBox, + 'ComplexFloating': boxes.W_ComplexFloatingBox, + 'Number': boxes.W_NumberBox, + 'Floating': boxes.W_FloatingBox } w_typeinfo = space.newdict() for k, v in typeinfo_partial.iteritems(): diff --git a/pypy/module/micronumpy/interp_flagsobj.py b/pypy/module/micronumpy/flagsobj.py rename from pypy/module/micronumpy/interp_flagsobj.py rename to pypy/module/micronumpy/flagsobj.py diff --git a/pypy/module/micronumpy/interp_flatiter.py b/pypy/module/micronumpy/flatiter.py rename from pypy/module/micronumpy/interp_flatiter.py rename to pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/interp_flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -83,4 +83,4 @@ def descr_base(self, space): return space.wrap(self.base) -# typedef is in interp_numarray, so we see the additional arguments +# typedef is in interp_ndarray, so we see the additional arguments diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -1,7 +1,7 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from rpython.rtyper.lltypesystem import lltype, rffi -from pypy.module.micronumpy import interp_dtype, loop +from pypy.module.micronumpy import descriptor, loop from rpython.rlib.rstring import strip_spaces from rpython.rlib.rarithmetic import maxint from pypy.module.micronumpy.base import W_NDimArray @@ -77,8 +77,8 @@ @unwrap_spec(s=str, count=int, sep=str, w_dtype=WrappedDefault(None)) def fromstring(space, s, w_dtype=None, count=-1, sep=''): - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) + dtype = space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype) ) length = len(s) if sep == '': diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/ndarray.py rename from pypy/module/micronumpy/interp_numarray.py rename to pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -6,21 +6,20 @@ WrappedDefault from pypy.module.micronumpy.base import W_NDimArray, convert_to_array,\ 
ArrayArgumentException, wrap_impl -from pypy.module.micronumpy import interp_dtype, interp_ufuncs, interp_boxes,\ - interp_arrayops +from pypy.module.micronumpy import descriptor, ufuncs, boxes, arrayops from pypy.module.micronumpy.strides import find_shape_and_elems,\ get_shape_from_iterable, to_coords, shape_agreement, \ shape_agreement_multiple -from pypy.module.micronumpy.interp_flagsobj import W_FlagsObject -from pypy.module.micronumpy.interp_flatiter import W_FlatIterator +from pypy.module.micronumpy.flagsobj import W_FlagsObject +from pypy.module.micronumpy.flatiter import W_FlatIterator from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy import loop -from pypy.module.micronumpy.interp_arrayops import repeat, choose, put +from pypy.module.micronumpy.arrayops import repeat, choose, put from rpython.tool.sourcetools import func_with_new_name from rpython.rlib import jit from rpython.rlib.rstring import StringBuilder from pypy.module.micronumpy.concrete import BaseConcreteArray -from pypy.module.micronumpy.conversion_utils import order_converter, multi_axis_converter +from pypy.module.micronumpy.converters import order_converter, multi_axis_converter from pypy.module.micronumpy import support from pypy.module.micronumpy import constants as NPY @@ -84,8 +83,8 @@ return self.implementation.dtype def descr_set_dtype(self, space, w_dtype): - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + dtype = space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) if (dtype.elsize != self.get_dtype().elsize or dtype.is_flexible() or self.get_dtype().is_flexible()): raise OperationError(space.w_ValueError, space.wrap( @@ -424,7 +423,7 @@ return self.implementation.swapaxes(space, self, axis1, axis2) def descr_nonzero(self, space): - index_type = interp_dtype.get_dtype_cache(space).w_int64dtype + index_type = descriptor.get_dtype_cache(space).w_int64dtype return self.implementation.nonzero(space, index_type) def descr_tolist(self, space): @@ -492,7 +491,7 @@ if space.is_none(w_arg): if self.get_size() == 1: w_obj = self.get_scalar_value() - assert isinstance(w_obj, interp_boxes.W_GenericBox) + assert isinstance(w_obj, boxes.W_GenericBox) return w_obj.item(space) raise oefmt(space.w_ValueError, "can only convert an array of size 1 to a Python scalar") @@ -501,7 +500,7 @@ raise oefmt(space.w_IndexError, "index out of bounds") i = self.to_coords(space, w_arg) item = self.getitem(space, i) - assert isinstance(item, interp_boxes.W_GenericBox) + assert isinstance(item, boxes.W_GenericBox) return item.item(space) raise OperationError(space.w_NotImplementedError, space.wrap( "non-int arg not supported")) @@ -565,15 +564,15 @@ def descr_astype(self, space, w_dtype): cur_dtype = self.get_dtype() - new_dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + new_dtype = space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) if new_dtype.num == NPY.VOID: raise oefmt(space.w_NotImplementedError, "astype(%s) not implemented yet", new_dtype.get_name()) if new_dtype.num == NPY.STRING and new_dtype.elsize == 0: if cur_dtype.num == NPY.STRING: - new_dtype = interp_dtype.variable_dtype(space, + new_dtype = descriptor.variable_dtype(space, 'S' + str(cur_dtype.elsize)) impl = self.implementation new_impl = impl.astype(space, new_dtype) @@ -608,7 +607,7 @@ 
min = convert_to_array(space, w_min) max = convert_to_array(space, w_max) shape = shape_agreement_multiple(space, [self, min, max, w_out]) - out = interp_dtype.dtype_agreement(space, [self, min, max], shape, + out = descriptor.dtype_agreement(space, [self, min, max], shape, w_out) loop.clip(space, self, shape, min, max, out) return out @@ -633,7 +632,7 @@ if axis1 == axis2: raise OperationError(space.w_ValueError, space.wrap( "axis1 and axis2 cannot be the same")) - return interp_arrayops.diagonal(space, self.implementation, offset, + return arrayops.diagonal(space, self.implementation, offset, axis1, axis2) @unwrap_spec(offset=int, axis1=int, axis2=int) @@ -685,16 +684,16 @@ if self.get_dtype().is_bool(): #numpy promotes bool.round() to float16. Go figure. w_out = W_NDimArray.from_shape(space, self.get_shape(), - interp_dtype.get_dtype_cache(space).w_float16dtype) + descriptor.get_dtype_cache(space).w_float16dtype) else: w_out = None elif not isinstance(w_out, W_NDimArray): raise OperationError(space.w_TypeError, space.wrap( "return arrays must be of ArrayType")) - out = interp_dtype.dtype_agreement(space, [self], self.get_shape(), + out = descriptor.dtype_agreement(space, [self], self.get_shape(), w_out) if out.get_dtype().is_bool() and self.get_dtype().is_bool(): - calc_dtype = interp_dtype.get_dtype_cache(space).w_longdtype + calc_dtype = descriptor.get_dtype_cache(space).w_longdtype else: calc_dtype = out.get_dtype() @@ -769,8 +768,8 @@ else: raise if w_dtype: - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), + dtype = space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) else: dtype = self.get_dtype() @@ -813,7 +812,7 @@ def _unaryop_impl(ufunc_name): def impl(self, space, w_out=None): - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + return getattr(ufuncs.get(space), ufunc_name).call(space, [self, w_out]) return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) @@ -833,7 +832,7 @@ def _binop_impl(ufunc_name): def impl(self, space, w_other, w_out=None): - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + return getattr(ufuncs.get(space), ufunc_name).call(space, [self, w_other, w_out]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) @@ -877,7 +876,7 @@ def _binop_inplace_impl(ufunc_name): def impl(self, space, w_other): w_out = self - ufunc = getattr(interp_ufuncs.get(space), ufunc_name) + ufunc = getattr(ufuncs.get(space), ufunc_name) return ufunc.call(space, [self, w_other, w_out]) return func_with_new_name(impl, "binop_inplace_%s_impl" % ufunc_name) @@ -898,7 +897,7 @@ def _binop_right_impl(ufunc_name): def impl(self, space, w_other, w_out=None): w_other = convert_to_array(space, w_other) - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self, w_out]) + return getattr(ufuncs.get(space), ufunc_name).call(space, [w_other, self, w_out]) return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) descr_radd = _binop_right_impl("add") @@ -936,7 +935,7 @@ w_res = self.descr_mul(space, other) assert isinstance(w_res, W_NDimArray) return w_res.descr_sum(space, space.wrap(-1), out) - dtype = interp_ufuncs.find_binop_result_dtype(space, self.get_dtype(), + dtype = ufuncs.find_binop_result_dtype(space, self.get_dtype(), other.get_dtype()) if self.get_size() < 1 and other.get_size() < 1: # numpy compatability @@ -992,7 +991,7 @@ 'output must be an array')) else: out = w_out - return 
getattr(interp_ufuncs.get(space), ufunc_name).reduce( + return getattr(ufuncs.get(space), ufunc_name).reduce( space, self, w_axis, keepdims, out, w_dtype, cumulative=cumulative) return func_with_new_name(impl, "reduce_%s_impl_%d" % (ufunc_name, cumulative)) @@ -1067,7 +1066,7 @@ "only integer arrays with one element " "can be converted to an index")) value = self.get_scalar_value() - assert isinstance(value, interp_boxes.W_GenericBox) + assert isinstance(value, boxes.W_GenericBox) return value.item(space) def descr_reduce(self, space): @@ -1117,7 +1116,7 @@ dtype = space.getitem(w_state, space.wrap(base_index+1)) #isfortran = space.getitem(w_state, space.wrap(base_index+2)) storage = space.getitem(w_state, space.wrap(base_index+3)) - if not isinstance(dtype, interp_dtype.W_Dtype): + if not isinstance(dtype, descriptor.W_Dtype): raise OperationError(space.w_ValueError, space.wrap( "__setstate__(self, (shape, dtype, .. called with improper dtype '%r'" % dtype)) self.implementation = W_NDimArray.from_shape_and_storage(space, @@ -1140,8 +1139,8 @@ offset=0, w_strides=None, w_order=None): from pypy.module.micronumpy.concrete import ConcreteArray from pypy.module.micronumpy.support import calc_strides - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + dtype = space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) shape = _find_shape(space, w_shape, dtype) if not space.is_none(w_buffer): @@ -1192,8 +1191,8 @@ PyPy-only implementation detail. """ storage = rffi.cast(RAW_STORAGE_PTR, addr) - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), + dtype = space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) shape = _find_shape(space, w_shape, dtype) if w_subtype: @@ -1413,7 +1412,7 @@ raise oefmt(space.w_ValueError, "object __array__ method not producing an array") - dtype = interp_dtype.decode_w_dtype(space, w_dtype) + dtype = descriptor.decode_w_dtype(space, w_dtype) if space.is_none(w_order): order = 'C' @@ -1445,12 +1444,12 @@ for w_elem in elems_w: if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar(): w_elem = w_elem.get_scalar_value() - dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, dtype) + dtype = ufuncs.find_dtype_for_scalar(space, w_elem, dtype) if dtype is None: - dtype = interp_dtype.get_dtype_cache(space).w_float64dtype + dtype = descriptor.get_dtype_cache(space).w_float64dtype elif dtype.is_str_or_unicode() and dtype.elsize < 1: # promote S0 -> S1, U0 -> U1 - dtype = interp_dtype.variable_dtype(space, dtype.char + '1') + dtype = descriptor.variable_dtype(space, dtype.char + '1') if ndmin > len(shape): shape = [1] * (ndmin - len(shape)) + shape @@ -1462,10 +1461,10 @@ return w_arr def zeros(space, w_shape, w_dtype=None, w_order=None): - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + dtype = space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) if dtype.is_str_or_unicode() and dtype.elsize < 1: - dtype = interp_dtype.variable_dtype(space, dtype.char + '1') + dtype = descriptor.variable_dtype(space, dtype.char + '1') shape = _find_shape(space, w_shape, dtype) return W_NDimArray.from_shape(space, shape, dtype=dtype) @@ -1475,16 +1474,16 @@ if w_dtype is None: dtype = w_a.get_dtype() else: - dtype = 
space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + dtype = space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) if dtype.is_str_or_unicode() and dtype.elsize < 1: - dtype = interp_dtype.variable_dtype(space, dtype.char + '1') + dtype = descriptor.variable_dtype(space, dtype.char + '1') return W_NDimArray.from_shape(space, w_a.get_shape(), dtype=dtype, w_instance=w_a if subok else None) def build_scalar(space, w_dtype, w_state): from rpython.rtyper.lltypesystem import rffi, lltype - if not isinstance(w_dtype, interp_dtype.W_Dtype): + if not isinstance(w_dtype, descriptor.W_Dtype): raise oefmt(space.w_TypeError, "argument 1 must be numpy.dtype, not %T", w_dtype) if w_dtype.elsize == 0: diff --git a/pypy/module/micronumpy/sort.py b/pypy/module/micronumpy/sort.py --- a/pypy/module/micronumpy/sort.py +++ b/pypy/module/micronumpy/sort.py @@ -12,7 +12,7 @@ from rpython.rlib.objectmodel import specialize from pypy.interpreter.error import OperationError, oefmt from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy import interp_dtype, types, constants as NPY +from pypy.module.micronumpy import descriptor, types, constants as NPY from pypy.module.micronumpy.iter import AxisIterator INT_SIZE = rffi.sizeof(lltype.Signed) @@ -71,7 +71,7 @@ class ArgArrayRepWithStorage(Repr): def __init__(self, index_stride_size, stride_size, size): start = 0 - dtype = interp_dtype.get_dtype_cache(space).w_longdtype + dtype = descriptor.get_dtype_cache(space).w_longdtype indexes = dtype.itemtype.malloc(size * dtype.elsize) values = alloc_raw_storage(size * stride_size, track_allocation=False) @@ -132,7 +132,7 @@ else: axis = space.int_w(w_axis) # create array of indexes - dtype = interp_dtype.get_dtype_cache(space).w_longdtype + dtype = descriptor.get_dtype_cache(space).w_longdtype index_arr = W_NDimArray.from_shape(space, arr.get_shape(), dtype) storage = index_arr.implementation.get_storage() if len(arr.get_shape()) == 1: diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_ndarray.py rename from pypy/module/micronumpy/test/test_numarray.py rename to pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -4,7 +4,7 @@ from pypy.conftest import option from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy.strides import Chunk, Chunks -from pypy.module.micronumpy.interp_numarray import W_NDimArray +from pypy.module.micronumpy.ndarray import W_NDimArray from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest @@ -197,7 +197,7 @@ def test_from_shape_and_storage(self): from rpython.rlib.rawstorage import alloc_raw_storage, raw_storage_setitem from rpython.rtyper.lltypesystem import rffi - from pypy.module.micronumpy.interp_dtype import get_dtype_cache + from pypy.module.micronumpy.descriptor import get_dtype_cache storage = alloc_raw_storage(4, track_allocation=False, zero=True) for i in range(4): raw_storage_setitem(storage, i, rffi.cast(rffi.UCHAR, i)) diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -1,7 +1,7 @@ from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest -from pypy.module.micronumpy.interp_ufuncs import (find_binop_result_dtype, 
+from pypy.module.micronumpy.ufuncs import (find_binop_result_dtype, find_unaryop_result_dtype) -from pypy.module.micronumpy.interp_dtype import get_dtype_cache +from pypy.module.micronumpy.descriptor import get_dtype_cache class TestUfuncCoercion(object): diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -5,7 +5,7 @@ import py from rpython.jit.metainterp.test.support import LLJitMixin from rpython.jit.metainterp.warmspot import reset_jit, get_stats -from pypy.module.micronumpy import interp_boxes +from pypy.module.micronumpy import boxes from pypy.module.micronumpy.compile import FakeSpace, Parser, InterpreterState from pypy.module.micronumpy.base import W_NDimArray @@ -48,11 +48,11 @@ w_res = interp.results[-1] if isinstance(w_res, W_NDimArray): w_res = w_res.create_iter().getitem() - if isinstance(w_res, interp_boxes.W_Float64Box): + if isinstance(w_res, boxes.W_Float64Box): return w_res.value - if isinstance(w_res, interp_boxes.W_Int64Box): + if isinstance(w_res, boxes.W_Int64Box): return float(w_res.value) - elif isinstance(w_res, interp_boxes.W_BoolBox): + elif isinstance(w_res, boxes.W_BoolBox): return float(w_res.value) raise TypeError(w_res) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -2,7 +2,7 @@ import math from pypy.interpreter.error import OperationError, oefmt -from pypy.module.micronumpy import interp_boxes +from pypy.module.micronumpy import boxes from pypy.module.micronumpy import support from pypy.module.micronumpy.concrete import SliceArray, VoidBoxStorage from pypy.objspace.std.floatobject import float2string @@ -303,7 +303,7 @@ class Bool(BaseType, Primitive): T = lltype.Bool - BoxType = interp_boxes.W_BoolBox + BoxType = boxes.W_BoolBox format_code = "?" 
True = BoxType(True) @@ -537,32 +537,32 @@ class Int8(BaseType, Integer): T = rffi.SIGNEDCHAR - BoxType = interp_boxes.W_Int8Box + BoxType = boxes.W_Int8Box format_code = "b" class UInt8(BaseType, Integer): T = rffi.UCHAR - BoxType = interp_boxes.W_UInt8Box + BoxType = boxes.W_UInt8Box format_code = "B" class Int16(BaseType, Integer): T = rffi.SHORT - BoxType = interp_boxes.W_Int16Box + BoxType = boxes.W_Int16Box format_code = "h" class UInt16(BaseType, Integer): T = rffi.USHORT - BoxType = interp_boxes.W_UInt16Box + BoxType = boxes.W_UInt16Box format_code = "H" class Int32(BaseType, Integer): T = rffi.INT - BoxType = interp_boxes.W_Int32Box + BoxType = boxes.W_Int32Box format_code = "i" class UInt32(BaseType, Integer): T = rffi.UINT - BoxType = interp_boxes.W_UInt32Box + BoxType = boxes.W_UInt32Box format_code = "I" def _int64_coerce(self, space, w_item): @@ -580,7 +580,7 @@ class Int64(BaseType, Integer): T = rffi.LONGLONG - BoxType = interp_boxes.W_Int64Box + BoxType = boxes.W_Int64Box format_code = "q" if LONG_BIT == 32: @@ -601,14 +601,14 @@ class UInt64(BaseType, Integer): T = rffi.ULONGLONG - BoxType = interp_boxes.W_UInt64Box + BoxType = boxes.W_UInt64Box format_code = "Q" _coerce = func_with_new_name(_uint64_coerce, '_coerce') class Long(BaseType, Integer): T = rffi.LONG - BoxType = interp_boxes.W_LongBox + BoxType = boxes.W_LongBox format_code = "l" def _ulong_coerce(self, space, w_item): @@ -626,7 +626,7 @@ class ULong(BaseType, Integer): T = rffi.ULONG - BoxType = interp_boxes.W_ULongBox + BoxType = boxes.W_ULongBox format_code = "L" _coerce = func_with_new_name(_ulong_coerce, '_coerce') @@ -960,7 +960,7 @@ class Float16(BaseType, Float): _STORAGE_T = rffi.USHORT T = rffi.SHORT - BoxType = interp_boxes.W_Float16Box + BoxType = boxes.W_Float16Box @specialize.argtype(1) def box(self, value): @@ -1000,12 +1000,12 @@ class Float32(BaseType, Float): T = rffi.FLOAT - BoxType = interp_boxes.W_Float32Box + BoxType = boxes.W_Float32Box format_code = "f" class Float64(BaseType, Float): T = rffi.DOUBLE - BoxType = interp_boxes.W_Float64Box + BoxType = boxes.W_Float64Box format_code = "d" class ComplexFloating(object): @@ -1569,32 +1569,32 @@ class Complex64(ComplexFloating, BaseType): T = rffi.FLOAT - BoxType = interp_boxes.W_Complex64Box - ComponentBoxType = interp_boxes.W_Float32Box + BoxType = boxes.W_Complex64Box + ComponentBoxType = boxes.W_Float32Box class Complex128(ComplexFloating, BaseType): T = rffi.DOUBLE - BoxType = interp_boxes.W_Complex128Box - ComponentBoxType = interp_boxes.W_Float64Box + BoxType = boxes.W_Complex128Box + ComponentBoxType = boxes.W_Float64Box -if interp_boxes.long_double_size == 8: +if boxes.long_double_size == 8: class FloatLong(BaseType, Float): T = rffi.DOUBLE - BoxType = interp_boxes.W_FloatLongBox + BoxType = boxes.W_FloatLongBox format_code = "d" class ComplexLong(ComplexFloating, BaseType): T = rffi.DOUBLE - BoxType = interp_boxes.W_ComplexLongBox - ComponentBoxType = interp_boxes.W_FloatLongBox + BoxType = boxes.W_ComplexLongBox + ComponentBoxType = boxes.W_FloatLongBox -elif interp_boxes.long_double_size in (12, 16): +elif boxes.long_double_size in (12, 16): class FloatLong(BaseType, Float): T = rffi.LONGDOUBLE - BoxType = interp_boxes.W_FloatLongBox + BoxType = boxes.W_FloatLongBox def runpack_str(self, space, s): - assert len(s) == interp_boxes.long_double_size + assert len(s) == boxes.long_double_size fval = self.box(unpack_float80(s, native_is_bigendian)) if not self.native: fval = self.byteswap(fval) @@ -1608,8 +1608,8 @@ class 
ComplexLong(ComplexFloating, BaseType): T = rffi.LONGDOUBLE - BoxType = interp_boxes.W_ComplexLongBox - ComponentBoxType = interp_boxes.W_FloatLongBox + BoxType = boxes.W_ComplexLongBox + ComponentBoxType = boxes.W_FloatLongBox class FlexibleType(BaseType): def get_element_size(self): @@ -1618,7 +1618,7 @@ @jit.unroll_safe def to_str(self, item): builder = StringBuilder() - assert isinstance(item, interp_boxes.W_FlexibleBox) + assert isinstance(item, boxes.W_FlexibleBox) i = item.ofs end = i + item.dtype.elsize while i < end: @@ -1651,7 +1651,7 @@ @jit.unroll_safe def coerce(self, space, dtype, w_item): - if isinstance(w_item, interp_boxes.W_StringBox): + if isinstance(w_item, boxes.W_StringBox): return w_item if w_item is None: w_item = space.wrap('') @@ -1662,23 +1662,23 @@ arr.storage[i] = arg[i] for j in range(j, dtype.elsize): arr.storage[j] = '\x00' - return interp_boxes.W_StringBox(arr, 0, arr.dtype) + return boxes.W_StringBox(arr, 0, arr.dtype) def store(self, arr, i, offset, box): - assert isinstance(box, interp_boxes.W_StringBox) + assert isinstance(box, boxes.W_StringBox) size = min(arr.dtype.elsize - offset, box.arr.size - box.ofs) return self._store(arr.storage, i, offset, box, size) @jit.unroll_safe def _store(self, storage, i, offset, box, size): - assert isinstance(box, interp_boxes.W_StringBox) + assert isinstance(box, boxes.W_StringBox) for k in range(size): storage[k + offset + i] = box.arr.storage[k + box.ofs] def read(self, arr, i, offset, dtype=None): if dtype is None: dtype = arr.dtype - return interp_boxes.W_StringBox(arr, i + offset, dtype) + return boxes.W_StringBox(arr, i + offset, dtype) def str_format(self, item): builder = StringBuilder() @@ -1743,7 +1743,7 @@ @jit.unroll_safe def coerce(self, space, dtype, w_item): - if isinstance(w_item, interp_boxes.W_UnicodeBox): + if isinstance(w_item, boxes.W_UnicodeBox): return w_item raise OperationError(space.w_NotImplementedError, space.wrap( "coerce (probably from set_item) not implemented for unicode type")) @@ -1753,7 +1753,7 @@ def _coerce(self, space, arr, ofs, dtype, w_items, shape): # TODO: Make sure the shape and the array match - from interp_dtype import W_Dtype + from pypy.module.micronumpy.descriptor import W_Dtype if w_items is not None: items_w = space.fixedview(w_items) else: @@ -1778,12 +1778,12 @@ def coerce(self, space, dtype, w_items): arr = VoidBoxStorage(dtype.elsize, dtype) self._coerce(space, arr, 0, dtype, w_items, dtype.shape) - return interp_boxes.W_VoidBox(arr, 0, dtype) + return boxes.W_VoidBox(arr, 0, dtype) @jit.unroll_safe def store(self, arr, i, ofs, box): assert i == 0 - assert isinstance(box, interp_boxes.W_VoidBox) + assert isinstance(box, boxes.W_VoidBox) assert box.dtype is box.arr.dtype for k in range(box.arr.dtype.elsize): arr.storage[k + ofs] = box.arr.storage[k + box.ofs] @@ -1802,11 +1802,11 @@ def read(self, arr, i, offset, dtype=None): if dtype is None: dtype = arr.dtype - return interp_boxes.W_VoidBox(arr, i + offset, dtype) + return boxes.W_VoidBox(arr, i + offset, dtype) @jit.unroll_safe def str_format(self, box): - assert isinstance(box, interp_boxes.W_VoidBox) + assert isinstance(box, boxes.W_VoidBox) arr = self.readarray(box.arr, box.ofs, 0, box.dtype) return arr.dump_data(prefix='', suffix='') @@ -1815,7 +1815,7 @@ "Void arrays return a buffer object for item(), unless fields are defined, in which case a tuple is returned." 
''' - assert isinstance(item, interp_boxes.W_VoidBox) + assert isinstance(item, boxes.W_VoidBox) dt = item.arr.dtype ret_unwrapped = [] for name in dt.names: @@ -1824,7 +1824,7 @@ read_val = dtype.itemtype.readarray(item.arr, ofs, 0, dtype) else: read_val = dtype.itemtype.read(item.arr, ofs, 0, dtype) - if isinstance (read_val, interp_boxes.W_StringBox): + if isinstance (read_val, boxes.W_StringBox): # StringType returns a str read_val = space.wrap(dtype.itemtype.to_str(read_val)) ret_unwrapped = ret_unwrapped + [read_val,] @@ -1839,12 +1839,12 @@ def read(self, arr, i, offset, dtype=None): if dtype is None: dtype = arr.dtype - return interp_boxes.W_VoidBox(arr, i + offset, dtype) + return boxes.W_VoidBox(arr, i + offset, dtype) @jit.unroll_safe def coerce(self, space, dtype, w_item): from pypy.module.micronumpy.base import W_NDimArray - if isinstance(w_item, interp_boxes.W_VoidBox): + if isinstance(w_item, boxes.W_VoidBox): return w_item if w_item is not None: if space.isinstance_w(w_item, space.w_tuple): @@ -1868,14 +1868,14 @@ except IndexError: w_box = itemtype.coerce(space, subdtype, None) itemtype.store(arr, 0, ofs, w_box) - return interp_boxes.W_VoidBox(arr, 0, dtype) + return boxes.W_VoidBox(arr, 0, dtype) def runpack_str(self, space, s): raise oefmt(space.w_NotImplementedError, "fromstring not implemented for record types") def store(self, arr, i, ofs, box): - assert isinstance(box, interp_boxes.W_VoidBox) + assert isinstance(box, boxes.W_VoidBox) self._store(arr.storage, i, ofs, box, box.dtype.elsize) @jit.unroll_safe @@ -1884,7 +1884,7 @@ storage[k + i + ofs] = box.arr.storage[k + box.ofs] def fill(self, storage, width, box, start, stop, offset): - assert isinstance(box, interp_boxes.W_VoidBox) + assert isinstance(box, boxes.W_VoidBox) assert width == box.dtype.elsize for i in xrange(start, stop, width): self._store(storage, i, offset, box, width) @@ -1894,7 +1894,7 @@ return w_v def to_builtin_type(self, space, box): - assert isinstance(box, interp_boxes.W_VoidBox) + assert isinstance(box, boxes.W_VoidBox) items = [] dtype = box.dtype for name in dtype.names: @@ -1906,7 +1906,7 @@ @jit.unroll_safe def str_format(self, box): - assert isinstance(box, interp_boxes.W_VoidBox) + assert isinstance(box, boxes.W_VoidBox) pieces = ["("] first = True for name in box.dtype.names: @@ -1922,8 +1922,8 @@ return "".join(pieces) def eq(self, v1, v2): - assert isinstance(v1, interp_boxes.W_VoidBox) - assert isinstance(v2, interp_boxes.W_VoidBox) + assert isinstance(v1, boxes.W_VoidBox) + assert isinstance(v2, boxes.W_VoidBox) s1 = v1.dtype.elsize s2 = v2.dtype.elsize assert s1 == s2 diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/ufuncs.py rename from pypy/module/micronumpy/interp_ufuncs.py rename to pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -2,7 +2,7 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty -from pypy.module.micronumpy import interp_boxes, interp_dtype, loop +from pypy.module.micronumpy import boxes, descriptor, loop from rpython.rlib import jit from rpython.rlib.rarithmetic import LONG_BIT from rpython.tool.sourcetools import func_with_new_name @@ -149,7 +149,7 @@ array([[ 1, 5], [ 9, 13]]) """ - from pypy.module.micronumpy.interp_numarray import W_NDimArray + from pypy.module.micronumpy.ndarray import W_NDimArray if w_axis is 
None: w_axis = space.wrap(0) if space.is_none(w_out): @@ -177,10 +177,10 @@ shapelen = len(obj_shape) axis = unwrap_axis_arg(space, shapelen, w_axis) assert axis >= 0 - dtype = interp_dtype.decode_w_dtype(space, dtype) + dtype = descriptor.decode_w_dtype(space, dtype) if dtype is None: if self.comparison_func: - dtype = interp_dtype.get_dtype_cache(space).w_booldtype + dtype = descriptor.get_dtype_cache(space).w_booldtype else: dtype = find_unaryop_result_dtype( space, obj.get_dtype(), @@ -308,14 +308,14 @@ # raise oefmt(space.w_TypeError, # "Cannot cast ufunc %s output from dtype('%s') to dtype('%s') with casting rule 'same_kind'", self.name, w_obj.get_dtype().name, res_dtype.name) elif self.bool_result: - res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype + res_dtype = descriptor.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if self.complex_to_float and calc_dtype.is_complex(): if calc_dtype.num == NPY.CFLOAT: - res_dtype = interp_dtype.get_dtype_cache(space).w_float32dtype + res_dtype = descriptor.get_dtype_cache(space).w_float32dtype else: - res_dtype = interp_dtype.get_dtype_cache(space).w_float64dtype + res_dtype = descriptor.get_dtype_cache(space).w_float64dtype if w_obj.is_scalar(): w_val = self.func(calc_dtype, w_obj.get_scalar_value().convert_to(space, calc_dtype)) @@ -418,7 +418,7 @@ out = w_out calc_dtype = out.get_dtype() if self.comparison_func: - res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype + res_dtype = descriptor.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if w_lhs.is_scalar() and w_rhs.is_scalar(): @@ -465,7 +465,7 @@ dt1, dt2 = dt2, dt1 # Some operations promote op(bool, bool) to return int8, rather than bool if promote_bools and (dt1.kind == dt2.kind == NPY.GENBOOLLTR): - return interp_dtype.get_dtype_cache(space).w_int8dtype + return descriptor.get_dtype_cache(space).w_int8dtype # Everything numeric promotes to complex if dt2.is_complex() or dt1.is_complex(): @@ -473,16 +473,16 @@ dt1, dt2 = dt2, dt1 if dt2.num == NPY.CFLOAT: if dt1.num == NPY.DOUBLE: - return interp_dtype.get_dtype_cache(space).w_complex128dtype + return descriptor.get_dtype_cache(space).w_complex128dtype elif dt1.num == NPY.LONGDOUBLE: - return interp_dtype.get_dtype_cache(space).w_complexlongdtype - return interp_dtype.get_dtype_cache(space).w_complex64dtype + return descriptor.get_dtype_cache(space).w_complexlongdtype + return descriptor.get_dtype_cache(space).w_complex64dtype elif dt2.num == NPY.CDOUBLE: if dt1.num == NPY.LONGDOUBLE: - return interp_dtype.get_dtype_cache(space).w_complexlongdtype - return interp_dtype.get_dtype_cache(space).w_complex128dtype + return descriptor.get_dtype_cache(space).w_complexlongdtype + return descriptor.get_dtype_cache(space).w_complex128dtype elif dt2.num == NPY.CLONGDOUBLE: - return interp_dtype.get_dtype_cache(space).w_complexlongdtype + return descriptor.get_dtype_cache(space).w_complexlongdtype else: raise OperationError(space.w_TypeError, space.wrap("Unsupported types")) @@ -497,11 +497,11 @@ # Everything promotes to float, and bool promotes to everything. 
if dt2.kind == NPY.FLOATINGLTR or dt1.kind == NPY.GENBOOLLTR: if dt2.num == NPY.HALF and dt1.itemtype.get_element_size() == 2: - return interp_dtype.get_dtype_cache(space).w_float32dtype + return descriptor.get_dtype_cache(space).w_float32dtype if dt2.num == NPY.HALF and dt1.itemtype.get_element_size() >= 4: - return interp_dtype.get_dtype_cache(space).w_float64dtype + return descriptor.get_dtype_cache(space).w_float64dtype if dt2.num == NPY.FLOAT and dt1.itemtype.get_element_size() >= 4: - return interp_dtype.get_dtype_cache(space).w_float64dtype + return descriptor.get_dtype_cache(space).w_float64dtype return dt2 # for now this means mixing signed and unsigned @@ -527,7 +527,7 @@ else: # increase to the next signed type dtypenum = dt2.num + 1 - newdtype = interp_dtype.get_dtype_cache(space).dtypes_by_num[dtypenum] + newdtype = descriptor.get_dtype_cache(space).dtypes_by_num[dtypenum] if (newdtype.itemtype.get_element_size() > dt2.itemtype.get_element_size() or newdtype.kind == NPY.FLOATINGLTR): @@ -536,7 +536,7 @@ # we only promoted to long on 32-bit or to longlong on 64-bit # this is really for dealing with the Long and Ulong dtypes dtypenum += 2 - return interp_dtype.get_dtype_cache(space).dtypes_by_num[dtypenum] + return descriptor.get_dtype_cache(space).dtypes_by_num[dtypenum] @jit.unroll_safe def find_unaryop_result_dtype(space, dt, promote_to_float=False, @@ -544,34 +544,34 @@ if promote_to_largest: if dt.kind == NPY.GENBOOLLTR or dt.kind == NPY.SIGNEDLTR: if dt.elsize * 8 < LONG_BIT: - return interp_dtype.get_dtype_cache(space).w_longdtype + return descriptor.get_dtype_cache(space).w_longdtype elif dt.kind == NPY.UNSIGNEDLTR: if dt.elsize * 8 < LONG_BIT: - return interp_dtype.get_dtype_cache(space).w_ulongdtype + return descriptor.get_dtype_cache(space).w_ulongdtype else: assert dt.kind == NPY.FLOATINGLTR or dt.kind == NPY.COMPLEXLTR return dt if promote_bools and (dt.kind == NPY.GENBOOLLTR): - return interp_dtype.get_dtype_cache(space).w_int8dtype + return descriptor.get_dtype_cache(space).w_int8dtype if promote_to_float: if dt.kind == NPY.FLOATINGLTR or dt.kind == NPY.COMPLEXLTR: return dt if dt.num >= NPY.INT: - return interp_dtype.get_dtype_cache(space).w_float64dtype - for bytes, dtype in interp_dtype.get_dtype_cache(space).float_dtypes_by_num_bytes: + return descriptor.get_dtype_cache(space).w_float64dtype + for bytes, dtype in descriptor.get_dtype_cache(space).float_dtypes_by_num_bytes: if (dtype.kind == NPY.FLOATINGLTR and dtype.itemtype.get_element_size() > dt.itemtype.get_element_size()): return dtype return dt def find_dtype_for_scalar(space, w_obj, current_guess=None): - bool_dtype = interp_dtype.get_dtype_cache(space).w_booldtype - long_dtype = interp_dtype.get_dtype_cache(space).w_longdtype - int64_dtype = interp_dtype.get_dtype_cache(space).w_int64dtype - uint64_dtype = interp_dtype.get_dtype_cache(space).w_uint64dtype - complex_dtype = interp_dtype.get_dtype_cache(space).w_complex128dtype - float_dtype = interp_dtype.get_dtype_cache(space).w_float64dtype - if isinstance(w_obj, interp_boxes.W_GenericBox): + bool_dtype = descriptor.get_dtype_cache(space).w_booldtype + long_dtype = descriptor.get_dtype_cache(space).w_longdtype + int64_dtype = descriptor.get_dtype_cache(space).w_int64dtype + uint64_dtype = descriptor.get_dtype_cache(space).w_uint64dtype + complex_dtype = descriptor.get_dtype_cache(space).w_complex128dtype + float_dtype = descriptor.get_dtype_cache(space).w_float64dtype + if isinstance(w_obj, boxes.W_GenericBox): dtype = w_obj.get_dtype(space) return 
find_binop_result_dtype(space, dtype, current_guess) @@ -594,11 +594,11 @@ return complex_dtype elif space.isinstance_w(w_obj, space.w_str): if current_guess is None: - return interp_dtype.variable_dtype(space, + return descriptor.variable_dtype(space, 'S%d' % space.len_w(w_obj)) elif current_guess.num == NPY.STRING: if current_guess.elsize < space.len_w(w_obj): - return interp_dtype.variable_dtype(space, + return descriptor.variable_dtype(space, 'S%d' % space.len_w(w_obj)) return current_guess raise oefmt(space.w_NotImplementedError, @@ -607,7 +607,7 @@ def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func, bool_result): - dtype_cache = interp_dtype.get_dtype_cache(space) + dtype_cache = descriptor.get_dtype_cache(space) def get_op(dtype): try: return getattr(dtype.itemtype, op_name) @@ -743,7 +743,7 @@ identity = extra_kwargs.get("identity") if identity is not None: identity = \ - interp_dtype.get_dtype_cache(space).w_longdtype.box(identity) + descriptor.get_dtype_cache(space).w_longdtype.box(identity) extra_kwargs["identity"] = identity func = ufunc_dtype_caller(space, ufunc_name, op_name, argcount, From noreply at buildbot.pypy.org Thu Feb 27 07:11:27 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 07:11:27 +0100 (CET) Subject: [pypy-commit] pypy default: merge numpy-refactor Message-ID: <20140227061127.367EC1C03D5@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69496:05dd17c81105 Date: 2014-02-27 01:10 -0500 http://bitbucket.org/pypy/pypy/changeset/05dd17c81105/ Log: merge numpy-refactor diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -7,8 +7,9 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import cpython_api, Py_ssize_t, CANNOT_FAIL from pypy.module.cpyext.api import PyObject -from pypy.module.micronumpy.interp_numarray import W_NDimArray, array -from pypy.module.micronumpy.interp_dtype import get_dtype_cache, W_Dtype +from pypy.module.micronumpy.ndarray import W_NDimArray +from pypy.module.micronumpy.ctors import array +from pypy.module.micronumpy.descriptor import get_dtype_cache, W_Dtype from pypy.module.micronumpy.concrete import ConcreteArray from rpython.rlib.rawstorage import RAW_STORAGE_PTR diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -2,8 +2,8 @@ from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.module.micronumpy.interp_numarray import W_NDimArray -from pypy.module.micronumpy.interp_dtype import get_dtype_cache +from pypy.module.micronumpy.ndarray import W_NDimArray +from pypy.module.micronumpy.descriptor import get_dtype_cache def scalar(space): dtype = get_dtype_cache(space).w_float64dtype diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -4,24 +4,24 @@ class MultiArrayModule(MixedModule): appleveldefs = {'arange': 'app_numpy.arange'} interpleveldefs = { - 'ndarray': 'interp_numarray.W_NDimArray', - 'dtype': 'interp_dtype.W_Dtype', + 'ndarray': 'ndarray.W_NDimArray', + 'dtype': 'descriptor.W_Dtype', - 'array': 'interp_numarray.array', - 'zeros': 'interp_numarray.zeros', - 
'empty': 'interp_numarray.zeros', - 'empty_like': 'interp_numarray.empty_like', - '_reconstruct' : 'interp_numarray._reconstruct', - 'scalar' : 'interp_numarray.build_scalar', - 'dot': 'interp_arrayops.dot', - 'fromstring': 'interp_support.fromstring', - 'flatiter': 'interp_flatiter.W_FlatIterator', - 'concatenate': 'interp_arrayops.concatenate', - 'where': 'interp_arrayops.where', - 'count_nonzero': 'interp_arrayops.count_nonzero', + 'array': 'ctors.array', + 'zeros': 'ctors.zeros', + 'empty': 'ctors.zeros', + 'empty_like': 'ctors.empty_like', + '_reconstruct' : 'ndarray._reconstruct', + 'scalar' : 'ctors.build_scalar', + 'dot': 'arrayops.dot', + 'fromstring': 'ctors.fromstring', + 'flatiter': 'flatiter.W_FlatIterator', + 'concatenate': 'arrayops.concatenate', + 'where': 'arrayops.where', + 'count_nonzero': 'arrayops.count_nonzero', 'set_string_function': 'appbridge.set_string_function', - 'typeinfo': 'interp_dtype.get_dtype_cache(space).w_typeinfo', + 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', } @@ -107,7 +107,7 @@ ('real', 'real'), ('imag', 'imag'), ]: - interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl + interpleveldefs[exposed] = "ufuncs.get(space).%s" % impl class Module(MixedModule): diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/arrayops.py rename from pypy/module/micronumpy/interp_arrayops.py rename to pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -1,10 +1,10 @@ from pypy.module.micronumpy.base import convert_to_array, W_NDimArray -from pypy.module.micronumpy import loop, interp_dtype, interp_ufuncs +from pypy.module.micronumpy import loop, descriptor, ufuncs from pypy.module.micronumpy.strides import Chunk, Chunks, shape_agreement, \ shape_agreement_multiple from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec -from pypy.module.micronumpy.conversion_utils import clipmode_converter +from pypy.module.micronumpy.converters import clipmode_converter from pypy.module.micronumpy import support from pypy.module.micronumpy import constants as NPY @@ -84,7 +84,7 @@ if arr.get_dtype().itemtype.bool(arr.get_scalar_value()): return x return y - dtype = interp_ufuncs.find_binop_result_dtype(space, x.get_dtype(), + dtype = ufuncs.find_binop_result_dtype(space, x.get_dtype(), y.get_dtype()) shape = shape_agreement(space, arr.get_shape(), x) shape = shape_agreement(space, shape, y) @@ -147,7 +147,7 @@ elif dtype.is_record() or a_dt.is_record(): raise OperationError(space.w_TypeError, space.wrap("invalid type promotion")) - dtype = interp_ufuncs.find_binop_result_dtype(space, dtype, + dtype = ufuncs.find_binop_result_dtype(space, dtype, arr.get_dtype()) # concatenate does not handle ndarray subtypes, it always returns a ndarray res = W_NDimArray.from_shape(space, shape, dtype, 'C') @@ -202,7 +202,7 @@ raise OperationError(space.w_TypeError, space.wrap( "return arrays must be of ArrayType")) shape = shape_agreement_multiple(space, choices + [w_out]) - out = interp_dtype.dtype_agreement(space, choices, shape, w_out) + out = descriptor.dtype_agreement(space, choices, shape, w_out) dtype = out.get_dtype() mode = clipmode_converter(space, w_mode) loop.choose(space, arr, choices, shape, dtype, out, mode) diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -86,7 +86,7 @@ def convert_to_array(space, w_obj): - 
from pypy.module.micronumpy.interp_numarray import array + from pypy.module.micronumpy.ctors import array if isinstance(w_obj, W_NDimArray): return w_obj return array(space, w_obj) diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/boxes.py rename from pypy/module/micronumpy/interp_boxes.py rename to pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -12,7 +12,7 @@ from rpython.tool.sourcetools import func_with_new_name from pypy.module.micronumpy.concrete import VoidBoxStorage from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy.interp_flagsobj import W_FlagsObject +from pypy.module.micronumpy.flagsobj import W_FlagsObject from pypy.interpreter.mixedmodule import MixedModule from rpython.rtyper.lltypesystem import lltype from rpython.rlib.rstring import StringBuilder @@ -36,11 +36,11 @@ def new_dtype_getter(num): @specialize.memo() def _get_dtype(space): - from pypy.module.micronumpy.interp_dtype import get_dtype_cache + from pypy.module.micronumpy.descriptor import get_dtype_cache return get_dtype_cache(space).dtypes_by_num[num] def descr__new__(space, w_subtype, w_value=None): - from pypy.module.micronumpy.interp_numarray import array + from pypy.module.micronumpy.ctors import array dtype = _get_dtype(space) if not space.is_none(w_value): w_arr = array(space, w_value, dtype, copy=False) @@ -188,22 +188,22 @@ def _binop_impl(ufunc_name): def impl(self, space, w_other, w_out=None): - from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + from pypy.module.micronumpy import ufuncs + return getattr(ufuncs.get(space), ufunc_name).call(space, [self, w_other, w_out]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) def _binop_right_impl(ufunc_name): def impl(self, space, w_other, w_out=None): - from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + from pypy.module.micronumpy import ufuncs + return getattr(ufuncs.get(space), ufunc_name).call(space, [w_other, self, w_out]) return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) def _unaryop_impl(ufunc_name): def impl(self, space, w_out=None): - from pypy.module.micronumpy import interp_ufuncs - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + from pypy.module.micronumpy import ufuncs + return getattr(ufuncs.get(space), ufunc_name).call(space, [self, w_out]) return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) @@ -259,17 +259,17 @@ return space.newtuple([w_quotient, w_remainder]) def descr_any(self, space): - from pypy.module.micronumpy.interp_dtype import get_dtype_cache + from pypy.module.micronumpy.descriptor import get_dtype_cache value = space.is_true(self) return get_dtype_cache(space).w_booldtype.box(value) def descr_all(self, space): - from pypy.module.micronumpy.interp_dtype import get_dtype_cache + from pypy.module.micronumpy.descriptor import get_dtype_cache value = space.is_true(self) return get_dtype_cache(space).w_booldtype.box(value) def descr_zero(self, space): - from pypy.module.micronumpy.interp_dtype import get_dtype_cache + from pypy.module.micronumpy.descriptor import get_dtype_cache return get_dtype_cache(space).w_longdtype.box(0) def descr_ravel(self, space): @@ -285,13 +285,13 @@ return self.get_dtype(space).itemtype.round(self, decimals) def descr_astype(self, space, w_dtype): - from pypy.module.micronumpy.interp_dtype import 
W_Dtype + from pypy.module.micronumpy.descriptor import W_Dtype dtype = space.interp_w(W_Dtype, space.call_function(space.gettypefor(W_Dtype), w_dtype)) return self.convert_to(space, dtype) def descr_view(self, space, w_dtype): - from pypy.module.micronumpy.interp_dtype import W_Dtype + from pypy.module.micronumpy.descriptor import W_Dtype try: subclass = space.is_true(space.issubtype( w_dtype, space.gettypefor(W_NDimArray))) @@ -520,7 +520,7 @@ class W_StringBox(W_CharacterBox): def descr__new__string_box(space, w_subtype, w_arg): - from pypy.module.micronumpy.interp_dtype import new_string_dtype + from pypy.module.micronumpy.descriptor import new_string_dtype arg = space.str_w(space.str(w_arg)) arr = VoidBoxStorage(len(arg), new_string_dtype(space, len(arg))) for i in range(len(arg)): @@ -531,7 +531,7 @@ def descr__new__unicode_box(space, w_subtype, w_arg): raise OperationError(space.w_NotImplementedError, space.wrap("Unicode is not supported yet")) - from pypy.module.micronumpy.interp_dtype import new_unicode_dtype + from pypy.module.micronumpy.descriptor import new_unicode_dtype arg = space.unicode_w(space.unicode_from_object(w_arg)) # XXX size computations, we need tests anyway diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -7,12 +7,12 @@ from pypy.interpreter import special from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root from pypy.interpreter.error import OperationError -from pypy.module.micronumpy import interp_boxes -from pypy.module.micronumpy.interp_dtype import get_dtype_cache +from pypy.module.micronumpy import boxes +from pypy.module.micronumpy.descriptor import get_dtype_cache from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy.interp_numarray import array -from pypy.module.micronumpy.interp_arrayops import where -from pypy.module.micronumpy import interp_ufuncs +from pypy.module.micronumpy.ctors import array +from pypy.module.micronumpy.arrayops import where +from pypy.module.micronumpy import ufuncs from rpython.rlib.objectmodel import specialize, instantiate from rpython.rlib.nonconst import NonConstant @@ -151,7 +151,7 @@ def float(self, w_obj): if isinstance(w_obj, FloatObject): return w_obj - assert isinstance(w_obj, interp_boxes.W_GenericBox) + assert isinstance(w_obj, boxes.W_GenericBox) return self.float(w_obj.descr_float(self)) def float_w(self, w_obj): @@ -183,13 +183,13 @@ def int(self, w_obj): if isinstance(w_obj, IntObject): return w_obj - assert isinstance(w_obj, interp_boxes.W_GenericBox) + assert isinstance(w_obj, boxes.W_GenericBox) return self.int(w_obj.descr_int(self)) def str(self, w_obj): if isinstance(w_obj, StringObject): return w_obj - assert isinstance(w_obj, interp_boxes.W_GenericBox) + assert isinstance(w_obj, boxes.W_GenericBox) return self.str(w_obj.descr_str(self)) def is_true(self, w_obj): @@ -399,7 +399,7 @@ else: raise NotImplementedError if (not isinstance(w_res, W_NDimArray) and - not isinstance(w_res, interp_boxes.W_GenericBox)): + not isinstance(w_res, boxes.W_GenericBox)): dtype = get_dtype_cache(interp.space).w_float64dtype w_res = W_NDimArray.new_scalar(interp.space, dtype, w_res) return w_res @@ -554,10 +554,10 @@ elif self.name == "all": w_res = arr.descr_all(interp.space) elif self.name == "unegative": - neg = interp_ufuncs.get(interp.space).negative + neg = ufuncs.get(interp.space).negative w_res = neg.call(interp.space, [arr]) elif self.name == "cos": - cos = 
interp_ufuncs.get(interp.space).cos + cos = ufuncs.get(interp.space).cos w_res = cos.call(interp.space, [arr]) elif self.name == "flat": w_res = arr.descr_get_flatiter(interp.space) @@ -611,7 +611,7 @@ dtype = get_dtype_cache(interp.space).w_int64dtype elif isinstance(w_res, BoolObject): dtype = get_dtype_cache(interp.space).w_booldtype - elif isinstance(w_res, interp_boxes.W_GenericBox): + elif isinstance(w_res, boxes.W_GenericBox): dtype = w_res.get_dtype(interp.space) else: dtype = None diff --git a/pypy/module/micronumpy/conversion_utils.py b/pypy/module/micronumpy/converters.py rename from pypy/module/micronumpy/conversion_utils.py rename to pypy/module/micronumpy/converters.py --- a/pypy/module/micronumpy/conversion_utils.py +++ b/pypy/module/micronumpy/converters.py @@ -84,3 +84,15 @@ "duplicate value in 'axis'")) out[axis] = True return out + + +def shape_converter(space, w_size, dtype): + if space.is_none(w_size): + return [] + if space.isinstance_w(w_size, space.w_int): + return [space.int_w(w_size)] + shape = [] + for w_item in space.fixedview(w_size): + shape.append(space.int_w(w_item)) + shape += dtype.shape + return shape[:] diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/ctors.py rename from pypy/module/micronumpy/interp_support.py rename to pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/ctors.py @@ -1,12 +1,119 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec, WrappedDefault from rpython.rtyper.lltypesystem import lltype, rffi -from pypy.module.micronumpy import interp_dtype, loop +from pypy.module.micronumpy import descriptor, loop from rpython.rlib.rstring import strip_spaces -from rpython.rlib.rarithmetic import maxint -from pypy.module.micronumpy.base import W_NDimArray +from pypy.module.micronumpy import ufuncs +from pypy.module.micronumpy.base import W_NDimArray, convert_to_array +from pypy.module.micronumpy.converters import shape_converter +from pypy.module.micronumpy.strides import find_shape_and_elems -FLOAT_SIZE = rffi.sizeof(lltype.Float) + +def build_scalar(space, w_dtype, w_state): + from rpython.rtyper.lltypesystem import rffi, lltype + if not isinstance(w_dtype, descriptor.W_Dtype): + raise oefmt(space.w_TypeError, + "argument 1 must be numpy.dtype, not %T", w_dtype) + if w_dtype.elsize == 0: + raise oefmt(space.w_ValueError, "itemsize cannot be zero") + if not space.isinstance_w(w_state, space.w_str): + raise oefmt(space.w_TypeError, "initializing object must be a string") + if space.len_w(w_state) != w_dtype.elsize: + raise oefmt(space.w_ValueError, "initialization string is too small") + state = rffi.str2charp(space.str_w(w_state)) + box = w_dtype.itemtype.box_raw_data(state) + lltype.free(state, flavor="raw") + return box + + + at unwrap_spec(ndmin=int, copy=bool, subok=bool) +def array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False, + ndmin=0): + # for anything that isn't already an array, try __array__ method first + if not isinstance(w_object, W_NDimArray): + w___array__ = space.lookup(w_object, "__array__") + if w___array__ is not None: + if space.is_none(w_dtype): + w_dtype = space.w_None + w_array = space.get_and_call_function(w___array__, w_object, w_dtype) + if isinstance(w_array, W_NDimArray): + # feed w_array back into array() for other properties + return array(space, w_array, w_dtype, False, w_order, subok, ndmin) + else: + raise oefmt(space.w_ValueError, + "object 
__array__ method not producing an array") + + dtype = descriptor.decode_w_dtype(space, w_dtype) + + if space.is_none(w_order): + order = 'C' + else: + order = space.str_w(w_order) + if order != 'C': # or order != 'F': + raise oefmt(space.w_ValueError, "Unknown order: %s", order) + + # arrays with correct dtype + if isinstance(w_object, W_NDimArray) and \ + (space.is_none(w_dtype) or w_object.get_dtype() is dtype): + shape = w_object.get_shape() + if copy: + w_ret = w_object.descr_copy(space) + else: + if ndmin <= len(shape): + return w_object + new_impl = w_object.implementation.set_shape(space, w_object, shape) + w_ret = W_NDimArray(new_impl) + if ndmin > len(shape): + shape = [1] * (ndmin - len(shape)) + shape + w_ret.implementation = w_ret.implementation.set_shape(space, + w_ret, shape) + return w_ret + + # not an array or incorrect dtype + shape, elems_w = find_shape_and_elems(space, w_object, dtype) + if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): + for w_elem in elems_w: + if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar(): + w_elem = w_elem.get_scalar_value() + dtype = ufuncs.find_dtype_for_scalar(space, w_elem, dtype) + if dtype is None: + dtype = descriptor.get_dtype_cache(space).w_float64dtype + elif dtype.is_str_or_unicode() and dtype.elsize < 1: + # promote S0 -> S1, U0 -> U1 + dtype = descriptor.variable_dtype(space, dtype.char + '1') + + if ndmin > len(shape): + shape = [1] * (ndmin - len(shape)) + shape + w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) + arr_iter = w_arr.create_iter() + for w_elem in elems_w: + arr_iter.setitem(dtype.coerce(space, w_elem)) + arr_iter.next() + return w_arr + + +def zeros(space, w_shape, w_dtype=None, w_order=None): + dtype = space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) + if dtype.is_str_or_unicode() and dtype.elsize < 1: + dtype = descriptor.variable_dtype(space, dtype.char + '1') + shape = shape_converter(space, w_shape, dtype) + return W_NDimArray.from_shape(space, shape, dtype=dtype) + + + at unwrap_spec(subok=bool) +def empty_like(space, w_a, w_dtype=None, w_order=None, subok=True): + w_a = convert_to_array(space, w_a) + if w_dtype is None: + dtype = w_a.get_dtype() + else: + dtype = space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) + if dtype.is_str_or_unicode() and dtype.elsize < 1: + dtype = descriptor.variable_dtype(space, dtype.char + '1') + return W_NDimArray.from_shape(space, w_a.get_shape(), dtype=dtype, + w_instance=w_a if subok else None) + def _fromstring_text(space, s, count, sep, length, dtype): sep_stripped = strip_spaces(sep) @@ -58,6 +165,7 @@ return space.wrap(a) + def _fromstring_bin(space, s, count, length, dtype): itemsize = dtype.elsize assert itemsize >= 0 @@ -75,26 +183,13 @@ loop.fromstring_loop(space, a, dtype, itemsize, s) return space.wrap(a) + @unwrap_spec(s=str, count=int, sep=str, w_dtype=WrappedDefault(None)) def fromstring(space, s, w_dtype=None, count=-1, sep=''): - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype) - ) + dtype = space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) length = len(s) if sep == '': return _fromstring_bin(space, s, count, length, dtype) else: return _fromstring_text(space, s, count, sep, length, dtype) - -def unwrap_axis_arg(space, shapelen, w_axis): - if space.is_none(w_axis): - axis = maxint - else: - 
axis = space.int_w(w_axis) - if axis < -shapelen or axis >= shapelen: - raise oefmt(space.w_ValueError, - "axis entry %d is out of bounds [%d, %d)", - axis, -shapelen, shapelen) - if axis < 0: - axis += shapelen - return axis diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/descriptor.py rename from pypy/module/micronumpy/interp_dtype.py rename to pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/descriptor.py @@ -4,12 +4,12 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, interp_attrproperty_w) -from pypy.module.micronumpy import types, interp_boxes, base +from pypy.module.micronumpy import types, boxes, base from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_longlong, r_ulonglong from rpython.rlib import jit from pypy.module.micronumpy.appbridge import get_appbridge_cache -from pypy.module.micronumpy.conversion_utils import byteorder_converter +from pypy.module.micronumpy.converters import byteorder_converter from pypy.module.micronumpy import support from pypy.module.micronumpy import constants as NPY @@ -26,7 +26,7 @@ """ agree on dtype from a list of arrays. if out is allocated, use it's dtype, otherwise allocate a new one with agreed dtype """ - from pypy.module.micronumpy.interp_ufuncs import find_binop_result_dtype + from pypy.module.micronumpy.ufuncs import find_binop_result_dtype if not space.is_none(out): return out @@ -448,7 +448,7 @@ offset += subdtype.elsize names.append(fldname) return W_Dtype(types.RecordType(), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, - space.gettypefor(interp_boxes.W_VoidBox), + space.gettypefor(boxes.W_VoidBox), names=names, fields=fields, elsize=offset) @@ -490,7 +490,7 @@ return subdtype size *= subdtype.elsize return W_Dtype(types.VoidType(), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, - space.gettypefor(interp_boxes.W_VoidBox), + space.gettypefor(boxes.W_VoidBox), shape=shape, subdtype=subdtype, elsize=size) if space.is_none(w_dtype): @@ -606,7 +606,7 @@ num=NPY.STRING, kind=NPY.STRINGLTR, char=char, - w_box_type=space.gettypefor(interp_boxes.W_StringBox), + w_box_type=space.gettypefor(boxes.W_StringBox), ) @@ -618,7 +618,7 @@ num=NPY.UNICODE, kind=NPY.UNICODELTR, char=NPY.UNICODELTR, - w_box_type=space.gettypefor(interp_boxes.W_UnicodeBox), + w_box_type=space.gettypefor(boxes.W_UnicodeBox), ) @@ -629,7 +629,7 @@ num=NPY.VOID, kind=NPY.VOIDLTR, char=NPY.VOIDLTR, - w_box_type=space.gettypefor(interp_boxes.W_VoidBox), + w_box_type=space.gettypefor(boxes.W_VoidBox), ) @@ -640,119 +640,119 @@ num=NPY.BOOL, kind=NPY.GENBOOLLTR, char=NPY.BOOLLTR, - w_box_type=space.gettypefor(interp_boxes.W_BoolBox), + w_box_type=space.gettypefor(boxes.W_BoolBox), ) self.w_int8dtype = W_Dtype( types.Int8(), num=NPY.BYTE, kind=NPY.SIGNEDLTR, char=NPY.BYTELTR, - w_box_type=space.gettypefor(interp_boxes.W_Int8Box), + w_box_type=space.gettypefor(boxes.W_Int8Box), ) self.w_uint8dtype = W_Dtype( types.UInt8(), num=NPY.UBYTE, kind=NPY.UNSIGNEDLTR, char=NPY.UBYTELTR, - w_box_type=space.gettypefor(interp_boxes.W_UInt8Box), + w_box_type=space.gettypefor(boxes.W_UInt8Box), ) self.w_int16dtype = W_Dtype( types.Int16(), num=NPY.SHORT, kind=NPY.SIGNEDLTR, char=NPY.SHORTLTR, - w_box_type=space.gettypefor(interp_boxes.W_Int16Box), + w_box_type=space.gettypefor(boxes.W_Int16Box), ) self.w_uint16dtype = W_Dtype( types.UInt16(), num=NPY.USHORT, kind=NPY.UNSIGNEDLTR, 
char=NPY.USHORTLTR, - w_box_type=space.gettypefor(interp_boxes.W_UInt16Box), + w_box_type=space.gettypefor(boxes.W_UInt16Box), ) self.w_int32dtype = W_Dtype( types.Int32(), num=NPY.INT, kind=NPY.SIGNEDLTR, char=NPY.INTLTR, - w_box_type=space.gettypefor(interp_boxes.W_Int32Box), + w_box_type=space.gettypefor(boxes.W_Int32Box), ) self.w_uint32dtype = W_Dtype( types.UInt32(), num=NPY.UINT, kind=NPY.UNSIGNEDLTR, char=NPY.UINTLTR, - w_box_type=space.gettypefor(interp_boxes.W_UInt32Box), + w_box_type=space.gettypefor(boxes.W_UInt32Box), ) self.w_longdtype = W_Dtype( types.Long(), num=NPY.LONG, kind=NPY.SIGNEDLTR, char=NPY.LONGLTR, - w_box_type=space.gettypefor(interp_boxes.W_LongBox), + w_box_type=space.gettypefor(boxes.W_LongBox), ) self.w_ulongdtype = W_Dtype( types.ULong(), num=NPY.ULONG, kind=NPY.UNSIGNEDLTR, char=NPY.ULONGLTR, - w_box_type=space.gettypefor(interp_boxes.W_ULongBox), + w_box_type=space.gettypefor(boxes.W_ULongBox), ) self.w_int64dtype = W_Dtype( types.Int64(), num=NPY.LONGLONG, kind=NPY.SIGNEDLTR, char=NPY.LONGLONGLTR, - w_box_type=space.gettypefor(interp_boxes.W_Int64Box), + w_box_type=space.gettypefor(boxes.W_Int64Box), ) self.w_uint64dtype = W_Dtype( types.UInt64(), num=NPY.ULONGLONG, kind=NPY.UNSIGNEDLTR, char=NPY.ULONGLONGLTR, - w_box_type=space.gettypefor(interp_boxes.W_UInt64Box), + w_box_type=space.gettypefor(boxes.W_UInt64Box), ) self.w_float32dtype = W_Dtype( types.Float32(), num=NPY.FLOAT, kind=NPY.FLOATINGLTR, char=NPY.FLOATLTR, - w_box_type=space.gettypefor(interp_boxes.W_Float32Box), + w_box_type=space.gettypefor(boxes.W_Float32Box), ) self.w_float64dtype = W_Dtype( types.Float64(), num=NPY.DOUBLE, kind=NPY.FLOATINGLTR, char=NPY.DOUBLELTR, - w_box_type=space.gettypefor(interp_boxes.W_Float64Box), + w_box_type=space.gettypefor(boxes.W_Float64Box), ) self.w_floatlongdtype = W_Dtype( types.FloatLong(), num=NPY.LONGDOUBLE, kind=NPY.FLOATINGLTR, char=NPY.LONGDOUBLELTR, - w_box_type=space.gettypefor(interp_boxes.W_FloatLongBox), + w_box_type=space.gettypefor(boxes.W_FloatLongBox), ) self.w_complex64dtype = W_Dtype( types.Complex64(), num=NPY.CFLOAT, kind=NPY.COMPLEXLTR, char=NPY.CFLOATLTR, - w_box_type=space.gettypefor(interp_boxes.W_Complex64Box), + w_box_type=space.gettypefor(boxes.W_Complex64Box), ) self.w_complex128dtype = W_Dtype( types.Complex128(), num=NPY.CDOUBLE, kind=NPY.COMPLEXLTR, char=NPY.CDOUBLELTR, - w_box_type=space.gettypefor(interp_boxes.W_Complex128Box), + w_box_type=space.gettypefor(boxes.W_Complex128Box), ) self.w_complexlongdtype = W_Dtype( types.ComplexLong(), num=NPY.CLONGDOUBLE, kind=NPY.COMPLEXLTR, char=NPY.CLONGDOUBLELTR, - w_box_type=space.gettypefor(interp_boxes.W_ComplexLongBox), + w_box_type=space.gettypefor(boxes.W_ComplexLongBox), ) self.w_stringdtype = W_Dtype( types.StringType(), @@ -760,7 +760,7 @@ num=NPY.STRING, kind=NPY.STRINGLTR, char=NPY.STRINGLTR, - w_box_type=space.gettypefor(interp_boxes.W_StringBox), + w_box_type=space.gettypefor(boxes.W_StringBox), ) self.w_unicodedtype = W_Dtype( types.UnicodeType(), @@ -768,7 +768,7 @@ num=NPY.UNICODE, kind=NPY.UNICODELTR, char=NPY.UNICODELTR, - w_box_type=space.gettypefor(interp_boxes.W_UnicodeBox), + w_box_type=space.gettypefor(boxes.W_UnicodeBox), ) self.w_voiddtype = W_Dtype( types.VoidType(), @@ -776,28 +776,28 @@ num=NPY.VOID, kind=NPY.VOIDLTR, char=NPY.VOIDLTR, - w_box_type=space.gettypefor(interp_boxes.W_VoidBox), + w_box_type=space.gettypefor(boxes.W_VoidBox), ) self.w_float16dtype = W_Dtype( types.Float16(), num=NPY.HALF, kind=NPY.FLOATINGLTR, char=NPY.HALFLTR, - 
w_box_type=space.gettypefor(interp_boxes.W_Float16Box), + w_box_type=space.gettypefor(boxes.W_Float16Box), ) self.w_intpdtype = W_Dtype( types.Long(), num=NPY.LONG, kind=NPY.SIGNEDLTR, char=NPY.INTPLTR, - w_box_type=space.gettypefor(interp_boxes.W_LongBox), + w_box_type=space.gettypefor(boxes.W_LongBox), ) self.w_uintpdtype = W_Dtype( types.ULong(), num=NPY.ULONG, kind=NPY.UNSIGNEDLTR, char=NPY.UINTPLTR, - w_box_type=space.gettypefor(interp_boxes.W_ULongBox), + w_box_type=space.gettypefor(boxes.W_ULongBox), ) aliases = { NPY.BOOL: ['bool', 'bool8'], @@ -821,19 +821,19 @@ self.alternate_constructors = { NPY.BOOL: [space.w_bool], NPY.LONG: [space.w_int, - space.gettypefor(interp_boxes.W_IntegerBox), - space.gettypefor(interp_boxes.W_SignedIntegerBox)], - NPY.ULONG: [space.gettypefor(interp_boxes.W_UnsignedIntegerBox)], + space.gettypefor(boxes.W_IntegerBox), + space.gettypefor(boxes.W_SignedIntegerBox)], + NPY.ULONG: [space.gettypefor(boxes.W_UnsignedIntegerBox)], NPY.LONGLONG: [space.w_long], NPY.DOUBLE: [space.w_float, - space.gettypefor(interp_boxes.W_NumberBox), - space.gettypefor(interp_boxes.W_FloatingBox)], + space.gettypefor(boxes.W_NumberBox), + space.gettypefor(boxes.W_FloatingBox)], NPY.CDOUBLE: [space.w_complex, - space.gettypefor(interp_boxes.W_ComplexFloatingBox)], + space.gettypefor(boxes.W_ComplexFloatingBox)], NPY.STRING: [space.w_str, - space.gettypefor(interp_boxes.W_CharacterBox)], + space.gettypefor(boxes.W_CharacterBox)], NPY.UNICODE: [space.w_unicode], - NPY.VOID: [space.gettypefor(interp_boxes.W_GenericBox)], + NPY.VOID: [space.gettypefor(boxes.W_GenericBox)], #space.w_buffer, # XXX no buffer in space } float_dtypes = [self.w_float16dtype, self.w_float32dtype, @@ -909,16 +909,16 @@ } typeinfo_partial = { - 'Generic': interp_boxes.W_GenericBox, - 'Character': interp_boxes.W_CharacterBox, - 'Flexible': interp_boxes.W_FlexibleBox, - 'Inexact': interp_boxes.W_InexactBox, - 'Integer': interp_boxes.W_IntegerBox, - 'SignedInteger': interp_boxes.W_SignedIntegerBox, - 'UnsignedInteger': interp_boxes.W_UnsignedIntegerBox, - 'ComplexFloating': interp_boxes.W_ComplexFloatingBox, - 'Number': interp_boxes.W_NumberBox, - 'Floating': interp_boxes.W_FloatingBox + 'Generic': boxes.W_GenericBox, + 'Character': boxes.W_CharacterBox, + 'Flexible': boxes.W_FlexibleBox, + 'Inexact': boxes.W_InexactBox, + 'Integer': boxes.W_IntegerBox, + 'SignedInteger': boxes.W_SignedIntegerBox, + 'UnsignedInteger': boxes.W_UnsignedIntegerBox, + 'ComplexFloating': boxes.W_ComplexFloatingBox, + 'Number': boxes.W_NumberBox, + 'Floating': boxes.W_FloatingBox } w_typeinfo = space.newdict() for k, v in typeinfo_partial.iteritems(): diff --git a/pypy/module/micronumpy/interp_flagsobj.py b/pypy/module/micronumpy/flagsobj.py rename from pypy/module/micronumpy/interp_flagsobj.py rename to pypy/module/micronumpy/flagsobj.py diff --git a/pypy/module/micronumpy/interp_flatiter.py b/pypy/module/micronumpy/flatiter.py rename from pypy/module/micronumpy/interp_flatiter.py rename to pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/interp_flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -1,7 +1,7 @@ from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy import loop from pypy.module.micronumpy.concrete import BaseConcreteArray -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt class FakeArrayImplementation(BaseConcreteArray): @@ -23,6 +23,7 @@ assert isinstance(self.base(), W_NDimArray) return 
self.base().create_iter() + class W_FlatIterator(W_NDimArray): def __init__(self, arr): self.base = arr @@ -54,9 +55,8 @@ def descr_getitem(self, space, w_idx): if not (space.isinstance_w(w_idx, space.w_int) or - space.isinstance_w(w_idx, space.w_slice)): - raise OperationError(space.w_IndexError, - space.wrap('unsupported iterator index')) + space.isinstance_w(w_idx, space.w_slice)): + raise oefmt(space.w_IndexError, 'unsupported iterator index') self.reset() base = self.base start, stop, step, length = space.decode_index4(w_idx, base.get_size()) @@ -70,9 +70,8 @@ def descr_setitem(self, space, w_idx, w_value): if not (space.isinstance_w(w_idx, space.w_int) or - space.isinstance_w(w_idx, space.w_slice)): - raise OperationError(space.w_IndexError, - space.wrap('unsupported iterator index')) + space.isinstance_w(w_idx, space.w_slice)): + raise oefmt(space.w_IndexError, 'unsupported iterator index') base = self.base start, stop, step, length = space.decode_index4(w_idx, base.get_size()) arr = convert_to_array(space, w_value) @@ -84,4 +83,4 @@ def descr_base(self, space): return space.wrap(self.base) -# typedef is in interp_numarray, so we see the additional arguments +# typedef is in interp_ndarray, so we see the additional arguments diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/ndarray.py rename from pypy/module/micronumpy/interp_numarray.py rename to pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -6,34 +6,23 @@ WrappedDefault from pypy.module.micronumpy.base import W_NDimArray, convert_to_array,\ ArrayArgumentException, wrap_impl -from pypy.module.micronumpy import interp_dtype, interp_ufuncs, interp_boxes,\ - interp_arrayops -from pypy.module.micronumpy.strides import find_shape_and_elems,\ - get_shape_from_iterable, to_coords, shape_agreement, \ - shape_agreement_multiple -from pypy.module.micronumpy.interp_flagsobj import W_FlagsObject -from pypy.module.micronumpy.interp_flatiter import W_FlatIterator +from pypy.module.micronumpy import descriptor, ufuncs, boxes, arrayops +from pypy.module.micronumpy.strides import get_shape_from_iterable, to_coords, \ + shape_agreement, shape_agreement_multiple +from pypy.module.micronumpy.flagsobj import W_FlagsObject +from pypy.module.micronumpy.flatiter import W_FlatIterator from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy import loop -from pypy.module.micronumpy.interp_arrayops import repeat, choose, put +from pypy.module.micronumpy.arrayops import repeat, choose, put from rpython.tool.sourcetools import func_with_new_name from rpython.rlib import jit from rpython.rlib.rstring import StringBuilder from pypy.module.micronumpy.concrete import BaseConcreteArray -from pypy.module.micronumpy.conversion_utils import order_converter, multi_axis_converter +from pypy.module.micronumpy.converters import order_converter, shape_converter, \ + multi_axis_converter from pypy.module.micronumpy import support from pypy.module.micronumpy import constants as NPY -def _find_shape(space, w_size, dtype): - if space.is_none(w_size): - return [] - if space.isinstance_w(w_size, space.w_int): - return [space.int_w(w_size)] - shape = [] - for w_item in space.fixedview(w_size): - shape.append(space.int_w(w_item)) - shape += dtype.shape - return shape[:] def _match_dot_shapes(space, left, right): left_shape = left.get_shape() @@ -84,8 +73,8 @@ return self.implementation.dtype def descr_set_dtype(self, space, w_dtype): - 
dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + dtype = space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) if (dtype.elsize != self.get_dtype().elsize or dtype.is_flexible() or self.get_dtype().is_flexible()): raise OperationError(space.w_ValueError, space.wrap( @@ -424,7 +413,7 @@ return self.implementation.swapaxes(space, self, axis1, axis2) def descr_nonzero(self, space): - index_type = interp_dtype.get_dtype_cache(space).w_int64dtype + index_type = descriptor.get_dtype_cache(space).w_int64dtype return self.implementation.nonzero(space, index_type) def descr_tolist(self, space): @@ -492,7 +481,7 @@ if space.is_none(w_arg): if self.get_size() == 1: w_obj = self.get_scalar_value() - assert isinstance(w_obj, interp_boxes.W_GenericBox) + assert isinstance(w_obj, boxes.W_GenericBox) return w_obj.item(space) raise oefmt(space.w_ValueError, "can only convert an array of size 1 to a Python scalar") @@ -501,7 +490,7 @@ raise oefmt(space.w_IndexError, "index out of bounds") i = self.to_coords(space, w_arg) item = self.getitem(space, i) - assert isinstance(item, interp_boxes.W_GenericBox) + assert isinstance(item, boxes.W_GenericBox) return item.item(space) raise OperationError(space.w_NotImplementedError, space.wrap( "non-int arg not supported")) @@ -565,15 +554,15 @@ def descr_astype(self, space, w_dtype): cur_dtype = self.get_dtype() - new_dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) + new_dtype = space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) if new_dtype.num == NPY.VOID: raise oefmt(space.w_NotImplementedError, "astype(%s) not implemented yet", new_dtype.get_name()) if new_dtype.num == NPY.STRING and new_dtype.elsize == 0: if cur_dtype.num == NPY.STRING: - new_dtype = interp_dtype.variable_dtype(space, + new_dtype = descriptor.variable_dtype(space, 'S' + str(cur_dtype.elsize)) impl = self.implementation new_impl = impl.astype(space, new_dtype) @@ -608,7 +597,7 @@ min = convert_to_array(space, w_min) max = convert_to_array(space, w_max) shape = shape_agreement_multiple(space, [self, min, max, w_out]) - out = interp_dtype.dtype_agreement(space, [self, min, max], shape, + out = descriptor.dtype_agreement(space, [self, min, max], shape, w_out) loop.clip(space, self, shape, min, max, out) return out @@ -633,7 +622,7 @@ if axis1 == axis2: raise OperationError(space.w_ValueError, space.wrap( "axis1 and axis2 cannot be the same")) - return interp_arrayops.diagonal(space, self.implementation, offset, + return arrayops.diagonal(space, self.implementation, offset, axis1, axis2) @unwrap_spec(offset=int, axis1=int, axis2=int) @@ -685,16 +674,16 @@ if self.get_dtype().is_bool(): #numpy promotes bool.round() to float16. Go figure. 
w_out = W_NDimArray.from_shape(space, self.get_shape(), - interp_dtype.get_dtype_cache(space).w_float16dtype) + descriptor.get_dtype_cache(space).w_float16dtype) else: w_out = None elif not isinstance(w_out, W_NDimArray): raise OperationError(space.w_TypeError, space.wrap( "return arrays must be of ArrayType")) - out = interp_dtype.dtype_agreement(space, [self], self.get_shape(), + out = descriptor.dtype_agreement(space, [self], self.get_shape(), w_out) if out.get_dtype().is_bool() and self.get_dtype().is_bool(): - calc_dtype = interp_dtype.get_dtype_cache(space).w_longdtype + calc_dtype = descriptor.get_dtype_cache(space).w_longdtype else: calc_dtype = out.get_dtype() @@ -769,8 +758,8 @@ else: raise if w_dtype: - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), + dtype = space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) else: dtype = self.get_dtype() @@ -813,7 +802,7 @@ def _unaryop_impl(ufunc_name): def impl(self, space, w_out=None): - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + return getattr(ufuncs.get(space), ufunc_name).call(space, [self, w_out]) return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) @@ -833,7 +822,7 @@ def _binop_impl(ufunc_name): def impl(self, space, w_other, w_out=None): - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, + return getattr(ufuncs.get(space), ufunc_name).call(space, [self, w_other, w_out]) return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) @@ -877,7 +866,7 @@ def _binop_inplace_impl(ufunc_name): def impl(self, space, w_other): w_out = self - ufunc = getattr(interp_ufuncs.get(space), ufunc_name) + ufunc = getattr(ufuncs.get(space), ufunc_name) return ufunc.call(space, [self, w_other, w_out]) return func_with_new_name(impl, "binop_inplace_%s_impl" % ufunc_name) @@ -898,7 +887,7 @@ def _binop_right_impl(ufunc_name): def impl(self, space, w_other, w_out=None): w_other = convert_to_array(space, w_other) - return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self, w_out]) + return getattr(ufuncs.get(space), ufunc_name).call(space, [w_other, self, w_out]) return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) descr_radd = _binop_right_impl("add") @@ -936,7 +925,7 @@ w_res = self.descr_mul(space, other) assert isinstance(w_res, W_NDimArray) return w_res.descr_sum(space, space.wrap(-1), out) - dtype = interp_ufuncs.find_binop_result_dtype(space, self.get_dtype(), + dtype = ufuncs.find_binop_result_dtype(space, self.get_dtype(), other.get_dtype()) if self.get_size() < 1 and other.get_size() < 1: # numpy compatability @@ -992,7 +981,7 @@ 'output must be an array')) else: out = w_out - return getattr(interp_ufuncs.get(space), ufunc_name).reduce( + return getattr(ufuncs.get(space), ufunc_name).reduce( space, self, w_axis, keepdims, out, w_dtype, cumulative=cumulative) return func_with_new_name(impl, "reduce_%s_impl_%d" % (ufunc_name, cumulative)) @@ -1067,7 +1056,7 @@ "only integer arrays with one element " "can be converted to an index")) value = self.get_scalar_value() - assert isinstance(value, interp_boxes.W_GenericBox) + assert isinstance(value, boxes.W_GenericBox) return value.item(space) def descr_reduce(self, space): @@ -1117,7 +1106,7 @@ dtype = space.getitem(w_state, space.wrap(base_index+1)) #isfortran = space.getitem(w_state, space.wrap(base_index+2)) storage = space.getitem(w_state, space.wrap(base_index+3)) - if not isinstance(dtype, 
interp_dtype.W_Dtype): + if not isinstance(dtype, descriptor.W_Dtype): raise OperationError(space.w_ValueError, space.wrap( "__setstate__(self, (shape, dtype, .. called with improper dtype '%r'" % dtype)) self.implementation = W_NDimArray.from_shape_and_storage(space, @@ -1140,9 +1129,9 @@ offset=0, w_strides=None, w_order=None): from pypy.module.micronumpy.concrete import ConcreteArray from pypy.module.micronumpy.support import calc_strides - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) - shape = _find_shape(space, w_shape, dtype) + dtype = space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) + shape = shape_converter(space, w_shape, dtype) if not space.is_none(w_buffer): if (not space.is_none(w_strides)): @@ -1192,10 +1181,10 @@ PyPy-only implementation detail. """ storage = rffi.cast(RAW_STORAGE_PTR, addr) - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), + dtype = space.interp_w(descriptor.W_Dtype, + space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) - shape = _find_shape(space, w_shape, dtype) + shape = shape_converter(space, w_shape, dtype) if w_subtype: if not space.isinstance_w(w_subtype, space.w_type): raise OperationError(space.w_ValueError, space.wrap( @@ -1396,107 +1385,6 @@ __array__ = interp2app(W_NDimArray.descr___array__), ) - at unwrap_spec(ndmin=int, copy=bool, subok=bool) -def array(space, w_object, w_dtype=None, copy=True, w_order=None, subok=False, - ndmin=0): - # for anything that isn't already an array, try __array__ method first - if not isinstance(w_object, W_NDimArray): - w___array__ = space.lookup(w_object, "__array__") - if w___array__ is not None: - if space.is_none(w_dtype): - w_dtype = space.w_None - w_array = space.get_and_call_function(w___array__, w_object, w_dtype) - if isinstance(w_array, W_NDimArray): - # feed w_array back into array() for other properties - return array(space, w_array, w_dtype, False, w_order, subok, ndmin) - else: - raise oefmt(space.w_ValueError, - "object __array__ method not producing an array") - - dtype = interp_dtype.decode_w_dtype(space, w_dtype) - - if space.is_none(w_order): - order = 'C' - else: - order = space.str_w(w_order) - if order != 'C': # or order != 'F': - raise oefmt(space.w_ValueError, "Unknown order: %s", order) - - # arrays with correct dtype - if isinstance(w_object, W_NDimArray) and \ - (space.is_none(w_dtype) or w_object.get_dtype() is dtype): - shape = w_object.get_shape() - if copy: - w_ret = w_object.descr_copy(space) - else: - if ndmin <= len(shape): - return w_object - new_impl = w_object.implementation.set_shape(space, w_object, shape) - w_ret = W_NDimArray(new_impl) - if ndmin > len(shape): - shape = [1] * (ndmin - len(shape)) + shape - w_ret.implementation = w_ret.implementation.set_shape(space, - w_ret, shape) - return w_ret - - # not an array or incorrect dtype - shape, elems_w = find_shape_and_elems(space, w_object, dtype) - if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): - for w_elem in elems_w: - if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar(): - w_elem = w_elem.get_scalar_value() - dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, dtype) - if dtype is None: - dtype = interp_dtype.get_dtype_cache(space).w_float64dtype - elif dtype.is_str_or_unicode() and dtype.elsize < 1: - # promote S0 -> S1, U0 -> U1 - dtype = interp_dtype.variable_dtype(space, dtype.char + 
'1') - - if ndmin > len(shape): - shape = [1] * (ndmin - len(shape)) + shape - w_arr = W_NDimArray.from_shape(space, shape, dtype, order=order) - arr_iter = w_arr.create_iter() - for w_elem in elems_w: - arr_iter.setitem(dtype.coerce(space, w_elem)) - arr_iter.next() - return w_arr - -def zeros(space, w_shape, w_dtype=None, w_order=None): - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) - if dtype.is_str_or_unicode() and dtype.elsize < 1: - dtype = interp_dtype.variable_dtype(space, dtype.char + '1') - shape = _find_shape(space, w_shape, dtype) - return W_NDimArray.from_shape(space, shape, dtype=dtype) - - at unwrap_spec(subok=bool) -def empty_like(space, w_a, w_dtype=None, w_order=None, subok=True): - w_a = convert_to_array(space, w_a) - if w_dtype is None: - dtype = w_a.get_dtype() - else: - dtype = space.interp_w(interp_dtype.W_Dtype, - space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) - if dtype.is_str_or_unicode() and dtype.elsize < 1: - dtype = interp_dtype.variable_dtype(space, dtype.char + '1') - return W_NDimArray.from_shape(space, w_a.get_shape(), dtype=dtype, - w_instance=w_a if subok else None) - -def build_scalar(space, w_dtype, w_state): - from rpython.rtyper.lltypesystem import rffi, lltype - if not isinstance(w_dtype, interp_dtype.W_Dtype): - raise oefmt(space.w_TypeError, - "argument 1 must be numpy.dtype, not %T", w_dtype) - if w_dtype.elsize == 0: - raise oefmt(space.w_ValueError, "itemsize cannot be zero") - if not space.isinstance_w(w_state, space.w_str): - raise oefmt(space.w_TypeError, "initializing object must be a string") - if space.len_w(w_state) != w_dtype.elsize: - raise oefmt(space.w_ValueError, "initialization string is too small") - state = rffi.str2charp(space.str_w(w_state)) - box = w_dtype.itemtype.box_raw_data(state) - lltype.free(state, flavor="raw") - return box def _reconstruct(space, w_subtype, w_shape, w_dtype): return descr_new_array(space, w_subtype, w_shape, w_dtype) diff --git a/pypy/module/micronumpy/sort.py b/pypy/module/micronumpy/sort.py --- a/pypy/module/micronumpy/sort.py +++ b/pypy/module/micronumpy/sort.py @@ -12,7 +12,7 @@ from rpython.rlib.objectmodel import specialize from pypy.interpreter.error import OperationError, oefmt from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy import interp_dtype, types, constants as NPY +from pypy.module.micronumpy import descriptor, types, constants as NPY from pypy.module.micronumpy.iter import AxisIterator INT_SIZE = rffi.sizeof(lltype.Signed) @@ -71,7 +71,7 @@ class ArgArrayRepWithStorage(Repr): def __init__(self, index_stride_size, stride_size, size): start = 0 - dtype = interp_dtype.get_dtype_cache(space).w_longdtype + dtype = descriptor.get_dtype_cache(space).w_longdtype indexes = dtype.itemtype.malloc(size * dtype.elsize) values = alloc_raw_storage(size * stride_size, track_allocation=False) @@ -132,7 +132,7 @@ else: axis = space.int_w(w_axis) # create array of indexes - dtype = interp_dtype.get_dtype_cache(space).w_longdtype + dtype = descriptor.get_dtype_cache(space).w_longdtype index_arr = W_NDimArray.from_shape(space, arr.get_shape(), dtype) storage = index_arr.implementation.get_storage() if len(arr.get_shape()) == 1: diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_ndarray.py rename from pypy/module/micronumpy/test/test_numarray.py rename to pypy/module/micronumpy/test/test_ndarray.py --- 
a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -4,7 +4,7 @@ from pypy.conftest import option from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy.strides import Chunk, Chunks -from pypy.module.micronumpy.interp_numarray import W_NDimArray +from pypy.module.micronumpy.ndarray import W_NDimArray from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest @@ -197,7 +197,7 @@ def test_from_shape_and_storage(self): from rpython.rlib.rawstorage import alloc_raw_storage, raw_storage_setitem from rpython.rtyper.lltypesystem import rffi - from pypy.module.micronumpy.interp_dtype import get_dtype_cache + from pypy.module.micronumpy.descriptor import get_dtype_cache storage = alloc_raw_storage(4, track_allocation=False, zero=True) for i in range(4): raw_storage_setitem(storage, i, rffi.cast(rffi.UCHAR, i)) @@ -2303,8 +2303,12 @@ import numpy as np a = np.array(1.5) assert a[...] is a + #a[...] = 2.5 + #assert a == 2.5 a = np.array([1, 2, 3]) assert a[...] is a + #a[...] = 4 + #assert (a == [4, 4, 4]).all() class AppTestNumArrayFromBuffer(BaseNumpyAppTest): diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py --- a/pypy/module/micronumpy/test/test_ufuncs.py +++ b/pypy/module/micronumpy/test/test_ufuncs.py @@ -1,7 +1,7 @@ from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest -from pypy.module.micronumpy.interp_ufuncs import (find_binop_result_dtype, +from pypy.module.micronumpy.ufuncs import (find_binop_result_dtype, find_unaryop_result_dtype) -from pypy.module.micronumpy.interp_dtype import get_dtype_cache +from pypy.module.micronumpy.descriptor import get_dtype_cache class TestUfuncCoercion(object): @@ -763,8 +763,17 @@ assert add.reduce(1) == 1 assert list(maximum.reduce(zeros((2, 0)), axis=0)) == [] - raises(ValueError, maximum.reduce, zeros((2, 0)), axis=None) - raises(ValueError, maximum.reduce, zeros((2, 0)), axis=1) + exc = raises(ValueError, maximum.reduce, zeros((2, 0)), axis=None) + assert exc.value[0] == ('zero-size array to reduction operation ' + 'maximum which has no identity') + exc = raises(ValueError, maximum.reduce, zeros((2, 0)), axis=1) + assert exc.value[0] == ('zero-size array to reduction operation ' + 'maximum which has no identity') + + a = zeros((2, 2)) + 1 + assert (add.reduce(a, axis=1) == [2, 2]).all() + exc = raises(ValueError, add.reduce, a, axis=2) + assert exc.value[0] == "'axis' entry is out of bounds" def test_reduce_1d(self): from numpypy import array, add, maximum, less, float16, complex64 diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -5,7 +5,7 @@ import py from rpython.jit.metainterp.test.support import LLJitMixin from rpython.jit.metainterp.warmspot import reset_jit, get_stats -from pypy.module.micronumpy import interp_boxes +from pypy.module.micronumpy import boxes from pypy.module.micronumpy.compile import FakeSpace, Parser, InterpreterState from pypy.module.micronumpy.base import W_NDimArray @@ -48,11 +48,11 @@ w_res = interp.results[-1] if isinstance(w_res, W_NDimArray): w_res = w_res.create_iter().getitem() - if isinstance(w_res, interp_boxes.W_Float64Box): + if isinstance(w_res, boxes.W_Float64Box): return w_res.value - if isinstance(w_res, interp_boxes.W_Int64Box): + if isinstance(w_res, boxes.W_Int64Box): return float(w_res.value) - elif 
isinstance(w_res, interp_boxes.W_BoolBox): + elif isinstance(w_res, boxes.W_BoolBox): return float(w_res.value) raise TypeError(w_res) diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -2,7 +2,7 @@ import math from pypy.interpreter.error import OperationError, oefmt -from pypy.module.micronumpy import interp_boxes +from pypy.module.micronumpy import boxes from pypy.module.micronumpy import support from pypy.module.micronumpy.concrete import SliceArray, VoidBoxStorage from pypy.objspace.std.floatobject import float2string @@ -110,8 +110,6 @@ return dispatcher class BaseType(object): - SortRepr = None # placeholders for sorting classes, overloaded in sort.py - Sort = None _immutable_fields_ = ['native'] def __init__(self, native=True): @@ -123,7 +121,6 @@ def malloc(self, size): return alloc_raw_storage(size, track_allocation=False, zero=True) - class Primitive(object): _mixin_ = True @@ -306,7 +303,7 @@ class Bool(BaseType, Primitive): T = lltype.Bool - BoxType = interp_boxes.W_BoolBox + BoxType = boxes.W_BoolBox format_code = "?" True = BoxType(True) @@ -540,32 +537,32 @@ class Int8(BaseType, Integer): T = rffi.SIGNEDCHAR - BoxType = interp_boxes.W_Int8Box + BoxType = boxes.W_Int8Box format_code = "b" class UInt8(BaseType, Integer): T = rffi.UCHAR - BoxType = interp_boxes.W_UInt8Box + BoxType = boxes.W_UInt8Box format_code = "B" class Int16(BaseType, Integer): T = rffi.SHORT - BoxType = interp_boxes.W_Int16Box + BoxType = boxes.W_Int16Box format_code = "h" class UInt16(BaseType, Integer): T = rffi.USHORT - BoxType = interp_boxes.W_UInt16Box + BoxType = boxes.W_UInt16Box format_code = "H" class Int32(BaseType, Integer): T = rffi.INT - BoxType = interp_boxes.W_Int32Box + BoxType = boxes.W_Int32Box format_code = "i" class UInt32(BaseType, Integer): T = rffi.UINT - BoxType = interp_boxes.W_UInt32Box + BoxType = boxes.W_UInt32Box format_code = "I" def _int64_coerce(self, space, w_item): @@ -583,7 +580,7 @@ class Int64(BaseType, Integer): T = rffi.LONGLONG - BoxType = interp_boxes.W_Int64Box + BoxType = boxes.W_Int64Box format_code = "q" if LONG_BIT == 32: @@ -604,14 +601,14 @@ class UInt64(BaseType, Integer): T = rffi.ULONGLONG - BoxType = interp_boxes.W_UInt64Box + BoxType = boxes.W_UInt64Box format_code = "Q" _coerce = func_with_new_name(_uint64_coerce, '_coerce') class Long(BaseType, Integer): T = rffi.LONG - BoxType = interp_boxes.W_LongBox + BoxType = boxes.W_LongBox format_code = "l" def _ulong_coerce(self, space, w_item): @@ -629,7 +626,7 @@ class ULong(BaseType, Integer): T = rffi.ULONG - BoxType = interp_boxes.W_ULongBox + BoxType = boxes.W_ULongBox format_code = "L" _coerce = func_with_new_name(_ulong_coerce, '_coerce') @@ -963,7 +960,7 @@ class Float16(BaseType, Float): _STORAGE_T = rffi.USHORT T = rffi.SHORT - BoxType = interp_boxes.W_Float16Box + BoxType = boxes.W_Float16Box @specialize.argtype(1) def box(self, value): @@ -1003,12 +1000,12 @@ class Float32(BaseType, Float): T = rffi.FLOAT - BoxType = interp_boxes.W_Float32Box + BoxType = boxes.W_Float32Box format_code = "f" class Float64(BaseType, Float): T = rffi.DOUBLE - BoxType = interp_boxes.W_Float64Box + BoxType = boxes.W_Float64Box format_code = "d" class ComplexFloating(object): @@ -1572,32 +1569,32 @@ class Complex64(ComplexFloating, BaseType): T = rffi.FLOAT - BoxType = interp_boxes.W_Complex64Box - ComponentBoxType = interp_boxes.W_Float32Box + BoxType = boxes.W_Complex64Box + ComponentBoxType = boxes.W_Float32Box 
class Complex128(ComplexFloating, BaseType): T = rffi.DOUBLE - BoxType = interp_boxes.W_Complex128Box - ComponentBoxType = interp_boxes.W_Float64Box + BoxType = boxes.W_Complex128Box + ComponentBoxType = boxes.W_Float64Box -if interp_boxes.long_double_size == 8: +if boxes.long_double_size == 8: class FloatLong(BaseType, Float): T = rffi.DOUBLE - BoxType = interp_boxes.W_FloatLongBox + BoxType = boxes.W_FloatLongBox format_code = "d" class ComplexLong(ComplexFloating, BaseType): T = rffi.DOUBLE - BoxType = interp_boxes.W_ComplexLongBox - ComponentBoxType = interp_boxes.W_FloatLongBox + BoxType = boxes.W_ComplexLongBox + ComponentBoxType = boxes.W_FloatLongBox -elif interp_boxes.long_double_size in (12, 16): +elif boxes.long_double_size in (12, 16): class FloatLong(BaseType, Float): T = rffi.LONGDOUBLE - BoxType = interp_boxes.W_FloatLongBox + BoxType = boxes.W_FloatLongBox def runpack_str(self, space, s): - assert len(s) == interp_boxes.long_double_size + assert len(s) == boxes.long_double_size fval = self.box(unpack_float80(s, native_is_bigendian)) if not self.native: fval = self.byteswap(fval) @@ -1611,8 +1608,8 @@ class ComplexLong(ComplexFloating, BaseType): T = rffi.LONGDOUBLE - BoxType = interp_boxes.W_ComplexLongBox - ComponentBoxType = interp_boxes.W_FloatLongBox + BoxType = boxes.W_ComplexLongBox + ComponentBoxType = boxes.W_FloatLongBox class FlexibleType(BaseType): def get_element_size(self): @@ -1621,7 +1618,7 @@ @jit.unroll_safe def to_str(self, item): builder = StringBuilder() - assert isinstance(item, interp_boxes.W_FlexibleBox) + assert isinstance(item, boxes.W_FlexibleBox) i = item.ofs end = i + item.dtype.elsize while i < end: @@ -1654,7 +1651,7 @@ @jit.unroll_safe def coerce(self, space, dtype, w_item): - if isinstance(w_item, interp_boxes.W_StringBox): + if isinstance(w_item, boxes.W_StringBox): return w_item if w_item is None: w_item = space.wrap('') @@ -1665,23 +1662,23 @@ arr.storage[i] = arg[i] for j in range(j, dtype.elsize): arr.storage[j] = '\x00' - return interp_boxes.W_StringBox(arr, 0, arr.dtype) + return boxes.W_StringBox(arr, 0, arr.dtype) def store(self, arr, i, offset, box): - assert isinstance(box, interp_boxes.W_StringBox) + assert isinstance(box, boxes.W_StringBox) size = min(arr.dtype.elsize - offset, box.arr.size - box.ofs) return self._store(arr.storage, i, offset, box, size) @jit.unroll_safe def _store(self, storage, i, offset, box, size): - assert isinstance(box, interp_boxes.W_StringBox) + assert isinstance(box, boxes.W_StringBox) for k in range(size): storage[k + offset + i] = box.arr.storage[k + box.ofs] def read(self, arr, i, offset, dtype=None): if dtype is None: dtype = arr.dtype - return interp_boxes.W_StringBox(arr, i + offset, dtype) + return boxes.W_StringBox(arr, i + offset, dtype) def str_format(self, item): builder = StringBuilder() @@ -1746,7 +1743,7 @@ @jit.unroll_safe def coerce(self, space, dtype, w_item): - if isinstance(w_item, interp_boxes.W_UnicodeBox): + if isinstance(w_item, boxes.W_UnicodeBox): return w_item raise OperationError(space.w_NotImplementedError, space.wrap( "coerce (probably from set_item) not implemented for unicode type")) @@ -1756,7 +1753,7 @@ def _coerce(self, space, arr, ofs, dtype, w_items, shape): # TODO: Make sure the shape and the array match - from interp_dtype import W_Dtype + from pypy.module.micronumpy.descriptor import W_Dtype if w_items is not None: items_w = space.fixedview(w_items) else: @@ -1781,12 +1778,12 @@ def coerce(self, space, dtype, w_items): arr = VoidBoxStorage(dtype.elsize, dtype) 
self._coerce(space, arr, 0, dtype, w_items, dtype.shape) - return interp_boxes.W_VoidBox(arr, 0, dtype) + return boxes.W_VoidBox(arr, 0, dtype) @jit.unroll_safe def store(self, arr, i, ofs, box): assert i == 0 - assert isinstance(box, interp_boxes.W_VoidBox) + assert isinstance(box, boxes.W_VoidBox) assert box.dtype is box.arr.dtype for k in range(box.arr.dtype.elsize): arr.storage[k + ofs] = box.arr.storage[k + box.ofs] @@ -1805,11 +1802,11 @@ def read(self, arr, i, offset, dtype=None): if dtype is None: dtype = arr.dtype - return interp_boxes.W_VoidBox(arr, i + offset, dtype) + return boxes.W_VoidBox(arr, i + offset, dtype) @jit.unroll_safe def str_format(self, box): - assert isinstance(box, interp_boxes.W_VoidBox) + assert isinstance(box, boxes.W_VoidBox) arr = self.readarray(box.arr, box.ofs, 0, box.dtype) return arr.dump_data(prefix='', suffix='') @@ -1818,7 +1815,7 @@ "Void arrays return a buffer object for item(), unless fields are defined, in which case a tuple is returned." ''' - assert isinstance(item, interp_boxes.W_VoidBox) + assert isinstance(item, boxes.W_VoidBox) dt = item.arr.dtype ret_unwrapped = [] for name in dt.names: @@ -1827,7 +1824,7 @@ read_val = dtype.itemtype.readarray(item.arr, ofs, 0, dtype) else: read_val = dtype.itemtype.read(item.arr, ofs, 0, dtype) - if isinstance (read_val, interp_boxes.W_StringBox): + if isinstance (read_val, boxes.W_StringBox): # StringType returns a str read_val = space.wrap(dtype.itemtype.to_str(read_val)) ret_unwrapped = ret_unwrapped + [read_val,] @@ -1842,12 +1839,12 @@ def read(self, arr, i, offset, dtype=None): if dtype is None: dtype = arr.dtype - return interp_boxes.W_VoidBox(arr, i + offset, dtype) + return boxes.W_VoidBox(arr, i + offset, dtype) @jit.unroll_safe def coerce(self, space, dtype, w_item): from pypy.module.micronumpy.base import W_NDimArray - if isinstance(w_item, interp_boxes.W_VoidBox): + if isinstance(w_item, boxes.W_VoidBox): return w_item if w_item is not None: if space.isinstance_w(w_item, space.w_tuple): @@ -1871,14 +1868,14 @@ except IndexError: w_box = itemtype.coerce(space, subdtype, None) itemtype.store(arr, 0, ofs, w_box) - return interp_boxes.W_VoidBox(arr, 0, dtype) + return boxes.W_VoidBox(arr, 0, dtype) def runpack_str(self, space, s): raise oefmt(space.w_NotImplementedError, "fromstring not implemented for record types") def store(self, arr, i, ofs, box): - assert isinstance(box, interp_boxes.W_VoidBox) + assert isinstance(box, boxes.W_VoidBox) self._store(arr.storage, i, ofs, box, box.dtype.elsize) @jit.unroll_safe @@ -1887,7 +1884,7 @@ storage[k + i + ofs] = box.arr.storage[k + box.ofs] def fill(self, storage, width, box, start, stop, offset): - assert isinstance(box, interp_boxes.W_VoidBox) + assert isinstance(box, boxes.W_VoidBox) assert width == box.dtype.elsize for i in xrange(start, stop, width): self._store(storage, i, offset, box, width) @@ -1897,7 +1894,7 @@ return w_v def to_builtin_type(self, space, box): - assert isinstance(box, interp_boxes.W_VoidBox) + assert isinstance(box, boxes.W_VoidBox) items = [] dtype = box.dtype for name in dtype.names: @@ -1909,7 +1906,7 @@ @jit.unroll_safe def str_format(self, box): - assert isinstance(box, interp_boxes.W_VoidBox) + assert isinstance(box, boxes.W_VoidBox) pieces = ["("] first = True for name in box.dtype.names: @@ -1925,8 +1922,8 @@ return "".join(pieces) def eq(self, v1, v2): - assert isinstance(v1, interp_boxes.W_VoidBox) - assert isinstance(v2, interp_boxes.W_VoidBox) + assert isinstance(v1, boxes.W_VoidBox) + assert isinstance(v2, 
boxes.W_VoidBox) s1 = v1.dtype.elsize s2 = v2.dtype.elsize assert s1 == s2 diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/ufuncs.py rename from pypy/module/micronumpy/interp_ufuncs.py rename to pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -2,11 +2,10 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty -from pypy.module.micronumpy import interp_boxes, interp_dtype, loop +from pypy.module.micronumpy import boxes, descriptor, loop from rpython.rlib import jit -from rpython.rlib.rarithmetic import LONG_BIT +from rpython.rlib.rarithmetic import LONG_BIT, maxint from rpython.tool.sourcetools import func_with_new_name -from pypy.module.micronumpy.interp_support import unwrap_axis_arg from pypy.module.micronumpy.strides import shape_agreement from pypy.module.micronumpy.base import convert_to_array, W_NDimArray from pypy.module.micronumpy import constants as NPY @@ -149,7 +148,7 @@ array([[ 1, 5], [ 9, 13]]) """ - from pypy.module.micronumpy.interp_numarray import W_NDimArray + from pypy.module.micronumpy.ndarray import W_NDimArray if w_axis is None: w_axis = space.wrap(0) if space.is_none(w_out): @@ -175,12 +174,19 @@ if obj.is_scalar(): return obj.get_scalar_value() shapelen = len(obj_shape) - axis = unwrap_axis_arg(space, shapelen, w_axis) + if space.is_none(w_axis): + axis = maxint + else: + axis = space.int_w(w_axis) + if axis < -shapelen or axis >= shapelen: + raise oefmt(space.w_ValueError, "'axis' entry is out of bounds") + if axis < 0: + axis += shapelen assert axis >= 0 - dtype = interp_dtype.decode_w_dtype(space, dtype) + dtype = descriptor.decode_w_dtype(space, dtype) if dtype is None: if self.comparison_func: - dtype = interp_dtype.get_dtype_cache(space).w_booldtype + dtype = descriptor.get_dtype_cache(space).w_booldtype else: dtype = find_unaryop_result_dtype( space, obj.get_dtype(), @@ -192,8 +198,9 @@ for i in range(shapelen): if space.is_none(w_axis) or i == axis: if obj_shape[i] == 0: - raise oefmt(space.w_ValueError, "zero-size array to " - "%s.reduce without identity", self.name) + raise oefmt(space.w_ValueError, + "zero-size array to reduction operation %s " + "which has no identity", self.name) if shapelen > 1 and axis < shapelen: temp = None if cumulative: @@ -308,14 +315,14 @@ # raise oefmt(space.w_TypeError, # "Cannot cast ufunc %s output from dtype('%s') to dtype('%s') with casting rule 'same_kind'", self.name, w_obj.get_dtype().name, res_dtype.name) elif self.bool_result: - res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype + res_dtype = descriptor.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if self.complex_to_float and calc_dtype.is_complex(): if calc_dtype.num == NPY.CFLOAT: - res_dtype = interp_dtype.get_dtype_cache(space).w_float32dtype + res_dtype = descriptor.get_dtype_cache(space).w_float32dtype else: - res_dtype = interp_dtype.get_dtype_cache(space).w_float64dtype + res_dtype = descriptor.get_dtype_cache(space).w_float64dtype if w_obj.is_scalar(): w_val = self.func(calc_dtype, w_obj.get_scalar_value().convert_to(space, calc_dtype)) @@ -418,7 +425,7 @@ out = w_out calc_dtype = out.get_dtype() if self.comparison_func: - res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype + res_dtype = descriptor.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype if 
w_lhs.is_scalar() and w_rhs.is_scalar(): @@ -465,7 +472,7 @@ dt1, dt2 = dt2, dt1 # Some operations promote op(bool, bool) to return int8, rather than bool if promote_bools and (dt1.kind == dt2.kind == NPY.GENBOOLLTR): - return interp_dtype.get_dtype_cache(space).w_int8dtype + return descriptor.get_dtype_cache(space).w_int8dtype # Everything numeric promotes to complex if dt2.is_complex() or dt1.is_complex(): @@ -473,16 +480,16 @@ dt1, dt2 = dt2, dt1 if dt2.num == NPY.CFLOAT: if dt1.num == NPY.DOUBLE: - return interp_dtype.get_dtype_cache(space).w_complex128dtype + return descriptor.get_dtype_cache(space).w_complex128dtype elif dt1.num == NPY.LONGDOUBLE: - return interp_dtype.get_dtype_cache(space).w_complexlongdtype - return interp_dtype.get_dtype_cache(space).w_complex64dtype + return descriptor.get_dtype_cache(space).w_complexlongdtype + return descriptor.get_dtype_cache(space).w_complex64dtype elif dt2.num == NPY.CDOUBLE: if dt1.num == NPY.LONGDOUBLE: - return interp_dtype.get_dtype_cache(space).w_complexlongdtype - return interp_dtype.get_dtype_cache(space).w_complex128dtype + return descriptor.get_dtype_cache(space).w_complexlongdtype + return descriptor.get_dtype_cache(space).w_complex128dtype elif dt2.num == NPY.CLONGDOUBLE: - return interp_dtype.get_dtype_cache(space).w_complexlongdtype + return descriptor.get_dtype_cache(space).w_complexlongdtype else: raise OperationError(space.w_TypeError, space.wrap("Unsupported types")) @@ -497,11 +504,11 @@ # Everything promotes to float, and bool promotes to everything. if dt2.kind == NPY.FLOATINGLTR or dt1.kind == NPY.GENBOOLLTR: if dt2.num == NPY.HALF and dt1.itemtype.get_element_size() == 2: - return interp_dtype.get_dtype_cache(space).w_float32dtype + return descriptor.get_dtype_cache(space).w_float32dtype if dt2.num == NPY.HALF and dt1.itemtype.get_element_size() >= 4: - return interp_dtype.get_dtype_cache(space).w_float64dtype + return descriptor.get_dtype_cache(space).w_float64dtype if dt2.num == NPY.FLOAT and dt1.itemtype.get_element_size() >= 4: - return interp_dtype.get_dtype_cache(space).w_float64dtype + return descriptor.get_dtype_cache(space).w_float64dtype return dt2 # for now this means mixing signed and unsigned @@ -527,7 +534,7 @@ else: # increase to the next signed type dtypenum = dt2.num + 1 - newdtype = interp_dtype.get_dtype_cache(space).dtypes_by_num[dtypenum] + newdtype = descriptor.get_dtype_cache(space).dtypes_by_num[dtypenum] if (newdtype.itemtype.get_element_size() > dt2.itemtype.get_element_size() or newdtype.kind == NPY.FLOATINGLTR): @@ -536,7 +543,7 @@ # we only promoted to long on 32-bit or to longlong on 64-bit # this is really for dealing with the Long and Ulong dtypes dtypenum += 2 - return interp_dtype.get_dtype_cache(space).dtypes_by_num[dtypenum] + return descriptor.get_dtype_cache(space).dtypes_by_num[dtypenum] @jit.unroll_safe def find_unaryop_result_dtype(space, dt, promote_to_float=False, @@ -544,34 +551,34 @@ if promote_to_largest: if dt.kind == NPY.GENBOOLLTR or dt.kind == NPY.SIGNEDLTR: if dt.elsize * 8 < LONG_BIT: - return interp_dtype.get_dtype_cache(space).w_longdtype + return descriptor.get_dtype_cache(space).w_longdtype elif dt.kind == NPY.UNSIGNEDLTR: if dt.elsize * 8 < LONG_BIT: - return interp_dtype.get_dtype_cache(space).w_ulongdtype + return descriptor.get_dtype_cache(space).w_ulongdtype else: assert dt.kind == NPY.FLOATINGLTR or dt.kind == NPY.COMPLEXLTR return dt if promote_bools and (dt.kind == NPY.GENBOOLLTR): - return interp_dtype.get_dtype_cache(space).w_int8dtype + return 
descriptor.get_dtype_cache(space).w_int8dtype if promote_to_float: if dt.kind == NPY.FLOATINGLTR or dt.kind == NPY.COMPLEXLTR: return dt if dt.num >= NPY.INT: - return interp_dtype.get_dtype_cache(space).w_float64dtype - for bytes, dtype in interp_dtype.get_dtype_cache(space).float_dtypes_by_num_bytes: + return descriptor.get_dtype_cache(space).w_float64dtype + for bytes, dtype in descriptor.get_dtype_cache(space).float_dtypes_by_num_bytes: if (dtype.kind == NPY.FLOATINGLTR and dtype.itemtype.get_element_size() > dt.itemtype.get_element_size()): return dtype return dt def find_dtype_for_scalar(space, w_obj, current_guess=None): - bool_dtype = interp_dtype.get_dtype_cache(space).w_booldtype - long_dtype = interp_dtype.get_dtype_cache(space).w_longdtype - int64_dtype = interp_dtype.get_dtype_cache(space).w_int64dtype - uint64_dtype = interp_dtype.get_dtype_cache(space).w_uint64dtype - complex_dtype = interp_dtype.get_dtype_cache(space).w_complex128dtype - float_dtype = interp_dtype.get_dtype_cache(space).w_float64dtype - if isinstance(w_obj, interp_boxes.W_GenericBox): + bool_dtype = descriptor.get_dtype_cache(space).w_booldtype + long_dtype = descriptor.get_dtype_cache(space).w_longdtype + int64_dtype = descriptor.get_dtype_cache(space).w_int64dtype + uint64_dtype = descriptor.get_dtype_cache(space).w_uint64dtype + complex_dtype = descriptor.get_dtype_cache(space).w_complex128dtype + float_dtype = descriptor.get_dtype_cache(space).w_float64dtype + if isinstance(w_obj, boxes.W_GenericBox): dtype = w_obj.get_dtype(space) return find_binop_result_dtype(space, dtype, current_guess) @@ -594,11 +601,11 @@ return complex_dtype elif space.isinstance_w(w_obj, space.w_str): if current_guess is None: - return interp_dtype.variable_dtype(space, + return descriptor.variable_dtype(space, 'S%d' % space.len_w(w_obj)) elif current_guess.num == NPY.STRING: if current_guess.elsize < space.len_w(w_obj): - return interp_dtype.variable_dtype(space, + return descriptor.variable_dtype(space, 'S%d' % space.len_w(w_obj)) return current_guess raise oefmt(space.w_NotImplementedError, @@ -607,7 +614,7 @@ def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func, bool_result): - dtype_cache = interp_dtype.get_dtype_cache(space) + dtype_cache = descriptor.get_dtype_cache(space) def get_op(dtype): try: return getattr(dtype.itemtype, op_name) @@ -743,7 +750,7 @@ identity = extra_kwargs.get("identity") if identity is not None: identity = \ - interp_dtype.get_dtype_cache(space).w_longdtype.box(identity) + descriptor.get_dtype_cache(space).w_longdtype.box(identity) extra_kwargs["identity"] = identity func = ufunc_dtype_caller(space, ufunc_name, op_name, argcount, From noreply at buildbot.pypy.org Thu Feb 27 07:11:22 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 07:11:22 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: cleanup Message-ID: <20140227061122.1FC951C03D5@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69492:758fa0da0cdd Date: 2014-02-26 23:16 -0500 http://bitbucket.org/pypy/pypy/changeset/758fa0da0cdd/ Log: cleanup diff --git a/pypy/module/micronumpy/interp_flatiter.py b/pypy/module/micronumpy/interp_flatiter.py --- a/pypy/module/micronumpy/interp_flatiter.py +++ b/pypy/module/micronumpy/interp_flatiter.py @@ -1,7 +1,7 @@ from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy import loop from pypy.module.micronumpy.concrete import BaseConcreteArray -from pypy.interpreter.error import 
OperationError +from pypy.interpreter.error import OperationError, oefmt class FakeArrayImplementation(BaseConcreteArray): @@ -23,6 +23,7 @@ assert isinstance(self.base(), W_NDimArray) return self.base().create_iter() + class W_FlatIterator(W_NDimArray): def __init__(self, arr): self.base = arr @@ -54,9 +55,8 @@ def descr_getitem(self, space, w_idx): if not (space.isinstance_w(w_idx, space.w_int) or - space.isinstance_w(w_idx, space.w_slice)): - raise OperationError(space.w_IndexError, - space.wrap('unsupported iterator index')) + space.isinstance_w(w_idx, space.w_slice)): + raise oefmt(space.w_IndexError, 'unsupported iterator index') self.reset() base = self.base start, stop, step, length = space.decode_index4(w_idx, base.get_size()) @@ -70,9 +70,8 @@ def descr_setitem(self, space, w_idx, w_value): if not (space.isinstance_w(w_idx, space.w_int) or - space.isinstance_w(w_idx, space.w_slice)): - raise OperationError(space.w_IndexError, - space.wrap('unsupported iterator index')) + space.isinstance_w(w_idx, space.w_slice)): + raise oefmt(space.w_IndexError, 'unsupported iterator index') base = self.base start, stop, step, length = space.decode_index4(w_idx, base.get_size()) arr = convert_to_array(space, w_value) diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py --- a/pypy/module/micronumpy/test/test_numarray.py +++ b/pypy/module/micronumpy/test/test_numarray.py @@ -2303,8 +2303,12 @@ import numpy as np a = np.array(1.5) assert a[...] is a + #a[...] = 2.5 + #assert a == 2.5 a = np.array([1, 2, 3]) assert a[...] is a + #a[...] = 4 + #assert (a == [4, 4, 4]).all() class AppTestNumArrayFromBuffer(BaseNumpyAppTest): diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -110,8 +110,6 @@ return dispatcher class BaseType(object): - SortRepr = None # placeholders for sorting classes, overloaded in sort.py - Sort = None _immutable_fields_ = ['native'] def __init__(self, native=True): @@ -123,7 +121,6 @@ def malloc(self, size): return alloc_raw_storage(size, track_allocation=False, zero=True) - class Primitive(object): _mixin_ = True From noreply at buildbot.pypy.org Thu Feb 27 07:11:20 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 07:11:20 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: merge default Message-ID: <20140227061120.C55311C03D5@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69491:8da9e6ab9957 Date: 2014-02-26 23:05 -0500 http://bitbucket.org/pypy/pypy/changeset/8da9e6ab9957/ Log: merge default diff --git a/include/PyPy.h b/include/PyPy.h --- a/include/PyPy.h +++ b/include/PyPy.h @@ -8,9 +8,14 @@ extern "C" { #endif +/* You should call this first once. */ +#define pypy_init(need_threads) do { pypy_asm_stack_bottom(); \ +rpython_startup_code();\ + if (need_threads) pypy_init_threads(); } while (0) -/* You should call this first once. */ +// deprecated interface void rpython_startup_code(void); +void pypy_init_threads(void); /* Initialize the home directory of PyPy. It is necessary to call this. @@ -26,11 +31,10 @@ /* If your program has multiple threads, then you need to call - pypy_init_threads() once at init time, and then pypy_thread_attach() - once in each other thread that just started and in which you want to - run Python code (including via callbacks, see below). 
+ pypy_thread_attach() once in each other thread that just started + and in which you want to run Python code (including via callbacks, + see below). DO NOT CALL IT IN THE MAIN THREAD */ -void pypy_init_threads(void); void pypy_thread_attach(void); diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/embedding.rst @@ -0,0 +1,101 @@ + +PyPy has a very minimal and a very strange embedding interface, based on +the usage of `cffi`_ and the philosophy that Python is a better language in C. +It was developed in collaboration with Roberto De Ioris from the `uwsgi`_ +project. The `PyPy uwsgi plugin`_ is a good example of usage of such interface. + +The first thing that you need, that we plan to change in the future, is to +compile PyPy yourself with an option ``--shared``. Consult the +`how to compile PyPy`_ doc for details. That should result in ``libpypy.so`` +or ``pypy.dll`` file or something similar, depending on your platform. Consult +your platform specification for details. + +The resulting shared library has very few functions that are however enough +to make a full API working, provided you'll follow a few principles. The API +is: + +.. function:: void pypy_init(int need_threads); + + This is a function that you have to call (once) before calling anything. + It initializes the RPython/PyPy GC and does a bunch of necessary startup + code. This function cannot fail. Pass 1 in case you need thread support, 0 + otherwise. + +.. function:: long pypy_setup_home(char* home, int verbose); + + This is another function that you have to call at some point, without + it you would not be able to find the standard library (and run pretty much + nothing). Arguments: + + * ``home``: null terminated path + + * ``verbose``: if non-zero, would print error messages to stderr + + Function returns 0 on success or 1 on failure, can be called multiple times + until the library is found. + +.. function:: int pypy_execute_source(char* source); + + Execute the source code given in the ``source`` argument. Will print + the error message to stderr upon failure and return 1, otherwise returns 0. + You should really do your own error handling in the source. It'll acquire + the GIL. + +.. function:: void pypy_thread_attach(void); + + In case your application uses threads that are initialized outside of PyPy, + you need to call this function to tell the PyPy GC to track this thread. + Note that this function is not thread-safe itself, so you need to guard it + with a mutex. Do not call it from the main thread. + +Simple example +-------------- + +Note that this API is a lot more minimal than say CPython C API, so at first +it's obvious to think that you can't do much. However, the trick is to do +all the logic in Python and expose it via `cffi`_ callbacks. Let's assume +we're on linux and pypy is put in ``/opt/pypy`` (a source checkout) and +library is in ``/opt/pypy/libpypy-c.so``. We write a little C program +(for simplicity assuming that all operations will be performed:: + + #include "include/PyPy.h" + #include + + const char source[] = "print 'hello from pypy'"; + + int main() + { + int res; + + rpython_startup_code(); + res = pypy_execute_source((char*)source); + if (res) { + printf("Error calling pypy_execute_source!\n"); + } + return res; + } + +If we save it as ``x.c`` now, compile it and run it with:: + + fijal at hermann:/opt/pypy$ gcc -o x x.c -lpypy-c -L. + fijal at hermann:~/src/pypy$ LD_LIBRARY_PATH=. ./x + hello from pypy + +Worked! 
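
The embedding docs above lean on cffi callbacks for anything beyond executing a source string. Purely as an illustrative sketch (hypothetical wiring, not the uwsgi plugin's actual API), the Python source handed to pypy_execute_source() could register a callback along these lines:

    import cffi

    ffi = cffi.FFI()

    @ffi.callback("int(int, int)")
    def add(x, y):          # Python logic exposed behind a C function pointer
        return x + y

    # Sanity check from the Python side; in a real embedding the function
    # pointer itself would be handed back to the host C code.
    assert add(2, 3) == 5
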
+ +More advanced example +--------------------- + +Typically we need something more to do than simply execute source. The following +is a fully fledged example, please consult cffi documentation for details. + +xxx + +Threading +--------- + +XXXX I don't understand what's going on, discuss with unbit + +.. _`cffi`: http://cffi.readthedocs.org/ +.. _`uwsgi`: http://uwsgi-docs.readthedocs.org/en/latest/ +.. _`PyPy uwsgi plugin`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst --- a/pypy/doc/getting-started.rst +++ b/pypy/doc/getting-started.rst @@ -145,11 +145,13 @@ After you successfully manage to get PyPy's source you can read more about: - `Building and using PyPy's Python interpreter`_ + - `Embedding PyPy`_ - `Learning more about the RPython toolchain and how to develop (with) PyPy`_ - `Tutorial for how to write an interpreter with the RPython toolchain and make it fast`_ - `Look at our benchmark results`_ .. _`Building and using PyPy's Python interpreter`: getting-started-python.html +.. _`Embedding PyPy`: embedding.html .. _`Learning more about the RPython toolchain and how to develop (with) PyPy`: getting-started-dev.html .. _`Tutorial for how to write an interpreter with the RPython toolchain and make it fast`: http://morepypy.blogspot.com/2011/04/tutorial-writing-interpreter-with-pypy.html .. _`Look at our benchmark results`: http://speed.pypy.org diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -82,6 +82,7 @@ from rpython.rlib.entrypoint import entrypoint from rpython.rtyper.lltypesystem import rffi, lltype + from rpython.rtyper.lltypesystem.lloperation import llop w_pathsetter = space.appexec([], """(): def f(path): @@ -93,6 +94,7 @@ @entrypoint('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): from pypy.module.sys.initpath import pypy_find_stdlib + llop.gc_stack_bottom(lltype.Void) verbose = rffi.cast(lltype.Signed, verbose) if ll_home: home = rffi.charp2str(ll_home) @@ -120,8 +122,11 @@ @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): + rffi.aroundstate.after() + llop.gc_stack_bottom(lltype.Void) source = rffi.charp2str(ll_source) res = _pypy_execute_source(source) + rffi.aroundstate.before() return rffi.cast(rffi.INT, res) @entrypoint('main', [], c_name='pypy_init_threads') diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -9,8 +9,7 @@ from pypy.module.cpyext.api import PyObject from pypy.module.micronumpy.interp_numarray import W_NDimArray, array from pypy.module.micronumpy.interp_dtype import get_dtype_cache, W_Dtype -from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArray -from pypy.module.micronumpy.arrayimpl.scalar import Scalar +from pypy.module.micronumpy.concrete import ConcreteArray from rpython.rlib.rawstorage import RAW_STORAGE_PTR NPY_C_CONTIGUOUS = 0x0001 @@ -167,7 +166,7 @@ # void *data = PyArray_DATA(arr); impl = w_array.implementation w_array = W_NDimArray.from_shape(space, [1], impl.dtype) - w_array.implementation.setitem(0, impl.value) + w_array.implementation.setitem(0, impl.getitem(impl.start + 0)) w_array.implementation.shape = [] return w_array @@ -214,12 +213,8 @@ order='C', owning=False, w_subtype=None): shape, dtype = 
get_shape_and_dtype(space, nd, dims, typenum) storage = rffi.cast(RAW_STORAGE_PTR, data) - if nd == 0: - w_val = dtype.itemtype.box_raw_data(storage) - return W_NDimArray(Scalar(dtype, w_val)) - else: - return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, - order=order, owning=owning, w_subtype=w_subtype) + return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, + order=order, owning=owning, w_subtype=w_subtype) @cpython_api([Py_ssize_t, rffi.LONGP, Py_ssize_t], PyObject) diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -77,7 +77,7 @@ def test_FromAny_scalar(self, space, api): a0 = scalar(space) - assert a0.implementation.get_scalar_value().value == 10. + assert a0.get_scalar_value().value == 10. a = api._PyArray_FromAny(a0, NULL, 0, 0, 0, NULL) assert api._PyArray_NDIM(a) == 0 diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -31,7 +31,6 @@ @staticmethod def from_shape(space, shape, dtype, order='C', w_instance=None): from pypy.module.micronumpy import concrete - strides, backstrides = calc_strides(shape, dtype.base, order) impl = concrete.ConcreteArray(shape, dtype.base, order, strides, backstrides) @@ -43,7 +42,6 @@ def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, w_subtype=None, w_base=None, writable=True): from pypy.module.micronumpy import concrete - assert shape strides, backstrides = calc_strides(shape, dtype, order) if w_base is not None: if owning: @@ -56,7 +54,6 @@ impl = concrete.ConcreteNonWritableArrayWithBase(shape, dtype, order, strides, backstrides, storage, w_base) - elif owning: # Will free storage when GCd impl = concrete.ConcreteArray(shape, dtype, order, strides, diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -499,6 +499,16 @@ def rewrite_op_hint(self, op): hints = op.args[1].value + + # hack: if there are both 'promote' and 'promote_string', kill + # one of them based on the type of the value + if hints.get('promote_string') and hints.get('promote'): + hints = hints.copy() + if op.args[0].concretetype == lltype.Ptr(rstr.STR): + del hints['promote'] + else: + del hints['promote_string'] + if hints.get('promote') and op.args[0].concretetype is not lltype.Void: assert op.args[0].concretetype != lltype.Ptr(rstr.STR) kind = getkind(op.args[0].concretetype) diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -1050,6 +1050,37 @@ assert op1.result == v2 assert op0.opname == '-live-' +def test_double_promote_str(): + PSTR = lltype.Ptr(rstr.STR) + v1 = varoftype(PSTR) + v2 = varoftype(PSTR) + tr = Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = SpaceOperation('hint', + [v1, Constant({'promote_string': True}, lltype.Void)], + v2) + op2 = SpaceOperation('hint', + [v1, Constant({'promote_string': True, + 'promote': True}, lltype.Void)], + v2) + lst1 = tr.rewrite_operation(op1) + lst2 = tr.rewrite_operation(op2) + assert lst1 == lst2 + +def test_double_promote_nonstr(): + v1 = varoftype(lltype.Signed) + v2 = varoftype(lltype.Signed) + tr = 
Transformer(FakeCPU(), FakeBuiltinCallControl()) + op1 = SpaceOperation('hint', + [v1, Constant({'promote': True}, lltype.Void)], + v2) + op2 = SpaceOperation('hint', + [v1, Constant({'promote_string': True, + 'promote': True}, lltype.Void)], + v2) + lst1 = tr.rewrite_operation(op1) + lst2 = tr.rewrite_operation(op2) + assert lst1 == lst2 + def test_unicode_concat(): # test that the oopspec is present and correctly transformed PSTR = lltype.Ptr(rstr.UNICODE) diff --git a/rpython/jit/metainterp/test/test_list.py b/rpython/jit/metainterp/test/test_list.py --- a/rpython/jit/metainterp/test/test_list.py +++ b/rpython/jit/metainterp/test/test_list.py @@ -98,8 +98,8 @@ self.check_resops(setarrayitem_gc=0, call=0, getarrayitem_gc=0) def test_vlist_alloc_and_set(self): - # the check_loops fails, because [non-null] * n is not supported yet - # (it is implemented as a residual call) + # the check_loops fails, because [non-null] * n is only supported + # if n < 15 (otherwise it is implemented as a residual call) jitdriver = JitDriver(greens = [], reds = ['n']) def f(n): l = [1] * 20 @@ -116,7 +116,7 @@ res = self.meta_interp(f, [10], listops=True) assert res == f(10) - py.test.skip("'[non-null] * n' gives a residual call so far") + py.test.skip("'[non-null] * n' for n >= 15 gives a residual call so far") self.check_loops(setarrayitem_gc=0, getarrayitem_gc=0, call=0) def test_arraycopy_simpleoptimize(self): @@ -287,6 +287,74 @@ assert res == 5 self.check_resops(call=0) + def test_list_mul_virtual(self): + class Foo: + def __init__(self, l): + self.l = l + l[0] = self + + myjitdriver = JitDriver(greens = [], reds = ['y']) + def f(y): + while y > 0: + myjitdriver.jit_merge_point(y=y) + Foo([None] * 5) + y -= 1 + return 42 + + self.meta_interp(f, [5]) + self.check_resops({'int_sub': 2, + 'int_gt': 2, + 'guard_true': 2, + 'jump': 1}) + + def test_list_mul_virtual_nonzero(self): + class base: + pass + class Foo(base): + def __init__(self, l): + self.l = l + l[0] = self + class nil(base): + pass + + nil = nil() + + myjitdriver = JitDriver(greens = [], reds = ['y']) + def f(y): + while y > 0: + myjitdriver.jit_merge_point(y=y) + Foo([nil] * 5) + y -= 1 + return 42 + + self.meta_interp(f, [5]) + self.check_resops({'int_sub': 2, + 'int_gt': 2, + 'guard_true': 2, + 'jump': 1}) + + def test_list_mul_unsigned_virtual(self): + from rpython.rlib.rarithmetic import r_uint + + class Foo: + def __init__(self, l): + self.l = l + l[0] = self + + myjitdriver = JitDriver(greens = [], reds = ['y']) + def f(y): + while y > 0: + myjitdriver.jit_merge_point(y=y) + Foo([None] * r_uint(5)) + y -= 1 + return 42 + + self.meta_interp(f, [5]) + self.check_resops({'int_sub': 2, + 'int_gt': 2, + 'guard_true': 2, + 'jump': 1}) + class TestLLtype(ListTests, LLJitMixin): def test_listops_dont_invalidate_caches(self): diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -130,7 +130,9 @@ if promote_args != 'all': args = [args[int(i)] for i in promote_args.split(",")] for arg in args: - code.append(" %s = hint(%s, promote=True)\n" % (arg, arg)) + code.append( #use both hints, and let jtransform pick the right one + " %s = hint(%s, promote=True, promote_string=True)\n" % + (arg, arg)) code.append(" return _orig_func_unlikely_name(%s)\n" % (argstring, )) d = {"_orig_func_unlikely_name": func, "hint": hint} exec py.code.Source("\n".join(code)).compile() in d diff --git a/rpython/rtyper/test/test_generator.py b/rpython/rtyper/test/test_generator.py --- 
a/rpython/rtyper/test/test_generator.py +++ b/rpython/rtyper/test/test_generator.py @@ -88,3 +88,16 @@ return s res = self.interpret(g, []) assert res == 6 + + def test_send(self): + def f(): + yield (yield 1) + 1 + def g(): + gen = f() + res = f.send(2) + assert res == 1 + res = f.next() + assert res == 3 + + res = self.interpret(g, []) + diff --git a/rpython/rtyper/test/test_rlist.py b/rpython/rtyper/test/test_rlist.py --- a/rpython/rtyper/test/test_rlist.py +++ b/rpython/rtyper/test/test_rlist.py @@ -1619,3 +1619,17 @@ rgc.ll_arraycopy = old_arraycopy # assert 2 <= res <= 10 + + def test_alloc_and_set(self): + def fn(i): + lst = [0] * r_uint(i) + return lst + t, rtyper, graph = self.gengraph(fn, [int]) + block = graph.startblock + seen = 0 + for op in block.operations: + if op.opname in ['cast_int_to_uint', 'cast_uint_to_int']: + continue + assert op.opname == 'direct_call' + seen += 1 + assert seen == 1 diff --git a/rpython/translator/transform.py b/rpython/translator/transform.py --- a/rpython/translator/transform.py +++ b/rpython/translator/transform.py @@ -30,7 +30,7 @@ # [a] * b # --> # c = newlist(a) -# d = mul(c, int b) +# d = mul(c, b) # --> # d = alloc_and_set(b, a) @@ -44,8 +44,7 @@ len(op.args) == 1): length1_lists[op.result] = op.args[0] elif (op.opname == 'mul' and - op.args[0] in length1_lists and - self.gettype(op.args[1]) is int): + op.args[0] in length1_lists): new_op = SpaceOperation('alloc_and_set', (op.args[1], length1_lists[op.args[0]]), op.result) From noreply at buildbot.pypy.org Thu Feb 27 08:28:43 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Feb 2014 08:28:43 +0100 (CET) Subject: [pypy-commit] pypy default: Cancel unintended part of 2c8e18a5330a Message-ID: <20140227072843.690391C0A5B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69498:9b4ec95dbd96 Date: 2014-02-27 08:26 +0100 http://bitbucket.org/pypy/pypy/changeset/9b4ec95dbd96/ Log: Cancel unintended part of 2c8e18a5330a diff --git a/rpython/rtyper/test/test_generator.py b/rpython/rtyper/test/test_generator.py --- a/rpython/rtyper/test/test_generator.py +++ b/rpython/rtyper/test/test_generator.py @@ -88,16 +88,3 @@ return s res = self.interpret(g, []) assert res == 6 - - def test_send(self): - def f(): - yield (yield 1) + 1 - def g(): - gen = f() - res = f.send(2) - assert res == 1 - res = f.next() - assert res == 3 - - res = self.interpret(g, []) - From noreply at buildbot.pypy.org Thu Feb 27 08:28:42 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Feb 2014 08:28:42 +0100 (CET) Subject: [pypy-commit] pypy default: Fix: we need to check for NULL-ness before calling the aroundstate functions Message-ID: <20140227072842.305EF1C0A5B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69497:b277ef305108 Date: 2014-02-27 08:24 +0100 http://bitbucket.org/pypy/pypy/changeset/b277ef305108/ Log: Fix: we need to check for NULL-ness before calling the aroundstate functions diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -122,11 +122,13 @@ @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): - rffi.aroundstate.after() + after = rffi.aroundstate.after + if after: after() llop.gc_stack_bottom(lltype.Void) source = rffi.charp2str(ll_source) res = _pypy_execute_source(source) - rffi.aroundstate.before() + before = rffi.aroundstate.before + if before: before() return rffi.cast(rffi.INT, res) 
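
The fix above replaces direct calls to rffi.aroundstate.after()/before() with a read-into-a-local plus a NULL check, since those hooks stay unset until something (e.g. thread setup) installs them. A minimal pure-Python sketch of the same guarded-hook pattern (the names are stand-ins, not the RPython objects):

    class AroundState(object):      # stand-in for rffi.aroundstate
        before = None
        after = None

    def execute_source(state, run):
        after = state.after         # read once into a local...
        if after:                   # ...and only call it if it is set
            after()
        result = run()
        before = state.before
        if before:
            before()
        return result

    assert execute_source(AroundState(), lambda: 42) == 42
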
@entrypoint('main', [], c_name='pypy_init_threads') @@ -134,7 +136,8 @@ if not space.config.objspace.usemodules.thread: return os_thread.setup_threads(space) - rffi.aroundstate.before() + before = rffi.aroundstate.before + if before: before() @entrypoint('main', [], c_name='pypy_thread_attach') def pypy_thread_attach(): @@ -145,7 +148,8 @@ rthread.gc_thread_start() os_thread.bootstrapper.nbthreads += 1 os_thread.bootstrapper.release() - rffi.aroundstate.before() + before = rffi.aroundstate.before + if before: before() w_globals = space.newdict() space.setitem(w_globals, space.wrap('__builtins__'), From noreply at buildbot.pypy.org Thu Feb 27 08:28:45 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Feb 2014 08:28:45 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140227072845.25A7E1C0A5B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69499:1393c9dc8c7d Date: 2014-02-27 08:27 +0100 http://bitbucket.org/pypy/pypy/changeset/1393c9dc8c7d/ Log: merge heads diff too long, truncating to 2000 out of 20175 lines diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -7,10 +7,10 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import cpython_api, Py_ssize_t, CANNOT_FAIL from pypy.module.cpyext.api import PyObject -from pypy.module.micronumpy.interp_numarray import W_NDimArray, array -from pypy.module.micronumpy.interp_dtype import get_dtype_cache, W_Dtype -from pypy.module.micronumpy.arrayimpl.concrete import ConcreteArray -from pypy.module.micronumpy.arrayimpl.scalar import Scalar +from pypy.module.micronumpy.ndarray import W_NDimArray +from pypy.module.micronumpy.ctors import array +from pypy.module.micronumpy.descriptor import get_dtype_cache, W_Dtype +from pypy.module.micronumpy.concrete import ConcreteArray from rpython.rlib.rawstorage import RAW_STORAGE_PTR NPY_C_CONTIGUOUS = 0x0001 @@ -167,7 +167,7 @@ # void *data = PyArray_DATA(arr); impl = w_array.implementation w_array = W_NDimArray.from_shape(space, [1], impl.dtype) - w_array.implementation.setitem(0, impl.value) + w_array.implementation.setitem(0, impl.getitem(impl.start + 0)) w_array.implementation.shape = [] return w_array @@ -214,12 +214,8 @@ order='C', owning=False, w_subtype=None): shape, dtype = get_shape_and_dtype(space, nd, dims, typenum) storage = rffi.cast(RAW_STORAGE_PTR, data) - if nd == 0: - w_val = dtype.itemtype.box_raw_data(storage) - return W_NDimArray(Scalar(dtype, w_val)) - else: - return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, - order=order, owning=owning, w_subtype=w_subtype) + return W_NDimArray.from_shape_and_storage(space, shape, storage, dtype, + order=order, owning=owning, w_subtype=w_subtype) @cpython_api([Py_ssize_t, rffi.LONGP, Py_ssize_t], PyObject) diff --git a/pypy/module/cpyext/test/test_ndarrayobject.py b/pypy/module/cpyext/test/test_ndarrayobject.py --- a/pypy/module/cpyext/test/test_ndarrayobject.py +++ b/pypy/module/cpyext/test/test_ndarrayobject.py @@ -2,8 +2,8 @@ from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.module.micronumpy.interp_numarray import W_NDimArray -from pypy.module.micronumpy.interp_dtype import get_dtype_cache +from pypy.module.micronumpy.ndarray import W_NDimArray +from pypy.module.micronumpy.descriptor import get_dtype_cache def scalar(space): dtype = 
get_dtype_cache(space).w_float64dtype @@ -77,7 +77,7 @@ def test_FromAny_scalar(self, space, api): a0 = scalar(space) - assert a0.implementation.get_scalar_value().value == 10. + assert a0.get_scalar_value().value == 10. a = api._PyArray_FromAny(a0, NULL, 0, 0, 0, NULL) assert api._PyArray_NDIM(a) == 0 diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py --- a/pypy/module/micronumpy/__init__.py +++ b/pypy/module/micronumpy/__init__.py @@ -4,24 +4,24 @@ class MultiArrayModule(MixedModule): appleveldefs = {'arange': 'app_numpy.arange'} interpleveldefs = { - 'ndarray': 'interp_numarray.W_NDimArray', - 'dtype': 'interp_dtype.W_Dtype', + 'ndarray': 'ndarray.W_NDimArray', + 'dtype': 'descriptor.W_Dtype', - 'array': 'interp_numarray.array', - 'zeros': 'interp_numarray.zeros', - 'empty': 'interp_numarray.zeros', - 'empty_like': 'interp_numarray.empty_like', - '_reconstruct' : 'interp_numarray._reconstruct', - 'scalar' : 'interp_numarray.build_scalar', - 'dot': 'interp_arrayops.dot', - 'fromstring': 'interp_support.fromstring', - 'flatiter': 'interp_flatiter.W_FlatIterator', - 'concatenate': 'interp_arrayops.concatenate', - 'where': 'interp_arrayops.where', - 'count_nonzero': 'interp_arrayops.count_nonzero', + 'array': 'ctors.array', + 'zeros': 'ctors.zeros', + 'empty': 'ctors.zeros', + 'empty_like': 'ctors.empty_like', + '_reconstruct' : 'ndarray._reconstruct', + 'scalar' : 'ctors.build_scalar', + 'dot': 'arrayops.dot', + 'fromstring': 'ctors.fromstring', + 'flatiter': 'flatiter.W_FlatIterator', + 'concatenate': 'arrayops.concatenate', + 'where': 'arrayops.where', + 'count_nonzero': 'arrayops.count_nonzero', 'set_string_function': 'appbridge.set_string_function', - 'typeinfo': 'interp_dtype.get_dtype_cache(space).w_typeinfo', + 'typeinfo': 'descriptor.get_dtype_cache(space).w_typeinfo', } @@ -107,7 +107,7 @@ ('real', 'real'), ('imag', 'imag'), ]: - interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl + interpleveldefs[exposed] = "ufuncs.get(space).%s" % impl class Module(MixedModule): diff --git a/pypy/module/micronumpy/appbridge.py b/pypy/module/micronumpy/appbridge.py --- a/pypy/module/micronumpy/appbridge.py +++ b/pypy/module/micronumpy/appbridge.py @@ -1,5 +1,6 @@ from rpython.rlib.objectmodel import specialize + class AppBridgeCache(object): w__mean = None w__var = None @@ -20,6 +21,7 @@ setattr(self, 'w_' + name, w_method) return space.call_args(w_method, args) + def set_string_function(space, w_f, w_repr): cache = get_appbridge_cache(space) if space.is_true(w_repr): @@ -27,5 +29,6 @@ else: cache.w_array_str = w_f + def get_appbridge_cache(space): return space.fromcache(AppBridgeCache) diff --git a/pypy/module/micronumpy/arrayimpl/__init__.py b/pypy/module/micronumpy/arrayimpl/__init__.py deleted file mode 100644 diff --git a/pypy/module/micronumpy/arrayimpl/base.py b/pypy/module/micronumpy/arrayimpl/base.py deleted file mode 100644 --- a/pypy/module/micronumpy/arrayimpl/base.py +++ /dev/null @@ -1,20 +0,0 @@ - -class BaseArrayImplementation(object): - def is_scalar(self): - return False - - def base(self): - raise NotImplementedError - - def create_iter(self, shape=None, backward_broadcast=False, require_index=False): - raise NotImplementedError - -class BaseArrayIterator(object): - def next(self): - raise NotImplementedError # purely abstract base class - - def setitem(self, elem): - raise NotImplementedError - - def set_scalar_object(self, value): - raise NotImplementedError # works only on scalars diff --git 
a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py deleted file mode 100644 --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ /dev/null @@ -1,504 +0,0 @@ -from pypy.module.micronumpy.arrayimpl import base, scalar -from pypy.module.micronumpy import support, loop, iter -from pypy.module.micronumpy.base import convert_to_array, W_NDimArray,\ - ArrayArgumentException -from pypy.module.micronumpy.strides import calc_new_strides, shape_agreement,\ - calculate_broadcast_strides, calculate_dot_strides -from pypy.module.micronumpy.iter import Chunk, Chunks, NewAxisChunk, RecordChunk -from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.buffer import RWBuffer -from rpython.rlib import jit -from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rlib.rawstorage import free_raw_storage, raw_storage_getitem,\ - raw_storage_setitem, RAW_STORAGE -from rpython.rlib.debug import make_sure_not_resized - - -class BaseConcreteArray(base.BaseArrayImplementation): - start = 0 - parent = None - - # JIT hints that length of all those arrays is a constant - - def get_shape(self): - shape = self.shape - jit.hint(len(shape), promote=True) - return shape - - def get_strides(self): - strides = self.strides - jit.hint(len(strides), promote=True) - return strides - - def get_backstrides(self): - backstrides = self.backstrides - jit.hint(len(backstrides), promote=True) - return backstrides - - def getitem(self, index): - return self.dtype.itemtype.read(self, index, 0) - - def getitem_bool(self, index): - return self.dtype.itemtype.read_bool(self, index, 0) - - def setitem(self, index, value): - self.dtype.itemtype.store(self, index, 0, value) - - def setslice(self, space, arr): - impl = arr.implementation - if impl.is_scalar(): - self.fill(space, impl.get_scalar_value()) - return - shape = shape_agreement(space, self.get_shape(), arr) - if impl.storage == self.storage: - impl = impl.copy(space) - loop.setslice(space, shape, self, impl) - - def get_size(self): - return self.size // self.dtype.elsize - - def get_storage_size(self): - return self.size - - def reshape(self, space, orig_array, new_shape): - # Since we got to here, prod(new_shape) == self.size - new_strides = None - if self.size > 0: - new_strides = calc_new_strides(new_shape, self.get_shape(), - self.get_strides(), self.order) - if new_strides: - # We can create a view, strides somehow match up. 
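
The reshape() being deleted above only builds a SliceArray view when calc_new_strides finds strides that fit the new shape, and otherwise returns None so the caller falls back to a copy. The same view-versus-copy distinction is visible at the numpy level (standard numpy behaviour, shown here only for illustration):

    import numpy as np

    a = np.arange(12)
    v = a.reshape(3, 4)        # strides match up: this is a view
    v[0, 0] = 99
    assert a[0] == 99

    b = np.arange(12).reshape(3, 4).T   # transposed, not C-contiguous
    c = b.reshape(12)          # no stride arrangement fits, so this copies
    c[0] = -1
    assert b[0, 0] != -1       # the original is untouched
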
- ndims = len(new_shape) - new_backstrides = [0] * ndims - for nd in range(ndims): - new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] - assert isinstance(orig_array, W_NDimArray) or orig_array is None - return SliceArray(self.start, new_strides, new_backstrides, - new_shape, self, orig_array) - else: - if self.get_size() == 1 and len(new_shape) == 0: - return scalar.Scalar(self.dtype, self.getitem(0)) - return None - - def get_view(self, space, orig_array, dtype, new_shape): - strides, backstrides = support.calc_strides(new_shape, dtype, - self.order) - return SliceArray(self.start, strides, backstrides, new_shape, - self, orig_array, dtype=dtype) - - def get_real(self, space, orig_array): - strides = self.get_strides() - backstrides = self.get_backstrides() - if self.dtype.is_complex(): - dtype = self.dtype.get_float_dtype(space) - return SliceArray(self.start, strides, backstrides, - self.get_shape(), self, orig_array, dtype=dtype) - return SliceArray(self.start, strides, backstrides, - self.get_shape(), self, orig_array) - - def set_real(self, space, orig_array, w_value): - tmp = self.get_real(space, orig_array) - tmp.setslice(space, convert_to_array(space, w_value)) - - def get_imag(self, space, orig_array): - strides = self.get_strides() - backstrides = self.get_backstrides() - if self.dtype.is_complex(): - dtype = self.dtype.get_float_dtype(space) - return SliceArray(self.start + dtype.elsize, strides, - backstrides, self.get_shape(), self, orig_array, dtype=dtype) - impl = NonWritableArray(self.get_shape(), self.dtype, self.order, strides, - backstrides) - if not self.dtype.is_flexible(): - impl.fill(space, self.dtype.box(0)) - return impl - - def set_imag(self, space, orig_array, w_value): - tmp = self.get_imag(space, orig_array) - tmp.setslice(space, convert_to_array(space, w_value)) - - # -------------------- applevel get/setitem ----------------------- - - @jit.unroll_safe - def _lookup_by_index(self, space, view_w): - item = self.start - strides = self.get_strides() - for i, w_index in enumerate(view_w): - if space.isinstance_w(w_index, space.w_slice): - raise IndexError - idx = support.index_w(space, w_index) - if idx < 0: - idx = self.get_shape()[i] + idx - if idx < 0 or idx >= self.get_shape()[i]: - raise oefmt(space.w_IndexError, - "index %d is out of bounds for axis %d with size " - "%d", idx, i, self.get_shape()[i]) - item += idx * strides[i] - return item - - @jit.unroll_safe - def _lookup_by_unwrapped_index(self, space, lst): - item = self.start - shape = self.get_shape() - strides = self.get_strides() - assert len(lst) == len(shape) - for i, idx in enumerate(lst): - if idx < 0: - idx = shape[i] + idx - if idx < 0 or idx >= shape[i]: - raise oefmt(space.w_IndexError, - "index %d is out of bounds for axis %d with size " - "%d", idx, i, self.get_shape()[i]) - item += idx * strides[i] - return item - - def getitem_index(self, space, index): - return self.getitem(self._lookup_by_unwrapped_index(space, index)) - - def setitem_index(self, space, index, value): - self.setitem(self._lookup_by_unwrapped_index(space, index), value) - - @jit.unroll_safe - def _single_item_index(self, space, w_idx): - """ Return an index of single item if possible, otherwise raises - IndexError - """ - if (space.isinstance_w(w_idx, space.w_str) or - space.isinstance_w(w_idx, space.w_slice) or - space.is_w(w_idx, space.w_None)): - raise IndexError - if isinstance(w_idx, W_NDimArray) and not isinstance(w_idx.implementation, scalar.Scalar): - raise ArrayArgumentException - shape = 
self.get_shape() - shape_len = len(shape) - view_w = None - if space.isinstance_w(w_idx, space.w_list): - raise ArrayArgumentException - if space.isinstance_w(w_idx, space.w_tuple): - view_w = space.fixedview(w_idx) - if len(view_w) < shape_len: - raise IndexError - if len(view_w) > shape_len: - # we can allow for one extra None - count = len(view_w) - for w_item in view_w: - if space.is_w(w_item, space.w_None): - count -= 1 - if count == shape_len: - raise IndexError # but it's still not a single item - raise OperationError(space.w_IndexError, - space.wrap("invalid index")) - # check for arrays - for w_item in view_w: - if (isinstance(w_item, W_NDimArray) or - space.isinstance_w(w_item, space.w_list)): - raise ArrayArgumentException - return self._lookup_by_index(space, view_w) - if shape_len > 1: - raise IndexError - idx = support.index_w(space, w_idx) - return self._lookup_by_index(space, [space.wrap(idx)]) - - @jit.unroll_safe - def _prepare_slice_args(self, space, w_idx): - if space.isinstance_w(w_idx, space.w_str): - idx = space.str_w(w_idx) - dtype = self.dtype - if not dtype.is_record() or idx not in dtype.fields: - raise OperationError(space.w_ValueError, space.wrap( - "field named %s not found" % idx)) - return RecordChunk(idx) - if (space.isinstance_w(w_idx, space.w_int) or - space.isinstance_w(w_idx, space.w_slice)): - return Chunks([Chunk(*space.decode_index4(w_idx, self.get_shape()[0]))]) - elif isinstance(w_idx, W_NDimArray) and \ - isinstance(w_idx.implementation, scalar.Scalar): - w_idx = w_idx.get_scalar_value().item(space) - if not space.isinstance_w(w_idx, space.w_int) and \ - not space.isinstance_w(w_idx, space.w_bool): - raise OperationError(space.w_IndexError, space.wrap( - "arrays used as indices must be of integer (or boolean) type")) - return Chunks([Chunk(*space.decode_index4(w_idx, self.get_shape()[0]))]) - elif space.is_w(w_idx, space.w_None): - return Chunks([NewAxisChunk()]) - result = [] - i = 0 - for w_item in space.fixedview(w_idx): - if space.is_w(w_item, space.w_None): - result.append(NewAxisChunk()) - else: - result.append(Chunk(*space.decode_index4(w_item, - self.get_shape()[i]))) - i += 1 - return Chunks(result) - - def descr_getitem(self, space, orig_arr, w_index): - try: - item = self._single_item_index(space, w_index) - return self.getitem(item) - except IndexError: - # not a single result - chunks = self._prepare_slice_args(space, w_index) - return chunks.apply(space, orig_arr) - - def descr_setitem(self, space, orig_arr, w_index, w_value): - try: - item = self._single_item_index(space, w_index) - self.setitem(item, self.dtype.coerce(space, w_value)) - except IndexError: - w_value = convert_to_array(space, w_value) - chunks = self._prepare_slice_args(space, w_index) - view = chunks.apply(space, orig_arr) - view.implementation.setslice(space, w_value) - - def transpose(self, orig_array): - if len(self.get_shape()) < 2: - return self - strides = [] - backstrides = [] - shape = [] - for i in range(len(self.get_shape()) - 1, -1, -1): - strides.append(self.get_strides()[i]) - backstrides.append(self.get_backstrides()[i]) - shape.append(self.get_shape()[i]) - return SliceArray(self.start, strides, - backstrides, shape, self, orig_array) - - def copy(self, space): - strides, backstrides = support.calc_strides(self.get_shape(), self.dtype, - self.order) - impl = ConcreteArray(self.get_shape(), self.dtype, self.order, strides, - backstrides) - return loop.setslice(space, self.get_shape(), impl, self) - - def create_axis_iter(self, shape, dim, cum): - 
return iter.AxisIterator(self, shape, dim, cum) - - def create_dot_iter(self, shape, skip): - r = calculate_dot_strides(self.get_strides(), self.get_backstrides(), - shape, skip) - return iter.MultiDimViewIterator(self, self.start, r[0], r[1], shape) - - def swapaxes(self, space, orig_arr, axis1, axis2): - shape = self.get_shape()[:] - strides = self.get_strides()[:] - backstrides = self.get_backstrides()[:] - shape[axis1], shape[axis2] = shape[axis2], shape[axis1] - strides[axis1], strides[axis2] = strides[axis2], strides[axis1] - backstrides[axis1], backstrides[axis2] = backstrides[axis2], backstrides[axis1] - return W_NDimArray.new_slice(space, self.start, strides, - backstrides, shape, self, orig_arr) - - def nonzero(self, space, index_type): - s = loop.count_all_true_concrete(self) - box = index_type.itemtype.box - nd = len(self.get_shape()) - w_res = W_NDimArray.from_shape(space, [s, nd], index_type) - loop.nonzero(w_res, self, box) - w_res = w_res.implementation.swapaxes(space, w_res, 0, 1) - l_w = [w_res.descr_getitem(space, space.wrap(d)) for d in range(nd)] - return space.newtuple(l_w) - - def get_storage_as_int(self, space): - return rffi.cast(lltype.Signed, self.storage) + self.start - - def get_storage(self): - return self.storage - - def get_buffer(self, space): - return ArrayBuffer(self) - - def astype(self, space, dtype): - strides, backstrides = support.calc_strides(self.get_shape(), dtype, - self.order) - impl = ConcreteArray(self.get_shape(), dtype, self.order, - strides, backstrides) - loop.setslice(space, impl.get_shape(), impl, self) - return impl - - -class ConcreteArrayNotOwning(BaseConcreteArray): - def __init__(self, shape, dtype, order, strides, backstrides, storage): - - make_sure_not_resized(shape) - make_sure_not_resized(strides) - make_sure_not_resized(backstrides) - self.shape = shape - self.size = support.product(shape) * dtype.elsize - self.order = order - self.dtype = dtype - self.strides = strides - self.backstrides = backstrides - self.storage = storage - - def create_iter(self, shape=None, backward_broadcast=False, require_index=False): - if shape is not None and \ - support.product(shape) > support.product(self.get_shape()): - r = calculate_broadcast_strides(self.get_strides(), - self.get_backstrides(), - self.get_shape(), shape, - backward_broadcast) - return iter.MultiDimViewIterator(self, self.start, - r[0], r[1], shape) - if not require_index: - return iter.ConcreteArrayIterator(self) - if len(self.get_shape()) == 1: - return iter.OneDimViewIterator(self, self.start, - self.get_strides(), - self.get_shape()) - return iter.MultiDimViewIterator(self, self.start, - self.get_strides(), - self.get_backstrides(), - self.get_shape()) - - def fill(self, space, box): - self.dtype.itemtype.fill(self.storage, self.dtype.elsize, - box, 0, self.size, 0) - - def set_shape(self, space, orig_array, new_shape): - strides, backstrides = support.calc_strides(new_shape, self.dtype, - self.order) - return SliceArray(0, strides, backstrides, new_shape, self, - orig_array) - - def set_dtype(self, space, dtype): - self.dtype = dtype - - def argsort(self, space, w_axis): - from pypy.module.micronumpy.arrayimpl.sort import argsort_array - return argsort_array(self, space, w_axis) - - def sort(self, space, w_axis, w_order): - from pypy.module.micronumpy.arrayimpl.sort import sort_array - return sort_array(self, space, w_axis, w_order) - - def base(self): - return None - - -class ConcreteArray(ConcreteArrayNotOwning): - def __init__(self, shape, dtype, order, strides, 
backstrides, storage=lltype.nullptr(RAW_STORAGE)): - null_storage = lltype.nullptr(RAW_STORAGE) - ConcreteArrayNotOwning.__init__(self, shape, dtype, order, strides, backstrides, - null_storage) - if storage == lltype.nullptr(RAW_STORAGE): - self.storage = dtype.itemtype.malloc(self.size) - else: - self.storage = storage - - def __del__(self): - free_raw_storage(self.storage, track_allocation=False) - -class ConcreteArrayWithBase(ConcreteArrayNotOwning): - def __init__(self, shape, dtype, order, strides, backstrides, storage, orig_base): - ConcreteArrayNotOwning.__init__(self, shape, dtype, order, - strides, backstrides, storage) - self.orig_base = orig_base - - def base(self): - return self.orig_base - - -class ConcreteNonWritableArrayWithBase(ConcreteArrayWithBase): - def descr_setitem(self, space, orig_array, w_index, w_value): - raise OperationError(space.w_ValueError, space.wrap( - "assignment destination is read-only")) - - -class NonWritableArray(ConcreteArray): - def descr_setitem(self, space, orig_array, w_index, w_value): - raise OperationError(space.w_ValueError, space.wrap( - "assignment destination is read-only")) - - -class SliceArray(BaseConcreteArray): - def __init__(self, start, strides, backstrides, shape, parent, orig_arr, - dtype=None): - self.strides = strides - self.backstrides = backstrides - self.shape = shape - if dtype is None: - dtype = parent.dtype - if isinstance(parent, SliceArray): - parent = parent.parent # one level only - self.parent = parent - self.storage = parent.storage - self.order = parent.order - self.dtype = dtype - self.size = support.product(shape) * self.dtype.elsize - self.start = start - self.orig_arr = orig_arr - - def base(self): - return self.orig_arr - - def fill(self, space, box): - loop.fill(self, box.convert_to(space, self.dtype)) - - def create_iter(self, shape=None, backward_broadcast=False, require_index=False): - if shape is not None and \ - support.product(shape) > support.product(self.get_shape()): - r = calculate_broadcast_strides(self.get_strides(), - self.get_backstrides(), - self.get_shape(), shape, - backward_broadcast) - return iter.MultiDimViewIterator(self, self.start, - r[0], r[1], shape) - if len(self.get_shape()) == 1: - return iter.OneDimViewIterator(self, self.start, - self.get_strides(), - self.get_shape()) - return iter.MultiDimViewIterator(self, self.start, - self.get_strides(), - self.get_backstrides(), - self.get_shape()) - - def set_shape(self, space, orig_array, new_shape): - if len(self.get_shape()) < 2 or self.size == 0: - # TODO: this code could be refactored into calc_strides - # but then calc_strides would have to accept a stepping factor - strides = [] - backstrides = [] - dtype = self.dtype - s = self.get_strides()[0] // dtype.elsize - if self.order == 'C': - new_shape.reverse() - for sh in new_shape: - strides.append(s * dtype.elsize) - backstrides.append(s * (sh - 1) * dtype.elsize) - s *= max(1, sh) - if self.order == 'C': - strides.reverse() - backstrides.reverse() - new_shape.reverse() - return SliceArray(self.start, strides, backstrides, new_shape, - self, orig_array) - new_strides = calc_new_strides(new_shape, self.get_shape(), - self.get_strides(), - self.order) - if new_strides is None: - raise OperationError(space.w_AttributeError, space.wrap( - "incompatible shape for a non-contiguous array")) - new_backstrides = [0] * len(new_shape) - for nd in range(len(new_shape)): - new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] - return SliceArray(self.start, new_strides, new_backstrides, 
new_shape, - self, orig_array) - - -class ArrayBuffer(RWBuffer): - def __init__(self, impl): - self.impl = impl - - def getitem(self, item): - return raw_storage_getitem(lltype.Char, self.impl.storage, item) - - def setitem(self, item, v): - raw_storage_setitem(self.impl.storage, item, - rffi.cast(lltype.Char, v)) - - def getlength(self): - return self.impl.size - - def get_raw_address(self): - return self.impl.storage diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py deleted file mode 100644 --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ /dev/null @@ -1,209 +0,0 @@ -from pypy.module.micronumpy.arrayimpl import base -from pypy.module.micronumpy.base import W_NDimArray, convert_to_array -from pypy.module.micronumpy import support -from pypy.interpreter.error import OperationError - -class ScalarIterator(base.BaseArrayIterator): - def __init__(self, v): - self.v = v - self.called_once = False - - def next(self): - self.called_once = True - - def next_skip_x(self, n): - self.called_once = True - - def getitem(self): - return self.v.get_scalar_value() - - def getitem_bool(self): - return self.v.dtype.itemtype.bool(self.v.value) - - def setitem(self, v): - self.v.set_scalar_value(v) - - def done(self): - return self.called_once - - def reset(self): - pass - -class Scalar(base.BaseArrayImplementation): - def __init__(self, dtype, value=None): - self.dtype = dtype - self.value = value - - def is_scalar(self): - return True - - def get_shape(self): - return [] - - def get_strides(self): - return [] - - def get_backstrides(self): - return [] - - def create_iter(self, shape=None, backward_broadcast=False, require_index=False): - return ScalarIterator(self) - - def get_scalar_value(self): - return self.value - - def set_scalar_value(self, w_val): - self.value = w_val - - def copy(self, space): - scalar = Scalar(self.dtype) - scalar.value = self.value - return scalar - - def get_size(self): - return 1 - - def transpose(self, _): - return self - - def get_view(self, space, orig_array, dtype, new_shape): - scalar = Scalar(dtype) - if dtype.is_str_or_unicode(): - scalar.value = dtype.coerce(space, space.wrap(self.value.raw_str())) - elif dtype.is_record(): - raise OperationError(space.w_NotImplementedError, space.wrap( - "viewing scalar as record not implemented")) - else: - scalar.value = dtype.itemtype.runpack_str(space, self.value.raw_str()) - return scalar - - def get_real(self, space, orig_array): - if self.dtype.is_complex(): - scalar = Scalar(self.dtype.get_float_dtype(space)) - scalar.value = self.value.convert_real_to(scalar.dtype) - return scalar - return self - - def set_real(self, space, orig_array, w_val): - w_arr = convert_to_array(space, w_val) - if len(w_arr.get_shape()) > 0: - raise OperationError(space.w_ValueError, space.wrap( - "could not broadcast input array from shape " + - "(%s) into shape ()" % ( - ','.join([str(x) for x in w_arr.get_shape()],)))) - if self.dtype.is_complex(): - dtype = self.dtype.get_float_dtype(space) - self.value = self.dtype.itemtype.composite( - w_arr.get_scalar_value().convert_to(space, dtype), - self.value.convert_imag_to(dtype)) - else: - self.value = w_arr.get_scalar_value() - - def get_imag(self, space, orig_array): - if self.dtype.is_complex(): - scalar = Scalar(self.dtype.get_float_dtype(space)) - scalar.value = self.value.convert_imag_to(scalar.dtype) - return scalar - scalar = Scalar(self.dtype) - scalar.value = scalar.dtype.coerce(space, None) - return scalar - - def set_imag(self, space, 
orig_array, w_val): - #Only called on complex dtype - assert self.dtype.is_complex() - w_arr = convert_to_array(space, w_val) - if len(w_arr.get_shape()) > 0: - raise OperationError(space.w_ValueError, space.wrap( - "could not broadcast input array from shape " + - "(%s) into shape ()" % ( - ','.join([str(x) for x in w_arr.get_shape()],)))) - dtype = self.dtype.get_float_dtype(space) - self.value = self.dtype.itemtype.composite( - self.value.convert_real_to(dtype), - w_arr.get_scalar_value().convert_to(space, dtype)) - - def descr_getitem(self, space, _, w_idx): - if space.isinstance_w(w_idx, space.w_tuple): - if space.len_w(w_idx) == 0: - return self.get_scalar_value() - elif space.isinstance_w(w_idx, space.w_str): - if self.dtype.is_record(): - w_val = self.value.descr_getitem(space, w_idx) - return convert_to_array(space, w_val) - elif space.is_none(w_idx): - new_shape = [1] - arr = W_NDimArray.from_shape(space, new_shape, self.dtype) - arr_iter = arr.create_iter(new_shape) - arr_iter.setitem(self.value) - return arr - raise OperationError(space.w_IndexError, - space.wrap("0-d arrays can't be indexed")) - - def getitem_index(self, space, idx): - raise OperationError(space.w_IndexError, - space.wrap("0-d arrays can't be indexed")) - - def descr_setitem(self, space, _, w_idx, w_val): - if space.isinstance_w(w_idx, space.w_tuple): - if space.len_w(w_idx) == 0: - return self.set_scalar_value(self.dtype.coerce(space, w_val)) - elif space.isinstance_w(w_idx, space.w_str): - if self.dtype.is_record(): - return self.value.descr_setitem(space, w_idx, w_val) - raise OperationError(space.w_IndexError, - space.wrap("0-d arrays can't be indexed")) - - def setitem_index(self, space, idx, w_val): - raise OperationError(space.w_IndexError, - space.wrap("0-d arrays can't be indexed")) - - def set_shape(self, space, orig_array, new_shape): - if not new_shape: - return self - if support.product(new_shape) == 1: - arr = W_NDimArray.from_shape(space, new_shape, self.dtype) - arr_iter = arr.create_iter(new_shape) - arr_iter.setitem(self.value) - return arr.implementation - raise OperationError(space.w_ValueError, space.wrap( - "total size of the array must be unchanged")) - - def set_dtype(self, space, dtype): - self.value = self.value.convert_to(space, dtype) - self.dtype = dtype - - def reshape(self, space, orig_array, new_shape): - return self.set_shape(space, orig_array, new_shape) - - def create_axis_iter(self, shape, dim, cum): - raise Exception("axis iter should not happen on scalar") - - def swapaxes(self, space, orig_array, axis1, axis2): - raise Exception("should not be called") - - def nonzero(self, space, index_type): - s = self.dtype.itemtype.bool(self.value) - w_res = W_NDimArray.from_shape(space, [s], index_type) - if s == 1: - w_res.implementation.setitem(0, index_type.itemtype.box(0)) - return space.newtuple([w_res]) - - def fill(self, space, w_value): - self.value = w_value - - def get_storage_as_int(self, space): - raise OperationError(space.w_ValueError, - space.wrap("scalars have no address")) - - def argsort(self, space, w_axis): - return space.wrap(0) - - def astype(self, space, dtype): - raise Exception("should not be called") - - def base(self): - return None - - def get_buffer(self, space): - raise OperationError(space.w_ValueError, space.wrap( - "cannot point buffer to a scalar")) diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py deleted file mode 100644 --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ /dev/null @@ -1,361 +0,0 @@ - 
-""" This is the implementation of various sorting routines in numpy. It's here -because it only makes sense on a concrete array -""" - -from rpython.rtyper.lltypesystem import rffi, lltype -from rpython.rlib.listsort import make_timsort_class -from rpython.rlib.rawstorage import raw_storage_getitem, raw_storage_setitem, \ - free_raw_storage, alloc_raw_storage -from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.rarithmetic import widen -from rpython.rlib.objectmodel import specialize -from pypy.interpreter.error import OperationError, oefmt -from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy import interp_dtype, types, constants as NPY -from pypy.module.micronumpy.iter import AxisIterator - -INT_SIZE = rffi.sizeof(lltype.Signed) - -def make_argsort_function(space, itemtype, comp_type, count=1): - TP = itemtype.T - step = rffi.sizeof(TP) - - class Repr(object): - def __init__(self, index_stride_size, stride_size, size, values, - indexes, index_start, start): - self.index_stride_size = index_stride_size - self.stride_size = stride_size - self.index_start = index_start - self.start = start - self.size = size - self.values = values - self.indexes = indexes - - def getitem(self, item): - if count < 2: - v = raw_storage_getitem(TP, self.values, item * self.stride_size - + self.start) - else: - v = [] - for i in range(count): - _v = raw_storage_getitem(TP, self.values, item * self.stride_size - + self.start + step * i) - v.append(_v) - if comp_type == 'int': - v = widen(v) - elif comp_type == 'float': - v = float(v) - elif comp_type == 'complex': - v = [float(v[0]),float(v[1])] - else: - raise NotImplementedError('cannot reach') - return (v, raw_storage_getitem(lltype.Signed, self.indexes, - item * self.index_stride_size + - self.index_start)) - - def setitem(self, idx, item): - if count < 2: - raw_storage_setitem(self.values, idx * self.stride_size + - self.start, rffi.cast(TP, item[0])) - else: - i = 0 - for val in item[0]: - raw_storage_setitem(self.values, idx * self.stride_size + - self.start + i*step, rffi.cast(TP, val)) - i += 1 - raw_storage_setitem(self.indexes, idx * self.index_stride_size + - self.index_start, item[1]) - - class ArgArrayRepWithStorage(Repr): - def __init__(self, index_stride_size, stride_size, size): - start = 0 - dtype = interp_dtype.get_dtype_cache(space).w_longdtype - indexes = dtype.itemtype.malloc(size * dtype.elsize) - values = alloc_raw_storage(size * stride_size, - track_allocation=False) - Repr.__init__(self, dtype.elsize, stride_size, - size, values, indexes, start, start) - - def __del__(self): - free_raw_storage(self.indexes, track_allocation=False) - free_raw_storage(self.values, track_allocation=False) - - def arg_getitem(lst, item): - return lst.getitem(item) - - def arg_setitem(lst, item, value): - lst.setitem(item, value) - - def arg_length(lst): - return lst.size - - def arg_getitem_slice(lst, start, stop): - retval = ArgArrayRepWithStorage(lst.index_stride_size, lst.stride_size, - stop-start) - for i in range(stop-start): - retval.setitem(i, lst.getitem(i+start)) - return retval - - if count < 2: - def arg_lt(a, b): - # Does numpy do <= ? - return a[0] < b[0] or b[0] != b[0] and a[0] == a[0] - else: - def arg_lt(a, b): - for i in range(count): - if b[0][i] != b[0][i] and a[0][i] == a[0][i]: - return True - elif b[0][i] == b[0][i] and a[0][i] != a[0][i]: - return False - for i in range(count): - if a[0][i] < b[0][i]: - return True - elif a[0][i] > b[0][i]: - return False - # Does numpy do True? 
- return False - - ArgSort = make_timsort_class(arg_getitem, arg_setitem, arg_length, - arg_getitem_slice, arg_lt) - - def argsort(arr, space, w_axis, itemsize): - if w_axis is space.w_None: - # note that it's fine ot pass None here as we're not going - # to pass the result around (None is the link to base in slices) - if arr.get_size() > 0: - arr = arr.reshape(space, None, [arr.get_size()]) - axis = 0 - elif w_axis is None: - axis = -1 - else: - axis = space.int_w(w_axis) - # create array of indexes - dtype = interp_dtype.get_dtype_cache(space).w_longdtype - index_arr = W_NDimArray.from_shape(space, arr.get_shape(), dtype) - storage = index_arr.implementation.get_storage() - if len(arr.get_shape()) == 1: - for i in range(arr.get_size()): - raw_storage_setitem(storage, i * INT_SIZE, i) - r = Repr(INT_SIZE, itemsize, arr.get_size(), arr.get_storage(), - storage, 0, arr.start) - ArgSort(r).sort() - else: - shape = arr.get_shape() - if axis < 0: - axis = len(shape) + axis - if axis < 0 or axis >= len(shape): - raise OperationError(space.w_IndexError, space.wrap( - "Wrong axis %d" % axis)) - iterable_shape = shape[:axis] + [0] + shape[axis + 1:] - iter = AxisIterator(arr, iterable_shape, axis, False) - index_impl = index_arr.implementation - index_iter = AxisIterator(index_impl, iterable_shape, axis, False) - stride_size = arr.strides[axis] - index_stride_size = index_impl.strides[axis] - axis_size = arr.shape[axis] - while not iter.done(): - for i in range(axis_size): - raw_storage_setitem(storage, i * index_stride_size + - index_iter.offset, i) - r = Repr(index_stride_size, stride_size, axis_size, - arr.get_storage(), storage, index_iter.offset, iter.offset) - ArgSort(r).sort() - iter.next() - index_iter.next() - return index_arr - - return argsort - -def argsort_array(arr, space, w_axis): - cache = space.fromcache(ArgSortCache) # that populates ArgSortClasses - itemtype = arr.dtype.itemtype - for tp in all_types: - if isinstance(itemtype, tp[0]): - return cache._lookup(tp)(arr, space, w_axis, - itemtype.get_element_size()) - # XXX this should probably be changed - raise oefmt(space.w_NotImplementedError, - "sorting of non-numeric types '%s' is not implemented", - arr.dtype.get_name()) - -all_types = (types.all_float_types + types.all_complex_types + - types.all_int_types) -all_types = [i for i in all_types if not '_mixin_' in i[0].__dict__] -all_types = unrolling_iterable(all_types) - -def make_sort_function(space, itemtype, comp_type, count=1): - TP = itemtype.T - step = rffi.sizeof(TP) - - class Repr(object): - def __init__(self, stride_size, size, values, start): - self.stride_size = stride_size - self.start = start - self.size = size - self.values = values - - def getitem(self, item): - if count < 2: - v = raw_storage_getitem(TP, self.values, item * self.stride_size - + self.start) - else: - v = [] - for i in range(count): - _v = raw_storage_getitem(TP, self.values, item * self.stride_size - + self.start + step * i) - v.append(_v) - if comp_type == 'int': - v = widen(v) - elif comp_type == 'float': - v = float(v) - elif comp_type == 'complex': - v = [float(v[0]),float(v[1])] - else: - raise NotImplementedError('cannot reach') - return (v) - - def setitem(self, idx, item): - if count < 2: - raw_storage_setitem(self.values, idx * self.stride_size + - self.start, rffi.cast(TP, item)) - else: - i = 0 - for val in item: - raw_storage_setitem(self.values, idx * self.stride_size + - self.start + i*step, rffi.cast(TP, val)) - i += 1 - - class ArgArrayRepWithStorage(Repr): - def __init__(self, 
stride_size, size): - start = 0 - values = alloc_raw_storage(size * stride_size, - track_allocation=False) - Repr.__init__(self, stride_size, - size, values, start) - - def __del__(self): - free_raw_storage(self.values, track_allocation=False) - - def arg_getitem(lst, item): - return lst.getitem(item) - - def arg_setitem(lst, item, value): - lst.setitem(item, value) - - def arg_length(lst): - return lst.size - - def arg_getitem_slice(lst, start, stop): - retval = ArgArrayRepWithStorage(lst.stride_size, stop-start) - for i in range(stop-start): - retval.setitem(i, lst.getitem(i+start)) - return retval - - if count < 2: - def arg_lt(a, b): - # handles NAN and INF - return a < b or b != b and a == a - else: - def arg_lt(a, b): - for i in range(count): - if b[i] != b[i] and a[i] == a[i]: - return True - elif b[i] == b[i] and a[i] != a[i]: - return False - for i in range(count): - if a[i] < b[i]: - return True - elif a[i] > b[i]: - return False - # Does numpy do True? - return False - - ArgSort = make_timsort_class(arg_getitem, arg_setitem, arg_length, - arg_getitem_slice, arg_lt) - - def sort(arr, space, w_axis, itemsize): - if w_axis is space.w_None: - # note that it's fine to pass None here as we're not going - # to pass the result around (None is the link to base in slices) - arr = arr.reshape(space, None, [arr.get_size()]) - axis = 0 - elif w_axis is None: - axis = -1 - else: - axis = space.int_w(w_axis) - # create array of indexes - if len(arr.get_shape()) == 1: - r = Repr(itemsize, arr.get_size(), arr.get_storage(), - arr.start) - ArgSort(r).sort() - else: - shape = arr.get_shape() - if axis < 0: - axis = len(shape) + axis - if axis < 0 or axis >= len(shape): - raise OperationError(space.w_IndexError, space.wrap( - "Wrong axis %d" % axis)) - iterable_shape = shape[:axis] + [0] + shape[axis + 1:] - iter = AxisIterator(arr, iterable_shape, axis, False) - stride_size = arr.strides[axis] - axis_size = arr.shape[axis] - while not iter.done(): - r = Repr(stride_size, axis_size, arr.get_storage(), iter.offset) - ArgSort(r).sort() - iter.next() - - return sort - -def sort_array(arr, space, w_axis, w_order): - cache = space.fromcache(SortCache) # that populates SortClasses - itemtype = arr.dtype.itemtype - if arr.dtype.byteorder == NPY.OPPBYTE: - raise oefmt(space.w_NotImplementedError, - "sorting of non-native byteorder not supported yet") - for tp in all_types: - if isinstance(itemtype, tp[0]): - return cache._lookup(tp)(arr, space, w_axis, - itemtype.get_element_size()) - # XXX this should probably be changed - raise oefmt(space.w_NotImplementedError, - "sorting of non-numeric types '%s' is not implemented", - arr.dtype.get_name()) - -all_types = (types.all_float_types + types.all_complex_types + - types.all_int_types) -all_types = [i for i in all_types if not issubclass(i[0], types.Float16)] -all_types = unrolling_iterable(all_types) - -class ArgSortCache(object): - built = False - - def __init__(self, space): - if self.built: - return - self.built = True - cache = {} - for cls, it in all_types._items: - if it == 'complex': - cache[cls] = make_argsort_function(space, cls, it, 2) - else: - cache[cls] = make_argsort_function(space, cls, it) - self.cache = cache - self._lookup = specialize.memo()(lambda tp : cache[tp[0]]) - - -class SortCache(object): - built = False - - def __init__(self, space): - if self.built: - return - self.built = True - cache = {} - for cls, it in all_types._items: - if it == 'complex': - cache[cls] = make_sort_function(space, cls, it, 2) - else: - cache[cls] = 
make_sort_function(space, cls, it) - self.cache = cache - self._lookup = specialize.memo()(lambda tp : cache[tp[0]]) diff --git a/pypy/module/micronumpy/arrayimpl/voidbox.py b/pypy/module/micronumpy/arrayimpl/voidbox.py deleted file mode 100644 --- a/pypy/module/micronumpy/arrayimpl/voidbox.py +++ /dev/null @@ -1,12 +0,0 @@ - -from pypy.module.micronumpy.arrayimpl.base import BaseArrayImplementation -from rpython.rlib.rawstorage import free_raw_storage, alloc_raw_storage - -class VoidBoxStorage(BaseArrayImplementation): - def __init__(self, size, dtype): - self.storage = alloc_raw_storage(size) - self.dtype = dtype - self.size = size - - def __del__(self): - free_raw_storage(self.storage) diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/arrayops.py @@ -0,0 +1,281 @@ +from pypy.module.micronumpy.base import convert_to_array, W_NDimArray +from pypy.module.micronumpy import loop, descriptor, ufuncs +from pypy.module.micronumpy.strides import Chunk, Chunks, shape_agreement, \ + shape_agreement_multiple +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import unwrap_spec +from pypy.module.micronumpy.converters import clipmode_converter +from pypy.module.micronumpy import support +from pypy.module.micronumpy import constants as NPY + +def where(space, w_arr, w_x=None, w_y=None): + """where(condition, [x, y]) + + Return elements, either from `x` or `y`, depending on `condition`. + + If only `condition` is given, return ``condition.nonzero()``. + + Parameters + ---------- + condition : array_like, bool + When True, yield `x`, otherwise yield `y`. + x, y : array_like, optional + Values from which to choose. `x` and `y` need to have the same + shape as `condition`. + + Returns + ------- + out : ndarray or tuple of ndarrays + If both `x` and `y` are specified, the output array contains + elements of `x` where `condition` is True, and elements from + `y` elsewhere. + + If only `condition` is given, return the tuple + ``condition.nonzero()``, the indices where `condition` is True. + + See Also + -------- + nonzero, choose + + Notes + ----- + If `x` and `y` are given and input arrays are 1-D, `where` is + equivalent to:: + + [xv if c else yv for (c,xv,yv) in zip(condition,x,y)] + + Examples + -------- + >>> np.where([[True, False], [True, True]], + ... [[1, 2], [3, 4]], + ... [[9, 8], [7, 6]]) + array([[1, 8], + [3, 4]]) + + >>> np.where([[0, 1], [1, 0]]) + (array([0, 1]), array([1, 0])) + + >>> x = np.arange(9.).reshape(3, 3) + >>> np.where( x > 5 ) + (array([2, 2, 2]), array([0, 1, 2])) + >>> x[np.where( x > 3.0 )] # Note: result is 1D. + array([ 4., 5., 6., 7., 8.]) + >>> np.where(x < 5, x, -1) # Note: broadcasting. 
+ array([[ 0., 1., 2.], + [ 3., 4., -1.], + [-1., -1., -1.]]) + + + NOTE: support for not passing x and y is unsupported + """ + if space.is_none(w_y): + if space.is_none(w_x): + raise OperationError(space.w_NotImplementedError, space.wrap( + "1-arg where unsupported right now")) + raise OperationError(space.w_ValueError, space.wrap( + "Where should be called with either 1 or 3 arguments")) + if space.is_none(w_x): + raise OperationError(space.w_ValueError, space.wrap( + "Where should be called with either 1 or 3 arguments")) + arr = convert_to_array(space, w_arr) + x = convert_to_array(space, w_x) + y = convert_to_array(space, w_y) + if x.is_scalar() and y.is_scalar() and arr.is_scalar(): + if arr.get_dtype().itemtype.bool(arr.get_scalar_value()): + return x + return y + dtype = ufuncs.find_binop_result_dtype(space, x.get_dtype(), + y.get_dtype()) + shape = shape_agreement(space, arr.get_shape(), x) + shape = shape_agreement(space, shape, y) + out = W_NDimArray.from_shape(space, shape, dtype) + return loop.where(space, out, shape, arr, x, y, dtype) + +def dot(space, w_obj1, w_obj2, w_out=None): + w_arr = convert_to_array(space, w_obj1) + if w_arr.is_scalar(): + return convert_to_array(space, w_obj2).descr_dot(space, w_arr, w_out) + return w_arr.descr_dot(space, w_obj2, w_out) + + +def concatenate(space, w_args, w_axis=None): + args_w = space.listview(w_args) + if len(args_w) == 0: + raise oefmt(space.w_ValueError, "need at least one array to concatenate") + args_w = [convert_to_array(space, w_arg) for w_arg in args_w] + if w_axis is None: + w_axis = space.wrap(0) + if space.is_none(w_axis): + args_w = [w_arg.reshape(space, + space.newlist([w_arg.descr_get_size(space)])) + for w_arg in args_w] + w_axis = space.wrap(0) + dtype = args_w[0].get_dtype() + shape = args_w[0].get_shape()[:] + ndim = len(shape) + if ndim == 0: + raise oefmt(space.w_ValueError, + "zero-dimensional arrays cannot be concatenated") + axis = space.int_w(w_axis) + orig_axis = axis + if axis < 0: + axis = ndim + axis + if ndim == 1 and axis != 0: + axis = 0 + if axis < 0 or axis >= ndim: + raise oefmt(space.w_IndexError, "axis %d out of bounds [0, %d)", + orig_axis, ndim) + for arr in args_w[1:]: + if len(arr.get_shape()) != ndim: + raise OperationError(space.w_ValueError, space.wrap( + "all the input arrays must have same number of dimensions")) + for i, axis_size in enumerate(arr.get_shape()): + if i == axis: + shape[i] += axis_size + elif axis_size != shape[i]: + raise OperationError(space.w_ValueError, space.wrap( + "all the input array dimensions except for the " + "concatenation axis must match exactly")) + a_dt = arr.get_dtype() + if dtype.is_record() and a_dt.is_record(): + # Record types must match + for f in dtype.fields: + if f not in a_dt.fields or \ + dtype.fields[f] != a_dt.fields[f]: + raise OperationError(space.w_TypeError, + space.wrap("invalid type promotion")) + elif dtype.is_record() or a_dt.is_record(): + raise OperationError(space.w_TypeError, + space.wrap("invalid type promotion")) + dtype = ufuncs.find_binop_result_dtype(space, dtype, + arr.get_dtype()) + # concatenate does not handle ndarray subtypes, it always returns a ndarray + res = W_NDimArray.from_shape(space, shape, dtype, 'C') + chunks = [Chunk(0, i, 1, i) for i in shape] + axis_start = 0 + for arr in args_w: + if arr.get_shape()[axis] == 0: + continue + chunks[axis] = Chunk(axis_start, axis_start + arr.get_shape()[axis], 1, + arr.get_shape()[axis]) + Chunks(chunks).apply(space, res).implementation.setslice(space, arr) + axis_start += 
arr.get_shape()[axis] + return res + + at unwrap_spec(repeats=int) +def repeat(space, w_arr, repeats, w_axis): + arr = convert_to_array(space, w_arr) + if space.is_none(w_axis): + arr = arr.descr_flatten(space) + orig_size = arr.get_shape()[0] + shape = [arr.get_shape()[0] * repeats] + w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), w_instance=arr) + for i in range(repeats): + Chunks([Chunk(i, shape[0] - repeats + i, repeats, + orig_size)]).apply(space, w_res).implementation.setslice(space, arr) + else: + axis = space.int_w(w_axis) + shape = arr.get_shape()[:] + chunks = [Chunk(0, i, 1, i) for i in shape] + orig_size = shape[axis] + shape[axis] *= repeats + w_res = W_NDimArray.from_shape(space, shape, arr.get_dtype(), w_instance=arr) + for i in range(repeats): + chunks[axis] = Chunk(i, shape[axis] - repeats + i, repeats, + orig_size) + Chunks(chunks).apply(space, w_res).implementation.setslice(space, arr) + return w_res + +def count_nonzero(space, w_obj): + return space.wrap(loop.count_all_true(convert_to_array(space, w_obj))) + +def choose(space, w_arr, w_choices, w_out, w_mode): + arr = convert_to_array(space, w_arr) + choices = [convert_to_array(space, w_item) for w_item + in space.listview(w_choices)] + if not choices: + raise OperationError(space.w_ValueError, + space.wrap("choices list cannot be empty")) + if space.is_none(w_out): + w_out = None + elif not isinstance(w_out, W_NDimArray): + raise OperationError(space.w_TypeError, space.wrap( + "return arrays must be of ArrayType")) + shape = shape_agreement_multiple(space, choices + [w_out]) + out = descriptor.dtype_agreement(space, choices, shape, w_out) + dtype = out.get_dtype() + mode = clipmode_converter(space, w_mode) + loop.choose(space, arr, choices, shape, dtype, out, mode) + return out + +def put(space, w_arr, w_indices, w_values, w_mode): + arr = convert_to_array(space, w_arr) + mode = clipmode_converter(space, w_mode) + + if not w_indices: + raise OperationError(space.w_ValueError, + space.wrap("indice list cannot be empty")) + if not w_values: + raise OperationError(space.w_ValueError, + space.wrap("value list cannot be empty")) + + dtype = arr.get_dtype() + + if space.isinstance_w(w_indices, space.w_list): + indices = space.listview(w_indices) + else: + indices = [w_indices] + + if space.isinstance_w(w_values, space.w_list): + values = space.listview(w_values) + else: + values = [w_values] + + v_idx = 0 + for idx in indices: + index = support.index_w(space, idx) + + if index < 0 or index >= arr.get_size(): + if mode == NPY.RAISE: + raise OperationError(space.w_IndexError, space.wrap( + "index %d is out of bounds for axis 0 with size %d" % (index, arr.get_size()))) + elif mode == NPY.WRAP: + index = index % arr.get_size() + elif mode == NPY.CLIP: + if index < 0: + index = 0 + else: + index = arr.get_size() - 1 + else: + assert False + + value = values[v_idx] + + if v_idx + 1 < len(values): + v_idx += 1 + + arr.setitem(space, [index], dtype.coerce(space, value)) + +def diagonal(space, arr, offset, axis1, axis2): + shape = arr.get_shape() + shapelen = len(shape) + if offset < 0: + offset = -offset + axis1, axis2 = axis2, axis1 + size = min(shape[axis1], shape[axis2] - offset) + dtype = arr.dtype + if axis1 < axis2: + shape = (shape[:axis1] + shape[axis1 + 1:axis2] + + shape[axis2 + 1:] + [size]) + else: + shape = (shape[:axis2] + shape[axis2 + 1:axis1] + + shape[axis1 + 1:] + [size]) + out = W_NDimArray.from_shape(space, shape, dtype) + if size == 0: + return out + if shapelen == 2: + # simple case + 
loop.diagonal_simple(space, arr, out, offset, axis1, axis2, size) + else: + loop.diagonal_array(space, arr, out, offset, axis1, axis2, shape) + return out diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -1,16 +1,9 @@ - from pypy.interpreter.error import OperationError from pypy.interpreter.baseobjspace import W_Root from rpython.tool.pairtype import extendabletype from pypy.module.micronumpy.support import calc_strides -from pypy.module.micronumpy.arrayimpl.base import BaseArrayImplementation -def issequence_w(space, w_obj): - return (space.isinstance_w(w_obj, space.w_tuple) or - space.isinstance_w(w_obj, space.w_list) or - isinstance(w_obj, W_NDimArray)) - def wrap_impl(space, w_cls, w_instance, impl): if w_cls is None or space.is_w(w_cls, space.gettypefor(W_NDimArray)): w_ret = W_NDimArray(impl) @@ -21,6 +14,7 @@ space.call_method(w_ret, '__array_finalize__', w_instance) return w_ret + class ArrayArgumentException(Exception): pass @@ -29,21 +23,17 @@ __metaclass__ = extendabletype def __init__(self, implementation): - assert isinstance(implementation, BaseArrayImplementation) + from pypy.module.micronumpy.concrete import BaseConcreteArray + assert isinstance(implementation, BaseConcreteArray) assert isinstance(self, W_NDimArray) self.implementation = implementation @staticmethod def from_shape(space, shape, dtype, order='C', w_instance=None): - from pypy.module.micronumpy.arrayimpl import concrete, scalar - - if not shape: - w_val = dtype.base.coerce(space, None) - impl = scalar.Scalar(dtype.base, w_val) - else: - strides, backstrides = calc_strides(shape, dtype.base, order) - impl = concrete.ConcreteArray(shape, dtype.base, order, strides, - backstrides) + from pypy.module.micronumpy import concrete + strides, backstrides = calc_strides(shape, dtype.base, order) + impl = concrete.ConcreteArray(shape, dtype.base, order, strides, + backstrides) if w_instance: return wrap_impl(space, space.type(w_instance), w_instance, impl) return W_NDimArray(impl) @@ -51,12 +41,11 @@ @staticmethod def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, w_subtype=None, w_base=None, writable=True): - from pypy.module.micronumpy.arrayimpl import concrete - assert shape + from pypy.module.micronumpy import concrete strides, backstrides = calc_strides(shape, dtype, order) if w_base is not None: if owning: - raise OperationError(space.w_ValueError, + raise OperationError(space.w_ValueError, space.wrap("Cannot have owning=True when specifying a buffer")) if writable: impl = concrete.ConcreteArrayWithBase(shape, dtype, order, strides, @@ -65,7 +54,6 @@ impl = concrete.ConcreteNonWritableArrayWithBase(shape, dtype, order, strides, backstrides, storage, w_base) - elif owning: # Will free storage when GCd impl = concrete.ConcreteArray(shape, dtype, order, strides, @@ -82,7 +70,7 @@ @staticmethod def new_slice(space, offset, strides, backstrides, shape, parent, orig_arr, dtype=None): - from pypy.module.micronumpy.arrayimpl import concrete + from pypy.module.micronumpy import concrete impl = concrete.SliceArray(offset, strides, backstrides, shape, parent, orig_arr, dtype) @@ -90,36 +78,15 @@ @staticmethod def new_scalar(space, dtype, w_val=None): - from pypy.module.micronumpy.arrayimpl import scalar - if w_val is not None: w_val = dtype.coerce(space, w_val) else: w_val = dtype.coerce(space, space.wrap(0)) - return W_NDimArray(scalar.Scalar(dtype, w_val)) + return convert_to_array(space, 
w_val) def convert_to_array(space, w_obj): - #XXX: This whole routine should very likely simply be array() - from pypy.module.micronumpy.interp_numarray import array - from pypy.module.micronumpy import interp_ufuncs - + from pypy.module.micronumpy.ctors import array if isinstance(w_obj, W_NDimArray): return w_obj - else: - # Use __array__() method if it exists - w_array = space.lookup(w_obj, "__array__") - if w_array is not None: - w_result = space.get_and_call_function(w_array, w_obj) - if isinstance(w_result, W_NDimArray): - return w_result - else: - raise OperationError(space.w_ValueError, - space.wrap("object __array__ method not producing an array")) - elif issequence_w(space, w_obj): - # Convert to array. - return array(space, w_obj, w_order=None) - else: - # If it's a scalar - dtype = interp_ufuncs.find_dtype_for_scalar(space, w_obj) - return W_NDimArray.new_scalar(space, dtype, w_obj) + return array(space, w_obj) diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/boxes.py @@ -0,0 +1,815 @@ +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.typedef import TypeDef, GetSetProperty +from pypy.objspace.std.bytesobject import W_BytesObject +from pypy.objspace.std.floattype import float_typedef +from pypy.objspace.std.unicodeobject import W_UnicodeObject +from pypy.objspace.std.intobject import W_IntObject +from pypy.objspace.std.complextype import complex_typedef +from rpython.rlib.rarithmetic import LONG_BIT +from rpython.rtyper.lltypesystem import rffi +from rpython.tool.sourcetools import func_with_new_name +from pypy.module.micronumpy.concrete import VoidBoxStorage +from pypy.module.micronumpy.base import W_NDimArray +from pypy.module.micronumpy.flagsobj import W_FlagsObject +from pypy.interpreter.mixedmodule import MixedModule +from rpython.rtyper.lltypesystem import lltype +from rpython.rlib.rstring import StringBuilder +from rpython.rlib.objectmodel import specialize +from pypy.module.micronumpy import constants as NPY + + +MIXIN_32 = (W_IntObject.typedef,) if LONG_BIT == 32 else () +MIXIN_64 = (W_IntObject.typedef,) if LONG_BIT == 64 else () + +#long_double_size = rffi.sizeof_c_type('long double', ignore_errors=True) +#import os +#if long_double_size == 8 and os.name == 'nt': +# # this is a lie, or maybe a wish, MS fakes longdouble math with double +# long_double_size = 12 + +# hardcode to 8 for now (simulate using normal double) until long double works +long_double_size = 8 + + +def new_dtype_getter(num): + @specialize.memo() + def _get_dtype(space): + from pypy.module.micronumpy.descriptor import get_dtype_cache + return get_dtype_cache(space).dtypes_by_num[num] + + def descr__new__(space, w_subtype, w_value=None): + from pypy.module.micronumpy.ctors import array + dtype = _get_dtype(space) + if not space.is_none(w_value): + w_arr = array(space, w_value, dtype, copy=False) + if len(w_arr.get_shape()) != 0: + return w_arr + w_value = w_arr.get_scalar_value().item(space) + return dtype.itemtype.coerce_subtype(space, w_subtype, w_value) + + def descr_reduce(self, space): + return self.reduce(space) + + return (func_with_new_name(descr__new__, 'descr__new__%d' % num), + staticmethod(_get_dtype), + descr_reduce) + + +class Box(object): + _mixin_ = True + + def reduce(self, space): + numpypy = space.getbuiltinmodule("_numpypy") + assert 
isinstance(numpypy, MixedModule) + multiarray = numpypy.get("multiarray") + assert isinstance(multiarray, MixedModule) + scalar = multiarray.get("scalar") + + ret = space.newtuple([scalar, space.newtuple([space.wrap(self._get_dtype(space)), space.wrap(self.raw_str())])]) + return ret + +class PrimitiveBox(Box): + _mixin_ = True + _immutable_fields_ = ['value'] + + def __init__(self, value): + self.value = value + + def convert_to(self, space, dtype): + return dtype.box(self.value) + + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, self.value) + + def raw_str(self): + value = lltype.malloc(rffi.CArray(lltype.typeOf(self.value)), 1, flavor="raw") + value[0] = self.value + + builder = StringBuilder() + builder.append_charpsize(rffi.cast(rffi.CCHARP, value), rffi.sizeof(lltype.typeOf(self.value))) + ret = builder.build() + + lltype.free(value, flavor="raw") + return ret + +class ComplexBox(Box): + _mixin_ = True + _immutable_fields_ = ['real', 'imag'] + + def __init__(self, real, imag=0.): + self.real = real + self.imag = imag + + def convert_to(self, space, dtype): + return dtype.box_complex(self.real, self.imag) + + def convert_real_to(self, dtype): + return dtype.box(self.real) + + def convert_imag_to(self, dtype): + return dtype.box(self.imag) + + def raw_str(self): + value = lltype.malloc(rffi.CArray(lltype.typeOf(self.real)), 2, flavor="raw") + value[0] = self.real + value[1] = self.imag + + builder = StringBuilder() + builder.append_charpsize(rffi.cast(rffi.CCHARP, value), rffi.sizeof(lltype.typeOf(self.real)) * 2) + ret = builder.build() + + lltype.free(value, flavor="raw") + return ret + + +class W_GenericBox(W_Root): + _attrs_ = ['w_flags'] + + def descr__new__(space, w_subtype, __args__): + raise oefmt(space.w_TypeError, + "cannot create '%N' instances", w_subtype) + + def get_dtype(self, space): + return self._get_dtype(space) + + def item(self, space): + return self.get_dtype(space).itemtype.to_builtin_type(space, self) + + def descr_getitem(self, space, w_item): + from pypy.module.micronumpy.base import convert_to_array + if space.is_w(w_item, space.w_Ellipsis) or \ + (space.isinstance_w(w_item, space.w_tuple) and + space.len_w(w_item) == 0): + return convert_to_array(space, self) + raise OperationError(space.w_IndexError, space.wrap( + "invalid index to scalar variable")) + + def descr_str(self, space): + return space.wrap(self.get_dtype(space).itemtype.str_format(self)) + + def descr_format(self, space, w_spec): + return space.format(self.item(space), w_spec) + + def descr_hash(self, space): + return space.hash(self.item(space)) + + def descr_index(self, space): + return space.index(self.item(space)) + + def descr_int(self, space): + if isinstance(self, W_UnsignedIntegerBox): + box = self.convert_to(space, W_UInt64Box._get_dtype(space)) + else: + box = self.convert_to(space, W_Int64Box._get_dtype(space)) + return space.int(box.item(space)) + + def descr_long(self, space): + if isinstance(self, W_UnsignedIntegerBox): + box = self.convert_to(space, W_UInt64Box._get_dtype(space)) + else: + box = self.convert_to(space, W_Int64Box._get_dtype(space)) + return space.long(box.item(space)) + + def descr_float(self, space): + box = self.convert_to(space, W_Float64Box._get_dtype(space)) + return space.float(box.item(space)) + + def descr_oct(self, space): + return space.oct(self.descr_int(space)) + + def descr_hex(self, space): + return space.hex(self.descr_int(space)) + + def descr_nonzero(self, space): + dtype = self.get_dtype(space) + return 
space.wrap(dtype.itemtype.bool(self)) + + def _binop_impl(ufunc_name): + def impl(self, space, w_other, w_out=None): + from pypy.module.micronumpy import ufuncs + return getattr(ufuncs.get(space), ufunc_name).call(space, + [self, w_other, w_out]) + return func_with_new_name(impl, "binop_%s_impl" % ufunc_name) + + def _binop_right_impl(ufunc_name): + def impl(self, space, w_other, w_out=None): + from pypy.module.micronumpy import ufuncs + return getattr(ufuncs.get(space), ufunc_name).call(space, + [w_other, self, w_out]) + return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name) + + def _unaryop_impl(ufunc_name): + def impl(self, space, w_out=None): + from pypy.module.micronumpy import ufuncs + return getattr(ufuncs.get(space), ufunc_name).call(space, + [self, w_out]) + return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name) + + descr_add = _binop_impl("add") + descr_sub = _binop_impl("subtract") + descr_mul = _binop_impl("multiply") + descr_div = _binop_impl("divide") + descr_truediv = _binop_impl("true_divide") + descr_floordiv = _binop_impl("floor_divide") + descr_mod = _binop_impl("mod") + descr_pow = _binop_impl("power") + descr_lshift = _binop_impl("left_shift") + descr_rshift = _binop_impl("right_shift") + descr_and = _binop_impl("bitwise_and") + descr_or = _binop_impl("bitwise_or") + descr_xor = _binop_impl("bitwise_xor") + + descr_eq = _binop_impl("equal") + descr_ne = _binop_impl("not_equal") + descr_lt = _binop_impl("less") + descr_le = _binop_impl("less_equal") + descr_gt = _binop_impl("greater") + descr_ge = _binop_impl("greater_equal") + + descr_radd = _binop_right_impl("add") + descr_rsub = _binop_right_impl("subtract") + descr_rmul = _binop_right_impl("multiply") + descr_rdiv = _binop_right_impl("divide") + descr_rtruediv = _binop_right_impl("true_divide") + descr_rfloordiv = _binop_right_impl("floor_divide") + descr_rmod = _binop_right_impl("mod") + descr_rpow = _binop_right_impl("power") + descr_rlshift = _binop_right_impl("left_shift") + descr_rrshift = _binop_right_impl("right_shift") + descr_rand = _binop_right_impl("bitwise_and") + descr_ror = _binop_right_impl("bitwise_or") + descr_rxor = _binop_right_impl("bitwise_xor") + + descr_pos = _unaryop_impl("positive") + descr_neg = _unaryop_impl("negative") + descr_abs = _unaryop_impl("absolute") + descr_invert = _unaryop_impl("invert") + descr_conjugate = _unaryop_impl("conjugate") + + def descr_divmod(self, space, w_other): + w_quotient = self.descr_div(space, w_other) + w_remainder = self.descr_mod(space, w_other) + return space.newtuple([w_quotient, w_remainder]) + + def descr_rdivmod(self, space, w_other): + w_quotient = self.descr_rdiv(space, w_other) + w_remainder = self.descr_rmod(space, w_other) + return space.newtuple([w_quotient, w_remainder]) + + def descr_any(self, space): + from pypy.module.micronumpy.descriptor import get_dtype_cache + value = space.is_true(self) + return get_dtype_cache(space).w_booldtype.box(value) + + def descr_all(self, space): + from pypy.module.micronumpy.descriptor import get_dtype_cache + value = space.is_true(self) + return get_dtype_cache(space).w_booldtype.box(value) + + def descr_zero(self, space): + from pypy.module.micronumpy.descriptor import get_dtype_cache + return get_dtype_cache(space).w_longdtype.box(0) + + def descr_ravel(self, space): + from pypy.module.micronumpy.base import convert_to_array + w_values = space.newtuple([self]) + return convert_to_array(space, w_values) + + @unwrap_spec(decimals=int) + def descr_round(self, space, decimals=0, 
w_out=None): + if not space.is_none(w_out): + raise OperationError(space.w_NotImplementedError, space.wrap( + "out not supported")) + return self.get_dtype(space).itemtype.round(self, decimals) + + def descr_astype(self, space, w_dtype): + from pypy.module.micronumpy.descriptor import W_Dtype + dtype = space.interp_w(W_Dtype, + space.call_function(space.gettypefor(W_Dtype), w_dtype)) + return self.convert_to(space, dtype) + + def descr_view(self, space, w_dtype): + from pypy.module.micronumpy.descriptor import W_Dtype + try: + subclass = space.is_true(space.issubtype( + w_dtype, space.gettypefor(W_NDimArray))) + except OperationError, e: + if e.match(space, space.w_TypeError): + subclass = False + else: + raise + if subclass: + dtype = self.get_dtype(space) + else: + dtype = space.interp_w(W_Dtype, From noreply at buildbot.pypy.org Thu Feb 27 08:52:23 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Feb 2014 08:52:23 +0100 (CET) Subject: [pypy-commit] pypy default: On Windows, we're actually using the tempfile module to implement Message-ID: <20140227075223.DAECF1C3369@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69500:21e9670e179b Date: 2014-02-27 08:51 +0100 http://bitbucket.org/pypy/pypy/changeset/21e9670e179b/ Log: On Windows, we're actually using the tempfile module to implement os.tmpfile(). This test checks that os.tmpfile() tries to put its temporary file in the root directory --- and you don't have *permissions* to put it here typically. So, the test expects a failure, which it doesn't get. Duh. diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -129,9 +129,13 @@ fp = os.tmpfile() except OSError, second: self.assertEqual(first.args, second.args) + return else: - self.fail("expected os.tmpfile() to raise OSError") - return + if test_support.check_impl_detail(pypy=False): + self.fail("expected os.tmpfile() to raise OSError") + # on PyPy, os.tmpfile() uses the tempfile module + # anyway, so works even if we cannot write in root. + fp.close() else: # open() worked, therefore, tmpfile() should work. Close our # dummy file and proceed with the test as normal. From noreply at buildbot.pypy.org Thu Feb 27 09:56:11 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Feb 2014 09:56:11 +0100 (CET) Subject: [pypy-commit] stmgc default: Fix comments (only, for now) Message-ID: <20140227085611.CDBBA1C0A5B@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r888:73ad02610b81 Date: 2014-02-27 09:55 +0100 http://bitbucket.org/pypy/stmgc/changeset/73ad02610b81/ Log: Fix comments (only, for now) diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -5,16 +5,12 @@ static void setup_gcpage(void) { - /* NB. 
the very last page is not used, which allows a speed-up in - reset_all_creation_markers() */ char *base = stm_object_pages + END_NURSERY_PAGE * 4096UL; - uintptr_t length = (NB_PAGES - END_NURSERY_PAGE - 1) * 4096UL; + uintptr_t length = (NB_PAGES - END_NURSERY_PAGE) * 4096UL; _stm_largemalloc_init_arena(base, length); uninitialized_page_start = stm_object_pages + END_NURSERY_PAGE * 4096UL; uninitialized_page_stop = stm_object_pages + NB_PAGES * 4096UL; - - assert(GC_MEDIUM_REQUEST >= (1 << 8)); } static void teardown_gcpage(void) diff --git a/c7/stm/gcpage.h b/c7/stm/gcpage.h --- a/c7/stm/gcpage.h +++ b/c7/stm/gcpage.h @@ -1,14 +1,14 @@ /* Outside the nursery, we are taking from the highest addresses - complete pages, one at a time, which uniformly contain objects - of size "8 * N" for some N in range(2, GC_N_SMALL_REQUESTS). We - are taking from the lowest addresses "large" objects, which are - guaranteed to be at least 256 bytes long (actually 288), - allocated by largemalloc.c. + complete pages, one at a time, which uniformly contain objects of + size "8 * N" for some N in range(2, GC_N_SMALL_REQUESTS). We are + taking from the lowest addresses "large" objects, which are at least + 128 bytes long, allocated by largemalloc.c. The limit was picked + from a run a PyPy, showing that (in this case) the number of + allocations of at least 128 bytes is a lot below 1%. */ -#define GC_N_SMALL_REQUESTS 36 -#define GC_MEDIUM_REQUEST (GC_N_SMALL_REQUESTS * 8) +#define GC_N_SMALL_REQUESTS 16 static char *uninitialized_page_start; /* within segment 0 */ diff --git a/c7/stm/list.h b/c7/stm/list.h --- a/c7/stm/list.h +++ b/c7/stm/list.h @@ -73,7 +73,8 @@ /* The tree_xx functions are, like the name hints, implemented as a tree, supporting very high performance in TREE_FIND in the common case where there are no or few elements in the tree, but scaling correctly - if the number of items becomes large. */ + if the number of items becomes large (logarithmically, rather + than almost-constant-time with hash maps, but with low constants). */ #define TREE_BITS 4 #define TREE_ARITY (1 << TREE_BITS) diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -89,7 +89,7 @@ char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base, obj); size_t size = stmcb_size_rounded_up((struct object_s *)realobj); - if (1 /*size >= GC_MEDIUM_REQUEST*/) { + if (1 /*size >= GC_N_SMALL_REQUESTS*8*/) { /* case 1: object is not small enough. Ask gcpage.c for an allocation via largemalloc. 
*/ From noreply at buildbot.pypy.org Thu Feb 27 11:09:04 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 27 Feb 2014 11:09:04 +0100 (CET) Subject: [pypy-commit] cffi default: newer versions of PyPy emit a slightly different error message Message-ID: <20140227100904.783931C02FC@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r1463:c1089d1a3c89 Date: 2014-02-27 11:08 +0100 http://bitbucket.org/cffi/cffi/changeset/c1089d1a3c89/ Log: newer versions of PyPy emit a slightly different error message diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -1429,8 +1429,10 @@ p = newp(BStructPtr, [12]) assert p.a1 == 12 e = py.test.raises(TypeError, newp, BStructPtr, [None]) - assert ("an integer is required" in str(e.value) or - "unsupported operand type for int(): 'NoneType'" in str(e.value)) #PyPy + msg = str(e.value) + assert ("an integer is required" in msg or # CPython + "unsupported operand type for int(): 'NoneType'" in msg or # old PyPys + "expected integer, got NoneType object" in msg) # newer PyPys py.test.raises(TypeError, 'p.a1 = "def"') if sys.version_info < (3,): BEnum2 = new_enum_type(unicode("foo"), (unicode('abc'),), (5,), BInt) From noreply at buildbot.pypy.org Thu Feb 27 11:09:05 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 27 Feb 2014 11:09:05 +0100 (CET) Subject: [pypy-commit] cffi default: merge heads Message-ID: <20140227100905.903901C02FC@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r1464:3b6e66b91886 Date: 2014-02-27 11:08 +0100 http://bitbucket.org/cffi/cffi/changeset/3b6e66b91886/ Log: merge heads diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -1429,8 +1429,10 @@ p = newp(BStructPtr, [12]) assert p.a1 == 12 e = py.test.raises(TypeError, newp, BStructPtr, [None]) - assert ("an integer is required" in str(e.value) or - "unsupported operand type for int(): 'NoneType'" in str(e.value)) #PyPy + msg = str(e.value) + assert ("an integer is required" in msg or # CPython + "unsupported operand type for int(): 'NoneType'" in msg or # old PyPys + "expected integer, got NoneType object" in msg) # newer PyPys py.test.raises(TypeError, 'p.a1 = "def"') if sys.version_info < (3,): BEnum2 = new_enum_type(unicode("foo"), (unicode('abc'),), (5,), BInt) From noreply at buildbot.pypy.org Thu Feb 27 11:14:33 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 11:14:33 +0100 (CET) Subject: [pypy-commit] pypy default: fix array setitem with ellipsis Message-ID: <20140227101433.1E49C1C02FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69501:67b371c67638 Date: 2014-02-27 04:51 -0500 http://bitbucket.org/pypy/pypy/changeset/67b371c67638/ Log: fix array setitem with ellipsis diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -223,7 +223,10 @@ self.implementation.setitem_index(space, index_list, w_value) def descr_setitem(self, space, w_idx, w_value): - if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool() \ + if space.is_w(w_idx, space.w_Ellipsis): + self.implementation.setslice(space, convert_to_array(space, w_value)) + return + elif isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool() \ and len(w_idx.get_shape()) > 0: self.setitem_filter(space, w_idx, convert_to_array(space, w_value)) return diff --git a/pypy/module/micronumpy/test/test_ndarray.py 
b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2303,12 +2303,12 @@ import numpy as np a = np.array(1.5) assert a[...] is a - #a[...] = 2.5 - #assert a == 2.5 + a[...] = 2.5 + assert a == 2.5 a = np.array([1, 2, 3]) assert a[...] is a - #a[...] = 4 - #assert (a == [4, 4, 4]).all() + a[...] = 4 + assert (a == [4, 4, 4]).all() class AppTestNumArrayFromBuffer(BaseNumpyAppTest): From noreply at buildbot.pypy.org Thu Feb 27 11:14:34 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 11:14:34 +0100 (CET) Subject: [pypy-commit] pypy default: fix array.reshape(None) Message-ID: <20140227101434.555561C02FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69502:f4dcfc98a4ab Date: 2014-02-27 05:07 -0500 http://bitbucket.org/pypy/pypy/changeset/f4dcfc98a4ab/ Log: fix array.reshape(None) diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -61,7 +61,7 @@ def get_storage_size(self): return self.size - def reshape(self, space, orig_array, new_shape): + def reshape(self, orig_array, new_shape): # Since we got to here, prod(new_shape) == self.size new_strides = None if self.size > 0: diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -343,14 +343,13 @@ def reshape(self, space, w_shape): new_shape = get_shape_from_iterable(space, self.get_size(), w_shape) - new_impl = self.implementation.reshape(space, self, new_shape) + new_impl = self.implementation.reshape(self, new_shape) if new_impl is not None: return wrap_impl(space, space.type(self), self, new_impl) # Create copy with contiguous data arr = self.descr_copy(space) if arr.get_size() > 0: - arr.implementation = arr.implementation.reshape(space, self, - new_shape) + arr.implementation = arr.implementation.reshape(self, new_shape) assert arr.implementation else: arr.implementation.shape = new_shape @@ -384,6 +383,8 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "unsupported value for order")) if len(args_w) == 1: + if space.is_none(args_w[0]): + return self.descr_view(space) w_shape = args_w[0] else: w_shape = space.newtuple(args_w) diff --git a/pypy/module/micronumpy/sort.py b/pypy/module/micronumpy/sort.py --- a/pypy/module/micronumpy/sort.py +++ b/pypy/module/micronumpy/sort.py @@ -125,7 +125,7 @@ # note that it's fine ot pass None here as we're not going # to pass the result around (None is the link to base in slices) if arr.get_size() > 0: - arr = arr.reshape(space, None, [arr.get_size()]) + arr = arr.reshape(None, [arr.get_size()]) axis = 0 elif w_axis is None: axis = -1 @@ -276,7 +276,7 @@ if w_axis is space.w_None: # note that it's fine to pass None here as we're not going # to pass the result around (None is the link to base in slices) - arr = arr.reshape(space, None, [arr.get_size()]) + arr = arr.reshape(None, [arr.get_size()]) axis = 0 elif w_axis is None: axis = -1 diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -844,6 +844,12 @@ b = a.reshape(s) assert b.shape == s assert (b == [1]).all() + a = array(1.5) + b = a.reshape(None) + assert b is not a + assert b == a + b[...] 
= 2.5 + assert a == 2.5 a = array(range(12)) exc = raises(ValueError, "b = a.reshape(())") assert str(exc.value) == "total size of new array must be unchanged" From noreply at buildbot.pypy.org Thu Feb 27 11:32:42 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 27 Feb 2014 11:32:42 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: change the test to use a more explicit object with __float__ instead of numpy's floats Message-ID: <20140227103242.16B591C08B9@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69503:801dcc30809e Date: 2014-02-27 10:27 +0100 http://bitbucket.org/pypy/pypy/changeset/801dcc30809e/ Log: change the test to use a more explicit object with __float__ instead of numpy's floats diff --git a/pypy/module/struct/test/test_struct.py b/pypy/module/struct/test/test_struct.py --- a/pypy/module/struct/test/test_struct.py +++ b/pypy/module/struct/test/test_struct.py @@ -384,13 +384,14 @@ assert self.struct.unpack("ii", b) == (62, 12) raises(self.struct.error, self.struct.unpack, "i", b) - def test_numpy_dtypes(self): - if self.runappdirect: - from numpy.core.multiarray import typeinfo - else: - from _numpypy.multiarray import typeinfo - float64 = typeinfo['DOUBLE'][4] - obj = float64(42.3) + def test___float__(self): + class MyFloat(object): + def __init__(self, x): + self.x = x + def __float__(self): + return self.x + + obj = MyFloat(42.3) data = self.struct.pack('d', obj) obj2, = self.struct.unpack('d', data) assert type(obj2) is float From noreply at buildbot.pypy.org Thu Feb 27 11:32:43 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 27 Feb 2014 11:32:43 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: update to cffi/3b6e66b91886, this fixes a test caused by the change of an error message Message-ID: <20140227103243.5DDC31C08B9@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69504:b58f66224d79 Date: 2014-02-27 11:32 +0100 http://bitbucket.org/pypy/pypy/changeset/b58f66224d79/ Log: update to cffi/3b6e66b91886, this fixes a test caused by the change of an error message diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1418,8 +1418,10 @@ p = newp(BStructPtr, [12]) assert p.a1 == 12 e = py.test.raises(TypeError, newp, BStructPtr, [None]) - assert ("an integer is required" in str(e.value) or - "unsupported operand type for int(): 'NoneType'" in str(e.value)) #PyPy + msg = str(e.value) + assert ("an integer is required" in msg or # CPython + "unsupported operand type for int(): 'NoneType'" in msg or # old PyPys + "expected integer, got NoneType object" in msg) # newer PyPys py.test.raises(TypeError, 'p.a1 = "def"') if sys.version_info < (3,): BEnum2 = new_enum_type(unicode("foo"), (unicode('abc'),), (5,), BInt) From noreply at buildbot.pypy.org Thu Feb 27 11:39:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Feb 2014 11:39:38 +0100 (CET) Subject: [pypy-commit] stmgc default: Update the README document to reflect the current status. Add a TODO. Message-ID: <20140227103938.3144C1C08B9@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r889:97097d270bf9 Date: 2014-02-27 11:39 +0100 http://bitbucket.org/pypy/stmgc/changeset/97097d270bf9/ Log: Update the README document to reflect the current status. Add a TODO. 
diff --git a/c7/README.txt b/c7/README.txt --- a/c7/README.txt +++ b/c7/README.txt @@ -54,175 +54,98 @@ Memory organization ------------------- -We allocate a big mmap that contains enough addresses for N times M -bytes, where N is the number of threads and M is an upper bound on the -total size of the objects. Then we use remap_file_pages() to make these -N regions all map to the same physical memory. In each thread, -%gs is made to point to the start of the corresponding region. This -means that %gs-relative accesses will go to different addresses in -each thread, but these addresses are then (initially) mapped to the -same physical memory, so the effect is as if we used neither %gs nor -remap_file_pages(). +We have a small, fixed number of big pieces of memory called "segments". +Each segment has enough (virtual) address space for all the objects that +the program needs. This is actually allocated from a single big mmap() +so that pages can be exchanged between segments with remap_file_pages(). +We call N the number of segments. Actual threads are not limited in +number; they grab one segment in order to run GC-manipulating code, and +release it afterwards. This is similar to what occurs with the GIL, +except we have up to N threads that can run in parallel, instead of 1. -The exception comes from pages that contain objects that are already -committed, but are being modified by the current transaction. Such -changes must not be visible to other threads before the current -transaction commits. This is done by using another remap_file_pages() -to "unshare" the page, i.e. stop the corresponding %gs-relative, -thread-local page from mapping to the same physical page as others. We -get a fresh new physical page, and duplicate its content --- much like -the OS does after a fork() for pages modified by one or the other -process. +The first step when the process starts is to use remap_file_pages() to +make these N regions all map to the same physical memory. In each +thread, when it grabs a segment, %gs is made to point to the start of +the segment. This means that %gs-relative accesses will go to different +real addresses in each thread, but these addresses are then (initially) +mapped to the same physical memory, so the effect is as if we used +neither %gs nor remap_file_pages(). + +The interesting exception to that rule comes from pages that contain +objects that are already committed, but are being modified by the +current transaction. Such changes must not be visible to other threads +before the current transaction commits. This is done by using another +remap_file_pages() to "unshare" the page, i.e. stop the corresponding +%gs-relative, thread-local page from mapping to the same physical page +as others. We get a fresh new physical page, and duplicate its content +--- much like the OS does after a fork() for pages modified by one or +the other process. In more details: the first page of addresses in each thread-local region (4096 bytes) is made non-accessible, to detect errors of accessing the NULL pointer. The second page is reserved for thread-local data. The rest is divided into 1/16 for thread-local read markers, followed by 15/16 for the real objects. We initially use remap_file_pages() on this -15/16 range. +15/16 range. The read markers are described below. Each transaction records the objects that it changed. These are -necessarily within unshared pages. When other threads are about to -commit their own transaction, they first copy these objects into their -version of the page. 
The point is that, from another thread's point of -view, the memory didn't appear to change unexpectedly, but only when -that other thread decides to copy the change explicitly. +necessarily within unshared pages. When we want to commit a +transaction, we ask for a safe-point (suspending the other threads in a +known state), and then we copy again the modified objects into the other +version(s) of that data. The point is that, from another thread's point +of view, the memory didn't appear to change unexpectedly, but only when +waiting in a safe-point. -Each transaction uses their own (private) read markers to track which -objects have been read. When a thread "imports" changes done to some -objects, it can quickly check if these objects have also been read by -the current transaction, and if so, we know we have a conflict. +Moreover, we detect read-write conflicts when trying to commit. To do +this, each transaction needs to track in their own (private) read +markers which objects it has read. When we try to commit one segment's +version, when we would write a modified object to the other segments, we +can check the other segments' read markers. If a conflict is detected, +we either abort the committing thread, or mark the other thread as +requiring an abort (which it will do when trying to leave the +safe-point). - -STM details ------------ - -Here is how the STM works in terms that are hopefully common in STM -research. The transactions run from a "start time" to a "commit time", -but these are not explicitly represented numerically. The start time -defines the initial state of the objects as seen in this thread. We use -the "extendable timestamps" approach in order to regularly bump the -start time of running transactions (not only when a potential conflict -is detected, but more eagerly). - -Each thread records privately its read objects (using a byte-map) and -publicly its written objects (using an array of pointers as well as a -global flag in the object). Read-write conflicts are detected during -the start time bumps. Write-write conflicts are detected eagerly --- -only one transaction can be concurrently running with a given object -modified. (In the case of write-write conficts, there are several -possible contention management policies; for now we always abort the -transaction that comes later in its attempt to modify the object.) - -Special care is taken for objects allocated in the current transaction. -We expect these objects to be the vast majority of modified objects, and -also most of them to die quickly. More about it below. - -We use what looks like an "undo log" approach, where objects are -modified in-place and aborts cause them to be copied back from somewhere -else. However, it is implemented without any explicit undo log, but by -copying objects between multiple thread-local copies. Memory pages -containing modified objects are duplicated anyway, and so we already -have around several copies of the objects at potentially different -versions. - - -(The rest of this section defines the "leader". It's a complicated way -to make sure we always have an object to copy back in case this -transaction is aborted. At first, what will be implemented in core.c -will simply be waiting if necessary until two threads reach the latest -version; then each thread can use the other's original object.) - - -At most one thread is called the "leader" (this is new terminology as -far as I know). 
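The read markers mentioned above (the 1/16 of each segment) can be pictured as one byte per 16 bytes of object space, indexed by the object's offset. The exact mapping and the names below are guesses for illustration only, not the real stmgc layout:

    #include <stdint.h>

    /* set this transaction's read marker for the object at 'obj_offset' */
    static inline void mark_read(char *segment_base, uintptr_t obj_offset,
                                 uint8_t transaction_read_version)
    {
        ((uint8_t *)segment_base)[obj_offset >> 4] = transaction_read_version;
    }

    /* at commit time: did another segment's current transaction read it? */
    static inline int was_read_by_other(char *other_segment_base,
                                        uintptr_t obj_offset,
                                        uint8_t other_read_version)
    {
        return ((uint8_t *)other_segment_base)[obj_offset >> 4]
                   == other_read_version;
    }

Presumably the point of comparing against a per-transaction "read version" rather than a plain boolean is that the markers do not need to be cleared between transactions.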
The leader is: - -- a thread that runs a transaction right now (as opposed to being - in some blocking syscall between two transactions, for example); - -- not alone: there are other threads running a transaction concurrently - (when only one thread is running, there is no leader); - -- finally, the start time of this thread's transaction is strictly - higher than the start time of any other running transaction. (If there - are several threads with the same highest start time, we have no - leader.) - -Leadership is a temporary condition: it is acquired (typically) by the -thread whose transaction commits and whose next transaction starts; but -it is lost again as soon as any other thread updates its transaction's -start time to match. - -The point of the notion of leadership is that when the leader wants to -modify an object, it must first make sure that the original version is -also present somewhere else. Only the leader thread, if there is any, -needs to worry about it. We don't need to remember the original version -of an older object, because if we need to abort a transaction, we may as -well update all objects to the latest version. And if there are several -threads with the same highest start time, we can be sure that the -original version of the object is somewhere among them --- this is the -point of detecting write-write conflicts eagerly. Finally, if there is -only one thread running, as soon as it was updated, it cannot abort any -more, so we don't need to record the old version of anything. - -The only remaining case is the one in which there is a leader thread, -this leader thread has the only latest version of an object, and it -tries to further modify this object. To handle this precise case, for -now, we simply wait until another thread updates and we are no longer -the leader. (*) - -(*) the code in core.c contains, or contained, or will again contain, an -explicit undo log that would be filled in this case only. +On the other hand, write-write conflicts are detected eagerly, which is +necessary to avoid that all segments contain a modified version of the +object and no segment is left with the original version. It is done +with a compare-and-swap into an array of write locks (only the first +time a given old object is modified by a given transaction). Object creation and GC ---------------------- -draft: +We use a GC that is similar to the one in PyPy: -- pages need to be unshared when they contain already-committed objects - that are then modified. +- a generational GC, with one nursery per segment containing + transaction-local objects only, and moved outside when full or when the + transaction commits. -- pages can remain shared if a fraction of (or all) their space was not - used previously, but is used by new allocations; any changes to these - fresh objects during the same transaction do *not* need to unshare the - page. This should ensure that in the common case the majority of pages - are not unshared. +- nomenclature: objects are either "young" or "old" depending on whether + they were created by the current transaction or not. Old objects are + always outside the nursery. We call "overflow" objects the young + objects that are also outside the nursery. -- minor collection: occurs regularly, and maybe always at the end of - transactions (we'll see). Should work by marking the young objects - that survive. Non-marked objects are then sweeped lazily by the - next allocation requests (as in "mark-and-don't-sweep" GCs, here - for the minor collection only). 
Needs a write barrier to detect - old-objects-pointing-to-young objects (the old object may be fresh - from the same running transaction as well, or be already committed). +- pages need to be unshared when they contain old objects that are then + modified. -- the numbers and flags stored in the objects need to be designed with - the above goals in mind. +- we need a write barrier to detect the changes done to any non-nursery + object (the first time only). This is just a flag check. Then the + slow-path of this write barrier distinguishes between overflow + objects and old objects, and the latter need to be unshared. -- unclear yet: the minor collections may be triggered only when the - memory is full, or whenever a few MBs of memory was allocated. It is - not important for small-to-medium transactions that only allocate a - few MBs anyway, but it might be for long-running transactions. - -- the major collections walk *all* objects. They'll probably require - all threads to be synchronized. Ideally the threads should then proceed - to do a parallel GC, but as a first step, blocking all threads but one - should be fine. +- the old generation is collected with mark-and-sweep, during a major + collection step that walks *all* objects. This requires all threads + to be synchronized, but ideally the threads should then proceed + to do a parallel GC (i.e. mark in all threads in parallel, and + then sweep in al threads in parallel, with one arbitrary thread + taking on the additional coordination role needed). - the major collections should be triggered by the amount of really-used memory, which means: counting the unshared pages as N pages. Major - collection should then re-share the pages as much as possible, after - making sure that all threads have their timestamp updated. This is the - essential part that guarantees that large, old, no-longer-modified + collection should then re-share the pages as much as possible. This is + the essential part that guarantees that old, no-longer-modified bunches of objects are eventually present in only one copy in memory, in shared pages --- while at the same time bounding the number of - calls to remap_file_pages() for each page at 2 per major collection + calls to remap_file_pages() for each page at N-1 per major collection cycle. - - -Misc ----- - -Use __builtin_setjmp() and __builtin_longjmp() rather than setjmp() -and longjmp(). 
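The write barrier described in the GC section above ("just a flag check") has roughly the following shape; the flag value and the names are invented for this sketch:

    #include <stdint.h>

    #define GCFLAG_WRITE_BARRIER  0x01           /* illustrative value */

    typedef struct object_s {
        uint32_t stm_flags;                      /* ...payload follows... */
    } object_t;

    /* slow path: record the object, distinguish overflow vs. old objects,
       unshare the page(s) of old objects, then clear the flag */
    void write_slowpath(object_t *obj);

    static inline void stm_write_barrier(object_t *obj)
    {
        if (obj->stm_flags & GCFLAG_WRITE_BARRIER)   /* fast path: one check */
            write_slowpath(obj);
    }

As the text above says, the slow path therefore runs only the first time a given non-nursery object is modified.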
diff --git a/c7/TODO b/c7/TODO new file mode 100644 --- /dev/null +++ b/c7/TODO @@ -0,0 +1,8 @@ + +- major GC + +- use small uniform gcpages + +- write barrier for big arrays + +- prebuilt objects: stm_setup_prebuilt(array_of_"object_t*", length); From noreply at buildbot.pypy.org Thu Feb 27 12:05:05 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 12:05:05 +0100 (CET) Subject: [pypy-commit] pypy default: fix reshape with zero-sized array Message-ID: <20140227110505.801241C35DA@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69505:baa6eb2680b0 Date: 2014-02-27 03:15 -0500 http://bitbucket.org/pypy/pypy/changeset/baa6eb2680b0/ Log: fix reshape with zero-sized array diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -1,12 +1,12 @@ +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import unwrap_spec +from pypy.module.micronumpy import loop, descriptor, ufuncs, support, \ + constants as NPY from pypy.module.micronumpy.base import convert_to_array, W_NDimArray -from pypy.module.micronumpy import loop, descriptor, ufuncs +from pypy.module.micronumpy.converters import clipmode_converter from pypy.module.micronumpy.strides import Chunk, Chunks, shape_agreement, \ shape_agreement_multiple -from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.gateway import unwrap_spec -from pypy.module.micronumpy.converters import clipmode_converter -from pypy.module.micronumpy import support -from pypy.module.micronumpy import constants as NPY + def where(space, w_arr, w_x=None, w_y=None): """where(condition, [x, y]) diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -1,7 +1,6 @@ +from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError -from pypy.interpreter.baseobjspace import W_Root from rpython.tool.pairtype import extendabletype -from pypy.module.micronumpy.support import calc_strides def wrap_impl(space, w_cls, w_instance, impl): @@ -31,9 +30,10 @@ @staticmethod def from_shape(space, shape, dtype, order='C', w_instance=None): from pypy.module.micronumpy import concrete + from pypy.module.micronumpy.strides import calc_strides strides, backstrides = calc_strides(shape, dtype.base, order) impl = concrete.ConcreteArray(shape, dtype.base, order, strides, - backstrides) + backstrides) if w_instance: return wrap_impl(space, space.type(w_instance), w_instance, impl) return W_NDimArray(impl) @@ -42,6 +42,7 @@ def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, w_subtype=None, w_base=None, writable=True): from pypy.module.micronumpy import concrete + from pypy.module.micronumpy.strides import calc_strides strides, backstrides = calc_strides(shape, dtype, order) if w_base is not None: if owning: diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -1,23 +1,22 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.mixedmodule import MixedModule from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.objspace.std.bytesobject import W_BytesObject +from 
pypy.objspace.std.complextype import complex_typedef from pypy.objspace.std.floattype import float_typedef +from pypy.objspace.std.intobject import W_IntObject from pypy.objspace.std.unicodeobject import W_UnicodeObject -from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.complextype import complex_typedef from rpython.rlib.rarithmetic import LONG_BIT -from rpython.rtyper.lltypesystem import rffi -from rpython.tool.sourcetools import func_with_new_name -from pypy.module.micronumpy.concrete import VoidBoxStorage -from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy.flagsobj import W_FlagsObject -from pypy.interpreter.mixedmodule import MixedModule -from rpython.rtyper.lltypesystem import lltype from rpython.rlib.rstring import StringBuilder from rpython.rlib.objectmodel import specialize +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.tool.sourcetools import func_with_new_name from pypy.module.micronumpy import constants as NPY +from pypy.module.micronumpy.base import W_NDimArray +from pypy.module.micronumpy.concrete import VoidBoxStorage +from pypy.module.micronumpy.flagsobj import W_FlagsObject MIXIN_32 = (W_IntObject.typedef,) if LONG_BIT == 32 else () diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -1,20 +1,17 @@ """ This is a set of tools for standalone compiling of numpy expressions. It should not be imported by the module itself """ - import re - from pypy.interpreter import special from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root from pypy.interpreter.error import OperationError -from pypy.module.micronumpy import boxes -from pypy.module.micronumpy.descriptor import get_dtype_cache +from rpython.rlib.objectmodel import specialize, instantiate +from rpython.rlib.nonconst import NonConstant +from pypy.module.micronumpy import boxes, ufuncs +from pypy.module.micronumpy.arrayops import where from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy.ctors import array -from pypy.module.micronumpy.arrayops import where -from pypy.module.micronumpy import ufuncs -from rpython.rlib.objectmodel import specialize, instantiate -from rpython.rlib.nonconst import NonConstant +from pypy.module.micronumpy.descriptor import get_dtype_cache class BogusBytecode(Exception): diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -1,16 +1,16 @@ -from pypy.module.micronumpy import support, loop, iter -from pypy.module.micronumpy.base import convert_to_array, W_NDimArray,\ - ArrayArgumentException -from pypy.module.micronumpy.strides import (Chunk, Chunks, NewAxisChunk, - RecordChunk, calc_new_strides, shape_agreement, calculate_broadcast_strides, - calculate_dot_strides) +from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.buffer import RWBuffer from rpython.rlib import jit -from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage, \ raw_storage_getitem, raw_storage_setitem, RAW_STORAGE -from rpython.rlib.debug import make_sure_not_resized +from rpython.rtyper.lltypesystem import rffi, lltype +from pypy.module.micronumpy import support, loop, iter +from 
pypy.module.micronumpy.base import convert_to_array, W_NDimArray, \ + ArrayArgumentException +from pypy.module.micronumpy.strides import (Chunk, Chunks, NewAxisChunk, + RecordChunk, calc_strides, calc_new_strides, shape_agreement, + calculate_broadcast_strides, calculate_dot_strides) class BaseConcreteArray(object): @@ -64,7 +64,9 @@ def reshape(self, orig_array, new_shape): # Since we got to here, prod(new_shape) == self.size new_strides = None - if self.size > 0: + if self.size == 0: + new_strides, _ = calc_strides(new_shape, self.dtype, self.order) + else: if len(self.get_shape()) == 0: new_strides = [self.dtype.elsize] * len(new_shape) else: @@ -81,7 +83,7 @@ new_shape, self, orig_array) def get_view(self, space, orig_array, dtype, new_shape): - strides, backstrides = support.calc_strides(new_shape, dtype, + strides, backstrides = calc_strides(new_shape, dtype, self.order) return SliceArray(self.start, strides, backstrides, new_shape, self, orig_array, dtype=dtype) @@ -268,7 +270,7 @@ backstrides, shape, self, orig_array) def copy(self, space): - strides, backstrides = support.calc_strides(self.get_shape(), self.dtype, + strides, backstrides = calc_strides(self.get_shape(), self.dtype, self.order) impl = ConcreteArray(self.get_shape(), self.dtype, self.order, strides, backstrides) @@ -312,7 +314,7 @@ return ArrayBuffer(self) def astype(self, space, dtype): - strides, backstrides = support.calc_strides(self.get_shape(), dtype, + strides, backstrides = calc_strides(self.get_shape(), dtype, self.order) impl = ConcreteArray(self.get_shape(), dtype, self.order, strides, backstrides) @@ -358,7 +360,7 @@ box, 0, self.size, 0) def set_shape(self, space, orig_array, new_shape): - strides, backstrides = support.calc_strides(new_shape, self.dtype, + strides, backstrides = calc_strides(new_shape, self.dtype, self.order) return SliceArray(0, strides, backstrides, new_shape, self, orig_array) diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -1,16 +1,14 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec, WrappedDefault +from rpython.rlib.rstring import strip_spaces from rpython.rtyper.lltypesystem import lltype, rffi -from pypy.module.micronumpy import descriptor, loop -from rpython.rlib.rstring import strip_spaces -from pypy.module.micronumpy import ufuncs +from pypy.module.micronumpy import descriptor, loop, ufuncs from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy.converters import shape_converter from pypy.module.micronumpy.strides import find_shape_and_elems def build_scalar(space, w_dtype, w_state): - from rpython.rtyper.lltypesystem import rffi, lltype if not isinstance(w_dtype, descriptor.W_Dtype): raise oefmt(space.w_TypeError, "argument 1 must be numpy.dtype, not %T", w_dtype) diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -4,14 +4,12 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, interp_attrproperty_w) -from pypy.module.micronumpy import types, boxes, base +from rpython.rlib import jit from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_longlong, r_ulonglong -from rpython.rlib import jit +from pypy.module.micronumpy 
import types, boxes, base, support, constants as NPY from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy.converters import byteorder_converter -from pypy.module.micronumpy import support -from pypy.module.micronumpy import constants as NPY def decode_w_dtype(space, w_dtype): diff --git a/pypy/module/micronumpy/flagsobj.py b/pypy/module/micronumpy/flagsobj.py --- a/pypy/module/micronumpy/flagsobj.py +++ b/pypy/module/micronumpy/flagsobj.py @@ -1,7 +1,7 @@ from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError +from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.interpreter.gateway import interp2app -from pypy.interpreter.error import OperationError class W_FlagsObject(W_Root): diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -1,7 +1,7 @@ +from pypy.interpreter.error import OperationError, oefmt +from pypy.module.micronumpy import loop from pypy.module.micronumpy.base import W_NDimArray, convert_to_array -from pypy.module.micronumpy import loop from pypy.module.micronumpy.concrete import BaseConcreteArray -from pypy.interpreter.error import OperationError, oefmt class FakeArrayImplementation(BaseConcreteArray): diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -40,10 +40,9 @@ but then we cannot gaurentee that we only overflow one single shape dimension, perhaps we could overflow times in one big step. """ - +from rpython.rlib import jit +from pypy.module.micronumpy import support from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy import support -from rpython.rlib import jit class PureShapeIterator(object): diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -2,15 +2,14 @@ operations. This is the place to look for all the computations that iterate over all the array elements. 
""" - -from rpython.rlib.rstring import StringBuilder from pypy.interpreter.error import OperationError from rpython.rlib import jit +from rpython.rlib.rstring import StringBuilder from rpython.rtyper.lltypesystem import lltype, rffi +from pypy.module.micronumpy import support, constants as NPY from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy.iter import PureShapeIterator -from pypy.module.micronumpy import support -from pypy.module.micronumpy import constants as NPY + call2_driver = jit.JitDriver(name='numpy_call2', greens = ['shapelen', 'func', 'calc_dtype', diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -1,27 +1,25 @@ -from rpython.rtyper.lltypesystem import rffi -from rpython.rlib.rawstorage import RAW_STORAGE_PTR from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.typedef import TypeDef, GetSetProperty, make_weakref_descr from pypy.interpreter.gateway import interp2app, unwrap_spec, applevel, \ WrappedDefault -from pypy.module.micronumpy.base import W_NDimArray, convert_to_array,\ - ArrayArgumentException, wrap_impl -from pypy.module.micronumpy import descriptor, ufuncs, boxes, arrayops -from pypy.module.micronumpy.strides import get_shape_from_iterable, to_coords, \ - shape_agreement, shape_agreement_multiple -from pypy.module.micronumpy.flagsobj import W_FlagsObject -from pypy.module.micronumpy.flatiter import W_FlatIterator -from pypy.module.micronumpy.appbridge import get_appbridge_cache -from pypy.module.micronumpy import loop -from pypy.module.micronumpy.arrayops import repeat, choose, put -from rpython.tool.sourcetools import func_with_new_name +from pypy.interpreter.typedef import TypeDef, GetSetProperty, make_weakref_descr from rpython.rlib import jit from rpython.rlib.rstring import StringBuilder +from rpython.rlib.rawstorage import RAW_STORAGE_PTR +from rpython.rtyper.lltypesystem import rffi +from rpython.tool.sourcetools import func_with_new_name +from pypy.module.micronumpy import descriptor, ufuncs, boxes, arrayops, loop, \ + support, constants as NPY +from pypy.module.micronumpy.appbridge import get_appbridge_cache +from pypy.module.micronumpy.arrayops import repeat, choose, put +from pypy.module.micronumpy.base import W_NDimArray, convert_to_array, \ + ArrayArgumentException, wrap_impl from pypy.module.micronumpy.concrete import BaseConcreteArray from pypy.module.micronumpy.converters import order_converter, shape_converter, \ multi_axis_converter -from pypy.module.micronumpy import support -from pypy.module.micronumpy import constants as NPY +from pypy.module.micronumpy.flagsobj import W_FlagsObject +from pypy.module.micronumpy.flatiter import W_FlatIterator +from pypy.module.micronumpy.strides import get_shape_from_iterable, to_coords, \ + shape_agreement, shape_agreement_multiple def _match_dot_shapes(space, left, right): @@ -1132,7 +1130,7 @@ def descr_new_array(space, w_subtype, w_shape, w_dtype=None, w_buffer=None, offset=0, w_strides=None, w_order=None): from pypy.module.micronumpy.concrete import ConcreteArray - from pypy.module.micronumpy.support import calc_strides + from pypy.module.micronumpy.strides import calc_strides dtype = space.interp_w(descriptor.W_Dtype, space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) shape = shape_converter(space, w_shape, dtype) diff --git a/pypy/module/micronumpy/sort.py b/pypy/module/micronumpy/sort.py --- 
a/pypy/module/micronumpy/sort.py +++ b/pypy/module/micronumpy/sort.py @@ -1,18 +1,16 @@ - """ This is the implementation of various sorting routines in numpy. It's here because it only makes sense on a concrete array """ - -from rpython.rtyper.lltypesystem import rffi, lltype +from pypy.interpreter.error import OperationError, oefmt from rpython.rlib.listsort import make_timsort_class +from rpython.rlib.objectmodel import specialize +from rpython.rlib.rarithmetic import widen from rpython.rlib.rawstorage import raw_storage_getitem, raw_storage_setitem, \ free_raw_storage, alloc_raw_storage from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.rarithmetic import widen -from rpython.rlib.objectmodel import specialize -from pypy.interpreter.error import OperationError, oefmt +from rpython.rtyper.lltypesystem import rffi, lltype +from pypy.module.micronumpy import descriptor, types, constants as NPY from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy import descriptor, types, constants as NPY from pypy.module.micronumpy.iter import AxisIterator INT_SIZE = rffi.sizeof(lltype.Signed) diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -1,8 +1,7 @@ +from pypy.interpreter.error import OperationError, oefmt from rpython.rlib import jit -from pypy.interpreter.error import OperationError, oefmt +from pypy.module.micronumpy import support, constants as NPY from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy import support -from pypy.module.micronumpy import constants as NPY # structures to describe slicing @@ -21,7 +20,6 @@ # ofs only changes start # create a view of the original array by extending # the shape, strides, backstrides of the array - from pypy.module.micronumpy.support import calc_strides strides, backstrides = calc_strides(subdtype.shape, subdtype.subdtype, arr.order) final_shape = arr.shape + subdtype.shape @@ -345,6 +343,25 @@ return new_shape + at jit.unroll_safe +def calc_strides(shape, dtype, order): + strides = [] + backstrides = [] + s = 1 + shape_rev = shape[:] + if order == 'C': + shape_rev.reverse() + for sh in shape_rev: + slimit = max(sh, 1) + strides.append(s * dtype.elsize) + backstrides.append(s * (slimit - 1) * dtype.elsize) + s *= slimit + if order == 'C': + strides.reverse() + backstrides.reverse() + return strides, backstrides + + # Recalculating strides. Find the steps that the iteration does for each # dimension, given the stride and shape. Then try to create a new stride that # fits the new shape, using those steps. 
If there is a shape/step mismatch diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -1,5 +1,5 @@ +from pypy.interpreter.error import OperationError, oefmt from rpython.rlib import jit -from pypy.interpreter.error import OperationError, oefmt def issequence_w(space, w_obj): @@ -25,22 +25,3 @@ for x in s: i *= x return i - - - at jit.unroll_safe -def calc_strides(shape, dtype, order): - strides = [] - backstrides = [] - s = 1 - shape_rev = shape[:] - if order == 'C': - shape_rev.reverse() - for sh in shape_rev: - slimit = max(sh, 1) - strides.append(s * dtype.elsize) - backstrides.append(s * (slimit - 1) * dtype.elsize) - s *= slimit - if order == 'C': - strides.reverse() - backstrides.reverse() - return strides, backstrides diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -850,6 +850,17 @@ assert b == a b[...] = 2.5 assert a == 2.5 + a = array([]).reshape((0, 2)) + assert a.shape == (0, 2) + assert a.strides == (16, 8) + a = array([]) + a.shape = (4, 0, 3, 0, 0, 2) + assert a.strides == (48, 48, 16, 16, 16, 8) + a = array(1.5) + assert a.reshape(()).shape == () + a = array(1.5) + a.shape = () + assert a.strides == () a = array(range(12)) exc = raises(ValueError, "b = a.reshape(())") assert str(exc.value) == "total size of new array must be unchanged" diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1,32 +1,31 @@ import functools import math - from pypy.interpreter.error import OperationError, oefmt -from pypy.module.micronumpy import boxes -from pypy.module.micronumpy import support -from pypy.module.micronumpy.concrete import SliceArray, VoidBoxStorage from pypy.objspace.std.floatobject import float2string from pypy.objspace.std.complexobject import str_format -from rpython.rlib import rfloat, clibffi, rcomplex -from rpython.rlib.rawstorage import (alloc_raw_storage, - raw_storage_getitem_unaligned, raw_storage_setitem_unaligned) +from rpython.rlib import clibffi, jit, rfloat, rcomplex from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import widen, byteswap, r_ulonglong, \ most_neg_value_of, LONG_BIT -from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rlib.rstruct.runpack import runpack -from rpython.rlib.rstruct.nativefmttable import native_is_bigendian +from rpython.rlib.rawstorage import (alloc_raw_storage, + raw_storage_getitem_unaligned, raw_storage_setitem_unaligned) +from rpython.rlib.rstring import StringBuilder from rpython.rlib.rstruct.ieee import (float_pack, float_unpack, unpack_float, pack_float80, unpack_float80) +from rpython.rlib.rstruct.nativefmttable import native_is_bigendian +from rpython.rlib.rstruct.runpack import runpack +from rpython.rtyper.lltypesystem import lltype, rffi from rpython.tool.sourcetools import func_with_new_name -from rpython.rlib import jit -from rpython.rlib.rstring import StringBuilder +from pypy.module.micronumpy import boxes +from pypy.module.micronumpy.concrete import SliceArray, VoidBoxStorage +from pypy.module.micronumpy.strides import calc_strides degToRad = math.pi / 180.0 log2 = math.log(2) log2e = 1. 
/ log2 log10 = math.log(10) + def simple_unary_op(func): specialize.argtype(1)(func) @functools.wraps(func) @@ -1792,8 +1791,8 @@ from pypy.module.micronumpy.base import W_NDimArray if dtype is None: dtype = arr.dtype - strides, backstrides = support.calc_strides(dtype.shape, - dtype.subdtype, arr.order) + strides, backstrides = calc_strides(dtype.shape, dtype.subdtype, + arr.order) implementation = SliceArray(i + offset, strides, backstrides, dtype.shape, arr, W_NDimArray(arr), dtype.subdtype) diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -2,13 +2,12 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty -from pypy.module.micronumpy import boxes, descriptor, loop from rpython.rlib import jit from rpython.rlib.rarithmetic import LONG_BIT, maxint from rpython.tool.sourcetools import func_with_new_name +from pypy.module.micronumpy import boxes, descriptor, loop, constants as NPY +from pypy.module.micronumpy.base import convert_to_array, W_NDimArray from pypy.module.micronumpy.strides import shape_agreement -from pypy.module.micronumpy.base import convert_to_array, W_NDimArray -from pypy.module.micronumpy import constants as NPY def done_if_true(dtype, val): return dtype.itemtype.bool(val) From noreply at buildbot.pypy.org Thu Feb 27 12:35:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Feb 2014 12:35:12 +0100 (CET) Subject: [pypy-commit] stmgc default: comment fixes Message-ID: <20140227113512.F24A91C35DA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r890:24a6732aec1d Date: 2014-02-27 12:25 +0100 http://bitbucket.org/pypy/stmgc/changeset/24a6732aec1d/ Log: comment fixes diff --git a/c7/stm/gcpage.h b/c7/stm/gcpage.h --- a/c7/stm/gcpage.h +++ b/c7/stm/gcpage.h @@ -3,12 +3,11 @@ complete pages, one at a time, which uniformly contain objects of size "8 * N" for some N in range(2, GC_N_SMALL_REQUESTS). We are taking from the lowest addresses "large" objects, which are at least - 128 bytes long, allocated by largemalloc.c. The limit was picked - from a run a PyPy, showing that (in this case) the number of - allocations of at least 128 bytes is a lot below 1%. + 288 bytes long, allocated by largemalloc.c. The limit is the same + as used in PyPy's default GC. */ -#define GC_N_SMALL_REQUESTS 16 +#define GC_N_SMALL_REQUESTS 36 static char *uninitialized_page_start; /* within segment 0 */ diff --git a/c7/stm/largemalloc.c b/c7/stm/largemalloc.c --- a/c7/stm/largemalloc.c +++ b/c7/stm/largemalloc.c @@ -36,9 +36,9 @@ /* The chunk has a total size of 'size'. It is immediately followed in memory by another chunk. This list ends with the last "chunk" - being actually only one word long, 'size_t prev_size'. Both this - last chunk and the theoretical chunk before the first one are - considered "not free". */ + being actually only two words long, with END_MARKER as 'size'. + Both this last chunk and the theoretical chunk before the first + one are considered "not free". */ } mchunk_t; #define FLAG_SORTED 1 @@ -64,7 +64,7 @@ /* The free chunks are stored in "bins". Each bin is a doubly-linked list of chunks. There are 84 bins, with largebin_index() giving the - correspondence between sizes are bin indices. + correspondence between sizes and bin indices. 
Each free chunk is preceeded in memory by a non-free chunk (or no chunk at all). Each free chunk is followed in memory by a non-free From noreply at buildbot.pypy.org Thu Feb 27 13:15:14 2014 From: noreply at buildbot.pypy.org (fijal) Date: Thu, 27 Feb 2014 13:15:14 +0100 (CET) Subject: [pypy-commit] pypy default: fix those things (maybe?) Message-ID: <20140227121514.265D51C03D5@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69506:86a6c93110ee Date: 2014-02-27 14:14 +0200 http://bitbucket.org/pypy/pypy/changeset/86a6c93110ee/ Log: fix those things (maybe?) diff --git a/include/PyPy.h b/include/PyPy.h --- a/include/PyPy.h +++ b/include/PyPy.h @@ -8,16 +8,12 @@ extern "C" { #endif -/* You should call this first once. */ -#define pypy_init(need_threads) do { pypy_asm_stack_bottom(); \ -rpython_startup_code();\ - if (need_threads) pypy_init_threads(); } while (0) +// call this first +char* rpython_startup_code(void); -// deprecated interface -void rpython_startup_code(void); +// pypy_init_threads has to be called in case you want to use threads void pypy_init_threads(void); - /* Initialize the home directory of PyPy. It is necessary to call this. Call it with "home" being the file name of the libpypy.so, for diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -14,12 +14,15 @@ to make a full API working, provided you'll follow a few principles. The API is: -.. function:: void pypy_init(int need_threads); +.. function:: char* rpython_startup_code(void); This is a function that you have to call (once) before calling anything. It initializes the RPython/PyPy GC and does a bunch of necessary startup - code. This function cannot fail. Pass 1 in case you need thread support, 0 - otherwise. + code. This function cannot fail and always returns NULL. + +.. function:: void pypy_init_threads(void); + + Initialize threads. Only need to be called if there are any threads involved .. function:: long pypy_setup_home(char* home, int verbose); @@ -46,7 +49,7 @@ In case your application uses threads that are initialized outside of PyPy, you need to call this function to tell the PyPy GC to track this thread. Note that this function is not thread-safe itself, so you need to guard it - with a mutex. Do not call it from the main thread. + with a mutex. Simple example -------------- diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -80,7 +80,7 @@ # register the minimal equivalent of running a small piece of code. 
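Putting together the embedding API from the pypy/doc/embedding.rst hunk above, a caller written in C would look roughly like this. The library path is a placeholder, a non-zero result from pypy_setup_home is assumed to mean failure, and this is a sketch rather than the documentation's own example:

    #include <stdio.h>
    #include "PyPy.h"   /* the declarations shown in include/PyPy.h above */

    int main(void)
    {
        rpython_startup_code();              /* must be called first, once */
        /* file name of libpypy-c.so, used to locate the stdlib (placeholder) */
        if (pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1) != 0) {
            fprintf(stderr, "could not set up the PyPy home\n");
            return 1;
        }
        pypy_init_threads();                 /* only if threads are involved */
        pypy_execute_source("print 'hello from embedded PyPy'");
        return 0;
    }

Building it needs the include/ directory and libpypy-c on the compiler's search paths, e.g. something like gcc embed.c -Ipypy/include -Lpypy/bin -lpypy-c (paths again placeholders).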
This # should be used as sparsely as possible, just to register callbacks - from rpython.rlib.entrypoint import entrypoint + from rpython.rlib.entrypoint import entrypoint, RPython_StartupCode from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.lltypesystem.lloperation import llop @@ -94,7 +94,6 @@ @entrypoint('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): from pypy.module.sys.initpath import pypy_find_stdlib - llop.gc_stack_bottom(lltype.Void) verbose = rffi.cast(lltype.Signed, verbose) if ll_home: home = rffi.charp2str(ll_home) @@ -124,7 +123,6 @@ def pypy_execute_source(ll_source): after = rffi.aroundstate.after if after: after() - llop.gc_stack_bottom(lltype.Void) source = rffi.charp2str(ll_source) res = _pypy_execute_source(source) before = rffi.aroundstate.before From noreply at buildbot.pypy.org Thu Feb 27 13:17:35 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 13:17:35 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: pep8 Message-ID: <20140227121735.8DD551C03D5@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69507:4769350dc332 Date: 2014-02-27 01:22 -0500 http://bitbucket.org/pypy/pypy/changeset/4769350dc332/ Log: pep8 diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -8,6 +8,7 @@ from pypy.module.micronumpy import support from pypy.module.micronumpy import constants as NPY + def where(space, w_arr, w_x=None, w_y=None): """where(condition, [x, y]) @@ -91,6 +92,7 @@ out = W_NDimArray.from_shape(space, shape, dtype) return loop.where(space, out, shape, arr, x, y, dtype) + def dot(space, w_obj1, w_obj2, w_out=None): w_arr = convert_to_array(space, w_obj1) if w_arr.is_scalar(): @@ -162,6 +164,7 @@ axis_start += arr.get_shape()[axis] return res + @unwrap_spec(repeats=int) def repeat(space, w_arr, repeats, w_axis): arr = convert_to_array(space, w_arr) @@ -186,9 +189,11 @@ Chunks(chunks).apply(space, w_res).implementation.setslice(space, arr) return w_res + def count_nonzero(space, w_obj): return space.wrap(loop.count_all_true(convert_to_array(space, w_obj))) + def choose(space, w_arr, w_choices, w_out, w_mode): arr = convert_to_array(space, w_arr) choices = [convert_to_array(space, w_item) for w_item @@ -208,6 +213,7 @@ loop.choose(space, arr, choices, shape, dtype, out, mode) return out + def put(space, w_arr, w_indices, w_values, w_mode): arr = convert_to_array(space, w_arr) mode = clipmode_converter(space, w_mode) @@ -256,6 +262,7 @@ arr.setitem(space, [index], dtype.coerce(space, value)) + def diagonal(space, arr, offset, axis1, axis2): shape = arr.get_shape() shapelen = len(shape) diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -70,6 +70,7 @@ ret = space.newtuple([scalar, space.newtuple([space.wrap(self._get_dtype(space)), space.wrap(self.raw_str())])]) return ret + class PrimitiveBox(Box): _mixin_ = True _immutable_fields_ = ['value'] @@ -94,6 +95,7 @@ lltype.free(value, flavor="raw") return ret + class ComplexBox(Box): _mixin_ = True _immutable_fields_ = ['real', 'imag'] @@ -361,6 +363,7 @@ return self.get_dtype(space).itemtype.imag(self) w_flags = None + def descr_get_flags(self, space): if self.w_flags is None: self.w_flags = W_FlagsObject(self) diff --git a/pypy/module/micronumpy/constants.py 
b/pypy/module/micronumpy/constants.py --- a/pypy/module/micronumpy/constants.py +++ b/pypy/module/micronumpy/constants.py @@ -57,7 +57,7 @@ INTPLTR = 'p' UINTPLTR = 'P' -GENBOOLLTR ='b' +GENBOOLLTR = 'b' SIGNEDLTR = 'i' UNSIGNEDLTR = 'u' FLOATINGLTR = 'f' diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -46,6 +46,7 @@ "objects are not aligned")) return out_shape, right_critical_dim + class __extend__(W_NDimArray): @jit.unroll_safe def descr_get_shape(self, space): @@ -1124,6 +1125,7 @@ return w_obj pass + @unwrap_spec(offset=int) def descr_new_array(space, w_subtype, w_shape, w_dtype=None, w_buffer=None, offset=0, w_strides=None, w_order=None): @@ -1174,6 +1176,7 @@ space.wrap('__array_finalize__')), w_subtype) return w_ret + @unwrap_spec(addr=int) def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype, w_subtype=None): """ diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -10,9 +10,11 @@ from pypy.module.micronumpy.base import convert_to_array, W_NDimArray from pypy.module.micronumpy import constants as NPY + def done_if_true(dtype, val): return dtype.itemtype.bool(val) + def done_if_false(dtype, val): return not dtype.itemtype.bool(val) @@ -545,6 +547,7 @@ dtypenum += 2 return descriptor.get_dtype_cache(space).dtypes_by_num[dtypenum] + @jit.unroll_safe def find_unaryop_result_dtype(space, dt, promote_to_float=False, promote_bools=False, promote_to_largest=False): @@ -571,6 +574,7 @@ return dtype return dt + def find_dtype_for_scalar(space, w_obj, current_guess=None): bool_dtype = descriptor.get_dtype_cache(space).w_booldtype long_dtype = descriptor.get_dtype_cache(space).w_longdtype @@ -612,9 +616,9 @@ 'unable to create dtype from objects, "%T" instance not ' 'supported', w_obj) + def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func, bool_result): - dtype_cache = descriptor.get_dtype_cache(space) def get_op(dtype): try: return getattr(dtype.itemtype, op_name) @@ -622,6 +626,7 @@ raise oefmt(space.w_NotImplementedError, "%s not implemented for %s", ufunc_name, dtype.get_name()) + dtype_cache = descriptor.get_dtype_cache(space) if argcount == 1: def impl(res_dtype, value): res = get_op(res_dtype)(value) @@ -763,6 +768,6 @@ ufunc = W_Ufunc2(func, ufunc_name, **extra_kwargs) setattr(self, ufunc_name, ufunc) + def get(space): return space.fromcache(UfuncState) - From noreply at buildbot.pypy.org Thu Feb 27 13:17:36 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 13:17:36 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: allow creation of flagsobj from applevel Message-ID: <20140227121736.C807C1C03D5@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69508:026fac042564 Date: 2014-02-27 02:24 -0500 http://bitbucket.org/pypy/pypy/changeset/026fac042564/ Log: allow creation of flagsobj from applevel diff --git a/pypy/module/micronumpy/flagsobj.py b/pypy/module/micronumpy/flagsobj.py --- a/pypy/module/micronumpy/flagsobj.py +++ b/pypy/module/micronumpy/flagsobj.py @@ -6,9 +6,13 @@ class W_FlagsObject(W_Root): def __init__(self, arr): - self.arr = arr self.flags = 0 + def descr__new__(space, w_subtype): + self = space.allocate_instance(W_FlagsObject, w_subtype) + W_FlagsObject.__init__(self, None) + return self + def descr_get_contiguous(self, space): return 
space.w_True @@ -60,6 +64,8 @@ W_FlagsObject.typedef = TypeDef("flagsobj", __module__ = "numpy", + __new__ = interp2app(W_FlagsObject.descr__new__.im_func), + __getitem__ = interp2app(W_FlagsObject.descr_getitem), __setitem__ = interp2app(W_FlagsObject.descr_setitem), __eq__ = interp2app(W_FlagsObject.descr_eq), diff --git a/pypy/module/micronumpy/test/test_flagsobj.py b/pypy/module/micronumpy/test/test_flagsobj.py --- a/pypy/module/micronumpy/test/test_flagsobj.py +++ b/pypy/module/micronumpy/test/test_flagsobj.py @@ -2,6 +2,14 @@ class AppTestFlagsObj(BaseNumpyAppTest): + def test_init(self): + import numpy as np + a = np.array([1,2,3]) + assert a.flags['C'] is True + b = type(a.flags)() + assert b is not a.flags + assert b['C'] is True + def test_repr(self): import numpy as np a = np.array([1,2,3]) From noreply at buildbot.pypy.org Thu Feb 27 13:17:37 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 13:17:37 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: add new iterator implementation Message-ID: <20140227121737.F02DF1C03D5@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69509:d2da780cad17 Date: 2014-02-27 06:57 -0500 http://bitbucket.org/pypy/pypy/changeset/d2da780cad17/ Log: add new iterator implementation diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -79,18 +79,48 @@ return [space.wrap(self.indexes[i]) for i in range(shapelen)] -class BaseArrayIterator(object): +class ArrayIterator(object): + def __init__(self, array): + self.array = array + self.start = array.start + self.size = array.get_size() + self.ndim_m1 = len(array.shape) - 1 + self.shape_m1 = [s - 1 for s in array.shape] + self.strides = array.strides[:] + self.backstrides = array.backstrides[:] + self.reset() + + def reset(self): + self.index = 0 + self.indices = [0] * (self.ndim_m1 + 1) + self.offset = self.start + + @jit.unroll_safe def next(self): - raise NotImplementedError # purely abstract base class + self.index += 1 + for i in xrange(self.ndim_m1, -1, -1): + if self.indices[i] < self.shape_m1[i]: + self.indices[i] += 1 + self.offset += self.strides[i] + break + else: + self.indices[i] = 0 + self.offset -= self.backstrides[i] + + def done(self): + return self.index >= self.size + + def getitem(self): + return self.array.getitem(self.offset) + + def getitem_bool(self): + return self.array.getitem_bool(self.offset) def setitem(self, elem): - raise NotImplementedError + self.array.setitem(self.offset, elem) - def set_scalar_object(self, value): - raise NotImplementedError # works only on scalars - -class ConcreteArrayIterator(BaseArrayIterator): +class ConcreteArrayIterator(ArrayIterator): _immutable_fields_ = ['array', 'skip', 'size'] def __init__(self, array): @@ -206,7 +236,7 @@ return self.indexes[d] -class AxisIterator(BaseArrayIterator): +class AxisIterator(ArrayIterator): def __init__(self, array, shape, dim, cumulative): self.shape = shape strides = array.get_strides() From noreply at buildbot.pypy.org Thu Feb 27 13:17:39 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 13:17:39 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: use the new iterator where possible Message-ID: <20140227121739.37AEB1C03D5@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69510:b2e159740743 Date: 2014-02-27 03:09 -0500 http://bitbucket.org/pypy/pypy/changeset/b2e159740743/ Log: use the new 
iterator where possible diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -274,6 +274,17 @@ backstrides) return loop.setslice(space, self.get_shape(), impl, self) + def create_iter(self, shape=None, backward_broadcast=False): + if shape is not None and \ + support.product(shape) > support.product(self.get_shape()): + r = calculate_broadcast_strides(self.get_strides(), + self.get_backstrides(), + self.get_shape(), shape, + backward_broadcast) + return iter.MultiDimViewIterator(self, self.start, + r[0], r[1], shape) + return iter.ArrayIterator(self) + def create_axis_iter(self, shape, dim, cum): return iter.AxisIterator(self, shape, dim, cum) @@ -333,26 +344,6 @@ self.backstrides = backstrides self.storage = storage - def create_iter(self, shape=None, backward_broadcast=False, require_index=False): - if shape is not None and \ - support.product(shape) > support.product(self.get_shape()): - r = calculate_broadcast_strides(self.get_strides(), - self.get_backstrides(), - self.get_shape(), shape, - backward_broadcast) - return iter.MultiDimViewIterator(self, self.start, - r[0], r[1], shape) - if not require_index: - return iter.ConcreteArrayIterator(self) - if len(self.get_shape()) <= 1: - return iter.OneDimViewIterator(self, self.start, - self.get_strides(), - self.get_shape()) - return iter.MultiDimViewIterator(self, self.start, - self.get_strides(), - self.get_backstrides(), - self.get_shape()) - def fill(self, space, box): self.dtype.itemtype.fill(self.storage, self.dtype.elsize, box, 0, self.size, 0) @@ -438,24 +429,6 @@ def fill(self, space, box): loop.fill(self, box.convert_to(space, self.dtype)) - def create_iter(self, shape=None, backward_broadcast=False, require_index=False): - if shape is not None and \ - support.product(shape) > support.product(self.get_shape()): - r = calculate_broadcast_strides(self.get_strides(), - self.get_backstrides(), - self.get_shape(), shape, - backward_broadcast) - return iter.MultiDimViewIterator(self, self.start, - r[0], r[1], shape) - if len(self.get_shape()) <= 1: - return iter.OneDimViewIterator(self, self.start, - self.get_strides(), - self.get_shape()) - return iter.MultiDimViewIterator(self, self.start, - self.get_strides(), - self.get_backstrides(), - self.get_shape()) - def set_shape(self, space, orig_array, new_shape): if len(self.get_shape()) < 2 or self.size == 0: # TODO: this code could be refactored into calc_strides diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -19,7 +19,7 @@ def get_shape(self): return self.shape - def create_iter(self, shape=None, backward_broadcast=False, require_index=False): + def create_iter(self, shape=None, backward_broadcast=False): assert isinstance(self.base(), W_NDimArray) return self.base().create_iter() @@ -33,7 +33,6 @@ def reset(self): self.iter = self.base.create_iter() - self.index = 0 def descr_len(self, space): return space.wrap(self.base.get_size()) @@ -43,14 +42,13 @@ raise OperationError(space.w_StopIteration, space.w_None) w_res = self.iter.getitem() self.iter.next() - self.index += 1 return w_res def descr_index(self, space): - return space.wrap(self.index) + return space.wrap(self.iter.index) def descr_coords(self, space): - coords = self.base.to_coords(space, space.wrap(self.index)) + coords = self.base.to_coords(space, space.wrap(self.iter.index)) return 
space.newtuple([space.wrap(c) for c in coords]) def descr_getitem(self, space, w_idx): diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -37,10 +37,9 @@ All the calculations happen in next() next_skip_x(steps) tries to do the iteration for a number of steps at once, -but then we cannot gaurentee that we only overflow one single shape +but then we cannot guarantee that we only overflow one single shape dimension, perhaps we could overflow times in one big step. """ - from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy import support from rpython.rlib import jit @@ -107,6 +106,11 @@ self.indices[i] = 0 self.offset -= self.backstrides[i] + def next_skip_x(self, step): + # XXX implement + for _ in range(step): + self.next() + def done(self): return self.index >= self.size @@ -120,70 +124,7 @@ self.array.setitem(self.offset, elem) -class ConcreteArrayIterator(ArrayIterator): - _immutable_fields_ = ['array', 'skip', 'size'] - - def __init__(self, array): - self.array = array - self.offset = 0 - self.skip = array.dtype.elsize - self.size = array.size - - def setitem(self, elem): - self.array.setitem(self.offset, elem) - - def getitem(self): - return self.array.getitem(self.offset) - - def getitem_bool(self): - return self.array.getitem_bool(self.offset) - - def next(self): - self.offset += self.skip - - def next_skip_x(self, x): - self.offset += self.skip * x - - def done(self): - return self.offset >= self.size - - def reset(self): - self.offset %= self.size - - -class OneDimViewIterator(ConcreteArrayIterator): - def __init__(self, array, start, strides, shape): - self.array = array - self.offset = start - self.index = 0 - assert len(strides) == len(shape) - if len(shape) == 0: - self.skip = array.dtype.elsize - self.size = 1 - else: - assert len(shape) == 1 - self.skip = strides[0] - self.size = shape[0] - - def next(self): - self.offset += self.skip - self.index += 1 - - def next_skip_x(self, x): - self.offset += self.skip * x - self.index += x - - def done(self): - return self.index >= self.size - - def reset(self): - self.offset %= self.size - - def get_index(self, d): - return self.index - - -class MultiDimViewIterator(ConcreteArrayIterator): +class MultiDimViewIterator(ArrayIterator): def __init__(self, array, start, strides, backstrides, shape): self.indexes = [0] * len(shape) self.array = array @@ -232,9 +173,6 @@ def reset(self): self.offset %= self.size - def get_index(self, d): - return self.indexes[d] - class AxisIterator(ArrayIterator): def __init__(self, array, shape, dim, cumulative): @@ -258,12 +196,6 @@ self.dim = dim self.array = array - def setitem(self, elem): - self.array.setitem(self.offset, elem) - - def getitem(self): - return self.array.getitem(self.offset) - @jit.unroll_safe def next(self): for i in range(len(self.shape) - 1, -1, -1): diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -332,7 +332,7 @@ def nonzero(res, arr, box): res_iter = res.create_iter() - arr_iter = arr.create_iter(require_index=True) + arr_iter = arr.create_iter() shapelen = len(arr.shape) dtype = arr.dtype dims = range(shapelen) @@ -340,7 +340,7 @@ nonzero_driver.jit_merge_point(shapelen=shapelen, dims=dims, dtype=dtype) if arr_iter.getitem_bool(): for d in dims: - res_iter.setitem(box(arr_iter.get_index(d))) + res_iter.setitem(box(arr_iter.indices[d])) 
res_iter.next() arr_iter.next() return res @@ -436,8 +436,6 @@ arr_iter.next_skip_x(step) length -= 1 val_iter.next() - # WTF numpy? - val_iter.reset() fromstring_driver = jit.JitDriver(name = 'numpy_fromstring', greens = ['itemsize', 'dtype'], diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -280,11 +280,10 @@ s.append(suffix) return s.build() - def create_iter(self, shape=None, backward_broadcast=False, require_index=False): + def create_iter(self, shape=None, backward_broadcast=False): assert isinstance(self.implementation, BaseConcreteArray) return self.implementation.create_iter( - shape=shape, backward_broadcast=backward_broadcast, - require_index=require_index) + shape=shape, backward_broadcast=backward_broadcast) def create_axis_iter(self, shape, dim, cum): return self.implementation.create_axis_iter(shape, dim, cum) From noreply at buildbot.pypy.org Thu Feb 27 13:17:40 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 13:17:40 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: merge default Message-ID: <20140227121740.72EB01C03D5@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69511:fa46e3fc1775 Date: 2014-02-27 06:59 -0500 http://bitbucket.org/pypy/pypy/changeset/fa46e3fc1775/ Log: merge default diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py --- a/lib-python/2.7/test/test_os.py +++ b/lib-python/2.7/test/test_os.py @@ -129,9 +129,13 @@ fp = os.tmpfile() except OSError, second: self.assertEqual(first.args, second.args) + return else: - self.fail("expected os.tmpfile() to raise OSError") - return + if test_support.check_impl_detail(pypy=False): + self.fail("expected os.tmpfile() to raise OSError") + # on PyPy, os.tmpfile() uses the tempfile module + # anyway, so works even if we cannot write in root. + fp.close() else: # open() worked, therefore, tmpfile() should work. Close our # dummy file and proceed with the test as normal. 
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -122,11 +122,13 @@ @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): - rffi.aroundstate.after() + after = rffi.aroundstate.after + if after: after() llop.gc_stack_bottom(lltype.Void) source = rffi.charp2str(ll_source) res = _pypy_execute_source(source) - rffi.aroundstate.before() + before = rffi.aroundstate.before + if before: before() return rffi.cast(rffi.INT, res) @entrypoint('main', [], c_name='pypy_init_threads') @@ -134,7 +136,8 @@ if not space.config.objspace.usemodules.thread: return os_thread.setup_threads(space) - rffi.aroundstate.before() + before = rffi.aroundstate.before + if before: before() @entrypoint('main', [], c_name='pypy_thread_attach') def pypy_thread_attach(): @@ -145,7 +148,8 @@ rthread.gc_thread_start() os_thread.bootstrapper.nbthreads += 1 os_thread.bootstrapper.release() - rffi.aroundstate.before() + before = rffi.aroundstate.before + if before: before() w_globals = space.newdict() space.setitem(w_globals, space.wrap('__builtins__'), diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -1,12 +1,12 @@ +from pypy.interpreter.error import OperationError, oefmt +from pypy.interpreter.gateway import unwrap_spec +from pypy.module.micronumpy import loop, descriptor, ufuncs, support, \ + constants as NPY from pypy.module.micronumpy.base import convert_to_array, W_NDimArray -from pypy.module.micronumpy import loop, descriptor, ufuncs +from pypy.module.micronumpy.converters import clipmode_converter from pypy.module.micronumpy.strides import Chunk, Chunks, shape_agreement, \ shape_agreement_multiple -from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.gateway import unwrap_spec -from pypy.module.micronumpy.converters import clipmode_converter -from pypy.module.micronumpy import support -from pypy.module.micronumpy import constants as NPY + def where(space, w_arr, w_x=None, w_y=None): diff --git a/pypy/module/micronumpy/base.py b/pypy/module/micronumpy/base.py --- a/pypy/module/micronumpy/base.py +++ b/pypy/module/micronumpy/base.py @@ -1,7 +1,6 @@ +from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError -from pypy.interpreter.baseobjspace import W_Root from rpython.tool.pairtype import extendabletype -from pypy.module.micronumpy.support import calc_strides def wrap_impl(space, w_cls, w_instance, impl): @@ -31,9 +30,10 @@ @staticmethod def from_shape(space, shape, dtype, order='C', w_instance=None): from pypy.module.micronumpy import concrete + from pypy.module.micronumpy.strides import calc_strides strides, backstrides = calc_strides(shape, dtype.base, order) impl = concrete.ConcreteArray(shape, dtype.base, order, strides, - backstrides) + backstrides) if w_instance: return wrap_impl(space, space.type(w_instance), w_instance, impl) return W_NDimArray(impl) @@ -42,6 +42,7 @@ def from_shape_and_storage(space, shape, storage, dtype, order='C', owning=False, w_subtype=None, w_base=None, writable=True): from pypy.module.micronumpy import concrete + from pypy.module.micronumpy.strides import calc_strides strides, backstrides = calc_strides(shape, dtype, order) if w_base is not None: if owning: diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- 
a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -1,23 +1,22 @@ from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec +from pypy.interpreter.mixedmodule import MixedModule from pypy.interpreter.typedef import TypeDef, GetSetProperty from pypy.objspace.std.bytesobject import W_BytesObject +from pypy.objspace.std.complextype import complex_typedef from pypy.objspace.std.floattype import float_typedef +from pypy.objspace.std.intobject import W_IntObject from pypy.objspace.std.unicodeobject import W_UnicodeObject -from pypy.objspace.std.intobject import W_IntObject -from pypy.objspace.std.complextype import complex_typedef from rpython.rlib.rarithmetic import LONG_BIT -from rpython.rtyper.lltypesystem import rffi -from rpython.tool.sourcetools import func_with_new_name -from pypy.module.micronumpy.concrete import VoidBoxStorage -from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy.flagsobj import W_FlagsObject -from pypy.interpreter.mixedmodule import MixedModule -from rpython.rtyper.lltypesystem import lltype from rpython.rlib.rstring import StringBuilder from rpython.rlib.objectmodel import specialize +from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.tool.sourcetools import func_with_new_name from pypy.module.micronumpy import constants as NPY +from pypy.module.micronumpy.base import W_NDimArray +from pypy.module.micronumpy.concrete import VoidBoxStorage +from pypy.module.micronumpy.flagsobj import W_FlagsObject MIXIN_32 = (W_IntObject.typedef,) if LONG_BIT == 32 else () diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -1,20 +1,17 @@ """ This is a set of tools for standalone compiling of numpy expressions. 
It should not be imported by the module itself """ - import re - from pypy.interpreter import special from pypy.interpreter.baseobjspace import InternalSpaceCache, W_Root from pypy.interpreter.error import OperationError -from pypy.module.micronumpy import boxes -from pypy.module.micronumpy.descriptor import get_dtype_cache +from rpython.rlib.objectmodel import specialize, instantiate +from rpython.rlib.nonconst import NonConstant +from pypy.module.micronumpy import boxes, ufuncs +from pypy.module.micronumpy.arrayops import where from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy.ctors import array -from pypy.module.micronumpy.arrayops import where -from pypy.module.micronumpy import ufuncs -from rpython.rlib.objectmodel import specialize, instantiate -from rpython.rlib.nonconst import NonConstant +from pypy.module.micronumpy.descriptor import get_dtype_cache class BogusBytecode(Exception): diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -1,16 +1,16 @@ -from pypy.module.micronumpy import support, loop, iter -from pypy.module.micronumpy.base import convert_to_array, W_NDimArray,\ - ArrayArgumentException -from pypy.module.micronumpy.strides import (Chunk, Chunks, NewAxisChunk, - RecordChunk, calc_new_strides, shape_agreement, calculate_broadcast_strides, - calculate_dot_strides) +from pypy.interpreter.buffer import RWBuffer from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.buffer import RWBuffer from rpython.rlib import jit -from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.debug import make_sure_not_resized from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage, \ raw_storage_getitem, raw_storage_setitem, RAW_STORAGE -from rpython.rlib.debug import make_sure_not_resized +from rpython.rtyper.lltypesystem import rffi, lltype +from pypy.module.micronumpy import support, loop, iter +from pypy.module.micronumpy.base import convert_to_array, W_NDimArray, \ + ArrayArgumentException +from pypy.module.micronumpy.strides import (Chunk, Chunks, NewAxisChunk, + RecordChunk, calc_strides, calc_new_strides, shape_agreement, + calculate_broadcast_strides, calculate_dot_strides) class BaseConcreteArray(object): @@ -61,10 +61,12 @@ def get_storage_size(self): return self.size - def reshape(self, space, orig_array, new_shape): + def reshape(self, orig_array, new_shape): # Since we got to here, prod(new_shape) == self.size new_strides = None - if self.size > 0: + if self.size == 0: + new_strides, _ = calc_strides(new_shape, self.dtype, self.order) + else: if len(self.get_shape()) == 0: new_strides = [self.dtype.elsize] * len(new_shape) else: @@ -81,7 +83,7 @@ new_shape, self, orig_array) def get_view(self, space, orig_array, dtype, new_shape): - strides, backstrides = support.calc_strides(new_shape, dtype, + strides, backstrides = calc_strides(new_shape, dtype, self.order) return SliceArray(self.start, strides, backstrides, new_shape, self, orig_array, dtype=dtype) @@ -268,7 +270,7 @@ backstrides, shape, self, orig_array) def copy(self, space): - strides, backstrides = support.calc_strides(self.get_shape(), self.dtype, + strides, backstrides = calc_strides(self.get_shape(), self.dtype, self.order) impl = ConcreteArray(self.get_shape(), self.dtype, self.order, strides, backstrides) @@ -323,7 +325,7 @@ return ArrayBuffer(self) def astype(self, space, dtype): - strides, backstrides = 
support.calc_strides(self.get_shape(), dtype, + strides, backstrides = calc_strides(self.get_shape(), dtype, self.order) impl = ConcreteArray(self.get_shape(), dtype, self.order, strides, backstrides) @@ -349,7 +351,7 @@ box, 0, self.size, 0) def set_shape(self, space, orig_array, new_shape): - strides, backstrides = support.calc_strides(new_shape, self.dtype, + strides, backstrides = calc_strides(new_shape, self.dtype, self.order) return SliceArray(0, strides, backstrides, new_shape, self, orig_array) diff --git a/pypy/module/micronumpy/ctors.py b/pypy/module/micronumpy/ctors.py --- a/pypy/module/micronumpy/ctors.py +++ b/pypy/module/micronumpy/ctors.py @@ -1,16 +1,14 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import unwrap_spec, WrappedDefault +from rpython.rlib.rstring import strip_spaces from rpython.rtyper.lltypesystem import lltype, rffi -from pypy.module.micronumpy import descriptor, loop -from rpython.rlib.rstring import strip_spaces -from pypy.module.micronumpy import ufuncs +from pypy.module.micronumpy import descriptor, loop, ufuncs from pypy.module.micronumpy.base import W_NDimArray, convert_to_array from pypy.module.micronumpy.converters import shape_converter from pypy.module.micronumpy.strides import find_shape_and_elems def build_scalar(space, w_dtype, w_state): - from rpython.rtyper.lltypesystem import rffi, lltype if not isinstance(w_dtype, descriptor.W_Dtype): raise oefmt(space.w_TypeError, "argument 1 must be numpy.dtype, not %T", w_dtype) diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -4,14 +4,12 @@ from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import (TypeDef, GetSetProperty, interp_attrproperty, interp_attrproperty_w) -from pypy.module.micronumpy import types, boxes, base +from rpython.rlib import jit from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import r_longlong, r_ulonglong -from rpython.rlib import jit +from pypy.module.micronumpy import types, boxes, base, support, constants as NPY from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy.converters import byteorder_converter -from pypy.module.micronumpy import support -from pypy.module.micronumpy import constants as NPY def decode_w_dtype(space, w_dtype): diff --git a/pypy/module/micronumpy/flagsobj.py b/pypy/module/micronumpy/flagsobj.py --- a/pypy/module/micronumpy/flagsobj.py +++ b/pypy/module/micronumpy/flagsobj.py @@ -1,7 +1,7 @@ from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.error import OperationError +from pypy.interpreter.gateway import interp2app from pypy.interpreter.typedef import TypeDef, GetSetProperty -from pypy.interpreter.gateway import interp2app -from pypy.interpreter.error import OperationError class W_FlagsObject(W_Root): diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -1,7 +1,7 @@ +from pypy.interpreter.error import OperationError, oefmt +from pypy.module.micronumpy import loop from pypy.module.micronumpy.base import W_NDimArray, convert_to_array -from pypy.module.micronumpy import loop from pypy.module.micronumpy.concrete import BaseConcreteArray -from pypy.interpreter.error import OperationError, oefmt class FakeArrayImplementation(BaseConcreteArray): diff 
--git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -40,9 +40,9 @@ but then we cannot guarantee that we only overflow one single shape dimension, perhaps we could overflow times in one big step. """ +from rpython.rlib import jit +from pypy.module.micronumpy import support from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy import support -from rpython.rlib import jit class PureShapeIterator(object): diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -2,15 +2,14 @@ operations. This is the place to look for all the computations that iterate over all the array elements. """ - -from rpython.rlib.rstring import StringBuilder from pypy.interpreter.error import OperationError from rpython.rlib import jit +from rpython.rlib.rstring import StringBuilder from rpython.rtyper.lltypesystem import lltype, rffi +from pypy.module.micronumpy import support, constants as NPY from pypy.module.micronumpy.base import W_NDimArray from pypy.module.micronumpy.iter import PureShapeIterator -from pypy.module.micronumpy import support -from pypy.module.micronumpy import constants as NPY + call2_driver = jit.JitDriver(name='numpy_call2', greens = ['shapelen', 'func', 'calc_dtype', diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -1,27 +1,25 @@ -from rpython.rtyper.lltypesystem import rffi -from rpython.rlib.rawstorage import RAW_STORAGE_PTR from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.typedef import TypeDef, GetSetProperty, make_weakref_descr from pypy.interpreter.gateway import interp2app, unwrap_spec, applevel, \ WrappedDefault -from pypy.module.micronumpy.base import W_NDimArray, convert_to_array,\ - ArrayArgumentException, wrap_impl -from pypy.module.micronumpy import descriptor, ufuncs, boxes, arrayops -from pypy.module.micronumpy.strides import get_shape_from_iterable, to_coords, \ - shape_agreement, shape_agreement_multiple -from pypy.module.micronumpy.flagsobj import W_FlagsObject -from pypy.module.micronumpy.flatiter import W_FlatIterator -from pypy.module.micronumpy.appbridge import get_appbridge_cache -from pypy.module.micronumpy import loop -from pypy.module.micronumpy.arrayops import repeat, choose, put -from rpython.tool.sourcetools import func_with_new_name +from pypy.interpreter.typedef import TypeDef, GetSetProperty, make_weakref_descr from rpython.rlib import jit from rpython.rlib.rstring import StringBuilder +from rpython.rlib.rawstorage import RAW_STORAGE_PTR +from rpython.rtyper.lltypesystem import rffi +from rpython.tool.sourcetools import func_with_new_name +from pypy.module.micronumpy import descriptor, ufuncs, boxes, arrayops, loop, \ + support, constants as NPY +from pypy.module.micronumpy.appbridge import get_appbridge_cache +from pypy.module.micronumpy.arrayops import repeat, choose, put +from pypy.module.micronumpy.base import W_NDimArray, convert_to_array, \ + ArrayArgumentException, wrap_impl from pypy.module.micronumpy.concrete import BaseConcreteArray from pypy.module.micronumpy.converters import order_converter, shape_converter, \ multi_axis_converter -from pypy.module.micronumpy import support -from pypy.module.micronumpy import constants as NPY +from pypy.module.micronumpy.flagsobj import W_FlagsObject +from 
pypy.module.micronumpy.flatiter import W_FlatIterator +from pypy.module.micronumpy.strides import get_shape_from_iterable, to_coords, \ + shape_agreement, shape_agreement_multiple def _match_dot_shapes(space, left, right): @@ -224,7 +222,10 @@ self.implementation.setitem_index(space, index_list, w_value) def descr_setitem(self, space, w_idx, w_value): - if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool() \ + if space.is_w(w_idx, space.w_Ellipsis): + self.implementation.setslice(space, convert_to_array(space, w_value)) + return + elif isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool() \ and len(w_idx.get_shape()) > 0: self.setitem_filter(space, w_idx, convert_to_array(space, w_value)) return @@ -340,14 +341,13 @@ def reshape(self, space, w_shape): new_shape = get_shape_from_iterable(space, self.get_size(), w_shape) - new_impl = self.implementation.reshape(space, self, new_shape) + new_impl = self.implementation.reshape(self, new_shape) if new_impl is not None: return wrap_impl(space, space.type(self), self, new_impl) # Create copy with contiguous data arr = self.descr_copy(space) if arr.get_size() > 0: - arr.implementation = arr.implementation.reshape(space, self, - new_shape) + arr.implementation = arr.implementation.reshape(self, new_shape) assert arr.implementation else: arr.implementation.shape = new_shape @@ -381,6 +381,8 @@ raise OperationError(space.w_NotImplementedError, space.wrap( "unsupported value for order")) if len(args_w) == 1: + if space.is_none(args_w[0]): + return self.descr_view(space) w_shape = args_w[0] else: w_shape = space.newtuple(args_w) @@ -1129,7 +1131,7 @@ def descr_new_array(space, w_subtype, w_shape, w_dtype=None, w_buffer=None, offset=0, w_strides=None, w_order=None): from pypy.module.micronumpy.concrete import ConcreteArray - from pypy.module.micronumpy.support import calc_strides + from pypy.module.micronumpy.strides import calc_strides dtype = space.interp_w(descriptor.W_Dtype, space.call_function(space.gettypefor(descriptor.W_Dtype), w_dtype)) shape = shape_converter(space, w_shape, dtype) diff --git a/pypy/module/micronumpy/sort.py b/pypy/module/micronumpy/sort.py --- a/pypy/module/micronumpy/sort.py +++ b/pypy/module/micronumpy/sort.py @@ -1,18 +1,16 @@ - """ This is the implementation of various sorting routines in numpy. 
It's here because it only makes sense on a concrete array """ - -from rpython.rtyper.lltypesystem import rffi, lltype +from pypy.interpreter.error import OperationError, oefmt from rpython.rlib.listsort import make_timsort_class +from rpython.rlib.objectmodel import specialize +from rpython.rlib.rarithmetic import widen from rpython.rlib.rawstorage import raw_storage_getitem, raw_storage_setitem, \ free_raw_storage, alloc_raw_storage from rpython.rlib.unroll import unrolling_iterable -from rpython.rlib.rarithmetic import widen -from rpython.rlib.objectmodel import specialize -from pypy.interpreter.error import OperationError, oefmt +from rpython.rtyper.lltypesystem import rffi, lltype +from pypy.module.micronumpy import descriptor, types, constants as NPY from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy import descriptor, types, constants as NPY from pypy.module.micronumpy.iter import AxisIterator INT_SIZE = rffi.sizeof(lltype.Signed) @@ -125,7 +123,7 @@ # note that it's fine ot pass None here as we're not going # to pass the result around (None is the link to base in slices) if arr.get_size() > 0: - arr = arr.reshape(space, None, [arr.get_size()]) + arr = arr.reshape(None, [arr.get_size()]) axis = 0 elif w_axis is None: axis = -1 @@ -276,7 +274,7 @@ if w_axis is space.w_None: # note that it's fine to pass None here as we're not going # to pass the result around (None is the link to base in slices) - arr = arr.reshape(space, None, [arr.get_size()]) + arr = arr.reshape(None, [arr.get_size()]) axis = 0 elif w_axis is None: axis = -1 diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -1,8 +1,7 @@ +from pypy.interpreter.error import OperationError, oefmt from rpython.rlib import jit -from pypy.interpreter.error import OperationError, oefmt +from pypy.module.micronumpy import support, constants as NPY from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy import support -from pypy.module.micronumpy import constants as NPY # structures to describe slicing @@ -21,7 +20,6 @@ # ofs only changes start # create a view of the original array by extending # the shape, strides, backstrides of the array - from pypy.module.micronumpy.support import calc_strides strides, backstrides = calc_strides(subdtype.shape, subdtype.subdtype, arr.order) final_shape = arr.shape + subdtype.shape @@ -345,6 +343,25 @@ return new_shape + at jit.unroll_safe +def calc_strides(shape, dtype, order): + strides = [] + backstrides = [] + s = 1 + shape_rev = shape[:] + if order == 'C': + shape_rev.reverse() + for sh in shape_rev: + slimit = max(sh, 1) + strides.append(s * dtype.elsize) + backstrides.append(s * (slimit - 1) * dtype.elsize) + s *= slimit + if order == 'C': + strides.reverse() + backstrides.reverse() + return strides, backstrides + + # Recalculating strides. Find the steps that the iteration does for each # dimension, given the stride and shape. Then try to create a new stride that # fits the new shape, using those steps. 
If there is a shape/step mismatch diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -1,5 +1,5 @@ +from pypy.interpreter.error import OperationError, oefmt from rpython.rlib import jit -from pypy.interpreter.error import OperationError, oefmt def issequence_w(space, w_obj): @@ -25,22 +25,3 @@ for x in s: i *= x return i - - - at jit.unroll_safe -def calc_strides(shape, dtype, order): - strides = [] - backstrides = [] - s = 1 - shape_rev = shape[:] - if order == 'C': - shape_rev.reverse() - for sh in shape_rev: - slimit = max(sh, 1) - strides.append(s * dtype.elsize) - backstrides.append(s * (slimit - 1) * dtype.elsize) - s *= slimit - if order == 'C': - strides.reverse() - backstrides.reverse() - return strides, backstrides diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -844,6 +844,23 @@ b = a.reshape(s) assert b.shape == s assert (b == [1]).all() + a = array(1.5) + b = a.reshape(None) + assert b is not a + assert b == a + b[...] = 2.5 + assert a == 2.5 + a = array([]).reshape((0, 2)) + assert a.shape == (0, 2) + assert a.strides == (16, 8) + a = array([]) + a.shape = (4, 0, 3, 0, 0, 2) + assert a.strides == (48, 48, 16, 16, 16, 8) + a = array(1.5) + assert a.reshape(()).shape == () + a = array(1.5) + a.shape = () + assert a.strides == () a = array(range(12)) exc = raises(ValueError, "b = a.reshape(())") assert str(exc.value) == "total size of new array must be unchanged" @@ -2303,12 +2320,12 @@ import numpy as np a = np.array(1.5) assert a[...] is a - #a[...] = 2.5 - #assert a == 2.5 + a[...] = 2.5 + assert a == 2.5 a = np.array([1, 2, 3]) assert a[...] is a - #a[...] = 4 - #assert (a == [4, 4, 4]).all() + a[...] 
= 4 + assert (a == [4, 4, 4]).all() class AppTestNumArrayFromBuffer(BaseNumpyAppTest): diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py --- a/pypy/module/micronumpy/types.py +++ b/pypy/module/micronumpy/types.py @@ -1,32 +1,31 @@ import functools import math - from pypy.interpreter.error import OperationError, oefmt -from pypy.module.micronumpy import boxes -from pypy.module.micronumpy import support -from pypy.module.micronumpy.concrete import SliceArray, VoidBoxStorage from pypy.objspace.std.floatobject import float2string from pypy.objspace.std.complexobject import str_format -from rpython.rlib import rfloat, clibffi, rcomplex -from rpython.rlib.rawstorage import (alloc_raw_storage, - raw_storage_getitem_unaligned, raw_storage_setitem_unaligned) +from rpython.rlib import clibffi, jit, rfloat, rcomplex from rpython.rlib.objectmodel import specialize from rpython.rlib.rarithmetic import widen, byteswap, r_ulonglong, \ most_neg_value_of, LONG_BIT -from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rlib.rstruct.runpack import runpack -from rpython.rlib.rstruct.nativefmttable import native_is_bigendian +from rpython.rlib.rawstorage import (alloc_raw_storage, + raw_storage_getitem_unaligned, raw_storage_setitem_unaligned) +from rpython.rlib.rstring import StringBuilder from rpython.rlib.rstruct.ieee import (float_pack, float_unpack, unpack_float, pack_float80, unpack_float80) +from rpython.rlib.rstruct.nativefmttable import native_is_bigendian +from rpython.rlib.rstruct.runpack import runpack +from rpython.rtyper.lltypesystem import lltype, rffi from rpython.tool.sourcetools import func_with_new_name -from rpython.rlib import jit -from rpython.rlib.rstring import StringBuilder +from pypy.module.micronumpy import boxes +from pypy.module.micronumpy.concrete import SliceArray, VoidBoxStorage +from pypy.module.micronumpy.strides import calc_strides degToRad = math.pi / 180.0 log2 = math.log(2) log2e = 1. 
/ log2 log10 = math.log(10) + def simple_unary_op(func): specialize.argtype(1)(func) @functools.wraps(func) @@ -1792,8 +1791,8 @@ from pypy.module.micronumpy.base import W_NDimArray if dtype is None: dtype = arr.dtype - strides, backstrides = support.calc_strides(dtype.shape, - dtype.subdtype, arr.order) + strides, backstrides = calc_strides(dtype.shape, dtype.subdtype, + arr.order) implementation = SliceArray(i + offset, strides, backstrides, dtype.shape, arr, W_NDimArray(arr), dtype.subdtype) diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -2,13 +2,12 @@ from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty -from pypy.module.micronumpy import boxes, descriptor, loop from rpython.rlib import jit from rpython.rlib.rarithmetic import LONG_BIT, maxint from rpython.tool.sourcetools import func_with_new_name +from pypy.module.micronumpy import boxes, descriptor, loop, constants as NPY +from pypy.module.micronumpy.base import convert_to_array, W_NDimArray from pypy.module.micronumpy.strides import shape_agreement -from pypy.module.micronumpy.base import convert_to_array, W_NDimArray -from pypy.module.micronumpy import constants as NPY def done_if_true(dtype, val): diff --git a/rpython/rtyper/test/test_generator.py b/rpython/rtyper/test/test_generator.py --- a/rpython/rtyper/test/test_generator.py +++ b/rpython/rtyper/test/test_generator.py @@ -88,16 +88,3 @@ return s res = self.interpret(g, []) assert res == 6 - - def test_send(self): - def f(): - yield (yield 1) + 1 - def g(): - gen = f() - res = f.send(2) - assert res == 1 - res = f.next() - assert res == 3 - - res = self.interpret(g, []) - From noreply at buildbot.pypy.org Thu Feb 27 13:17:41 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 13:17:41 +0100 (CET) Subject: [pypy-commit] pypy default: merge numpy-refactor Message-ID: <20140227121741.A3AC11C03D5@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69512:ec929caa596e Date: 2014-02-27 07:15 -0500 http://bitbucket.org/pypy/pypy/changeset/ec929caa596e/ Log: merge numpy-refactor diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -8,6 +8,7 @@ shape_agreement_multiple + def where(space, w_arr, w_x=None, w_y=None): """where(condition, [x, y]) @@ -91,6 +92,7 @@ out = W_NDimArray.from_shape(space, shape, dtype) return loop.where(space, out, shape, arr, x, y, dtype) + def dot(space, w_obj1, w_obj2, w_out=None): w_arr = convert_to_array(space, w_obj1) if w_arr.is_scalar(): @@ -162,6 +164,7 @@ axis_start += arr.get_shape()[axis] return res + @unwrap_spec(repeats=int) def repeat(space, w_arr, repeats, w_axis): arr = convert_to_array(space, w_arr) @@ -186,9 +189,11 @@ Chunks(chunks).apply(space, w_res).implementation.setslice(space, arr) return w_res + def count_nonzero(space, w_obj): return space.wrap(loop.count_all_true(convert_to_array(space, w_obj))) + def choose(space, w_arr, w_choices, w_out, w_mode): arr = convert_to_array(space, w_arr) choices = [convert_to_array(space, w_item) for w_item @@ -208,6 +213,7 @@ loop.choose(space, arr, choices, shape, dtype, out, mode) return out + def put(space, w_arr, w_indices, w_values, w_mode): arr = convert_to_array(space, w_arr) mode = 
clipmode_converter(space, w_mode) @@ -256,6 +262,7 @@ arr.setitem(space, [index], dtype.coerce(space, value)) + def diagonal(space, arr, offset, axis1, axis2): shape = arr.get_shape() shapelen = len(shape) diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -69,6 +69,7 @@ ret = space.newtuple([scalar, space.newtuple([space.wrap(self._get_dtype(space)), space.wrap(self.raw_str())])]) return ret + class PrimitiveBox(Box): _mixin_ = True _immutable_fields_ = ['value'] @@ -93,6 +94,7 @@ lltype.free(value, flavor="raw") return ret + class ComplexBox(Box): _mixin_ = True _immutable_fields_ = ['real', 'imag'] @@ -360,6 +362,7 @@ return self.get_dtype(space).itemtype.imag(self) w_flags = None + def descr_get_flags(self, space): if self.w_flags is None: self.w_flags = W_FlagsObject(self) diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -276,6 +276,17 @@ backstrides) return loop.setslice(space, self.get_shape(), impl, self) + def create_iter(self, shape=None, backward_broadcast=False): + if shape is not None and \ + support.product(shape) > support.product(self.get_shape()): + r = calculate_broadcast_strides(self.get_strides(), + self.get_backstrides(), + self.get_shape(), shape, + backward_broadcast) + return iter.MultiDimViewIterator(self, self.start, + r[0], r[1], shape) + return iter.ArrayIterator(self) + def create_axis_iter(self, shape, dim, cum): return iter.AxisIterator(self, shape, dim, cum) @@ -335,26 +346,6 @@ self.backstrides = backstrides self.storage = storage - def create_iter(self, shape=None, backward_broadcast=False, require_index=False): - if shape is not None and \ - support.product(shape) > support.product(self.get_shape()): - r = calculate_broadcast_strides(self.get_strides(), - self.get_backstrides(), - self.get_shape(), shape, - backward_broadcast) - return iter.MultiDimViewIterator(self, self.start, - r[0], r[1], shape) - if not require_index: - return iter.ConcreteArrayIterator(self) - if len(self.get_shape()) <= 1: - return iter.OneDimViewIterator(self, self.start, - self.get_strides(), - self.get_shape()) - return iter.MultiDimViewIterator(self, self.start, - self.get_strides(), - self.get_backstrides(), - self.get_shape()) - def fill(self, space, box): self.dtype.itemtype.fill(self.storage, self.dtype.elsize, box, 0, self.size, 0) @@ -440,24 +431,6 @@ def fill(self, space, box): loop.fill(self, box.convert_to(space, self.dtype)) - def create_iter(self, shape=None, backward_broadcast=False, require_index=False): - if shape is not None and \ - support.product(shape) > support.product(self.get_shape()): - r = calculate_broadcast_strides(self.get_strides(), - self.get_backstrides(), - self.get_shape(), shape, - backward_broadcast) - return iter.MultiDimViewIterator(self, self.start, - r[0], r[1], shape) - if len(self.get_shape()) <= 1: - return iter.OneDimViewIterator(self, self.start, - self.get_strides(), - self.get_shape()) - return iter.MultiDimViewIterator(self, self.start, - self.get_strides(), - self.get_backstrides(), - self.get_shape()) - def set_shape(self, space, orig_array, new_shape): if len(self.get_shape()) < 2 or self.size == 0: # TODO: this code could be refactored into calc_strides diff --git a/pypy/module/micronumpy/constants.py b/pypy/module/micronumpy/constants.py --- a/pypy/module/micronumpy/constants.py +++ 
b/pypy/module/micronumpy/constants.py @@ -57,7 +57,7 @@ INTPLTR = 'p' UINTPLTR = 'P' -GENBOOLLTR ='b' +GENBOOLLTR = 'b' SIGNEDLTR = 'i' UNSIGNEDLTR = 'u' FLOATINGLTR = 'f' diff --git a/pypy/module/micronumpy/flagsobj.py b/pypy/module/micronumpy/flagsobj.py --- a/pypy/module/micronumpy/flagsobj.py +++ b/pypy/module/micronumpy/flagsobj.py @@ -6,9 +6,13 @@ class W_FlagsObject(W_Root): def __init__(self, arr): - self.arr = arr self.flags = 0 + def descr__new__(space, w_subtype): + self = space.allocate_instance(W_FlagsObject, w_subtype) + W_FlagsObject.__init__(self, None) + return self + def descr_get_contiguous(self, space): return space.w_True @@ -60,6 +64,8 @@ W_FlagsObject.typedef = TypeDef("flagsobj", __module__ = "numpy", + __new__ = interp2app(W_FlagsObject.descr__new__.im_func), + __getitem__ = interp2app(W_FlagsObject.descr_getitem), __setitem__ = interp2app(W_FlagsObject.descr_setitem), __eq__ = interp2app(W_FlagsObject.descr_eq), diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -19,7 +19,7 @@ def get_shape(self): return self.shape - def create_iter(self, shape=None, backward_broadcast=False, require_index=False): + def create_iter(self, shape=None, backward_broadcast=False): assert isinstance(self.base(), W_NDimArray) return self.base().create_iter() @@ -33,7 +33,6 @@ def reset(self): self.iter = self.base.create_iter() - self.index = 0 def descr_len(self, space): return space.wrap(self.base.get_size()) @@ -43,14 +42,13 @@ raise OperationError(space.w_StopIteration, space.w_None) w_res = self.iter.getitem() self.iter.next() - self.index += 1 return w_res def descr_index(self, space): - return space.wrap(self.index) + return space.wrap(self.iter.index) def descr_coords(self, space): - coords = self.base.to_coords(space, space.wrap(self.index)) + coords = self.base.to_coords(space, space.wrap(self.iter.index)) return space.newtuple([space.wrap(c) for c in coords]) def descr_getitem(self, space, w_idx): diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -37,7 +37,7 @@ All the calculations happen in next() next_skip_x(steps) tries to do the iteration for a number of steps at once, -but then we cannot gaurentee that we only overflow one single shape +but then we cannot guarantee that we only overflow one single shape dimension, perhaps we could overflow times in one big step. 
""" from rpython.rlib import jit @@ -78,28 +78,41 @@ return [space.wrap(self.indexes[i]) for i in range(shapelen)] -class BaseArrayIterator(object): - def next(self): - raise NotImplementedError # purely abstract base class - - def setitem(self, elem): - raise NotImplementedError - - def set_scalar_object(self, value): - raise NotImplementedError # works only on scalars - - -class ConcreteArrayIterator(BaseArrayIterator): - _immutable_fields_ = ['array', 'skip', 'size'] - +class ArrayIterator(object): def __init__(self, array): self.array = array - self.offset = 0 - self.skip = array.dtype.elsize - self.size = array.size + self.start = array.start + self.size = array.get_size() + self.ndim_m1 = len(array.shape) - 1 + self.shape_m1 = [s - 1 for s in array.shape] + self.strides = array.strides[:] + self.backstrides = array.backstrides[:] + self.reset() - def setitem(self, elem): - self.array.setitem(self.offset, elem) + def reset(self): + self.index = 0 + self.indices = [0] * (self.ndim_m1 + 1) + self.offset = self.start + + @jit.unroll_safe + def next(self): + self.index += 1 + for i in xrange(self.ndim_m1, -1, -1): + if self.indices[i] < self.shape_m1[i]: + self.indices[i] += 1 + self.offset += self.strides[i] + break + else: + self.indices[i] = 0 + self.offset -= self.backstrides[i] + + def next_skip_x(self, step): + # XXX implement + for _ in range(step): + self.next() + + def done(self): + return self.index >= self.size def getitem(self): return self.array.getitem(self.offset) @@ -107,52 +120,11 @@ def getitem_bool(self): return self.array.getitem_bool(self.offset) - def next(self): - self.offset += self.skip + def setitem(self, elem): + self.array.setitem(self.offset, elem) - def next_skip_x(self, x): - self.offset += self.skip * x - def done(self): - return self.offset >= self.size - - def reset(self): - self.offset %= self.size - - -class OneDimViewIterator(ConcreteArrayIterator): - def __init__(self, array, start, strides, shape): - self.array = array - self.offset = start - self.index = 0 - assert len(strides) == len(shape) - if len(shape) == 0: - self.skip = array.dtype.elsize - self.size = 1 - else: - assert len(shape) == 1 - self.skip = strides[0] - self.size = shape[0] - - def next(self): - self.offset += self.skip - self.index += 1 - - def next_skip_x(self, x): - self.offset += self.skip * x - self.index += x - - def done(self): - return self.index >= self.size - - def reset(self): - self.offset %= self.size - - def get_index(self, d): - return self.index - - -class MultiDimViewIterator(ConcreteArrayIterator): +class MultiDimViewIterator(ArrayIterator): def __init__(self, array, start, strides, backstrides, shape): self.indexes = [0] * len(shape) self.array = array @@ -201,11 +173,8 @@ def reset(self): self.offset %= self.size - def get_index(self, d): - return self.indexes[d] - -class AxisIterator(BaseArrayIterator): +class AxisIterator(ArrayIterator): def __init__(self, array, shape, dim, cumulative): self.shape = shape strides = array.get_strides() @@ -227,12 +196,6 @@ self.dim = dim self.array = array - def setitem(self, elem): - self.array.setitem(self.offset, elem) - - def getitem(self): - return self.array.getitem(self.offset) - @jit.unroll_safe def next(self): for i in range(len(self.shape) - 1, -1, -1): diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -331,7 +331,7 @@ def nonzero(res, arr, box): res_iter = res.create_iter() - arr_iter = 
arr.create_iter(require_index=True) + arr_iter = arr.create_iter() shapelen = len(arr.shape) dtype = arr.dtype dims = range(shapelen) @@ -339,7 +339,7 @@ nonzero_driver.jit_merge_point(shapelen=shapelen, dims=dims, dtype=dtype) if arr_iter.getitem_bool(): for d in dims: - res_iter.setitem(box(arr_iter.get_index(d))) + res_iter.setitem(box(arr_iter.indices[d])) res_iter.next() arr_iter.next() return res @@ -435,8 +435,6 @@ arr_iter.next_skip_x(step) length -= 1 val_iter.next() - # WTF numpy? - val_iter.reset() fromstring_driver = jit.JitDriver(name = 'numpy_fromstring', greens = ['itemsize', 'dtype'], diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -44,6 +44,7 @@ "objects are not aligned")) return out_shape, right_critical_dim + class __extend__(W_NDimArray): @jit.unroll_safe def descr_get_shape(self, space): @@ -280,11 +281,10 @@ s.append(suffix) return s.build() - def create_iter(self, shape=None, backward_broadcast=False, require_index=False): + def create_iter(self, shape=None, backward_broadcast=False): assert isinstance(self.implementation, BaseConcreteArray) return self.implementation.create_iter( - shape=shape, backward_broadcast=backward_broadcast, - require_index=require_index) + shape=shape, backward_broadcast=backward_broadcast) def create_axis_iter(self, shape, dim, cum): return self.implementation.create_axis_iter(shape, dim, cum) @@ -1126,6 +1126,7 @@ return w_obj pass + @unwrap_spec(offset=int) def descr_new_array(space, w_subtype, w_shape, w_dtype=None, w_buffer=None, offset=0, w_strides=None, w_order=None): @@ -1176,6 +1177,7 @@ space.wrap('__array_finalize__')), w_subtype) return w_ret + @unwrap_spec(addr=int) def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype, w_subtype=None): """ diff --git a/pypy/module/micronumpy/test/test_flagsobj.py b/pypy/module/micronumpy/test/test_flagsobj.py --- a/pypy/module/micronumpy/test/test_flagsobj.py +++ b/pypy/module/micronumpy/test/test_flagsobj.py @@ -2,6 +2,14 @@ class AppTestFlagsObj(BaseNumpyAppTest): + def test_init(self): + import numpy as np + a = np.array([1,2,3]) + assert a.flags['C'] is True + b = type(a.flags)() + assert b is not a.flags + assert b['C'] is True + def test_repr(self): import numpy as np a = np.array([1,2,3]) diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -9,9 +9,11 @@ from pypy.module.micronumpy.base import convert_to_array, W_NDimArray from pypy.module.micronumpy.strides import shape_agreement + def done_if_true(dtype, val): return dtype.itemtype.bool(val) + def done_if_false(dtype, val): return not dtype.itemtype.bool(val) @@ -544,6 +546,7 @@ dtypenum += 2 return descriptor.get_dtype_cache(space).dtypes_by_num[dtypenum] + @jit.unroll_safe def find_unaryop_result_dtype(space, dt, promote_to_float=False, promote_bools=False, promote_to_largest=False): @@ -570,6 +573,7 @@ return dtype return dt + def find_dtype_for_scalar(space, w_obj, current_guess=None): bool_dtype = descriptor.get_dtype_cache(space).w_booldtype long_dtype = descriptor.get_dtype_cache(space).w_longdtype @@ -611,9 +615,9 @@ 'unable to create dtype from objects, "%T" instance not ' 'supported', w_obj) + def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func, bool_result): - dtype_cache = descriptor.get_dtype_cache(space) def get_op(dtype): try: return 
getattr(dtype.itemtype, op_name) @@ -621,6 +625,7 @@ raise oefmt(space.w_NotImplementedError, "%s not implemented for %s", ufunc_name, dtype.get_name()) + dtype_cache = descriptor.get_dtype_cache(space) if argcount == 1: def impl(res_dtype, value): res = get_op(res_dtype)(value) @@ -762,6 +767,6 @@ ufunc = W_Ufunc2(func, ufunc_name, **extra_kwargs) setattr(self, ufunc_name, ufunc) + def get(space): return space.fromcache(UfuncState) - From noreply at buildbot.pypy.org Thu Feb 27 13:17:42 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 13:17:42 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140227121742.BCFDD1C03D5@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69513:e2df1b135323 Date: 2014-02-27 07:16 -0500 http://bitbucket.org/pypy/pypy/changeset/e2df1b135323/ Log: merge heads diff --git a/include/PyPy.h b/include/PyPy.h --- a/include/PyPy.h +++ b/include/PyPy.h @@ -8,16 +8,12 @@ extern "C" { #endif -/* You should call this first once. */ -#define pypy_init(need_threads) do { pypy_asm_stack_bottom(); \ -rpython_startup_code();\ - if (need_threads) pypy_init_threads(); } while (0) +// call this first +char* rpython_startup_code(void); -// deprecated interface -void rpython_startup_code(void); +// pypy_init_threads has to be called in case you want to use threads void pypy_init_threads(void); - /* Initialize the home directory of PyPy. It is necessary to call this. Call it with "home" being the file name of the libpypy.so, for diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -14,12 +14,15 @@ to make a full API working, provided you'll follow a few principles. The API is: -.. function:: void pypy_init(int need_threads); +.. function:: char* rpython_startup_code(void); This is a function that you have to call (once) before calling anything. It initializes the RPython/PyPy GC and does a bunch of necessary startup - code. This function cannot fail. Pass 1 in case you need thread support, 0 - otherwise. + code. This function cannot fail and always returns NULL. + +.. function:: void pypy_init_threads(void); + + Initialize threads. Only need to be called if there are any threads involved .. function:: long pypy_setup_home(char* home, int verbose); @@ -46,7 +49,7 @@ In case your application uses threads that are initialized outside of PyPy, you need to call this function to tell the PyPy GC to track this thread. Note that this function is not thread-safe itself, so you need to guard it - with a mutex. Do not call it from the main thread. + with a mutex. Simple example -------------- diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -80,7 +80,7 @@ # register the minimal equivalent of running a small piece of code. 
This # should be used as sparsely as possible, just to register callbacks - from rpython.rlib.entrypoint import entrypoint + from rpython.rlib.entrypoint import entrypoint, RPython_StartupCode from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rtyper.lltypesystem.lloperation import llop @@ -94,7 +94,6 @@ @entrypoint('main', [rffi.CCHARP, rffi.INT], c_name='pypy_setup_home') def pypy_setup_home(ll_home, verbose): from pypy.module.sys.initpath import pypy_find_stdlib - llop.gc_stack_bottom(lltype.Void) verbose = rffi.cast(lltype.Signed, verbose) if ll_home: home = rffi.charp2str(ll_home) @@ -124,7 +123,6 @@ def pypy_execute_source(ll_source): after = rffi.aroundstate.after if after: after() - llop.gc_stack_bottom(lltype.Void) source = rffi.charp2str(ll_source) res = _pypy_execute_source(source) before = rffi.aroundstate.before From noreply at buildbot.pypy.org Thu Feb 27 14:54:24 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 27 Feb 2014 14:54:24 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: Fix ztranslation tests by not looking into typedefs of builtin types. Message-ID: <20140227135424.5F9C41C3656@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: kill-multimethod Changeset: r69514:8588a1b129f6 Date: 2014-02-27 14:53 +0100 http://bitbucket.org/pypy/pypy/changeset/8588a1b129f6/ Log: Fix ztranslation tests by not looking into typedefs of builtin types. diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -107,6 +107,10 @@ # ____________________________________________________________ +BUILTIN_TYPES = ['int', 'str', 'float', 'long', 'tuple', 'list', 'dict', + 'unicode', 'complex', 'slice', 'bool', 'basestring', 'object', + 'bytearray'] + class FakeObjSpace(ObjSpace): def __init__(self, config=None): self._seen_extras = [] @@ -331,9 +335,7 @@ def setup(space): for name in (ObjSpace.ConstantTable + ObjSpace.ExceptionTable + - ['int', 'str', 'float', 'long', 'tuple', 'list', - 'dict', 'unicode', 'complex', 'slice', 'bool', - 'basestring', 'object', 'bytearray']): + BUILTIN_TYPES): setattr(space, 'w_' + name, w_some_obj()) space.w_type = w_some_type() # @@ -364,8 +366,9 @@ @specialize.memo() def see_typedef(space, typedef): assert isinstance(typedef, TypeDef) - for name, value in typedef.rawdict.items(): - space.wrap(value) + if typedef.name not in BUILTIN_TYPES: + for name, value in typedef.rawdict.items(): + space.wrap(value) class FakeCompiler(object): pass From noreply at buildbot.pypy.org Thu Feb 27 15:07:48 2014 From: noreply at buildbot.pypy.org (Manuel Jacob) Date: Thu, 27 Feb 2014 15:07:48 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: whitespace fixes Message-ID: <20140227140748.418061D23B9@cobra.cs.uni-duesseldorf.de> Author: Manuel Jacob Branch: int_w-refactor Changeset: r69515:35aac69c1b3d Date: 2014-02-27 15:06 +0100 http://bitbucket.org/pypy/pypy/changeset/35aac69c1b3d/ Log: whitespace fixes diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1375,7 +1375,7 @@ def int_w(self, w_obj, allow_conversion=True): """ Unwrap an app-level int object into an interpret-level int. - + If allow_conversion==True, w_obj might be of any type which implements __int__, *except* floats which are explicitly rejected. This is the same logic as CPython's PyArg_ParseTuple. 
If you want to also allow diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -458,7 +458,7 @@ def test_interp2app_unwrap_spec_typechecks(self): from rpython.rlib.rarithmetic import r_longlong - + space = self.space w = space.wrap def g3_id(space, x): diff --git a/pypy/module/__builtin__/test/test_buffer.py b/pypy/module/__builtin__/test/test_buffer.py --- a/pypy/module/__builtin__/test/test_buffer.py +++ b/pypy/module/__builtin__/test/test_buffer.py @@ -181,7 +181,8 @@ buf = buffer('hello world') raises(TypeError, "buf[MyInt(0)]") raises(TypeError, "buf[MyInt(0):MyInt(5)]") - + + class AppTestMemoryView: def test_basic(self): v = memoryview("abc") diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -315,7 +315,7 @@ def _int_w(self, space): return int(self.intval) - + unwrap = _int_w def uint_w(self, space): From noreply at buildbot.pypy.org Thu Feb 27 15:45:12 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Feb 2014 15:45:12 +0100 (CET) Subject: [pypy-commit] pypy default: Print this extra info Message-ID: <20140227144512.8361F1C02FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69516:e6b3dda85360 Date: 2014-02-27 15:44 +0100 http://bitbucket.org/pypy/pypy/changeset/e6b3dda85360/ Log: Print this extra info diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -247,14 +247,14 @@ from rpython.jit.codewriter.policy import log; log.WARNING( "in operation %r: this calls a _jit_loop_invariant_ function," " but this contradicts other sources (e.g. it can have random" - " effects)" % (op,)) + " effects, code %s)" % (op, extraeffect)) if elidable: if extraeffect not in (EffectInfo.EF_ELIDABLE_CANNOT_RAISE, EffectInfo.EF_ELIDABLE_CAN_RAISE): from rpython.jit.codewriter.policy import log; log.WARNING( "in operation %r: this calls an _elidable_function_," " but this contradicts other sources (e.g. it can have random" - " effects)" % (op,)) + " effects, code %s)" % (op, extraeffect)) # effectinfo = effectinfo_from_writeanalyze( self.readwrite_analyzer.analyze(op, self.seen), self.cpu, From noreply at buildbot.pypy.org Thu Feb 27 15:46:35 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Feb 2014 15:46:35 +0100 (CET) Subject: [pypy-commit] pypy default: better this way Message-ID: <20140227144635.EA79B1C02FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69517:c6cf3d3bc3a6 Date: 2014-02-27 15:45 +0100 http://bitbucket.org/pypy/pypy/changeset/c6cf3d3bc3a6/ Log: better this way diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -247,14 +247,14 @@ from rpython.jit.codewriter.policy import log; log.WARNING( "in operation %r: this calls a _jit_loop_invariant_ function," " but this contradicts other sources (e.g. it can have random" - " effects, code %s)" % (op, extraeffect)) + " effects): EF=%s" % (op, extraeffect)) if elidable: if extraeffect not in (EffectInfo.EF_ELIDABLE_CANNOT_RAISE, EffectInfo.EF_ELIDABLE_CAN_RAISE): from rpython.jit.codewriter.policy import log; log.WARNING( "in operation %r: this calls an _elidable_function_," " but this contradicts other sources (e.g. 
it can have random" - " effects, code %s)" % (op, extraeffect)) + " effects): EF=%s" % (op, extraeffect)) # effectinfo = effectinfo_from_writeanalyze( self.readwrite_analyzer.analyze(op, self.seen), self.cpu, From noreply at buildbot.pypy.org Thu Feb 27 16:39:51 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 27 Feb 2014 16:39:51 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: merge heads Message-ID: <20140227153951.3B0591C02FC@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69519:4e62d529395e Date: 2014-02-27 16:39 +0100 http://bitbucket.org/pypy/pypy/changeset/4e62d529395e/ Log: merge heads diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1375,7 +1375,7 @@ def int_w(self, w_obj, allow_conversion=True): """ Unwrap an app-level int object into an interpret-level int. - + If allow_conversion==True, w_obj might be of any type which implements __int__, *except* floats which are explicitly rejected. This is the same logic as CPython's PyArg_ParseTuple. If you want to also allow diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -458,7 +458,7 @@ def test_interp2app_unwrap_spec_typechecks(self): from rpython.rlib.rarithmetic import r_longlong - + space = self.space w = space.wrap def g3_id(space, x): diff --git a/pypy/module/__builtin__/test/test_buffer.py b/pypy/module/__builtin__/test/test_buffer.py --- a/pypy/module/__builtin__/test/test_buffer.py +++ b/pypy/module/__builtin__/test/test_buffer.py @@ -181,7 +181,8 @@ buf = buffer('hello world') raises(TypeError, "buf[MyInt(0)]") raises(TypeError, "buf[MyInt(0):MyInt(5)]") - + + class AppTestMemoryView: def test_basic(self): v = memoryview("abc") diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -315,7 +315,7 @@ def _int_w(self, space): return int(self.intval) - + unwrap = _int_w def uint_w(self, space): From noreply at buildbot.pypy.org Thu Feb 27 16:39:49 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Thu, 27 Feb 2014 16:39:49 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: add support for allow_conversion to more objspace methods, and disallow them in _rawffi.alt: this fixes some weird ctypes behavior with old style classes, which might raise AttributeError instead of TypeError when we try to convert them Message-ID: <20140227153949.DF8441C02FC@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69518:612616fc0f38 Date: 2014-02-27 16:38 +0100 http://bitbucket.org/pypy/pypy/changeset/612616fc0f38/ Log: add support for allow_conversion to more objspace methods, and disallow them in _rawffi.alt: this fixes some weird ctypes behavior with old style classes, which might raise AttributeError instead of TypeError when we try to convert them diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1296,16 +1296,16 @@ else: return index - def r_longlong_w(self, w_obj): - bigint = self.bigint_w(w_obj) + def r_longlong_w(self, w_obj, allow_conversion=True): + bigint = self.bigint_w(w_obj, allow_conversion) try: return bigint.tolonglong() except OverflowError: raise 
OperationError(self.w_OverflowError, self.wrap('integer too large')) - def r_ulonglong_w(self, w_obj): - bigint = self.bigint_w(w_obj) + def r_ulonglong_w(self, w_obj, allow_conversion=True): + bigint = self.bigint_w(w_obj, allow_conversion) try: return bigint.toulonglong() except OverflowError: @@ -1443,10 +1443,7 @@ # This is all interface for gateway.py. gateway_int_w = int_w - - def gateway_float_w(self, w_obj): - return self.float_w(self.float(w_obj)) - + gateway_float_w = float_w gateway_r_longlong_w = r_longlong_w gateway_r_ulonglong_w = r_ulonglong_w @@ -1477,7 +1474,7 @@ def c_uint_w(self, w_obj): # Like space.gateway_uint_w(), but raises an app-level OverflowError if # the integer does not fit in 32 bits. Here for gateway.py. - value = self.gateway_r_uint_w(w_obj) + value = self.uint_w(w_obj) if value > UINT_MAX_32_BITS: raise OperationError(self.w_OverflowError, self.wrap("expected an unsigned 32-bit integer")) @@ -1487,7 +1484,7 @@ # Like space.gateway_int_w(), but raises an app-level ValueError if # the integer is negative or does not fit in 32 bits. Here # for gateway.py. - value = self.gateway_int_w(w_obj) + value = self.int_w(w_obj) if value < 0: raise OperationError(self.w_ValueError, self.wrap("expected a non-negative integer")) @@ -1496,22 +1493,22 @@ self.wrap("expected a 32-bit integer")) return value - def truncatedint_w(self, w_obj): + def truncatedint_w(self, w_obj, allow_conversion=True): # Like space.gateway_int_w(), but return the integer truncated # instead of raising OverflowError. For obscure cases only. try: - return self.int_w(w_obj) + return self.int_w(w_obj, allow_conversion) except OperationError, e: if not e.match(self, self.w_OverflowError): raise from rpython.rlib.rarithmetic import intmask return intmask(self.bigint_w(w_obj).uintmask()) - def truncatedlonglong_w(self, w_obj): + def truncatedlonglong_w(self, w_obj, allow_conversion=True): # Like space.gateway_r_longlong_w(), but return the integer truncated # instead of raising OverflowError. 
try: - return self.r_longlong_w(w_obj) + return self.r_longlong_w(w_obj, allow_conversion) except OperationError, e: if not e.match(self, self.w_OverflowError): raise diff --git a/pypy/module/_rawffi/alt/test/test_funcptr.py b/pypy/module/_rawffi/alt/test/test_funcptr.py --- a/pypy/module/_rawffi/alt/test/test_funcptr.py +++ b/pypy/module/_rawffi/alt/test/test_funcptr.py @@ -185,6 +185,10 @@ set_val_to_ptr(ptr2, 123) assert get_dummy() == 123 set_val_to_ptr(ptr2, 0) + # + class OldStyle: + pass + raises(TypeError, "set_val_to_ptr(OldStyle(), 0)") def test_convert_strings_to_char_p(self): """ diff --git a/pypy/module/_rawffi/alt/type_converter.py b/pypy/module/_rawffi/alt/type_converter.py --- a/pypy/module/_rawffi/alt/type_converter.py +++ b/pypy/module/_rawffi/alt/type_converter.py @@ -25,7 +25,7 @@ assert libffi.IS_32_BIT self._longlong(w_ffitype, w_obj) elif w_ffitype.is_signed(): - intval = space.truncatedint_w(w_obj) + intval = space.truncatedint_w(w_obj, allow_conversion=False) self.handle_signed(w_ffitype, w_obj, intval) elif self.maybe_handle_char_or_unichar_p(w_ffitype, w_obj): # the object was already handled from within @@ -33,16 +33,16 @@ pass elif w_ffitype.is_pointer(): w_obj = self.convert_pointer_arg_maybe(w_obj, w_ffitype) - intval = space.truncatedint_w(w_obj) + intval = space.truncatedint_w(w_obj, allow_conversion=False) self.handle_pointer(w_ffitype, w_obj, intval) elif w_ffitype.is_unsigned(): - uintval = r_uint(space.truncatedint_w(w_obj)) + uintval = r_uint(space.truncatedint_w(w_obj, allow_conversion=False)) self.handle_unsigned(w_ffitype, w_obj, uintval) elif w_ffitype.is_char(): - intval = space.int_w(space.ord(w_obj)) + intval = space.int_w(space.ord(w_obj), allow_conversion=False) self.handle_char(w_ffitype, w_obj, intval) elif w_ffitype.is_unichar(): - intval = space.int_w(space.ord(w_obj)) + intval = space.int_w(space.ord(w_obj), allow_conversion=False) self.handle_unichar(w_ffitype, w_obj, intval) elif w_ffitype.is_double(): self._float(w_ffitype, w_obj) @@ -60,20 +60,20 @@ def _longlong(self, w_ffitype, w_obj): # a separate function, which can be seen by the jit or not, # depending on whether longlongs are supported - longlongval = self.space.truncatedlonglong_w(w_obj) + longlongval = self.space.truncatedlonglong_w(w_obj, allow_conversion=False) self.handle_longlong(w_ffitype, w_obj, longlongval) def _float(self, w_ffitype, w_obj): # a separate function, which can be seen by the jit or not, # depending on whether floats are supported - floatval = self.space.float_w(w_obj) + floatval = self.space.float_w(w_obj, allow_conversion=False) self.handle_float(w_ffitype, w_obj, floatval) def _singlefloat(self, w_ffitype, w_obj): # a separate function, which can be seen by the jit or not, # depending on whether singlefloats are supported from rpython.rlib.rarithmetic import r_singlefloat - floatval = self.space.float_w(w_obj) + floatval = self.space.float_w(w_obj, allow_conversion=False) singlefloatval = r_singlefloat(floatval) self.handle_singlefloat(w_ffitype, w_obj, singlefloatval) From noreply at buildbot.pypy.org Thu Feb 27 16:45:08 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Feb 2014 16:45:08 +0100 (CET) Subject: [pypy-commit] stmgc default: Start adapting test_gcpage Message-ID: <20140227154508.90E831C02FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r891:6b8ac788b8c0 Date: 2014-02-27 16:44 +0100 http://bitbucket.org/pypy/stmgc/changeset/6b8ac788b8c0/ Log: Start adapting test_gcpage diff --git a/c7/stm/misc.c 
b/c7/stm/misc.c --- a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -41,6 +41,11 @@ } #ifdef STM_TESTS +uint8_t _stm_get_page_flag(uintptr_t index) +{ + return flag_page_private[index]; +} + long _stm_count_modified_old_objects(void) { if (STM_PSEGMENT->modified_old_objects == NULL) diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -74,7 +74,7 @@ #ifdef STM_TESTS bool _stm_was_read(object_t *obj); bool _stm_was_written(object_t *obj); -uint8_t _stm_creation_marker(object_t *obj); +uint8_t _stm_get_page_flag(uintptr_t index); bool _stm_in_nursery(object_t *obj); bool _stm_in_transaction(stm_thread_local_t *tl); char *_stm_get_segment_base(long index); diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -10,6 +10,7 @@ typedef ... stm_jmpbuf_t; #define SIZEOF_MYOBJ ... #define _STM_FAST_ALLOC ... +#define _STM_GCFLAG_WRITE_BARRIER ... typedef struct { object_t **shadowstack, **shadowstack_base; @@ -36,6 +37,8 @@ char *_stm_get_segment_base(long index); bool _stm_in_transaction(stm_thread_local_t *tl); void _stm_test_switch(stm_thread_local_t *tl); +uint8_t _stm_get_page_flag(uintptr_t index); +int _stm_get_flags(object_t *obj); void _stm_start_transaction(stm_thread_local_t *tl, stm_jmpbuf_t *jmpbuf); bool _check_commit_transaction(void); @@ -70,6 +73,11 @@ """) +GC_N_SMALL_REQUESTS = 36 # from gcpage.c +SHARED_PAGE = 1 # from pages.h +PRIVATE_PAGE = 3 # from pages.h + + lib = ffi.verify(''' #include #include @@ -85,7 +93,7 @@ #define SIZEOF_MYOBJ sizeof(struct myobj_s) -uint8_t _stm_get_flags(object_t *obj) { +int _stm_get_flags(object_t *obj) { return obj->stm_flags; } @@ -231,7 +239,9 @@ ''', sources=source_files, define_macros=[('STM_TESTS', '1'), ('STM_NO_COND_WAIT', '1'), - ('STM_DEBUGPRINT', '1')], + ('STM_DEBUGPRINT', '1'), + ('GC_N_SMALL_REQUESTS', str(GC_N_SMALL_REQUESTS)), #check + ], undef_macros=['NDEBUG'], include_dirs=[parent_dir], extra_compile_args=['-g', '-O0', '-Werror', '-ferror-limit=1'], @@ -241,6 +251,7 @@ WORD = 8 HDR = lib.SIZEOF_MYOBJ assert HDR == 8 +GCFLAG_WRITE_BARRIER = lib._STM_GCFLAG_WRITE_BARRIER class Conflict(Exception): @@ -331,7 +342,7 @@ lib.stm_collect(0) def stm_get_page_flag(pagenum): - return lib.stm_get_page_flag(pagenum) + return lib._stm_get_page_flag(pagenum) def stm_get_obj_size(o): return lib.stmcb_size_rounded_up(stm_get_real_address(o)) diff --git a/c7/test/test_gcpage.py b/c7/test/test_gcpage.py --- a/c7/test/test_gcpage.py +++ b/c7/test/test_gcpage.py @@ -1,55 +1,40 @@ +from support import * +import py - -class DISABLED: +class TestGCPage(BaseTest): def test_large_obj_alloc(self): # test obj which doesn't fit into the size_classes # for now, we will still allocate it in the nursery. 
- # expects: LARGE_OBJECT_WORDS 36 + # expects: GC_N_SMALL_REQUESTS 36 size_class = 1000 # too big obj_size = size_class * 8 assert obj_size > 4096 # we want more than 1 page - assert obj_size < 4096 * 1024 # in the nursery + assert obj_size < lib._STM_FAST_ALLOC # in the nursery self.start_transaction() new = stm_allocate(obj_size) assert is_in_nursery(new) - assert len(stm_get_obj_pages(new)) == 2 - assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] - == [lib.PRIVATE_PAGE]*2) self.push_root(new) stm_minor_collect() new = self.pop_root() assert len(stm_get_obj_pages(new)) == 2 - # assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] - # == [lib.UNCOMMITTED_SHARED_PAGE]*2) + assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] + == [SHARED_PAGE]*2) assert not is_in_nursery(new) + stm_write(new) + self.commit_transaction() - def test_large_obj_write(self): - # test obj which doesn't fit into the size_classes - # expects: LARGE_OBJECT_WORDS 36 - size_class = 1000 # too big - obj_size = size_class * 8 - assert obj_size > 4096 # we want more than 1 page - assert obj_size < 4096 * 1024 # in the nursery - + # now proceed to write into the object in a new transaction self.start_transaction() - new = stm_allocate(obj_size) - assert is_in_nursery(new) - self.push_root(new) - self.commit_transaction() - new = self.pop_root() - assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] - == [lib.SHARED_PAGE]*2) - - self.start_transaction() + == [SHARED_PAGE]*2) stm_write(new) assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] - == [lib.PRIVATE_PAGE]*2) - + == [PRIVATE_PAGE]*2) + # write to 2nd page of object!! wnew = stm_get_real_address(new) wnew[4097] = 'x' @@ -59,19 +44,29 @@ stm_read(new) rnew = stm_get_real_address(new) assert rnew[4097] == '\0' - + self.abort_transaction() + + self.switch(0) + self.abort_transaction() + assert ([stm_get_page_flag(p) for p in stm_get_obj_pages(new)] + == [PRIVATE_PAGE]*2) + def test_partial_alloced_pages(self): self.start_transaction() new = stm_allocate(16) self.push_root(new) stm_minor_collect() new = self.pop_root() - # assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == lib.UNCOMMITTED_SHARED_PAGE - # assert not (stm_get_flags(new) & lib.GCFLAG_NOT_COMMITTED) + + assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == SHARED_PAGE + assert stm_get_flags(new) & GCFLAG_WRITE_BARRIER + + stm_write(new) + assert not (stm_get_flags(new) & GCFLAG_WRITE_BARRIER) self.commit_transaction() - assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == lib.SHARED_PAGE - assert not (stm_get_flags(new) & lib.GCFLAG_NOT_COMMITTED) + assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == SHARED_PAGE + assert stm_get_flags(new) & GCFLAG_WRITE_BARRIER self.start_transaction() newer = stm_allocate(16) @@ -79,15 +74,16 @@ stm_minor_collect() newer = self.pop_root() # 'new' is still in shared_page and committed - assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == lib.SHARED_PAGE - assert not (stm_get_flags(new) & lib.GCFLAG_NOT_COMMITTED) + assert stm_get_page_flag(stm_get_obj_pages(new)[0]) == SHARED_PAGE + assert stm_get_flags(new) & GCFLAG_WRITE_BARRIER # 'newer' is now part of the SHARED page with 'new', but - # marked as UNCOMMITTED, so no privatization has to take place: + # uncommitted, so no privatization has to take place: assert stm_get_obj_pages(new) == stm_get_obj_pages(newer) - assert stm_get_flags(newer) & lib.GCFLAG_NOT_COMMITTED + assert stm_get_flags(newer) & GCFLAG_WRITE_BARRIER stm_write(newer) # does not 
privatize - assert stm_get_page_flag(stm_get_obj_pages(newer)[0]) == lib.SHARED_PAGE + assert not (stm_get_flags(newer) & GCFLAG_WRITE_BARRIER) + assert stm_get_page_flag(stm_get_obj_pages(newer)[0]) == SHARED_PAGE self.commit_transaction() - - assert stm_get_page_flag(stm_get_obj_pages(newer)[0]) == lib.SHARED_PAGE - assert not (stm_get_flags(newer) & lib.GCFLAG_NOT_COMMITTED) + + assert stm_get_page_flag(stm_get_obj_pages(newer)[0]) == SHARED_PAGE + assert stm_get_flags(newer) & GCFLAG_WRITE_BARRIER From noreply at buildbot.pypy.org Thu Feb 27 16:57:44 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Feb 2014 16:57:44 +0100 (CET) Subject: [pypy-commit] stmgc default: Kill SP_SAFE_POINT_CANNOT_COLLECT. Message-ID: <20140227155744.A19D61C03D5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r892:e739af01aa02 Date: 2014-02-27 16:57 +0100 http://bitbucket.org/pypy/stmgc/changeset/e739af01aa02/ Log: Kill SP_SAFE_POINT_CANNOT_COLLECT. diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -75,11 +75,6 @@ assert(other_pseg->transaction_state == TS_MUST_ABORT); other_pseg->pub.nursery_end = NSE_SIGNAL; - /* we will issue a safe point and wait: */ - STM_PSEGMENT->safe_point = SP_SAFE_POINT_CANNOT_COLLECT; - // XXX do we really need a safe_point here? It seems we can - // kill it and the whole SP_SAFE_POINT_CANNOT_COLLECT - /* wait, hopefully until the other thread broadcasts "I'm done aborting" (spurious wake-ups are ok). */ dprintf(("contention: wait C_SAFE_POINT...\n")); @@ -90,7 +85,7 @@ /* now we return into _stm_write_slowpath() and will try again to acquire the write lock on our object. */ - STM_PSEGMENT->safe_point = SP_RUNNING; + assert(STM_PSEGMENT->safe_point == SP_RUNNING); } mutex_unlock(); diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -353,10 +353,10 @@ minor_collection(/*commit=*/ true); mutex_lock(); - STM_PSEGMENT->safe_point = SP_SAFE_POINT_CAN_COLLECT; + STM_PSEGMENT->safe_point = SP_SAFE_POINT; /* wait until the other thread is at a safe-point */ - wait_for_other_safe_points(SP_SAFE_POINT_CANNOT_COLLECT); + wait_for_other_safe_points(); /* the rest of this function either runs atomically without releasing the mutex, or aborts the current thread. */ diff --git a/c7/stm/core.h b/c7/stm/core.h --- a/c7/stm/core.h +++ b/c7/stm/core.h @@ -131,8 +131,7 @@ enum /* safe_point */ { SP_NO_TRANSACTION=0, SP_RUNNING, - SP_SAFE_POINT_CANNOT_COLLECT, - SP_SAFE_POINT_CAN_COLLECT, + SP_SAFE_POINT, }; enum /* transaction_state */ { TS_NONE=0, diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -226,12 +226,12 @@ void _stm_start_safe_point(void) { assert(STM_PSEGMENT->safe_point == SP_RUNNING); - STM_PSEGMENT->safe_point = SP_SAFE_POINT_CAN_COLLECT; + STM_PSEGMENT->safe_point = SP_SAFE_POINT; } void _stm_stop_safe_point(void) { - assert(STM_PSEGMENT->safe_point == SP_SAFE_POINT_CAN_COLLECT); + assert(STM_PSEGMENT->safe_point == SP_SAFE_POINT); STM_PSEGMENT->safe_point = SP_RUNNING; if (STM_PSEGMENT->transaction_state == TS_MUST_ABORT) @@ -240,7 +240,7 @@ #endif -static void wait_for_other_safe_points(int requested_safe_point_kind) +static void wait_for_other_safe_points(void) { /* Must be called with the mutex. When all other threads are in a safe point of at least the requested kind, returns. Otherwise, @@ -261,12 +261,9 @@ in the cond_wait() in this same function. */ - /* XXX review what occurs for the other kind! 
*/ - assert(requested_safe_point_kind == SP_SAFE_POINT_CANNOT_COLLECT); - restart: assert(_has_mutex()); - assert(STM_PSEGMENT->safe_point == SP_SAFE_POINT_CAN_COLLECT); + assert(STM_PSEGMENT->safe_point == SP_SAFE_POINT); if (STM_PSEGMENT->transaction_state == TS_MUST_ABORT) abort_with_mutex(); @@ -282,10 +279,7 @@ SP_RUNNING, or at the wrong kind of safe point. */ struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i); - if (other_pseg->safe_point == SP_RUNNING || - (requested_safe_point_kind == SP_SAFE_POINT_CAN_COLLECT && - other_pseg->safe_point == SP_SAFE_POINT_CANNOT_COLLECT)) { - + if (other_pseg->safe_point == SP_RUNNING) { /* we need to wait for this thread. Use NSE_SIGNAL to ask it (and possibly all other threads in the same case) to enter a safe-point soon. */ @@ -324,7 +318,7 @@ while (STM_SEGMENT->nursery_end == NSE_SIGNAL) { dprintf(("collectable_safe_point...\n")); - STM_PSEGMENT->safe_point = SP_SAFE_POINT_CAN_COLLECT; + STM_PSEGMENT->safe_point = SP_SAFE_POINT; STM_SEGMENT->nursery_end = NURSERY_END; /* signal all the threads blocked in diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -27,5 +27,5 @@ static void wait_for_end_of_inevitable_transaction(bool can_abort); /* see the source for an exact description */ -static void wait_for_other_safe_points(int requested_safe_point_kind); +static void wait_for_other_safe_points(void); static void collectable_safe_point(void); From noreply at buildbot.pypy.org Thu Feb 27 17:03:00 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Feb 2014 17:03:00 +0100 (CET) Subject: [pypy-commit] stmgc default: A Makefile to run all *.duh files, or to measure their run-time Message-ID: <20140227160300.E85D41C02FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r893:1140c01acd00 Date: 2014-02-27 17:02 +0100 http://bitbucket.org/pypy/stmgc/changeset/1140c01acd00/ Log: A Makefile to run all *.duh files, or to measure their run-time diff --git a/duhton/demo/Makefile b/duhton/demo/Makefile new file mode 100644 --- /dev/null +++ b/duhton/demo/Makefile @@ -0,0 +1,21 @@ + +TESTS = $(wildcard *.duh) + +DEBUG = $(addprefix debug-,$(TESTS)) +BUILD = $(addprefix build-,$(TESTS)) +TIMER = $(addprefix timer-,$(TESTS)) + +all: $(DEBUG) $(BUILD) + +timer: $(TIMER) + + +debug-%: + ../duhton_debug $* + +build-%: + ../duhton $* + +timer-%: + bash -c "time ../duhton $*" > /dev/null + @echo From noreply at buildbot.pypy.org Thu Feb 27 17:34:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Feb 2014 17:34:14 +0100 (CET) Subject: [pypy-commit] stmgc default: Keep track of how much memory we're using Message-ID: <20140227163414.2DE171C03FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r894:7bd32f8dc1e8 Date: 2014-02-27 17:34 +0100 http://bitbucket.org/pypy/stmgc/changeset/7bd32f8dc1e8/ Log: Keep track of how much memory we're using diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -76,6 +76,7 @@ char *addr = _stm_large_malloc(size); if (addr == NULL) stm_fatalerror("not enough memory!\n"); + increment_total_allocated(size + LARGE_MALLOC_OVERHEAD); if (addr + size > uninitialized_page_start) { uintptr_t npages; diff --git a/c7/stm/largemalloc.h b/c7/stm/largemalloc.h --- a/c7/stm/largemalloc.h +++ b/c7/stm/largemalloc.h @@ -12,3 +12,6 @@ void _stm_large_free(char *data); void _stm_large_dump(void); + + +#define LARGE_MALLOC_OVERHEAD (2 * sizeof(size_t)) /* estimate */ diff --git a/c7/stm/misc.c b/c7/stm/misc.c --- 
a/c7/stm/misc.c +++ b/c7/stm/misc.c @@ -71,4 +71,12 @@ return (object_t *)list_item( STM_PSEGMENT->objects_pointing_to_nursery, index); } + +uint64_t _stm_total_allocated(void) +{ + mutex_pages_lock(); + uint64_t result = increment_total_allocated(0); + mutex_pages_unlock(); + return result; +} #endif diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -213,10 +213,14 @@ bool locked = false; wlog_t *item; TREE_LOOP_FORWARD(*STM_PSEGMENT->young_outside_nursery, item) { + assert(!_is_in_nursery((object_t *)item->addr)); if (!locked) { mutex_pages_lock(); locked = true; } + char *realobj = REAL_ADDRESS(STM_SEGMENT->segment_base,item->addr); + ssize_t size = stmcb_size_rounded_up((struct object_s *)realobj); + increment_total_allocated(-(size + LARGE_MALLOC_OVERHEAD)); _stm_large_free(stm_object_pages + item->addr); } TREE_LOOP_END; diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -6,10 +6,19 @@ /************************************************************/ static union { - uint8_t mutex_pages; + struct { + uint8_t mutex_pages; + uint64_t total_allocated; /* keep track of how much memory we're + using, ignoring nurseries */ + }; char reserved[64]; } pages_ctl __attribute__((aligned(64))); +static void teardown_pages(void) +{ + memset(&pages_ctl, 0, sizeof(pages_ctl)); +} + static void mutex_pages_lock(void) { while (__sync_lock_test_and_set(&pages_ctl.mutex_pages, 1) != 0) { @@ -28,6 +37,13 @@ return pages_ctl.mutex_pages != 0; } +static uint64_t increment_total_allocated(ssize_t add_or_remove) +{ + assert(_has_mutex_pages()); + pages_ctl.total_allocated += add_or_remove; + return pages_ctl.total_allocated; +} + /************************************************************/ @@ -108,6 +124,7 @@ } write_fence(); memset(flag_page_private + pagenum, PRIVATE_PAGE, count); + increment_total_allocated(4096 * count); } static void _pages_privatize(uintptr_t pagenum, uintptr_t count, bool full) diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -23,6 +23,7 @@ static void mutex_pages_lock(void); static void mutex_pages_unlock(void); +static uint64_t increment_total_allocated(ssize_t add_or_remove); inline static void pages_privatize(uintptr_t pagenum, uintptr_t count, bool full) { diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -101,6 +101,7 @@ teardown_sync(); teardown_gcpage(); teardown_nursery(); + teardown_pages(); } void _init_shadow_stack(stm_thread_local_t *tl) diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -92,6 +92,7 @@ long _stm_count_objects_pointing_to_nursery(void); object_t *_stm_enum_modified_old_objects(long index); object_t *_stm_enum_objects_pointing_to_nursery(long index); +uint64_t _stm_total_allocated(void); #endif #define _STM_GCFLAG_WRITE_BARRIER 0x01 diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -70,6 +70,7 @@ object_t *_stm_enum_objects_pointing_to_nursery(long index); void stm_collect(long level); +uint64_t _stm_total_allocated(void); """) @@ -215,7 +216,7 @@ } else { int nrefs = myobj->type_id - 421420; - assert(nrefs < 100); + assert(nrefs < 10000); /* artificial limit, to check for garbage */ if (nrefs == 0) /* weakrefs */ nrefs = 1; return sizeof(struct myobj_s) + nrefs * sizeof(void*); diff --git a/c7/test/test_nursery.py b/c7/test/test_nursery.py --- a/c7/test/test_nursery.py +++ b/c7/test/test_nursery.py @@ -85,13 +85,17 
@@ obj_size = lib._STM_FAST_ALLOC + 16 self.start_transaction() + assert lib._stm_total_allocated() == 0 seen = set() for i in range(10): stm_minor_collect() new = stm_allocate(obj_size) assert not is_in_nursery(new) + assert lib._stm_total_allocated() == obj_size + 16 seen.add(new) assert len(seen) < 5 # addresses are reused + stm_minor_collect() + assert lib._stm_total_allocated() == 0 def test_larger_than_limit_for_nursery_dont_die(self): obj_nrefs = (lib._STM_FAST_ALLOC + 16) // 8 @@ -115,6 +119,19 @@ lp1 = stm_get_ref(lp1, i) assert not lp1 + def test_account_for_privatized_page(self): + self.start_transaction() + obj = stm_allocate(16) + self.push_root(obj) + self.commit_transaction() + obj = self.pop_root() + base = lib._stm_total_allocated() + assert base <= 4096 + + self.start_transaction() + stm_write(obj) + assert lib._stm_total_allocated() == base + 4096 + def test_reset_partial_alloc_pages(self): py.test.skip("a would-be-nice feature, but not actually needed: " "the next major GC will take care of it") From noreply at buildbot.pypy.org Thu Feb 27 17:51:52 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 17:51:52 +0100 (CET) Subject: [pypy-commit] pypy default: kill MultiDimViewIterator Message-ID: <20140227165152.493951C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69520:c4355a931db5 Date: 2014-02-27 08:28 -0500 http://bitbucket.org/pypy/pypy/changeset/c4355a931db5/ Log: kill MultiDimViewIterator diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -283,9 +283,9 @@ self.get_backstrides(), self.get_shape(), shape, backward_broadcast) - return iter.MultiDimViewIterator(self, self.start, - r[0], r[1], shape) - return iter.ArrayIterator(self) + return iter.ArrayIterator(self, shape, r[0], r[1]) + return iter.ArrayIterator(self, self.shape, self.strides, + self.backstrides) def create_axis_iter(self, shape, dim, cum): return iter.AxisIterator(self, shape, dim, cum) @@ -293,7 +293,7 @@ def create_dot_iter(self, shape, skip): r = calculate_dot_strides(self.get_strides(), self.get_backstrides(), shape, skip) - return iter.MultiDimViewIterator(self, self.start, r[0], r[1], shape) + return iter.ArrayIterator(self, shape, r[0], r[1]) def swapaxes(self, space, orig_arr, axis1, axis2): shape = self.get_shape()[:] diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -79,14 +79,17 @@ class ArrayIterator(object): - def __init__(self, array): + _immutable_fields_ = ['array', 'start', 'size', 'ndim_m1', 'shape_m1', + 'strides', 'backstrides'] + + def __init__(self, array, shape, strides, backstrides): self.array = array self.start = array.start - self.size = array.get_size() - self.ndim_m1 = len(array.shape) - 1 - self.shape_m1 = [s - 1 for s in array.shape] - self.strides = array.strides[:] - self.backstrides = array.backstrides[:] + self.size = support.product(shape) + self.ndim_m1 = len(shape) - 1 + self.shape_m1 = [s - 1 for s in shape] + self.strides = strides + self.backstrides = backstrides self.reset() def reset(self): @@ -106,10 +109,24 @@ self.indices[i] = 0 self.offset -= self.backstrides[i] + @jit.unroll_safe def next_skip_x(self, step): - # XXX implement - for _ in range(step): - self.next() + assert step >= 0 + if step == 0: + return + self.index += step + for i in range(self.ndim_m1, -1, -1): + if self.indices[i] 
< (self.shape_m1[i] + 1) - step: + self.indices[i] += step + self.offset += self.strides[i] * step + break + else: + remaining_step = (self.indices[i] + step) // (self.shape_m1[i] + 1) + this_i_step = step - remaining_step * (self.shape_m1[i] + 1) + self.indices[i] = self.indices[i] + this_i_step + self.offset += self.strides[i] * this_i_step + step = remaining_step + assert step > 0 def done(self): return self.index >= self.size @@ -124,56 +141,6 @@ self.array.setitem(self.offset, elem) -class MultiDimViewIterator(ArrayIterator): - def __init__(self, array, start, strides, backstrides, shape): - self.indexes = [0] * len(shape) - self.array = array - self.shape = shape - self.offset = start - self.shapelen = len(shape) - self._done = self.shapelen == 0 or support.product(shape) == 0 - self.strides = strides - self.backstrides = backstrides - self.size = array.size - - @jit.unroll_safe - def next(self): - offset = self.offset - for i in range(self.shapelen - 1, -1, -1): - if self.indexes[i] < self.shape[i] - 1: - self.indexes[i] += 1 - offset += self.strides[i] - break - else: - self.indexes[i] = 0 - offset -= self.backstrides[i] - else: - self._done = True - self.offset = offset - - @jit.unroll_safe - def next_skip_x(self, step): - for i in range(len(self.shape) - 1, -1, -1): - if self.indexes[i] < self.shape[i] - step: - self.indexes[i] += step - self.offset += self.strides[i] * step - break - else: - remaining_step = (self.indexes[i] + step) // self.shape[i] - this_i_step = step - remaining_step * self.shape[i] - self.offset += self.strides[i] * this_i_step - self.indexes[i] = self.indexes[i] + this_i_step - step = remaining_step - else: - self._done = True - - def done(self): - return self._done - - def reset(self): - self.offset %= self.size - - class AxisIterator(ArrayIterator): def __init__(self, array, shape, dim, cumulative): self.shape = shape diff --git a/pypy/module/micronumpy/test/test_iter.py b/pypy/module/micronumpy/test/test_iter.py --- a/pypy/module/micronumpy/test/test_iter.py +++ b/pypy/module/micronumpy/test/test_iter.py @@ -1,74 +1,73 @@ -from pypy.module.micronumpy.iter import MultiDimViewIterator +from pypy.module.micronumpy.iter import ArrayIterator class MockArray(object): size = 1 + start = 0 class TestIterDirect(object): - def test_C_viewiterator(self): + def test_iterator_basic(self): #Let's get started, simple iteration in C order with #contiguous layout => strides[-1] is 1 - start = 0 shape = [3, 5] strides = [5, 1] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [10, 4] - i = MultiDimViewIterator(MockArray, start, strides, backstrides, shape) + i = ArrayIterator(MockArray, shape, strides, backstrides) i.next() i.next() i.next() assert i.offset == 3 assert not i.done() - assert i.indexes == [0,3] + assert i.indices == [0,3] #cause a dimension overflow i.next() i.next() assert i.offset == 5 - assert i.indexes == [1,0] + assert i.indices == [1,0] #Now what happens if the array is transposed? 
strides[-1] != 1 # therefore layout is non-contiguous strides = [1, 3] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [2, 12] - i = MultiDimViewIterator(MockArray, start, strides, backstrides, shape) + i = ArrayIterator(MockArray, shape, strides, backstrides) i.next() i.next() i.next() assert i.offset == 9 assert not i.done() - assert i.indexes == [0,3] + assert i.indices == [0,3] #cause a dimension overflow i.next() i.next() assert i.offset == 1 - assert i.indexes == [1,0] + assert i.indices == [1,0] - def test_C_viewiterator_step(self): + def test_iterator_step(self): #iteration in C order with #contiguous layout => strides[-1] is 1 #skip less than the shape - start = 0 shape = [3, 5] strides = [5, 1] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [10, 4] - i = MultiDimViewIterator(MockArray, start, strides, backstrides, shape) + i = ArrayIterator(MockArray, shape, strides, backstrides) i.next_skip_x(2) i.next_skip_x(2) i.next_skip_x(2) assert i.offset == 6 assert not i.done() - assert i.indexes == [1,1] + assert i.indices == [1,1] #And for some big skips i.next_skip_x(5) assert i.offset == 11 - assert i.indexes == [2,1] + assert i.indices == [2,1] i.next_skip_x(5) # Note: the offset does not overflow but recycles, # this is good for broadcast assert i.offset == 1 - assert i.indexes == [0,1] + assert i.indices == [0,1] assert i.done() #Now what happens if the array is transposed? strides[-1] != 1 @@ -76,18 +75,18 @@ strides = [1, 3] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [2, 12] - i = MultiDimViewIterator(MockArray, start, strides, backstrides, shape) + i = ArrayIterator(MockArray, shape, strides, backstrides) i.next_skip_x(2) i.next_skip_x(2) i.next_skip_x(2) assert i.offset == 4 - assert i.indexes == [1,1] + assert i.indices == [1,1] assert not i.done() i.next_skip_x(5) assert i.offset == 5 - assert i.indexes == [2,1] + assert i.indices == [2,1] assert not i.done() i.next_skip_x(5) - assert i.indexes == [0,1] + assert i.indices == [0,1] assert i.offset == 3 assert i.done() From noreply at buildbot.pypy.org Thu Feb 27 17:51:53 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 17:51:53 +0100 (CET) Subject: [pypy-commit] pypy default: kill duplicate code in AxisIterator, add AllButAxisIterator Message-ID: <20140227165153.78D521C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69521:aab294de242a Date: 2014-02-27 11:29 -0500 http://bitbucket.org/pypy/pypy/changeset/aab294de242a/ Log: kill duplicate code in AxisIterator, add AllButAxisIterator diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -283,9 +283,10 @@ self.get_backstrides(), self.get_shape(), shape, backward_broadcast) - return iter.ArrayIterator(self, shape, r[0], r[1]) - return iter.ArrayIterator(self, self.shape, self.strides, - self.backstrides) + return iter.ArrayIterator(self, support.product(shape), shape, + r[0], r[1]) + return iter.ArrayIterator(self, self.get_size(), self.shape, + self.strides, self.backstrides) def create_axis_iter(self, shape, dim, cum): return iter.AxisIterator(self, shape, dim, cum) @@ -293,7 +294,8 @@ def create_dot_iter(self, shape, skip): r = calculate_dot_strides(self.get_strides(), self.get_backstrides(), shape, skip) - return iter.ArrayIterator(self, shape, r[0], r[1]) + return iter.ArrayIterator(self, 
support.product(shape), shape, + r[0], r[1]) def swapaxes(self, space, orig_arr, axis1, axis2): shape = self.get_shape()[:] diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -82,10 +82,11 @@ _immutable_fields_ = ['array', 'start', 'size', 'ndim_m1', 'shape_m1', 'strides', 'backstrides'] - def __init__(self, array, shape, strides, backstrides): + def __init__(self, array, size, shape, strides, backstrides): + assert len(shape) == len(strides) == len(backstrides) self.array = array self.start = array.start - self.size = support.product(shape) + self.size = size self.ndim_m1 = len(shape) - 1 self.shape_m1 = [s - 1 for s in shape] self.strides = strides @@ -141,44 +142,25 @@ self.array.setitem(self.offset, elem) -class AxisIterator(ArrayIterator): - def __init__(self, array, shape, dim, cumulative): - self.shape = shape - strides = array.get_strides() - backstrides = array.get_backstrides() - if cumulative: - self.strides = strides - self.backstrides = backstrides - elif len(shape) == len(strides): +def AxisIterator(array, shape, axis, cumulative): + strides = array.get_strides() + backstrides = array.get_backstrides() + if not cumulative: + if len(shape) == len(strides): # keepdims = True - self.strides = strides[:dim] + [0] + strides[dim + 1:] - self.backstrides = backstrides[:dim] + [0] + backstrides[dim + 1:] + strides = strides[:axis] + [0] + strides[axis + 1:] + backstrides = backstrides[:axis] + [0] + backstrides[axis + 1:] else: - self.strides = strides[:dim] + [0] + strides[dim:] - self.backstrides = backstrides[:dim] + [0] + backstrides[dim:] - self.first_line = True - self.indices = [0] * len(shape) - self._done = array.get_size() == 0 - self.offset = array.start - self.dim = dim - self.array = array + strides = strides[:axis] + [0] + strides[axis:] + backstrides = backstrides[:axis] + [0] + backstrides[axis:] + return ArrayIterator(array, support.product(shape), shape, strides, backstrides) - @jit.unroll_safe - def next(self): - for i in range(len(self.shape) - 1, -1, -1): - if self.indices[i] < self.shape[i] - 1: - if i == self.dim: - self.first_line = False - self.indices[i] += 1 - self.offset += self.strides[i] - break - else: - if i == self.dim: - self.first_line = True - self.indices[i] = 0 - self.offset -= self.backstrides[i] - else: - self._done = True - def done(self): - return self._done +def AllButAxisIterator(array, axis): + size = array.get_size() + shape = array.get_shape()[:] + backstrides = array.backstrides[:] + if size: + size /= shape[axis] + shape[axis] = backstrides[axis] = 0 + return ArrayIterator(array, size, shape, array.strides, backstrides) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -215,16 +215,14 @@ while not out_iter.done(): axis_reduce__driver.jit_merge_point(shapelen=shapelen, func=func, dtype=dtype) - if arr_iter.done(): - w_val = identity + assert not arr_iter.done() + w_val = arr_iter.getitem().convert_to(space, dtype) + if out_iter.indices[axis] == 0: + if identity is not None: + w_val = func(dtype, identity, w_val) else: - w_val = arr_iter.getitem().convert_to(space, dtype) - if out_iter.first_line: - if identity is not None: - w_val = func(dtype, identity, w_val) - else: - cur = temp_iter.getitem() - w_val = func(dtype, cur, w_val) + cur = temp_iter.getitem() + w_val = func(dtype, cur, w_val) out_iter.setitem(w_val) if cumulative: 
temp_iter.setitem(w_val) diff --git a/pypy/module/micronumpy/sort.py b/pypy/module/micronumpy/sort.py --- a/pypy/module/micronumpy/sort.py +++ b/pypy/module/micronumpy/sort.py @@ -11,7 +11,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.micronumpy import descriptor, types, constants as NPY from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy.iter import AxisIterator +from pypy.module.micronumpy.iter import AllButAxisIterator INT_SIZE = rffi.sizeof(lltype.Signed) @@ -146,21 +146,20 @@ if axis < 0 or axis >= len(shape): raise OperationError(space.w_IndexError, space.wrap( "Wrong axis %d" % axis)) - iterable_shape = shape[:axis] + [0] + shape[axis + 1:] - iter = AxisIterator(arr, iterable_shape, axis, False) + arr_iter = AllButAxisIterator(arr, axis) index_impl = index_arr.implementation - index_iter = AxisIterator(index_impl, iterable_shape, axis, False) + index_iter = AllButAxisIterator(index_impl, axis) stride_size = arr.strides[axis] index_stride_size = index_impl.strides[axis] axis_size = arr.shape[axis] - while not iter.done(): + while not arr_iter.done(): for i in range(axis_size): raw_storage_setitem(storage, i * index_stride_size + index_iter.offset, i) r = Repr(index_stride_size, stride_size, axis_size, - arr.get_storage(), storage, index_iter.offset, iter.offset) + arr.get_storage(), storage, index_iter.offset, arr_iter.offset) ArgSort(r).sort() - iter.next() + arr_iter.next() index_iter.next() return index_arr @@ -292,14 +291,13 @@ if axis < 0 or axis >= len(shape): raise OperationError(space.w_IndexError, space.wrap( "Wrong axis %d" % axis)) - iterable_shape = shape[:axis] + [0] + shape[axis + 1:] - iter = AxisIterator(arr, iterable_shape, axis, False) + arr_iter = AllButAxisIterator(arr, axis) stride_size = arr.strides[axis] axis_size = arr.shape[axis] - while not iter.done(): - r = Repr(stride_size, axis_size, arr.get_storage(), iter.offset) + while not arr_iter.done(): + r = Repr(stride_size, axis_size, arr.get_storage(), arr_iter.offset) ArgSort(r).sort() - iter.next() + arr_iter.next() return sort diff --git a/pypy/module/micronumpy/test/test_iter.py b/pypy/module/micronumpy/test/test_iter.py --- a/pypy/module/micronumpy/test/test_iter.py +++ b/pypy/module/micronumpy/test/test_iter.py @@ -1,8 +1,8 @@ +from pypy.module.micronumpy import support from pypy.module.micronumpy.iter import ArrayIterator class MockArray(object): - size = 1 start = 0 @@ -14,7 +14,8 @@ strides = [5, 1] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [10, 4] - i = ArrayIterator(MockArray, shape, strides, backstrides) + i = ArrayIterator(MockArray, support.product(shape), shape, + strides, backstrides) i.next() i.next() i.next() @@ -32,7 +33,8 @@ strides = [1, 3] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [2, 12] - i = ArrayIterator(MockArray, shape, strides, backstrides) + i = ArrayIterator(MockArray, support.product(shape), shape, + strides, backstrides) i.next() i.next() i.next() @@ -52,7 +54,8 @@ strides = [5, 1] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [10, 4] - i = ArrayIterator(MockArray, shape, strides, backstrides) + i = ArrayIterator(MockArray, support.product(shape), shape, + strides, backstrides) i.next_skip_x(2) i.next_skip_x(2) i.next_skip_x(2) @@ -75,7 +78,8 @@ strides = [1, 3] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [2, 12] - i = ArrayIterator(MockArray, shape, strides, backstrides) + 
i = ArrayIterator(MockArray, support.product(shape), shape, + strides, backstrides) i.next_skip_x(2) i.next_skip_x(2) i.next_skip_x(2) diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -237,6 +237,10 @@ dtype = out.get_dtype() else: out = W_NDimArray.from_shape(space, shape, dtype, w_instance=obj) + if obj.get_size() == 0: + if self.identity is not None: + out.fill(space, self.identity.convert_to(space, dtype)) + return out return loop.do_axis_reduce(space, shape, self.func, obj, dtype, axis, out, self.identity, cumulative, temp) if cumulative: From noreply at buildbot.pypy.org Thu Feb 27 17:51:54 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 17:51:54 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20140227165154.965561C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69522:3c6f3f1e357c Date: 2014-02-27 07:55 -0500 http://bitbucket.org/pypy/pypy/changeset/3c6f3f1e357c/ Log: cleanup diff --git a/pypy/module/micronumpy/sort.py b/pypy/module/micronumpy/sort.py --- a/pypy/module/micronumpy/sort.py +++ b/pypy/module/micronumpy/sort.py @@ -1,6 +1,3 @@ -""" This is the implementation of various sorting routines in numpy. It's here -because it only makes sense on a concrete array -""" from pypy.interpreter.error import OperationError, oefmt from rpython.rlib.listsort import make_timsort_class from rpython.rlib.objectmodel import specialize @@ -15,6 +12,11 @@ INT_SIZE = rffi.sizeof(lltype.Signed) +all_types = (types.all_float_types + types.all_complex_types + + types.all_int_types) +all_types = [i for i in all_types if not issubclass(i[0], types.Float16)] +all_types = unrolling_iterable(all_types) + def make_argsort_function(space, itemtype, comp_type, count=1): TP = itemtype.T @@ -317,11 +319,6 @@ "sorting of non-numeric types '%s' is not implemented", arr.dtype.get_name()) -all_types = (types.all_float_types + types.all_complex_types + - types.all_int_types) -all_types = [i for i in all_types if not issubclass(i[0], types.Float16)] -all_types = unrolling_iterable(all_types) - class ArgSortCache(object): built = False diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -210,7 +210,7 @@ if out: dtype = out.get_dtype() temp = W_NDimArray.from_shape(space, temp_shape, dtype, - w_instance=obj) + w_instance=obj) elif keepdims: shape = obj_shape[:axis] + [1] + obj_shape[axis + 1:] else: @@ -236,25 +236,28 @@ ) dtype = out.get_dtype() else: - out = W_NDimArray.from_shape(space, shape, dtype, w_instance=obj) + out = W_NDimArray.from_shape(space, shape, dtype, + w_instance=obj) if obj.get_size() == 0: if self.identity is not None: out.fill(space, self.identity.convert_to(space, dtype)) return out - return loop.do_axis_reduce(space, shape, self.func, obj, dtype, axis, out, - self.identity, cumulative, temp) + return loop.do_axis_reduce(space, shape, self.func, obj, dtype, + axis, out, self.identity, cumulative, + temp) if cumulative: if out: if out.get_shape() != [obj.get_size()]: raise OperationError(space.w_ValueError, space.wrap( "out of incompatible size")) else: - out = W_NDimArray.from_shape(space, [obj.get_size()], dtype, w_instance=obj) + out = W_NDimArray.from_shape(space, [obj.get_size()], dtype, + w_instance=obj) loop.compute_reduce_cumulative(space, obj, out, dtype, self.func, - self.identity) + 
self.identity) return out if out: - if len(out.get_shape())>0: + if len(out.get_shape()) > 0: raise oefmt(space.w_ValueError, "output parameter for reduction operation %s has " "too many dimensions", self.name) @@ -266,7 +269,8 @@ return out if keepdims: shape = [1] * len(obj_shape) - out = W_NDimArray.from_shape(space, [1] * len(obj_shape), dtype, w_instance=obj) + out = W_NDimArray.from_shape(space, [1] * len(obj_shape), dtype, + w_instance=obj) out.implementation.setitem(0, res) return out return res @@ -278,6 +282,7 @@ raise OperationError(space.w_ValueError, space.wrap( "outer product only supported for binary functions")) + class W_Ufunc1(W_Ufunc): _immutable_fields_ = ["func", "bool_result"] argcount = 1 From noreply at buildbot.pypy.org Thu Feb 27 17:51:55 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 17:51:55 +0100 (CET) Subject: [pypy-commit] pypy default: rename iter to iterators to avoid clash with builtin Message-ID: <20140227165155.B35B21C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69523:a4d49b0849e1 Date: 2014-02-27 11:32 -0500 http://bitbucket.org/pypy/pypy/changeset/a4d49b0849e1/ Log: rename iter to iterators to avoid clash with builtin diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -5,9 +5,10 @@ from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage, \ raw_storage_getitem, raw_storage_setitem, RAW_STORAGE from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.module.micronumpy import support, loop, iter +from pypy.module.micronumpy import support, loop from pypy.module.micronumpy.base import convert_to_array, W_NDimArray, \ ArrayArgumentException +from pypy.module.micronumpy.iterators import ArrayIterator, AxisIterator from pypy.module.micronumpy.strides import (Chunk, Chunks, NewAxisChunk, RecordChunk, calc_strides, calc_new_strides, shape_agreement, calculate_broadcast_strides, calculate_dot_strides) @@ -283,19 +284,19 @@ self.get_backstrides(), self.get_shape(), shape, backward_broadcast) - return iter.ArrayIterator(self, support.product(shape), shape, - r[0], r[1]) - return iter.ArrayIterator(self, self.get_size(), self.shape, - self.strides, self.backstrides) + return ArrayIterator(self, support.product(shape), shape, + r[0], r[1]) + return ArrayIterator(self, self.get_size(), self.shape, + self.strides, self.backstrides) def create_axis_iter(self, shape, dim, cum): - return iter.AxisIterator(self, shape, dim, cum) + return AxisIterator(self, shape, dim, cum) def create_dot_iter(self, shape, skip): r = calculate_dot_strides(self.get_strides(), self.get_backstrides(), shape, skip) - return iter.ArrayIterator(self, support.product(shape), shape, - r[0], r[1]) + return ArrayIterator(self, support.product(shape), shape, + r[0], r[1]) def swapaxes(self, space, orig_arr, axis1, axis2): shape = self.get_shape()[:] diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iterators.py rename from pypy/module/micronumpy/iter.py rename to pypy/module/micronumpy/iterators.py diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -8,7 +8,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module.micronumpy import support, constants as NPY from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy.iter import PureShapeIterator 
+from pypy.module.micronumpy.iterators import PureShapeIterator call2_driver = jit.JitDriver(name='numpy_call2', diff --git a/pypy/module/micronumpy/sort.py b/pypy/module/micronumpy/sort.py --- a/pypy/module/micronumpy/sort.py +++ b/pypy/module/micronumpy/sort.py @@ -8,7 +8,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.micronumpy import descriptor, types, constants as NPY from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy.iter import AllButAxisIterator +from pypy.module.micronumpy.iterators import AllButAxisIterator INT_SIZE = rffi.sizeof(lltype.Signed) diff --git a/pypy/module/micronumpy/test/test_iter.py b/pypy/module/micronumpy/test/test_iterators.py rename from pypy/module/micronumpy/test/test_iter.py rename to pypy/module/micronumpy/test/test_iterators.py --- a/pypy/module/micronumpy/test/test_iter.py +++ b/pypy/module/micronumpy/test/test_iterators.py @@ -1,5 +1,5 @@ from pypy.module.micronumpy import support -from pypy.module.micronumpy.iter import ArrayIterator +from pypy.module.micronumpy.iterators import ArrayIterator class MockArray(object): From noreply at buildbot.pypy.org Thu Feb 27 17:51:56 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Thu, 27 Feb 2014 17:51:56 +0100 (CET) Subject: [pypy-commit] pypy default: just get this field from the array object Message-ID: <20140227165156.C68921C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69524:8df8a01902f8 Date: 2014-02-27 11:47 -0500 http://bitbucket.org/pypy/pypy/changeset/8df8a01902f8/ Log: just get this field from the array object diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -79,13 +79,12 @@ class ArrayIterator(object): - _immutable_fields_ = ['array', 'start', 'size', 'ndim_m1', 'shape_m1', + _immutable_fields_ = ['array', 'size', 'ndim_m1', 'shape_m1', 'strides', 'backstrides'] def __init__(self, array, size, shape, strides, backstrides): assert len(shape) == len(strides) == len(backstrides) self.array = array - self.start = array.start self.size = size self.ndim_m1 = len(shape) - 1 self.shape_m1 = [s - 1 for s in shape] @@ -96,7 +95,7 @@ def reset(self): self.index = 0 self.indices = [0] * (self.ndim_m1 + 1) - self.offset = self.start + self.offset = self.array.start @jit.unroll_safe def next(self): From noreply at buildbot.pypy.org Thu Feb 27 18:36:38 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Feb 2014 18:36:38 +0100 (CET) Subject: [pypy-commit] stmgc default: detail Message-ID: <20140227173638.DE4111C03D5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r895:fe22684ff934 Date: 2014-02-27 17:56 +0100 http://bitbucket.org/pypy/stmgc/changeset/fe22684ff934/ Log: detail diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -71,12 +71,12 @@ /* thread-safe: use the lock of pages.c to prevent any remapping from occurring under our feet */ mutex_pages_lock(); + increment_total_allocated(size + LARGE_MALLOC_OVERHEAD); /* Allocate the object with largemalloc.c from the lower addresses. 
*/ char *addr = _stm_large_malloc(size); if (addr == NULL) stm_fatalerror("not enough memory!\n"); - increment_total_allocated(size + LARGE_MALLOC_OVERHEAD); if (addr + size > uninitialized_page_start) { uintptr_t npages; From noreply at buildbot.pypy.org Thu Feb 27 18:36:41 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Feb 2014 18:36:41 +0100 (CET) Subject: [pypy-commit] stmgc default: become_inevitable(): was missing resetting jmpbuf_ptr to NULL Message-ID: <20140227173641.222C21C03D5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r897:40ba082e00e8 Date: 2014-02-27 18:36 +0100 http://bitbucket.org/pypy/stmgc/changeset/40ba082e00e8/ Log: become_inevitable(): was missing resetting jmpbuf_ptr to NULL diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -505,7 +505,7 @@ __builtin_longjmp(*jmpbuf_ptr, 1); } -void _stm_become_inevitable(char *msg) +void _stm_become_inevitable(const char *msg) { mutex_lock(); switch (STM_PSEGMENT->transaction_state) { @@ -517,6 +517,7 @@ /* become inevitable */ wait_for_end_of_inevitable_transaction(true); STM_PSEGMENT->transaction_state = TS_INEVITABLE; + STM_SEGMENT->jmpbuf_ptr = NULL; break; case TS_MUST_ABORT: @@ -525,5 +526,6 @@ default: assert(!"invalid transaction_state in become_inevitable"); } + dprintf(("become_inevitable: %s\n", msg)); mutex_unlock(); } diff --git a/c7/stmgc.h b/c7/stmgc.h --- a/c7/stmgc.h +++ b/c7/stmgc.h @@ -64,7 +64,7 @@ void _stm_write_slowpath(object_t *); object_t *_stm_allocate_slowpath(ssize_t); object_t *_stm_allocate_external(ssize_t); -void _stm_become_inevitable(char*); +void _stm_become_inevitable(const char*); void _stm_start_transaction(stm_thread_local_t *, stm_jmpbuf_t *); void _stm_collectable_safe_point(void); @@ -236,7 +236,7 @@ /* Turn the current transaction inevitable. The 'jmpbuf' passed to STM_START_TRANSACTION() is not going to be used any more after this call (but the stm_become_inevitable() itself may still abort). 
*/ -static inline void stm_become_inevitable(char* msg) { +static inline void stm_become_inevitable(const char* msg) { if (STM_SEGMENT->jmpbuf_ptr != NULL) _stm_become_inevitable(msg); } From noreply at buildbot.pypy.org Thu Feb 27 18:36:40 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Feb 2014 18:36:40 +0100 (CET) Subject: [pypy-commit] stmgc default: Remove old files Message-ID: <20140227173640.1CD7B1C03D5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r896:27c17a35ae32 Date: 2014-02-27 17:57 +0100 http://bitbucket.org/pypy/stmgc/changeset/27c17a35ae32/ Log: Remove old files diff --git a/c7/core.c b/c7/core.c deleted file mode 100644 --- a/c7/core.c +++ /dev/null @@ -1,721 +0,0 @@ -#define _GNU_SOURCE -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "core.h" -#include "list.h" -#include "pagecopy.h" - - -/* number of pages per thread: */ -#define NB_PAGES (256*256) // 256MB - -#define NB_THREADS 2 -#define MAP_PAGES_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE) -#define LARGE_OBJECT_WORDS 36 - -#if defined(__i386__) || defined(__x86_64__) -# define HAVE_FULL_EXCHANGE_INSN -#endif - - -typedef TLPREFIX char localchar_t; -typedef TLPREFIX struct alloc_for_size_s alloc_for_size_t; -typedef TLPREFIX struct _thread_local2_s _thread_local2_t; - - -struct alloc_for_size_s { - localchar_t *next; - uint16_t start, stop; - bool flag_partial_page; -}; - -struct _thread_local2_s { - struct _thread_local1_s _tl1; - int thread_num; - char *thread_base; - struct stm_list_s *modified_objects; - struct stm_list_s *new_object_ranges; - struct alloc_for_size_s alloc[LARGE_OBJECT_WORDS]; -}; -#define _STM_TL2 ((_thread_local2_t *)_STM_TL1) - -/* Logical page number (offset) must be offset by thread_num*NB_PAGES to get - the real page number */ -enum { - /* shared read-only page, this (logical) page is shared between threads */ - SHARED_PAGE=0, - /* this page is private for all (2) threads */ - REMAPPING_PAGE, - /* page is already private for all (2) threads */ - PRIVATE_PAGE -}; /* flag_page_private */ - - -/* all pages for all threads: */ -static char *object_pages; -/* pages for the undo-log that contains copies for objs modified by the leader */ -static char *undo_log_pages; -static char *undo_log_current; - -static int num_threads_started; -/* the thread which may be the current leader (als check for global_history!=0) */ -static int leader_thread_num; -/* next free page to allocate objs from */ -static uintptr_t index_page_never_used; -/* the next global write version. 
incremented by transaction starts, set - to 0 by collections */ -static int next_write_version; -/* protects the undo log */ -static int undo_lock; -/* list of objs modified by the leader */ -static struct stm_list_s *global_history; -/* approximate range to check if an obj needs to be added to the undo_log - because it may be in the global_history */ -static uint16_t gh_write_version_first; -static uint16_t gh_write_version_last; -/* stores the state of a page (xxx_PAGE constants above) */ -static uint8_t flag_page_private[NB_PAGES]; - - -/************************************************************/ - -static void spin_loop(void) -{ - asm("pause" : : : "memory"); -} - -static void acquire_lock(int *lock) -{ - while (__sync_lock_test_and_set(lock, 1) != 0) { - while (*lock != 0) - spin_loop(); - } -} - -#define ACQUIRE_LOCK_IF(lock, condition) \ -({ \ - bool _acquired = false; \ - while (condition) { \ - if (__sync_lock_test_and_set(lock, 1) == 0) { \ - if (condition) \ - _acquired = true; \ - else \ - __sync_lock_release(lock); \ - break; \ - } \ - spin_loop(); \ - } \ - _acquired; \ -}) - - -static void release_lock(int *lock) -{ - __sync_lock_release(lock); -} - -static void write_fence(void) -{ -#if defined(__amd64__) || defined(__i386__) - asm("" : : : "memory"); -#else -# error "Define write_fence() for your architecture" -#endif -} - -/* check if obj was read in current transaction */ -static bool _stm_was_read(object_t *obj) -{ - read_marker_t *marker = (read_marker_t *)(((uintptr_t)obj) >> 4); - return (marker->rm == _STM_TL1->transaction_read_version); -} - - -/* 2-thread version to privatize a page. A (logical) page is either shared - by the 2 threads, or private for both. Needs more logic (e.g. ref-count) - for more threads. */ -static void _stm_privatize(uintptr_t pagenum) -{ - /* pagenum is a logical pagenum < NB_PAGES */ - - if (flag_page_private[pagenum] == PRIVATE_PAGE) - return; - -#ifdef HAVE_FULL_EXCHANGE_INSN - /* use __sync_lock_test_and_set() as a cheaper alternative to - __sync_bool_compare_and_swap(). 
*/ - int previous = __sync_lock_test_and_set(&flag_page_private[pagenum], - REMAPPING_PAGE); - if (previous == PRIVATE_PAGE) { - flag_page_private[pagenum] = PRIVATE_PAGE; - return; - } - bool was_shared = (previous == SHARED_PAGE); -#else - bool was_shared = __sync_bool_compare_and_swap(&flag_page_private[pagenum], - SHARED_PAGE, REMAPPING_PAGE); -#endif - if (!was_shared) { - while (flag_page_private[pagenum] == REMAPPING_PAGE) - spin_loop(); - return; - } - - /* 2 threads for now: thread_num = 0 or 1 */ - ssize_t pgoff1 = pagenum; - ssize_t pgoff2 = pagenum + NB_PAGES; - ssize_t localpgoff = pgoff1 + NB_PAGES * _STM_TL2->thread_num; - ssize_t otherpgoff = pgoff1 + NB_PAGES * (1 - _STM_TL2->thread_num); - - void *localpg = object_pages + localpgoff * 4096UL; - void *otherpg = object_pages + otherpgoff * 4096UL; - - int res = remap_file_pages(localpg, 4096, 0, pgoff2, 0); - if (res < 0) { - perror("remap_file_pages"); - abort(); - } - pagecopy(localpg, otherpg); - write_fence(); - assert(flag_page_private[pagenum] == REMAPPING_PAGE); - flag_page_private[pagenum] = PRIVATE_PAGE; -} - - -#define REAL_ADDRESS(object_pages, src) ((object_pages) + (uintptr_t)(src)) - -static char *real_address(uintptr_t src) -{ - return REAL_ADDRESS(_STM_TL2->thread_base, src); -} - -static char *get_thread_base(long thread_num) -{ - return object_pages + thread_num * (NB_PAGES * 4096UL); -} - -void stm_abort_transaction(void); - -enum detect_conflicts_e { CANNOT_CONFLICT, CAN_CONFLICT }; - -/* copy current versions of objs from the leader's object space */ -static void update_to_current_version(enum detect_conflicts_e check_conflict) -{ - /* XXX this can be done by acquiring the undo_lock for much less time, - but it needs to be carefully synchronized with _stm_write_slowpath(). - For now it must be called with the undo_lock acquired. */ - - /* Loop over objects in 'global_history': if they have been - read by the current transaction, the current transaction must - abort; then copy them out of the leader's object space --- - which may have been modified by the leader's uncommitted - transaction; this case will be fixed afterwards. - */ - bool conflict_found_or_dont_check = (check_conflict == CANNOT_CONFLICT); - char *local_base = _STM_TL2->thread_base; - char *remote_base = get_thread_base(1 - _STM_TL2->thread_num); - struct stm_list_s *gh, *gh_next; - - assert(leader_thread_num != _STM_TL2->thread_num); - - for (gh = global_history; gh != NULL; gh = gh_next) { - - STM_LIST_FOREACH(gh, ({ - - if (!conflict_found_or_dont_check) - conflict_found_or_dont_check = _stm_was_read(item); - - char *dst = REAL_ADDRESS(local_base, item); - char *src = REAL_ADDRESS(remote_base, item); - char *src_rebased = src - (uintptr_t)local_base; - size_t size = stm_object_size_rounded_up((object_t *)src_rebased); - - memcpy(dst + sizeof(char *), - src + sizeof(char *), - size - sizeof(char *)); - })); - - gh_next = gh->nextlist; - stm_list_free(gh); - } - global_history = NULL; - gh_write_version_first = 0xffff; - gh_write_version_last = 0; - - /* Finally, loop over objects modified by the leader, - and copy them out of the undo log. 
- */ - char *undo = undo_log_pages; - char *undo_end = undo_log_current; - - while (undo < undo_end) { - - char *src = undo; - char *dst = *(char **)src; - char *src_rebased = src - (uintptr_t)local_base; - - *(char **)src = *(char **)dst; /* fix the first word of the object in - the undo log, for stm_object_size() */ - size_t size = stm_object_size_rounded_up((object_t *)src_rebased); - - memcpy(dst + sizeof(char *), - src + sizeof(char *), - size - sizeof(char *)); - - undo += size; - } - undo_log_current = undo_log_pages; /* make empty again */ - - if (conflict_found_or_dont_check && check_conflict == CAN_CONFLICT) { - release_lock(&undo_lock); - stm_abort_transaction(); - } -} - - -/* if we are not leader and there is a global_history, we check - for conflicts and update our pages */ -static void maybe_update(enum detect_conflicts_e check_conflict) -{ - if (leader_thread_num != _STM_TL2->thread_num && global_history != NULL) { - acquire_lock(&undo_lock); - update_to_current_version(check_conflict); - release_lock(&undo_lock); - } -} - - -void _stm_write_slowpath(object_t *obj) -{ - maybe_update(CAN_CONFLICT); - - _stm_privatize(((uintptr_t)obj) / 4096); - - stm_read(obj); - - _STM_TL2->modified_objects = stm_list_append( - _STM_TL2->modified_objects, obj); - - uint16_t wv = obj->write_version; - obj->write_version = _STM_TL1->transaction_write_version; - - /* We only need to store a copy of the current version of the object if: - - we are the leader; - - the object is present in the global_history. - The second condition is approximated by the following range check. - Storing a few more objects than strictly needed is not really a problem. - */ - /* XXX this can be done without acquiring the undo_lock at all, - but we need more care in update_to_current_version(). */ - - /* XXX can we avoid writing an unbounded number of copies of the - same object in case we run a lot of transactions while the other - thread is busy? Unlikely case but in theory annoying. Should - we anyway bound the undo log's size to much less than NB_PAGES, - and if full here, sleep? Should the bound also count the size - taken by the global_history lists? */ - if (ACQUIRE_LOCK_IF(&undo_lock, - wv <= gh_write_version_last && wv >= gh_write_version_first - && leader_thread_num == _STM_TL2->thread_num)) { - /* record in the undo log a copy of the content of the object */ - size_t size = stm_object_size_rounded_up(obj); - char *source = real_address((uintptr_t)obj); - char *undo = undo_log_current; - *((object_t **)undo) = obj; - memcpy(undo + sizeof(object_t *), - source + sizeof(object_t *), - size - sizeof(object_t *)); - /*write_fence();*/ - undo_log_current = undo + size; - release_lock(&undo_lock); - } -} - - -uintptr_t _stm_reserve_page(void) -{ - /* Grab a free page, initially shared between the threads. */ - - // XXX look in some free list first - - /* Return the index'th object page, which is so far never used. */ - uintptr_t index = __sync_fetch_and_add(&index_page_never_used, 1); - if (index >= NB_PAGES) { - fprintf(stderr, "Out of mmap'ed memory!\n"); - abort(); - } - return index; -} - -#define TO_RANGE(range, start, stop) \ - ((range) = (object_t *)((start) | (((uintptr_t)(stop)) << 16))) - -#define FROM_RANGE(start, stop, range) \ - ((start) = (uint16_t)(uintptr_t)(range), \ - (stop) = ((uintptr_t)(range)) >> 16) - -localchar_t *_stm_alloc_next_page(size_t i) -{ - /* 'alloc->next' points to where the next allocation should go. 
The - present function is called instead when this next allocation is - equal to 'alloc->stop'. As we know that 'start', 'next' and - 'stop' are always nearby pointers, we play tricks and only store - the lower 16 bits of 'start' and 'stop', so that the three - variables plus some flags fit in 16 bytes. - - 'flag_partial_page' is *cleared* to mean that the 'alloc' - describes a complete page, so that it needs not be listed inside - 'new_object_ranges'. In all other cases it is *set*. - */ - uintptr_t page; - localchar_t *result; - alloc_for_size_t *alloc = &_STM_TL2->alloc[i]; - size_t size = i * 8; - - if (alloc->flag_partial_page) { - /* record this range in 'new_object_ranges' */ - localchar_t *ptr1 = alloc->next - size - 1; - object_t *range; - TO_RANGE(range, alloc->start, alloc->stop); - page = ((uintptr_t)ptr1) / 4096; - _STM_TL2->new_object_ranges = stm_list_append( - _STM_TL2->new_object_ranges, (object_t *)page); - _STM_TL2->new_object_ranges = stm_list_append( - _STM_TL2->new_object_ranges, range); - } - - /* reserve a fresh new page */ - page = _stm_reserve_page(); - - result = (localchar_t *)(page * 4096UL); - alloc->start = (uintptr_t)result; - alloc->stop = alloc->start + (4096 / size) * size; - alloc->next = result + size; - alloc->flag_partial_page = false; - return result; -} - -object_t *stm_allocate(size_t size) -{ - assert(size % 8 == 0); - size_t i = size / 8; - assert(2 <= i && i < LARGE_OBJECT_WORDS);//XXX - alloc_for_size_t *alloc = &_STM_TL2->alloc[i]; - - localchar_t *p = alloc->next; - alloc->next = p + size; - if ((uint16_t)(uintptr_t)p == alloc->stop) - p = _stm_alloc_next_page(i); - - object_t *result = (object_t *)p; - result->write_version = _STM_TL1->transaction_write_version; - return result; -} - - -#define TOTAL_MEMORY (NB_PAGES * 4096UL * (NB_THREADS + 1)) -#define READMARKER_END ((NB_PAGES * 4096UL) >> 4) -#define FIRST_OBJECT_PAGE ((READMARKER_END + 4095) / 4096UL) -#define READMARKER_START ((FIRST_OBJECT_PAGE * 4096UL) >> 4) -#define FIRST_READMARKER_PAGE (READMARKER_START / 4096UL) - -void stm_setup(void) -{ - /* Check that some values are acceptable */ - assert(4096 <= ((uintptr_t)_STM_TL1)); - assert(((uintptr_t)_STM_TL1) == ((uintptr_t)_STM_TL2)); - assert(((uintptr_t)_STM_TL2) + sizeof(*_STM_TL2) <= 8192); - assert(2 <= FIRST_READMARKER_PAGE); - assert(FIRST_READMARKER_PAGE * 4096UL <= READMARKER_START); - assert(READMARKER_START < READMARKER_END); - assert(READMARKER_END <= 4096UL * FIRST_OBJECT_PAGE); - assert(FIRST_OBJECT_PAGE < NB_PAGES); - - object_pages = mmap(NULL, TOTAL_MEMORY, - PROT_READ | PROT_WRITE, - MAP_PAGES_FLAGS, -1, 0); - if (object_pages == MAP_FAILED) { - perror("object_pages mmap"); - abort(); - } - - long i; - for (i = 0; i < NB_THREADS; i++) { - char *thread_base = get_thread_base(i); - - /* In each thread's section, the first page is where TLPREFIX'ed - NULL accesses land. We mprotect it so that accesses fail. 
*/ - mprotect(thread_base, 4096, PROT_NONE); - - /* Fill the TLS page (page 1) with 0xDD */ - memset(REAL_ADDRESS(thread_base, 4096), 0xDD, 4096); - /* Make a "hole" at _STM_TL1 / _STM_TL2 */ - memset(REAL_ADDRESS(thread_base, _STM_TL2), 0, sizeof(*_STM_TL2)); - - /* Pages in range(2, FIRST_READMARKER_PAGE) are never used */ - if (FIRST_READMARKER_PAGE > 2) - mprotect(thread_base + 8192, (FIRST_READMARKER_PAGE - 2) * 4096UL, - PROT_NONE); - - _STM_TL2->thread_num = i; - _STM_TL2->thread_base = thread_base; - - if (i > 0) { - int res; - res = remap_file_pages(thread_base + FIRST_OBJECT_PAGE * 4096UL, - (NB_PAGES - FIRST_OBJECT_PAGE) * 4096UL, - 0, FIRST_OBJECT_PAGE, 0); - if (res != 0) { - perror("remap_file_pages"); - abort(); - } - } - } - - undo_log_pages = get_thread_base(NB_THREADS); - mprotect(undo_log_pages, 4096, PROT_NONE); - mprotect(undo_log_pages + (NB_PAGES - 1) * 4096UL, 4096, PROT_NONE); - undo_log_pages += 4096; - undo_log_current = undo_log_pages; - - num_threads_started = 0; - index_page_never_used = FIRST_OBJECT_PAGE; - next_write_version = 1; - leader_thread_num = 0; - global_history = NULL; - gh_write_version_first = 0xffff; - gh_write_version_last = 0; -} - -#define INVALID_GS_VALUE 0xDDDDDDDDDDDDDDDDUL - -static void set_gs_register(uint64_t value) -{ - int result = syscall(SYS_arch_prctl, ARCH_SET_GS, value); - assert(result == 0); -} - -void stm_setup_thread(void) -{ - int thread_num = __sync_fetch_and_add(&num_threads_started, 1); - assert(thread_num < 2); /* only 2 threads for now */ - - char *thread_base = get_thread_base(thread_num); - set_gs_register((uintptr_t)thread_base); - - assert(_STM_TL2->thread_num == thread_num); - assert(_STM_TL2->thread_base == thread_base); - - _STM_TL2->modified_objects = stm_list_create(); -} - -void _stm_teardown_thread(void) -{ - stm_list_free(_STM_TL2->modified_objects); - _STM_TL2->modified_objects = NULL; - - set_gs_register(INVALID_GS_VALUE); -} - -void _stm_teardown(void) -{ - munmap(object_pages, TOTAL_MEMORY); - object_pages = NULL; - undo_log_pages = NULL; - undo_log_current = NULL; -} - - -static void reset_transaction_read_version(void) -{ - /* force-reset all read markers to 0 */ - - /* XXX measure the time taken by this madvise() and the following - zeroing of pages done lazily by the kernel; compare it with using - 16-bit read_versions. - */ - /* XXX try to use madvise() on smaller ranges of memory. In my - measures, we could gain a factor 2 --- not really more, even if - the range of virtual addresses below is very large, as long as it - is already mostly non-reserved pages. (The following call keeps - them non-reserved; apparently the kernel just skips them very - quickly.) - */ - int res = madvise(real_address(FIRST_READMARKER_PAGE * 4096UL), - (FIRST_OBJECT_PAGE - FIRST_READMARKER_PAGE) * 4096UL, - MADV_DONTNEED); - if (res < 0) { - perror("madvise"); - abort(); - } - _STM_TL1->transaction_read_version = 0; -} - -void stm_major_collection(void) -{ - abort(); -} - -void stm_start_transaction(jmp_buf *jmpbufptr) -{ - if (_STM_TL1->transaction_read_version == 0xff) - reset_transaction_read_version(); - _STM_TL1->transaction_read_version++; - _STM_TL1->jmpbufptr = NULL; - - while (1) { - int wv = __sync_fetch_and_add(&next_write_version, 1); - if (LIKELY(wv <= 0xffff)) { - _STM_TL1->transaction_write_version = wv; - break; - } - /* We run out of 16-bit numbers before we do the next major - collection, which resets it. XXX This case seems unlikely - for now, but check if it could become a bottleneck at some - point. 
*/ - stm_major_collection(); - } - assert(stm_list_is_empty(_STM_TL2->modified_objects)); - assert(stm_list_is_empty(_STM_TL2->new_object_ranges)); - - maybe_update(CANNOT_CONFLICT); /* no read object: cannot conflict */ - - _STM_TL1->jmpbufptr = jmpbufptr; -} - -static void update_new_objects_in_other_threads(uintptr_t pagenum, - uint16_t start, uint16_t stop) -{ - size_t size = (uint16_t)(stop - start); - assert(size <= 4096 - (start & 4095)); - assert((start & ~4095) == (uint16_t)(pagenum * 4096)); - - int thread_num = _STM_TL2->thread_num; - uintptr_t local_src = (pagenum * 4096UL) + (start & 4095); - char *dst = REAL_ADDRESS(get_thread_base(1 - thread_num), local_src); - char *src = REAL_ADDRESS(_STM_TL2->thread_base, local_src); - - memcpy(dst, src, size); -} - -void stm_stop_transaction(void) -{ - write_fence(); /* see later in this function for why */ - - acquire_lock(&undo_lock); - - if (leader_thread_num != _STM_TL2->thread_num) { - /* non-leader thread */ - if (global_history != NULL) { - update_to_current_version(CAN_CONFLICT); - assert(global_history == NULL); - } - - /* steal leadership now */ - leader_thread_num = _STM_TL2->thread_num; - } - - /* now we are the leader thread. the leader can always commit */ - _STM_TL1->jmpbufptr = NULL; /* cannot abort any more */ - undo_log_current = undo_log_pages; /* throw away the content */ - - /* add these objects to the global_history */ - _STM_TL2->modified_objects->nextlist = global_history; - global_history = _STM_TL2->modified_objects; - _STM_TL2->modified_objects = stm_list_create(); - - uint16_t wv = _STM_TL1->transaction_write_version; - if (wv < gh_write_version_last) gh_write_version_last = wv; - if (wv > gh_write_version_first) gh_write_version_first = wv; - - /* walk the new_object_ranges and manually copy the new objects - to the other thread's pages in the (hopefully rare) case that - the page they belong to is already unshared */ - long i; - struct stm_list_s *lst = _STM_TL2->new_object_ranges; - for (i = stm_list_count(lst); i > 0; ) { - i -= 2; - uintptr_t pagenum = (uintptr_t)stm_list_item(lst, i); - - /* NB. the read next line should work even against a parallel - thread, thanks to the lock acquisition we do earlier (see the - beginning of this function). Indeed, if this read returns - SHARED_PAGE, then we know that the real value in memory was - actually SHARED_PAGE at least at the time of the - acquire_lock(). It may have been modified afterwards by a - compare_and_swap() in the other thread, but then we know for - sure that the other thread is seeing the last, up-to-date - version of our data --- this is the reason of the - write_fence() just before the acquire_lock(). - */ - if (flag_page_private[pagenum] != SHARED_PAGE) { - object_t *range = stm_list_item(lst, i + 1); - uint16_t start, stop; - FROM_RANGE(start, stop, range); - update_new_objects_in_other_threads(pagenum, start, stop); - } - } - - /* do the same for the partially-allocated pages */ - long j; - for (j = 2; j < LARGE_OBJECT_WORDS; j++) { - alloc_for_size_t *alloc = &_STM_TL2->alloc[j]; - uint16_t start = alloc->start; - uint16_t cur = (uintptr_t)alloc->next; - - if (start == cur) { - /* nothing to do: this page (or fraction thereof) was left - empty by the previous transaction, and starts empty as - well in the new transaction. 'flag_partial_page' is - unchanged. 
*/ - } - else { - uintptr_t pagenum = ((uintptr_t)(alloc->next - 1)) / 4096UL; - /* for the new transaction, it will start here: */ - alloc->start = cur; - - if (alloc->flag_partial_page) { - if (flag_page_private[pagenum] != SHARED_PAGE) { - update_new_objects_in_other_threads(pagenum, start, cur); - } - } - else { - /* we can skip checking flag_page_private[] in non-debug - builds, because the whole page can only contain - objects made by the just-finished transaction. */ - assert(flag_page_private[pagenum] == SHARED_PAGE); - - /* the next transaction will start with this page - containing objects that are now committed, so - we need to set this flag now */ - alloc->flag_partial_page = true; - } - } - } - - release_lock(&undo_lock); -} - -void stm_abort_transaction(void) -{ - long j; - for (j = 2; j < LARGE_OBJECT_WORDS; j++) { - alloc_for_size_t *alloc = &_STM_TL2->alloc[j]; - uint16_t num_allocated = ((uintptr_t)alloc->next) - alloc->start; - alloc->next -= num_allocated; - } - stm_list_clear(_STM_TL2->new_object_ranges); - stm_list_clear(_STM_TL2->modified_objects); - assert(_STM_TL1->jmpbufptr != NULL); - assert(_STM_TL1->jmpbufptr != (jmp_buf *)-1); /* for tests only */ - longjmp(*_STM_TL1->jmpbufptr, 1); -} - diff --git a/c7/core.h b/c7/core.h deleted file mode 100644 --- a/c7/core.h +++ /dev/null @@ -1,78 +0,0 @@ -#ifndef _STM_CORE_H -#define _STM_CORE_H - -#include -#include -#include - - -#define TLPREFIX __attribute__((address_space(256))) - -typedef TLPREFIX struct _thread_local1_s _thread_local1_t; -typedef TLPREFIX struct object_s object_t; -typedef TLPREFIX struct read_marker_s read_marker_t; - - -/* Structure of objects - -------------------- - - Objects manipulated by the user program, and managed by this library, - must start with a "struct object_s" field. Pointers to any user object - must use the "TLPREFIX struct foo *" type --- don't forget TLPREFIX. - The best is to use typedefs like above. - - The object_s part contains some fields reserved for the STM library, - as well as a 32-bit integer field that can be freely used by the user - program. However, right now this field must be read-only --- i.e. it - must never be modified on any object that may already belong to a - past transaction; you can only set it on just-allocated objects. The - best is to consider it as a field that is written to only once on - newly allocated objects. 
-*/ - -struct object_s { - uint16_t write_version; /* reserved for the STM library */ - /*uint8_t stm_flags;*/ - uint32_t header; /* for the user program -- only write in - newly allocated objects */ -}; - -struct read_marker_s { - uint8_t rm; -}; - -struct _thread_local1_s { - jmp_buf *jmpbufptr; - uint8_t transaction_read_version; - uint16_t transaction_write_version; -}; -#define _STM_TL1 ((_thread_local1_t *)4352) - - -/* this should use llvm's coldcc calling convention, - but it's not exposed to C code so far */ -void _stm_write_slowpath(object_t *); - -#define LIKELY(x) __builtin_expect(x, true) -#define UNLIKELY(x) __builtin_expect(x, false) - -/* invisible read, simply add to read-set */ -static inline void stm_read(object_t *obj) -{ - ((read_marker_t *)(((uintptr_t)obj) >> 4))->rm = - _STM_TL1->transaction_read_version; -} - -/* open object for writing, eagerly detects write-write conflicts */ -static inline void stm_write(object_t *obj) -{ - if (UNLIKELY(obj->write_version != _STM_TL1->transaction_write_version)) - _stm_write_slowpath(obj); -} - - -/* must be provided by the user of this library */ -extern size_t stm_object_size_rounded_up(object_t *); - - -#endif From noreply at buildbot.pypy.org Thu Feb 27 19:45:37 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Feb 2014 19:45:37 +0100 (CET) Subject: [pypy-commit] stmgc default: Start laying out the logic invoking major collections at the right time. Message-ID: <20140227184537.B80D91C03D5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r898:8a0cf5b15157 Date: 2014-02-27 19:45 +0100 http://bitbucket.org/pypy/stmgc/changeset/8a0cf5b15157/ Log: Start laying out the logic invoking major collections at the right time. diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -370,6 +370,10 @@ assert(STM_PSEGMENT->transaction_state != TS_MUST_ABORT); STM_SEGMENT->jmpbuf_ptr = NULL; + /* if a major collection is required, do it here */ + if (is_major_collection_requested()) + major_collection_now_at_safe_point(); + /* synchronize overflow objects living in privatized pages */ push_overflow_objects_from_privatized_pages(); diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -105,3 +105,42 @@ o->stm_flags = STM_FLAGS_PREBUILT; return o; } + + +/************************************************************/ + + +static void major_collection(bool forced) +{ + assert(!_has_mutex()); + if (!forced && !is_major_collection_requested()) + return; + + mutex_lock(); + + assert(STM_PSEGMENT->safe_point == SP_RUNNING); + STM_PSEGMENT->safe_point = SP_SAFE_POINT; + + while (forced || is_major_collection_requested()) { + /* wait until the other thread is at a safe-point */ + if (try_wait_for_other_safe_points()) { + /* ok */ + major_collection_now_at_safe_point(); + break; + } + } + + assert(STM_PSEGMENT->safe_point == SP_SAFE_POINT); + STM_PSEGMENT->safe_point = SP_RUNNING; + + mutex_unlock(); +} + +static void major_collection_now_at_safe_point(void) +{ + assert(_has_mutex()); + + fprintf(stderr, "hi, I should be doing a major GC here\n"); + + reset_major_collection_requested(); +} diff --git a/c7/stm/gcpage.h b/c7/stm/gcpage.h --- a/c7/stm/gcpage.h +++ b/c7/stm/gcpage.h @@ -9,6 +9,11 @@ #define GC_N_SMALL_REQUESTS 36 +/* More parameters fished directly from PyPy's default GC + XXX document me */ +#define GC_MIN (NB_NURSERY_PAGES * 4096 * 8) +#define GC_MAJOR_COLLECT 1.82 + static char *uninitialized_page_start; /* within segment 0 */ static char 
*uninitialized_page_stop; @@ -28,6 +33,9 @@ static void teardown_gcpage(void); static char *allocate_outside_nursery_large(uint64_t size); +static void major_collection(bool forced); +static void major_collection_now_at_safe_point(void); + static char *_allocate_small_slowpath(uint64_t size); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -282,8 +282,8 @@ void stm_collect(long level) { - assert(level == 0); minor_collection(/*commit=*/ false); + major_collection(/*forced=*/ level > 0); } @@ -309,13 +309,19 @@ return (object_t *)p; } - minor_collection(/*commit=*/ false); + stm_collect(0); goto restart; } object_t *_stm_allocate_external(ssize_t size_rounded_up) { - /* XXX force a minor/major collection if needed */ + /* first, force a collection if needed */ + if (is_major_collection_requested()) { + /* use stm_collect() with level 0: if another thread does a major GC + in-between, is_major_collection_requested() will become false + again, and we'll avoid doing yet another one afterwards. */ + stm_collect(0); + } char *result = allocate_outside_nursery_large(size_rounded_up); object_t *o = (object_t *)(result - stm_object_pages); diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -8,12 +8,20 @@ static union { struct { uint8_t mutex_pages; + bool major_collection_requested; uint64_t total_allocated; /* keep track of how much memory we're using, ignoring nurseries */ + uint64_t total_allocated_bound; }; char reserved[64]; } pages_ctl __attribute__((aligned(64))); + +static void setup_pages(void) +{ + pages_ctl.total_allocated_bound = GC_MIN; +} + static void teardown_pages(void) { memset(&pages_ctl, 0, sizeof(pages_ctl)); @@ -41,9 +49,31 @@ { assert(_has_mutex_pages()); pages_ctl.total_allocated += add_or_remove; + + if (pages_ctl.total_allocated >= pages_ctl.total_allocated_bound) + pages_ctl.major_collection_requested = true; + return pages_ctl.total_allocated; } +static bool is_major_collection_requested(void) +{ + return pages_ctl.major_collection_requested; +} + +static void reset_major_collection_requested(void) +{ + assert(_has_mutex()); + + uint64_t next_bound = (uint64_t)((double)pages_ctl.total_allocated * + GC_MAJOR_COLLECT); + if (next_bound < GC_MIN) + next_bound = GC_MIN; + + pages_ctl.total_allocated_bound = next_bound; + pages_ctl.major_collection_requested = false; +} + /************************************************************/ diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -24,6 +24,8 @@ static void mutex_pages_lock(void); static void mutex_pages_unlock(void); static uint64_t increment_total_allocated(ssize_t add_or_remove); +static bool is_major_collection_requested(void); +static void reset_major_collection_requested(void); inline static void pages_privatize(uintptr_t pagenum, uintptr_t count, bool full) { diff --git a/c7/stm/setup.c b/c7/stm/setup.c --- a/c7/stm/setup.c +++ b/c7/stm/setup.c @@ -77,6 +77,7 @@ setup_sync(); setup_nursery(); setup_gcpage(); + setup_pages(); } void stm_teardown(void) diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -240,7 +240,7 @@ #endif -static void wait_for_other_safe_points(void) +static bool try_wait_for_other_safe_points(void) { /* Must be called with the mutex. When all other threads are in a safe point of at least the requested kind, returns. 
Otherwise, @@ -257,11 +257,10 @@ This function requires that the calling thread is in a safe-point right now, so there is no deadlock if one thread calls - wait_for_other_safe_points() while another is currently blocked + try_wait_for_other_safe_points() while another is currently blocked in the cond_wait() in this same function. */ - restart: assert(_has_mutex()); assert(STM_PSEGMENT->safe_point == SP_SAFE_POINT); @@ -290,12 +289,20 @@ if (wait) { cond_wait(C_SAFE_POINT); - goto restart; + return false; } /* all threads are at a safe-point now. Broadcast C_RESUME, which will allow them to resume --- but only when we release the mutex. */ cond_broadcast(C_RESUME); + return true; +} + +static void wait_for_other_safe_points(void) +{ + while (!try_wait_for_other_safe_points()) { + /* loop */ + } } void _stm_collectable_safe_point(void) diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -28,4 +28,5 @@ /* see the source for an exact description */ static void wait_for_other_safe_points(void); +static bool try_wait_for_other_safe_points(void); static void collectable_safe_point(void); From noreply at buildbot.pypy.org Thu Feb 27 19:51:54 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Feb 2014 19:51:54 +0100 (CET) Subject: [pypy-commit] stmgc default: Improve the logic behind stm_collect(1) Message-ID: <20140227185154.80F201C03D5@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r899:410c7a4975d0 Date: 2014-02-27 19:51 +0100 http://bitbucket.org/pypy/stmgc/changeset/410c7a4975d0/ Log: Improve the logic behind stm_collect(1) diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -110,10 +110,10 @@ /************************************************************/ -static void major_collection(bool forced) +static void major_collection_if_requested(void) { assert(!_has_mutex()); - if (!forced && !is_major_collection_requested()) + if (!is_major_collection_requested()) return; mutex_lock(); @@ -121,7 +121,7 @@ assert(STM_PSEGMENT->safe_point == SP_RUNNING); STM_PSEGMENT->safe_point = SP_SAFE_POINT; - while (forced || is_major_collection_requested()) { + while (is_major_collection_requested()) { /* wait until the other thread is at a safe-point */ if (try_wait_for_other_safe_points()) { /* ok */ diff --git a/c7/stm/gcpage.h b/c7/stm/gcpage.h --- a/c7/stm/gcpage.h +++ b/c7/stm/gcpage.h @@ -33,7 +33,7 @@ static void teardown_gcpage(void); static char *allocate_outside_nursery_large(uint64_t size); -static void major_collection(bool forced); +static void major_collection_if_requested(void); static void major_collection_now_at_safe_point(void); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -282,8 +282,11 @@ void stm_collect(long level) { + if (level > 0) + force_major_collection_request(); + minor_collection(/*commit=*/ false); - major_collection(/*forced=*/ level > 0); + major_collection_if_requested(); } diff --git a/c7/stm/pages.c b/c7/stm/pages.c --- a/c7/stm/pages.c +++ b/c7/stm/pages.c @@ -8,7 +8,7 @@ static union { struct { uint8_t mutex_pages; - bool major_collection_requested; + volatile bool major_collection_requested; uint64_t total_allocated; /* keep track of how much memory we're using, ignoring nurseries */ uint64_t total_allocated_bound; @@ -61,6 +61,11 @@ return pages_ctl.major_collection_requested; } +static void force_major_collection_request(void) +{ + pages_ctl.major_collection_requested = true; +} + static void 
reset_major_collection_requested(void) { assert(_has_mutex()); diff --git a/c7/stm/pages.h b/c7/stm/pages.h --- a/c7/stm/pages.h +++ b/c7/stm/pages.h @@ -25,6 +25,7 @@ static void mutex_pages_unlock(void); static uint64_t increment_total_allocated(ssize_t add_or_remove); static bool is_major_collection_requested(void); +static void force_major_collection_request(void); static void reset_major_collection_requested(void); inline static void pages_privatize(uintptr_t pagenum, uintptr_t count, From noreply at buildbot.pypy.org Thu Feb 27 20:21:15 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 27 Feb 2014 20:21:15 +0100 (CET) Subject: [pypy-commit] pypy remove-remaining-smm: kill Message-ID: <20140227192115.5B20B1C03D5@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: remove-remaining-smm Changeset: r69525:9ab1160b829c Date: 2014-02-27 11:20 -0800 http://bitbucket.org/pypy/pypy/changeset/9ab1160b829c/ Log: kill diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -141,7 +141,6 @@ __complex__ to actually be a complex (and not e.g. a float). See test___complex___returning_non_complex. """ - from pypy.objspace.std.complexobject import W_ComplexObject if type(w_complex) is W_ComplexObject: return (w_complex.realval, w_complex.imagval) # @@ -297,8 +296,6 @@ @staticmethod @unwrap_spec(w_real=WrappedDefault(0.0)) def descr__new__(space, w_complextype, w_real, w_imag=None): - from pypy.objspace.std.complexobject import W_ComplexObject - # if w_real is already a complex number and there is no second # argument, return it. Note that we cannot return w_real if # it is an instance of a *subclass* of complex, or if w_complextype @@ -588,7 +585,6 @@ def complexwprop(name): def fget(space, w_obj): - from pypy.objspace.std.complexobject import W_ComplexObject if not isinstance(w_obj, W_ComplexObject): raise oefmt(space.w_TypeError, "descriptor is for 'complex'") return space.newfloat(getattr(w_obj, name)) From noreply at buildbot.pypy.org Thu Feb 27 20:24:55 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Feb 2014 20:24:55 +0100 (CET) Subject: [pypy-commit] stmgc default: Add a comment: think more Message-ID: <20140227192455.8408A1C03FC@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r900:9d9a01ab921c Date: 2014-02-27 20:24 +0100 http://bitbucket.org/pypy/stmgc/changeset/9d9a01ab921c/ Log: Add a comment: think more diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -289,6 +289,12 @@ if (wait) { cond_wait(C_SAFE_POINT); + /* XXX think: I believe this can end in a busy-loop, with this thread + setting NSE_SIGNAL on the other thread; then the other thread + commits, sends C_SAFE_POINT, finish the transaction, start + the next one, and only then this thread resumes; then we're back + in the same situation as before with no progress here. + */ return false; } From noreply at buildbot.pypy.org Thu Feb 27 20:51:52 2014 From: noreply at buildbot.pypy.org (arigo) Date: Thu, 27 Feb 2014 20:51:52 +0100 (CET) Subject: [pypy-commit] stmgc default: Step 1 is to force minor collections to occur in all segments. Message-ID: <20140227195152.77B581C35DA@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r901:160d2a6843f3 Date: 2014-02-27 20:51 +0100 http://bitbucket.org/pypy/stmgc/changeset/160d2a6843f3/ Log: Step 1 is to force minor collections to occur in all segments. 
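A rough sketch of the collection-trigger heuristic that the stmgc changesets above put in place (an illustration written for this archive, not code from the changesets: the names GC_MIN, GC_MAJOR_COLLECT, total_allocated, total_allocated_bound and major_collection_requested are taken from the diffs, while the concrete GC_MIN value, the function names and the standalone framing are assumed):

    #include <stdint.h>
    #include <stdbool.h>

    #define GC_MIN           (16UL * 1024 * 1024)   /* assumed; the diff uses NB_NURSERY_PAGES * 4096 * 8 */
    #define GC_MAJOR_COLLECT 1.82

    static uint64_t total_allocated;                 /* bytes allocated outside the nurseries */
    static uint64_t total_allocated_bound = GC_MIN;
    static bool     major_collection_requested;

    /* called, with the pages mutex held, whenever memory is allocated or freed */
    static void account_allocated(int64_t add_or_remove)
    {
        total_allocated += add_or_remove;
        if (total_allocated >= total_allocated_bound)
            major_collection_requested = true;       /* honoured at the next safe point */
    }

    /* called once a major collection has actually been done */
    static void reset_collection_bound(void)
    {
        uint64_t next_bound = (uint64_t)((double)total_allocated * GC_MAJOR_COLLECT);
        if (next_bound < GC_MIN)
            next_bound = GC_MIN;
        total_allocated_bound = next_bound;          /* ~1.82x of what is still allocated */
        major_collection_requested = false;
    }

In words: the request flag is raised as soon as the total allocated outside the nurseries crosses the current bound, and after each major collection the bound is reset to GC_MAJOR_COLLECT times whatever survived, never below GC_MIN.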
diff --git a/c7/stm/gcpage.c b/c7/stm/gcpage.c --- a/c7/stm/gcpage.c +++ b/c7/stm/gcpage.c @@ -138,8 +138,15 @@ static void major_collection_now_at_safe_point(void) { + dprintf((" .----- major_collection_now_at_safe_point -----\n")); assert(_has_mutex()); + /* first, force a minor collection in each of the other segments */ + major_do_minor_collections(); + + dprintf((" | used before collection: %ld\n", + (long)pages_ctl.total_allocated)); + fprintf(stderr, "hi, I should be doing a major GC here\n"); reset_major_collection_requested(); diff --git a/c7/stm/nursery.c b/c7/stm/nursery.c --- a/c7/stm/nursery.c +++ b/c7/stm/nursery.c @@ -231,21 +231,18 @@ } } -static void minor_collection(bool commit) +#define MINOR_NOTHING_TO_DO(pseg) \ + ((pseg)->pub.nursery_current == (stm_char *)_stm_nursery_start && \ + tree_is_cleared((pseg)->young_outside_nursery)) + + +static void _do_minor_collection(bool commit) { - assert(!_has_mutex()); - - stm_safe_point(); - abort_if_needed(); - /* We must move out of the nursery any object found within the nursery. All objects touched are either from the current transaction, or are from 'modified_old_objects'. In all cases, we should only read and change objects belonging to the current segment. - - XXX improve: it might be possible to run this function in - a safe-point but without the mutex, if we are careful */ dprintf(("minor_collection commit=%d\n", (int)commit)); @@ -277,9 +274,20 @@ throw_away_nursery(); + assert(MINOR_NOTHING_TO_DO(STM_PSEGMENT)); assert(list_is_empty(STM_PSEGMENT->objects_pointing_to_nursery)); } +static void minor_collection(bool commit) +{ + assert(!_has_mutex()); + + stm_safe_point(); + abort_if_needed(); + + _do_minor_collection(commit); +} + void stm_collect(long level) { if (level > 0) @@ -364,3 +372,24 @@ } #endif } + +static void major_do_minor_collections(void) +{ + int original_num = STM_SEGMENT->segment_num; + long i; + + for (i = 0; i < NB_SEGMENTS; i++) { + struct stm_priv_segment_info_s *pseg = get_priv_segment(i); + if (MINOR_NOTHING_TO_DO(pseg)) /*TS_NONE segments have NOTHING_TO_DO*/ + continue; + + assert(pseg->transaction_state != TS_NONE); + assert(pseg->safe_point == SP_SAFE_POINT); + + set_gs_register(get_segment_base(i)); + _do_minor_collection(/*commit=*/ false); + assert(MINOR_NOTHING_TO_DO(pseg)); + } + + set_gs_register(get_segment_base(original_num)); +} diff --git a/c7/stm/nursery.h b/c7/stm/nursery.h --- a/c7/stm/nursery.h +++ b/c7/stm/nursery.h @@ -8,3 +8,4 @@ static void minor_collection(bool commit); static void check_nursery_at_transaction_start(void); static void throw_away_nursery(void); +static void major_do_minor_collections(void); diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -19,6 +19,7 @@ #ifndef NDEBUG static bool _has_mutex(void); #endif +static void set_gs_register(char *value); /* acquire and release one of the segments for running the given thread (must have the mutex acquired!) 
*/ diff --git a/c7/test/support.py b/c7/test/support.py --- a/c7/test/support.py +++ b/c7/test/support.py @@ -342,6 +342,9 @@ def stm_minor_collect(): lib.stm_collect(0) +def stm_major_collect(): + lib.stm_collect(1) + def stm_get_page_flag(pagenum): return lib._stm_get_page_flag(pagenum) diff --git a/c7/test/test_gcpage.py b/c7/test/test_gcpage.py --- a/c7/test/test_gcpage.py +++ b/c7/test/test_gcpage.py @@ -87,3 +87,18 @@ assert stm_get_page_flag(stm_get_obj_pages(newer)[0]) == SHARED_PAGE assert stm_get_flags(newer) & GCFLAG_WRITE_BARRIER + + def test_major_collection(self): + self.start_transaction() + new = stm_allocate(5000) + self.push_root(new) + stm_minor_collect() + assert 5000 <= lib._stm_total_allocated() <= 8192 + + self.pop_root() + stm_minor_collect() + assert 5000 <= lib._stm_total_allocated() <= 8192 + + stm_major_collect() + py.test.skip("in-progress") + assert lib._stm_total_allocated() == 0 From noreply at buildbot.pypy.org Thu Feb 27 21:09:29 2014 From: noreply at buildbot.pypy.org (pjenvey) Date: Thu, 27 Feb 2014 21:09:29 +0100 (CET) Subject: [pypy-commit] pypy kill-multimethod: kill builtinshortcut remnants Message-ID: <20140227200929.AEBCC1C03D5@cobra.cs.uni-duesseldorf.de> Author: Philip Jenvey Branch: kill-multimethod Changeset: r69526:9152b9f48bd0 Date: 2014-02-27 12:08 -0800 http://bitbucket.org/pypy/pypy/changeset/9152b9f48bd0/ Log: kill builtinshortcut remnants diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -257,10 +257,6 @@ BoolOption("optimized_list_getitem", "special case the 'list[integer]' expressions", default=False), - BoolOption("builtinshortcut", - "a shortcut for operations between built-in types. XXX: " - "deprecated, not really a shortcut any more.", - default=False), BoolOption("getattributeshortcut", "track types that override __getattribute__", default=False, diff --git a/pypy/doc/config/objspace.std.builtinshortcut.txt b/pypy/doc/config/objspace.std.builtinshortcut.txt deleted file mode 100644 --- a/pypy/doc/config/objspace.std.builtinshortcut.txt +++ /dev/null @@ -1,5 +0,0 @@ -A shortcut speeding up primitive operations between built-in types. - -This is a space-time trade-off: at the moment, this option makes a -translated pypy-c executable bigger by about 1.7 MB. (This can probably -be improved with careful analysis.) 
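For reference, a minimal usage sketch of the stm_collect() entry point reworked by the stmgc changeset further up, which the new helpers in c7/test/support.py call with 0 and 1 (only stm_collect() itself comes from the diffs; the header name and the wrapper function here are assumptions made for illustration):

    #include "stmgc.h"

    static void collect_example(void)
    {
        stm_collect(0);   /* minor collection; a major GC runs only if the
                             allocation bound has already requested one */
        stm_collect(1);   /* forces a major-collection request, then collects */
    }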
diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -1033,9 +1033,3 @@ b = a * 13 assert len(b) == 13 assert str(b[12]) == "-0.0" - - -class AppTestArrayBuiltinShortcut(AppTestArray): - spaceconfig = AppTestArray.spaceconfig.copy() - spaceconfig['objspace.std.builtinshortcut'] = True - diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -180,7 +180,6 @@ if not space.isinstance_w(w_other, space.w_set): return space.w_False - # tested in test_builtinshortcut.py # XXX do not make new setobject here w_other_as_set = self._newobj(space, w_other) return space.wrap(self.equals(w_other_as_set)) From noreply at buildbot.pypy.org Fri Feb 28 00:41:14 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 28 Feb 2014 00:41:14 +0100 (CET) Subject: [pypy-commit] pypy default: cleanups Message-ID: <20140227234114.37B4F1C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69527:88373c4f78f1 Date: 2014-02-27 18:33 -0500 http://bitbucket.org/pypy/pypy/changeset/88373c4f78f1/ Log: cleanups diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -8,7 +8,7 @@ from pypy.module.micronumpy import support, loop from pypy.module.micronumpy.base import convert_to_array, W_NDimArray, \ ArrayArgumentException -from pypy.module.micronumpy.iterators import ArrayIterator, AxisIterator +from pypy.module.micronumpy.iterators import ArrayIter from pypy.module.micronumpy.strides import (Chunk, Chunks, NewAxisChunk, RecordChunk, calc_strides, calc_new_strides, shape_agreement, calculate_broadcast_strides, calculate_dot_strides) @@ -284,19 +284,14 @@ self.get_backstrides(), self.get_shape(), shape, backward_broadcast) - return ArrayIterator(self, support.product(shape), shape, - r[0], r[1]) - return ArrayIterator(self, self.get_size(), self.shape, - self.strides, self.backstrides) - - def create_axis_iter(self, shape, dim, cum): - return AxisIterator(self, shape, dim, cum) + return ArrayIter(self, support.product(shape), shape, r[0], r[1]) + return ArrayIter(self, self.get_size(), self.shape, + self.strides, self.backstrides) def create_dot_iter(self, shape, skip): r = calculate_dot_strides(self.get_strides(), self.get_backstrides(), shape, skip) - return ArrayIterator(self, support.product(shape), shape, - r[0], r[1]) + return ArrayIter(self, support.product(shape), shape, r[0], r[1]) def swapaxes(self, space, orig_arr, axis1, axis2): shape = self.get_shape()[:] diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -45,7 +45,7 @@ from pypy.module.micronumpy.base import W_NDimArray -class PureShapeIterator(object): +class PureShapeIter(object): def __init__(self, shape, idx_w): self.shape = shape self.shapelen = len(shape) @@ -78,7 +78,7 @@ return [space.wrap(self.indexes[i]) for i in range(shapelen)] -class ArrayIterator(object): +class ArrayIter(object): _immutable_fields_ = ['array', 'size', 'ndim_m1', 'shape_m1', 'strides', 'backstrides'] @@ -141,7 +141,7 @@ self.array.setitem(self.offset, elem) -def AxisIterator(array, shape, axis, cumulative): +def AxisIter(array, shape, axis, cumulative): strides = array.get_strides() backstrides = 
array.get_backstrides() if not cumulative: @@ -152,14 +152,14 @@ else: strides = strides[:axis] + [0] + strides[axis:] backstrides = backstrides[:axis] + [0] + backstrides[axis:] - return ArrayIterator(array, support.product(shape), shape, strides, backstrides) + return ArrayIter(array, support.product(shape), shape, strides, backstrides) -def AllButAxisIterator(array, axis): +def AllButAxisIter(array, axis): size = array.get_size() shape = array.get_shape()[:] backstrides = array.backstrides[:] if size: size /= shape[axis] shape[axis] = backstrides[axis] = 0 - return ArrayIterator(array, size, shape, array.strides, backstrides) + return ArrayIter(array, size, shape, array.strides, backstrides) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -8,7 +8,7 @@ from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module.micronumpy import support, constants as NPY from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy.iterators import PureShapeIterator +from pypy.module.micronumpy.iterators import PureShapeIter, AxisIter call2_driver = jit.JitDriver(name='numpy_call2', @@ -203,9 +203,9 @@ def do_axis_reduce(space, shape, func, arr, dtype, axis, out, identity, cumulative, temp): - out_iter = out.create_axis_iter(arr.get_shape(), axis, cumulative) + out_iter = AxisIter(out.implementation, arr.get_shape(), axis, cumulative) if cumulative: - temp_iter = temp.create_axis_iter(arr.get_shape(), axis, False) + temp_iter = AxisIter(temp.implementation, arr.get_shape(), axis, False) else: temp_iter = out_iter # hack arr_iter = arr.create_iter() @@ -286,9 +286,9 @@ right_skip = range(len(left_shape) - 1) result_skip = [len(result.get_shape()) - (len(right_shape) > 1)] assert result.get_dtype() == dtype - outi = result.create_dot_iter(broadcast_shape, result_skip) - lefti = left.create_dot_iter(broadcast_shape, left_skip) - righti = right.create_dot_iter(broadcast_shape, right_skip) + outi = result.implementation.create_dot_iter(broadcast_shape, result_skip) + lefti = left.implementation.create_dot_iter(broadcast_shape, left_skip) + righti = right.implementation.create_dot_iter(broadcast_shape, right_skip) while not outi.done(): dot_driver.jit_merge_point(dtype=dtype) lval = lefti.getitem().convert_to(space, dtype) @@ -476,7 +476,7 @@ prefixlen = len(prefix_w) indexlen = len(indexes_w) dtype = arr.get_dtype() - iter = PureShapeIterator(iter_shape, indexes_w) + iter = PureShapeIter(iter_shape, indexes_w) indexlen = len(indexes_w) while not iter.done(): getitem_int_driver.jit_merge_point(shapelen=shapelen, indexlen=indexlen, @@ -505,7 +505,7 @@ indexlen = len(indexes_w) prefixlen = len(prefix_w) dtype = arr.get_dtype() - iter = PureShapeIterator(iter_shape, indexes_w) + iter = PureShapeIter(iter_shape, indexes_w) while not iter.done(): setitem_int_driver.jit_merge_point(shapelen=shapelen, indexlen=indexlen, dtype=dtype, prefixlen=prefixlen) @@ -630,7 +630,7 @@ def diagonal_array(space, arr, out, offset, axis1, axis2, shape): out_iter = out.create_iter() - iter = PureShapeIterator(shape, []) + iter = PureShapeIter(shape, []) shapelen_minus_1 = len(shape) - 1 assert shapelen_minus_1 >= 0 if axis1 < axis2: diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -286,12 +286,6 @@ return self.implementation.create_iter( shape=shape, 
backward_broadcast=backward_broadcast) - def create_axis_iter(self, shape, dim, cum): - return self.implementation.create_axis_iter(shape, dim, cum) - - def create_dot_iter(self, shape, skip): - return self.implementation.create_dot_iter(shape, skip) - def is_scalar(self): return len(self.get_shape()) == 0 diff --git a/pypy/module/micronumpy/sort.py b/pypy/module/micronumpy/sort.py --- a/pypy/module/micronumpy/sort.py +++ b/pypy/module/micronumpy/sort.py @@ -8,7 +8,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.micronumpy import descriptor, types, constants as NPY from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy.iterators import AllButAxisIterator +from pypy.module.micronumpy.iterators import AllButAxisIter INT_SIZE = rffi.sizeof(lltype.Signed) @@ -148,9 +148,9 @@ if axis < 0 or axis >= len(shape): raise OperationError(space.w_IndexError, space.wrap( "Wrong axis %d" % axis)) - arr_iter = AllButAxisIterator(arr, axis) + arr_iter = AllButAxisIter(arr, axis) index_impl = index_arr.implementation - index_iter = AllButAxisIterator(index_impl, axis) + index_iter = AllButAxisIter(index_impl, axis) stride_size = arr.strides[axis] index_stride_size = index_impl.strides[axis] axis_size = arr.shape[axis] @@ -293,7 +293,7 @@ if axis < 0 or axis >= len(shape): raise OperationError(space.w_IndexError, space.wrap( "Wrong axis %d" % axis)) - arr_iter = AllButAxisIterator(arr, axis) + arr_iter = AllButAxisIter(arr, axis) stride_size = arr.strides[axis] axis_size = arr.shape[axis] while not arr_iter.done(): diff --git a/pypy/module/micronumpy/test/test_iterators.py b/pypy/module/micronumpy/test/test_iterators.py --- a/pypy/module/micronumpy/test/test_iterators.py +++ b/pypy/module/micronumpy/test/test_iterators.py @@ -1,5 +1,5 @@ from pypy.module.micronumpy import support -from pypy.module.micronumpy.iterators import ArrayIterator +from pypy.module.micronumpy.iterators import ArrayIter class MockArray(object): @@ -14,8 +14,8 @@ strides = [5, 1] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [10, 4] - i = ArrayIterator(MockArray, support.product(shape), shape, - strides, backstrides) + i = ArrayIter(MockArray, support.product(shape), shape, + strides, backstrides) i.next() i.next() i.next() @@ -33,8 +33,8 @@ strides = [1, 3] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [2, 12] - i = ArrayIterator(MockArray, support.product(shape), shape, - strides, backstrides) + i = ArrayIter(MockArray, support.product(shape), shape, + strides, backstrides) i.next() i.next() i.next() @@ -54,8 +54,8 @@ strides = [5, 1] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [10, 4] - i = ArrayIterator(MockArray, support.product(shape), shape, - strides, backstrides) + i = ArrayIter(MockArray, support.product(shape), shape, + strides, backstrides) i.next_skip_x(2) i.next_skip_x(2) i.next_skip_x(2) @@ -78,8 +78,8 @@ strides = [1, 3] backstrides = [x * (y - 1) for x,y in zip(strides, shape)] assert backstrides == [2, 12] - i = ArrayIterator(MockArray, support.product(shape), shape, - strides, backstrides) + i = ArrayIter(MockArray, support.product(shape), shape, + strides, backstrides) i.next_skip_x(2) i.next_skip_x(2) i.next_skip_x(2) From noreply at buildbot.pypy.org Fri Feb 28 00:41:15 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 28 Feb 2014 00:41:15 +0100 (CET) Subject: [pypy-commit] pypy default: fix whatsnew Message-ID: 
<20140227234115.6D2631C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69528:5fda2607f040 Date: 2014-02-27 18:35 -0500 http://bitbucket.org/pypy/pypy/changeset/5fda2607f040/ Log: fix whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -85,3 +85,6 @@ .. branch: remove-intlong-smm kills int/long/smalllong/bool multimethods + +.. branch: numpy-refactor +Cleanup micronumpy module From noreply at buildbot.pypy.org Fri Feb 28 00:58:37 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 28 Feb 2014 00:58:37 +0100 (CET) Subject: [pypy-commit] pypy default: update dot bench script Message-ID: <20140227235837.CCC171C3656@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69529:dd054f91e29d Date: 2014-02-27 15:57 -0800 http://bitbucket.org/pypy/pypy/changeset/dd054f91e29d/ Log: update dot bench script diff --git a/pypy/module/micronumpy/bench/dot.py b/pypy/module/micronumpy/bench/dot.py --- a/pypy/module/micronumpy/bench/dot.py +++ b/pypy/module/micronumpy/bench/dot.py @@ -1,28 +1,32 @@ +import sys import time try: - import numpypy + import numpypy as numpy except ImportError: - pass + import numpy -import numpy - -def get_matrix(): +def get_matrix(n): import random - n = 502 x = numpy.zeros((n,n), dtype=numpy.float64) for i in range(n): for j in range(n): x[i][j] = random.random() return x -def main(): - x = get_matrix() - y = get_matrix() +def main(n, r): + x = get_matrix(n) + y = get_matrix(n) a = time.time() - #z = numpy.dot(x, y) # uses numpy possibly-blas-lib dot - z = numpy.core.multiarray.dot(x, y) # uses strictly numpy C dot + for _ in xrange(r): + #z = numpy.dot(x, y) # uses numpy possibly-blas-lib dot + z = numpy.core.multiarray.dot(x, y) # uses strictly numpy C dot b = time.time() - print '%.2f seconds' % (b-a) + print '%d runs, %.2f seconds' % (r, b-a) -main() +n = int(sys.argv[1]) +try: + r = int(sys.argv[2]) +except IndexError: + r = 1 +main(n, r) diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -115,7 +115,7 @@ if step == 0: return self.index += step - for i in range(self.ndim_m1, -1, -1): + for i in xrange(self.ndim_m1, -1, -1): if self.indices[i] < (self.shape_m1[i] + 1) - step: self.indices[i] += step self.offset += self.strides[i] * step From noreply at buildbot.pypy.org Fri Feb 28 02:13:37 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 28 Feb 2014 02:13:37 +0100 (CET) Subject: [pypy-commit] pypy default: optimize multidim_dot loop Message-ID: <20140228011337.6328F1C03D5@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69530:e431aa28d934 Date: 2014-02-27 20:01 -0500 http://bitbucket.org/pypy/pypy/changeset/e431aa28d934/ Log: optimize multidim_dot loop diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -8,7 +8,8 @@ from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module.micronumpy import support, constants as NPY from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy.iterators import PureShapeIter, AxisIter +from pypy.module.micronumpy.iterators import PureShapeIter, AxisIter, \ + AllButAxisIter call2_driver = jit.JitDriver(name='numpy_call2', @@ -259,7 +260,6 @@ argmin = _new_argmin_argmax('min') argmax = _new_argmin_argmax('max') -# 
note that shapelen == 2 always dot_driver = jit.JitDriver(name = 'numpy_dot', greens = ['dtype'], reds = 'auto') @@ -280,25 +280,30 @@ ''' left_shape = left.get_shape() right_shape = right.get_shape() - broadcast_shape = left_shape[:-1] + right_shape - left_skip = [len(left_shape) - 1 + i for i in range(len(right_shape)) - if i != right_critical_dim] - right_skip = range(len(left_shape) - 1) - result_skip = [len(result.get_shape()) - (len(right_shape) > 1)] + assert left_shape[-1] == right_shape[right_critical_dim] assert result.get_dtype() == dtype - outi = result.implementation.create_dot_iter(broadcast_shape, result_skip) - lefti = left.implementation.create_dot_iter(broadcast_shape, left_skip) - righti = right.implementation.create_dot_iter(broadcast_shape, right_skip) - while not outi.done(): - dot_driver.jit_merge_point(dtype=dtype) - lval = lefti.getitem().convert_to(space, dtype) - rval = righti.getitem().convert_to(space, dtype) - outval = outi.getitem() - v = dtype.itemtype.mul(lval, rval) - v = dtype.itemtype.add(v, outval) - outi.setitem(v) - outi.next() - righti.next() + outi = result.create_iter() + lefti = AllButAxisIter(left.implementation, len(left_shape) - 1) + righti = AllButAxisIter(right.implementation, right_critical_dim) + n = left.implementation.shape[-1] + s1 = left.implementation.strides[-1] + s2 = right.implementation.strides[right_critical_dim] + while not lefti.done(): + while not righti.done(): + oval = outi.getitem() + i1 = lefti.offset + i2 = righti.offset + for _ in xrange(n): + dot_driver.jit_merge_point(dtype=dtype) + lval = left.implementation.getitem(i1).convert_to(space, dtype) + rval = right.implementation.getitem(i2).convert_to(space, dtype) + oval = dtype.itemtype.add(oval, dtype.itemtype.mul(lval, rval)) + i1 += s1 + i2 += s2 + outi.setitem(oval) + outi.next() + righti.next() + righti.reset() lefti.next() return result diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -41,8 +41,7 @@ a[0] = 0 assert (b == [1, 1, 1, 0, 0]).all() - - def test_dot(self): + def test_dot_basic(self): from numpypy import array, dot, arange a = array(range(5)) assert dot(a, a) == 30.0 @@ -69,7 +68,7 @@ assert b.shape == (4, 3) c = dot(a, b) assert (c == [[[14, 38, 62], [38, 126, 214], [62, 214, 366]], - [[86, 302, 518], [110, 390, 670], [134, 478, 822]]]).all() + [[86, 302, 518], [110, 390, 670], [134, 478, 822]]]).all() c = dot(a, b[:, 2]) assert (c == [[62, 214, 366], [518, 670, 822]]).all() a = arange(3*2*6).reshape((3,2,6)) From noreply at buildbot.pypy.org Fri Feb 28 02:13:38 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 28 Feb 2014 02:13:38 +0100 (CET) Subject: [pypy-commit] pypy default: kill obsolete dot helpers Message-ID: <20140228011338.A1B5B1C03D5@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69531:bec36abe6311 Date: 2014-02-27 20:04 -0500 http://bitbucket.org/pypy/pypy/changeset/bec36abe6311/ Log: kill obsolete dot helpers diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -11,7 +11,7 @@ from pypy.module.micronumpy.iterators import ArrayIter from pypy.module.micronumpy.strides import (Chunk, Chunks, NewAxisChunk, RecordChunk, calc_strides, calc_new_strides, shape_agreement, - calculate_broadcast_strides, calculate_dot_strides) + 
calculate_broadcast_strides) class BaseConcreteArray(object): @@ -288,11 +288,6 @@ return ArrayIter(self, self.get_size(), self.shape, self.strides, self.backstrides) - def create_dot_iter(self, shape, skip): - r = calculate_dot_strides(self.get_strides(), self.get_backstrides(), - shape, skip) - return ArrayIter(self, support.product(shape), shape, r[0], r[1]) - def swapaxes(self, space, orig_arr, axis1, axis2): shape = self.get_shape()[:] strides = self.get_strides()[:] diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -421,18 +421,3 @@ n_old_elems_to_use *= old_shape[oldI] assert len(new_strides) == len(new_shape) return new_strides[:] - - -def calculate_dot_strides(strides, backstrides, res_shape, skip_dims): - rstrides = [0] * len(res_shape) - rbackstrides = [0] * len(res_shape) - j = 0 - for i in range(len(res_shape)): - if i in skip_dims: - rstrides[i] = 0 - rbackstrides[i] = 0 - else: - rstrides[i] = strides[j] - rbackstrides[i] = backstrides[j] - j += 1 - return rstrides, rbackstrides From noreply at buildbot.pypy.org Fri Feb 28 02:42:00 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 28 Feb 2014 02:42:00 +0100 (CET) Subject: [pypy-commit] pypy numpypy-ellipse-indexing: close abandoned branch Message-ID: <20140228014200.1F6831C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpypy-ellipse-indexing Changeset: r69532:d19334a5b622 Date: 2014-02-27 20:40 -0500 http://bitbucket.org/pypy/pypy/changeset/d19334a5b622/ Log: close abandoned branch From noreply at buildbot.pypy.org Fri Feb 28 02:42:01 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 28 Feb 2014 02:42:01 +0100 (CET) Subject: [pypy-commit] pypy numpy-refactor: close merged branch Message-ID: <20140228014201.45FA61C03FC@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: numpy-refactor Changeset: r69533:aaf78b39188c Date: 2014-02-27 20:40 -0500 http://bitbucket.org/pypy/pypy/changeset/aaf78b39188c/ Log: close merged branch From noreply at buildbot.pypy.org Fri Feb 28 04:26:14 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 28 Feb 2014 04:26:14 +0100 (CET) Subject: [pypy-commit] pypy default: fix test_compile when run alone Message-ID: <20140228032614.473FD1C03D5@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69534:b5d92e594183 Date: 2014-02-27 22:25 -0500 http://bitbucket.org/pypy/pypy/changeset/b5d92e594183/ Log: fix test_compile when run alone diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -9,7 +9,7 @@ from rpython.rlib.nonconst import NonConstant from pypy.module.micronumpy import boxes, ufuncs from pypy.module.micronumpy.arrayops import where -from pypy.module.micronumpy.base import W_NDimArray +from pypy.module.micronumpy.ndarray import W_NDimArray from pypy.module.micronumpy.ctors import array from pypy.module.micronumpy.descriptor import get_dtype_cache From noreply at buildbot.pypy.org Fri Feb 28 07:23:32 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 28 Feb 2014 07:23:32 +0100 (CET) Subject: [pypy-commit] pypy default: simplify iter/dot optimizations Message-ID: <20140228062332.C48281D22CF@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69535:27f7c050271b Date: 2014-02-27 22:20 -0800 http://bitbucket.org/pypy/pypy/changeset/27f7c050271b/ Log: simplify iter/dot 
optimizations diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -15,6 +15,8 @@ class BaseConcreteArray(object): + _immutable_fields_ = ['dtype?', 'storage', 'start', 'size', 'shape[*]', + 'strides[*]', 'backstrides[*]', 'order'] start = 0 parent = None @@ -350,6 +352,8 @@ orig_array) def set_dtype(self, space, dtype): + # size/shape/strides shouldn't change + assert dtype.elsize == self.dtype.elsize self.dtype = dtype def argsort(self, space, w_axis): diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -79,29 +79,31 @@ class ArrayIter(object): - _immutable_fields_ = ['array', 'size', 'ndim_m1', 'shape_m1', - 'strides', 'backstrides'] + _immutable_fields_ = ['array', 'size', 'indices', 'shape[*]', + 'strides[*]', 'backstrides[*]'] def __init__(self, array, size, shape, strides, backstrides): assert len(shape) == len(strides) == len(backstrides) self.array = array self.size = size - self.ndim_m1 = len(shape) - 1 - self.shape_m1 = [s - 1 for s in shape] + self.indices = [0] * len(shape) + self.shape = shape self.strides = strides self.backstrides = backstrides self.reset() + @jit.unroll_safe def reset(self): self.index = 0 - self.indices = [0] * (self.ndim_m1 + 1) + for i in xrange(len(self.shape)): + self.indices[i] = 0 self.offset = self.array.start @jit.unroll_safe def next(self): self.index += 1 - for i in xrange(self.ndim_m1, -1, -1): - if self.indices[i] < self.shape_m1[i]: + for i in xrange(len(self.shape) - 1, -1, -1): + if self.indices[i] < self.shape[i] - 1: self.indices[i] += 1 self.offset += self.strides[i] break @@ -115,14 +117,14 @@ if step == 0: return self.index += step - for i in xrange(self.ndim_m1, -1, -1): - if self.indices[i] < (self.shape_m1[i] + 1) - step: + for i in xrange(len(self.shape) - 1, -1, -1): + if self.indices[i] < self.shape[i] - step: self.indices[i] += step self.offset += self.strides[i] * step break else: - remaining_step = (self.indices[i] + step) // (self.shape_m1[i] + 1) - this_i_step = step - remaining_step * (self.shape_m1[i] + 1) + remaining_step = (self.indices[i] + step) // self.shape[i] + this_i_step = step - remaining_step * self.shape[i] self.indices[i] = self.indices[i] + this_i_step self.offset += self.strides[i] * this_i_step step = remaining_step diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -285,21 +285,18 @@ outi = result.create_iter() lefti = AllButAxisIter(left.implementation, len(left_shape) - 1) righti = AllButAxisIter(right.implementation, right_critical_dim) - n = left.implementation.shape[-1] - s1 = left.implementation.strides[-1] - s2 = right.implementation.strides[right_critical_dim] while not lefti.done(): while not righti.done(): oval = outi.getitem() i1 = lefti.offset i2 = righti.offset - for _ in xrange(n): + for _ in xrange(left.implementation.shape[-1]): dot_driver.jit_merge_point(dtype=dtype) lval = left.implementation.getitem(i1).convert_to(space, dtype) rval = right.implementation.getitem(i2).convert_to(space, dtype) oval = dtype.itemtype.add(oval, dtype.itemtype.mul(lval, rval)) - i1 += s1 - i2 += s2 + i1 += left.implementation.strides[-1] + i2 += right.implementation.strides[right_critical_dim] outi.setitem(oval) outi.next() righti.next() diff --git 
a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -59,6 +59,7 @@ if self.graph is None: interp, graph = self.meta_interp(f, [0], listops=True, + listcomp=True, backendopt=True, graph_and_interp_only=True) self.__class__.interp = interp From noreply at buildbot.pypy.org Fri Feb 28 07:23:34 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 28 Feb 2014 07:23:34 +0100 (CET) Subject: [pypy-commit] pypy default: update/run the dot zjit test Message-ID: <20140228062334.2E3E11D22CF@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69536:ab14257e1e85 Date: 2014-02-27 20:42 -0800 http://bitbucket.org/pypy/pypy/changeset/ab14257e1e85/ Log: update/run the dot zjit test diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -70,7 +70,6 @@ reset_jit() i = self.code_mapping[name] retval = self.interp.eval_graph(self.graph, [i]) - py.test.skip("don't run for now") return retval def define_add(): @@ -82,6 +81,7 @@ def test_add(self): result = self.run("add") + py.test.skip("don't run for now") self.check_simple_loop({'raw_load': 2, 'float_add': 1, 'raw_store': 1, 'int_add': 1, 'int_ge': 1, 'guard_false': 1, 'jump': 1, @@ -97,6 +97,7 @@ def test_floatadd(self): result = self.run("float_add") assert result == 3 + 3 + py.test.skip("don't run for now") self.check_simple_loop({"raw_load": 1, "float_add": 1, "raw_store": 1, "int_add": 1, "int_ge": 1, "guard_false": 1, "jump": 1, @@ -112,6 +113,7 @@ def test_sum(self): result = self.run("sum") assert result == 2 * sum(range(30)) + py.test.skip("don't run for now") self.check_simple_loop({"raw_load": 2, "float_add": 2, "int_add": 1, "int_ge": 1, "guard_false": 1, "jump": 1, 'arraylen_gc': 1}) @@ -126,6 +128,7 @@ def test_axissum(self): result = self.run("axissum") assert result == 30 + py.test.skip("don't run for now") # XXX note - the bridge here is fairly crucial and yet it's pretty # bogus. We need to improve the situation somehow. self.check_simple_loop({'raw_load': 2, @@ -178,6 +181,7 @@ for i in range(30): expected *= i * 2 assert result == expected + py.test.skip("don't run for now") self.check_simple_loop({"raw_load": 2, "float_add": 1, "float_mul": 1, "int_add": 1, "int_ge": 1, "guard_false": 1, "jump": 1, @@ -223,6 +227,7 @@ def test_any(self): result = self.run("any") assert result == 1 + py.test.skip("don't run for now") self.check_simple_loop({"raw_load": 2, "float_add": 1, "int_and": 1, "int_add": 1, 'cast_float_to_int': 1, @@ -264,6 +269,7 @@ def test_ufunc(self): result = self.run("ufunc") assert result == -6 + py.test.skip("don't run for now") self.check_simple_loop({"raw_load": 2, "float_add": 1, "float_neg": 1, "raw_store": 1, "int_add": 1, @@ -292,6 +298,7 @@ def test_specialization(self): self.run("specialization") + py.test.skip("don't run for now") # This is 3, not 2 because there is a bridge for the exit. 
self.check_trace_count(3) @@ -306,6 +313,7 @@ def test_slice(self): result = self.run("slice") assert result == 18 + py.test.skip("don't run for now") self.check_simple_loop({'raw_load': 2, 'float_add': 1, 'raw_store': 1, @@ -346,6 +354,7 @@ def test_multidim(self): result = self.run('multidim') assert result == 8 + py.test.skip("don't run for now") # int_add might be 1 here if we try slightly harder with # reusing indexes or some optimization self.check_simple_loop({'float_add': 1, 'raw_load': 2, @@ -396,6 +405,7 @@ def test_setslice(self): result = self.run("setslice") assert result == 11.0 + py.test.skip("don't run for now") self.check_trace_count(1) self.check_simple_loop({'raw_load': 2, 'float_add': 1, 'raw_store': 1, 'int_add': 2, @@ -413,6 +423,7 @@ def test_virtual_slice(self): result = self.run("virtual_slice") assert result == 4 + py.test.skip("don't run for now") self.check_trace_count(1) self.check_simple_loop({'raw_load': 2, 'float_add': 1, 'raw_store': 1, 'int_add': 1, @@ -429,6 +440,7 @@ def test_flat_iter(self): result = self.run("flat_iter") assert result == 6 + py.test.skip("don't run for now") self.check_trace_count(1) self.check_simple_loop({'raw_load': 2, 'float_add': 1, 'raw_store': 1, 'int_add': 2, @@ -445,6 +457,7 @@ def test_flat_getitem(self): result = self.run("flat_getitem") assert result == 10.0 + py.test.skip("don't run for now") self.check_trace_count(1) self.check_simple_loop({'raw_load': 1, 'raw_store': 1, @@ -467,6 +480,7 @@ def test_flat_setitem(self): result = self.run("flat_setitem") assert result == 1.0 + py.test.skip("don't run for now") self.check_trace_count(1) # XXX not ideal, but hey, let's ignore it for now self.check_simple_loop({'raw_load': 1, @@ -495,18 +509,40 @@ def test_dot(self): result = self.run("dot") assert result == 184 - self.check_simple_loop({'arraylen_gc': 9, - 'float_add': 1, + self.check_simple_loop({'float_add': 1, 'float_mul': 1, - 'raw_load': 3, - 'guard_false': 3, - 'guard_true': 3, - 'int_add': 6, - 'int_lt': 6, - 'int_sub': 3, + 'guard_not_invalidated': 1, + 'guard_false': 1, + 'int_add': 3, + 'int_ge': 1, 'jump': 1, - 'raw_store': 1}) - + 'raw_load': 2, + 'setfield_gc': 1}) + self.check_resops({'arraylen_gc': 4, + 'float_add': 2, + 'float_mul': 2, + 'getarrayitem_gc': 11, + 'getarrayitem_gc_pure': 15, + 'getfield_gc': 26, + 'getfield_gc_pure': 32, + 'guard_class': 4, + 'guard_false': 18, + 'guard_not_invalidated': 2, + 'guard_true': 9, + 'int_add': 25, + 'int_ge': 8, + 'int_le': 8, + 'int_lt': 7, + 'int_sub': 15, + 'jump': 3, + 'new': 1, + 'new_with_vtable': 1, + 'raw_load': 6, + 'raw_store': 1, + 'same_as': 2, + 'setarrayitem_gc': 10, + 'setfield_gc': 19}) + def define_argsort(): return """ a = |30| From noreply at buildbot.pypy.org Fri Feb 28 07:32:03 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 28 Feb 2014 07:32:03 +0100 (CET) Subject: [pypy-commit] pypy default: cleanup Message-ID: <20140228063203.9A5711D23B7@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69537:c7852a2bf25d Date: 2014-02-28 01:27 -0500 http://bitbucket.org/pypy/pypy/changeset/c7852a2bf25d/ Log: cleanup diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -1740,10 +1740,11 @@ a = array([(1, 2)], dtype=[('a', 'int64'), ('b', 'int64')])[0] assert a.shape == () assert a.view('S16') == '\x01' + '\x00' * 7 + '\x02' - a = array(2, dtype='int64') - b = 
a.view('complex64') + a = array(2, dtype=' Author: Maciej Fijalkowski Branch: Changeset: r69538:f06389a51ea3 Date: 2014-02-27 16:32 +0200 http://bitbucket.org/pypy/pypy/changeset/f06389a51ea3/ Log: mark this as doing random gcops diff --git a/rpython/rlib/entrypoint.py b/rpython/rlib/entrypoint.py --- a/rpython/rlib/entrypoint.py +++ b/rpython/rlib/entrypoint.py @@ -78,7 +78,8 @@ # registered RPython_StartupCode = rffi.llexternal('RPython_StartupCode', [], lltype.Void, - _nowrapper=True) + _nowrapper=True, + random_effects_on_gcobjs=True) @entrypoint('main', [], c_name='rpython_startup_code') def rpython_startup_code(): From noreply at buildbot.pypy.org Fri Feb 28 09:02:20 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 28 Feb 2014 09:02:20 +0100 (CET) Subject: [pypy-commit] pypy default: merge Message-ID: <20140228080220.A67321C3599@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69539:b2ba475381d2 Date: 2014-02-28 10:01 +0200 http://bitbucket.org/pypy/pypy/changeset/b2ba475381d2/ Log: merge diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -85,3 +85,6 @@ .. branch: remove-intlong-smm kills int/long/smalllong/bool multimethods + +.. branch: numpy-refactor +Cleanup micronumpy module diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -8,6 +8,7 @@ shape_agreement_multiple + def where(space, w_arr, w_x=None, w_y=None): """where(condition, [x, y]) @@ -91,6 +92,7 @@ out = W_NDimArray.from_shape(space, shape, dtype) return loop.where(space, out, shape, arr, x, y, dtype) + def dot(space, w_obj1, w_obj2, w_out=None): w_arr = convert_to_array(space, w_obj1) if w_arr.is_scalar(): @@ -162,6 +164,7 @@ axis_start += arr.get_shape()[axis] return res + @unwrap_spec(repeats=int) def repeat(space, w_arr, repeats, w_axis): arr = convert_to_array(space, w_arr) @@ -186,9 +189,11 @@ Chunks(chunks).apply(space, w_res).implementation.setslice(space, arr) return w_res + def count_nonzero(space, w_obj): return space.wrap(loop.count_all_true(convert_to_array(space, w_obj))) + def choose(space, w_arr, w_choices, w_out, w_mode): arr = convert_to_array(space, w_arr) choices = [convert_to_array(space, w_item) for w_item @@ -208,6 +213,7 @@ loop.choose(space, arr, choices, shape, dtype, out, mode) return out + def put(space, w_arr, w_indices, w_values, w_mode): arr = convert_to_array(space, w_arr) mode = clipmode_converter(space, w_mode) @@ -256,6 +262,7 @@ arr.setitem(space, [index], dtype.coerce(space, value)) + def diagonal(space, arr, offset, axis1, axis2): shape = arr.get_shape() shapelen = len(shape) diff --git a/pypy/module/micronumpy/bench/dot.py b/pypy/module/micronumpy/bench/dot.py --- a/pypy/module/micronumpy/bench/dot.py +++ b/pypy/module/micronumpy/bench/dot.py @@ -1,28 +1,32 @@ +import sys import time try: - import numpypy + import numpypy as numpy except ImportError: - pass + import numpy -import numpy - -def get_matrix(): +def get_matrix(n): import random - n = 502 x = numpy.zeros((n,n), dtype=numpy.float64) for i in range(n): for j in range(n): x[i][j] = random.random() return x -def main(): - x = get_matrix() - y = get_matrix() +def main(n, r): + x = get_matrix(n) + y = get_matrix(n) a = time.time() - #z = numpy.dot(x, y) # uses numpy possibly-blas-lib dot - z = numpy.core.multiarray.dot(x, y) # uses strictly numpy C dot + for _ in xrange(r): + #z = 
numpy.dot(x, y) # uses numpy possibly-blas-lib dot + z = numpy.core.multiarray.dot(x, y) # uses strictly numpy C dot b = time.time() - print '%.2f seconds' % (b-a) + print '%d runs, %.2f seconds' % (r, b-a) -main() +n = int(sys.argv[1]) +try: + r = int(sys.argv[2]) +except IndexError: + r = 1 +main(n, r) diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -69,6 +69,7 @@ ret = space.newtuple([scalar, space.newtuple([space.wrap(self._get_dtype(space)), space.wrap(self.raw_str())])]) return ret + class PrimitiveBox(Box): _mixin_ = True _immutable_fields_ = ['value'] @@ -93,6 +94,7 @@ lltype.free(value, flavor="raw") return ret + class ComplexBox(Box): _mixin_ = True _immutable_fields_ = ['real', 'imag'] @@ -360,6 +362,7 @@ return self.get_dtype(space).itemtype.imag(self) w_flags = None + def descr_get_flags(self, space): if self.w_flags is None: self.w_flags = W_FlagsObject(self) diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -9,7 +9,7 @@ from rpython.rlib.nonconst import NonConstant from pypy.module.micronumpy import boxes, ufuncs from pypy.module.micronumpy.arrayops import where -from pypy.module.micronumpy.base import W_NDimArray +from pypy.module.micronumpy.ndarray import W_NDimArray from pypy.module.micronumpy.ctors import array from pypy.module.micronumpy.descriptor import get_dtype_cache diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -5,15 +5,18 @@ from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage, \ raw_storage_getitem, raw_storage_setitem, RAW_STORAGE from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.module.micronumpy import support, loop, iter +from pypy.module.micronumpy import support, loop from pypy.module.micronumpy.base import convert_to_array, W_NDimArray, \ ArrayArgumentException +from pypy.module.micronumpy.iterators import ArrayIter from pypy.module.micronumpy.strides import (Chunk, Chunks, NewAxisChunk, RecordChunk, calc_strides, calc_new_strides, shape_agreement, - calculate_broadcast_strides, calculate_dot_strides) + calculate_broadcast_strides) class BaseConcreteArray(object): + _immutable_fields_ = ['dtype?', 'storage', 'start', 'size', 'shape[*]', + 'strides[*]', 'backstrides[*]', 'order'] start = 0 parent = None @@ -276,13 +279,16 @@ backstrides) return loop.setslice(space, self.get_shape(), impl, self) - def create_axis_iter(self, shape, dim, cum): - return iter.AxisIterator(self, shape, dim, cum) - - def create_dot_iter(self, shape, skip): - r = calculate_dot_strides(self.get_strides(), self.get_backstrides(), - shape, skip) - return iter.MultiDimViewIterator(self, self.start, r[0], r[1], shape) + def create_iter(self, shape=None, backward_broadcast=False): + if shape is not None and \ + support.product(shape) > support.product(self.get_shape()): + r = calculate_broadcast_strides(self.get_strides(), + self.get_backstrides(), + self.get_shape(), shape, + backward_broadcast) + return ArrayIter(self, support.product(shape), shape, r[0], r[1]) + return ArrayIter(self, self.get_size(), self.shape, + self.strides, self.backstrides) def swapaxes(self, space, orig_arr, axis1, axis2): shape = self.get_shape()[:] @@ -335,26 +341,6 @@ self.backstrides = backstrides self.storage = storage - def 
create_iter(self, shape=None, backward_broadcast=False, require_index=False): - if shape is not None and \ - support.product(shape) > support.product(self.get_shape()): - r = calculate_broadcast_strides(self.get_strides(), - self.get_backstrides(), - self.get_shape(), shape, - backward_broadcast) - return iter.MultiDimViewIterator(self, self.start, - r[0], r[1], shape) - if not require_index: - return iter.ConcreteArrayIterator(self) - if len(self.get_shape()) <= 1: - return iter.OneDimViewIterator(self, self.start, - self.get_strides(), - self.get_shape()) - return iter.MultiDimViewIterator(self, self.start, - self.get_strides(), - self.get_backstrides(), - self.get_shape()) - def fill(self, space, box): self.dtype.itemtype.fill(self.storage, self.dtype.elsize, box, 0, self.size, 0) @@ -366,6 +352,8 @@ orig_array) def set_dtype(self, space, dtype): + # size/shape/strides shouldn't change + assert dtype.elsize == self.dtype.elsize self.dtype = dtype def argsort(self, space, w_axis): @@ -440,24 +428,6 @@ def fill(self, space, box): loop.fill(self, box.convert_to(space, self.dtype)) - def create_iter(self, shape=None, backward_broadcast=False, require_index=False): - if shape is not None and \ - support.product(shape) > support.product(self.get_shape()): - r = calculate_broadcast_strides(self.get_strides(), - self.get_backstrides(), - self.get_shape(), shape, - backward_broadcast) - return iter.MultiDimViewIterator(self, self.start, - r[0], r[1], shape) - if len(self.get_shape()) <= 1: - return iter.OneDimViewIterator(self, self.start, - self.get_strides(), - self.get_shape()) - return iter.MultiDimViewIterator(self, self.start, - self.get_strides(), - self.get_backstrides(), - self.get_shape()) - def set_shape(self, space, orig_array, new_shape): if len(self.get_shape()) < 2 or self.size == 0: # TODO: this code could be refactored into calc_strides diff --git a/pypy/module/micronumpy/constants.py b/pypy/module/micronumpy/constants.py --- a/pypy/module/micronumpy/constants.py +++ b/pypy/module/micronumpy/constants.py @@ -57,7 +57,7 @@ INTPLTR = 'p' UINTPLTR = 'P' -GENBOOLLTR ='b' +GENBOOLLTR = 'b' SIGNEDLTR = 'i' UNSIGNEDLTR = 'u' FLOATINGLTR = 'f' diff --git a/pypy/module/micronumpy/flagsobj.py b/pypy/module/micronumpy/flagsobj.py --- a/pypy/module/micronumpy/flagsobj.py +++ b/pypy/module/micronumpy/flagsobj.py @@ -6,9 +6,13 @@ class W_FlagsObject(W_Root): def __init__(self, arr): - self.arr = arr self.flags = 0 + def descr__new__(space, w_subtype): + self = space.allocate_instance(W_FlagsObject, w_subtype) + W_FlagsObject.__init__(self, None) + return self + def descr_get_contiguous(self, space): return space.w_True @@ -60,6 +64,8 @@ W_FlagsObject.typedef = TypeDef("flagsobj", __module__ = "numpy", + __new__ = interp2app(W_FlagsObject.descr__new__.im_func), + __getitem__ = interp2app(W_FlagsObject.descr_getitem), __setitem__ = interp2app(W_FlagsObject.descr_setitem), __eq__ = interp2app(W_FlagsObject.descr_eq), diff --git a/pypy/module/micronumpy/flatiter.py b/pypy/module/micronumpy/flatiter.py --- a/pypy/module/micronumpy/flatiter.py +++ b/pypy/module/micronumpy/flatiter.py @@ -19,7 +19,7 @@ def get_shape(self): return self.shape - def create_iter(self, shape=None, backward_broadcast=False, require_index=False): + def create_iter(self, shape=None, backward_broadcast=False): assert isinstance(self.base(), W_NDimArray) return self.base().create_iter() @@ -33,7 +33,6 @@ def reset(self): self.iter = self.base.create_iter() - self.index = 0 def descr_len(self, space): return 
space.wrap(self.base.get_size()) @@ -43,14 +42,13 @@ raise OperationError(space.w_StopIteration, space.w_None) w_res = self.iter.getitem() self.iter.next() - self.index += 1 return w_res def descr_index(self, space): - return space.wrap(self.index) + return space.wrap(self.iter.index) def descr_coords(self, space): - coords = self.base.to_coords(space, space.wrap(self.index)) + coords = self.base.to_coords(space, space.wrap(self.iter.index)) return space.newtuple([space.wrap(c) for c in coords]) def descr_getitem(self, space, w_idx): diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py deleted file mode 100644 --- a/pypy/module/micronumpy/iter.py +++ /dev/null @@ -1,254 +0,0 @@ -""" This is a mini-tutorial on iterators, strides, and -memory layout. It assumes you are familiar with the terms, see -http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html -for a more gentle introduction. - -Given an array x: x.shape == [5,6], where each element occupies one byte - -At which byte in x.data does the item x[3,4] begin? -if x.strides==[1,5]: - pData = x.pData + (x.start + 3*1 + 4*5)*sizeof(x.pData[0]) - pData = x.pData + (x.start + 24) * sizeof(x.pData[0]) -so the offset of the element is 24 elements after the first - -What is the next element in x after coordinates [3,4]? -if x.order =='C': - next == [3,5] => offset is 28 -if x.order =='F': - next == [4,4] => offset is 24 -so for the strides [1,5] x is 'F' contiguous -likewise, for the strides [6,1] x would be 'C' contiguous. - -Iterators have an internal representation of the current coordinates -(indices), the array, strides, and backstrides. A short digression to -explain backstrides: what is the coordinate and offset after [3,5] in -the example above? -if x.order == 'C': - next == [4,0] => offset is 4 -if x.order == 'F': - next == [4,5] => offset is 25 -Note that in 'C' order we stepped BACKWARDS 24 while 'overflowing' a -shape dimension - which is back 25 and forward 1, - which is x.strides[1] * (x.shape[1] - 1) + x.strides[0] -so if we precalculate the overflow backstride as -[x.strides[i] * (x.shape[i] - 1) for i in range(len(x.shape))] -we can go faster. -All the calculations happen in next() - -next_skip_x(steps) tries to do the iteration for a number of steps at once, -but then we cannot gaurentee that we only overflow one single shape -dimension, perhaps we could overflow times in one big step. 
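
The docstring above (kept, with small wording fixes, as the module docstring of
the new iterators.py further down) walks through the offset arithmetic in prose.
As a concrete illustration, a minimal standalone sketch using the docstring's own
example values, shape [5,6] and strides [1,5] with one-byte elements; the names
here are assumptions for illustration only, not code from micronumpy:

    # assumed standalone sketch: element offset from indices and strides
    shape = [5, 6]
    strides = [1, 5]
    start = 0

    def offset_of(indices):
        off = start
        for idx, stride in zip(indices, strides):
            off += idx * stride
        return off

    # the two "next element after [3,4]" cases spelled out in the text:
    assert offset_of([3, 5]) == 28    # 'C' order step
    assert offset_of([4, 4]) == 24    # 'F' order step
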
-""" -from rpython.rlib import jit -from pypy.module.micronumpy import support -from pypy.module.micronumpy.base import W_NDimArray - - -class PureShapeIterator(object): - def __init__(self, shape, idx_w): - self.shape = shape - self.shapelen = len(shape) - self.indexes = [0] * len(shape) - self._done = False - self.idx_w = [None] * len(idx_w) - for i, w_idx in enumerate(idx_w): - if isinstance(w_idx, W_NDimArray): - self.idx_w[i] = w_idx.create_iter(shape) - - def done(self): - return self._done - - @jit.unroll_safe - def next(self): - for w_idx in self.idx_w: - if w_idx is not None: - w_idx.next() - for i in range(self.shapelen - 1, -1, -1): - if self.indexes[i] < self.shape[i] - 1: - self.indexes[i] += 1 - break - else: - self.indexes[i] = 0 - else: - self._done = True - - @jit.unroll_safe - def get_index(self, space, shapelen): - return [space.wrap(self.indexes[i]) for i in range(shapelen)] - - -class BaseArrayIterator(object): - def next(self): - raise NotImplementedError # purely abstract base class - - def setitem(self, elem): - raise NotImplementedError - - def set_scalar_object(self, value): - raise NotImplementedError # works only on scalars - - -class ConcreteArrayIterator(BaseArrayIterator): - _immutable_fields_ = ['array', 'skip', 'size'] - - def __init__(self, array): - self.array = array - self.offset = 0 - self.skip = array.dtype.elsize - self.size = array.size - - def setitem(self, elem): - self.array.setitem(self.offset, elem) - - def getitem(self): - return self.array.getitem(self.offset) - - def getitem_bool(self): - return self.array.getitem_bool(self.offset) - - def next(self): - self.offset += self.skip - - def next_skip_x(self, x): - self.offset += self.skip * x - - def done(self): - return self.offset >= self.size - - def reset(self): - self.offset %= self.size - - -class OneDimViewIterator(ConcreteArrayIterator): - def __init__(self, array, start, strides, shape): - self.array = array - self.offset = start - self.index = 0 - assert len(strides) == len(shape) - if len(shape) == 0: - self.skip = array.dtype.elsize - self.size = 1 - else: - assert len(shape) == 1 - self.skip = strides[0] - self.size = shape[0] - - def next(self): - self.offset += self.skip - self.index += 1 - - def next_skip_x(self, x): - self.offset += self.skip * x - self.index += x - - def done(self): - return self.index >= self.size - - def reset(self): - self.offset %= self.size - - def get_index(self, d): - return self.index - - -class MultiDimViewIterator(ConcreteArrayIterator): - def __init__(self, array, start, strides, backstrides, shape): - self.indexes = [0] * len(shape) - self.array = array - self.shape = shape - self.offset = start - self.shapelen = len(shape) - self._done = self.shapelen == 0 or support.product(shape) == 0 - self.strides = strides - self.backstrides = backstrides - self.size = array.size - - @jit.unroll_safe - def next(self): - offset = self.offset - for i in range(self.shapelen - 1, -1, -1): - if self.indexes[i] < self.shape[i] - 1: - self.indexes[i] += 1 - offset += self.strides[i] - break - else: - self.indexes[i] = 0 - offset -= self.backstrides[i] - else: - self._done = True - self.offset = offset - - @jit.unroll_safe - def next_skip_x(self, step): - for i in range(len(self.shape) - 1, -1, -1): - if self.indexes[i] < self.shape[i] - step: - self.indexes[i] += step - self.offset += self.strides[i] * step - break - else: - remaining_step = (self.indexes[i] + step) // self.shape[i] - this_i_step = step - remaining_step * self.shape[i] - self.offset += self.strides[i] 
* this_i_step - self.indexes[i] = self.indexes[i] + this_i_step - step = remaining_step - else: - self._done = True - - def done(self): - return self._done - - def reset(self): - self.offset %= self.size - - def get_index(self, d): - return self.indexes[d] - - -class AxisIterator(BaseArrayIterator): - def __init__(self, array, shape, dim, cumulative): - self.shape = shape - strides = array.get_strides() - backstrides = array.get_backstrides() - if cumulative: - self.strides = strides - self.backstrides = backstrides - elif len(shape) == len(strides): - # keepdims = True - self.strides = strides[:dim] + [0] + strides[dim + 1:] - self.backstrides = backstrides[:dim] + [0] + backstrides[dim + 1:] - else: - self.strides = strides[:dim] + [0] + strides[dim:] - self.backstrides = backstrides[:dim] + [0] + backstrides[dim:] - self.first_line = True - self.indices = [0] * len(shape) - self._done = array.get_size() == 0 - self.offset = array.start - self.dim = dim - self.array = array - - def setitem(self, elem): - self.array.setitem(self.offset, elem) - - def getitem(self): - return self.array.getitem(self.offset) - - @jit.unroll_safe - def next(self): - for i in range(len(self.shape) - 1, -1, -1): - if self.indices[i] < self.shape[i] - 1: - if i == self.dim: - self.first_line = False - self.indices[i] += 1 - self.offset += self.strides[i] - break - else: - if i == self.dim: - self.first_line = True - self.indices[i] = 0 - self.offset -= self.backstrides[i] - else: - self._done = True - - def done(self): - return self._done diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/iterators.py @@ -0,0 +1,167 @@ +""" This is a mini-tutorial on iterators, strides, and +memory layout. It assumes you are familiar with the terms, see +http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html +for a more gentle introduction. + +Given an array x: x.shape == [5,6], where each element occupies one byte + +At which byte in x.data does the item x[3,4] begin? +if x.strides==[1,5]: + pData = x.pData + (x.start + 3*1 + 4*5)*sizeof(x.pData[0]) + pData = x.pData + (x.start + 24) * sizeof(x.pData[0]) +so the offset of the element is 24 elements after the first + +What is the next element in x after coordinates [3,4]? +if x.order =='C': + next == [3,5] => offset is 28 +if x.order =='F': + next == [4,4] => offset is 24 +so for the strides [1,5] x is 'F' contiguous +likewise, for the strides [6,1] x would be 'C' contiguous. + +Iterators have an internal representation of the current coordinates +(indices), the array, strides, and backstrides. A short digression to +explain backstrides: what is the coordinate and offset after [3,5] in +the example above? +if x.order == 'C': + next == [4,0] => offset is 4 +if x.order == 'F': + next == [4,5] => offset is 25 +Note that in 'C' order we stepped BACKWARDS 24 while 'overflowing' a +shape dimension + which is back 25 and forward 1, + which is x.strides[1] * (x.shape[1] - 1) + x.strides[0] +so if we precalculate the overflow backstride as +[x.strides[i] * (x.shape[i] - 1) for i in range(len(x.shape))] +we can go faster. +All the calculations happen in next() + +next_skip_x(steps) tries to do the iteration for a number of steps at once, +but then we cannot guarantee that we only overflow one single shape +dimension, perhaps we could overflow times in one big step. 
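
To make the backstride bookkeeping concrete, here is a pure-Python sketch of a
single next() step as described above.  The shape [3,5] / strides [5,1] values
match the new test_iterators.py further down; everything else is an assumption
for illustration and is not the real ArrayIter:

    shape = [3, 5]
    strides = [5, 1]              # C order, contiguous
    backstrides = [s * (d - 1) for s, d in zip(strides, shape)]   # [10, 4]

    def step(indices, offset):
        # advance the rightmost index; on overflow, reset it and walk the
        # offset back by the precalculated backstride
        for i in range(len(shape) - 1, -1, -1):
            if indices[i] < shape[i] - 1:
                indices[i] += 1
                return offset + strides[i]
            indices[i] = 0
            offset -= backstrides[i]
        return offset

    indices, offset = [0, 0], 0
    for _ in range(5):
        offset = step(indices, offset)
    assert indices == [1, 0] and offset == 5    # same numbers as the new test

AxisIter and AllButAxisIter further down build on exactly this bookkeeping: they
zero out one axis' stride and backstride so that stepping never advances the
offset along that axis.
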
+""" +from rpython.rlib import jit +from pypy.module.micronumpy import support +from pypy.module.micronumpy.base import W_NDimArray + + +class PureShapeIter(object): + def __init__(self, shape, idx_w): + self.shape = shape + self.shapelen = len(shape) + self.indexes = [0] * len(shape) + self._done = False + self.idx_w = [None] * len(idx_w) + for i, w_idx in enumerate(idx_w): + if isinstance(w_idx, W_NDimArray): + self.idx_w[i] = w_idx.create_iter(shape) + + def done(self): + return self._done + + @jit.unroll_safe + def next(self): + for w_idx in self.idx_w: + if w_idx is not None: + w_idx.next() + for i in range(self.shapelen - 1, -1, -1): + if self.indexes[i] < self.shape[i] - 1: + self.indexes[i] += 1 + break + else: + self.indexes[i] = 0 + else: + self._done = True + + @jit.unroll_safe + def get_index(self, space, shapelen): + return [space.wrap(self.indexes[i]) for i in range(shapelen)] + + +class ArrayIter(object): + _immutable_fields_ = ['array', 'size', 'indices', 'shape[*]', + 'strides[*]', 'backstrides[*]'] + + def __init__(self, array, size, shape, strides, backstrides): + assert len(shape) == len(strides) == len(backstrides) + self.array = array + self.size = size + self.indices = [0] * len(shape) + self.shape = shape + self.strides = strides + self.backstrides = backstrides + self.reset() + + @jit.unroll_safe + def reset(self): + self.index = 0 + for i in xrange(len(self.shape)): + self.indices[i] = 0 + self.offset = self.array.start + + @jit.unroll_safe + def next(self): + self.index += 1 + for i in xrange(len(self.shape) - 1, -1, -1): + if self.indices[i] < self.shape[i] - 1: + self.indices[i] += 1 + self.offset += self.strides[i] + break + else: + self.indices[i] = 0 + self.offset -= self.backstrides[i] + + @jit.unroll_safe + def next_skip_x(self, step): + assert step >= 0 + if step == 0: + return + self.index += step + for i in xrange(len(self.shape) - 1, -1, -1): + if self.indices[i] < self.shape[i] - step: + self.indices[i] += step + self.offset += self.strides[i] * step + break + else: + remaining_step = (self.indices[i] + step) // self.shape[i] + this_i_step = step - remaining_step * self.shape[i] + self.indices[i] = self.indices[i] + this_i_step + self.offset += self.strides[i] * this_i_step + step = remaining_step + assert step > 0 + + def done(self): + return self.index >= self.size + + def getitem(self): + return self.array.getitem(self.offset) + + def getitem_bool(self): + return self.array.getitem_bool(self.offset) + + def setitem(self, elem): + self.array.setitem(self.offset, elem) + + +def AxisIter(array, shape, axis, cumulative): + strides = array.get_strides() + backstrides = array.get_backstrides() + if not cumulative: + if len(shape) == len(strides): + # keepdims = True + strides = strides[:axis] + [0] + strides[axis + 1:] + backstrides = backstrides[:axis] + [0] + backstrides[axis + 1:] + else: + strides = strides[:axis] + [0] + strides[axis:] + backstrides = backstrides[:axis] + [0] + backstrides[axis:] + return ArrayIter(array, support.product(shape), shape, strides, backstrides) + + +def AllButAxisIter(array, axis): + size = array.get_size() + shape = array.get_shape()[:] + backstrides = array.backstrides[:] + if size: + size /= shape[axis] + shape[axis] = backstrides[axis] = 0 + return ArrayIter(array, size, shape, array.strides, backstrides) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -8,7 +8,8 @@ from rpython.rtyper.lltypesystem import lltype, 
rffi from pypy.module.micronumpy import support, constants as NPY from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy.iter import PureShapeIterator +from pypy.module.micronumpy.iterators import PureShapeIter, AxisIter, \ + AllButAxisIter call2_driver = jit.JitDriver(name='numpy_call2', @@ -203,9 +204,9 @@ def do_axis_reduce(space, shape, func, arr, dtype, axis, out, identity, cumulative, temp): - out_iter = out.create_axis_iter(arr.get_shape(), axis, cumulative) + out_iter = AxisIter(out.implementation, arr.get_shape(), axis, cumulative) if cumulative: - temp_iter = temp.create_axis_iter(arr.get_shape(), axis, False) + temp_iter = AxisIter(temp.implementation, arr.get_shape(), axis, False) else: temp_iter = out_iter # hack arr_iter = arr.create_iter() @@ -215,16 +216,14 @@ while not out_iter.done(): axis_reduce__driver.jit_merge_point(shapelen=shapelen, func=func, dtype=dtype) - if arr_iter.done(): - w_val = identity + assert not arr_iter.done() + w_val = arr_iter.getitem().convert_to(space, dtype) + if out_iter.indices[axis] == 0: + if identity is not None: + w_val = func(dtype, identity, w_val) else: - w_val = arr_iter.getitem().convert_to(space, dtype) - if out_iter.first_line: - if identity is not None: - w_val = func(dtype, identity, w_val) - else: - cur = temp_iter.getitem() - w_val = func(dtype, cur, w_val) + cur = temp_iter.getitem() + w_val = func(dtype, cur, w_val) out_iter.setitem(w_val) if cumulative: temp_iter.setitem(w_val) @@ -261,7 +260,6 @@ argmin = _new_argmin_argmax('min') argmax = _new_argmin_argmax('max') -# note that shapelen == 2 always dot_driver = jit.JitDriver(name = 'numpy_dot', greens = ['dtype'], reds = 'auto') @@ -282,25 +280,27 @@ ''' left_shape = left.get_shape() right_shape = right.get_shape() - broadcast_shape = left_shape[:-1] + right_shape - left_skip = [len(left_shape) - 1 + i for i in range(len(right_shape)) - if i != right_critical_dim] - right_skip = range(len(left_shape) - 1) - result_skip = [len(result.get_shape()) - (len(right_shape) > 1)] + assert left_shape[-1] == right_shape[right_critical_dim] assert result.get_dtype() == dtype - outi = result.create_dot_iter(broadcast_shape, result_skip) - lefti = left.create_dot_iter(broadcast_shape, left_skip) - righti = right.create_dot_iter(broadcast_shape, right_skip) - while not outi.done(): - dot_driver.jit_merge_point(dtype=dtype) - lval = lefti.getitem().convert_to(space, dtype) - rval = righti.getitem().convert_to(space, dtype) - outval = outi.getitem() - v = dtype.itemtype.mul(lval, rval) - v = dtype.itemtype.add(v, outval) - outi.setitem(v) - outi.next() - righti.next() + outi = result.create_iter() + lefti = AllButAxisIter(left.implementation, len(left_shape) - 1) + righti = AllButAxisIter(right.implementation, right_critical_dim) + while not lefti.done(): + while not righti.done(): + oval = outi.getitem() + i1 = lefti.offset + i2 = righti.offset + for _ in xrange(left.implementation.shape[-1]): + dot_driver.jit_merge_point(dtype=dtype) + lval = left.implementation.getitem(i1).convert_to(space, dtype) + rval = right.implementation.getitem(i2).convert_to(space, dtype) + oval = dtype.itemtype.add(oval, dtype.itemtype.mul(lval, rval)) + i1 += left.implementation.strides[-1] + i2 += right.implementation.strides[right_critical_dim] + outi.setitem(oval) + outi.next() + righti.next() + righti.reset() lefti.next() return result @@ -331,7 +331,7 @@ def nonzero(res, arr, box): res_iter = res.create_iter() - arr_iter = arr.create_iter(require_index=True) + arr_iter = 
arr.create_iter() shapelen = len(arr.shape) dtype = arr.dtype dims = range(shapelen) @@ -339,7 +339,7 @@ nonzero_driver.jit_merge_point(shapelen=shapelen, dims=dims, dtype=dtype) if arr_iter.getitem_bool(): for d in dims: - res_iter.setitem(box(arr_iter.get_index(d))) + res_iter.setitem(box(arr_iter.indices[d])) res_iter.next() arr_iter.next() return res @@ -435,8 +435,6 @@ arr_iter.next_skip_x(step) length -= 1 val_iter.next() - # WTF numpy? - val_iter.reset() fromstring_driver = jit.JitDriver(name = 'numpy_fromstring', greens = ['itemsize', 'dtype'], @@ -480,7 +478,7 @@ prefixlen = len(prefix_w) indexlen = len(indexes_w) dtype = arr.get_dtype() - iter = PureShapeIterator(iter_shape, indexes_w) + iter = PureShapeIter(iter_shape, indexes_w) indexlen = len(indexes_w) while not iter.done(): getitem_int_driver.jit_merge_point(shapelen=shapelen, indexlen=indexlen, @@ -509,7 +507,7 @@ indexlen = len(indexes_w) prefixlen = len(prefix_w) dtype = arr.get_dtype() - iter = PureShapeIterator(iter_shape, indexes_w) + iter = PureShapeIter(iter_shape, indexes_w) while not iter.done(): setitem_int_driver.jit_merge_point(shapelen=shapelen, indexlen=indexlen, dtype=dtype, prefixlen=prefixlen) @@ -634,7 +632,7 @@ def diagonal_array(space, arr, out, offset, axis1, axis2, shape): out_iter = out.create_iter() - iter = PureShapeIterator(shape, []) + iter = PureShapeIter(shape, []) shapelen_minus_1 = len(shape) - 1 assert shapelen_minus_1 >= 0 if axis1 < axis2: diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -44,6 +44,7 @@ "objects are not aligned")) return out_shape, right_critical_dim + class __extend__(W_NDimArray): @jit.unroll_safe def descr_get_shape(self, space): @@ -280,17 +281,10 @@ s.append(suffix) return s.build() - def create_iter(self, shape=None, backward_broadcast=False, require_index=False): + def create_iter(self, shape=None, backward_broadcast=False): assert isinstance(self.implementation, BaseConcreteArray) return self.implementation.create_iter( - shape=shape, backward_broadcast=backward_broadcast, - require_index=require_index) - - def create_axis_iter(self, shape, dim, cum): - return self.implementation.create_axis_iter(shape, dim, cum) - - def create_dot_iter(self, shape, skip): - return self.implementation.create_dot_iter(shape, skip) + shape=shape, backward_broadcast=backward_broadcast) def is_scalar(self): return len(self.get_shape()) == 0 @@ -1126,6 +1120,7 @@ return w_obj pass + @unwrap_spec(offset=int) def descr_new_array(space, w_subtype, w_shape, w_dtype=None, w_buffer=None, offset=0, w_strides=None, w_order=None): @@ -1176,6 +1171,7 @@ space.wrap('__array_finalize__')), w_subtype) return w_ret + @unwrap_spec(addr=int) def descr__from_shape_and_storage(space, w_cls, w_shape, addr, w_dtype, w_subtype=None): """ diff --git a/pypy/module/micronumpy/sort.py b/pypy/module/micronumpy/sort.py --- a/pypy/module/micronumpy/sort.py +++ b/pypy/module/micronumpy/sort.py @@ -1,6 +1,3 @@ -""" This is the implementation of various sorting routines in numpy. 
It's here -because it only makes sense on a concrete array -""" from pypy.interpreter.error import OperationError, oefmt from rpython.rlib.listsort import make_timsort_class from rpython.rlib.objectmodel import specialize @@ -11,10 +8,15 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.micronumpy import descriptor, types, constants as NPY from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy.iter import AxisIterator +from pypy.module.micronumpy.iterators import AllButAxisIter INT_SIZE = rffi.sizeof(lltype.Signed) +all_types = (types.all_float_types + types.all_complex_types + + types.all_int_types) +all_types = [i for i in all_types if not issubclass(i[0], types.Float16)] +all_types = unrolling_iterable(all_types) + def make_argsort_function(space, itemtype, comp_type, count=1): TP = itemtype.T @@ -146,21 +148,20 @@ if axis < 0 or axis >= len(shape): raise OperationError(space.w_IndexError, space.wrap( "Wrong axis %d" % axis)) - iterable_shape = shape[:axis] + [0] + shape[axis + 1:] - iter = AxisIterator(arr, iterable_shape, axis, False) + arr_iter = AllButAxisIter(arr, axis) index_impl = index_arr.implementation - index_iter = AxisIterator(index_impl, iterable_shape, axis, False) + index_iter = AllButAxisIter(index_impl, axis) stride_size = arr.strides[axis] index_stride_size = index_impl.strides[axis] axis_size = arr.shape[axis] - while not iter.done(): + while not arr_iter.done(): for i in range(axis_size): raw_storage_setitem(storage, i * index_stride_size + index_iter.offset, i) r = Repr(index_stride_size, stride_size, axis_size, - arr.get_storage(), storage, index_iter.offset, iter.offset) + arr.get_storage(), storage, index_iter.offset, arr_iter.offset) ArgSort(r).sort() - iter.next() + arr_iter.next() index_iter.next() return index_arr @@ -292,14 +293,13 @@ if axis < 0 or axis >= len(shape): raise OperationError(space.w_IndexError, space.wrap( "Wrong axis %d" % axis)) - iterable_shape = shape[:axis] + [0] + shape[axis + 1:] - iter = AxisIterator(arr, iterable_shape, axis, False) + arr_iter = AllButAxisIter(arr, axis) stride_size = arr.strides[axis] axis_size = arr.shape[axis] - while not iter.done(): - r = Repr(stride_size, axis_size, arr.get_storage(), iter.offset) + while not arr_iter.done(): + r = Repr(stride_size, axis_size, arr.get_storage(), arr_iter.offset) ArgSort(r).sort() - iter.next() + arr_iter.next() return sort @@ -319,11 +319,6 @@ "sorting of non-numeric types '%s' is not implemented", arr.dtype.get_name()) -all_types = (types.all_float_types + types.all_complex_types + - types.all_int_types) -all_types = [i for i in all_types if not issubclass(i[0], types.Float16)] -all_types = unrolling_iterable(all_types) - class ArgSortCache(object): built = False diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -421,18 +421,3 @@ n_old_elems_to_use *= old_shape[oldI] assert len(new_strides) == len(new_shape) return new_strides[:] - - -def calculate_dot_strides(strides, backstrides, res_shape, skip_dims): - rstrides = [0] * len(res_shape) - rbackstrides = [0] * len(res_shape) - j = 0 - for i in range(len(res_shape)): - if i in skip_dims: - rstrides[i] = 0 - rbackstrides[i] = 0 - else: - rstrides[i] = strides[j] - rbackstrides[i] = backstrides[j] - j += 1 - return rstrides, rbackstrides diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- 
a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -41,8 +41,7 @@ a[0] = 0 assert (b == [1, 1, 1, 0, 0]).all() - - def test_dot(self): + def test_dot_basic(self): from numpypy import array, dot, arange a = array(range(5)) assert dot(a, a) == 30.0 @@ -69,7 +68,7 @@ assert b.shape == (4, 3) c = dot(a, b) assert (c == [[[14, 38, 62], [38, 126, 214], [62, 214, 366]], - [[86, 302, 518], [110, 390, 670], [134, 478, 822]]]).all() + [[86, 302, 518], [110, 390, 670], [134, 478, 822]]]).all() c = dot(a, b[:, 2]) assert (c == [[62, 214, 366], [518, 670, 822]]).all() a = arange(3*2*6).reshape((3,2,6)) diff --git a/pypy/module/micronumpy/test/test_flagsobj.py b/pypy/module/micronumpy/test/test_flagsobj.py --- a/pypy/module/micronumpy/test/test_flagsobj.py +++ b/pypy/module/micronumpy/test/test_flagsobj.py @@ -2,6 +2,14 @@ class AppTestFlagsObj(BaseNumpyAppTest): + def test_init(self): + import numpy as np + a = np.array([1,2,3]) + assert a.flags['C'] is True + b = type(a.flags)() + assert b is not a.flags + assert b['C'] is True + def test_repr(self): import numpy as np a = np.array([1,2,3]) diff --git a/pypy/module/micronumpy/test/test_iter.py b/pypy/module/micronumpy/test/test_iter.py deleted file mode 100644 --- a/pypy/module/micronumpy/test/test_iter.py +++ /dev/null @@ -1,93 +0,0 @@ -from pypy.module.micronumpy.iter import MultiDimViewIterator - - -class MockArray(object): - size = 1 - - -class TestIterDirect(object): - def test_C_viewiterator(self): - #Let's get started, simple iteration in C order with - #contiguous layout => strides[-1] is 1 - start = 0 - shape = [3, 5] - strides = [5, 1] - backstrides = [x * (y - 1) for x,y in zip(strides, shape)] - assert backstrides == [10, 4] - i = MultiDimViewIterator(MockArray, start, strides, backstrides, shape) - i.next() - i.next() - i.next() - assert i.offset == 3 - assert not i.done() - assert i.indexes == [0,3] - #cause a dimension overflow - i.next() - i.next() - assert i.offset == 5 - assert i.indexes == [1,0] - - #Now what happens if the array is transposed? strides[-1] != 1 - # therefore layout is non-contiguous - strides = [1, 3] - backstrides = [x * (y - 1) for x,y in zip(strides, shape)] - assert backstrides == [2, 12] - i = MultiDimViewIterator(MockArray, start, strides, backstrides, shape) - i.next() - i.next() - i.next() - assert i.offset == 9 - assert not i.done() - assert i.indexes == [0,3] - #cause a dimension overflow - i.next() - i.next() - assert i.offset == 1 - assert i.indexes == [1,0] - - def test_C_viewiterator_step(self): - #iteration in C order with #contiguous layout => strides[-1] is 1 - #skip less than the shape - start = 0 - shape = [3, 5] - strides = [5, 1] - backstrides = [x * (y - 1) for x,y in zip(strides, shape)] - assert backstrides == [10, 4] - i = MultiDimViewIterator(MockArray, start, strides, backstrides, shape) - i.next_skip_x(2) - i.next_skip_x(2) - i.next_skip_x(2) - assert i.offset == 6 - assert not i.done() - assert i.indexes == [1,1] - #And for some big skips - i.next_skip_x(5) - assert i.offset == 11 - assert i.indexes == [2,1] - i.next_skip_x(5) - # Note: the offset does not overflow but recycles, - # this is good for broadcast - assert i.offset == 1 - assert i.indexes == [0,1] - assert i.done() - - #Now what happens if the array is transposed? 
strides[-1] != 1 - # therefore layout is non-contiguous - strides = [1, 3] - backstrides = [x * (y - 1) for x,y in zip(strides, shape)] - assert backstrides == [2, 12] - i = MultiDimViewIterator(MockArray, start, strides, backstrides, shape) - i.next_skip_x(2) - i.next_skip_x(2) - i.next_skip_x(2) - assert i.offset == 4 - assert i.indexes == [1,1] - assert not i.done() - i.next_skip_x(5) - assert i.offset == 5 - assert i.indexes == [2,1] - assert not i.done() - i.next_skip_x(5) - assert i.indexes == [0,1] - assert i.offset == 3 - assert i.done() diff --git a/pypy/module/micronumpy/test/test_iterators.py b/pypy/module/micronumpy/test/test_iterators.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_iterators.py @@ -0,0 +1,96 @@ +from pypy.module.micronumpy import support +from pypy.module.micronumpy.iterators import ArrayIter + + +class MockArray(object): + start = 0 + + +class TestIterDirect(object): + def test_iterator_basic(self): + #Let's get started, simple iteration in C order with + #contiguous layout => strides[-1] is 1 + shape = [3, 5] + strides = [5, 1] + backstrides = [x * (y - 1) for x,y in zip(strides, shape)] + assert backstrides == [10, 4] + i = ArrayIter(MockArray, support.product(shape), shape, + strides, backstrides) + i.next() + i.next() + i.next() + assert i.offset == 3 + assert not i.done() + assert i.indices == [0,3] + #cause a dimension overflow + i.next() + i.next() + assert i.offset == 5 + assert i.indices == [1,0] + + #Now what happens if the array is transposed? strides[-1] != 1 + # therefore layout is non-contiguous + strides = [1, 3] + backstrides = [x * (y - 1) for x,y in zip(strides, shape)] + assert backstrides == [2, 12] + i = ArrayIter(MockArray, support.product(shape), shape, + strides, backstrides) + i.next() + i.next() + i.next() + assert i.offset == 9 + assert not i.done() + assert i.indices == [0,3] + #cause a dimension overflow + i.next() + i.next() + assert i.offset == 1 + assert i.indices == [1,0] + + def test_iterator_step(self): + #iteration in C order with #contiguous layout => strides[-1] is 1 + #skip less than the shape + shape = [3, 5] + strides = [5, 1] + backstrides = [x * (y - 1) for x,y in zip(strides, shape)] + assert backstrides == [10, 4] + i = ArrayIter(MockArray, support.product(shape), shape, + strides, backstrides) + i.next_skip_x(2) + i.next_skip_x(2) + i.next_skip_x(2) + assert i.offset == 6 + assert not i.done() + assert i.indices == [1,1] + #And for some big skips + i.next_skip_x(5) + assert i.offset == 11 + assert i.indices == [2,1] + i.next_skip_x(5) + # Note: the offset does not overflow but recycles, + # this is good for broadcast + assert i.offset == 1 + assert i.indices == [0,1] + assert i.done() + + #Now what happens if the array is transposed? 
strides[-1] != 1 + # therefore layout is non-contiguous + strides = [1, 3] + backstrides = [x * (y - 1) for x,y in zip(strides, shape)] + assert backstrides == [2, 12] + i = ArrayIter(MockArray, support.product(shape), shape, + strides, backstrides) + i.next_skip_x(2) + i.next_skip_x(2) + i.next_skip_x(2) + assert i.offset == 4 + assert i.indices == [1,1] + assert not i.done() + i.next_skip_x(5) + assert i.offset == 5 + assert i.indices == [2,1] + assert not i.done() + i.next_skip_x(5) + assert i.indices == [0,1] + assert i.offset == 3 + assert i.done() diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -1740,10 +1740,11 @@ a = array([(1, 2)], dtype=[('a', 'int64'), ('b', 'int64')])[0] assert a.shape == () assert a.view('S16') == '\x01' + '\x00' * 7 + '\x02' - a = array(2, dtype='int64') - b = a.view('complex64') + a = array(2, dtype='0: + if len(out.get_shape()) > 0: raise oefmt(space.w_ValueError, "output parameter for reduction operation %s has " "too many dimensions", self.name) @@ -260,7 +269,8 @@ return out if keepdims: shape = [1] * len(obj_shape) - out = W_NDimArray.from_shape(space, [1] * len(obj_shape), dtype, w_instance=obj) + out = W_NDimArray.from_shape(space, [1] * len(obj_shape), dtype, + w_instance=obj) out.implementation.setitem(0, res) return out return res @@ -272,6 +282,7 @@ raise OperationError(space.w_ValueError, space.wrap( "outer product only supported for binary functions")) + class W_Ufunc1(W_Ufunc): _immutable_fields_ = ["func", "bool_result"] argcount = 1 @@ -544,6 +555,7 @@ dtypenum += 2 return descriptor.get_dtype_cache(space).dtypes_by_num[dtypenum] + @jit.unroll_safe def find_unaryop_result_dtype(space, dt, promote_to_float=False, promote_bools=False, promote_to_largest=False): @@ -570,6 +582,7 @@ return dtype return dt + def find_dtype_for_scalar(space, w_obj, current_guess=None): bool_dtype = descriptor.get_dtype_cache(space).w_booldtype long_dtype = descriptor.get_dtype_cache(space).w_longdtype @@ -611,9 +624,9 @@ 'unable to create dtype from objects, "%T" instance not ' 'supported', w_obj) + def ufunc_dtype_caller(space, ufunc_name, op_name, argcount, comparison_func, bool_result): - dtype_cache = descriptor.get_dtype_cache(space) def get_op(dtype): try: return getattr(dtype.itemtype, op_name) @@ -621,6 +634,7 @@ raise oefmt(space.w_NotImplementedError, "%s not implemented for %s", ufunc_name, dtype.get_name()) + dtype_cache = descriptor.get_dtype_cache(space) if argcount == 1: def impl(res_dtype, value): res = get_op(res_dtype)(value) @@ -762,6 +776,6 @@ ufunc = W_Ufunc2(func, ufunc_name, **extra_kwargs) setattr(self, ufunc_name, ufunc) + def get(space): return space.fromcache(UfuncState) - diff --git a/rpython/jit/codewriter/call.py b/rpython/jit/codewriter/call.py --- a/rpython/jit/codewriter/call.py +++ b/rpython/jit/codewriter/call.py @@ -247,14 +247,14 @@ from rpython.jit.codewriter.policy import log; log.WARNING( "in operation %r: this calls a _jit_loop_invariant_ function," " but this contradicts other sources (e.g. it can have random" - " effects)" % (op,)) + " effects): EF=%s" % (op, extraeffect)) if elidable: if extraeffect not in (EffectInfo.EF_ELIDABLE_CANNOT_RAISE, EffectInfo.EF_ELIDABLE_CAN_RAISE): from rpython.jit.codewriter.policy import log; log.WARNING( "in operation %r: this calls an _elidable_function_," " but this contradicts other sources (e.g. 
it can have random" - " effects)" % (op,)) + " effects): EF=%s" % (op, extraeffect)) # effectinfo = effectinfo_from_writeanalyze( self.readwrite_analyzer.analyze(op, self.seen), self.cpu, From noreply at buildbot.pypy.org Fri Feb 28 09:56:55 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 28 Feb 2014 09:56:55 +0100 (CET) Subject: [pypy-commit] pypy default: update doc Message-ID: <20140228085655.5D26E1D23C3@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69540:ffbbc475629f Date: 2014-02-28 10:56 +0200 http://bitbucket.org/pypy/pypy/changeset/ffbbc475629f/ Log: update doc diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -30,7 +30,8 @@ it you would not be able to find the standard library (and run pretty much nothing). Arguments: - * ``home``: null terminated path + * ``home``: null terminated path to an executable inside the pypy directory + (can be a .so name, can be made up) * ``verbose``: if non-zero, would print error messages to stderr From noreply at buildbot.pypy.org Fri Feb 28 10:38:14 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 28 Feb 2014 10:38:14 +0100 (CET) Subject: [pypy-commit] cffi default: Update TODO Message-ID: <20140228093814.7DD2D1C0613@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1465:bf72e646e661 Date: 2014-02-28 10:36 +0100 http://bitbucket.org/cffi/cffi/changeset/bf72e646e661/ Log: Update TODO diff --git a/TODO b/TODO --- a/TODO +++ b/TODO @@ -7,4 +7,4 @@ an opaque type that works like a struct (so we can't get the value out of it). -_cffi backend for PyPy +accept and kill "static inline" in the cdefs From noreply at buildbot.pypy.org Fri Feb 28 10:38:15 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 28 Feb 2014 10:38:15 +0100 (CET) Subject: [pypy-commit] cffi default: Fix ffi.dlopen(None): we can't replace it with ffi.dlopen("c") on POSIX Message-ID: <20140228093815.9DC021C0613@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1466:90be5bcd6bc8 Date: 2014-02-28 10:37 +0100 http://bitbucket.org/cffi/cffi/changeset/90be5bcd6bc8/ Log: Fix ffi.dlopen(None): we can't replace it with ffi.dlopen("c") on POSIX systems. It has a different meaning: getting a handle that works for *any* already-loaded library, not just "libc.so". This breaks obscurely a test that is already obscure, so ignoring that (I checked that in C it would break the same way anyway). diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -1,4 +1,4 @@ -import types +import sys, types from .lock import allocate_lock try: @@ -389,22 +389,27 @@ return self._backend.from_handle(x) -def _make_ffi_library(ffi, libname, flags): - import os - name = libname +def _load_backend_lib(backend, name, flags): if name is None: - name = 'c' # on Posix only - backend = ffi._backend + if sys.platform != "win32": + return backend.load_library(None, flags) + name = "c" # Windows: load_library(None) fails, but this works + # (backward compatibility hack only) try: if '.' 
not in name and '/' not in name: raise OSError("library not found: %r" % (name,)) - backendlib = backend.load_library(name, flags) + return backend.load_library(name, flags) except OSError: import ctypes.util path = ctypes.util.find_library(name) if path is None: raise # propagate the original OSError - backendlib = backend.load_library(path, flags) + return backend.load_library(path, flags) + +def _make_ffi_library(ffi, libname, flags): + import os + backend = ffi._backend + backendlib = _load_backend_lib(backend, libname, flags) copied_enums = [] # def make_accessor_locked(name): diff --git a/testing/test_function.py b/testing/test_function.py --- a/testing/test_function.py +++ b/testing/test_function.py @@ -250,22 +250,14 @@ py.test.skip("probably no symbol 'stdout' in the lib") ffi = FFI(backend=self.Backend()) ffi.cdef(""" - int puts(const char *); - void *stdout, *stderr; + void *stdout; """) - ffi.C = ffi.dlopen(None) - pout = ffi.C.stdout - perr = ffi.C.stderr - assert repr(pout).startswith(" Author: Armin Rigo Branch: Changeset: r1467:fdc1c8a70bd9 Date: 2014-02-28 10:49 +0100 http://bitbucket.org/cffi/cffi/changeset/fdc1c8a70bd9/ Log: Add a passing test diff --git a/c/test_c.py b/c/test_c.py --- a/c/test_c.py +++ b/c/test_c.py @@ -370,6 +370,9 @@ assert x.load_function(BVoidP, 'strcpy') py.test.raises(KeyError, x.load_function, BVoidP, 'xxx_this_function_does_not_exist') + # the next one is from 'libm', not 'libc', but we assume + # that it is already loaded too, so it should work + assert x.load_function(BVoidP, 'sqrt') def test_hash_differences(): BChar = new_primitive_type("char") From noreply at buildbot.pypy.org Fri Feb 28 10:51:39 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 28 Feb 2014 10:51:39 +0100 (CET) Subject: [pypy-commit] cffi default: Upgrade the version number. (One test in test_version is known to fail) Message-ID: <20140228095139.AD2211C0613@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r1468:8aa5648077c6 Date: 2014-02-28 10:51 +0100 http://bitbucket.org/cffi/cffi/changeset/8aa5648077c6/ Log: Upgrade the version number. (One test in test_version is known to fail) diff --git a/cffi/__init__.py b/cffi/__init__.py --- a/cffi/__init__.py +++ b/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8.1" -__version_info__ = (0, 8, 1) +__version__ = "0.8.2" +__version_info__ = (0, 8, 2) diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -47,7 +47,7 @@ # The short X.Y version. version = '0.8' # The full version, including alpha/beta/rc tags. -release = '0.8.1' +release = '0.8.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
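
As a usage illustration of the ffi.dlopen(None) semantics restored in changeset
90be5bcd6bc8 above: on POSIX the None handle covers every library already loaded
into the process, so a libm symbol such as sqrt can be looked up without naming
libm.  This is an assumed standalone sketch, not code from the cffi tree, and,
like the new test in test_c.py, it assumes libm is in fact already loaded:

    from cffi import FFI

    ffi = FFI()
    ffi.cdef("double sqrt(double x);")   # declaration only, no library named
    lib = ffi.dlopen(None)               # POSIX: all already-loaded libraries
    assert lib.sqrt(4.0) == 2.0
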
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -112,7 +112,7 @@ `Mailing list `_ """, - version='0.8.1', + version='0.8.2', packages=['cffi'], zip_safe=False, diff --git a/testing/test_version.py b/testing/test_version.py --- a/testing/test_version.py +++ b/testing/test_version.py @@ -9,7 +9,8 @@ '0.4.2': '0.4', # did not change '0.7.1': '0.7', # did not change '0.7.2': '0.7', # did not change - '0.8.1': '0.8', # did not change + '0.8.1': '0.8', # did not change (essentially) + '0.8.2': '0.8', # did not change } def test_version(): From noreply at buildbot.pypy.org Fri Feb 28 10:56:01 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 28 Feb 2014 10:56:01 +0100 (CET) Subject: [pypy-commit] pypy default: Update to cffi/8aa5648077c6 Message-ID: <20140228095601.8639D1C0613@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69541:8d8d72389794 Date: 2014-02-28 10:52 +0100 http://bitbucket.org/pypy/pypy/changeset/8d8d72389794/ Log: Update to cffi/8aa5648077c6 diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,5 +4,5 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "0.8.1" -__version_info__ = (0, 8, 1) +__version__ = "0.8.2" +__version_info__ = (0, 8, 2) diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -1,4 +1,4 @@ -import types +import sys, types from .lock import allocate_lock try: @@ -88,18 +88,20 @@ self.NULL = self.cast(self.BVoidP, 0) self.CData, self.CType = backend._get_types() - def cdef(self, csource, override=False): + def cdef(self, csource, override=False, packed=False): """Parse the given C source. This registers all declared functions, types, and global variables. The functions and global variables can then be accessed via either 'ffi.dlopen()' or 'ffi.verify()'. The types can be used in 'ffi.new()' and other functions. + If 'packed' is specified as True, all structs declared inside this + cdef are packed, i.e. laid out without any field alignment at all. """ if not isinstance(csource, str): # unicode, on Python 2 if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') with self._lock: - self._parser.parse(csource, override=override) + self._parser.parse(csource, override=override, packed=packed) self._cdefsources.append(csource) if override: for cache in self._function_caches: @@ -387,22 +389,27 @@ return self._backend.from_handle(x) -def _make_ffi_library(ffi, libname, flags): - import os - name = libname +def _load_backend_lib(backend, name, flags): if name is None: - name = 'c' # on Posix only - backend = ffi._backend + if sys.platform != "win32": + return backend.load_library(None, flags) + name = "c" # Windows: load_library(None) fails, but this works + # (backward compatibility hack only) try: if '.' 
not in name and '/' not in name: raise OSError("library not found: %r" % (name,)) - backendlib = backend.load_library(name, flags) + return backend.load_library(name, flags) except OSError: import ctypes.util path = ctypes.util.find_library(name) if path is None: raise # propagate the original OSError - backendlib = backend.load_library(path, flags) + return backend.load_library(path, flags) + +def _make_ffi_library(ffi, libname, flags): + import os + backend = ffi._backend + backendlib = _load_backend_lib(backend, libname, flags) copied_enums = [] # def make_accessor_locked(name): diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py --- a/lib_pypy/cffi/backend_ctypes.py +++ b/lib_pypy/cffi/backend_ctypes.py @@ -720,7 +720,7 @@ return self._new_struct_or_union('union', name, ctypes.Union) def complete_struct_or_union(self, CTypesStructOrUnion, fields, tp, - totalsize=-1, totalalignment=-1): + totalsize=-1, totalalignment=-1, sflags=0): if totalsize >= 0 or totalalignment >= 0: raise NotImplementedError("the ctypes backend of CFFI does not support " "structures completed by verify(); please " @@ -739,6 +739,8 @@ else: cfields.append((fname, BField._ctype, bitsize)) bfield_types[fname] = Ellipsis + if sflags & 8: + struct_or_union._pack_ = 1 struct_or_union._fields_ = cfields CTypesStructOrUnion._bfield_types = bfield_types # diff --git a/lib_pypy/cffi/cparser.py b/lib_pypy/cffi/cparser.py --- a/lib_pypy/cffi/cparser.py +++ b/lib_pypy/cffi/cparser.py @@ -98,6 +98,7 @@ self._anonymous_counter = 0 self._structnode2type = weakref.WeakKeyDictionary() self._override = False + self._packed = False def _parse(self, csource): csource, macros = _preprocess(csource) @@ -147,13 +148,16 @@ msg = 'parse error\n%s' % (msg,) raise api.CDefError(msg) - def parse(self, csource, override=False): + def parse(self, csource, override=False, packed=False): prev_override = self._override + prev_packed = self._packed try: self._override = override + self._packed = packed self._internal_parse(csource) finally: self._override = prev_override + self._packed = prev_packed def _internal_parse(self, csource): ast, macros = self._parse(csource) @@ -476,6 +480,7 @@ if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" % (tp,)) + tp.packed = self._packed return tp def _make_partial(self, tp, nested): diff --git a/lib_pypy/cffi/model.py b/lib_pypy/cffi/model.py --- a/lib_pypy/cffi/model.py +++ b/lib_pypy/cffi/model.py @@ -1,4 +1,6 @@ +import types import weakref + from .lock import allocate_lock @@ -81,29 +83,29 @@ 'long': 'i', 'long long': 'i', 'signed char': 'i', - 'unsigned char': 'u', - 'unsigned short': 'u', - 'unsigned int': 'u', - 'unsigned long': 'u', - 'unsigned long long': 'u', + 'unsigned char': 'i', + 'unsigned short': 'i', + 'unsigned int': 'i', + 'unsigned long': 'i', + 'unsigned long long': 'i', 'float': 'f', 'double': 'f', 'long double': 'f', - '_Bool': 'u', + '_Bool': 'i', # the following types are not primitive in the C sense 'wchar_t': 'c', 'int8_t': 'i', - 'uint8_t': 'u', + 'uint8_t': 'i', 'int16_t': 'i', - 'uint16_t': 'u', + 'uint16_t': 'i', 'int32_t': 'i', - 'uint32_t': 'u', + 'uint32_t': 'i', 'int64_t': 'i', - 'uint64_t': 'u', + 'uint64_t': 'i', 'intptr_t': 'i', - 'uintptr_t': 'u', + 'uintptr_t': 'i', 'ptrdiff_t': 'i', - 'size_t': 'u', + 'size_t': 'i', 'ssize_t': 'i', } @@ -114,12 +116,8 @@ def is_char_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'c' - def is_signed_type(self): + def 
is_integer_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'i' - def is_unsigned_type(self): - return self.ALL_PRIMITIVE_TYPES[self.name] == 'u' - def is_integer_type(self): - return self.ALL_PRIMITIVE_TYPES[self.name] in 'iu' def is_float_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'f' @@ -259,6 +257,7 @@ fixedlayout = None completed = False partial = False + packed = False def __init__(self, name, fldnames, fldtypes, fldbitsize): self.name = name @@ -315,7 +314,11 @@ fldtypes = [tp.get_cached_btype(ffi, finishlist) for tp in self.fldtypes] lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) - ffi._backend.complete_struct_or_union(BType, lst, self) + sflags = 0 + if self.packed: + sflags = 8 # SF_PACKED + ffi._backend.complete_struct_or_union(BType, lst, self, + -1, -1, sflags) # else: fldtypes = [] @@ -468,8 +471,7 @@ # initialize the __typecache attribute, either at the module level # if ffi._backend is a module, or at the class level if ffi._backend # is some instance. - ModuleType = type(weakref) - if isinstance(ffi._backend, ModuleType): + if isinstance(ffi._backend, types.ModuleType): ffi._backend.__typecache = weakref.WeakValueDictionary() else: type(ffi._backend).__typecache = weakref.WeakValueDictionary() diff --git a/lib_pypy/cffi/vengine_cpy.py b/lib_pypy/cffi/vengine_cpy.py --- a/lib_pypy/cffi/vengine_cpy.py +++ b/lib_pypy/cffi/vengine_cpy.py @@ -214,10 +214,7 @@ extraarg = '' if isinstance(tp, model.PrimitiveType): if tp.is_integer_type() and tp.name != '_Bool': - if tp.is_signed_type(): - converter = '_cffi_to_c_SIGNED' - else: - converter = '_cffi_to_c_UNSIGNED' + converter = '_cffi_to_c_int' extraarg = ', %s' % tp.name else: converter = '_cffi_to_c_%s' % (tp.name.replace(' ', '_'),) @@ -270,10 +267,7 @@ def _convert_expr_from_c(self, tp, var, context): if isinstance(tp, model.PrimitiveType): if tp.is_integer_type(): - if tp.is_signed_type(): - return '_cffi_from_c_SIGNED(%s, %s)' % (var, tp.name) - else: - return '_cffi_from_c_UNSIGNED(%s, %s)' % (var, tp.name) + return '_cffi_from_c_int(%s, %s)' % (var, tp.name) elif tp.name != 'long double': return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var) else: @@ -801,25 +795,23 @@ #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float PyFloat_AsDouble -#define _cffi_from_c_SIGNED(x, type) \ - (sizeof(type) <= sizeof(long) ? PyInt_FromLong(x) : \ - PyLong_FromLongLong(x)) -#define _cffi_from_c_UNSIGNED(x, type) \ - (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ - sizeof(type) == sizeof(long) ? PyLong_FromUnsignedLong(x) : \ - PyLong_FromUnsignedLongLong(x)) +#define _cffi_from_c_int(x, type) \ + (((type)-1) > 0 ? /* unsigned */ \ + (sizeof(type) < sizeof(long) ? PyInt_FromLong(x) : \ + sizeof(type) == sizeof(long) ? PyLong_FromUnsignedLong(x) : \ + PyLong_FromUnsignedLongLong(x)) \ + : (sizeof(type) <= sizeof(long) ? PyInt_FromLong(x) : \ + PyLong_FromLongLong(x))) -#define _cffi_to_c_SIGNED(o, type) \ - (sizeof(type) == 1 ? _cffi_to_c_i8(o) : \ - sizeof(type) == 2 ? _cffi_to_c_i16(o) : \ - sizeof(type) == 4 ? _cffi_to_c_i32(o) : \ - sizeof(type) == 8 ? _cffi_to_c_i64(o) : \ - (Py_FatalError("unsupported size for type " #type), 0)) -#define _cffi_to_c_UNSIGNED(o, type) \ - (sizeof(type) == 1 ? _cffi_to_c_u8(o) : \ - sizeof(type) == 2 ? _cffi_to_c_u16(o) : \ - sizeof(type) == 4 ? _cffi_to_c_u32(o) : \ - sizeof(type) == 8 ? _cffi_to_c_u64(o) : \ +#define _cffi_to_c_int(o, type) \ + (sizeof(type) == 1 ? (((type)-1) > 0 ? 
_cffi_to_c_u8(o) \ + : _cffi_to_c_i8(o)) : \ + sizeof(type) == 2 ? (((type)-1) > 0 ? _cffi_to_c_u16(o) \ + : _cffi_to_c_i16(o)) : \ + sizeof(type) == 4 ? (((type)-1) > 0 ? _cffi_to_c_u32(o) \ + : _cffi_to_c_i32(o)) : \ + sizeof(type) == 8 ? (((type)-1) > 0 ? _cffi_to_c_u64(o) \ + : _cffi_to_c_i64(o)) : \ (Py_FatalError("unsupported size for type " #type), 0)) #define _cffi_to_c_i8 \ diff --git a/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py b/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py --- a/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py +++ b/pypy/module/test_lib_pypy/cffi_tests/backend_tests.py @@ -1,5 +1,6 @@ # Generated by pypy/tool/import_cffi.py import py +import platform import sys, ctypes from cffi import FFI, CDefError from pypy.module.test_lib_pypy.cffi_tests.support import * @@ -756,6 +757,8 @@ p = ffi.cast("long long", ffi.cast("wchar_t", -1)) if SIZE_OF_WCHAR == 2: # 2 bytes, unsigned assert int(p) == 0xffff + elif platform.machine() == 'aarch64': # 4 bytes, unsigned + assert int(p) == 0xffffffff else: # 4 bytes, signed assert int(p) == -1 p = ffi.cast("int", u+'\u1234') @@ -1550,3 +1553,21 @@ ffi2.include(ffi1) p = ffi2.new("foo_p", [142]) assert p.x == 142 + + def test_struct_packed(self): + ffi = FFI(backend=self.Backend()) + ffi.cdef("struct nonpacked { char a; int b; };") + ffi.cdef("struct is_packed { char a; int b; };", packed=True) + assert ffi.sizeof("struct nonpacked") == 8 + assert ffi.sizeof("struct is_packed") == 5 + assert ffi.alignof("struct nonpacked") == 4 + assert ffi.alignof("struct is_packed") == 1 + s = ffi.new("struct is_packed[2]") + s[0].b = 42623381 + s[0].a = 'X' + s[1].b = -4892220 + s[1].a = 'Y' + assert s[0].b == 42623381 + assert s[0].a == 'X' + assert s[1].b == -4892220 + assert s[1].a == 'Y' diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_function.py b/pypy/module/test_lib_pypy/cffi_tests/test_function.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_function.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_function.py @@ -251,22 +251,14 @@ py.test.skip("probably no symbol 'stdout' in the lib") ffi = FFI(backend=self.Backend()) ffi.cdef(""" - int puts(const char *); - void *stdout, *stderr; + void *stdout; """) - ffi.C = ffi.dlopen(None) - pout = ffi.C.stdout - perr = ffi.C.stderr - assert repr(pout).startswith("') + lib = ffi.verify('#include ', libraries=["m"]) assert lib.sin(1.23) == math.sin(1.23) def test_rounding_1(): ffi = FFI() ffi.cdef("float sin(double x);") - lib = ffi.verify('#include ') + lib = ffi.verify('#include ', libraries=["m"]) res = lib.sin(1.23) assert res != math.sin(1.23) # not exact, because of double->float assert abs(res - math.sin(1.23)) < 1E-5 @@ -78,7 +78,7 @@ def test_rounding_2(): ffi = FFI() ffi.cdef("double sin(float x);") - lib = ffi.verify('#include ') + lib = ffi.verify('#include ', libraries=["m"]) res = lib.sin(1.23) assert res != math.sin(1.23) # not exact, because of double->float assert abs(res - math.sin(1.23)) < 1E-5 @@ -104,7 +104,7 @@ def test_longdouble(): ffi = FFI() ffi.cdef("long double sinl(long double x);") - lib = ffi.verify('#include ') + lib = ffi.verify('#include ', libraries=["m"]) for input in [1.23, ffi.cast("double", 1.23), ffi.cast("long double", 1.23)]: @@ -149,28 +149,27 @@ all_primitive_types = model.PrimitiveType.ALL_PRIMITIVE_TYPES -all_signed_integer_types = sorted(tp for tp in all_primitive_types - if all_primitive_types[tp] == 'i') -all_unsigned_integer_types = sorted(tp for tp in all_primitive_types - if all_primitive_types[tp] == 'u') 
+all_integer_types = sorted(tp for tp in all_primitive_types + if all_primitive_types[tp] == 'i') all_float_types = sorted(tp for tp in all_primitive_types if all_primitive_types[tp] == 'f') +def all_signed_integer_types(ffi): + return [x for x in all_integer_types if int(ffi.cast(x, -1)) < 0] + +def all_unsigned_integer_types(ffi): + return [x for x in all_integer_types if int(ffi.cast(x, -1)) > 0] + + def test_primitive_category(): for typename in all_primitive_types: tp = model.PrimitiveType(typename) C = tp.is_char_type() - U = tp.is_unsigned_type() - S = tp.is_signed_type() F = tp.is_float_type() I = tp.is_integer_type() assert C == (typename in ('char', 'wchar_t')) - assert U == (typename.startswith('unsigned ') or - typename == '_Bool' or typename == 'size_t' or - typename == 'uintptr_t' or typename.startswith('uint')) assert F == (typename in ('float', 'double', 'long double')) - assert S + U + F + C == 1 # one and only one of them is true - assert I == (S or U) + assert I + F + C == 1 # one and only one of them is true def test_all_integer_and_float_types(): typenames = [] @@ -208,7 +207,7 @@ def test_var_signed_integer_types(): ffi = FFI() - lst = all_signed_integer_types + lst = all_signed_integer_types(ffi) csource = "\n".join(["%s somevar_%s;" % (tp, tp.replace(' ', '_')) for tp in lst]) ffi.cdef(csource) @@ -227,7 +226,7 @@ def test_var_unsigned_integer_types(): ffi = FFI() - lst = all_unsigned_integer_types + lst = all_unsigned_integer_types(ffi) csource = "\n".join(["%s somevar_%s;" % (tp, tp.replace(' ', '_')) for tp in lst]) ffi.cdef(csource) @@ -248,7 +247,7 @@ def test_fn_signed_integer_types(): ffi = FFI() - lst = all_signed_integer_types + lst = all_signed_integer_types(ffi) cdefsrc = "\n".join(["%s somefn_%s(%s);" % (tp, tp.replace(' ', '_'), tp) for tp in lst]) ffi.cdef(cdefsrc) @@ -268,7 +267,7 @@ def test_fn_unsigned_integer_types(): ffi = FFI() - lst = all_unsigned_integer_types + lst = all_unsigned_integer_types(ffi) cdefsrc = "\n".join(["%s somefn_%s(%s);" % (tp, tp.replace(' ', '_'), tp) for tp in lst]) ffi.cdef(cdefsrc) @@ -465,11 +464,12 @@ def test_struct_float_vs_int(): if sys.platform == 'win32': py.test.skip("XXX fixme: only gives warnings") - for typename in all_signed_integer_types: + ffi = FFI() + for typename in all_signed_integer_types(ffi): for real in all_float_types: _check_field_match(typename, real, expect_mismatch=True) for typename in all_float_types: - for real in all_signed_integer_types: + for real in all_signed_integer_types(ffi): _check_field_match(typename, real, expect_mismatch=True) def test_struct_array_field(): @@ -1134,6 +1134,9 @@ xxx def test_opaque_integer_as_function_result(): + import platform + if platform.machine().startswith('sparc'): + py.test.skip('Breaks horribly on sparc (SIGILL + corrupted stack)') # XXX bad abuse of "struct { ...; }". It only works a bit by chance # anyway. XXX think about something better :-( ffi = FFI() @@ -1856,3 +1859,24 @@ def test_various_calls_libffi(): _test_various_calls(force_libffi=True) + +def test_ptr_to_opaque(): + ffi = FFI() + ffi.cdef("typedef ... 
foo_t; int f1(foo_t*); foo_t *f2(int);") + lib = ffi.verify(""" + #include + typedef struct { int x; } foo_t; + int f1(foo_t* p) { + int x = p->x; + free(p); + return x; + } + foo_t *f2(int x) { + foo_t *p = malloc(sizeof(foo_t)); + p->x = x; + return p; + } + """) + p = lib.f2(42) + x = lib.f1(p) + assert x == 42 diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_version.py b/pypy/module/test_lib_pypy/cffi_tests/test_version.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_version.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_version.py @@ -10,7 +10,8 @@ '0.4.2': '0.4', # did not change '0.7.1': '0.7', # did not change '0.7.2': '0.7', # did not change - '0.8.1': '0.8', # did not change + '0.8.1': '0.8', # did not change (essentially) + '0.8.2': '0.8', # did not change } def test_version(): diff --git a/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py b/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py --- a/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py +++ b/pypy/module/test_lib_pypy/cffi_tests/test_zdistutils.py @@ -26,7 +26,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=["m"]) v.write_source() with open(v.sourcefilename, 'r') as f: data = f.read() @@ -36,7 +37,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=["m"]) v.sourcefilename = filename = str(udir.join('write_source.c')) v.write_source() assert filename == v.sourcefilename @@ -48,7 +50,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=["m"]) try: from StringIO import StringIO except ImportError: @@ -61,7 +64,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there %s!*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=["m"]) v.compile_module() assert v.get_module_name().startswith('_cffi_') if v.generates_python_module(): @@ -72,7 +76,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there %s!2*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=["m"]) basename = self.__class__.__name__ + 'test_compile_module' v.modulefilename = filename = str(udir.join(basename + '.so')) v.compile_module() @@ -88,7 +93,8 @@ ffi = FFI() ffi.cdef("%s sin(double x);" % csrc) v = Verifier(ffi, "#include ", - force_generic_engine=self.generic) + force_generic_engine=self.generic, + libraries=["m"]) names.append(v.get_module_name()) assert names[0] == names[1] != names[2] @@ -105,7 +111,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there %s!3*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=["m"]) library = v.load_library() assert library.sin(12.3) == math.sin(12.3) @@ -115,7 +122,8 @@ csrc = '/*hi there %s!4*/#include "test_verifier_args.h"\n' % self udir.join('test_verifier_args.h').write('#include \n') v = Verifier(ffi, 
csrc, include_dirs=[str(udir)], - force_generic_engine=self.generic) + force_generic_engine=self.generic, + libraries=["m"]) library = v.load_library() assert library.sin(12.3) == math.sin(12.3) @@ -123,7 +131,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = "/*6%s*/\n#include " % self - lib = ffi.verify(csrc, force_generic_engine=self.generic) + lib = ffi.verify(csrc, force_generic_engine=self.generic, + libraries=["m"]) assert lib.sin(12.3) == math.sin(12.3) assert isinstance(ffi.verifier, Verifier) with open(ffi.verifier.sourcefilename, 'r') as f: @@ -140,7 +149,8 @@ #endif ''' lib = ffi.verify(csrc, define_macros=[('TEST_EXTENSION_OBJECT', '1')], - force_generic_engine=self.generic) + force_generic_engine=self.generic, + libraries=["m"]) assert lib.sin(12.3) == math.sin(12.3) v = ffi.verifier ext = v.get_extension() @@ -153,7 +163,8 @@ ffi = FFI() ffi.cdef("double sin(double x);") csrc = '/*hi there9!%s*/\n#include \n' % self - v = Verifier(ffi, csrc, force_generic_engine=self.generic) + v = Verifier(ffi, csrc, force_generic_engine=self.generic, + libraries=["m"]) assert not os.path.exists(v.sourcefilename) v.get_extension() assert os.path.exists(v.sourcefilename) From noreply at buildbot.pypy.org Fri Feb 28 10:56:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 28 Feb 2014 10:56:02 +0100 (CET) Subject: [pypy-commit] pypy default: merge heads Message-ID: <20140228095602.C88C51C0613@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69542:339fe18d37ef Date: 2014-02-28 10:55 +0100 http://bitbucket.org/pypy/pypy/changeset/339fe18d37ef/ Log: merge heads diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -30,7 +30,8 @@ it you would not be able to find the standard library (and run pretty much nothing). Arguments: - * ``home``: null terminated path + * ``home``: null terminated path to an executable inside the pypy directory + (can be a .so name, can be made up) * ``verbose``: if non-zero, would print error messages to stderr diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -85,3 +85,6 @@ .. branch: remove-intlong-smm kills int/long/smalllong/bool multimethods + +.. 
branch: numpy-refactor +Cleanup micronumpy module diff --git a/pypy/module/micronumpy/bench/dot.py b/pypy/module/micronumpy/bench/dot.py --- a/pypy/module/micronumpy/bench/dot.py +++ b/pypy/module/micronumpy/bench/dot.py @@ -1,28 +1,32 @@ +import sys import time try: - import numpypy + import numpypy as numpy except ImportError: - pass + import numpy -import numpy - -def get_matrix(): +def get_matrix(n): import random - n = 502 x = numpy.zeros((n,n), dtype=numpy.float64) for i in range(n): for j in range(n): x[i][j] = random.random() return x -def main(): - x = get_matrix() - y = get_matrix() +def main(n, r): + x = get_matrix(n) + y = get_matrix(n) a = time.time() - #z = numpy.dot(x, y) # uses numpy possibly-blas-lib dot - z = numpy.core.multiarray.dot(x, y) # uses strictly numpy C dot + for _ in xrange(r): + #z = numpy.dot(x, y) # uses numpy possibly-blas-lib dot + z = numpy.core.multiarray.dot(x, y) # uses strictly numpy C dot b = time.time() - print '%.2f seconds' % (b-a) + print '%d runs, %.2f seconds' % (r, b-a) -main() +n = int(sys.argv[1]) +try: + r = int(sys.argv[2]) +except IndexError: + r = 1 +main(n, r) diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -9,7 +9,7 @@ from rpython.rlib.nonconst import NonConstant from pypy.module.micronumpy import boxes, ufuncs from pypy.module.micronumpy.arrayops import where -from pypy.module.micronumpy.base import W_NDimArray +from pypy.module.micronumpy.ndarray import W_NDimArray from pypy.module.micronumpy.ctors import array from pypy.module.micronumpy.descriptor import get_dtype_cache diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -5,15 +5,18 @@ from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage, \ raw_storage_getitem, raw_storage_setitem, RAW_STORAGE from rpython.rtyper.lltypesystem import rffi, lltype -from pypy.module.micronumpy import support, loop, iter +from pypy.module.micronumpy import support, loop from pypy.module.micronumpy.base import convert_to_array, W_NDimArray, \ ArrayArgumentException +from pypy.module.micronumpy.iterators import ArrayIter from pypy.module.micronumpy.strides import (Chunk, Chunks, NewAxisChunk, RecordChunk, calc_strides, calc_new_strides, shape_agreement, - calculate_broadcast_strides, calculate_dot_strides) + calculate_broadcast_strides) class BaseConcreteArray(object): + _immutable_fields_ = ['dtype?', 'storage', 'start', 'size', 'shape[*]', + 'strides[*]', 'backstrides[*]', 'order'] start = 0 parent = None @@ -283,17 +286,9 @@ self.get_backstrides(), self.get_shape(), shape, backward_broadcast) - return iter.MultiDimViewIterator(self, self.start, - r[0], r[1], shape) - return iter.ArrayIterator(self) - - def create_axis_iter(self, shape, dim, cum): - return iter.AxisIterator(self, shape, dim, cum) - - def create_dot_iter(self, shape, skip): - r = calculate_dot_strides(self.get_strides(), self.get_backstrides(), - shape, skip) - return iter.MultiDimViewIterator(self, self.start, r[0], r[1], shape) + return ArrayIter(self, support.product(shape), shape, r[0], r[1]) + return ArrayIter(self, self.get_size(), self.shape, + self.strides, self.backstrides) def swapaxes(self, space, orig_arr, axis1, axis2): shape = self.get_shape()[:] @@ -357,6 +352,8 @@ orig_array) def set_dtype(self, space, dtype): + # size/shape/strides shouldn't change + assert 
dtype.elsize == self.dtype.elsize self.dtype = dtype def argsort(self, space, w_axis): diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py deleted file mode 100644 --- a/pypy/module/micronumpy/iter.py +++ /dev/null @@ -1,217 +0,0 @@ -""" This is a mini-tutorial on iterators, strides, and -memory layout. It assumes you are familiar with the terms, see -http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html -for a more gentle introduction. - -Given an array x: x.shape == [5,6], where each element occupies one byte - -At which byte in x.data does the item x[3,4] begin? -if x.strides==[1,5]: - pData = x.pData + (x.start + 3*1 + 4*5)*sizeof(x.pData[0]) - pData = x.pData + (x.start + 24) * sizeof(x.pData[0]) -so the offset of the element is 24 elements after the first - -What is the next element in x after coordinates [3,4]? -if x.order =='C': - next == [3,5] => offset is 28 -if x.order =='F': - next == [4,4] => offset is 24 -so for the strides [1,5] x is 'F' contiguous -likewise, for the strides [6,1] x would be 'C' contiguous. - -Iterators have an internal representation of the current coordinates -(indices), the array, strides, and backstrides. A short digression to -explain backstrides: what is the coordinate and offset after [3,5] in -the example above? -if x.order == 'C': - next == [4,0] => offset is 4 -if x.order == 'F': - next == [4,5] => offset is 25 -Note that in 'C' order we stepped BACKWARDS 24 while 'overflowing' a -shape dimension - which is back 25 and forward 1, - which is x.strides[1] * (x.shape[1] - 1) + x.strides[0] -so if we precalculate the overflow backstride as -[x.strides[i] * (x.shape[i] - 1) for i in range(len(x.shape))] -we can go faster. -All the calculations happen in next() - -next_skip_x(steps) tries to do the iteration for a number of steps at once, -but then we cannot guarantee that we only overflow one single shape -dimension, perhaps we could overflow times in one big step. 
-""" -from rpython.rlib import jit -from pypy.module.micronumpy import support -from pypy.module.micronumpy.base import W_NDimArray - - -class PureShapeIterator(object): - def __init__(self, shape, idx_w): - self.shape = shape - self.shapelen = len(shape) - self.indexes = [0] * len(shape) - self._done = False - self.idx_w = [None] * len(idx_w) - for i, w_idx in enumerate(idx_w): - if isinstance(w_idx, W_NDimArray): - self.idx_w[i] = w_idx.create_iter(shape) - - def done(self): - return self._done - - @jit.unroll_safe - def next(self): - for w_idx in self.idx_w: - if w_idx is not None: - w_idx.next() - for i in range(self.shapelen - 1, -1, -1): - if self.indexes[i] < self.shape[i] - 1: - self.indexes[i] += 1 - break - else: - self.indexes[i] = 0 - else: - self._done = True - - @jit.unroll_safe - def get_index(self, space, shapelen): - return [space.wrap(self.indexes[i]) for i in range(shapelen)] - - -class ArrayIterator(object): - def __init__(self, array): - self.array = array - self.start = array.start - self.size = array.get_size() - self.ndim_m1 = len(array.shape) - 1 - self.shape_m1 = [s - 1 for s in array.shape] - self.strides = array.strides[:] - self.backstrides = array.backstrides[:] - self.reset() - - def reset(self): - self.index = 0 - self.indices = [0] * (self.ndim_m1 + 1) - self.offset = self.start - - @jit.unroll_safe - def next(self): - self.index += 1 - for i in xrange(self.ndim_m1, -1, -1): - if self.indices[i] < self.shape_m1[i]: - self.indices[i] += 1 - self.offset += self.strides[i] - break - else: - self.indices[i] = 0 - self.offset -= self.backstrides[i] - - def next_skip_x(self, step): - # XXX implement - for _ in range(step): - self.next() - - def done(self): - return self.index >= self.size - - def getitem(self): - return self.array.getitem(self.offset) - - def getitem_bool(self): - return self.array.getitem_bool(self.offset) - - def setitem(self, elem): - self.array.setitem(self.offset, elem) - - -class MultiDimViewIterator(ArrayIterator): - def __init__(self, array, start, strides, backstrides, shape): - self.indexes = [0] * len(shape) - self.array = array - self.shape = shape - self.offset = start - self.shapelen = len(shape) - self._done = self.shapelen == 0 or support.product(shape) == 0 - self.strides = strides - self.backstrides = backstrides - self.size = array.size - - @jit.unroll_safe - def next(self): - offset = self.offset - for i in range(self.shapelen - 1, -1, -1): - if self.indexes[i] < self.shape[i] - 1: - self.indexes[i] += 1 - offset += self.strides[i] - break - else: - self.indexes[i] = 0 - offset -= self.backstrides[i] - else: - self._done = True - self.offset = offset - - @jit.unroll_safe - def next_skip_x(self, step): - for i in range(len(self.shape) - 1, -1, -1): - if self.indexes[i] < self.shape[i] - step: - self.indexes[i] += step - self.offset += self.strides[i] * step - break - else: - remaining_step = (self.indexes[i] + step) // self.shape[i] - this_i_step = step - remaining_step * self.shape[i] - self.offset += self.strides[i] * this_i_step - self.indexes[i] = self.indexes[i] + this_i_step - step = remaining_step - else: - self._done = True - - def done(self): - return self._done - - def reset(self): - self.offset %= self.size - - -class AxisIterator(ArrayIterator): - def __init__(self, array, shape, dim, cumulative): - self.shape = shape - strides = array.get_strides() - backstrides = array.get_backstrides() - if cumulative: - self.strides = strides - self.backstrides = backstrides - elif len(shape) == len(strides): - # keepdims = True 
- self.strides = strides[:dim] + [0] + strides[dim + 1:] - self.backstrides = backstrides[:dim] + [0] + backstrides[dim + 1:] - else: - self.strides = strides[:dim] + [0] + strides[dim:] - self.backstrides = backstrides[:dim] + [0] + backstrides[dim:] - self.first_line = True - self.indices = [0] * len(shape) - self._done = array.get_size() == 0 - self.offset = array.start - self.dim = dim - self.array = array - - @jit.unroll_safe - def next(self): - for i in range(len(self.shape) - 1, -1, -1): - if self.indices[i] < self.shape[i] - 1: - if i == self.dim: - self.first_line = False - self.indices[i] += 1 - self.offset += self.strides[i] - break - else: - if i == self.dim: - self.first_line = True - self.indices[i] = 0 - self.offset -= self.backstrides[i] - else: - self._done = True - - def done(self): - return self._done diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/iterators.py @@ -0,0 +1,167 @@ +""" This is a mini-tutorial on iterators, strides, and +memory layout. It assumes you are familiar with the terms, see +http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html +for a more gentle introduction. + +Given an array x: x.shape == [5,6], where each element occupies one byte + +At which byte in x.data does the item x[3,4] begin? +if x.strides==[1,5]: + pData = x.pData + (x.start + 3*1 + 4*5)*sizeof(x.pData[0]) + pData = x.pData + (x.start + 24) * sizeof(x.pData[0]) +so the offset of the element is 24 elements after the first + +What is the next element in x after coordinates [3,4]? +if x.order =='C': + next == [3,5] => offset is 28 +if x.order =='F': + next == [4,4] => offset is 24 +so for the strides [1,5] x is 'F' contiguous +likewise, for the strides [6,1] x would be 'C' contiguous. + +Iterators have an internal representation of the current coordinates +(indices), the array, strides, and backstrides. A short digression to +explain backstrides: what is the coordinate and offset after [3,5] in +the example above? +if x.order == 'C': + next == [4,0] => offset is 4 +if x.order == 'F': + next == [4,5] => offset is 25 +Note that in 'C' order we stepped BACKWARDS 24 while 'overflowing' a +shape dimension + which is back 25 and forward 1, + which is x.strides[1] * (x.shape[1] - 1) + x.strides[0] +so if we precalculate the overflow backstride as +[x.strides[i] * (x.shape[i] - 1) for i in range(len(x.shape))] +we can go faster. +All the calculations happen in next() + +next_skip_x(steps) tries to do the iteration for a number of steps at once, +but then we cannot guarantee that we only overflow one single shape +dimension, perhaps we could overflow times in one big step. 
+""" +from rpython.rlib import jit +from pypy.module.micronumpy import support +from pypy.module.micronumpy.base import W_NDimArray + + +class PureShapeIter(object): + def __init__(self, shape, idx_w): + self.shape = shape + self.shapelen = len(shape) + self.indexes = [0] * len(shape) + self._done = False + self.idx_w = [None] * len(idx_w) + for i, w_idx in enumerate(idx_w): + if isinstance(w_idx, W_NDimArray): + self.idx_w[i] = w_idx.create_iter(shape) + + def done(self): + return self._done + + @jit.unroll_safe + def next(self): + for w_idx in self.idx_w: + if w_idx is not None: + w_idx.next() + for i in range(self.shapelen - 1, -1, -1): + if self.indexes[i] < self.shape[i] - 1: + self.indexes[i] += 1 + break + else: + self.indexes[i] = 0 + else: + self._done = True + + @jit.unroll_safe + def get_index(self, space, shapelen): + return [space.wrap(self.indexes[i]) for i in range(shapelen)] + + +class ArrayIter(object): + _immutable_fields_ = ['array', 'size', 'indices', 'shape[*]', + 'strides[*]', 'backstrides[*]'] + + def __init__(self, array, size, shape, strides, backstrides): + assert len(shape) == len(strides) == len(backstrides) + self.array = array + self.size = size + self.indices = [0] * len(shape) + self.shape = shape + self.strides = strides + self.backstrides = backstrides + self.reset() + + @jit.unroll_safe + def reset(self): + self.index = 0 + for i in xrange(len(self.shape)): + self.indices[i] = 0 + self.offset = self.array.start + + @jit.unroll_safe + def next(self): + self.index += 1 + for i in xrange(len(self.shape) - 1, -1, -1): + if self.indices[i] < self.shape[i] - 1: + self.indices[i] += 1 + self.offset += self.strides[i] + break + else: + self.indices[i] = 0 + self.offset -= self.backstrides[i] + + @jit.unroll_safe + def next_skip_x(self, step): + assert step >= 0 + if step == 0: + return + self.index += step + for i in xrange(len(self.shape) - 1, -1, -1): + if self.indices[i] < self.shape[i] - step: + self.indices[i] += step + self.offset += self.strides[i] * step + break + else: + remaining_step = (self.indices[i] + step) // self.shape[i] + this_i_step = step - remaining_step * self.shape[i] + self.indices[i] = self.indices[i] + this_i_step + self.offset += self.strides[i] * this_i_step + step = remaining_step + assert step > 0 + + def done(self): + return self.index >= self.size + + def getitem(self): + return self.array.getitem(self.offset) + + def getitem_bool(self): + return self.array.getitem_bool(self.offset) + + def setitem(self, elem): + self.array.setitem(self.offset, elem) + + +def AxisIter(array, shape, axis, cumulative): + strides = array.get_strides() + backstrides = array.get_backstrides() + if not cumulative: + if len(shape) == len(strides): + # keepdims = True + strides = strides[:axis] + [0] + strides[axis + 1:] + backstrides = backstrides[:axis] + [0] + backstrides[axis + 1:] + else: + strides = strides[:axis] + [0] + strides[axis:] + backstrides = backstrides[:axis] + [0] + backstrides[axis:] + return ArrayIter(array, support.product(shape), shape, strides, backstrides) + + +def AllButAxisIter(array, axis): + size = array.get_size() + shape = array.get_shape()[:] + backstrides = array.backstrides[:] + if size: + size /= shape[axis] + shape[axis] = backstrides[axis] = 0 + return ArrayIter(array, size, shape, array.strides, backstrides) diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -8,7 +8,8 @@ from rpython.rtyper.lltypesystem import lltype, 
rffi from pypy.module.micronumpy import support, constants as NPY from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy.iter import PureShapeIterator +from pypy.module.micronumpy.iterators import PureShapeIter, AxisIter, \ + AllButAxisIter call2_driver = jit.JitDriver(name='numpy_call2', @@ -203,9 +204,9 @@ def do_axis_reduce(space, shape, func, arr, dtype, axis, out, identity, cumulative, temp): - out_iter = out.create_axis_iter(arr.get_shape(), axis, cumulative) + out_iter = AxisIter(out.implementation, arr.get_shape(), axis, cumulative) if cumulative: - temp_iter = temp.create_axis_iter(arr.get_shape(), axis, False) + temp_iter = AxisIter(temp.implementation, arr.get_shape(), axis, False) else: temp_iter = out_iter # hack arr_iter = arr.create_iter() @@ -215,16 +216,14 @@ while not out_iter.done(): axis_reduce__driver.jit_merge_point(shapelen=shapelen, func=func, dtype=dtype) - if arr_iter.done(): - w_val = identity + assert not arr_iter.done() + w_val = arr_iter.getitem().convert_to(space, dtype) + if out_iter.indices[axis] == 0: + if identity is not None: + w_val = func(dtype, identity, w_val) else: - w_val = arr_iter.getitem().convert_to(space, dtype) - if out_iter.first_line: - if identity is not None: - w_val = func(dtype, identity, w_val) - else: - cur = temp_iter.getitem() - w_val = func(dtype, cur, w_val) + cur = temp_iter.getitem() + w_val = func(dtype, cur, w_val) out_iter.setitem(w_val) if cumulative: temp_iter.setitem(w_val) @@ -261,7 +260,6 @@ argmin = _new_argmin_argmax('min') argmax = _new_argmin_argmax('max') -# note that shapelen == 2 always dot_driver = jit.JitDriver(name = 'numpy_dot', greens = ['dtype'], reds = 'auto') @@ -282,25 +280,27 @@ ''' left_shape = left.get_shape() right_shape = right.get_shape() - broadcast_shape = left_shape[:-1] + right_shape - left_skip = [len(left_shape) - 1 + i for i in range(len(right_shape)) - if i != right_critical_dim] - right_skip = range(len(left_shape) - 1) - result_skip = [len(result.get_shape()) - (len(right_shape) > 1)] + assert left_shape[-1] == right_shape[right_critical_dim] assert result.get_dtype() == dtype - outi = result.create_dot_iter(broadcast_shape, result_skip) - lefti = left.create_dot_iter(broadcast_shape, left_skip) - righti = right.create_dot_iter(broadcast_shape, right_skip) - while not outi.done(): - dot_driver.jit_merge_point(dtype=dtype) - lval = lefti.getitem().convert_to(space, dtype) - rval = righti.getitem().convert_to(space, dtype) - outval = outi.getitem() - v = dtype.itemtype.mul(lval, rval) - v = dtype.itemtype.add(v, outval) - outi.setitem(v) - outi.next() - righti.next() + outi = result.create_iter() + lefti = AllButAxisIter(left.implementation, len(left_shape) - 1) + righti = AllButAxisIter(right.implementation, right_critical_dim) + while not lefti.done(): + while not righti.done(): + oval = outi.getitem() + i1 = lefti.offset + i2 = righti.offset + for _ in xrange(left.implementation.shape[-1]): + dot_driver.jit_merge_point(dtype=dtype) + lval = left.implementation.getitem(i1).convert_to(space, dtype) + rval = right.implementation.getitem(i2).convert_to(space, dtype) + oval = dtype.itemtype.add(oval, dtype.itemtype.mul(lval, rval)) + i1 += left.implementation.strides[-1] + i2 += right.implementation.strides[right_critical_dim] + outi.setitem(oval) + outi.next() + righti.next() + righti.reset() lefti.next() return result @@ -478,7 +478,7 @@ prefixlen = len(prefix_w) indexlen = len(indexes_w) dtype = arr.get_dtype() - iter = PureShapeIterator(iter_shape, indexes_w) + 
iter = PureShapeIter(iter_shape, indexes_w) indexlen = len(indexes_w) while not iter.done(): getitem_int_driver.jit_merge_point(shapelen=shapelen, indexlen=indexlen, @@ -507,7 +507,7 @@ indexlen = len(indexes_w) prefixlen = len(prefix_w) dtype = arr.get_dtype() - iter = PureShapeIterator(iter_shape, indexes_w) + iter = PureShapeIter(iter_shape, indexes_w) while not iter.done(): setitem_int_driver.jit_merge_point(shapelen=shapelen, indexlen=indexlen, dtype=dtype, prefixlen=prefixlen) @@ -632,7 +632,7 @@ def diagonal_array(space, arr, out, offset, axis1, axis2, shape): out_iter = out.create_iter() - iter = PureShapeIterator(shape, []) + iter = PureShapeIter(shape, []) shapelen_minus_1 = len(shape) - 1 assert shapelen_minus_1 >= 0 if axis1 < axis2: diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -286,12 +286,6 @@ return self.implementation.create_iter( shape=shape, backward_broadcast=backward_broadcast) - def create_axis_iter(self, shape, dim, cum): - return self.implementation.create_axis_iter(shape, dim, cum) - - def create_dot_iter(self, shape, skip): - return self.implementation.create_dot_iter(shape, skip) - def is_scalar(self): return len(self.get_shape()) == 0 diff --git a/pypy/module/micronumpy/sort.py b/pypy/module/micronumpy/sort.py --- a/pypy/module/micronumpy/sort.py +++ b/pypy/module/micronumpy/sort.py @@ -1,6 +1,3 @@ -""" This is the implementation of various sorting routines in numpy. It's here -because it only makes sense on a concrete array -""" from pypy.interpreter.error import OperationError, oefmt from rpython.rlib.listsort import make_timsort_class from rpython.rlib.objectmodel import specialize @@ -11,10 +8,15 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.micronumpy import descriptor, types, constants as NPY from pypy.module.micronumpy.base import W_NDimArray -from pypy.module.micronumpy.iter import AxisIterator +from pypy.module.micronumpy.iterators import AllButAxisIter INT_SIZE = rffi.sizeof(lltype.Signed) +all_types = (types.all_float_types + types.all_complex_types + + types.all_int_types) +all_types = [i for i in all_types if not issubclass(i[0], types.Float16)] +all_types = unrolling_iterable(all_types) + def make_argsort_function(space, itemtype, comp_type, count=1): TP = itemtype.T @@ -146,21 +148,20 @@ if axis < 0 or axis >= len(shape): raise OperationError(space.w_IndexError, space.wrap( "Wrong axis %d" % axis)) - iterable_shape = shape[:axis] + [0] + shape[axis + 1:] - iter = AxisIterator(arr, iterable_shape, axis, False) + arr_iter = AllButAxisIter(arr, axis) index_impl = index_arr.implementation - index_iter = AxisIterator(index_impl, iterable_shape, axis, False) + index_iter = AllButAxisIter(index_impl, axis) stride_size = arr.strides[axis] index_stride_size = index_impl.strides[axis] axis_size = arr.shape[axis] - while not iter.done(): + while not arr_iter.done(): for i in range(axis_size): raw_storage_setitem(storage, i * index_stride_size + index_iter.offset, i) r = Repr(index_stride_size, stride_size, axis_size, - arr.get_storage(), storage, index_iter.offset, iter.offset) + arr.get_storage(), storage, index_iter.offset, arr_iter.offset) ArgSort(r).sort() - iter.next() + arr_iter.next() index_iter.next() return index_arr @@ -292,14 +293,13 @@ if axis < 0 or axis >= len(shape): raise OperationError(space.w_IndexError, space.wrap( "Wrong axis %d" % axis)) - iterable_shape = shape[:axis] + [0] + shape[axis + 1:] 
- iter = AxisIterator(arr, iterable_shape, axis, False) + arr_iter = AllButAxisIter(arr, axis) stride_size = arr.strides[axis] axis_size = arr.shape[axis] - while not iter.done(): - r = Repr(stride_size, axis_size, arr.get_storage(), iter.offset) + while not arr_iter.done(): + r = Repr(stride_size, axis_size, arr.get_storage(), arr_iter.offset) ArgSort(r).sort() - iter.next() + arr_iter.next() return sort @@ -319,11 +319,6 @@ "sorting of non-numeric types '%s' is not implemented", arr.dtype.get_name()) -all_types = (types.all_float_types + types.all_complex_types + - types.all_int_types) -all_types = [i for i in all_types if not issubclass(i[0], types.Float16)] -all_types = unrolling_iterable(all_types) - class ArgSortCache(object): built = False diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -421,18 +421,3 @@ n_old_elems_to_use *= old_shape[oldI] assert len(new_strides) == len(new_shape) return new_strides[:] - - -def calculate_dot_strides(strides, backstrides, res_shape, skip_dims): - rstrides = [0] * len(res_shape) - rbackstrides = [0] * len(res_shape) - j = 0 - for i in range(len(res_shape)): - if i in skip_dims: - rstrides[i] = 0 - rbackstrides[i] = 0 - else: - rstrides[i] = strides[j] - rbackstrides[i] = backstrides[j] - j += 1 - return rstrides, rbackstrides diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -41,8 +41,7 @@ a[0] = 0 assert (b == [1, 1, 1, 0, 0]).all() - - def test_dot(self): + def test_dot_basic(self): from numpypy import array, dot, arange a = array(range(5)) assert dot(a, a) == 30.0 @@ -69,7 +68,7 @@ assert b.shape == (4, 3) c = dot(a, b) assert (c == [[[14, 38, 62], [38, 126, 214], [62, 214, 366]], - [[86, 302, 518], [110, 390, 670], [134, 478, 822]]]).all() + [[86, 302, 518], [110, 390, 670], [134, 478, 822]]]).all() c = dot(a, b[:, 2]) assert (c == [[62, 214, 366], [518, 670, 822]]).all() a = arange(3*2*6).reshape((3,2,6)) diff --git a/pypy/module/micronumpy/test/test_iter.py b/pypy/module/micronumpy/test/test_iter.py deleted file mode 100644 --- a/pypy/module/micronumpy/test/test_iter.py +++ /dev/null @@ -1,93 +0,0 @@ -from pypy.module.micronumpy.iter import MultiDimViewIterator - - -class MockArray(object): - size = 1 - - -class TestIterDirect(object): - def test_C_viewiterator(self): - #Let's get started, simple iteration in C order with - #contiguous layout => strides[-1] is 1 - start = 0 - shape = [3, 5] - strides = [5, 1] - backstrides = [x * (y - 1) for x,y in zip(strides, shape)] - assert backstrides == [10, 4] - i = MultiDimViewIterator(MockArray, start, strides, backstrides, shape) - i.next() - i.next() - i.next() - assert i.offset == 3 - assert not i.done() - assert i.indexes == [0,3] - #cause a dimension overflow - i.next() - i.next() - assert i.offset == 5 - assert i.indexes == [1,0] - - #Now what happens if the array is transposed? 
strides[-1] != 1 - # therefore layout is non-contiguous - strides = [1, 3] - backstrides = [x * (y - 1) for x,y in zip(strides, shape)] - assert backstrides == [2, 12] - i = MultiDimViewIterator(MockArray, start, strides, backstrides, shape) - i.next() - i.next() - i.next() - assert i.offset == 9 - assert not i.done() - assert i.indexes == [0,3] - #cause a dimension overflow - i.next() - i.next() - assert i.offset == 1 - assert i.indexes == [1,0] - - def test_C_viewiterator_step(self): - #iteration in C order with #contiguous layout => strides[-1] is 1 - #skip less than the shape - start = 0 - shape = [3, 5] - strides = [5, 1] - backstrides = [x * (y - 1) for x,y in zip(strides, shape)] - assert backstrides == [10, 4] - i = MultiDimViewIterator(MockArray, start, strides, backstrides, shape) - i.next_skip_x(2) - i.next_skip_x(2) - i.next_skip_x(2) - assert i.offset == 6 - assert not i.done() - assert i.indexes == [1,1] - #And for some big skips - i.next_skip_x(5) - assert i.offset == 11 - assert i.indexes == [2,1] - i.next_skip_x(5) - # Note: the offset does not overflow but recycles, - # this is good for broadcast - assert i.offset == 1 - assert i.indexes == [0,1] - assert i.done() - - #Now what happens if the array is transposed? strides[-1] != 1 - # therefore layout is non-contiguous - strides = [1, 3] - backstrides = [x * (y - 1) for x,y in zip(strides, shape)] - assert backstrides == [2, 12] - i = MultiDimViewIterator(MockArray, start, strides, backstrides, shape) - i.next_skip_x(2) - i.next_skip_x(2) - i.next_skip_x(2) - assert i.offset == 4 - assert i.indexes == [1,1] - assert not i.done() - i.next_skip_x(5) - assert i.offset == 5 - assert i.indexes == [2,1] - assert not i.done() - i.next_skip_x(5) - assert i.indexes == [0,1] - assert i.offset == 3 - assert i.done() diff --git a/pypy/module/micronumpy/test/test_iterators.py b/pypy/module/micronumpy/test/test_iterators.py new file mode 100644 --- /dev/null +++ b/pypy/module/micronumpy/test/test_iterators.py @@ -0,0 +1,96 @@ +from pypy.module.micronumpy import support +from pypy.module.micronumpy.iterators import ArrayIter + + +class MockArray(object): + start = 0 + + +class TestIterDirect(object): + def test_iterator_basic(self): + #Let's get started, simple iteration in C order with + #contiguous layout => strides[-1] is 1 + shape = [3, 5] + strides = [5, 1] + backstrides = [x * (y - 1) for x,y in zip(strides, shape)] + assert backstrides == [10, 4] + i = ArrayIter(MockArray, support.product(shape), shape, + strides, backstrides) + i.next() + i.next() + i.next() + assert i.offset == 3 + assert not i.done() + assert i.indices == [0,3] + #cause a dimension overflow + i.next() + i.next() + assert i.offset == 5 + assert i.indices == [1,0] + + #Now what happens if the array is transposed? 
strides[-1] != 1 + # therefore layout is non-contiguous + strides = [1, 3] + backstrides = [x * (y - 1) for x,y in zip(strides, shape)] + assert backstrides == [2, 12] + i = ArrayIter(MockArray, support.product(shape), shape, + strides, backstrides) + i.next() + i.next() + i.next() + assert i.offset == 9 + assert not i.done() + assert i.indices == [0,3] + #cause a dimension overflow + i.next() + i.next() + assert i.offset == 1 + assert i.indices == [1,0] + + def test_iterator_step(self): + #iteration in C order with #contiguous layout => strides[-1] is 1 + #skip less than the shape + shape = [3, 5] + strides = [5, 1] + backstrides = [x * (y - 1) for x,y in zip(strides, shape)] + assert backstrides == [10, 4] + i = ArrayIter(MockArray, support.product(shape), shape, + strides, backstrides) + i.next_skip_x(2) + i.next_skip_x(2) + i.next_skip_x(2) + assert i.offset == 6 + assert not i.done() + assert i.indices == [1,1] + #And for some big skips + i.next_skip_x(5) + assert i.offset == 11 + assert i.indices == [2,1] + i.next_skip_x(5) + # Note: the offset does not overflow but recycles, + # this is good for broadcast + assert i.offset == 1 + assert i.indices == [0,1] + assert i.done() + + #Now what happens if the array is transposed? strides[-1] != 1 + # therefore layout is non-contiguous + strides = [1, 3] + backstrides = [x * (y - 1) for x,y in zip(strides, shape)] + assert backstrides == [2, 12] + i = ArrayIter(MockArray, support.product(shape), shape, + strides, backstrides) + i.next_skip_x(2) + i.next_skip_x(2) + i.next_skip_x(2) + assert i.offset == 4 + assert i.indices == [1,1] + assert not i.done() + i.next_skip_x(5) + assert i.offset == 5 + assert i.indices == [2,1] + assert not i.done() + i.next_skip_x(5) + assert i.indices == [0,1] + assert i.offset == 3 + assert i.done() diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -1740,10 +1740,11 @@ a = array([(1, 2)], dtype=[('a', 'int64'), ('b', 'int64')])[0] assert a.shape == () assert a.view('S16') == '\x01' + '\x00' * 7 + '\x02' - a = array(2, dtype='int64') - b = a.view('complex64') + a = array(2, dtype='0: + if len(out.get_shape()) > 0: raise oefmt(space.w_ValueError, "output parameter for reduction operation %s has " "too many dimensions", self.name) @@ -262,7 +269,8 @@ return out if keepdims: shape = [1] * len(obj_shape) - out = W_NDimArray.from_shape(space, [1] * len(obj_shape), dtype, w_instance=obj) + out = W_NDimArray.from_shape(space, [1] * len(obj_shape), dtype, + w_instance=obj) out.implementation.setitem(0, res) return out return res @@ -274,6 +282,7 @@ raise OperationError(space.w_ValueError, space.wrap( "outer product only supported for binary functions")) + class W_Ufunc1(W_Ufunc): _immutable_fields_ = ["func", "bool_result"] argcount = 1 diff --git a/rpython/rlib/entrypoint.py b/rpython/rlib/entrypoint.py --- a/rpython/rlib/entrypoint.py +++ b/rpython/rlib/entrypoint.py @@ -78,7 +78,8 @@ # registered RPython_StartupCode = rffi.llexternal('RPython_StartupCode', [], lltype.Void, - _nowrapper=True) + _nowrapper=True, + random_effects_on_gcobjs=True) @entrypoint('main', [], c_name='rpython_startup_code') def rpython_startup_code(): From noreply at buildbot.pypy.org Fri Feb 28 11:13:39 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 28 Feb 2014 11:13:39 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: hg merge default (the last default with green 
buildbots) Message-ID: <20140228101339.6C9D01C244E@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69543:977275de67e8 Date: 2014-02-28 11:06 +0100 http://bitbucket.org/pypy/pypy/changeset/977275de67e8/ Log: hg merge default (the last default with green buildbots) diff too long, truncating to 2000 out of 2661 lines diff --git a/lib-python/2.7/threading.py b/lib-python/2.7/threading.py --- a/lib-python/2.7/threading.py +++ b/lib-python/2.7/threading.py @@ -246,7 +246,14 @@ else: # PyPy patch: use _py3k_acquire() if timeout > 0: - gotit = waiter._py3k_acquire(True, timeout) + try: + gotit = waiter._py3k_acquire(True, timeout) + except OverflowError: + # bah, in Python 3, acquire(True, timeout) raises + # OverflowError if the timeout is too huge. For + # forward-compatibility reasons we do the same. + waiter.acquire() + gotit = True else: gotit = waiter.acquire(False) if not gotit: diff --git a/pypy/module/_cffi_backend/ctypeobj.py b/pypy/module/_cffi_backend/ctypeobj.py --- a/pypy/module/_cffi_backend/ctypeobj.py +++ b/pypy/module/_cffi_backend/ctypeobj.py @@ -14,6 +14,8 @@ _immutable_fields_ = ['size?', 'name', 'name_position'] # note that 'size' is not strictly immutable, because it can change # from -1 to the real value in the W_CTypeStruct subclass. + # XXX this could be improved with an elidable method get_size() + # that raises in case it's still -1... cast_anything = False is_primitive_integer = False diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py --- a/pypy/module/_lsprof/interp_lsprof.py +++ b/pypy/module/_lsprof/interp_lsprof.py @@ -159,7 +159,7 @@ subentry = ProfilerSubEntry(entry.frame) self.calls[entry] = subentry return subentry - return None + raise class ProfilerContext(object): def __init__(self, profobj, entry): @@ -181,8 +181,11 @@ entry._stop(tt, it) if profobj.subcalls and self.previous: caller = jit.promote(self.previous.entry) - subentry = caller._get_or_make_subentry(entry, False) - if subentry is not None: + try: + subentry = caller._get_or_make_subentry(entry, False) + except KeyError: + pass + else: subentry._stop(tt, it) @@ -308,7 +311,7 @@ entry = ProfilerEntry(f_code) self.data[f_code] = entry return entry - return None + raise @jit.elidable def _get_or_make_builtin_entry(self, key, make=True): @@ -319,7 +322,7 @@ entry = ProfilerEntry(self.space.wrap(key)) self.builtin_data[key] = entry return entry - return None + raise def _enter_call(self, f_code): # we have a superb gc, no point in freelist :) @@ -332,8 +335,11 @@ if context is None: return self = jit.promote(self) - entry = self._get_or_make_entry(f_code, False) - if entry is not None: + try: + entry = self._get_or_make_entry(f_code, False) + except KeyError: + pass + else: context._stop(self, entry) self.current_context = context.previous @@ -347,8 +353,11 @@ if context is None: return self = jit.promote(self) - entry = self._get_or_make_builtin_entry(key, False) - if entry is not None: + try: + entry = self._get_or_make_builtin_entry(key, False) + except KeyError: + pass + else: context._stop(self, entry) self.current_context = context.previous diff --git a/pypy/module/cpyext/ndarrayobject.py b/pypy/module/cpyext/ndarrayobject.py --- a/pypy/module/cpyext/ndarrayobject.py +++ b/pypy/module/cpyext/ndarrayobject.py @@ -94,12 +94,12 @@ @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def _PyArray_ITEMSIZE(space, w_array): assert isinstance(w_array, W_NDimArray) - return w_array.get_dtype().get_size() + return 
w_array.get_dtype().elsize @cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL) def _PyArray_NBYTES(space, w_array): assert isinstance(w_array, W_NDimArray) - return w_array.get_size() * w_array.get_dtype().get_size() + return w_array.get_size() * w_array.get_dtype().elsize @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL) def _PyArray_TYPE(space, w_array): diff --git a/pypy/module/micronumpy/arrayimpl/concrete.py b/pypy/module/micronumpy/arrayimpl/concrete.py --- a/pypy/module/micronumpy/arrayimpl/concrete.py +++ b/pypy/module/micronumpy/arrayimpl/concrete.py @@ -55,7 +55,7 @@ loop.setslice(space, shape, self, impl) def get_size(self): - return self.size // self.dtype.get_size() + return self.size // self.dtype.elsize def get_storage_size(self): return self.size @@ -89,7 +89,7 @@ def get_real(self, space, orig_array): strides = self.get_strides() backstrides = self.get_backstrides() - if self.dtype.is_complex_type(): + if self.dtype.is_complex(): dtype = self.dtype.get_float_dtype(space) return SliceArray(self.start, strides, backstrides, self.get_shape(), self, orig_array, dtype=dtype) @@ -103,13 +103,13 @@ def get_imag(self, space, orig_array): strides = self.get_strides() backstrides = self.get_backstrides() - if self.dtype.is_complex_type(): + if self.dtype.is_complex(): dtype = self.dtype.get_float_dtype(space) - return SliceArray(self.start + dtype.get_size(), strides, + return SliceArray(self.start + dtype.elsize, strides, backstrides, self.get_shape(), self, orig_array, dtype=dtype) impl = NonWritableArray(self.get_shape(), self.dtype, self.order, strides, backstrides) - if not self.dtype.is_flexible_type(): + if not self.dtype.is_flexible(): impl.fill(space, self.dtype.box(0)) return impl @@ -204,7 +204,7 @@ if space.isinstance_w(w_idx, space.w_str): idx = space.str_w(w_idx) dtype = self.dtype - if not dtype.is_record_type() or idx not in dtype.fields: + if not dtype.is_record() or idx not in dtype.fields: raise OperationError(space.w_ValueError, space.wrap( "field named %s not found" % idx)) return RecordChunk(idx) @@ -324,7 +324,7 @@ make_sure_not_resized(strides) make_sure_not_resized(backstrides) self.shape = shape - self.size = support.product(shape) * dtype.get_size() + self.size = support.product(shape) * dtype.elsize self.order = order self.dtype = dtype self.strides = strides @@ -352,7 +352,7 @@ self.get_shape()) def fill(self, space, box): - self.dtype.itemtype.fill(self.storage, self.dtype.get_size(), + self.dtype.itemtype.fill(self.storage, self.dtype.elsize, box, 0, self.size, 0) def set_shape(self, space, orig_array, new_shape): @@ -425,7 +425,7 @@ self.storage = parent.storage self.order = parent.order self.dtype = dtype - self.size = support.product(shape) * self.dtype.get_size() + self.size = support.product(shape) * self.dtype.elsize self.start = start self.orig_arr = orig_arr @@ -460,12 +460,12 @@ strides = [] backstrides = [] dtype = self.dtype - s = self.get_strides()[0] // dtype.get_size() + s = self.get_strides()[0] // dtype.elsize if self.order == 'C': new_shape.reverse() for sh in new_shape: - strides.append(s * dtype.get_size()) - backstrides.append(s * (sh - 1) * dtype.get_size()) + strides.append(s * dtype.elsize) + backstrides.append(s * (sh - 1) * dtype.elsize) s *= max(1, sh) if self.order == 'C': strides.reverse() diff --git a/pypy/module/micronumpy/arrayimpl/scalar.py b/pypy/module/micronumpy/arrayimpl/scalar.py --- a/pypy/module/micronumpy/arrayimpl/scalar.py +++ b/pypy/module/micronumpy/arrayimpl/scalar.py @@ -70,7 +70,7 @@ scalar = 
Scalar(dtype) if dtype.is_str_or_unicode(): scalar.value = dtype.coerce(space, space.wrap(self.value.raw_str())) - elif dtype.is_record_type(): + elif dtype.is_record(): raise OperationError(space.w_NotImplementedError, space.wrap( "viewing scalar as record not implemented")) else: @@ -78,7 +78,7 @@ return scalar def get_real(self, space, orig_array): - if self.dtype.is_complex_type(): + if self.dtype.is_complex(): scalar = Scalar(self.dtype.get_float_dtype(space)) scalar.value = self.value.convert_real_to(scalar.dtype) return scalar @@ -91,7 +91,7 @@ "could not broadcast input array from shape " + "(%s) into shape ()" % ( ','.join([str(x) for x in w_arr.get_shape()],)))) - if self.dtype.is_complex_type(): + if self.dtype.is_complex(): dtype = self.dtype.get_float_dtype(space) self.value = self.dtype.itemtype.composite( w_arr.get_scalar_value().convert_to(space, dtype), @@ -100,7 +100,7 @@ self.value = w_arr.get_scalar_value() def get_imag(self, space, orig_array): - if self.dtype.is_complex_type(): + if self.dtype.is_complex(): scalar = Scalar(self.dtype.get_float_dtype(space)) scalar.value = self.value.convert_imag_to(scalar.dtype) return scalar @@ -110,7 +110,7 @@ def set_imag(self, space, orig_array, w_val): #Only called on complex dtype - assert self.dtype.is_complex_type() + assert self.dtype.is_complex() w_arr = convert_to_array(space, w_val) if len(w_arr.get_shape()) > 0: raise OperationError(space.w_ValueError, space.wrap( @@ -127,7 +127,7 @@ if space.len_w(w_idx) == 0: return self.get_scalar_value() elif space.isinstance_w(w_idx, space.w_str): - if self.dtype.is_record_type(): + if self.dtype.is_record(): w_val = self.value.descr_getitem(space, w_idx) return convert_to_array(space, w_val) elif space.is_none(w_idx): @@ -148,7 +148,7 @@ if space.len_w(w_idx) == 0: return self.set_scalar_value(self.dtype.coerce(space, w_val)) elif space.isinstance_w(w_idx, space.w_str): - if self.dtype.is_record_type(): + if self.dtype.is_record(): return self.value.descr_setitem(space, w_idx, w_val) raise OperationError(space.w_IndexError, space.wrap("0-d arrays can't be indexed")) diff --git a/pypy/module/micronumpy/arrayimpl/sort.py b/pypy/module/micronumpy/arrayimpl/sort.py --- a/pypy/module/micronumpy/arrayimpl/sort.py +++ b/pypy/module/micronumpy/arrayimpl/sort.py @@ -71,10 +71,10 @@ def __init__(self, index_stride_size, stride_size, size): start = 0 dtype = interp_dtype.get_dtype_cache(space).w_longdtype - indexes = dtype.itemtype.malloc(size*dtype.get_size()) + indexes = dtype.itemtype.malloc(size * dtype.elsize) values = alloc_raw_storage(size * stride_size, track_allocation=False) - Repr.__init__(self, dtype.get_size(), stride_size, + Repr.__init__(self, dtype.elsize, stride_size, size, values, indexes, start, start) def __del__(self): @@ -177,7 +177,7 @@ # XXX this should probably be changed raise oefmt(space.w_NotImplementedError, "sorting of non-numeric types '%s' is not implemented", - arr.dtype.name) + arr.dtype.get_name()) all_types = (types.all_float_types + types.all_complex_types + types.all_int_types) @@ -320,7 +320,7 @@ # XXX this should probably be changed raise oefmt(space.w_NotImplementedError, "sorting of non-numeric types '%s' is not implemented", - arr.dtype.name) + arr.dtype.get_name()) all_types = (types.all_float_types + types.all_complex_types + types.all_int_types) diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -218,7 +218,7 @@ return 
w_type.lookup(name) def gettypefor(self, w_obj): - return None + return W_TypeObject(w_obj.typedef.name) def call_function(self, tp, w_dtype): return w_dtype diff --git a/pypy/module/micronumpy/interp_arrayops.py b/pypy/module/micronumpy/interp_arrayops.py --- a/pypy/module/micronumpy/interp_arrayops.py +++ b/pypy/module/micronumpy/interp_arrayops.py @@ -137,14 +137,14 @@ "all the input array dimensions except for the " "concatenation axis must match exactly")) a_dt = arr.get_dtype() - if dtype.is_record_type() and a_dt.is_record_type(): + if dtype.is_record() and a_dt.is_record(): # Record types must match for f in dtype.fields: if f not in a_dt.fields or \ dtype.fields[f] != a_dt.fields[f]: raise OperationError(space.w_TypeError, space.wrap("invalid type promotion")) - elif dtype.is_record_type() or a_dt.is_record_type(): + elif dtype.is_record() or a_dt.is_record(): raise OperationError(space.w_TypeError, space.wrap("invalid type promotion")) dtype = interp_ufuncs.find_binop_result_dtype(space, dtype, diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -16,7 +16,7 @@ from pypy.interpreter.mixedmodule import MixedModule from rpython.rtyper.lltypesystem import lltype from rpython.rlib.rstring import StringBuilder -from rpython.rlib import jit +from rpython.rlib.objectmodel import specialize from pypy.module.micronumpy import constants as NPY @@ -33,13 +33,13 @@ long_double_size = 8 -def new_dtype_getter(name): - @jit.elidable +def new_dtype_getter(num): + @specialize.memo() def _get_dtype(space): from pypy.module.micronumpy.interp_dtype import get_dtype_cache - return get_dtype_cache(space).dtypes_by_name[name] + return get_dtype_cache(space).dtypes_by_num[num] - def new(space, w_subtype, w_value=None): + def descr__new__(space, w_subtype, w_value=None): from pypy.module.micronumpy.interp_numarray import array dtype = _get_dtype(space) if not space.is_none(w_value): @@ -52,7 +52,9 @@ def descr_reduce(self, space): return self.reduce(space) - return func_with_new_name(new, name + "_box_new"), staticmethod(_get_dtype), func_with_new_name(descr_reduce, "descr_reduce") + return (func_with_new_name(descr__new__, 'descr__new__%d' % num), + staticmethod(_get_dtype), + descr_reduce) class Box(object): @@ -303,15 +305,15 @@ else: dtype = space.interp_w(W_Dtype, space.call_function(space.gettypefor(W_Dtype), w_dtype)) - if dtype.get_size() == 0: + if dtype.elsize == 0: raise OperationError(space.w_TypeError, space.wrap( "data-type must not be 0-sized")) - if dtype.get_size() != self.get_dtype(space).get_size(): + if dtype.elsize != self.get_dtype(space).elsize: raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) if dtype.is_str_or_unicode(): return dtype.coerce(space, space.wrap(self.raw_str())) - elif dtype.is_record_type(): + elif dtype.is_record(): raise OperationError(space.w_NotImplementedError, space.wrap( "viewing scalar as record not implemented")) else: @@ -327,7 +329,7 @@ return space.wrap(1) def descr_get_itemsize(self, space): - return self.get_dtype(space).descr_get_itemsize(space) + return space.wrap(self.get_dtype(space).elsize) def descr_get_shape(self, space): return space.newtuple([]) @@ -352,6 +354,12 @@ w_meth = space.getattr(self.descr_ravel(space), space.wrap('reshape')) return space.call_args(w_meth, __args__) + def descr_get_real(self, space): + return self.get_dtype(space).itemtype.real(self) + + def 
descr_get_imag(self, space): + return self.get_dtype(space).itemtype.imag(self) + w_flags = None def descr_get_flags(self, space): if self.w_flags is None: @@ -359,7 +367,7 @@ return self.w_flags class W_BoolBox(W_GenericBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("bool") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.BOOL) class W_NumberBox(W_GenericBox): pass @@ -375,34 +383,34 @@ pass class W_Int8Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("int8") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.BYTE) class W_UInt8Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("uint8") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.UBYTE) class W_Int16Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("int16") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.SHORT) class W_UInt16Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("uint16") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.USHORT) class W_Int32Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("i") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.INT) class W_UInt32Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("I") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.UINT) + +class W_LongBox(W_SignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.LONG) + +class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.ULONG) class W_Int64Box(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("q") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.LONGLONG) class W_UInt64Box(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("Q") - -class W_LongBox(W_SignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("l") - -class W_ULongBox(W_UnsignedIntegerBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("L") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.ULONGLONG) class W_InexactBox(W_NumberBox): pass @@ -411,45 +419,32 @@ pass class W_Float16Box(W_FloatingBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float16") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.HALF) class W_Float32Box(W_FloatingBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float32") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.FLOAT) class W_Float64Box(W_FloatingBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("float64") + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.DOUBLE) def descr_as_integer_ratio(self, space): return space.call_method(self.item(space), 'as_integer_ratio') class W_ComplexFloatingBox(W_InexactBox): - def descr_get_real(self, space): - dtype = self._COMPONENTS_BOX._get_dtype(space) - box = self.convert_real_to(dtype) - assert isinstance(box, self._COMPONENTS_BOX) - return space.wrap(box) - - def descr_get_imag(self, space): - dtype = self._COMPONENTS_BOX._get_dtype(space) - box = 
self.convert_imag_to(dtype) - assert isinstance(box, self._COMPONENTS_BOX) - return space.wrap(box) + pass class W_Complex64Box(ComplexBox, W_ComplexFloatingBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("complex64") - _COMPONENTS_BOX = W_Float32Box + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.CFLOAT) class W_Complex128Box(ComplexBox, W_ComplexFloatingBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter("complex128") - _COMPONENTS_BOX = W_Float64Box + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.CDOUBLE) if long_double_size in (8, 12, 16): class W_FloatLongBox(W_FloatingBox, PrimitiveBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.LONGDOUBLELTR) + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.LONGDOUBLE) class W_ComplexLongBox(ComplexBox, W_ComplexFloatingBox): - descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.CLONGDOUBLELTR) - _COMPONENTS_BOX = W_FloatLongBox + descr__new__, _get_dtype, descr_reduce = new_dtype_getter(NPY.CLONGDOUBLE) class W_FlexibleBox(W_GenericBox): _attrs_ = ['arr', 'ofs', 'dtype'] @@ -635,6 +630,8 @@ strides = GetSetProperty(W_GenericBox.descr_get_shape), ndim = GetSetProperty(W_GenericBox.descr_get_ndim), T = GetSetProperty(W_GenericBox.descr_self), + real = GetSetProperty(W_GenericBox.descr_get_real), + imag = GetSetProperty(W_GenericBox.descr_get_imag), flags = GetSetProperty(W_GenericBox.descr_get_flags), ) @@ -768,16 +765,12 @@ __new__ = interp2app(W_Complex64Box.descr__new__.im_func), __reduce__ = interp2app(W_Complex64Box.descr_reduce), __complex__ = interp2app(W_GenericBox.item), - real = GetSetProperty(W_ComplexFloatingBox.descr_get_real), - imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), ) W_Complex128Box.typedef = TypeDef("complex128", (W_ComplexFloatingBox.typedef, complex_typedef), __module__ = "numpy", __new__ = interp2app(W_Complex128Box.descr__new__.im_func), __reduce__ = interp2app(W_Complex128Box.descr_reduce), - real = GetSetProperty(W_ComplexFloatingBox.descr_get_real), - imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), ) if long_double_size in (8, 12, 16): @@ -792,8 +785,6 @@ __new__ = interp2app(W_ComplexLongBox.descr__new__.im_func), __reduce__ = interp2app(W_ComplexLongBox.descr_reduce), __complex__ = interp2app(W_GenericBox.item), - real = GetSetProperty(W_ComplexFloatingBox.descr_get_real), - imag = GetSetProperty(W_ComplexFloatingBox.descr_get_imag), ) W_FlexibleBox.typedef = TypeDef("flexible", W_GenericBox.typedef, diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py --- a/pypy/module/micronumpy/interp_dtype.py +++ b/pypy/module/micronumpy/interp_dtype.py @@ -6,7 +6,7 @@ interp_attrproperty, interp_attrproperty_w) from pypy.module.micronumpy import types, interp_boxes, base from rpython.rlib.objectmodel import specialize -from rpython.rlib.rarithmetic import LONG_BIT, r_longlong, r_ulonglong +from rpython.rlib.rarithmetic import r_longlong, r_ulonglong from rpython.rlib import jit from pypy.module.micronumpy.appbridge import get_appbridge_cache from pypy.module.micronumpy.conversion_utils import byteorder_converter @@ -38,23 +38,19 @@ class W_Dtype(W_Root): _immutable_fields_ = [ - "num", "kind", "name", "char", "w_box_type", "float_type", - "itemtype?", "byteorder?", "names?", "fields?", "size?", + "itemtype?", "num", "kind", "char", "w_box_type", + "byteorder?", "names?", "fields?", "elsize?", "alignment?", "shape?", "subdtype?", "base?", - 
"alternate_constructors", "aliases", ] - def __init__(self, itemtype, num, kind, name, char, w_box_type, - float_type=None, byteorder=None, names=[], fields={}, - size=1, shape=[], subdtype=None, - alternate_constructors=[], aliases=[]): + def __init__(self, itemtype, num, kind, char, w_box_type, + byteorder=None, names=[], fields={}, + elsize=None, shape=[], subdtype=None): self.itemtype = itemtype self.num = num self.kind = kind - self.name = name self.char = char self.w_box_type = w_box_type - self.float_type = float_type if byteorder is None: if itemtype.get_element_size() == 1: byteorder = NPY.IGNORE @@ -63,15 +59,16 @@ self.byteorder = byteorder self.names = names self.fields = fields - self.size = size + if elsize is None: + elsize = itemtype.get_element_size() + self.elsize = elsize + self.alignment = itemtype.alignment self.shape = shape self.subdtype = subdtype if not subdtype: self.base = self else: self.base = subdtype.base - self.alternate_constructors = alternate_constructors - self.aliases = aliases def __repr__(self): if self.fields is not None: @@ -86,121 +83,74 @@ def box_complex(self, real, imag): return self.itemtype.box_complex(real, imag) - def build_and_convert(self, space, box): - return self.itemtype.build_and_convert(space, self, box) - def coerce(self, space, w_item): return self.itemtype.coerce(space, self, w_item) - def is_int_type(self): - return (self.kind == NPY.SIGNEDLTR or self.kind == NPY.UNSIGNEDLTR or - self.kind == NPY.GENBOOLLTR) + def is_bool(self): + return self.kind == NPY.GENBOOLLTR def is_signed(self): return self.kind == NPY.SIGNEDLTR - def is_complex_type(self): + def is_unsigned(self): + return self.kind == NPY.UNSIGNEDLTR + + def is_int(self): + return (self.kind == NPY.SIGNEDLTR or self.kind == NPY.UNSIGNEDLTR or + self.kind == NPY.GENBOOLLTR) + + def is_float(self): + return self.kind == NPY.FLOATINGLTR + + def is_complex(self): return self.kind == NPY.COMPLEXLTR - def is_float_type(self): - return self.kind == NPY.FLOATINGLTR or self.kind == NPY.COMPLEXLTR - - def is_bool_type(self): - return self.kind == NPY.GENBOOLLTR - - def is_record_type(self): - return bool(self.fields) - - def is_str_type(self): + def is_str(self): return self.num == NPY.STRING def is_str_or_unicode(self): return self.num == NPY.STRING or self.num == NPY.UNICODE - def is_flexible_type(self): + def is_flexible(self): return self.is_str_or_unicode() or self.num == NPY.VOID + def is_record(self): + return bool(self.fields) + def is_native(self): return self.byteorder in (NPY.NATIVE, NPY.NATBYTE) - def get_size(self): - return self.size * self.itemtype.get_element_size() - def get_float_dtype(self, space): - assert self.kind == NPY.COMPLEXLTR - assert self.float_type is not None - dtype = get_dtype_cache(space).dtypes_by_name[self.float_type] + assert self.is_complex() + dtype = get_dtype_cache(space).component_dtypes[self.num] if self.byteorder == NPY.OPPBYTE: dtype = dtype.descr_newbyteorder(space) + assert dtype.is_float() return dtype - def descr_str(self, space): - if self.fields: - return space.str(self.descr_get_descr(space)) - elif self.subdtype is not None: - return space.str(space.newtuple([ - self.subdtype.descr_get_str(space), - self.descr_get_shape(space)])) - else: - if self.is_flexible_type(): - return self.descr_get_str(space) - else: - return self.descr_get_name(space) - - def descr_repr(self, space): - if self.fields: - r = self.descr_get_descr(space) - elif self.subdtype is not None: - r = space.newtuple([self.subdtype.descr_get_str(space), - 
self.descr_get_shape(space)]) - else: - if self.is_flexible_type(): - if self.byteorder != NPY.IGNORE: - byteorder = NPY.NATBYTE if self.is_native() else NPY.OPPBYTE - else: - byteorder = '' - r = space.wrap(byteorder + self.char + str(self.size)) - else: - r = self.descr_get_name(space) - return space.wrap("dtype(%s)" % space.str_w(space.repr(r))) - - def descr_get_itemsize(self, space): - return space.wrap(self.get_size()) - - def descr_get_alignment(self, space): - return space.wrap(self.itemtype.alignment) - - def descr_get_isbuiltin(self, space): - if self.fields is None: - return space.wrap(1) - return space.wrap(0) - - def descr_get_subdtype(self, space): - if self.subdtype is None: - return space.w_None - return space.newtuple([space.wrap(self.subdtype), self.descr_get_shape(space)]) + def get_name(self): + return self.w_box_type.name def descr_get_name(self, space): - if self.is_flexible_type(): - return space.wrap(self.name + str(self.get_size() * 8)) - return space.wrap(self.name) + name = self.get_name() + if name[-1] == '_': + name = name[:-1] + if self.is_flexible() and self.elsize != 0: + return space.wrap(name + str(self.elsize * 8)) + return space.wrap(name) def descr_get_str(self, space): - size = self.get_size() basic = self.kind - if basic == NPY.UNICODELTR: + endian = self.byteorder + size = self.elsize + if endian == NPY.NATIVE: + endian = NPY.NATBYTE + if self.num == NPY.UNICODE: size >>= 2 - endian = NPY.NATBYTE - elif size // (self.size or 1) <= 1: - endian = NPY.IGNORE - else: - endian = self.byteorder - if endian == NPY.NATIVE: - endian = NPY.NATBYTE return space.wrap("%s%s%s" % (endian, basic, size)) def descr_get_descr(self, space): - if not self.is_record_type(): + if not self.is_record(): return space.newlist([space.newtuple([space.wrap(""), self.descr_get_str(space)])]) else: @@ -208,7 +158,7 @@ for name in self.names: subdtype = self.fields[name][1] subdescr = [space.wrap(name)] - if subdtype.is_record_type(): + if subdtype.is_record(): subdescr.append(subdtype.descr_get_descr(space)) elif subdtype.subdtype is not None: subdescr.append(subdtype.subdtype.descr_get_str(space)) @@ -219,38 +169,37 @@ descr.append(space.newtuple(subdescr[:])) return space.newlist(descr) - def descr_get_base(self, space): - return space.wrap(self.base) + def descr_get_hasobject(self, space): + return space.w_False + + def descr_get_isbuiltin(self, space): + if self.fields is None: + return space.wrap(1) + return space.wrap(0) def descr_get_isnative(self, space): return space.wrap(self.is_native()) + def descr_get_base(self, space): + return space.wrap(self.base) + + def descr_get_subdtype(self, space): + if self.subdtype is None: + return space.w_None + return space.newtuple([space.wrap(self.subdtype), + self.descr_get_shape(space)]) + def descr_get_shape(self, space): - w_shape = [space.wrap(dim) for dim in self.shape] - return space.newtuple(w_shape) - - def eq(self, space, w_other): - w_other = space.call_function(space.gettypefor(W_Dtype), w_other) - if space.is_w(self, w_other): - return True - if isinstance(w_other, W_Dtype): - return space.eq_w(self.descr_reduce(space), w_other.descr_reduce(space)) - return False - - def descr_eq(self, space, w_other): - return space.wrap(self.eq(space, w_other)) - - def descr_ne(self, space, w_other): - return space.wrap(not self.eq(space, w_other)) + return space.newtuple([space.wrap(dim) for dim in self.shape]) def descr_get_fields(self, space): if not self.fields: return space.w_None - w_d = space.newdict() + w_fields = space.newdict() 
for name, (offset, subdtype) in self.fields.iteritems(): - space.setitem(w_d, space.wrap(name), + space.setitem(w_fields, space.wrap(name), space.newtuple([subdtype, space.wrap(offset)])) - return w_d + return w_fields def descr_get_names(self, space): if not self.fields: @@ -285,13 +234,61 @@ raise OperationError(space.w_AttributeError, space.wrap( "Cannot delete dtype names attribute")) - def descr_get_hasobject(self, space): - return space.w_False + def eq(self, space, w_other): + w_other = space.call_function(space.gettypefor(W_Dtype), w_other) + if space.is_w(self, w_other): + return True + if isinstance(w_other, W_Dtype): + return space.eq_w(self.descr_reduce(space), + w_other.descr_reduce(space)) + return False + + def descr_eq(self, space, w_other): + return space.wrap(self.eq(space, w_other)) + + def descr_ne(self, space, w_other): + return space.wrap(not self.eq(space, w_other)) + + def descr_hash(self, space): + return space.hash(self.descr_reduce(space)) + + def descr_str(self, space): + if self.fields: + return space.str(self.descr_get_descr(space)) + elif self.subdtype is not None: + return space.str(space.newtuple([ + self.subdtype.descr_get_str(space), + self.descr_get_shape(space)])) + else: + if self.is_flexible(): + return self.descr_get_str(space) + else: + return self.descr_get_name(space) + + def descr_repr(self, space): + if self.fields: + r = self.descr_get_descr(space) + elif self.subdtype is not None: + r = space.newtuple([self.subdtype.descr_get_str(space), + self.descr_get_shape(space)]) + else: + if self.is_flexible(): + if self.byteorder != NPY.IGNORE: + byteorder = NPY.NATBYTE if self.is_native() else NPY.OPPBYTE + else: + byteorder = '' + size = self.elsize + if self.num == NPY.UNICODE: + size >>= 2 + r = space.wrap(byteorder + self.char + str(size)) + else: + r = self.descr_get_name(space) + return space.wrap("dtype(%s)" % space.str_w(space.repr(r))) def descr_getitem(self, space, w_item): - if self.fields is None: - raise OperationError(space.w_KeyError, space.wrap( - "There are no fields in dtype %s." % self.name)) + if not self.fields: + raise oefmt(space.w_KeyError, "There are no fields in dtype %s.", + self.get_name()) if space.isinstance_w(w_item, space.w_basestring): item = space.str_w(w_item) elif space.isinstance_w(w_item, space.w_int): @@ -311,45 +308,33 @@ "Field named '%s' not found." 
% item)) def descr_len(self, space): - if self.fields is None: + if not self.fields: return space.wrap(0) return space.wrap(len(self.fields)) - def descr_hash(self, space): - return space.hash(self.descr_reduce(space)) - def descr_reduce(self, space): w_class = space.type(self) - - kind = self.kind - elemsize = self.get_size() - builder_args = space.newtuple([space.wrap("%s%d" % (kind, elemsize)), space.wrap(0), space.wrap(1)]) + builder_args = space.newtuple([ + space.wrap("%s%d" % (self.kind, self.elsize)), + space.wrap(0), space.wrap(1)]) version = space.wrap(3) + endian = self.byteorder + if endian == NPY.NATIVE: + endian = NPY.NATBYTE + subdescr = self.descr_get_subdtype(space) names = self.descr_get_names(space) values = self.descr_get_fields(space) - if self.fields: - endian = NPY.IGNORE - #TODO: Implement this when subarrays are implemented - subdescr = space.w_None - size = 0 - for key in self.fields: - dtype = self.fields[key][1] - assert isinstance(dtype, W_Dtype) - size += dtype.get_size() - w_size = space.wrap(size) - #TODO: Change this when alignment is implemented - alignment = space.wrap(1) + if self.is_flexible(): + w_size = space.wrap(self.elsize) + alignment = space.wrap(self.alignment) else: - endian = self.byteorder - if endian == NPY.NATIVE: - endian = NPY.NATBYTE - subdescr = space.w_None w_size = space.wrap(-1) alignment = space.wrap(-1) flags = space.wrap(0) - data = space.newtuple([version, space.wrap(endian), subdescr, names, values, w_size, alignment, flags]) + data = space.newtuple([version, space.wrap(endian), subdescr, + names, values, w_size, alignment, flags]) return space.newtuple([w_class, builder_args, data]) def descr_setstate(self, space, w_data): @@ -370,6 +355,7 @@ w_names = space.getitem(w_data, space.wrap(3)) w_fields = space.getitem(w_data, space.wrap(4)) size = space.int_w(space.getitem(w_data, space.wrap(5))) + alignment = space.int_w(space.getitem(w_data, space.wrap(6))) if (w_names == space.w_None) != (w_fields == space.w_None): raise oefmt(space.w_ValueError, "inconsistent fields and names") @@ -408,8 +394,9 @@ self.fields[name] = offset, dtype self.itemtype = types.RecordType() - if self.is_flexible_type(): - self.size = size + if self.is_flexible(): + self.elsize = size + self.alignment = alignment @unwrap_spec(new_order=str) def descr_newbyteorder(self, space, new_order=NPY.SWAP): @@ -421,9 +408,13 @@ elif newendian != NPY.IGNORE: endian = newendian itemtype = self.itemtype.__class__(endian in (NPY.NATIVE, NPY.NATBYTE)) - return W_Dtype(itemtype, self.num, self.kind, self.name, self.char, - self.w_box_type, self.float_type, byteorder=endian, - size=self.size) + fields = self.fields + if fields is None: + fields = {} + return W_Dtype(itemtype, self.num, self.kind, self.char, + self.w_box_type, byteorder=endian, elsize=self.elsize, + names=self.names, fields=fields, + shape=self.shape, subdtype=self.subdtype) @specialize.arg(2) @@ -453,11 +444,11 @@ raise oefmt(space.w_ValueError, "two fields with the same name") assert isinstance(subdtype, W_Dtype) fields[fldname] = (offset, subdtype) - offset += subdtype.get_size() + offset += subdtype.elsize names.append(fldname) - return W_Dtype(types.RecordType(), NPY.VOID, NPY.VOIDLTR, "void", - NPY.VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), - names=names, fields=fields, size=offset) + return W_Dtype(types.RecordType(), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, + space.gettypefor(interp_boxes.W_VoidBox), + names=names, fields=fields, elsize=offset) def dtype_from_dict(space, w_dict): @@ -496,11 
+487,10 @@ size *= dim if size == 1: return subdtype - return W_Dtype(types.VoidType(), NPY.VOID, NPY.VOIDLTR, - "void" + str(8 * subdtype.get_size() * size), - NPY.VOIDLTR, space.gettypefor(interp_boxes.W_VoidBox), - shape=shape, subdtype=subdtype, - size=subdtype.get_size() * size) + size *= subdtype.elsize + return W_Dtype(types.VoidType(), NPY.VOID, NPY.VOIDLTR, NPY.VOIDLTR, + space.gettypefor(interp_boxes.W_VoidBox), + shape=shape, subdtype=subdtype, elsize=size) if space.is_none(w_dtype): return cache.w_float64dtype @@ -529,58 +519,59 @@ w_dtype1 = space.getitem(w_dtype, space.wrap(1)) subdtype = descr__new__(space, w_subtype, w_dtype0, w_align, w_copy) assert isinstance(subdtype, W_Dtype) - if subdtype.get_size() == 0: + if subdtype.elsize == 0: name = "%s%d" % (subdtype.kind, space.int_w(w_dtype1)) return descr__new__(space, w_subtype, space.wrap(name), w_align, w_copy) return descr__new__(space, w_subtype, w_dtype0, w_align, w_copy, w_shape=w_dtype1) elif space.isinstance_w(w_dtype, space.w_dict): return dtype_from_dict(space, w_dtype) for dtype in cache.builtin_dtypes: - if w_dtype in dtype.alternate_constructors: + if dtype.num in cache.alternate_constructors and \ + w_dtype in cache.alternate_constructors[dtype.num]: return dtype if w_dtype is dtype.w_box_type: return dtype if space.isinstance_w(w_dtype, space.w_type): - raise oefmt(space.w_NotImplementedError, "object dtype not implemented") + raise oefmt(space.w_NotImplementedError, + "cannot create dtype with type '%N'", w_dtype) raise oefmt(space.w_TypeError, "data type not understood") W_Dtype.typedef = TypeDef("dtype", __module__ = "numpy", __new__ = interp2app(descr__new__), - __str__= interp2app(W_Dtype.descr_str), - __repr__ = interp2app(W_Dtype.descr_repr), - __eq__ = interp2app(W_Dtype.descr_eq), - __ne__ = interp2app(W_Dtype.descr_ne), - __getitem__ = interp2app(W_Dtype.descr_getitem), - __len__ = interp2app(W_Dtype.descr_len), - - __hash__ = interp2app(W_Dtype.descr_hash), - __reduce__ = interp2app(W_Dtype.descr_reduce), - __setstate__ = interp2app(W_Dtype.descr_setstate), - newbyteorder = interp2app(W_Dtype.descr_newbyteorder), - type = interp_attrproperty_w("w_box_type", cls=W_Dtype), kind = interp_attrproperty("kind", cls=W_Dtype), char = interp_attrproperty("char", cls=W_Dtype), num = interp_attrproperty("num", cls=W_Dtype), byteorder = interp_attrproperty("byteorder", cls=W_Dtype), - itemsize = GetSetProperty(W_Dtype.descr_get_itemsize), - alignment = GetSetProperty(W_Dtype.descr_get_alignment), + itemsize = interp_attrproperty("elsize", cls=W_Dtype), + alignment = interp_attrproperty("alignment", cls=W_Dtype), + + name = GetSetProperty(W_Dtype.descr_get_name), + str = GetSetProperty(W_Dtype.descr_get_str), + descr = GetSetProperty(W_Dtype.descr_get_descr), + hasobject = GetSetProperty(W_Dtype.descr_get_hasobject), isbuiltin = GetSetProperty(W_Dtype.descr_get_isbuiltin), - + isnative = GetSetProperty(W_Dtype.descr_get_isnative), + base = GetSetProperty(W_Dtype.descr_get_base), subdtype = GetSetProperty(W_Dtype.descr_get_subdtype), - str = GetSetProperty(W_Dtype.descr_get_str), - name = GetSetProperty(W_Dtype.descr_get_name), - base = GetSetProperty(W_Dtype.descr_get_base), shape = GetSetProperty(W_Dtype.descr_get_shape), - isnative = GetSetProperty(W_Dtype.descr_get_isnative), fields = GetSetProperty(W_Dtype.descr_get_fields), names = GetSetProperty(W_Dtype.descr_get_names, W_Dtype.descr_set_names, W_Dtype.descr_del_names), - hasobject = GetSetProperty(W_Dtype.descr_get_hasobject), - descr = 
GetSetProperty(W_Dtype.descr_get_descr), + + __eq__ = interp2app(W_Dtype.descr_eq), + __ne__ = interp2app(W_Dtype.descr_ne), + __hash__ = interp2app(W_Dtype.descr_hash), + __str__= interp2app(W_Dtype.descr_str), + __repr__ = interp2app(W_Dtype.descr_repr), + __getitem__ = interp2app(W_Dtype.descr_getitem), + __len__ = interp2app(W_Dtype.descr_len), + __reduce__ = interp2app(W_Dtype.descr_reduce), + __setstate__ = interp2app(W_Dtype.descr_setstate), + newbyteorder = interp2app(W_Dtype.descr_newbyteorder), ) W_Dtype.typedef.acceptable_as_base_class = False @@ -597,10 +588,8 @@ except ValueError: raise oefmt(space.w_TypeError, "data type not understood") if char == NPY.CHARLTR: - char = NPY.STRINGLTR - size = 1 - - if char == NPY.STRINGLTR: + return new_string_dtype(space, 1, NPY.CHARLTR) + elif char == NPY.STRINGLTR: return new_string_dtype(space, size) elif char == NPY.UNICODELTR: return new_unicode_dtype(space, size) @@ -609,25 +598,24 @@ assert False -def new_string_dtype(space, size): +def new_string_dtype(space, size, char=NPY.STRINGLTR): return W_Dtype( types.StringType(), - size=size, + elsize=size, num=NPY.STRING, kind=NPY.STRINGLTR, - name='string', - char=NPY.STRINGLTR, + char=char, w_box_type=space.gettypefor(interp_boxes.W_StringBox), ) def new_unicode_dtype(space, size): + itemtype = types.UnicodeType() return W_Dtype( - types.UnicodeType(), - size=size, + itemtype, + elsize=size * itemtype.get_element_size(), num=NPY.UNICODE, kind=NPY.UNICODELTR, - name='unicode', char=NPY.UNICODELTR, w_box_type=space.gettypefor(interp_boxes.W_UnicodeBox), ) @@ -636,10 +624,9 @@ def new_void_dtype(space, size): return W_Dtype( types.VoidType(), - size=size, + elsize=size, num=NPY.VOID, kind=NPY.VOIDLTR, - name='void', char=NPY.VOIDLTR, w_box_type=space.gettypefor(interp_boxes.W_VoidBox), ) @@ -651,53 +638,41 @@ types.Bool(), num=NPY.BOOL, kind=NPY.GENBOOLLTR, - name="bool", char=NPY.BOOLLTR, w_box_type=space.gettypefor(interp_boxes.W_BoolBox), - alternate_constructors=[space.w_bool], - aliases=['bool8'], ) self.w_int8dtype = W_Dtype( types.Int8(), num=NPY.BYTE, kind=NPY.SIGNEDLTR, - name="int8", char=NPY.BYTELTR, w_box_type=space.gettypefor(interp_boxes.W_Int8Box), - aliases=['byte'], ) self.w_uint8dtype = W_Dtype( types.UInt8(), num=NPY.UBYTE, kind=NPY.UNSIGNEDLTR, - name="uint8", char=NPY.UBYTELTR, w_box_type=space.gettypefor(interp_boxes.W_UInt8Box), - aliases=['ubyte'], ) self.w_int16dtype = W_Dtype( types.Int16(), num=NPY.SHORT, kind=NPY.SIGNEDLTR, - name="int16", char=NPY.SHORTLTR, w_box_type=space.gettypefor(interp_boxes.W_Int16Box), - aliases=['short'], ) self.w_uint16dtype = W_Dtype( types.UInt16(), num=NPY.USHORT, kind=NPY.UNSIGNEDLTR, - name="uint16", char=NPY.USHORTLTR, w_box_type=space.gettypefor(interp_boxes.W_UInt16Box), - aliases=['ushort'], ) self.w_int32dtype = W_Dtype( types.Int32(), num=NPY.INT, kind=NPY.SIGNEDLTR, - name="int32", char=NPY.INTLTR, w_box_type=space.gettypefor(interp_boxes.W_Int32Box), ) @@ -705,7 +680,6 @@ types.UInt32(), num=NPY.UINT, kind=NPY.UNSIGNEDLTR, - name="uint32", char=NPY.UINTLTR, w_box_type=space.gettypefor(interp_boxes.W_UInt32Box), ) @@ -713,148 +687,100 @@ types.Long(), num=NPY.LONG, kind=NPY.SIGNEDLTR, - name="int%d" % LONG_BIT, char=NPY.LONGLTR, w_box_type=space.gettypefor(interp_boxes.W_LongBox), - alternate_constructors=[space.w_int, - space.gettypefor(interp_boxes.W_IntegerBox), - space.gettypefor(interp_boxes.W_SignedIntegerBox), - ], - aliases=['int'], ) self.w_ulongdtype = W_Dtype( types.ULong(), num=NPY.ULONG, kind=NPY.UNSIGNEDLTR, - 
name="uint%d" % LONG_BIT, char=NPY.ULONGLTR, w_box_type=space.gettypefor(interp_boxes.W_ULongBox), - alternate_constructors=[space.gettypefor(interp_boxes.W_UnsignedIntegerBox), - ], - aliases=['uint'], ) self.w_int64dtype = W_Dtype( types.Int64(), num=NPY.LONGLONG, kind=NPY.SIGNEDLTR, - name="int64", char=NPY.LONGLONGLTR, w_box_type=space.gettypefor(interp_boxes.W_Int64Box), - alternate_constructors=[space.w_long], - aliases=['longlong'], ) self.w_uint64dtype = W_Dtype( types.UInt64(), num=NPY.ULONGLONG, kind=NPY.UNSIGNEDLTR, - name="uint64", char=NPY.ULONGLONGLTR, w_box_type=space.gettypefor(interp_boxes.W_UInt64Box), - aliases=['ulonglong'], ) self.w_float32dtype = W_Dtype( types.Float32(), num=NPY.FLOAT, kind=NPY.FLOATINGLTR, - name="float32", char=NPY.FLOATLTR, w_box_type=space.gettypefor(interp_boxes.W_Float32Box), - aliases=['single'] ) self.w_float64dtype = W_Dtype( types.Float64(), num=NPY.DOUBLE, kind=NPY.FLOATINGLTR, - name="float64", char=NPY.DOUBLELTR, - w_box_type = space.gettypefor(interp_boxes.W_Float64Box), - alternate_constructors=[space.w_float, - space.gettypefor(interp_boxes.W_NumberBox), - space.gettypefor(interp_boxes.W_FloatingBox), - ], - aliases=["float", "double"], + w_box_type=space.gettypefor(interp_boxes.W_Float64Box), ) self.w_floatlongdtype = W_Dtype( types.FloatLong(), num=NPY.LONGDOUBLE, kind=NPY.FLOATINGLTR, - name="float%d" % (interp_boxes.long_double_size * 8), char=NPY.LONGDOUBLELTR, w_box_type=space.gettypefor(interp_boxes.W_FloatLongBox), - aliases=["longdouble", "longfloat"], ) self.w_complex64dtype = W_Dtype( types.Complex64(), num=NPY.CFLOAT, kind=NPY.COMPLEXLTR, - name="complex64", char=NPY.CFLOATLTR, - w_box_type = space.gettypefor(interp_boxes.W_Complex64Box), - aliases=['csingle'], - float_type=NPY.FLOATLTR, + w_box_type=space.gettypefor(interp_boxes.W_Complex64Box), ) self.w_complex128dtype = W_Dtype( types.Complex128(), num=NPY.CDOUBLE, kind=NPY.COMPLEXLTR, - name="complex128", char=NPY.CDOUBLELTR, - w_box_type = space.gettypefor(interp_boxes.W_Complex128Box), - alternate_constructors=[space.w_complex, - space.gettypefor(interp_boxes.W_ComplexFloatingBox)], - aliases=["complex", 'cfloat', 'cdouble'], - float_type=NPY.DOUBLELTR, + w_box_type=space.gettypefor(interp_boxes.W_Complex128Box), ) self.w_complexlongdtype = W_Dtype( types.ComplexLong(), num=NPY.CLONGDOUBLE, kind=NPY.COMPLEXLTR, - name="complex%d" % (interp_boxes.long_double_size * 16), char=NPY.CLONGDOUBLELTR, - w_box_type = space.gettypefor(interp_boxes.W_ComplexLongBox), - aliases=["clongdouble", "clongfloat"], - float_type=NPY.LONGDOUBLELTR, + w_box_type=space.gettypefor(interp_boxes.W_ComplexLongBox), ) self.w_stringdtype = W_Dtype( types.StringType(), - size=0, + elsize=0, num=NPY.STRING, kind=NPY.STRINGLTR, - name='string', char=NPY.STRINGLTR, - w_box_type = space.gettypefor(interp_boxes.W_StringBox), - alternate_constructors=[space.w_str, - space.gettypefor(interp_boxes.W_CharacterBox)], - aliases=["str"], + w_box_type=space.gettypefor(interp_boxes.W_StringBox), ) self.w_unicodedtype = W_Dtype( types.UnicodeType(), - size=0, + elsize=0, num=NPY.UNICODE, kind=NPY.UNICODELTR, - name='unicode', char=NPY.UNICODELTR, - w_box_type = space.gettypefor(interp_boxes.W_UnicodeBox), - alternate_constructors=[space.w_unicode], + w_box_type=space.gettypefor(interp_boxes.W_UnicodeBox), ) self.w_voiddtype = W_Dtype( types.VoidType(), - size=0, + elsize=0, num=NPY.VOID, kind=NPY.VOIDLTR, - name='void', char=NPY.VOIDLTR, - w_box_type = space.gettypefor(interp_boxes.W_VoidBox), - 
#alternate_constructors=[space.w_buffer], - # XXX no buffer in space - #alternate_constructors=[space.gettypefor(interp_boxes.W_GenericBox)], - # XXX fix, leads to _coerce error + w_box_type=space.gettypefor(interp_boxes.W_VoidBox), ) self.w_float16dtype = W_Dtype( types.Float16(), num=NPY.HALF, kind=NPY.FLOATINGLTR, - name="float16", char=NPY.HALFLTR, w_box_type=space.gettypefor(interp_boxes.W_Float16Box), ) @@ -862,22 +788,62 @@ types.Long(), num=NPY.LONG, kind=NPY.SIGNEDLTR, - name='intp', char=NPY.INTPLTR, - w_box_type = space.gettypefor(interp_boxes.W_LongBox), + w_box_type=space.gettypefor(interp_boxes.W_LongBox), ) self.w_uintpdtype = W_Dtype( types.ULong(), num=NPY.ULONG, kind=NPY.UNSIGNEDLTR, - name='uintp', char=NPY.UINTPLTR, - w_box_type = space.gettypefor(interp_boxes.W_ULongBox), + w_box_type=space.gettypefor(interp_boxes.W_ULongBox), ) + aliases = { + NPY.BOOL: ['bool', 'bool8'], + NPY.BYTE: ['byte'], + NPY.UBYTE: ['ubyte'], + NPY.SHORT: ['short'], + NPY.USHORT: ['ushort'], + NPY.LONG: ['int', 'intp', 'p'], + NPY.ULONG: ['uint', 'uintp', 'P'], + NPY.LONGLONG: ['longlong'], + NPY.ULONGLONG: ['ulonglong'], + NPY.FLOAT: ['single'], + NPY.DOUBLE: ['float', 'double'], + NPY.LONGDOUBLE: ['longdouble', 'longfloat'], + NPY.CFLOAT: ['csingle'], + NPY.CDOUBLE: ['complex', 'cfloat', 'cdouble'], + NPY.CLONGDOUBLE: ['clongdouble', 'clongfloat'], + NPY.STRING: ['string', 'str'], + NPY.UNICODE: ['unicode'], + } + self.alternate_constructors = { + NPY.BOOL: [space.w_bool], + NPY.LONG: [space.w_int, + space.gettypefor(interp_boxes.W_IntegerBox), + space.gettypefor(interp_boxes.W_SignedIntegerBox)], + NPY.ULONG: [space.gettypefor(interp_boxes.W_UnsignedIntegerBox)], + NPY.LONGLONG: [space.w_long], + NPY.DOUBLE: [space.w_float, + space.gettypefor(interp_boxes.W_NumberBox), + space.gettypefor(interp_boxes.W_FloatingBox)], + NPY.CDOUBLE: [space.w_complex, + space.gettypefor(interp_boxes.W_ComplexFloatingBox)], + NPY.STRING: [space.w_str, + space.gettypefor(interp_boxes.W_CharacterBox)], + NPY.UNICODE: [space.w_unicode], + NPY.VOID: [space.gettypefor(interp_boxes.W_GenericBox)], + #space.w_buffer, # XXX no buffer in space + } float_dtypes = [self.w_float16dtype, self.w_float32dtype, self.w_float64dtype, self.w_floatlongdtype] complex_dtypes = [self.w_complex64dtype, self.w_complex128dtype, self.w_complexlongdtype] + self.component_dtypes = { + NPY.CFLOAT: self.w_float32dtype, + NPY.CDOUBLE: self.w_float64dtype, + NPY.CLONGDOUBLE: self.w_floatlongdtype, + } self.builtin_dtypes = [ self.w_booldtype, self.w_int8dtype, self.w_uint8dtype, @@ -890,7 +856,7 @@ self.w_intpdtype, self.w_uintpdtype, ] self.float_dtypes_by_num_bytes = sorted( - (dtype.get_size(), dtype) + (dtype.elsize, dtype) for dtype in float_dtypes ) self.dtypes_by_num = {} @@ -900,15 +866,16 @@ for dtype in reversed(self.builtin_dtypes): dtype.fields = None # mark these as builtin self.dtypes_by_num[dtype.num] = dtype - self.dtypes_by_name[dtype.name] = dtype - for can_name in [dtype.kind + str(dtype.get_size()), + self.dtypes_by_name[dtype.get_name()] = dtype + for can_name in [dtype.kind + str(dtype.elsize), dtype.char]: self.dtypes_by_name[can_name] = dtype self.dtypes_by_name[NPY.NATBYTE + can_name] = dtype self.dtypes_by_name[NPY.NATIVE + can_name] = dtype self.dtypes_by_name[NPY.IGNORE + can_name] = dtype - for alias in dtype.aliases: - self.dtypes_by_name[alias] = dtype + if dtype.num in aliases: + for alias in aliases[dtype.num]: + self.dtypes_by_name[alias] = dtype typeinfo_full = { 'LONGLONG': self.w_int64dtype, @@ -956,13 
+923,13 @@ for k, v in typeinfo_partial.iteritems(): space.setitem(w_typeinfo, space.wrap(k), space.gettypefor(v)) for k, dtype in typeinfo_full.iteritems(): - itembits = dtype.get_size() * 8 + itembits = dtype.elsize * 8 items_w = [space.wrap(dtype.char), space.wrap(dtype.num), space.wrap(itembits), space.wrap(dtype.itemtype.get_element_size())] - if dtype.is_int_type(): - if dtype.kind == NPY.GENBOOLLTR: + if dtype.is_int(): + if dtype.is_bool(): w_maxobj = space.wrap(1) w_minobj = space.wrap(0) elif dtype.is_signed(): diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py --- a/pypy/module/micronumpy/interp_numarray.py +++ b/pypy/module/micronumpy/interp_numarray.py @@ -87,8 +87,8 @@ def descr_set_dtype(self, space, w_dtype): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) - if (dtype.get_size() != self.get_dtype().get_size() or - dtype.is_flexible_type() or self.get_dtype().is_flexible_type()): + if (dtype.elsize != self.get_dtype().elsize or + dtype.is_flexible() or self.get_dtype().is_flexible()): raise OperationError(space.w_ValueError, space.wrap( "new type not compatible with array.")) self.implementation.set_dtype(space, dtype) @@ -101,10 +101,10 @@ return space.wrap(len(self.get_shape())) def descr_get_itemsize(self, space): - return space.wrap(self.get_dtype().get_size()) + return space.wrap(self.get_dtype().elsize) def descr_get_nbytes(self, space): - return space.wrap(self.get_size() * self.get_dtype().get_size()) + return space.wrap(self.get_size() * self.get_dtype().elsize) def descr_fill(self, space, w_value): self.fill(space, self.get_dtype().coerce(space, w_value)) @@ -220,7 +220,7 @@ def descr_getitem(self, space, w_idx): if space.is_w(w_idx, space.w_Ellipsis): return self - elif isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type() \ + elif isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool() \ and len(w_idx.get_shape()) > 0: return self.getitem_filter(space, w_idx) try: @@ -235,7 +235,7 @@ self.implementation.setitem_index(space, index_list, w_value) def descr_setitem(self, space, w_idx, w_value): - if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool_type() \ + if isinstance(w_idx, W_NDimArray) and w_idx.get_dtype().is_bool() \ and len(w_idx.get_shape()) > 0: self.setitem_filter(space, w_idx, convert_to_array(space, w_value)) return @@ -281,7 +281,7 @@ else: s.append(separator) s.append(' ') - if self.is_scalar() and dtype.is_str_type(): + if self.is_scalar() and dtype.is_str(): s.append(dtype.itemtype.to_str(i.getitem())) else: s.append(dtype.itemtype.str_format(i.getitem())) @@ -344,7 +344,7 @@ def descr_set_imag(self, space, w_value): # if possible, copy (broadcast) values into self - if not self.get_dtype().is_complex_type(): + if not self.get_dtype().is_complex(): raise OperationError(space.w_TypeError, space.wrap('array does not have imaginary part to set')) self.implementation.set_imag(space, self, w_value) @@ -573,11 +573,12 @@ space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) if new_dtype.num == NPY.VOID: raise oefmt(space.w_NotImplementedError, - "%s.astype(%s) not implemented yet", cur_dtype.name, new_dtype.name) - if new_dtype.num == NPY.STRING and new_dtype.size == 0: + "astype(%s) not implemented yet", + new_dtype.get_name()) + if new_dtype.num == NPY.STRING and new_dtype.elsize == 0: if cur_dtype.num == NPY.STRING: new_dtype = interp_dtype.variable_dtype(space, - 'S' + str(cur_dtype.size)) 
+ 'S' + str(cur_dtype.elsize)) impl = self.implementation if isinstance(impl, scalar.Scalar): return W_NDimArray.new_scalar(space, new_dtype, impl.value) @@ -688,7 +689,7 @@ @unwrap_spec(decimals=int) def descr_round(self, space, decimals=0, w_out=None): if space.is_none(w_out): - if self.get_dtype().is_bool_type(): + if self.get_dtype().is_bool(): #numpy promotes bool.round() to float16. Go figure. w_out = W_NDimArray.from_shape(space, self.get_shape(), interp_dtype.get_dtype_cache(space).w_float16dtype) @@ -699,7 +700,7 @@ "return arrays must be of ArrayType")) out = interp_dtype.dtype_agreement(space, [self], self.get_shape(), w_out) - if out.get_dtype().is_bool_type() and self.get_dtype().is_bool_type(): + if out.get_dtype().is_bool() and self.get_dtype().is_bool(): calc_dtype = interp_dtype.get_dtype_cache(space).w_longdtype else: calc_dtype = out.get_dtype() @@ -780,8 +781,8 @@ w_dtype)) else: dtype = self.get_dtype() - old_itemsize = self.get_dtype().get_size() - new_itemsize = dtype.get_size() + old_itemsize = self.get_dtype().elsize + new_itemsize = dtype.elsize impl = self.implementation if new_itemsize == 0: raise OperationError(space.w_TypeError, space.wrap( @@ -1029,7 +1030,7 @@ except AttributeError: raise oefmt(space.w_NotImplementedError, '%s not implemented for %s', - op_name, self.get_dtype().name) + op_name, self.get_dtype().get_name()) return space.wrap(res) return func_with_new_name(impl, "reduce_arg%s_impl" % op_name) @@ -1092,7 +1093,7 @@ raise OperationError(space.w_TypeError, space.wrap( "only integer arrays with one element " "can be converted to an index")) - if not self.get_dtype().is_int_type() or self.get_dtype().is_bool_type(): + if not self.get_dtype().is_int() or self.get_dtype().is_bool(): raise OperationError(space.w_TypeError, space.wrap( "only integer arrays with one element " "can be converted to an index")) @@ -1187,7 +1188,7 @@ if not shape: raise OperationError(space.w_TypeError, space.wrap( "numpy scalars from buffers not supported yet")) - totalsize = support.product(shape) * dtype.get_size() + totalsize = support.product(shape) * dtype.elsize if totalsize+offset > buf.getlength(): raise OperationError(space.w_TypeError, space.wrap( "buffer is too small for requested array")) @@ -1447,9 +1448,10 @@ # scalars and strings w/o __array__ method isstr = space.isinstance_w(w_object, space.w_str) if not issequence_w(space, w_object) or isstr: - if dtype is None or (dtype.is_str_or_unicode() and dtype.get_size() < 1): - dtype = interp_ufuncs.find_dtype_for_scalar(space, w_object) - return W_NDimArray.new_scalar(space, dtype, w_object) + if dtype is None or dtype.char != NPY.CHARLTR: + if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): + dtype = interp_ufuncs.find_dtype_for_scalar(space, w_object) + return W_NDimArray.new_scalar(space, dtype, w_object) if space.is_none(w_order): order = 'C' @@ -1477,14 +1479,14 @@ # not an array or incorrect dtype shape, elems_w = find_shape_and_elems(space, w_object, dtype) - if dtype is None or (dtype.is_str_or_unicode() and dtype.get_size() < 1): + if dtype is None or (dtype.is_str_or_unicode() and dtype.elsize < 1): for w_elem in elems_w: if isinstance(w_elem, W_NDimArray) and w_elem.is_scalar(): w_elem = w_elem.get_scalar_value() dtype = interp_ufuncs.find_dtype_for_scalar(space, w_elem, dtype) if dtype is None: dtype = interp_dtype.get_dtype_cache(space).w_float64dtype - elif dtype.is_str_or_unicode() and dtype.get_size() < 1: + elif dtype.is_str_or_unicode() and dtype.elsize < 1: # promote S0 -> 
S1, U0 -> U1 dtype = interp_dtype.variable_dtype(space, dtype.char + '1') @@ -1500,7 +1502,7 @@ def zeros(space, w_shape, w_dtype=None, w_order=None): dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) - if dtype.is_str_or_unicode() and dtype.get_size() < 1: + if dtype.is_str_or_unicode() and dtype.elsize < 1: dtype = interp_dtype.variable_dtype(space, dtype.char + '1') shape = _find_shape(space, w_shape, dtype) return W_NDimArray.from_shape(space, shape, dtype=dtype) @@ -1513,24 +1515,30 @@ else: dtype = space.interp_w(interp_dtype.W_Dtype, space.call_function(space.gettypefor(interp_dtype.W_Dtype), w_dtype)) - if dtype.is_str_or_unicode() and dtype.get_size() < 1: + if dtype.is_str_or_unicode() and dtype.elsize < 1: dtype = interp_dtype.variable_dtype(space, dtype.char + '1') return W_NDimArray.from_shape(space, w_a.get_shape(), dtype=dtype, w_instance=w_a if subok else None) -def _reconstruct(space, w_subtype, w_shape, w_dtype): - return descr_new_array(space, w_subtype, w_shape, w_dtype) - def build_scalar(space, w_dtype, w_state): from rpython.rtyper.lltypesystem import rffi, lltype - - assert isinstance(w_dtype, interp_dtype.W_Dtype) - + if not isinstance(w_dtype, interp_dtype.W_Dtype): + raise oefmt(space.w_TypeError, + "argument 1 must be numpy.dtype, not %T", w_dtype) + if w_dtype.elsize == 0: + raise oefmt(space.w_ValueError, "itemsize cannot be zero") + if not space.isinstance_w(w_state, space.w_str): + raise oefmt(space.w_TypeError, "initializing object must be a string") + if space.len_w(w_state) != w_dtype.elsize: + raise oefmt(space.w_ValueError, "initialization string is too small") state = rffi.str2charp(space.str_w(w_state)) box = w_dtype.itemtype.box_raw_data(state) lltype.free(state, flavor="raw") return box +def _reconstruct(space, w_subtype, w_shape, w_dtype): + return descr_new_array(space, w_subtype, w_shape, w_dtype) + W_FlatIterator.typedef = TypeDef("flatiter", __module__ = "numpy", diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py --- a/pypy/module/micronumpy/interp_support.py +++ b/pypy/module/micronumpy/interp_support.py @@ -59,7 +59,7 @@ return space.wrap(a) def _fromstring_bin(space, s, count, length, dtype): - itemsize = dtype.get_size() + itemsize = dtype.elsize assert itemsize >= 0 if count == -1: count = length / itemsize diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py --- a/pypy/module/micronumpy/interp_ufuncs.py +++ b/pypy/module/micronumpy/interp_ufuncs.py @@ -168,7 +168,7 @@ "supported for binary functions")) assert isinstance(self, W_Ufunc2) obj = convert_to_array(space, w_obj) - if obj.get_dtype().is_flexible_type(): + if obj.get_dtype().is_flexible(): raise OperationError(space.w_TypeError, space.wrap('cannot perform reduce with flexible type')) obj_shape = obj.get_shape() @@ -287,12 +287,12 @@ out = None w_obj = convert_to_array(space, w_obj) dtype = w_obj.get_dtype() - if dtype.is_flexible_type(): + if dtype.is_flexible(): raise OperationError(space.w_TypeError, space.wrap('Not implemented for this type')) - if (self.int_only and not dtype.is_int_type() or - not self.allow_bool and dtype.is_bool_type() or - not self.allow_complex and dtype.is_complex_type()): + if (self.int_only and not dtype.is_int() or + not self.allow_bool and dtype.is_bool() or + not self.allow_complex and dtype.is_complex()): raise OperationError(space.w_TypeError, space.wrap( "ufunc %s not supported for the input 
type" % self.name)) calc_dtype = find_unaryop_result_dtype(space, @@ -311,7 +311,7 @@ res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype else: res_dtype = calc_dtype - if self.complex_to_float and calc_dtype.is_complex_type(): + if self.complex_to_float and calc_dtype.is_complex(): if calc_dtype.num == NPY.CFLOAT: res_dtype = interp_dtype.get_dtype_cache(space).w_float32dtype else: @@ -351,11 +351,11 @@ self.done_func = None def are_common_types(self, dtype1, dtype2): - if dtype1.is_complex_type() and dtype2.is_complex_type(): - return True - elif not (dtype1.is_complex_type() or dtype2.is_complex_type()) and \ - (dtype1.is_int_type() and dtype2.is_int_type() or dtype1.is_float_type() and dtype2.is_float_type()) and \ - not (dtype1.is_bool_type() or dtype2.is_bool_type()): + if dtype1.is_bool() or dtype2.is_bool(): + return False + if (dtype1.is_int() and dtype2.is_int() or + dtype1.is_float() and dtype2.is_float() or + dtype1.is_complex() and dtype2.is_complex()): return True return False @@ -370,13 +370,13 @@ w_rhs = convert_to_array(space, w_rhs) w_ldtype = w_lhs.get_dtype() w_rdtype = w_rhs.get_dtype() - if w_ldtype.is_str_type() and w_rdtype.is_str_type() and \ + if w_ldtype.is_str() and w_rdtype.is_str() and \ self.comparison_func: pass - elif (w_ldtype.is_str_type() or w_rdtype.is_str_type()) and \ + elif (w_ldtype.is_str() or w_rdtype.is_str()) and \ self.comparison_func and w_out is None: return space.wrap(False) - elif w_ldtype.is_flexible_type() or w_rdtype.is_flexible_type(): + elif w_ldtype.is_flexible() or w_rdtype.is_flexible(): if self.comparison_func: if self.name == 'equal' or self.name == 'not_equal': res = w_ldtype.eq(space, w_rdtype) @@ -386,8 +386,9 @@ return space.w_NotImplemented else: raise oefmt(space.w_TypeError, - 'unsupported operand dtypes %s and %s for "%s"', - w_rdtype.name, w_ldtype.name, self.name) + 'unsupported operand dtypes %s and %s for "%s"', + w_rdtype.get_name(), w_ldtype.get_name(), + self.name) if self.are_common_types(w_ldtype, w_rdtype): if not w_lhs.is_scalar() and w_rhs.is_scalar(): @@ -398,13 +399,13 @@ w_ldtype, w_rdtype, promote_to_float=self.promote_to_float, promote_bools=self.promote_bools) - if (self.int_only and (not w_ldtype.is_int_type() or - not w_rdtype.is_int_type() or - not calc_dtype.is_int_type()) or - not self.allow_bool and (w_ldtype.is_bool_type() or - w_rdtype.is_bool_type()) or - not self.allow_complex and (w_ldtype.is_complex_type() or - w_rdtype.is_complex_type())): + if (self.int_only and (not w_ldtype.is_int() or + not w_rdtype.is_int() or + not calc_dtype.is_int()) or + not self.allow_bool and (w_ldtype.is_bool() or + w_rdtype.is_bool()) or + not self.allow_complex and (w_ldtype.is_complex() or + w_rdtype.is_complex())): raise OperationError(space.w_TypeError, space.wrap( "ufunc '%s' not supported for the input types" % self.name)) if space.is_none(w_out): @@ -466,7 +467,7 @@ return interp_dtype.get_dtype_cache(space).w_int8dtype # Everything numeric promotes to complex - if dt2.is_complex_type() or dt1.is_complex_type(): + if dt2.is_complex() or dt1.is_complex(): if dt2.num == NPY.HALF: dt1, dt2 = dt2, dt1 if dt2.num == NPY.CFLOAT: @@ -487,7 +488,7 @@ if promote_to_float: return find_unaryop_result_dtype(space, dt2, promote_to_float=True) # If they're the same kind, choose the greater one. 
- if dt1.kind == dt2.kind and not dt2.is_flexible_type(): + if dt1.kind == dt2.kind and not dt2.is_flexible(): if dt2.num == NPY.HALF: return dt1 return dt2 @@ -512,13 +513,13 @@ elif dt2.num == NPY.ULONGLONG or (LONG_BIT == 64 and dt2.num == NPY.ULONG): # UInt64 + signed = Float64 dtypenum = NPY.DOUBLE - elif dt2.is_flexible_type(): + elif dt2.is_flexible(): # For those operations that get here (concatenate, stack), # flexible types take precedence over numeric type - if dt2.is_record_type(): + if dt2.is_record(): return dt2 if dt1.is_str_or_unicode(): - if dt2.get_size() >= dt1.get_size(): + if dt2.elsize >= dt1.elsize: return dt2 return dt1 return dt2 @@ -541,10 +542,10 @@ promote_bools=False, promote_to_largest=False): if promote_to_largest: if dt.kind == NPY.GENBOOLLTR or dt.kind == NPY.SIGNEDLTR: - if dt.get_size() * 8 < LONG_BIT: + if dt.elsize * 8 < LONG_BIT: return interp_dtype.get_dtype_cache(space).w_longdtype elif dt.kind == NPY.UNSIGNEDLTR: - if dt.get_size() * 8 < LONG_BIT: + if dt.elsize * 8 < LONG_BIT: return interp_dtype.get_dtype_cache(space).w_ulongdtype else: assert dt.kind == NPY.FLOATINGLTR or dt.kind == NPY.COMPLEXLTR @@ -595,7 +596,7 @@ return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) elif current_guess.num == NPY.STRING: - if current_guess.get_size() < space.len_w(w_obj): + if current_guess.elsize < space.len_w(w_obj): return interp_dtype.variable_dtype(space, 'S%d' % space.len_w(w_obj)) return current_guess @@ -612,7 +613,7 @@ except AttributeError: raise oefmt(space.w_NotImplementedError, "%s not implemented for %s", - ufunc_name, dtype.name) + ufunc_name, dtype.get_name()) if argcount == 1: def impl(res_dtype, value): res = get_op(res_dtype)(value) diff --git a/pypy/module/micronumpy/iter.py b/pypy/module/micronumpy/iter.py --- a/pypy/module/micronumpy/iter.py +++ b/pypy/module/micronumpy/iter.py @@ -174,7 +174,7 @@ def __init__(self, array): self.array = array self.offset = 0 - self.skip = array.dtype.get_size() + self.skip = array.dtype.elsize self.size = array.size def setitem(self, elem): diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -459,7 +459,7 @@ builder = StringBuilder() iter = arr.create_iter() w_res_str = W_NDimArray.from_shape(space, [1], arr.get_dtype(), order='C') - itemsize = arr.get_dtype().get_size() + itemsize = arr.get_dtype().elsize res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char), w_res_str.implementation.get_storage_as_int(space)) while not iter.done(): diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -69,7 +69,7 @@ return True def find_shape_and_elems(space, w_iterable, dtype): - is_rec_type = dtype is not None and dtype.is_record_type() + is_rec_type = dtype is not None and dtype.is_record() if is_rec_type and is_single_elem(space, w_iterable, is_rec_type): return [], [w_iterable] if isinstance(w_iterable, W_NDimArray) and w_iterable.is_scalar(): diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py --- a/pypy/module/micronumpy/support.py +++ b/pypy/module/micronumpy/support.py @@ -28,8 +28,8 @@ shape_rev.reverse() for sh in shape_rev: slimit = max(sh, 1) - strides.append(s * dtype.get_size()) - backstrides.append(s * (slimit - 1) * dtype.get_size()) + strides.append(s * dtype.elsize) + backstrides.append(s * (slimit - 1) * dtype.elsize) s *= slimit if order == 
'C': strides.reverse() diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py --- a/pypy/module/micronumpy/test/test_dtypes.py +++ b/pypy/module/micronumpy/test/test_dtypes.py @@ -58,6 +58,7 @@ assert dtype('int8').num == 1 assert dtype('int8').name == 'int8' + assert dtype('void').name == 'void' assert dtype(int).fields is None assert dtype(int).names is None assert dtype(int).hasobject is False @@ -65,13 +66,10 @@ assert dtype(None) is dtype(float) - e = dtype('int8') - exc = raises(KeyError, "e[2]") - assert exc.value.message == "There are no fields in dtype int8." - exc = raises(KeyError, "e['z']") - assert exc.value.message == "There are no fields in dtype int8." - exc = raises(KeyError, "e[None]") - assert exc.value.message == "There are no fields in dtype int8." + for d in [dtype('i4')]: + for key in ["d[2]", "d['z']", "d[None]"]: + exc = raises(KeyError, key) From noreply at buildbot.pypy.org Fri Feb 28 11:13:40 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 28 Feb 2014 11:13:40 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: cool, now a ctypes test started working for free :) Message-ID: <20140228101340.9E2481C244E@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69544:8fdc3ffcf225 Date: 2014-02-28 11:12 +0100 http://bitbucket.org/pypy/pypy/changeset/8fdc3ffcf225/ Log: cool, now a ctypes test started working for free :) diff --git a/lib-python/2.7/ctypes/test/test_numbers.py b/lib-python/2.7/ctypes/test/test_numbers.py --- a/lib-python/2.7/ctypes/test/test_numbers.py +++ b/lib-python/2.7/ctypes/test/test_numbers.py @@ -105,7 +105,6 @@ self.assertEqual(ArgType, type(parm)) - @xfail def test_floats(self): # c_float and c_double can be created from # Python int, long and float From noreply at buildbot.pypy.org Fri Feb 28 14:36:16 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 28 Feb 2014 14:36:16 +0100 (CET) Subject: [pypy-commit] pypy default: fix optimization of getarrayitem_gc_pure/strgetitem/unicodegetitem Message-ID: <20140228133616.6D0A31C244E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69545:58c3d8552833 Date: 2014-02-28 06:39 -0500 http://bitbucket.org/pypy/pypy/changeset/58c3d8552833/ Log: fix optimization of getarrayitem_gc_pure/strgetitem/unicodegetitem diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -628,13 +628,6 @@ def optimize_DEBUG_MERGE_POINT(self, op): self.emit_operation(op) - def optimize_GETARRAYITEM_GC_PURE(self, op): - indexvalue = self.getvalue(op.getarg(1)) - if indexvalue.is_constant(): - arrayvalue = self.getvalue(op.getarg(0)) - arrayvalue.make_len_gt(MODE_ARRAY, op.getdescr(), indexvalue.box.getint()) - self.optimize_default(op) - def optimize_STRGETITEM(self, op): indexvalue = self.getvalue(op.getarg(1)) if indexvalue.is_constant(): diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -110,11 +110,6 @@ def produce_potential_short_preamble_ops(self, sb): for op in self.emitted_pure_operations: - if op.getopnum() == rop.GETARRAYITEM_GC_PURE or \ - op.getopnum() == rop.STRGETITEM or \ - op.getopnum() == rop.UNICODEGETITEM: - if not self.getvalue(op.getarg(1)).is_constant(): - continue 
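# A minimal app-level sketch of what the int_w-refactor merge above enables
# (illustration only; MyInt/MyFloat are hypothetical classes, but the behaviour
# mirrors the tests added in this changeset):
import struct

class MyInt(object):
    def __int__(self):
        return 65

class MyFloat(object):
    def __float__(self):
        return 42.3

print '%c' % MyInt()          # 'A' -- string formatting now accepts objects with __int__
print struct.unpack('d', struct.pack('d', MyFloat()))[0]   # 42.3 -- struct.pack calls __float__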
sb.add_potential(op) dispatch_opt = make_dispatcher_method(OptPure, 'optimize_', diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -6128,13 +6128,12 @@ i5 = int_add(i1, i3) i4 = strgetitem(p1, i5) escape(i4) - jump(p1, i1, i2, i3, i5) - """ - expected = """ - [p1, i1, i2, i3, i5] - i4 = strgetitem(p1, i5) + jump(p1, i1, i2, i3, i4) + """ + expected = """ + [p1, i1, i2, i3, i4] escape(i4) - jump(p1, i1, i2, i3, i5) + jump(p1, i1, i2, i3, i4) """ self.optimize_strunicode_loop(ops, expected, preamble) @@ -6195,7 +6194,6 @@ """ expected = """ [p0, i0] - i1 = strgetitem(p0, i0) jump(p0, i0) """ self.optimize_loop(ops, expected) @@ -6211,7 +6209,6 @@ """ expected = """ [p0, i0] - i1 = unicodegetitem(p0, i0) jump(p0, i0) """ self.optimize_loop(ops, expected) @@ -7183,7 +7180,12 @@ call(i843, descr=nonwritedescr) jump(p9, i1) """ - self.optimize_loop(ops, ops) + expected = """ + [p9, i1, i843] + call(i843, descr=nonwritedescr) + jump(p9, i1, i843) + """ + self.optimize_loop(ops, expected) def test_loopinvariant_unicodelen(self): ops = """ @@ -7206,7 +7208,12 @@ call(i843, descr=nonwritedescr) jump(p9, i1) """ - self.optimize_loop(ops, ops) + expected = """ + [p9, i1, i843] + call(i843, descr=nonwritedescr) + jump(p9, i1, i843) + """ + self.optimize_loop(ops, expected) def test_loopinvariant_arraylen(self): ops = """ @@ -7332,7 +7339,12 @@ call(i843, descr=nonwritedescr) jump(p9, i1) """ - self.optimize_loop(ops, ops) + expected = """ + [p9, i1, i843] + call(i843, descr=nonwritedescr) + jump(p9, i1, i843) + """ + self.optimize_loop(ops, expected) def test_loopinvariant_constant_getarrayitem_pure(self): ops = """ From noreply at buildbot.pypy.org Fri Feb 28 14:36:17 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 28 Feb 2014 14:36:17 +0100 (CET) Subject: [pypy-commit] pypy default: more optimizations for dot loop Message-ID: <20140228133617.8EDDF1C244E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69546:7862a38e2a22 Date: 2014-02-28 08:23 -0500 http://bitbucket.org/pypy/pypy/changeset/7862a38e2a22/ Log: more optimizations for dot loop diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -79,7 +79,7 @@ class ArrayIter(object): - _immutable_fields_ = ['array', 'size', 'indices', 'shape[*]', + _immutable_fields_ = ['array', 'size', 'indices', 'shape_m1[*]', 'strides[*]', 'backstrides[*]'] def __init__(self, array, size, shape, strides, backstrides): @@ -87,7 +87,7 @@ self.array = array self.size = size self.indices = [0] * len(shape) - self.shape = shape + self.shape_m1 = [s - 1 for s in shape] self.strides = strides self.backstrides = backstrides self.reset() @@ -95,15 +95,15 @@ @jit.unroll_safe def reset(self): self.index = 0 - for i in xrange(len(self.shape)): + for i in xrange(len(self.shape_m1)): self.indices[i] = 0 self.offset = self.array.start @jit.unroll_safe def next(self): self.index += 1 - for i in xrange(len(self.shape) - 1, -1, -1): - if self.indices[i] < self.shape[i] - 1: + for i in xrange(len(self.shape_m1) - 1, -1, -1): + if self.indices[i] < self.shape_m1[i]: self.indices[i] += 1 self.offset += self.strides[i] break @@ -117,14 +117,14 @@ if step == 0: return self.index += step - for i in xrange(len(self.shape) - 
1, -1, -1): - if self.indices[i] < self.shape[i] - step: + for i in xrange(len(self.shape_m1) - 1, -1, -1): + if self.indices[i] < (self.shape_m1[i] + 1) - step: self.indices[i] += step self.offset += self.strides[i] * step break else: - remaining_step = (self.indices[i] + step) // self.shape[i] - this_i_step = step - remaining_step * self.shape[i] + remaining_step = (self.indices[i] + step) // (self.shape_m1[i] + 1) + this_i_step = step - remaining_step * (self.shape_m1[i] + 1) self.indices[i] = self.indices[i] + this_i_step self.offset += self.strides[i] * this_i_step step = remaining_step diff --git a/pypy/module/micronumpy/loop.py b/pypy/module/micronumpy/loop.py --- a/pypy/module/micronumpy/loop.py +++ b/pypy/module/micronumpy/loop.py @@ -280,23 +280,30 @@ ''' left_shape = left.get_shape() right_shape = right.get_shape() + left_impl = left.implementation + right_impl = right.implementation assert left_shape[-1] == right_shape[right_critical_dim] assert result.get_dtype() == dtype outi = result.create_iter() - lefti = AllButAxisIter(left.implementation, len(left_shape) - 1) - righti = AllButAxisIter(right.implementation, right_critical_dim) + lefti = AllButAxisIter(left_impl, len(left_shape) - 1) + righti = AllButAxisIter(right_impl, right_critical_dim) + n = left_impl.shape[-1] + s1 = left_impl.strides[-1] + s2 = right_impl.strides[right_critical_dim] while not lefti.done(): while not righti.done(): oval = outi.getitem() i1 = lefti.offset i2 = righti.offset - for _ in xrange(left.implementation.shape[-1]): + i = 0 + while i < n: + i += 1 dot_driver.jit_merge_point(dtype=dtype) - lval = left.implementation.getitem(i1).convert_to(space, dtype) - rval = right.implementation.getitem(i2).convert_to(space, dtype) + lval = left_impl.getitem(i1).convert_to(space, dtype) + rval = right_impl.getitem(i2).convert_to(space, dtype) oval = dtype.itemtype.add(oval, dtype.itemtype.mul(lval, rval)) - i1 += left.implementation.strides[-1] - i2 += right.implementation.strides[right_critical_dim] + i1 += s1 + i2 += s2 outi.setitem(oval) outi.next() righti.next() diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -512,36 +512,35 @@ self.check_simple_loop({'float_add': 1, 'float_mul': 1, 'guard_not_invalidated': 1, - 'guard_false': 1, + 'guard_true': 1, 'int_add': 3, - 'int_ge': 1, + 'int_lt': 1, 'jump': 1, - 'raw_load': 2, - 'setfield_gc': 1}) + 'raw_load': 2}) self.check_resops({'arraylen_gc': 4, 'float_add': 2, 'float_mul': 2, 'getarrayitem_gc': 11, 'getarrayitem_gc_pure': 15, - 'getfield_gc': 26, - 'getfield_gc_pure': 32, + 'getfield_gc': 30, + 'getfield_gc_pure': 40, 'guard_class': 4, - 'guard_false': 18, + 'guard_false': 14, + 'guard_nonnull': 8, + 'guard_nonnull_class': 4, 'guard_not_invalidated': 2, - 'guard_true': 9, + 'guard_true': 13, + 'guard_value': 4, 'int_add': 25, - 'int_ge': 8, + 'int_ge': 4, 'int_le': 8, - 'int_lt': 7, - 'int_sub': 15, + 'int_lt': 11, + 'int_sub': 8, 'jump': 3, - 'new': 1, - 'new_with_vtable': 1, 'raw_load': 6, 'raw_store': 1, - 'same_as': 2, 'setarrayitem_gc': 10, - 'setfield_gc': 19}) + 'setfield_gc': 14}) def define_argsort(): return """ From noreply at buildbot.pypy.org Fri Feb 28 15:13:09 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 28 Feb 2014 15:13:09 +0100 (CET) Subject: [pypy-commit] pypy default: move 58c3d8552833 to branch Message-ID: <20140228141309.AEE791D2522@cobra.cs.uni-duesseldorf.de> Author: Brian 
Kearns Branch: Changeset: r69547:dcb7660cb5f5 Date: 2014-02-28 09:08 -0500 http://bitbucket.org/pypy/pypy/changeset/dcb7660cb5f5/ Log: move 58c3d8552833 to branch diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -628,6 +628,13 @@ def optimize_DEBUG_MERGE_POINT(self, op): self.emit_operation(op) + def optimize_GETARRAYITEM_GC_PURE(self, op): + indexvalue = self.getvalue(op.getarg(1)) + if indexvalue.is_constant(): + arrayvalue = self.getvalue(op.getarg(0)) + arrayvalue.make_len_gt(MODE_ARRAY, op.getdescr(), indexvalue.box.getint()) + self.optimize_default(op) + def optimize_STRGETITEM(self, op): indexvalue = self.getvalue(op.getarg(1)) if indexvalue.is_constant(): diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -110,6 +110,11 @@ def produce_potential_short_preamble_ops(self, sb): for op in self.emitted_pure_operations: + if op.getopnum() == rop.GETARRAYITEM_GC_PURE or \ + op.getopnum() == rop.STRGETITEM or \ + op.getopnum() == rop.UNICODEGETITEM: + if not self.getvalue(op.getarg(1)).is_constant(): + continue sb.add_potential(op) dispatch_opt = make_dispatcher_method(OptPure, 'optimize_', diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -6128,12 +6128,13 @@ i5 = int_add(i1, i3) i4 = strgetitem(p1, i5) escape(i4) - jump(p1, i1, i2, i3, i4) - """ - expected = """ - [p1, i1, i2, i3, i4] + jump(p1, i1, i2, i3, i5) + """ + expected = """ + [p1, i1, i2, i3, i5] + i4 = strgetitem(p1, i5) escape(i4) - jump(p1, i1, i2, i3, i4) + jump(p1, i1, i2, i3, i5) """ self.optimize_strunicode_loop(ops, expected, preamble) @@ -6194,6 +6195,7 @@ """ expected = """ [p0, i0] + i1 = strgetitem(p0, i0) jump(p0, i0) """ self.optimize_loop(ops, expected) @@ -6209,6 +6211,7 @@ """ expected = """ [p0, i0] + i1 = unicodegetitem(p0, i0) jump(p0, i0) """ self.optimize_loop(ops, expected) @@ -7180,12 +7183,7 @@ call(i843, descr=nonwritedescr) jump(p9, i1) """ - expected = """ - [p9, i1, i843] - call(i843, descr=nonwritedescr) - jump(p9, i1, i843) - """ - self.optimize_loop(ops, expected) + self.optimize_loop(ops, ops) def test_loopinvariant_unicodelen(self): ops = """ @@ -7208,12 +7206,7 @@ call(i843, descr=nonwritedescr) jump(p9, i1) """ - expected = """ - [p9, i1, i843] - call(i843, descr=nonwritedescr) - jump(p9, i1, i843) - """ - self.optimize_loop(ops, expected) + self.optimize_loop(ops, ops) def test_loopinvariant_arraylen(self): ops = """ @@ -7339,12 +7332,7 @@ call(i843, descr=nonwritedescr) jump(p9, i1) """ - expected = """ - [p9, i1, i843] - call(i843, descr=nonwritedescr) - jump(p9, i1, i843) - """ - self.optimize_loop(ops, expected) + self.optimize_loop(ops, ops) def test_loopinvariant_constant_getarrayitem_pure(self): ops = """ From noreply at buildbot.pypy.org Fri Feb 28 15:13:10 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 28 Feb 2014 15:13:10 +0100 (CET) Subject: [pypy-commit] pypy test-58c3d8552833: fix optimization of getarrayitem_gc_pure/strgetitem/unicodegetitem Message-ID: <20140228141310.D2BC21D2522@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns 
Branch: test-58c3d8552833 Changeset: r69548:9603b1f96636 Date: 2014-02-28 06:39 -0500 http://bitbucket.org/pypy/pypy/changeset/9603b1f96636/ Log: fix optimization of getarrayitem_gc_pure/strgetitem/unicodegetitem diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -628,13 +628,6 @@ def optimize_DEBUG_MERGE_POINT(self, op): self.emit_operation(op) - def optimize_GETARRAYITEM_GC_PURE(self, op): - indexvalue = self.getvalue(op.getarg(1)) - if indexvalue.is_constant(): - arrayvalue = self.getvalue(op.getarg(0)) - arrayvalue.make_len_gt(MODE_ARRAY, op.getdescr(), indexvalue.box.getint()) - self.optimize_default(op) - def optimize_STRGETITEM(self, op): indexvalue = self.getvalue(op.getarg(1)) if indexvalue.is_constant(): diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -110,11 +110,6 @@ def produce_potential_short_preamble_ops(self, sb): for op in self.emitted_pure_operations: - if op.getopnum() == rop.GETARRAYITEM_GC_PURE or \ - op.getopnum() == rop.STRGETITEM or \ - op.getopnum() == rop.UNICODEGETITEM: - if not self.getvalue(op.getarg(1)).is_constant(): - continue sb.add_potential(op) dispatch_opt = make_dispatcher_method(OptPure, 'optimize_', diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -6128,13 +6128,12 @@ i5 = int_add(i1, i3) i4 = strgetitem(p1, i5) escape(i4) - jump(p1, i1, i2, i3, i5) - """ - expected = """ - [p1, i1, i2, i3, i5] - i4 = strgetitem(p1, i5) + jump(p1, i1, i2, i3, i4) + """ + expected = """ + [p1, i1, i2, i3, i4] escape(i4) - jump(p1, i1, i2, i3, i5) + jump(p1, i1, i2, i3, i4) """ self.optimize_strunicode_loop(ops, expected, preamble) @@ -6195,7 +6194,6 @@ """ expected = """ [p0, i0] - i1 = strgetitem(p0, i0) jump(p0, i0) """ self.optimize_loop(ops, expected) @@ -6211,7 +6209,6 @@ """ expected = """ [p0, i0] - i1 = unicodegetitem(p0, i0) jump(p0, i0) """ self.optimize_loop(ops, expected) @@ -7183,7 +7180,12 @@ call(i843, descr=nonwritedescr) jump(p9, i1) """ - self.optimize_loop(ops, ops) + expected = """ + [p9, i1, i843] + call(i843, descr=nonwritedescr) + jump(p9, i1, i843) + """ + self.optimize_loop(ops, expected) def test_loopinvariant_unicodelen(self): ops = """ @@ -7206,7 +7208,12 @@ call(i843, descr=nonwritedescr) jump(p9, i1) """ - self.optimize_loop(ops, ops) + expected = """ + [p9, i1, i843] + call(i843, descr=nonwritedescr) + jump(p9, i1, i843) + """ + self.optimize_loop(ops, expected) def test_loopinvariant_arraylen(self): ops = """ @@ -7332,7 +7339,12 @@ call(i843, descr=nonwritedescr) jump(p9, i1) """ - self.optimize_loop(ops, ops) + expected = """ + [p9, i1, i843] + call(i843, descr=nonwritedescr) + jump(p9, i1, i843) + """ + self.optimize_loop(ops, expected) def test_loopinvariant_constant_getarrayitem_pure(self): ops = """ From noreply at buildbot.pypy.org Fri Feb 28 16:21:54 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 28 Feb 2014 16:21:54 +0100 (CET) Subject: [pypy-commit] pypy default: Implement another hack Message-ID: <20140228152154.A6E681D2481@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski 
Branch: Changeset: r69549:85643c8758f1 Date: 2014-02-28 17:21 +0200 http://bitbucket.org/pypy/pypy/changeset/85643c8758f1/ Log: Implement another hack diff --git a/include/PyPy.h b/include/PyPy.h --- a/include/PyPy.h +++ b/include/PyPy.h @@ -46,6 +46,12 @@ */ int pypy_execute_source(char *source); +/* a similar function, but inside Python code it'll register + a magic argument c_argument as int, which will be passed as void* from C. + Useful for passing pointers to arbitrary structs that contain callbacks + to register */ +int pypy_execute_source_ptr(char *source, void* ptr); + #ifdef __cplusplus } diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -129,6 +129,19 @@ if before: before() return rffi.cast(rffi.INT, res) + @entrypoint('main', [rffi.CCHARP, lltype.Signed], + c_name='pypy_execute_source_ptr') + def pypy_execute_source_ptr(ll_source, ll_ptr): + after = rffi.aroundstate.after + if after: after() + source = rffi.charp2str(ll_source) + space.setitem(w_globals, space.wrap('c_argument'), + space.wrap(ll_ptr)) + res = _pypy_execute_source(source) + before = rffi.aroundstate.before + if before: before() + return rffi.cast(rffi.INT, res) + @entrypoint('main', [], c_name='pypy_init_threads') def pypy_init_threads(): if not space.config.objspace.usemodules.thread: @@ -166,6 +179,7 @@ return 0 return entry_point, {'pypy_execute_source': pypy_execute_source, + 'pypy_execute_source_ptr': pypy_execute_source_ptr, 'pypy_init_threads': pypy_init_threads, 'pypy_thread_attach': pypy_thread_attach, 'pypy_setup_home': pypy_setup_home} From noreply at buildbot.pypy.org Fri Feb 28 16:27:14 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 28 Feb 2014 16:27:14 +0100 (CET) Subject: [pypy-commit] pypy int_w-refactor: close to be merged branch Message-ID: <20140228152714.2DB051D2616@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: int_w-refactor Changeset: r69550:bcbdb7638f71 Date: 2014-02-28 16:22 +0100 http://bitbucket.org/pypy/pypy/changeset/bcbdb7638f71/ Log: close to be merged branch diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py --- a/pypy/module/micronumpy/interp_boxes.py +++ b/pypy/module/micronumpy/interp_boxes.py @@ -173,6 +173,7 @@ return space.long(box.item(space)) def descr_float(self, space): + import pdb;pdb.set_trace() box = self.convert_to(space, W_Float64Box._get_dtype(space)) return space.float(box.item(space)) From noreply at buildbot.pypy.org Fri Feb 28 16:27:16 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 28 Feb 2014 16:27:16 +0100 (CET) Subject: [pypy-commit] pypy default: merge the int_w-refactor branch: in a lot of places CPython allows objects with __int__ and __float__ instead of actual ints and floats, while until now pypy disallowed them. We fix it by making space.{int_w, float_w, etc.} accepting those objects by default, and disallowing conversions only when explicitly needed Message-ID: <20140228152716.5D4001D2616@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r69551:ccdc7e797e3b Date: 2014-02-28 16:26 +0100 http://bitbucket.org/pypy/pypy/changeset/ccdc7e797e3b/ Log: merge the int_w-refactor branch: in a lot of places CPython allows objects with __int__ and __float__ instead of actual ints and floats, while until now pypy disallowed them. 
We fix it by making space.{int_w,float_w,etc.} accepting those objects by default, and disallowing conversions only when explicitly needed diff --git a/lib-python/2.7/ctypes/test/test_numbers.py b/lib-python/2.7/ctypes/test/test_numbers.py --- a/lib-python/2.7/ctypes/test/test_numbers.py +++ b/lib-python/2.7/ctypes/test/test_numbers.py @@ -105,7 +105,6 @@ self.assertEqual(ArgType, type(parm)) - @xfail def test_floats(self): # c_float and c_double can be created from # Python int, long and float diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -92,7 +92,7 @@ i = 2 * HUGEVAL_BYTES addrstring = [' '] * i while True: - n = space.int_w(space.and_(w_id, w_0x0F)) + n = space.int_w(space.and_(w_id, w_0x0F), allow_conversion=False) n += ord('0') if n > ord('9'): n += (ord('a') - ord('9') - 1) @@ -201,16 +201,38 @@ def unicode_w(self, space): self._typed_unwrap_error(space, "unicode") - def int_w(self, space): + def int_w(self, space, allow_conversion=True): + # note that W_IntObject.int_w has a fast path and W_FloatObject.int_w + # raises w_TypeError + w_obj = self + if allow_conversion: + w_obj = space.int(self) + return w_obj._int_w(space) + + def _int_w(self, space): self._typed_unwrap_error(space, "integer") - def float_w(self, space): + def float_w(self, space, allow_conversion=True): + w_obj = self + if allow_conversion: + w_obj = space.float(self) + return w_obj._float_w(space) + + def _float_w(self, space): self._typed_unwrap_error(space, "float") def uint_w(self, space): self._typed_unwrap_error(space, "integer") - def bigint_w(self, space): + def bigint_w(self, space, allow_conversion=True): + # note that W_IntObject and W_LongObject have fast paths, + # W_FloatObject.rbigint_w raises w_TypeError raises + w_obj = self + if allow_conversion: + w_obj = space.long(self) + return w_obj._bigint_w(space) + + def _bigint_w(self, space): self._typed_unwrap_error(space, "integer") def _typed_unwrap_error(self, space, expected): @@ -220,8 +242,7 @@ def int(self, space): w_impl = space.lookup(self, '__int__') if w_impl is None: - raise oefmt(space.w_TypeError, - "unsupported operand type for int(): '%T'", self) + self._typed_unwrap_error(space, "integer") w_result = space.get_and_call_function(w_impl, self) if (space.isinstance_w(w_result, space.w_int) or @@ -1210,7 +1231,7 @@ assert isinstance(w_index_or_slice, W_SliceObject) start, stop, step = w_index_or_slice.indices3(self, seqlength) else: - start = self.int_w(w_index_or_slice) + start = self.int_w(w_index_or_slice, allow_conversion=False) if start < 0: start += seqlength if not (0 <= start < seqlength): @@ -1231,7 +1252,7 @@ start, stop, step, length = w_index_or_slice.indices4(self, seqlength) else: - start = self.int_w(w_index_or_slice) + start = self.int_w(w_index_or_slice, allow_conversion=False) if start < 0: start += seqlength if not (0 <= start < seqlength): @@ -1255,7 +1276,10 @@ raise oefmt(self.w_TypeError, "%s must be an integer, not %T", objdescr, w_obj) try: - index = self.int_w(w_index) + # allow_conversion=False it's not really necessary because the + # return type of __index__ is already checked by space.index(), + # but there is no reason to allow conversions anyway + index = self.int_w(w_index, allow_conversion=False) except OperationError, err: if not err.match(self, self.w_OverflowError): raise @@ -1272,16 +1296,16 @@ else: return index - def r_longlong_w(self, w_obj): - bigint = self.bigint_w(w_obj) + def 
r_longlong_w(self, w_obj, allow_conversion=True): + bigint = self.bigint_w(w_obj, allow_conversion) try: return bigint.tolonglong() except OverflowError: raise OperationError(self.w_OverflowError, self.wrap('integer too large')) - def r_ulonglong_w(self, w_obj): - bigint = self.bigint_w(w_obj) + def r_ulonglong_w(self, w_obj, allow_conversion=True): + bigint = self.bigint_w(w_obj, allow_conversion) try: return bigint.toulonglong() except OverflowError: @@ -1348,8 +1372,19 @@ 'argument must be a string without NUL characters')) return rstring.assert_str0(result) - def int_w(self, w_obj): - return w_obj.int_w(self) + def int_w(self, w_obj, allow_conversion=True): + """ + Unwrap an app-level int object into an interpret-level int. + + If allow_conversion==True, w_obj might be of any type which implements + __int__, *except* floats which are explicitly rejected. This is the + same logic as CPython's PyArg_ParseTuple. If you want to also allow + floats, you can call space.int_w(space.int(w_obj)). + + If allow_conversion=False, w_obj needs to be an app-level int or a + subclass. + """ + return w_obj.int_w(self, allow_conversion) def int(self, w_obj): return w_obj.int(self) @@ -1357,11 +1392,19 @@ def uint_w(self, w_obj): return w_obj.uint_w(self) - def bigint_w(self, w_obj): - return w_obj.bigint_w(self) + def bigint_w(self, w_obj, allow_conversion=True): + """ + Like int_w, but return a rlib.rbigint object and call __long__ if + allow_conversion is True. + """ + return w_obj.bigint_w(self, allow_conversion) - def float_w(self, w_obj): - return w_obj.float_w(self) + def float_w(self, w_obj, allow_conversion=True): + """ + Like int_w, but return an interp-level float and call __float__ if + allow_conversion is True. + """ + return w_obj.float_w(self, allow_conversion) def realstr_w(self, w_obj): # Like str_w, but only works if w_obj is really of type 'str'. @@ -1399,20 +1442,10 @@ return w_obj.ord(self) # This is all interface for gateway.py. - def gateway_int_w(self, w_obj): - if self.isinstance_w(w_obj, self.w_float): - raise OperationError(self.w_TypeError, - self.wrap("integer argument expected, got float")) - return self.int_w(self.int(w_obj)) - - def gateway_float_w(self, w_obj): - return self.float_w(self.float(w_obj)) - - def gateway_r_longlong_w(self, w_obj): - if self.isinstance_w(w_obj, self.w_float): - raise OperationError(self.w_TypeError, - self.wrap("integer argument expected, got float")) - return self.r_longlong_w(self.int(w_obj)) + gateway_int_w = int_w + gateway_float_w = float_w + gateway_r_longlong_w = r_longlong_w + gateway_r_ulonglong_w = r_ulonglong_w def gateway_r_uint_w(self, w_obj): if self.isinstance_w(w_obj, self.w_float): @@ -1420,12 +1453,6 @@ self.wrap("integer argument expected, got float")) return self.uint_w(self.int(w_obj)) - def gateway_r_ulonglong_w(self, w_obj): - if self.isinstance_w(w_obj, self.w_float): - raise OperationError(self.w_TypeError, - self.wrap("integer argument expected, got float")) - return self.r_ulonglong_w(self.int(w_obj)) - def gateway_nonnegint_w(self, w_obj): # Like space.gateway_int_w(), but raises an app-level ValueError if # the integer is negative. Here for gateway.py. @@ -1447,7 +1474,7 @@ def c_uint_w(self, w_obj): # Like space.gateway_uint_w(), but raises an app-level OverflowError if # the integer does not fit in 32 bits. Here for gateway.py. 
- value = self.gateway_r_uint_w(w_obj) + value = self.uint_w(w_obj) if value > UINT_MAX_32_BITS: raise OperationError(self.w_OverflowError, self.wrap("expected an unsigned 32-bit integer")) @@ -1457,7 +1484,7 @@ # Like space.gateway_int_w(), but raises an app-level ValueError if # the integer is negative or does not fit in 32 bits. Here # for gateway.py. - value = self.gateway_int_w(w_obj) + value = self.int_w(w_obj) if value < 0: raise OperationError(self.w_ValueError, self.wrap("expected a non-negative integer")) @@ -1466,22 +1493,22 @@ self.wrap("expected a 32-bit integer")) return value - def truncatedint_w(self, w_obj): + def truncatedint_w(self, w_obj, allow_conversion=True): # Like space.gateway_int_w(), but return the integer truncated # instead of raising OverflowError. For obscure cases only. try: - return self.int_w(w_obj) + return self.int_w(w_obj, allow_conversion) except OperationError, e: if not e.match(self, self.w_OverflowError): raise from rpython.rlib.rarithmetic import intmask return intmask(self.bigint_w(w_obj).uintmask()) - def truncatedlonglong_w(self, w_obj): + def truncatedlonglong_w(self, w_obj, allow_conversion=True): # Like space.gateway_r_longlong_w(), but return the integer truncated # instead of raising OverflowError. try: - return self.r_longlong_w(w_obj) + return self.r_longlong_w(w_obj, allow_conversion) except OperationError, e: if not e.match(self, self.w_OverflowError): raise diff --git a/pypy/interpreter/main.py b/pypy/interpreter/main.py --- a/pypy/interpreter/main.py +++ b/pypy/interpreter/main.py @@ -134,7 +134,7 @@ exitcode = 0 else: try: - exitcode = space.int_w(w_exitcode) + exitcode = space.int_w(w_exitcode, allow_conversion=False) except OperationError: # not an integer: print it to stderr msg = space.str_w(space.str(w_exitcode)) diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py --- a/pypy/interpreter/test/test_argument.py +++ b/pypy/interpreter/test/test_argument.py @@ -106,7 +106,7 @@ def len(self, x): return len(x) - def int_w(self, x): + def int_w(self, x, allow_conversion=True): return x def eq_w(self, x, y): diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -457,6 +457,8 @@ space.mul(space.wrap(sys.maxint), space.wrap(-7))) def test_interp2app_unwrap_spec_typechecks(self): + from rpython.rlib.rarithmetic import r_longlong + space = self.space w = space.wrap def g3_id(space, x): @@ -491,6 +493,12 @@ raises(gateway.OperationError,space.call_function,w_app_g3_f,w(None)) raises(gateway.OperationError,space.call_function,w_app_g3_f,w("foo")) + app_g3_r = gateway.interp2app_temp(g3_id, + unwrap_spec=[gateway.ObjSpace, + r_longlong]) + w_app_g3_r = space.wrap(app_g3_r) + raises(gateway.OperationError,space.call_function,w_app_g3_r,w(1.0)) + def test_interp2app_unwrap_spec_unicode(self): space = self.space w = space.wrap diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -167,6 +167,40 @@ self.space.setattr(w_oldstyle, self.space.wrap("__call__"), w_func) assert is_callable(w_oldstyle) + def test_int_w(self): + space = self.space + w_x = space.wrap(42) + assert space.int_w(w_x) == 42 + assert space.int_w(w_x, allow_conversion=False) == 42 + # + w_x = space.wrap(44.0) + space.raises_w(space.w_TypeError, space.int_w, w_x) + 
space.raises_w(space.w_TypeError, space.int_w, w_x, allow_conversion=False) + # + w_instance = self.space.appexec([], """(): + class MyInt(object): + def __int__(self): + return 43 + return MyInt() + """) + assert space.int_w(w_instance) == 43 + space.raises_w(space.w_TypeError, space.int_w, w_instance, allow_conversion=False) + # + w_instance = self.space.appexec([], """(): + class MyInt(object): + def __int__(self): + return 43 + + class AnotherInt(object): + def __int__(self): + return MyInt() + + return AnotherInt() + """) + space.raises_w(space.w_TypeError, space.int_w, w_instance) + space.raises_w(space.w_TypeError, space.int_w, w_instance, allow_conversion=False) + + def test_interp_w(self): w = self.space.wrap w_bltinfunction = self.space.builtin.get('len') diff --git a/pypy/module/__builtin__/test/test_buffer.py b/pypy/module/__builtin__/test/test_buffer.py --- a/pypy/module/__builtin__/test/test_buffer.py +++ b/pypy/module/__builtin__/test/test_buffer.py @@ -170,6 +170,19 @@ for step in indices[1:]: assert b[start:stop:step] == s[start:stop:step] + def test_getitem_only_ints(self): + class MyInt(object): + def __init__(self, x): + self.x = x + + def __int__(self): + return self.x + + buf = buffer('hello world') + raises(TypeError, "buf[MyInt(0)]") + raises(TypeError, "buf[MyInt(0):MyInt(5)]") + + class AppTestMemoryView: def test_basic(self): v = memoryview("abc") diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py --- a/pypy/module/_cffi_backend/misc.py +++ b/pypy/module/_cffi_backend/misc.py @@ -131,13 +131,13 @@ if space.is_w(space.type(w_ob), space.w_int): # shortcut return space.int_w(w_ob) try: - bigint = space.bigint_w(w_ob) + bigint = space.bigint_w(w_ob, allow_conversion=False) except OperationError, e: if not e.match(space, space.w_TypeError): raise if _is_a_float(space, w_ob): raise - bigint = space.bigint_w(space.int(w_ob)) + bigint = space.bigint_w(space.int(w_ob), allow_conversion=False) try: return bigint.tolonglong() except OverflowError: @@ -148,13 +148,13 @@ if space.is_w(space.type(w_ob), space.w_int): # shortcut return space.int_w(w_ob) try: - bigint = space.bigint_w(w_ob) + bigint = space.bigint_w(w_ob, allow_conversion=False) except OperationError, e: if not e.match(space, space.w_TypeError): raise if _is_a_float(space, w_ob): raise - bigint = space.bigint_w(space.int(w_ob)) + bigint = space.bigint_w(space.int(w_ob), allow_conversion=False) try: return bigint.toint() except OverflowError: @@ -171,13 +171,13 @@ raise OperationError(space.w_OverflowError, space.wrap(neg_msg)) return r_ulonglong(value) try: - bigint = space.bigint_w(w_ob) + bigint = space.bigint_w(w_ob, allow_conversion=False) except OperationError, e: if not e.match(space, space.w_TypeError): raise if strict and _is_a_float(space, w_ob): raise - bigint = space.bigint_w(space.int(w_ob)) + bigint = space.bigint_w(space.int(w_ob), allow_conversion=False) if strict: try: return bigint.toulonglong() @@ -196,13 +196,13 @@ raise OperationError(space.w_OverflowError, space.wrap(neg_msg)) return r_uint(value) try: - bigint = space.bigint_w(w_ob) + bigint = space.bigint_w(w_ob, allow_conversion=False) except OperationError, e: if not e.match(space, space.w_TypeError): raise if strict and _is_a_float(space, w_ob): raise - bigint = space.bigint_w(space.int(w_ob)) + bigint = space.bigint_w(space.int(w_ob), allow_conversion=False) if strict: try: return bigint.touint() diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py 
b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1418,8 +1418,10 @@ p = newp(BStructPtr, [12]) assert p.a1 == 12 e = py.test.raises(TypeError, newp, BStructPtr, [None]) - assert ("an integer is required" in str(e.value) or - "unsupported operand type for int(): 'NoneType'" in str(e.value)) #PyPy + msg = str(e.value) + assert ("an integer is required" in msg or # CPython + "unsupported operand type for int(): 'NoneType'" in msg or # old PyPys + "expected integer, got NoneType object" in msg) # newer PyPys py.test.raises(TypeError, 'p.a1 = "def"') if sys.version_info < (3,): BEnum2 = new_enum_type(unicode("foo"), (unicode('abc'),), (5,), BInt) diff --git a/pypy/module/_rawffi/alt/test/test_funcptr.py b/pypy/module/_rawffi/alt/test/test_funcptr.py --- a/pypy/module/_rawffi/alt/test/test_funcptr.py +++ b/pypy/module/_rawffi/alt/test/test_funcptr.py @@ -185,6 +185,10 @@ set_val_to_ptr(ptr2, 123) assert get_dummy() == 123 set_val_to_ptr(ptr2, 0) + # + class OldStyle: + pass + raises(TypeError, "set_val_to_ptr(OldStyle(), 0)") def test_convert_strings_to_char_p(self): """ diff --git a/pypy/module/_rawffi/alt/type_converter.py b/pypy/module/_rawffi/alt/type_converter.py --- a/pypy/module/_rawffi/alt/type_converter.py +++ b/pypy/module/_rawffi/alt/type_converter.py @@ -25,7 +25,7 @@ assert libffi.IS_32_BIT self._longlong(w_ffitype, w_obj) elif w_ffitype.is_signed(): - intval = space.truncatedint_w(w_obj) + intval = space.truncatedint_w(w_obj, allow_conversion=False) self.handle_signed(w_ffitype, w_obj, intval) elif self.maybe_handle_char_or_unichar_p(w_ffitype, w_obj): # the object was already handled from within @@ -33,16 +33,16 @@ pass elif w_ffitype.is_pointer(): w_obj = self.convert_pointer_arg_maybe(w_obj, w_ffitype) - intval = space.truncatedint_w(w_obj) + intval = space.truncatedint_w(w_obj, allow_conversion=False) self.handle_pointer(w_ffitype, w_obj, intval) elif w_ffitype.is_unsigned(): - uintval = r_uint(space.truncatedint_w(w_obj)) + uintval = r_uint(space.truncatedint_w(w_obj, allow_conversion=False)) self.handle_unsigned(w_ffitype, w_obj, uintval) elif w_ffitype.is_char(): - intval = space.int_w(space.ord(w_obj)) + intval = space.int_w(space.ord(w_obj), allow_conversion=False) self.handle_char(w_ffitype, w_obj, intval) elif w_ffitype.is_unichar(): - intval = space.int_w(space.ord(w_obj)) + intval = space.int_w(space.ord(w_obj), allow_conversion=False) self.handle_unichar(w_ffitype, w_obj, intval) elif w_ffitype.is_double(): self._float(w_ffitype, w_obj) @@ -60,20 +60,20 @@ def _longlong(self, w_ffitype, w_obj): # a separate function, which can be seen by the jit or not, # depending on whether longlongs are supported - longlongval = self.space.truncatedlonglong_w(w_obj) + longlongval = self.space.truncatedlonglong_w(w_obj, allow_conversion=False) self.handle_longlong(w_ffitype, w_obj, longlongval) def _float(self, w_ffitype, w_obj): # a separate function, which can be seen by the jit or not, # depending on whether floats are supported - floatval = self.space.float_w(w_obj) + floatval = self.space.float_w(w_obj, allow_conversion=False) self.handle_float(w_ffitype, w_obj, floatval) def _singlefloat(self, w_ffitype, w_obj): # a separate function, which can be seen by the jit or not, # depending on whether singlefloats are supported from rpython.rlib.rarithmetic import r_singlefloat - floatval = self.space.float_w(w_obj) + floatval = self.space.float_w(w_obj, 
allow_conversion=False) singlefloatval = r_singlefloat(floatval) self.handle_singlefloat(w_ffitype, w_obj, singlefloatval) diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -1034,6 +1034,18 @@ assert len(b) == 13 assert str(b[12]) == "-0.0" + def test_getitem_only_ints(self): + class MyInt(object): + def __init__(self, x): + self.x = x + + def __int__(self): + return self.x + + a = self.array('i', [1, 2, 3, 4, 5, 6]) + raises(TypeError, "a[MyInt(0)]") + raises(TypeError, "a[MyInt(0):MyInt(5)]") + class AppTestArrayBuiltinShortcut(AppTestArray): spaceconfig = AppTestArray.spaceconfig.copy() diff --git a/pypy/module/cppyy/test/test_zjit.py b/pypy/module/cppyy/test/test_zjit.py --- a/pypy/module/cppyy/test/test_zjit.py +++ b/pypy/module/cppyy/test/test_zjit.py @@ -120,7 +120,7 @@ return FakeInt(int(obj)) assert 0 - def float_w(self, w_obj): + def float_w(self, w_obj, allow_conversion=True): assert isinstance(w_obj, FakeFloat) return w_obj.val @@ -141,7 +141,7 @@ def is_w(self, w_one, w_two): return w_one is w_two - def int_w(self, w_obj): + def int_w(self, w_obj, allow_conversion=True): assert isinstance(w_obj, FakeInt) return w_obj.val diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -375,7 +375,7 @@ pass class W_IntegerBox(W_NumberBox): - def int_w(self, space): + def _int_w(self, space): return space.int_w(self.descr_int(space)) class W_SignedIntegerBox(W_IntegerBox): diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -151,11 +151,11 @@ assert isinstance(w_obj, boxes.W_GenericBox) return self.float(w_obj.descr_float(self)) - def float_w(self, w_obj): + def float_w(self, w_obj, allow_conversion=True): assert isinstance(w_obj, FloatObject) return w_obj.floatval - def int_w(self, w_obj): + def int_w(self, w_obj, allow_conversion=True): if isinstance(w_obj, IntObject): return w_obj.intval elif isinstance(w_obj, FloatObject): diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -868,8 +868,8 @@ args_w = space.fixedview(w_tuple) if len(args_w) != 2: raise OperationError(space.w_TypeError, space.wrap(msg)) - actime = space.float_w(args_w[0]) - modtime = space.float_w(args_w[1]) + actime = space.float_w(args_w[0], allow_conversion=False) + modtime = space.float_w(args_w[1], allow_conversion=False) dispatch_filename(rposix.utime, 2)(space, w_path, (actime, modtime)) except OSError, e: raise wrap_oserror2(space, e, w_path) diff --git a/pypy/module/struct/test/test_struct.py b/pypy/module/struct/test/test_struct.py --- a/pypy/module/struct/test/test_struct.py +++ b/pypy/module/struct/test/test_struct.py @@ -7,7 +7,7 @@ class AppTestStruct(object): - spaceconfig = dict(usemodules=['struct']) + spaceconfig = dict(usemodules=['struct', 'micronumpy']) def setup_class(cls): """ @@ -19,7 +19,7 @@ return struct """) cls.w_native_is_bigendian = cls.space.wrap(native_is_bigendian) - + cls.w_runappdirect = cls.space.wrap(cls.runappdirect) def test_error(self): """ @@ -384,6 +384,19 @@ assert self.struct.unpack("ii", b) == (62, 12) raises(self.struct.error, self.struct.unpack, "i", b) + def test___float__(self): + class MyFloat(object): + def 
__init__(self, x): + self.x = x + def __float__(self): + return self.x + + obj = MyFloat(42.3) + data = self.struct.pack('d', obj) + obj2, = self.struct.unpack('d', data) + assert type(obj2) is float + assert obj2 == 42.3 + class AppTestStructBuffer(object): spaceconfig = dict(usemodules=['struct', '__pypy__']) diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -45,13 +45,13 @@ def unicode_w(self, space): return NonConstant(u"foobar") - def int_w(self, space): + def int_w(self, space, allow_conversion=True): return NonConstant(-42) def uint_w(self, space): return r_uint(NonConstant(42)) - def bigint_w(self, space): + def bigint_w(self, space, allow_conversion=True): from rpython.rlib.rbigint import rbigint return rbigint.fromint(NonConstant(42)) @@ -117,7 +117,7 @@ def _freeze_(self): return True - def float_w(self, w_obj): + def float_w(self, w_obj, allow_conversion=True): is_root(w_obj) return NonConstant(42.5) diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -34,7 +34,16 @@ def unwrap(self, space): return self.floatval - def float_w(self, space): + def int_w(self, space, allow_conversion=True): + self._typed_unwrap_error(space, "integer") + + def bigint_w(self, space, allow_conversion=True): + self._typed_unwrap_error(space, "integer") + + def float_w(self, space, allow_conversion=True): + return self.floatval + + def _float_w(self, space): return self.floatval def int(self, space): diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -42,7 +42,8 @@ return False if self.user_overridden_class or w_other.user_overridden_class: return self is w_other - return space.int_w(self) == space.int_w(w_other) + return (space.int_w(self, allow_conversion=False) == + space.int_w(w_other, allow_conversion=False)) def immutable_unique_id(self, space): if self.user_overridden_class: @@ -309,9 +310,13 @@ """representation for debugging purposes""" return "%s(%d)" % (self.__class__.__name__, self.intval) - def int_w(self, space): + def int_w(self, space, allow_conversion=True): return int(self.intval) - unwrap = int_w + + def _int_w(self, space): + return int(self.intval) + + unwrap = _int_w def uint_w(self, space): intval = self.intval @@ -320,12 +325,18 @@ "cannot convert negative integer to unsigned") return r_uint(intval) - def bigint_w(self, space): + def bigint_w(self, space, allow_conversion=True): return rbigint.fromint(self.intval) - def float_w(self, space): + def _bigint_w(self, space): + return rbigint.fromint(self.intval) + + def float_w(self, space, allow_conversion=True): return float(self.intval) + # note that we do NOT implement _float_w, because __float__ cannot return + # an int + def int(self, space): if type(self) is W_IntObject: return self @@ -665,7 +676,7 @@ # int_w is effectively what we want in this case, # we cannot construct a subclass of int instance with an # an overflowing long - value = space.int_w(w_obj) + value = space.int_w(w_obj, allow_conversion=False) elif space.isinstance_w(w_value, space.w_str): value, w_longval = _string_to_int_or_long(space, w_value, space.str_w(w_value)) diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -244,7 +244,7 @@ def 
fromrarith_int(i): return W_LongObject(rbigint.fromrarith_int(i)) - def int_w(self, space): + def _int_w(self, space): try: return self.num.toint() except OverflowError: @@ -261,10 +261,16 @@ raise oefmt(space.w_OverflowError, "long int too large to convert to unsigned int") - def bigint_w(self, space): + def bigint_w(self, space, allow_conversion=True): return self.num - def float_w(self, space): + def _bigint_w(self, space): + return self.num + + def float_w(self, space, allow_conversion=True): + return self.tofloat(space) + + def _float_w(self, space): return self.tofloat(space) def int(self, space): diff --git a/pypy/objspace/std/smalllongobject.py b/pypy/objspace/std/smalllongobject.py --- a/pypy/objspace/std/smalllongobject.py +++ b/pypy/objspace/std/smalllongobject.py @@ -44,7 +44,7 @@ def __repr__(self): return '' % self.longlong - def int_w(self, space): + def _int_w(self, space): a = self.longlong b = intmask(a) if b == a: @@ -63,10 +63,13 @@ raise oefmt(space.w_OverflowError, "long int too large to convert to unsigned int") - def bigint_w(self, space): + def bigint_w(self, space, allow_conversion=True): return self.asbigint() - def float_w(self, space): + def _bigint_w(self, space): + return self.asbigint() + + def _float_w(self, space): return float(self.longlong) def int(self, space): diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1046,7 +1046,7 @@ assert isinstance(string, str) return string - def int_w(self, integer): + def int_w(self, integer, allow_conversion=True): assert isinstance(integer, int) return integer diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -553,6 +553,15 @@ assert 3 .__coerce__(4) == (3, 4) assert 3 .__coerce__(4L) == NotImplemented + def test_fake_int_as_base(self): + class MyInt(object): + def __init__(self, x): + self.x = x + def __int__(self): + return self.x + + base = MyInt(24) + assert int('10', base) == 24 class AppTestIntShortcut(AppTestInt): spaceconfig = {"objspace.std.intshortcut": True} diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -842,6 +842,26 @@ except TypeError: pass + def test_mul___index__(self): + class MyInt(object): + def __init__(self, x): + self.x = x + + def __int__(self): + return self.x + + class MyIndex(object): + def __init__(self, x): + self.x = x + + def __index__(self): + return self.x + + assert [0] * MyIndex(3) == [0, 0, 0] + raises(TypeError, "[0]*MyInt(3)") + raises(TypeError, "[0]*MyIndex(MyInt(3))") + + def test_index(self): c = range(10) assert c.index(0) == 0 diff --git a/pypy/objspace/std/test/test_stringformat.py b/pypy/objspace/std/test/test_stringformat.py --- a/pypy/objspace/std/test/test_stringformat.py +++ b/pypy/objspace/std/test/test_stringformat.py @@ -186,6 +186,22 @@ def test_broken_unicode(self): raises(UnicodeDecodeError, 'Názov: %s'.__mod__, u'Jerry') + def test___int__(self): + class MyInt(object): + def __init__(self, x): + self.x = x + def __int__(self): + return self.x + # + x = MyInt(65) + assert '%c' % x == 'A' + + +class Foo(object): + def __cmp__(self, other): + return MyInt(0) + + class AppTestWidthPrec: def test_width(self): a 
= 'a' diff --git a/pypy/objspace/test/test_descroperation.py b/pypy/objspace/test/test_descroperation.py --- a/pypy/objspace/test/test_descroperation.py +++ b/pypy/objspace/test/test_descroperation.py @@ -721,8 +721,19 @@ return CannotConvertToBool() x = X() raises(MyError, "'foo' in x") - - + + def test___cmp___fake_int(self): + class MyInt(object): + def __init__(self, x): + self.x = x + def __int__(self): + return self.x + class X(object): + def __cmp__(self, other): + return MyInt(0) + + assert X() == 'hello' + class AppTestWithBuiltinShortcut(AppTest_Descroperation): spaceconfig = {'objspace.std.builtinshortcut': True} From noreply at buildbot.pypy.org Fri Feb 28 16:38:22 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 28 Feb 2014 16:38:22 +0100 (CET) Subject: [pypy-commit] pypy default: these seem unnecessary Message-ID: <20140228153822.D7B0F1D2655@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69552:f62e0c5136c7 Date: 2014-02-28 10:37 -0500 http://bitbucket.org/pypy/pypy/changeset/f62e0c5136c7/ Log: these seem unnecessary diff --git a/pypy/module/struct/test/test_struct.py b/pypy/module/struct/test/test_struct.py --- a/pypy/module/struct/test/test_struct.py +++ b/pypy/module/struct/test/test_struct.py @@ -7,7 +7,7 @@ class AppTestStruct(object): - spaceconfig = dict(usemodules=['struct', 'micronumpy']) + spaceconfig = dict(usemodules=['struct']) def setup_class(cls): """ @@ -19,7 +19,6 @@ return struct """) cls.w_native_is_bigendian = cls.space.wrap(native_is_bigendian) - cls.w_runappdirect = cls.space.wrap(cls.runappdirect) def test_error(self): """ From noreply at buildbot.pypy.org Fri Feb 28 16:45:27 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 28 Feb 2014 16:45:27 +0100 (CET) Subject: [pypy-commit] pypy default: work a bit on this doc Message-ID: <20140228154527.7651B1D24F6@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69553:acf94ed26643 Date: 2014-02-28 17:43 +0200 http://bitbucket.org/pypy/pypy/changeset/acf94ed26643/ Log: work a bit on this doc diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -82,7 +82,7 @@ If we save it as ``x.c`` now, compile it and run it with:: fijal at hermann:/opt/pypy$ gcc -o x x.c -lpypy-c -L. - fijal at hermann:~/src/pypy$ LD_LIBRARY_PATH=. ./x + fijal at hermann:/opt/pypy$ LD_LIBRARY_PATH=. ./x hello from pypy Worked! @@ -92,8 +92,56 @@ Typically we need something more to do than simply execute source. The following is a fully fledged example, please consult cffi documentation for details. 
+It's a bit longish, but it captures a gist what can be done with the PyPy +embedding interface:: -xxx + #include "include/PyPy.h" + #include + + char source[] = "from cffi import FFI\n\ + ffi = FFI()\n\ + @ffi.callback('int(int)')\n\ + def func(a):\n\ + print 'Got from C %d' % a\n\ + return a * 2\n\ + ffi.cdef('int callback(int (*func)(int));')\n\ + c_func = ffi.cast('int(*)(int(*)(int))', c_argument)\n\ + c_func(func)\n\ + print 'finished the Python part'\n\ + "; + + int callback(int (*func)(int)) + { + printf("Calling to Python, result: %d\n", func(3)); + } + + int main() + { + int res; + void *lib, *func; + + rpython_startup_code(); + res = pypy_setup_home("/opt/pypy/pypy/libpypy-c.so", 1); + if (res) { + printf("Error setting pypy home!\n"); + return 1; + } + res = pypy_execute_source_ptr(source, (void*)callback); + if (res) { + printf("Error calling pypy_execute_source_ptr!\n"); + } + return res; + } + +you can compile and run it with:: + + fijal at hermann:/opt/pypy$ gcc -g -o x x.c -lpypy-c -L. + fijal at hermann:/opt/pypy$ LD_LIBRARY_PATH=. ./x + Got from C 3 + Calling to Python, result: 6 + finished the Python part + + Threading --------- From noreply at buildbot.pypy.org Fri Feb 28 16:45:28 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 28 Feb 2014 16:45:28 +0100 (CET) Subject: [pypy-commit] pypy default: merge Message-ID: <20140228154528.D84D81D24F6@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69554:3da10e6aef02 Date: 2014-02-28 17:44 +0200 http://bitbucket.org/pypy/pypy/changeset/3da10e6aef02/ Log: merge diff --git a/lib-python/2.7/ctypes/test/test_numbers.py b/lib-python/2.7/ctypes/test/test_numbers.py --- a/lib-python/2.7/ctypes/test/test_numbers.py +++ b/lib-python/2.7/ctypes/test/test_numbers.py @@ -105,7 +105,6 @@ self.assertEqual(ArgType, type(parm)) - @xfail def test_floats(self): # c_float and c_double can be created from # Python int, long and float diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -92,7 +92,7 @@ i = 2 * HUGEVAL_BYTES addrstring = [' '] * i while True: - n = space.int_w(space.and_(w_id, w_0x0F)) + n = space.int_w(space.and_(w_id, w_0x0F), allow_conversion=False) n += ord('0') if n > ord('9'): n += (ord('a') - ord('9') - 1) @@ -201,16 +201,38 @@ def unicode_w(self, space): self._typed_unwrap_error(space, "unicode") - def int_w(self, space): + def int_w(self, space, allow_conversion=True): + # note that W_IntObject.int_w has a fast path and W_FloatObject.int_w + # raises w_TypeError + w_obj = self + if allow_conversion: + w_obj = space.int(self) + return w_obj._int_w(space) + + def _int_w(self, space): self._typed_unwrap_error(space, "integer") - def float_w(self, space): + def float_w(self, space, allow_conversion=True): + w_obj = self + if allow_conversion: + w_obj = space.float(self) + return w_obj._float_w(space) + + def _float_w(self, space): self._typed_unwrap_error(space, "float") def uint_w(self, space): self._typed_unwrap_error(space, "integer") - def bigint_w(self, space): + def bigint_w(self, space, allow_conversion=True): + # note that W_IntObject and W_LongObject have fast paths, + # W_FloatObject.rbigint_w raises w_TypeError raises + w_obj = self + if allow_conversion: + w_obj = space.long(self) + return w_obj._bigint_w(space) + + def _bigint_w(self, space): self._typed_unwrap_error(space, "integer") def _typed_unwrap_error(self, space, expected): @@ -220,8 +242,7 @@ def int(self, 
space): w_impl = space.lookup(self, '__int__') if w_impl is None: - raise oefmt(space.w_TypeError, - "unsupported operand type for int(): '%T'", self) + self._typed_unwrap_error(space, "integer") w_result = space.get_and_call_function(w_impl, self) if (space.isinstance_w(w_result, space.w_int) or @@ -1210,7 +1231,7 @@ assert isinstance(w_index_or_slice, W_SliceObject) start, stop, step = w_index_or_slice.indices3(self, seqlength) else: - start = self.int_w(w_index_or_slice) + start = self.int_w(w_index_or_slice, allow_conversion=False) if start < 0: start += seqlength if not (0 <= start < seqlength): @@ -1231,7 +1252,7 @@ start, stop, step, length = w_index_or_slice.indices4(self, seqlength) else: - start = self.int_w(w_index_or_slice) + start = self.int_w(w_index_or_slice, allow_conversion=False) if start < 0: start += seqlength if not (0 <= start < seqlength): @@ -1255,7 +1276,10 @@ raise oefmt(self.w_TypeError, "%s must be an integer, not %T", objdescr, w_obj) try: - index = self.int_w(w_index) + # allow_conversion=False it's not really necessary because the + # return type of __index__ is already checked by space.index(), + # but there is no reason to allow conversions anyway + index = self.int_w(w_index, allow_conversion=False) except OperationError, err: if not err.match(self, self.w_OverflowError): raise @@ -1272,16 +1296,16 @@ else: return index - def r_longlong_w(self, w_obj): - bigint = self.bigint_w(w_obj) + def r_longlong_w(self, w_obj, allow_conversion=True): + bigint = self.bigint_w(w_obj, allow_conversion) try: return bigint.tolonglong() except OverflowError: raise OperationError(self.w_OverflowError, self.wrap('integer too large')) - def r_ulonglong_w(self, w_obj): - bigint = self.bigint_w(w_obj) + def r_ulonglong_w(self, w_obj, allow_conversion=True): + bigint = self.bigint_w(w_obj, allow_conversion) try: return bigint.toulonglong() except OverflowError: @@ -1348,8 +1372,19 @@ 'argument must be a string without NUL characters')) return rstring.assert_str0(result) - def int_w(self, w_obj): - return w_obj.int_w(self) + def int_w(self, w_obj, allow_conversion=True): + """ + Unwrap an app-level int object into an interpret-level int. + + If allow_conversion==True, w_obj might be of any type which implements + __int__, *except* floats which are explicitly rejected. This is the + same logic as CPython's PyArg_ParseTuple. If you want to also allow + floats, you can call space.int_w(space.int(w_obj)). + + If allow_conversion=False, w_obj needs to be an app-level int or a + subclass. + """ + return w_obj.int_w(self, allow_conversion) def int(self, w_obj): return w_obj.int(self) @@ -1357,11 +1392,19 @@ def uint_w(self, w_obj): return w_obj.uint_w(self) - def bigint_w(self, w_obj): - return w_obj.bigint_w(self) + def bigint_w(self, w_obj, allow_conversion=True): + """ + Like int_w, but return a rlib.rbigint object and call __long__ if + allow_conversion is True. + """ + return w_obj.bigint_w(self, allow_conversion) - def float_w(self, w_obj): - return w_obj.float_w(self) + def float_w(self, w_obj, allow_conversion=True): + """ + Like int_w, but return an interp-level float and call __float__ if + allow_conversion is True. + """ + return w_obj.float_w(self, allow_conversion) def realstr_w(self, w_obj): # Like str_w, but only works if w_obj is really of type 'str'. @@ -1399,20 +1442,10 @@ return w_obj.ord(self) # This is all interface for gateway.py. 
- def gateway_int_w(self, w_obj): - if self.isinstance_w(w_obj, self.w_float): - raise OperationError(self.w_TypeError, - self.wrap("integer argument expected, got float")) - return self.int_w(self.int(w_obj)) - - def gateway_float_w(self, w_obj): - return self.float_w(self.float(w_obj)) - - def gateway_r_longlong_w(self, w_obj): - if self.isinstance_w(w_obj, self.w_float): - raise OperationError(self.w_TypeError, - self.wrap("integer argument expected, got float")) - return self.r_longlong_w(self.int(w_obj)) + gateway_int_w = int_w + gateway_float_w = float_w + gateway_r_longlong_w = r_longlong_w + gateway_r_ulonglong_w = r_ulonglong_w def gateway_r_uint_w(self, w_obj): if self.isinstance_w(w_obj, self.w_float): @@ -1420,12 +1453,6 @@ self.wrap("integer argument expected, got float")) return self.uint_w(self.int(w_obj)) - def gateway_r_ulonglong_w(self, w_obj): - if self.isinstance_w(w_obj, self.w_float): - raise OperationError(self.w_TypeError, - self.wrap("integer argument expected, got float")) - return self.r_ulonglong_w(self.int(w_obj)) - def gateway_nonnegint_w(self, w_obj): # Like space.gateway_int_w(), but raises an app-level ValueError if # the integer is negative. Here for gateway.py. @@ -1447,7 +1474,7 @@ def c_uint_w(self, w_obj): # Like space.gateway_uint_w(), but raises an app-level OverflowError if # the integer does not fit in 32 bits. Here for gateway.py. - value = self.gateway_r_uint_w(w_obj) + value = self.uint_w(w_obj) if value > UINT_MAX_32_BITS: raise OperationError(self.w_OverflowError, self.wrap("expected an unsigned 32-bit integer")) @@ -1457,7 +1484,7 @@ # Like space.gateway_int_w(), but raises an app-level ValueError if # the integer is negative or does not fit in 32 bits. Here # for gateway.py. - value = self.gateway_int_w(w_obj) + value = self.int_w(w_obj) if value < 0: raise OperationError(self.w_ValueError, self.wrap("expected a non-negative integer")) @@ -1466,22 +1493,22 @@ self.wrap("expected a 32-bit integer")) return value - def truncatedint_w(self, w_obj): + def truncatedint_w(self, w_obj, allow_conversion=True): # Like space.gateway_int_w(), but return the integer truncated # instead of raising OverflowError. For obscure cases only. try: - return self.int_w(w_obj) + return self.int_w(w_obj, allow_conversion) except OperationError, e: if not e.match(self, self.w_OverflowError): raise from rpython.rlib.rarithmetic import intmask return intmask(self.bigint_w(w_obj).uintmask()) - def truncatedlonglong_w(self, w_obj): + def truncatedlonglong_w(self, w_obj, allow_conversion=True): # Like space.gateway_r_longlong_w(), but return the integer truncated # instead of raising OverflowError. 
try: - return self.r_longlong_w(w_obj) + return self.r_longlong_w(w_obj, allow_conversion) except OperationError, e: if not e.match(self, self.w_OverflowError): raise diff --git a/pypy/interpreter/main.py b/pypy/interpreter/main.py --- a/pypy/interpreter/main.py +++ b/pypy/interpreter/main.py @@ -134,7 +134,7 @@ exitcode = 0 else: try: - exitcode = space.int_w(w_exitcode) + exitcode = space.int_w(w_exitcode, allow_conversion=False) except OperationError: # not an integer: print it to stderr msg = space.str_w(space.str(w_exitcode)) diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py --- a/pypy/interpreter/test/test_argument.py +++ b/pypy/interpreter/test/test_argument.py @@ -106,7 +106,7 @@ def len(self, x): return len(x) - def int_w(self, x): + def int_w(self, x, allow_conversion=True): return x def eq_w(self, x, y): diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py --- a/pypy/interpreter/test/test_gateway.py +++ b/pypy/interpreter/test/test_gateway.py @@ -457,6 +457,8 @@ space.mul(space.wrap(sys.maxint), space.wrap(-7))) def test_interp2app_unwrap_spec_typechecks(self): + from rpython.rlib.rarithmetic import r_longlong + space = self.space w = space.wrap def g3_id(space, x): @@ -491,6 +493,12 @@ raises(gateway.OperationError,space.call_function,w_app_g3_f,w(None)) raises(gateway.OperationError,space.call_function,w_app_g3_f,w("foo")) + app_g3_r = gateway.interp2app_temp(g3_id, + unwrap_spec=[gateway.ObjSpace, + r_longlong]) + w_app_g3_r = space.wrap(app_g3_r) + raises(gateway.OperationError,space.call_function,w_app_g3_r,w(1.0)) + def test_interp2app_unwrap_spec_unicode(self): space = self.space w = space.wrap diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -167,6 +167,40 @@ self.space.setattr(w_oldstyle, self.space.wrap("__call__"), w_func) assert is_callable(w_oldstyle) + def test_int_w(self): + space = self.space + w_x = space.wrap(42) + assert space.int_w(w_x) == 42 + assert space.int_w(w_x, allow_conversion=False) == 42 + # + w_x = space.wrap(44.0) + space.raises_w(space.w_TypeError, space.int_w, w_x) + space.raises_w(space.w_TypeError, space.int_w, w_x, allow_conversion=False) + # + w_instance = self.space.appexec([], """(): + class MyInt(object): + def __int__(self): + return 43 + return MyInt() + """) + assert space.int_w(w_instance) == 43 + space.raises_w(space.w_TypeError, space.int_w, w_instance, allow_conversion=False) + # + w_instance = self.space.appexec([], """(): + class MyInt(object): + def __int__(self): + return 43 + + class AnotherInt(object): + def __int__(self): + return MyInt() + + return AnotherInt() + """) + space.raises_w(space.w_TypeError, space.int_w, w_instance) + space.raises_w(space.w_TypeError, space.int_w, w_instance, allow_conversion=False) + + def test_interp_w(self): w = self.space.wrap w_bltinfunction = self.space.builtin.get('len') diff --git a/pypy/module/__builtin__/test/test_buffer.py b/pypy/module/__builtin__/test/test_buffer.py --- a/pypy/module/__builtin__/test/test_buffer.py +++ b/pypy/module/__builtin__/test/test_buffer.py @@ -170,6 +170,19 @@ for step in indices[1:]: assert b[start:stop:step] == s[start:stop:step] + def test_getitem_only_ints(self): + class MyInt(object): + def __init__(self, x): + self.x = x + + def __int__(self): + return self.x + + buf = buffer('hello world') + raises(TypeError, "buf[MyInt(0)]") + 
raises(TypeError, "buf[MyInt(0):MyInt(5)]") + + class AppTestMemoryView: def test_basic(self): v = memoryview("abc") diff --git a/pypy/module/_cffi_backend/misc.py b/pypy/module/_cffi_backend/misc.py --- a/pypy/module/_cffi_backend/misc.py +++ b/pypy/module/_cffi_backend/misc.py @@ -131,13 +131,13 @@ if space.is_w(space.type(w_ob), space.w_int): # shortcut return space.int_w(w_ob) try: - bigint = space.bigint_w(w_ob) + bigint = space.bigint_w(w_ob, allow_conversion=False) except OperationError, e: if not e.match(space, space.w_TypeError): raise if _is_a_float(space, w_ob): raise - bigint = space.bigint_w(space.int(w_ob)) + bigint = space.bigint_w(space.int(w_ob), allow_conversion=False) try: return bigint.tolonglong() except OverflowError: @@ -148,13 +148,13 @@ if space.is_w(space.type(w_ob), space.w_int): # shortcut return space.int_w(w_ob) try: - bigint = space.bigint_w(w_ob) + bigint = space.bigint_w(w_ob, allow_conversion=False) except OperationError, e: if not e.match(space, space.w_TypeError): raise if _is_a_float(space, w_ob): raise - bigint = space.bigint_w(space.int(w_ob)) + bigint = space.bigint_w(space.int(w_ob), allow_conversion=False) try: return bigint.toint() except OverflowError: @@ -171,13 +171,13 @@ raise OperationError(space.w_OverflowError, space.wrap(neg_msg)) return r_ulonglong(value) try: - bigint = space.bigint_w(w_ob) + bigint = space.bigint_w(w_ob, allow_conversion=False) except OperationError, e: if not e.match(space, space.w_TypeError): raise if strict and _is_a_float(space, w_ob): raise - bigint = space.bigint_w(space.int(w_ob)) + bigint = space.bigint_w(space.int(w_ob), allow_conversion=False) if strict: try: return bigint.toulonglong() @@ -196,13 +196,13 @@ raise OperationError(space.w_OverflowError, space.wrap(neg_msg)) return r_uint(value) try: - bigint = space.bigint_w(w_ob) + bigint = space.bigint_w(w_ob, allow_conversion=False) except OperationError, e: if not e.match(space, space.w_TypeError): raise if strict and _is_a_float(space, w_ob): raise - bigint = space.bigint_w(space.int(w_ob)) + bigint = space.bigint_w(space.int(w_ob), allow_conversion=False) if strict: try: return bigint.touint() diff --git a/pypy/module/_cffi_backend/test/_backend_test_c.py b/pypy/module/_cffi_backend/test/_backend_test_c.py --- a/pypy/module/_cffi_backend/test/_backend_test_c.py +++ b/pypy/module/_cffi_backend/test/_backend_test_c.py @@ -1418,8 +1418,10 @@ p = newp(BStructPtr, [12]) assert p.a1 == 12 e = py.test.raises(TypeError, newp, BStructPtr, [None]) - assert ("an integer is required" in str(e.value) or - "unsupported operand type for int(): 'NoneType'" in str(e.value)) #PyPy + msg = str(e.value) + assert ("an integer is required" in msg or # CPython + "unsupported operand type for int(): 'NoneType'" in msg or # old PyPys + "expected integer, got NoneType object" in msg) # newer PyPys py.test.raises(TypeError, 'p.a1 = "def"') if sys.version_info < (3,): BEnum2 = new_enum_type(unicode("foo"), (unicode('abc'),), (5,), BInt) diff --git a/pypy/module/_rawffi/alt/test/test_funcptr.py b/pypy/module/_rawffi/alt/test/test_funcptr.py --- a/pypy/module/_rawffi/alt/test/test_funcptr.py +++ b/pypy/module/_rawffi/alt/test/test_funcptr.py @@ -185,6 +185,10 @@ set_val_to_ptr(ptr2, 123) assert get_dummy() == 123 set_val_to_ptr(ptr2, 0) + # + class OldStyle: + pass + raises(TypeError, "set_val_to_ptr(OldStyle(), 0)") def test_convert_strings_to_char_p(self): """ diff --git a/pypy/module/_rawffi/alt/type_converter.py b/pypy/module/_rawffi/alt/type_converter.py --- 
a/pypy/module/_rawffi/alt/type_converter.py +++ b/pypy/module/_rawffi/alt/type_converter.py @@ -25,7 +25,7 @@ assert libffi.IS_32_BIT self._longlong(w_ffitype, w_obj) elif w_ffitype.is_signed(): - intval = space.truncatedint_w(w_obj) + intval = space.truncatedint_w(w_obj, allow_conversion=False) self.handle_signed(w_ffitype, w_obj, intval) elif self.maybe_handle_char_or_unichar_p(w_ffitype, w_obj): # the object was already handled from within @@ -33,16 +33,16 @@ pass elif w_ffitype.is_pointer(): w_obj = self.convert_pointer_arg_maybe(w_obj, w_ffitype) - intval = space.truncatedint_w(w_obj) + intval = space.truncatedint_w(w_obj, allow_conversion=False) self.handle_pointer(w_ffitype, w_obj, intval) elif w_ffitype.is_unsigned(): - uintval = r_uint(space.truncatedint_w(w_obj)) + uintval = r_uint(space.truncatedint_w(w_obj, allow_conversion=False)) self.handle_unsigned(w_ffitype, w_obj, uintval) elif w_ffitype.is_char(): - intval = space.int_w(space.ord(w_obj)) + intval = space.int_w(space.ord(w_obj), allow_conversion=False) self.handle_char(w_ffitype, w_obj, intval) elif w_ffitype.is_unichar(): - intval = space.int_w(space.ord(w_obj)) + intval = space.int_w(space.ord(w_obj), allow_conversion=False) self.handle_unichar(w_ffitype, w_obj, intval) elif w_ffitype.is_double(): self._float(w_ffitype, w_obj) @@ -60,20 +60,20 @@ def _longlong(self, w_ffitype, w_obj): # a separate function, which can be seen by the jit or not, # depending on whether longlongs are supported - longlongval = self.space.truncatedlonglong_w(w_obj) + longlongval = self.space.truncatedlonglong_w(w_obj, allow_conversion=False) self.handle_longlong(w_ffitype, w_obj, longlongval) def _float(self, w_ffitype, w_obj): # a separate function, which can be seen by the jit or not, # depending on whether floats are supported - floatval = self.space.float_w(w_obj) + floatval = self.space.float_w(w_obj, allow_conversion=False) self.handle_float(w_ffitype, w_obj, floatval) def _singlefloat(self, w_ffitype, w_obj): # a separate function, which can be seen by the jit or not, # depending on whether singlefloats are supported from rpython.rlib.rarithmetic import r_singlefloat - floatval = self.space.float_w(w_obj) + floatval = self.space.float_w(w_obj, allow_conversion=False) singlefloatval = r_singlefloat(floatval) self.handle_singlefloat(w_ffitype, w_obj, singlefloatval) diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -1034,6 +1034,18 @@ assert len(b) == 13 assert str(b[12]) == "-0.0" + def test_getitem_only_ints(self): + class MyInt(object): + def __init__(self, x): + self.x = x + + def __int__(self): + return self.x + + a = self.array('i', [1, 2, 3, 4, 5, 6]) + raises(TypeError, "a[MyInt(0)]") + raises(TypeError, "a[MyInt(0):MyInt(5)]") + class AppTestArrayBuiltinShortcut(AppTestArray): spaceconfig = AppTestArray.spaceconfig.copy() diff --git a/pypy/module/cppyy/test/test_zjit.py b/pypy/module/cppyy/test/test_zjit.py --- a/pypy/module/cppyy/test/test_zjit.py +++ b/pypy/module/cppyy/test/test_zjit.py @@ -120,7 +120,7 @@ return FakeInt(int(obj)) assert 0 - def float_w(self, w_obj): + def float_w(self, w_obj, allow_conversion=True): assert isinstance(w_obj, FakeFloat) return w_obj.val @@ -141,7 +141,7 @@ def is_w(self, w_one, w_two): return w_one is w_two - def int_w(self, w_obj): + def int_w(self, w_obj, allow_conversion=True): assert isinstance(w_obj, FakeInt) return w_obj.val diff --git 
a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -375,7 +375,7 @@ pass class W_IntegerBox(W_NumberBox): - def int_w(self, space): + def _int_w(self, space): return space.int_w(self.descr_int(space)) class W_SignedIntegerBox(W_IntegerBox): diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py --- a/pypy/module/micronumpy/compile.py +++ b/pypy/module/micronumpy/compile.py @@ -151,11 +151,11 @@ assert isinstance(w_obj, boxes.W_GenericBox) return self.float(w_obj.descr_float(self)) - def float_w(self, w_obj): + def float_w(self, w_obj, allow_conversion=True): assert isinstance(w_obj, FloatObject) return w_obj.floatval - def int_w(self, w_obj): + def int_w(self, w_obj, allow_conversion=True): if isinstance(w_obj, IntObject): return w_obj.intval elif isinstance(w_obj, FloatObject): diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -868,8 +868,8 @@ args_w = space.fixedview(w_tuple) if len(args_w) != 2: raise OperationError(space.w_TypeError, space.wrap(msg)) - actime = space.float_w(args_w[0]) - modtime = space.float_w(args_w[1]) + actime = space.float_w(args_w[0], allow_conversion=False) + modtime = space.float_w(args_w[1], allow_conversion=False) dispatch_filename(rposix.utime, 2)(space, w_path, (actime, modtime)) except OSError, e: raise wrap_oserror2(space, e, w_path) diff --git a/pypy/module/struct/test/test_struct.py b/pypy/module/struct/test/test_struct.py --- a/pypy/module/struct/test/test_struct.py +++ b/pypy/module/struct/test/test_struct.py @@ -20,7 +20,6 @@ """) cls.w_native_is_bigendian = cls.space.wrap(native_is_bigendian) - def test_error(self): """ struct.error should be an exception class. 
@@ -384,6 +383,19 @@ assert self.struct.unpack("ii", b) == (62, 12) raises(self.struct.error, self.struct.unpack, "i", b) + def test___float__(self): + class MyFloat(object): + def __init__(self, x): + self.x = x + def __float__(self): + return self.x + + obj = MyFloat(42.3) + data = self.struct.pack('d', obj) + obj2, = self.struct.unpack('d', data) + assert type(obj2) is float + assert obj2 == 42.3 + class AppTestStructBuffer(object): spaceconfig = dict(usemodules=['struct', '__pypy__']) diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py --- a/pypy/objspace/fake/objspace.py +++ b/pypy/objspace/fake/objspace.py @@ -45,13 +45,13 @@ def unicode_w(self, space): return NonConstant(u"foobar") - def int_w(self, space): + def int_w(self, space, allow_conversion=True): return NonConstant(-42) def uint_w(self, space): return r_uint(NonConstant(42)) - def bigint_w(self, space): + def bigint_w(self, space, allow_conversion=True): from rpython.rlib.rbigint import rbigint return rbigint.fromint(NonConstant(42)) @@ -117,7 +117,7 @@ def _freeze_(self): return True - def float_w(self, w_obj): + def float_w(self, w_obj, allow_conversion=True): is_root(w_obj) return NonConstant(42.5) diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -34,7 +34,16 @@ def unwrap(self, space): return self.floatval - def float_w(self, space): + def int_w(self, space, allow_conversion=True): + self._typed_unwrap_error(space, "integer") + + def bigint_w(self, space, allow_conversion=True): + self._typed_unwrap_error(space, "integer") + + def float_w(self, space, allow_conversion=True): + return self.floatval + + def _float_w(self, space): return self.floatval def int(self, space): diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -42,7 +42,8 @@ return False if self.user_overridden_class or w_other.user_overridden_class: return self is w_other - return space.int_w(self) == space.int_w(w_other) + return (space.int_w(self, allow_conversion=False) == + space.int_w(w_other, allow_conversion=False)) def immutable_unique_id(self, space): if self.user_overridden_class: @@ -309,9 +310,13 @@ """representation for debugging purposes""" return "%s(%d)" % (self.__class__.__name__, self.intval) - def int_w(self, space): + def int_w(self, space, allow_conversion=True): return int(self.intval) - unwrap = int_w + + def _int_w(self, space): + return int(self.intval) + + unwrap = _int_w def uint_w(self, space): intval = self.intval @@ -320,12 +325,18 @@ "cannot convert negative integer to unsigned") return r_uint(intval) - def bigint_w(self, space): + def bigint_w(self, space, allow_conversion=True): return rbigint.fromint(self.intval) - def float_w(self, space): + def _bigint_w(self, space): + return rbigint.fromint(self.intval) + + def float_w(self, space, allow_conversion=True): return float(self.intval) + # note that we do NOT implement _float_w, because __float__ cannot return + # an int + def int(self, space): if type(self) is W_IntObject: return self @@ -665,7 +676,7 @@ # int_w is effectively what we want in this case, # we cannot construct a subclass of int instance with an # an overflowing long - value = space.int_w(w_obj) + value = space.int_w(w_obj, allow_conversion=False) elif space.isinstance_w(w_value, space.w_str): value, w_longval = _string_to_int_or_long(space, w_value, space.str_w(w_value)) 
diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -244,7 +244,7 @@ def fromrarith_int(i): return W_LongObject(rbigint.fromrarith_int(i)) - def int_w(self, space): + def _int_w(self, space): try: return self.num.toint() except OverflowError: @@ -261,10 +261,16 @@ raise oefmt(space.w_OverflowError, "long int too large to convert to unsigned int") - def bigint_w(self, space): + def bigint_w(self, space, allow_conversion=True): return self.num - def float_w(self, space): + def _bigint_w(self, space): + return self.num + + def float_w(self, space, allow_conversion=True): + return self.tofloat(space) + + def _float_w(self, space): return self.tofloat(space) def int(self, space): diff --git a/pypy/objspace/std/smalllongobject.py b/pypy/objspace/std/smalllongobject.py --- a/pypy/objspace/std/smalllongobject.py +++ b/pypy/objspace/std/smalllongobject.py @@ -44,7 +44,7 @@ def __repr__(self): return '' % self.longlong - def int_w(self, space): + def _int_w(self, space): a = self.longlong b = intmask(a) if b == a: @@ -63,10 +63,13 @@ raise oefmt(space.w_OverflowError, "long int too large to convert to unsigned int") - def bigint_w(self, space): + def bigint_w(self, space, allow_conversion=True): return self.asbigint() - def float_w(self, space): + def _bigint_w(self, space): + return self.asbigint() + + def _float_w(self, space): return float(self.longlong) def int(self, space): diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1046,7 +1046,7 @@ assert isinstance(string, str) return string - def int_w(self, integer): + def int_w(self, integer, allow_conversion=True): assert isinstance(integer, int) return integer diff --git a/pypy/objspace/std/test/test_intobject.py b/pypy/objspace/std/test/test_intobject.py --- a/pypy/objspace/std/test/test_intobject.py +++ b/pypy/objspace/std/test/test_intobject.py @@ -553,6 +553,15 @@ assert 3 .__coerce__(4) == (3, 4) assert 3 .__coerce__(4L) == NotImplemented + def test_fake_int_as_base(self): + class MyInt(object): + def __init__(self, x): + self.x = x + def __int__(self): + return self.x + + base = MyInt(24) + assert int('10', base) == 24 class AppTestIntShortcut(AppTestInt): spaceconfig = {"objspace.std.intshortcut": True} diff --git a/pypy/objspace/std/test/test_listobject.py b/pypy/objspace/std/test/test_listobject.py --- a/pypy/objspace/std/test/test_listobject.py +++ b/pypy/objspace/std/test/test_listobject.py @@ -842,6 +842,26 @@ except TypeError: pass + def test_mul___index__(self): + class MyInt(object): + def __init__(self, x): + self.x = x + + def __int__(self): + return self.x + + class MyIndex(object): + def __init__(self, x): + self.x = x + + def __index__(self): + return self.x + + assert [0] * MyIndex(3) == [0, 0, 0] + raises(TypeError, "[0]*MyInt(3)") + raises(TypeError, "[0]*MyIndex(MyInt(3))") + + def test_index(self): c = range(10) assert c.index(0) == 0 diff --git a/pypy/objspace/std/test/test_stringformat.py b/pypy/objspace/std/test/test_stringformat.py --- a/pypy/objspace/std/test/test_stringformat.py +++ b/pypy/objspace/std/test/test_stringformat.py @@ -186,6 +186,22 @@ def test_broken_unicode(self): raises(UnicodeDecodeError, 'Názov: %s'.__mod__, u'Jerry') + def test___int__(self): + class MyInt(object): + def __init__(self, x): + self.x = x + def __int__(self): + 
return self.x + # + x = MyInt(65) + assert '%c' % x == 'A' + + +class Foo(object): + def __cmp__(self, other): + return MyInt(0) + + class AppTestWidthPrec: def test_width(self): a = 'a' diff --git a/pypy/objspace/test/test_descroperation.py b/pypy/objspace/test/test_descroperation.py --- a/pypy/objspace/test/test_descroperation.py +++ b/pypy/objspace/test/test_descroperation.py @@ -721,8 +721,19 @@ return CannotConvertToBool() x = X() raises(MyError, "'foo' in x") - - + + def test___cmp___fake_int(self): + class MyInt(object): + def __init__(self, x): + self.x = x + def __int__(self): + return self.x + class X(object): + def __cmp__(self, other): + return MyInt(0) + + assert X() == 'hello' + class AppTestWithBuiltinShortcut(AppTest_Descroperation): spaceconfig = {'objspace.std.builtinshortcut': True} From noreply at buildbot.pypy.org Fri Feb 28 16:52:00 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 28 Feb 2014 16:52:00 +0100 (CET) Subject: [pypy-commit] pypy default: finish the doc (I hope) Message-ID: <20140228155200.CA1ED1D251B@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69555:e20dda5d1f83 Date: 2014-02-28 17:50 +0200 http://bitbucket.org/pypy/pypy/changeset/e20dda5d1f83/ Log: finish the doc (I hope) diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -45,6 +45,11 @@ You should really do your own error handling in the source. It'll acquire the GIL. +.. function:: int pypy_execute_source_ptr(char* source, void* ptr); + + Just like the above, except it registers a magic argument in the source + scope as ``c_argument``, where ``void*`` is encoded as Python int. + .. function:: void pypy_thread_attach(void); In case your application uses threads that are initialized outside of PyPy, @@ -141,12 +146,17 @@ Calling to Python, result: 6 finished the Python part - +As you can see, we successfully managed to call Python from C and C from +Python. Now having one callback might not be enough, so what typically happens +is that we would pass a struct full of callbacks to ``pypy_execute_source_ptr`` +and fill the structure from Python side for the future use. Threading --------- -XXXX I don't understand what's going on, discuss with unbit +In case you want to use pthreads, what you need to do is to call +``pypy_thread_attach`` from each of the threads that you created (but not +from the main thread) and call ``pypy_init_threads`` from the main thread. .. _`cffi`: http://cffi.readthedocs.org/ .. _`uwsgi`: http://uwsgi-docs.readthedocs.org/en/latest/ From noreply at buildbot.pypy.org Fri Feb 28 16:53:12 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 28 Feb 2014 16:53:12 +0100 (CET) Subject: [pypy-commit] pypy default: add a title Message-ID: <20140228155312.875741D251B@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69556:2ac5b22f126e Date: 2014-02-28 17:52 +0200 http://bitbucket.org/pypy/pypy/changeset/2ac5b22f126e/ Log: add a title diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -1,3 +1,6 @@ + +Embedding PyPy +-------------- PyPy has a very minimal and a very strange embedding interface, based on the usage of `cffi`_ and the philosophy that Python is a better language in C. 
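The embedding documentation touched by the changesets above says that ``pypy_init_threads`` must be called once from the main thread and ``pypy_thread_attach`` from every thread created outside of PyPy, but it does not show the calls used together. What follows is only a minimal sketch of how they might be combined, not code taken from any changeset: the placement of ``pypy_init_threads`` after ``pypy_setup_home`` is an assumption, and the home path is the same illustrative ``/opt/pypy`` location used in the docs::

    #include "include/PyPy.h"
    #include <pthread.h>
    #include <stdio.h>

    static void *worker(void *ignored)
    {
        /* a thread created outside of PyPy registers itself first */
        pypy_thread_attach();
        /* pypy_execute_source() acquires the GIL itself */
        pypy_execute_source((char*)"print 'hello from a pthread'");
        return NULL;
    }

    int main(void)
    {
        pthread_t th;

        rpython_startup_code();
        if (pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1)) {
            printf("Error setting pypy home!\n");
            return 1;
        }
        pypy_init_threads();   /* once, from the main thread */

        pthread_create(&th, NULL, worker, NULL);
        pthread_join(th, NULL);
        return 0;
    }

Compiling works like the earlier examples in the docs, with ``-lpthread`` added to the gcc invocation.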
From noreply at buildbot.pypy.org Fri Feb 28 17:00:47 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 28 Feb 2014 17:00:47 +0100 (CET) Subject: [pypy-commit] pypy default: fix a link and a function signature Message-ID: <20140228160047.A7CA91D251B@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69557:cbcb6d0aaec7 Date: 2014-02-28 17:59 +0200 http://bitbucket.org/pypy/pypy/changeset/cbcb6d0aaec7/ Log: fix a link and a function signature diff --git a/include/PyPy.h b/include/PyPy.h --- a/include/PyPy.h +++ b/include/PyPy.h @@ -9,7 +9,7 @@ #endif // call this first -char* rpython_startup_code(void); +void rpython_startup_code(void); // pypy_init_threads has to be called in case you want to use threads void pypy_init_threads(void); diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -17,11 +17,11 @@ to make a full API working, provided you'll follow a few principles. The API is: -.. function:: char* rpython_startup_code(void); +.. function:: void rpython_startup_code(void); This is a function that you have to call (once) before calling anything. It initializes the RPython/PyPy GC and does a bunch of necessary startup - code. This function cannot fail and always returns NULL. + code. This function cannot fail. .. function:: void pypy_init_threads(void); @@ -164,3 +164,4 @@ .. _`cffi`: http://cffi.readthedocs.org/ .. _`uwsgi`: http://uwsgi-docs.readthedocs.org/en/latest/ .. _`PyPy uwsgi plugin`: http://uwsgi-docs.readthedocs.org/en/latest/PyPy.html +.. _`how to compile PyPy`: getting-started.html diff --git a/rpython/rlib/entrypoint.py b/rpython/rlib/entrypoint.py --- a/rpython/rlib/entrypoint.py +++ b/rpython/rlib/entrypoint.py @@ -83,4 +83,4 @@ @entrypoint('main', [], c_name='rpython_startup_code') def rpython_startup_code(): - return RPython_StartupCode() + RPython_StartupCode() From noreply at buildbot.pypy.org Fri Feb 28 17:06:02 2014 From: noreply at buildbot.pypy.org (fijal) Date: Fri, 28 Feb 2014 17:06:02 +0100 (CET) Subject: [pypy-commit] pypy default: Return -1 on failure (as opposed to just 1) Message-ID: <20140228160602.0D0211D2520@cobra.cs.uni-duesseldorf.de> Author: Maciej Fijalkowski Branch: Changeset: r69558:a73ce680712f Date: 2014-02-28 18:05 +0200 http://bitbucket.org/pypy/pypy/changeset/a73ce680712f/ Log: Return -1 on failure (as opposed to just 1) diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -38,13 +38,13 @@ * ``verbose``: if non-zero, would print error messages to stderr - Function returns 0 on success or 1 on failure, can be called multiple times + Function returns 0 on success or -1 on failure, can be called multiple times until the library is found. .. function:: int pypy_execute_source(char* source); Execute the source code given in the ``source`` argument. Will print - the error message to stderr upon failure and return 1, otherwise returns 0. + the error message to stderr upon failure and return -1, otherwise returns 0. You should really do your own error handling in the source. It'll acquire the GIL. 
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -117,7 +117,7 @@ debug("OperationError:") debug(" operror-type: " + e.w_type.getname(space)) debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) - return 1 + return -1 @entrypoint('main', [rffi.CCHARP], c_name='pypy_execute_source') def pypy_execute_source(ll_source): @@ -175,7 +175,7 @@ debug("OperationError:") debug(" operror-type: " + e.w_type.getname(space)) debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space)))) - return 1 + return -1 return 0 return entry_point, {'pypy_execute_source': pypy_execute_source, From noreply at buildbot.pypy.org Fri Feb 28 17:08:00 2014 From: noreply at buildbot.pypy.org (antocuni) Date: Fri, 28 Feb 2014 17:08:00 +0100 (CET) Subject: [pypy-commit] pypy default: tweaks Message-ID: <20140228160800.C0C1B1D2520@cobra.cs.uni-duesseldorf.de> Author: Antonio Cuni Branch: Changeset: r69559:c61f7fffb7aa Date: 2014-02-28 17:05 +0100 http://bitbucket.org/pypy/pypy/changeset/c61f7fffb7aa/ Log: tweaks diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -7,13 +7,13 @@ It was developed in collaboration with Roberto De Ioris from the `uwsgi`_ project. The `PyPy uwsgi plugin`_ is a good example of usage of such interface. -The first thing that you need, that we plan to change in the future, is to -compile PyPy yourself with an option ``--shared``. Consult the +The first thing that you need is to compile PyPy yourself with an option +``--shared``. We plan to make ``--shared`` the default in the future. Consult the `how to compile PyPy`_ doc for details. That should result in ``libpypy.so`` or ``pypy.dll`` file or something similar, depending on your platform. Consult your platform specification for details. -The resulting shared library has very few functions that are however enough +The resulting shared library exports very few functions that are however enough to make a full API working, provided you'll follow a few principles. The API is: @@ -29,24 +29,25 @@ .. function:: long pypy_setup_home(char* home, int verbose); - This is another function that you have to call at some point, without - it you would not be able to find the standard library (and run pretty much - nothing). Arguments: + This function searches the PyPy standard library starting from the given + "PyPy home directory". It is not strictly necessary to execute it before + running Python code, but without it you will not be able to import any + non-builtin module from the standard library. The arguments are: - * ``home``: null terminated path to an executable inside the pypy directory + * ``home``: NULL terminated path to an executable inside the pypy directory (can be a .so name, can be made up) - * ``verbose``: if non-zero, would print error messages to stderr + * ``verbose``: if non-zero, it will print error messages to stderr Function returns 0 on success or -1 on failure, can be called multiple times until the library is found. .. function:: int pypy_execute_source(char* source); - Execute the source code given in the ``source`` argument. Will print - the error message to stderr upon failure and return -1, otherwise returns 0. - You should really do your own error handling in the source. It'll acquire - the GIL. + Execute the Python source code given in the ``source`` argument. 
In case of + exceptions, it will print the Python traceback to stderr and return 1, + otherwise return 0. You should really do your own error handling in the + source. It'll acquire the GIL. .. function:: int pypy_execute_source_ptr(char* source, void* ptr); From noreply at buildbot.pypy.org Fri Feb 28 17:24:21 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 28 Feb 2014 17:24:21 +0100 (CET) Subject: [pypy-commit] pypy default: eliminate a few more ops in dot Message-ID: <20140228162421.5CC021D251B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69560:fb2dae86f26e Date: 2014-02-28 11:23 -0500 http://bitbucket.org/pypy/pypy/changeset/fb2dae86f26e/ Log: eliminate a few more ops in dot diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -79,30 +79,31 @@ class ArrayIter(object): - _immutable_fields_ = ['array', 'size', 'indices', 'shape_m1[*]', - 'strides[*]', 'backstrides[*]'] + _immutable_fields_ = ['array', 'size', 'ndim_m1', 'shape_m1[*]', + 'strides[*]', 'backstrides[*]', 'indices'] def __init__(self, array, size, shape, strides, backstrides): assert len(shape) == len(strides) == len(backstrides) self.array = array self.size = size - self.indices = [0] * len(shape) + self.ndim_m1 = len(shape) - 1 self.shape_m1 = [s - 1 for s in shape] self.strides = strides self.backstrides = backstrides + self.indices = [0] * len(shape) self.reset() @jit.unroll_safe def reset(self): self.index = 0 - for i in xrange(len(self.shape_m1)): + for i in xrange(self.ndim_m1, -1, -1): self.indices[i] = 0 self.offset = self.array.start @jit.unroll_safe def next(self): self.index += 1 - for i in xrange(len(self.shape_m1) - 1, -1, -1): + for i in xrange(self.ndim_m1, -1, -1): if self.indices[i] < self.shape_m1[i]: self.indices[i] += 1 self.offset += self.strides[i] diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py --- a/pypy/module/micronumpy/test/test_zjit.py +++ b/pypy/module/micronumpy/test/test_zjit.py @@ -517,13 +517,12 @@ 'int_lt': 1, 'jump': 1, 'raw_load': 2}) - self.check_resops({'arraylen_gc': 4, - 'float_add': 2, + self.check_resops({'float_add': 2, 'float_mul': 2, 'getarrayitem_gc': 11, 'getarrayitem_gc_pure': 15, 'getfield_gc': 30, - 'getfield_gc_pure': 40, + 'getfield_gc_pure': 44, 'guard_class': 4, 'guard_false': 14, 'guard_nonnull': 8, @@ -535,7 +534,7 @@ 'int_ge': 4, 'int_le': 8, 'int_lt': 11, - 'int_sub': 8, + 'int_sub': 4, 'jump': 3, 'raw_load': 6, 'raw_store': 1, From noreply at buildbot.pypy.org Fri Feb 28 17:46:29 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 28 Feb 2014 17:46:29 +0100 (CET) Subject: [pypy-commit] pypy default: Remove the sequence of words Message-ID: <20140228164629.DEE4B1C244E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r69561:3bf9087dd9ac Date: 2014-02-28 17:45 +0100 http://bitbucket.org/pypy/pypy/changeset/3bf9087dd9ac/ Log: Remove the sequence of words "pypy is put in ``/opt/pypy`` (a source checkout)" because for me it sounds too much like: "you checkout pypy in your home dir, build it, and then you need really to copy the complete checkout to /opt/pypy; it's not enough to just install it there". 
diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -67,9 +67,10 @@ Note that this API is a lot more minimal than say CPython C API, so at first it's obvious to think that you can't do much. However, the trick is to do all the logic in Python and expose it via `cffi`_ callbacks. Let's assume -we're on linux and pypy is put in ``/opt/pypy`` (a source checkout) and -library is in ``/opt/pypy/libpypy-c.so``. We write a little C program -(for simplicity assuming that all operations will be performed:: +we're on linux and pypy is installed in ``/opt/pypy`` with the +library in ``/opt/pypy/bin/libpypy-c.so``. (It doesn't need to be +installed; you can also replace this path with your local checkout.) +We write a little C program:: #include "include/PyPy.h" #include @@ -81,6 +82,7 @@ int res; rpython_startup_code(); + // pypy_setup_home() is not needed in this trivial example res = pypy_execute_source((char*)source); if (res) { printf("Error calling pypy_execute_source!\n"); @@ -130,7 +132,7 @@ void *lib, *func; rpython_startup_code(); - res = pypy_setup_home("/opt/pypy/pypy/libpypy-c.so", 1); + res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1); if (res) { printf("Error setting pypy home!\n"); return 1; From noreply at buildbot.pypy.org Fri Feb 28 18:20:10 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 28 Feb 2014 18:20:10 +0100 (CET) Subject: [pypy-commit] pypy default: cleanups Message-ID: <20140228172010.4407A1D251B@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: Changeset: r69562:a6b53dfd3c04 Date: 2014-02-28 11:59 -0500 http://bitbucket.org/pypy/pypy/changeset/a6b53dfd3c04/ Log: cleanups diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -8,7 +8,6 @@ shape_agreement_multiple - def where(space, w_arr, w_x=None, w_y=None): """where(condition, [x, y]) @@ -199,8 +198,7 @@ choices = [convert_to_array(space, w_item) for w_item in space.listview(w_choices)] if not choices: - raise OperationError(space.w_ValueError, - space.wrap("choices list cannot be empty")) + raise oefmt(space.w_ValueError, "choices list cannot be empty") if space.is_none(w_out): w_out = None elif not isinstance(w_out, W_NDimArray): @@ -219,11 +217,9 @@ mode = clipmode_converter(space, w_mode) if not w_indices: - raise OperationError(space.w_ValueError, - space.wrap("indice list cannot be empty")) + raise oefmt(space.w_ValueError, "indices list cannot be empty") if not w_values: - raise OperationError(space.w_ValueError, - space.wrap("value list cannot be empty")) + raise oefmt(space.w_ValueError, "value list cannot be empty") dtype = arr.get_dtype() @@ -243,8 +239,9 @@ if index < 0 or index >= arr.get_size(): if mode == NPY.RAISE: - raise OperationError(space.w_IndexError, space.wrap( - "index %d is out of bounds for axis 0 with size %d" % (index, arr.get_size()))) + raise oefmt(space.w_IndexError, + "index %d is out of bounds for axis 0 with size %d", + index, arr.get_size()) elif mode == NPY.WRAP: index = index % arr.get_size() elif mode == NPY.CLIP: diff --git a/pypy/module/micronumpy/boxes.py b/pypy/module/micronumpy/boxes.py --- a/pypy/module/micronumpy/boxes.py +++ b/pypy/module/micronumpy/boxes.py @@ -18,7 +18,6 @@ from pypy.module.micronumpy.concrete import VoidBoxStorage from pypy.module.micronumpy.flagsobj import W_FlagsObject - MIXIN_32 = (W_IntObject.typedef,) if LONG_BIT == 32 else () MIXIN_64 = 
(W_IntObject.typedef,) if LONG_BIT == 64 else () @@ -474,16 +473,13 @@ except IndexError: if indx < 0: indx += len(self.dtype.names) - raise OperationError(space.w_IndexError, space.wrap( - "invalid index (%d)" % indx)) + raise oefmt(space.w_IndexError, "invalid index (%d)", indx) else: - raise OperationError(space.w_IndexError, space.wrap( - "invalid index")) + raise oefmt(space.w_IndexError, "invalid index") try: ofs, dtype = self.dtype.fields[item] except KeyError: - raise OperationError(space.w_IndexError, space.wrap( - "invalid index")) + raise oefmt(space.w_IndexError, "invalid index") from pypy.module.micronumpy.types import VoidType if isinstance(dtype.itemtype, VoidType): @@ -499,13 +495,11 @@ if space.isinstance_w(w_item, space.w_basestring): item = space.str_w(w_item) else: - raise OperationError(space.w_IndexError, space.wrap( - "invalid index")) + raise oefmt(space.w_IndexError, "invalid index") try: ofs, dtype = self.dtype.fields[item] except KeyError: - raise OperationError(space.w_ValueError, - space.wrap("field named %s not found" % item)) + raise oefmt(space.w_ValueError, "field named %s not found", item) dtype.itemtype.store(self.arr, self.ofs, ofs, dtype.coerce(space, w_value)) @@ -531,10 +525,8 @@ class W_UnicodeBox(W_CharacterBox): def descr__new__unicode_box(space, w_subtype, w_arg): - raise OperationError(space.w_NotImplementedError, space.wrap("Unicode is not supported yet")) - + raise oefmt(space.w_NotImplementedError, "Unicode is not supported yet") from pypy.module.micronumpy.descriptor import new_unicode_dtype - arg = space.unicode_w(space.unicode_from_object(w_arg)) # XXX size computations, we need tests anyway arr = VoidBoxStorage(len(arg), new_unicode_dtype(space, len(arg))) @@ -543,6 +535,7 @@ # arr.storage[i] = arg[i] return W_UnicodeBox(arr, 0, arr.dtype) + W_GenericBox.typedef = TypeDef("generic", __module__ = "numpy", diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -191,8 +191,7 @@ count -= 1 if count == shape_len: raise IndexError # but it's still not a single item - raise OperationError(space.w_IndexError, - space.wrap("invalid index")) + raise oefmt(space.w_IndexError, "invalid index") # check for arrays for w_item in view_w: if (isinstance(w_item, W_NDimArray) or @@ -212,8 +211,7 @@ idx = space.str_w(w_idx) dtype = self.dtype if not dtype.is_record() or idx not in dtype.fields: - raise OperationError(space.w_ValueError, space.wrap( - "field named %s not found" % idx)) + raise oefmt(space.w_ValueError, "field named %s not found", idx) return RecordChunk(idx) elif (space.isinstance_w(w_idx, space.w_int) or space.isinstance_w(w_idx, space.w_slice)): @@ -455,8 +453,8 @@ self.get_strides(), self.order) if new_strides is None: - raise OperationError(space.w_AttributeError, space.wrap( - "incompatible shape for a non-contiguous array")) + raise oefmt(space.w_AttributeError, + "incompatible shape for a non-contiguous array") new_backstrides = [0] * len(new_shape) for nd in range(len(new_shape)): new_backstrides[nd] = (new_shape[nd] - 1) * new_strides[nd] diff --git a/pypy/module/micronumpy/descriptor.py b/pypy/module/micronumpy/descriptor.py --- a/pypy/module/micronumpy/descriptor.py +++ b/pypy/module/micronumpy/descriptor.py @@ -295,16 +295,16 @@ try: item = self.names[indx] except IndexError: - raise OperationError(space.w_IndexError, space.wrap( - "Field index %d out of range." 
% indx)) + raise oefmt(space.w_IndexError, + "Field index %d out of range.", indx) else: - raise OperationError(space.w_ValueError, space.wrap( - "Field key must be an integer, string, or unicode.")) + raise oefmt(space.w_ValueError, + "Field key must be an integer, string, or unicode.") try: return self.fields[item][1] except KeyError: - raise OperationError(space.w_KeyError, space.wrap( - "Field named '%s' not found." % item)) + raise oefmt(space.w_KeyError, + "Field named '%s' not found.", item) def descr_len(self, space): if not self.fields: @@ -535,6 +535,7 @@ "cannot create dtype with type '%N'", w_dtype) raise oefmt(space.w_TypeError, "data type not understood") + W_Dtype.typedef = TypeDef("dtype", __module__ = "numpy", __new__ = interp2app(descr__new__), diff --git a/pypy/module/micronumpy/iterators.py b/pypy/module/micronumpy/iterators.py --- a/pypy/module/micronumpy/iterators.py +++ b/pypy/module/micronumpy/iterators.py @@ -118,17 +118,17 @@ if step == 0: return self.index += step - for i in xrange(len(self.shape_m1) - 1, -1, -1): + for i in xrange(self.ndim_m1, -1, -1): if self.indices[i] < (self.shape_m1[i] + 1) - step: self.indices[i] += step self.offset += self.strides[i] * step break else: - remaining_step = (self.indices[i] + step) // (self.shape_m1[i] + 1) - this_i_step = step - remaining_step * (self.shape_m1[i] + 1) - self.indices[i] = self.indices[i] + this_i_step - self.offset += self.strides[i] * this_i_step - step = remaining_step + rem_step = (self.indices[i] + step) // (self.shape_m1[i] + 1) + cur_step = step - rem_step * (self.shape_m1[i] + 1) + self.indices[i] += cur_step + self.offset += self.strides[i] * cur_step + step = rem_step assert step > 0 def done(self): diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -127,11 +127,11 @@ "index out of range for array")) size = loop.count_all_true(idx) if size > val.get_size() and val.get_size() != 1: - raise OperationError(space.w_ValueError, space.wrap( + raise oefmt(space.w_ValueError, "NumPy boolean array indexing assignment " "cannot assign %d input values to " - "the %d output values where the mask is true" % - (val.get_size(), size))) + "the %d output values where the mask is true", + val.get_size(), size) loop.setitem_filter(space, self, idx, val) def _prepare_array_index(self, space, w_index): @@ -994,14 +994,14 @@ def _reduce_argmax_argmin_impl(op_name): def impl(self, space, w_axis=None, w_out=None): if not space.is_none(w_axis): - raise OperationError(space.w_NotImplementedError, space.wrap( - "axis unsupported for %s" % op_name)) + raise oefmt(space.w_NotImplementedError, + "axis unsupported for %s", op_name) if not space.is_none(w_out): - raise OperationError(space.w_NotImplementedError, space.wrap( - "out unsupported for %s" % op_name)) + raise oefmt(space.w_NotImplementedError, + "out unsupported for %s", op_name) if self.get_size() == 0: - raise OperationError(space.w_ValueError, - space.wrap("Can't call %s on zero-size arrays" % op_name)) + raise oefmt(space.w_ValueError, + "Can't call %s on zero-size arrays", op_name) op = getattr(loop, op_name) try: res = op(self) @@ -1096,15 +1096,16 @@ elif lens == 4: base_index = 0 else: - raise OperationError(space.w_ValueError, space.wrap( - "__setstate__ called with len(args[1])==%d, not 5 or 4" % lens)) + raise oefmt(space.w_ValueError, + "__setstate__ called with len(args[1])==%d, not 5 or 4", lens) shape = space.getitem(w_state, 
space.wrap(base_index)) dtype = space.getitem(w_state, space.wrap(base_index+1)) #isfortran = space.getitem(w_state, space.wrap(base_index+2)) storage = space.getitem(w_state, space.wrap(base_index+3)) if not isinstance(dtype, descriptor.W_Dtype): - raise OperationError(space.w_ValueError, space.wrap( - "__setstate__(self, (shape, dtype, .. called with improper dtype '%r'" % dtype)) + raise oefmt(space.w_ValueError, + "__setstate__(self, (shape, dtype, .. called with " + "improper dtype '%R'", dtype) self.implementation = W_NDimArray.from_shape_and_storage(space, [space.int_w(i) for i in space.listview(shape)], rffi.str2charp(space.str_w(storage), track_allocation=False), diff --git a/pypy/module/micronumpy/sort.py b/pypy/module/micronumpy/sort.py --- a/pypy/module/micronumpy/sort.py +++ b/pypy/module/micronumpy/sort.py @@ -146,8 +146,7 @@ if axis < 0: axis = len(shape) + axis if axis < 0 or axis >= len(shape): - raise OperationError(space.w_IndexError, space.wrap( - "Wrong axis %d" % axis)) + raise oefmt(space.w_IndexError, "Wrong axis %d", axis) arr_iter = AllButAxisIter(arr, axis) index_impl = index_arr.implementation index_iter = AllButAxisIter(index_impl, axis) @@ -291,8 +290,7 @@ if axis < 0: axis = len(shape) + axis if axis < 0 or axis >= len(shape): - raise OperationError(space.w_IndexError, space.wrap( - "Wrong axis %d" % axis)) + raise oefmt(space.w_IndexError, "Wrong axis %d", axis) arr_iter = AllButAxisIter(arr, axis) stride_size = arr.strides[axis] axis_size = arr.shape[axis] diff --git a/pypy/module/micronumpy/ufuncs.py b/pypy/module/micronumpy/ufuncs.py --- a/pypy/module/micronumpy/ufuncs.py +++ b/pypy/module/micronumpy/ufuncs.py @@ -310,16 +310,15 @@ if (self.int_only and not dtype.is_int() or not self.allow_bool and dtype.is_bool() or not self.allow_complex and dtype.is_complex()): - raise OperationError(space.w_TypeError, space.wrap( - "ufunc %s not supported for the input type" % self.name)) + raise oefmt(space.w_TypeError, + "ufunc %s not supported for the input type", self.name) calc_dtype = find_unaryop_result_dtype(space, w_obj.get_dtype(), promote_to_float=self.promote_to_float, promote_bools=self.promote_bools) if out is not None: if not isinstance(out, W_NDimArray): - raise OperationError(space.w_TypeError, space.wrap( - 'output must be an array')) + raise oefmt(space.w_TypeError, 'output must be an array') res_dtype = out.get_dtype() #if not w_obj.get_dtype().can_cast_to(res_dtype): # raise oefmt(space.w_TypeError, @@ -424,13 +423,12 @@ w_rdtype.is_bool()) or not self.allow_complex and (w_ldtype.is_complex() or w_rdtype.is_complex())): - raise OperationError(space.w_TypeError, space.wrap( - "ufunc '%s' not supported for the input types" % self.name)) + raise oefmt(space.w_TypeError, + "ufunc '%s' not supported for the input types", self.name) if space.is_none(w_out): out = None elif not isinstance(w_out, W_NDimArray): - raise OperationError(space.w_TypeError, space.wrap( - 'output must be an array')) + raise oefmt(space.w_TypeError, 'output must be an array') else: out = w_out calc_dtype = out.get_dtype() @@ -578,7 +576,8 @@ return descriptor.get_dtype_cache(space).w_float64dtype for bytes, dtype in descriptor.get_dtype_cache(space).float_dtypes_by_num_bytes: if (dtype.kind == NPY.FLOATINGLTR and - dtype.itemtype.get_element_size() > dt.itemtype.get_element_size()): + dtype.itemtype.get_element_size() > + dt.itemtype.get_element_size()): return dtype return dt @@ -763,7 +762,7 @@ identity = extra_kwargs.get("identity") if identity is not None: identity = \ - 
descriptor.get_dtype_cache(space).w_longdtype.box(identity) + descriptor.get_dtype_cache(space).w_longdtype.box(identity) extra_kwargs["identity"] = identity func = ufunc_dtype_caller(space, ufunc_name, op_name, argcount, From noreply at buildbot.pypy.org Fri Feb 28 18:42:31 2014 From: noreply at buildbot.pypy.org (bdkearns) Date: Fri, 28 Feb 2014 18:42:31 +0100 (CET) Subject: [pypy-commit] pypy test-58c3d8552833: fix this test Message-ID: <20140228174231.332061C244E@cobra.cs.uni-duesseldorf.de> Author: Brian Kearns Branch: test-58c3d8552833 Changeset: r69563:033298f9b61f Date: 2014-02-28 09:41 -0800 http://bitbucket.org/pypy/pypy/changeset/033298f9b61f/ Log: fix this test diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py --- a/pypy/module/pypyjit/test_pypy_c/test_call.py +++ b/pypy/module/pypyjit/test_pypy_c/test_call.py @@ -194,7 +194,6 @@ assert log.result == 1000 loop, = log.loops_by_id('call') assert loop.match_by_id('call', """ - p14 = getarrayitem_gc_pure(p8, i9, descr=) i14 = force_token() i16 = force_token() """) From noreply at buildbot.pypy.org Fri Feb 28 19:54:02 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 28 Feb 2014 19:54:02 +0100 (CET) Subject: [pypy-commit] stmgc default: Fix Message-ID: <20140228185402.879021C244E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r902:4e72f77f0188 Date: 2014-02-28 07:57 +0100 http://bitbucket.org/pypy/stmgc/changeset/4e72f77f0188/ Log: Fix diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -389,7 +389,7 @@ /* if we were inevitable, signal */ if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) - cond_signal(C_INEVITABLE_DONE); + cond_broadcast(C_INEVITABLE_DONE); /* done */ _finish_transaction(); From noreply at buildbot.pypy.org Fri Feb 28 19:54:03 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 28 Feb 2014 19:54:03 +0100 (CET) Subject: [pypy-commit] stmgc default: First simplification step: unify all condition variables into one, Message-ID: <20140228185403.929421C244E@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r903:a14d95357717 Date: 2014-02-28 19:53 +0100 http://bitbucket.org/pypy/stmgc/changeset/a14d95357717/ Log: First simplification step: unify all condition variables into one, again. Should fix obscure synchronization bugs that are theoretically possible at least with more than two threads. diff --git a/c7/stm/contention.c b/c7/stm/contention.c --- a/c7/stm/contention.c +++ b/c7/stm/contention.c @@ -44,7 +44,7 @@ cond_wait(C_SAFE_POINT). Thus broadcasting C_SAFE_POINT is enough to wake it up in the second case. */ - cond_broadcast(C_SAFE_POINT); + cond_broadcast(); } } @@ -78,10 +78,10 @@ /* wait, hopefully until the other thread broadcasts "I'm done aborting" (spurious wake-ups are ok). */ dprintf(("contention: wait C_SAFE_POINT...\n")); - cond_wait(C_SAFE_POINT); + cond_wait(); dprintf(("contention: done\n")); - cond_broadcast(C_RESUME); + cond_broadcast(); /* now we return into _stm_write_slowpath() and will try again to acquire the write lock on our object. 
*/ diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -326,7 +326,6 @@ /* signal all the threads blocked in wait_for_other_safe_points() */ if (STM_SEGMENT->nursery_end == NSE_SIGNAL) { STM_SEGMENT->nursery_end = NURSERY_END; - cond_broadcast(C_SAFE_POINT); } STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; @@ -339,6 +338,9 @@ stm_thread_local_t *tl = STM_SEGMENT->running_thread; release_thread_segment(tl); /* cannot access STM_SEGMENT or STM_PSEGMENT from here ! */ + + /* wake up other threads waiting. */ + cond_broadcast(); } void stm_commit_transaction(void) @@ -387,16 +389,9 @@ STM_PSEGMENT->overflow_number = highest_overflow_number; } - /* if we were inevitable, signal */ - if (STM_PSEGMENT->transaction_state == TS_INEVITABLE) - cond_broadcast(C_INEVITABLE_DONE); - /* done */ _finish_transaction(); - /* wake up one other thread waiting for a segment. */ - cond_signal(C_RELEASE_THREAD_SEGMENT); - mutex_unlock(); } @@ -478,18 +473,6 @@ _finish_transaction(); - /* wake up one other thread waiting for a segment. In order to support - contention.c, we use a broadcast, to make sure that all threads are - signalled, including the one that requested an abort, if any. - Moreover, we wake up any thread waiting for this one to do a safe - point, if any (in _finish_transaction above). Finally, it's - possible that we reach this place from the middle of a piece of - code like wait_for_other_safe_points() which ends in broadcasting - C_RESUME; we must make sure to broadcast it. - */ - cond_broadcast(C_RELEASE_THREAD_SEGMENT); - cond_broadcast(C_RESUME); - mutex_unlock(); /* It seems to be a good idea, at least in some examples, to sleep diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -16,7 +16,7 @@ static union { struct { pthread_mutex_t global_mutex; - pthread_cond_t cond[_C_TOTAL]; + pthread_cond_t global_cond; /* some additional pieces of global state follow */ uint8_t in_use[NB_SEGMENTS]; /* 1 if running a pthread */ uint64_t global_time; @@ -30,11 +30,8 @@ if (pthread_mutex_init(&sync_ctl.global_mutex, NULL) != 0) stm_fatalerror("mutex initialization: %m\n"); - long i; - for (i = 0; i < _C_TOTAL; i++) { - if (pthread_cond_init(&sync_ctl.cond[i], NULL) != 0) - stm_fatalerror("cond initialization: %m\n"); - } + if (pthread_cond_init(&sync_ctl.global_cond, NULL) != 0) + stm_fatalerror("cond initialization: %m\n"); } static void teardown_sync(void) @@ -42,11 +39,8 @@ if (pthread_mutex_destroy(&sync_ctl.global_mutex) != 0) stm_fatalerror("mutex destroy: %m\n"); - long i; - for (i = 0; i < _C_TOTAL; i++) { - if (pthread_cond_destroy(&sync_ctl.cond[i]) != 0) - stm_fatalerror("cond destroy: %m\n"); - } + if (pthread_cond_destroy(&sync_ctl.global_cond) != 0) + stm_fatalerror("cond destroy: %m\n"); memset(&sync_ctl, 0, sizeof(sync_ctl.in_use)); } @@ -91,35 +85,29 @@ assert((_has_mutex_here = false, 1)); } -static inline void cond_wait_no_abort(enum cond_type_e ctype) +static inline void cond_wait_no_abort(void) { #ifdef STM_NO_COND_WAIT - stm_fatalerror("*** cond_wait/%d called!\n", (int)ctype); + stm_fatalerror("*** cond_wait called!\n"); #endif assert(_has_mutex_here); - if (UNLIKELY(pthread_cond_wait(&sync_ctl.cond[ctype], + if (UNLIKELY(pthread_cond_wait(&sync_ctl.global_cond, &sync_ctl.global_mutex) != 0)) - stm_fatalerror("pthread_cond_wait/%d: %m\n", (int)ctype); + stm_fatalerror("pthread_cond_wait: %m\n"); } -static inline void cond_wait(enum cond_type_e ctype) +static inline void cond_wait(void) { - cond_wait_no_abort(ctype); 
+ cond_wait_no_abort(); if (STM_PSEGMENT->transaction_state == TS_MUST_ABORT) abort_with_mutex(); } -static inline void cond_broadcast(enum cond_type_e ctype) +static inline void cond_broadcast(void) { - if (UNLIKELY(pthread_cond_broadcast(&sync_ctl.cond[ctype]) != 0)) - stm_fatalerror("pthread_cond_broadcast/%d: %m\n", (int)ctype); -} - -static inline void cond_signal(enum cond_type_e ctype) -{ - if (UNLIKELY(pthread_cond_signal(&sync_ctl.cond[ctype]) != 0)) - stm_fatalerror("pthread_cond_signal/%d: %m\n", (int)ctype); + if (UNLIKELY(pthread_cond_broadcast(&sync_ctl.global_cond) != 0)) + stm_fatalerror("pthread_cond_broadcast: %m\n"); } static bool acquire_thread_segment(stm_thread_local_t *tl) @@ -154,8 +142,8 @@ } /* Wait and retry. It is guaranteed that any thread releasing its segment will do so by acquiring the mutex and calling - cond_signal(C_RELEASE_THREAD_SEGMENT). */ - cond_wait_no_abort(C_RELEASE_THREAD_SEGMENT); + cond_broadcast(). */ + cond_wait_no_abort(); /* Return false to the caller, which will call us again */ return false; @@ -192,10 +180,10 @@ /* XXX should we wait here? or abort? or a mix? for now, always abort */ abort_with_mutex(); - //cond_wait(C_INEVITABLE_DONE); + //cond_wait(); } else { - cond_wait_no_abort(C_INEVITABLE_DONE); + cond_wait_no_abort(); } goto restart; } @@ -288,7 +276,7 @@ } if (wait) { - cond_wait(C_SAFE_POINT); + cond_wait(); /* XXX think: I believe this can end in a busy-loop, with this thread setting NSE_SIGNAL on the other thread; then the other thread commits, sends C_SAFE_POINT, finish the transaction, start @@ -300,7 +288,7 @@ /* all threads are at a safe-point now. Broadcast C_RESUME, which will allow them to resume --- but only when we release the mutex. */ - cond_broadcast(C_RESUME); + cond_broadcast(); return true; } @@ -336,9 +324,9 @@ /* signal all the threads blocked in wait_for_other_safe_points() */ - cond_broadcast(C_SAFE_POINT); + cond_broadcast(); - cond_wait(C_RESUME); + cond_wait(); STM_PSEGMENT->safe_point = SP_RUNNING; } diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -3,19 +3,11 @@ static void setup_sync(void); static void teardown_sync(void); -/* all synchronization is done via a mutex and a few condition variables */ -enum cond_type_e { - C_RELEASE_THREAD_SEGMENT, - C_SAFE_POINT, - C_RESUME, - C_INEVITABLE_DONE, - _C_TOTAL -}; +/* all synchronization is done via a mutex and a condition variable */ static void mutex_lock(void); static void mutex_unlock(void); -static void cond_wait(enum cond_type_e); -static void cond_broadcast(enum cond_type_e); -static void cond_signal(enum cond_type_e); +static void cond_wait(void); +static void cond_broadcast(void); #ifndef NDEBUG static bool _has_mutex(void); #endif From noreply at buildbot.pypy.org Fri Feb 28 21:03:37 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 28 Feb 2014 21:03:37 +0100 (CET) Subject: [pypy-commit] pypy default: A few cleanups Message-ID: <20140228200337.7FD1A1D24F6@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r69564:aa6570e5c64e Date: 2014-02-28 12:03 -0800 http://bitbucket.org/pypy/pypy/changeset/aa6570e5c64e/ Log: A few cleanups diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -3,23 +3,23 @@ -------------- PyPy has a very minimal and a very strange embedding interface, based on -the usage of `cffi`_ and the philosophy that Python is a better language in C. 
-It was developed in collaboration with Roberto De Ioris from the `uwsgi`_ -project. The `PyPy uwsgi plugin`_ is a good example of usage of such interface. +the usage of `cffi`_ and the philosophy that Python is a better language than +C. It was developed in collaboration with Roberto De Ioris from the `uwsgi`_ +project. The `PyPy uwsgi plugin`_ is a good example of using the embedding API. -The first thing that you need is to compile PyPy yourself with an option -``--shared``. We plan to make ``--shared`` the default in the future. Consult the -`how to compile PyPy`_ doc for details. That should result in ``libpypy.so`` +The first thing that you need is to compile PyPy yourself with the option +``--shared``. We plan to make ``--shared`` the default in the future. Consult +the `how to compile PyPy`_ doc for details. This will result in ``libpypy.so`` or ``pypy.dll`` file or something similar, depending on your platform. Consult your platform specification for details. -The resulting shared library exports very few functions that are however enough -to make a full API working, provided you'll follow a few principles. The API -is: +The resulting shared library exports very few functions, however they are +enough to accomplish everything you need, provided you follow a few principles. +The API is: .. function:: void rpython_startup_code(void); - This is a function that you have to call (once) before calling anything. + This is a function that you have to call (once) before calling anything else. It initializes the RPython/PyPy GC and does a bunch of necessary startup code. This function cannot fail. @@ -70,31 +70,33 @@ we're on linux and pypy is installed in ``/opt/pypy`` with the library in ``/opt/pypy/bin/libpypy-c.so``. (It doesn't need to be installed; you can also replace this path with your local checkout.) -We write a little C program:: +We write a little C program: - #include "include/PyPy.h" - #include +.. code-block: c - const char source[] = "print 'hello from pypy'"; + #include "include/PyPy.h" + #include - int main() - { - int res; + const char source[] = "print 'hello from pypy'"; - rpython_startup_code(); - // pypy_setup_home() is not needed in this trivial example - res = pypy_execute_source((char*)source); - if (res) { - printf("Error calling pypy_execute_source!\n"); + int main() + { + int res; + + rpython_startup_code(); + // pypy_setup_home() is not needed in this trivial example + res = pypy_execute_source((char*)source); + if (res) { + printf("Error calling pypy_execute_source!\n"); + } + return res; } - return res; - } If we save it as ``x.c`` now, compile it and run it with:: - fijal at hermann:/opt/pypy$ gcc -o x x.c -lpypy-c -L. - fijal at hermann:/opt/pypy$ LD_LIBRARY_PATH=. ./x - hello from pypy + fijal at hermann:/opt/pypy$ gcc -o x x.c -lpypy-c -L. + fijal at hermann:/opt/pypy$ LD_LIBRARY_PATH=. ./x + hello from pypy Worked! @@ -104,45 +106,47 @@ Typically we need something more to do than simply execute source. The following is a fully fledged example, please consult cffi documentation for details. It's a bit longish, but it captures a gist what can be done with the PyPy -embedding interface:: +embedding interface: - #include "include/PyPy.h" - #include +.. 
code-block: c - char source[] = "from cffi import FFI\n\ - ffi = FFI()\n\ - @ffi.callback('int(int)')\n\ - def func(a):\n\ - print 'Got from C %d' % a\n\ - return a * 2\n\ - ffi.cdef('int callback(int (*func)(int));')\n\ - c_func = ffi.cast('int(*)(int(*)(int))', c_argument)\n\ - c_func(func)\n\ - print 'finished the Python part'\n\ - "; + #include "include/PyPy.h" + #include - int callback(int (*func)(int)) - { - printf("Calling to Python, result: %d\n", func(3)); + char source[] = "from cffi import FFI\n\ + ffi = FFI()\n\ + @ffi.callback('int(int)')\n\ + def func(a):\n\ + print 'Got from C %d' % a\n\ + return a * 2\n\ + ffi.cdef('int callback(int (*func)(int));')\n\ + c_func = ffi.cast('int(*)(int(*)(int))', c_argument)\n\ + c_func(func)\n\ + print 'finished the Python part'\n\ + "; + + int callback(int (*func)(int)) + { + printf("Calling to Python, result: %d\n", func(3)); + } + + int main() + { + int res; + void *lib, *func; + + rpython_startup_code(); + res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1); + if (res) { + printf("Error setting pypy home!\n"); + return 1; } - - int main() - { - int res; - void *lib, *func; - - rpython_startup_code(); - res = pypy_setup_home("/opt/pypy/bin/libpypy-c.so", 1); - if (res) { - printf("Error setting pypy home!\n"); - return 1; - } - res = pypy_execute_source_ptr(source, (void*)callback); - if (res) { - printf("Error calling pypy_execute_source_ptr!\n"); - } - return res; + res = pypy_execute_source_ptr(source, (void*)callback); + if (res) { + printf("Error calling pypy_execute_source_ptr!\n"); } + return res; + } you can compile and run it with:: From noreply at buildbot.pypy.org Fri Feb 28 21:06:43 2014 From: noreply at buildbot.pypy.org (alex_gaynor) Date: Fri, 28 Feb 2014 21:06:43 +0100 (CET) Subject: [pypy-commit] pypy default: syntax fix Message-ID: <20140228200643.2AEC31D24F6@cobra.cs.uni-duesseldorf.de> Author: Alex Gaynor Branch: Changeset: r69565:a02be6558ee0 Date: 2014-02-28 12:06 -0800 http://bitbucket.org/pypy/pypy/changeset/a02be6558ee0/ Log: syntax fix diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst --- a/pypy/doc/embedding.rst +++ b/pypy/doc/embedding.rst @@ -72,7 +72,7 @@ installed; you can also replace this path with your local checkout.) We write a little C program: -.. code-block: c +.. code-block:: c #include "include/PyPy.h" #include @@ -108,7 +108,7 @@ It's a bit longish, but it captures a gist what can be done with the PyPy embedding interface: -.. code-block: c +.. 
code-block:: c #include "include/PyPy.h" #include From noreply at buildbot.pypy.org Fri Feb 28 21:47:24 2014 From: noreply at buildbot.pypy.org (arigo) Date: Fri, 28 Feb 2014 21:47:24 +0100 (CET) Subject: [pypy-commit] stmgc default: More tweaks Message-ID: <20140228204724.5AB191D24E7@cobra.cs.uni-duesseldorf.de> Author: Armin Rigo Branch: Changeset: r904:032926d523bb Date: 2014-02-28 21:47 +0100 http://bitbucket.org/pypy/stmgc/changeset/032926d523bb/ Log: More tweaks diff --git a/c7/stm/core.c b/c7/stm/core.c --- a/c7/stm/core.c +++ b/c7/stm/core.c @@ -323,11 +323,6 @@ static void _finish_transaction(void) { - /* signal all the threads blocked in wait_for_other_safe_points() */ - if (STM_SEGMENT->nursery_end == NSE_SIGNAL) { - STM_SEGMENT->nursery_end = NURSERY_END; - } - STM_PSEGMENT->safe_point = SP_NO_TRANSACTION; STM_PSEGMENT->transaction_state = TS_NONE; @@ -355,10 +350,18 @@ minor_collection(/*commit=*/ true); mutex_lock(); + + retry: + if (STM_SEGMENT->nursery_end != NURSERY_END) + collectable_safe_point(); + STM_PSEGMENT->safe_point = SP_SAFE_POINT; /* wait until the other thread is at a safe-point */ - wait_for_other_safe_points(); + if (!try_wait_for_other_safe_points()) { + STM_PSEGMENT->safe_point = SP_RUNNING; + goto retry; + } /* the rest of this function either runs atomically without releasing the mutex, or aborts the current thread. */ @@ -369,6 +372,7 @@ /* cannot abort any more from here */ dprintf(("commit_transaction\n")); + assert(STM_SEGMENT->nursery_end == NURSERY_END); assert(STM_PSEGMENT->transaction_state != TS_MUST_ABORT); STM_SEGMENT->jmpbuf_ptr = NULL; @@ -392,6 +396,8 @@ /* done */ _finish_transaction(); + assert(STM_SEGMENT->nursery_end == NURSERY_END); + mutex_unlock(); } @@ -473,6 +479,8 @@ _finish_transaction(); + STM_SEGMENT->nursery_end = NURSERY_END; + mutex_unlock(); /* It seems to be a good idea, at least in some examples, to sleep diff --git a/c7/stm/sync.c b/c7/stm/sync.c --- a/c7/stm/sync.c +++ b/c7/stm/sync.c @@ -3,13 +3,24 @@ #include -/* XXX Getting the most efficient locks is hard, but the following - simplification is probably good enough for small numbers of threads: - when a thread wants to check or change any global state (e.g. start - running a transaction, etc.), it acquires this single mutex. If - additionally it wants to wait until the global state is changed by - someone else, it waits on the condition variable. This should be - all we need for synchronization. +/* Each segment can be in one of three possible states, described by + the segment variable 'safe_point': + + - SP_NO_TRANSACTION: no thread is running any transaction using this + segment. + + - SP_RUNNING: a thread is running a transaction using this segment. + + - SP_SAFE_POINT: the thread that owns this segment is currently + suspended in a safe-point. (A safe-point means that it is not + changing anything right now, and the current shadowstack is correct.) + + Synchronization is done with a single mutex / condition variable. A + thread needs to have acquired the mutex in order to do things like + acquiring or releasing ownership of a segment or updating this + segment's state. No other thread can acquire the mutex concurrently, + and so there is no race: the (single) thread owning the mutex can + freely inspect or even change the state of other segments too. */ @@ -268,8 +279,7 @@ struct stm_priv_segment_info_s *other_pseg = get_priv_segment(i); if (other_pseg->safe_point == SP_RUNNING) { /* we need to wait for this thread. 
Use NSE_SIGNAL to ask - it (and possibly all other threads in the same case) to - enter a safe-point soon. */ + it to enter a safe-point soon. */ other_pseg->pub.nursery_end = NSE_SIGNAL; wait = true; } @@ -292,13 +302,6 @@ return true; } -static void wait_for_other_safe_points(void) -{ - while (!try_wait_for_other_safe_points()) { - /* loop */ - } -} - void _stm_collectable_safe_point(void) { /* If _stm_nursery_end was set to NSE_SIGNAL by another thread, @@ -315,6 +318,7 @@ static void collectable_safe_point(void) { + assert(_has_mutex()); assert(STM_PSEGMENT->safe_point == SP_RUNNING); while (STM_SEGMENT->nursery_end == NSE_SIGNAL) { @@ -330,5 +334,6 @@ STM_PSEGMENT->safe_point = SP_RUNNING; } + assert(STM_SEGMENT->nursery_end == NURSERY_END); dprintf(("collectable_safe_point done\n")); } diff --git a/c7/stm/sync.h b/c7/stm/sync.h --- a/c7/stm/sync.h +++ b/c7/stm/sync.h @@ -20,6 +20,5 @@ static void wait_for_end_of_inevitable_transaction(bool can_abort); /* see the source for an exact description */ -static void wait_for_other_safe_points(void); static bool try_wait_for_other_safe_points(void); static void collectable_safe_point(void);
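
The diffs above converge on a very simple synchronization discipline: one process-wide mutex plus a single condition variable, with cond_broadcast() used everywhere a purpose-specific cond_signal() used to be. The pattern is safe because every waiter re-checks its own predicate in a loop after waking up, so wakeups meant for other waiters (or spurious ones) are harmless. What follows is a minimal, self-contained sketch of that pattern, not the actual stmgc code; the names (acquire_segment, free_segments, and so on) are invented for illustration, and the real implementation layers error checking, safe-point states and abort handling on top of it.

    /* Sketch of the "one mutex + one broadcast condvar" discipline.
       Illustrative only: these names do not exist in stmgc. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  global_cond  = PTHREAD_COND_INITIALIZER;
    static int free_segments = 2;        /* toy piece of global state */

    /* Grab a "segment": loop around the wait, because a broadcast wakes
       every waiter and each one must re-check its own condition. */
    static void acquire_segment(void)
    {
        pthread_mutex_lock(&global_mutex);
        while (free_segments == 0)
            pthread_cond_wait(&global_cond, &global_mutex);
        free_segments--;
        pthread_mutex_unlock(&global_mutex);
    }

    /* Release it and wake *all* waiters, whatever they are waiting for;
       waiters whose condition is still false simply go back to sleep. */
    static void release_segment(void)
    {
        pthread_mutex_lock(&global_mutex);
        free_segments++;
        pthread_cond_broadcast(&global_cond);
        pthread_mutex_unlock(&global_mutex);
    }

    static void *worker(void *arg)
    {
        acquire_segment();
        printf("thread %ld got a segment\n", (long)arg);
        release_segment();
        return NULL;
    }

    int main(void)
    {
        pthread_t t[4];
        long i;
        for (i = 0; i < 4; i++)
            pthread_create(&t[i], NULL, worker, (void *)i);
        for (i = 0; i < 4; i++)
            pthread_join(&t[i], NULL);
        return 0;
    }

A sketch like this builds with something like "cc -pthread sketch.c". The cost of broadcasting is that every waiter wakes up even when its own condition is still false; as the comment removed from sync.c noted, that simplification is considered good enough for small numbers of threads.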