From pypy.commits at gmail.com Tue Mar 1 02:06:29 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Mon, 29 Feb 2016 23:06:29 -0800 (PST)
Subject: [pypy-commit] pypy s390x-backend: list append, insert, del. indexing functions do not generate byte code if IndexError would be raised
Message-ID: <56d53f75.a3abc20a.b9fc8.fffffe8c@mx.google.com>

Author: Richard Plangger
Branch: s390x-backend
Changeset: r82621:53b6b15a4a97
Date: 2016-03-01 08:05 +0100
http://bitbucket.org/pypy/pypy/changeset/53b6b15a4a97/

Log: list append, insert, del. indexing functions do not generate byte code if IndexError would be raised

diff --git a/rpython/jit/backend/llsupport/tl/code.py b/rpython/jit/backend/llsupport/tl/code.py
--- a/rpython/jit/backend/llsupport/tl/code.py
+++ b/rpython/jit/backend/llsupport/tl/code.py
@@ -14,14 +14,16 @@
 LIST_TYP = 'l'
 INT_TYP = 'i'
+OBJ_TYP = 'o'
+STR_TYP = 's'
+VAL_TYP = 'v' # either one of the earlier
+
+all_types = [INT_TYP, LIST_TYP, STR_TYP] # TODO OBJ_TYP
+
 SHORT_TYP = 'h'
 BYTE_TYP = 'b'
-OBJ_TYP = 'o'
-STR_TYP = 's'
 COND_TYP = 'c'
-VAL_TYP = 'v' # either one of the earlier
-
-all_types = [INT_TYP, LIST_TYP, STR_TYP]
+IDX_TYP = 'x'
 
 
 def unique_code():
@@ -146,6 +148,27 @@
         ctx.append_byte(self.BYTE_CODE)
         ctx.append_short(self.size)
 
+@requires_stack(LIST_TYP, IDX_TYP, INT_TYP) # TODO VAL_TYP
+@leaves_on_stack(LIST_TYP)
+class InsertList(ByteCode):
+    BYTE_CODE = unique_code()
+    def __init__(self):
+        pass
+
+@requires_stack(LIST_TYP, IDX_TYP)
+@leaves_on_stack(LIST_TYP)
+class DelList(ByteCode):
+    BYTE_CODE = unique_code()
+    def __init__(self):
+        pass
+
+@requires_stack(LIST_TYP, INT_TYP) # TODO VAL_TYP)
+@leaves_on_stack(LIST_TYP)
+class AppendList(ByteCode):
+    BYTE_CODE = unique_code()
+    def __init__(self):
+        pass
+
 
 # remove comment one by one!
@@ -176,33 +199,6 @@
 #    pass
 #
-#@requires_stack(LIST_TYP, INT_TYP, INT_TYP) # TODO VAL_TYP
-#class InsertList(ByteCode):
-#    BYTE_CODE = unique_code()
-#    @requires_param(INT_TYP)
-#    def __init__(self, index):
-#        self.index = index
-#    def encode(self, ctx):
-#        ctx.append_byte(self.BYTE_CODE)
-#        ctx.append_int(self.index)
-#
-#@requires_stack(LIST_TYP, INT_TYP)
-#@leaves_on_stack(LIST_TYP)
-#class DelList(ByteCode):
-#    BYTE_CODE = unique_code()
-#    @requires_param(INT_TYP)
-#    def __init__(self, index):
-#        self.index = index
-#    def encode(self, ctx):
-#        ctx.append_byte(self.BYTE_CODE)
-#        ctx.append_int(self.index)
-#
-#@requires_stack(LIST_TYP, INT_TYP, INT_TYP) # TODO VAL_TYP)
-#class AppendList(ByteCode):
-#    BYTE_CODE = unique_code()
-#    def __init__(self):
-#        pass
-#
 #@requires_stack(LIST_TYP)
 #@leaves_on_stack(LIST_TYP, INT_TYP)
 #class LenList(ByteCode):
diff --git a/rpython/jit/backend/llsupport/tl/interp.py b/rpython/jit/backend/llsupport/tl/interp.py
--- a/rpython/jit/backend/llsupport/tl/interp.py
+++ b/rpython/jit/backend/llsupport/tl/interp.py
@@ -84,6 +84,21 @@
             size = runpack('h', bytecode[i+1:i+3])
             stack.append(space.wrap([None] * size))
             i += 2
+        elif opcode == code.AppendList.BYTE_CODE:
+            w_val = stack.pop()
+            w_lst = stack.peek(0)
+            w_lst.items.append(w_val)
+        elif opcode == code.InsertList.BYTE_CODE:
+            w_val = stack.pop()
+            w_idx = stack.pop()
+            w_lst = stack.peek(0)
+            w_lst.items[w_idx.value] = w_val
+            # index error, just crash here!
+        elif opcode == code.DelList.BYTE_CODE:
+            w_idx = stack.pop()
+            w_lst = stack.peek(0)
+            del w_lst.items[w_idx.value]
+            # index error, just crash the machine!!
         else:
            raise NotImplementedError
         return i + 1
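To make the stack discipline of the three new bytecodes easier to follow, here is a minimal plain-Python sketch of the same semantics. It stands in for the wrapped W_* objects and the Space of the real interpreter above, and the handler names are illustrative only. Note that InsertList, as implemented in this changeset, stores into an existing slot (w_lst.items[w_idx.value] = w_val) rather than growing the list:

    # Plain-Python model of the new list opcodes; the operand stack is a bare list.
    def append_list(stack):
        # stack: [..., list, value] -> [..., list]
        val = stack.pop()
        stack[-1].append(val)

    def insert_list(stack):
        # stack: [..., list, index, value] -> [..., list]
        # overwrites an existing slot; a bad index simply raises IndexError
        val = stack.pop()
        idx = stack.pop()
        stack[-1][idx] = val

    def del_list(stack):
        # stack: [..., list, index] -> [..., list]
        idx = stack.pop()
        del stack[-1][idx]

    stack = [['a', 'b'], 1, 'B']      # list, index, value
    insert_list(stack)
    assert stack == [['a', 'B']]
    stack.append(0)                   # push an index
    del_list(stack)
    assert stack == [['B']]
    stack.append('c')                 # push a value
    append_list(stack)
    assert stack == [['B', 'c']]

In all three cases the list itself stays on the stack (the interpreter uses stack.peek(0), not pop), matching the leaves_on_stack(LIST_TYP) annotations above.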
diff --git a/rpython/jit/backend/llsupport/tl/stack.py b/rpython/jit/backend/llsupport/tl/stack.py
--- a/rpython/jit/backend/llsupport/tl/stack.py
+++ b/rpython/jit/backend/llsupport/tl/stack.py
@@ -17,6 +17,12 @@
         self.stack[self.stackpos] = elem
         self.stackpos += 1
 
+    def peek(self, i):
+        stackpos = self.stackpos - i - 1
+        if stackpos < 0:
+            raise IndexError
+        return self.stack[stackpos]
+
     def pop(self):
         stackpos = self.stackpos - 1
         if stackpos < 0:
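The new peek() exists mainly so the test generator can look at values it has already placed on the stack: the code_strategies.py hunk that follows peeks at the previously generated list and only draws an index once assume() has ruled out the empty-list case, which is how this changeset guarantees that no generated program raises IndexError. As a stand-alone sketch of that pattern with plain hypothesis (the strategy and test names here are made up for illustration, not part of the changeset):

    from hypothesis import given, assume, strategies as st

    @st.composite
    def list_and_valid_index(draw):
        # draw the list first, then an index that is guaranteed to be in range
        lst = draw(st.lists(st.integers(), max_size=8))
        assume(len(lst) > 0)          # discard the empty-list case
        idx = draw(st.integers(min_value=0, max_value=len(lst) - 1))
        return lst, idx

    @given(list_and_valid_index())
    def test_del_with_valid_index(pair):
        lst, idx = pair
        del lst[idx]                  # never raises IndexError

    test_del_with_valid_index()       # runs the hypothesis search when executed directly

hypothesis simply discards any draw where assume() fails, so every surviving (list, index) pair is valid by construction.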
diff --git a/rpython/jit/backend/llsupport/tl/test/code_strategies.py b/rpython/jit/backend/llsupport/tl/test/code_strategies.py
--- a/rpython/jit/backend/llsupport/tl/test/code_strategies.py
+++ b/rpython/jit/backend/llsupport/tl/test/code_strategies.py
@@ -1,15 +1,18 @@
 from hypothesis import strategies as st
+from hypothesis.control import assume
 from hypothesis.strategies import defines_strategy, composite
 from rpython.jit.backend.llsupport.tl import code, interp, stack
 from rpython.jit.backend.llsupport.tl.code import (all_types,
         INT_TYP, STR_TYP, LIST_TYP, SHORT_TYP, BYTE_TYP,
-        COND_TYP)
+        COND_TYP, IDX_TYP)
 from hypothesis.searchstrategy.strategies import OneOfStrategy
 from hypothesis.searchstrategy.collections import TupleStrategy
 
 def get_strategy_for(typ):
     if typ == INT_TYP:
         return st.integers(min_value=-2**31, max_value=2**31-1)
+    elif typ == IDX_TYP:
+        return st.integers(min_value=-2**31, max_value=2**31-1)
     elif typ == SHORT_TYP:
         return st.integers(min_value=-2**15, max_value=2**15-1)
     elif typ == BYTE_TYP:
@@ -23,21 +26,23 @@
     else:
         raise NotImplementedError("type: " + str(typ))
 
-@defines_strategy
-def wrapped_tl_objects(self, types=all_types):
-    if len(types) == 1:
-        return get_strategy_for(types[0])
-    return OneOfStrategy([get_strategy_for(t) for t in types])
-
 STD_SPACE = interp.Space()
 
 @composite
 def runtime_stack(draw, clazz):
     strats = [get_strategy_for(t) for t in clazz._stack_types]
-    st = stack.Stack(len(strats))
-    for strat in strats:
-        st.append(STD_SPACE.wrap(draw(strat)))
-    return st
+    stack_obj = stack.Stack(len(strats))
+    for i,strat in enumerate(strats):
+        if clazz._stack_types[i] == IDX_TYP:
+            # it is only valid to access a list with a valid index!
+            w_list = stack_obj.peek(i-1)
+            l = len(w_list.items)
+            assume(l > 0)
+            integrals = st.integers(min_value=0, max_value=l-1)
+            stack_obj.append(STD_SPACE.wrap(draw(integrals)))
+            continue
+        stack_obj.append(STD_SPACE.wrap(draw(strat)))
+    return stack_obj
 
 def byte_code_classes():
     for name, clazz in code.__dict__.items():
@@ -45,9 +50,9 @@
         yield clazz
 
 @composite
-def single_bytecode(draw, clazzes=st.sampled_from(byte_code_classes()),
-                    integrals=st.integers(),
-                    texts=st.text()):
+def single_bytecode(draw,
+        clazzes=st.sampled_from(byte_code_classes()),
+        integrals=st.integers(), texts=st.text()):
     clazz = draw(clazzes)
     inst = clazz.create_from(draw, get_strategy_for)
     bytecode, consts = code.Context().transform([inst])

From pypy.commits at gmail.com Tue Mar 1 02:35:30 2016
From: pypy.commits at gmail.com (plan_rich)
Date: Mon, 29 Feb 2016 23:35:30 -0800 (PST)
Subject: [pypy-commit] pypy memop-simplify3: merged default
Message-ID: <56d54642.4c181c0a.f23b9.ffffc413@mx.google.com>

Author: Richard Plangger
Branch: memop-simplify3
Changeset: r82622:6593ec12ff85
Date: 2016-03-01 08:16 +0100
http://bitbucket.org/pypy/pypy/changeset/6593ec12ff85/

Log: merged default

diff too long, truncating to 2000 out of 25290 lines

diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -22,6 +22,7 @@ ^pypy/module/cpyext/test/.+\.obj$ ^pypy/module/cpyext/test/.+\.manifest$ ^pypy/module/test_lib_pypy/ctypes_tests/.+\.o$ +^pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test\.o$ ^pypy/module/cppyy/src/.+\.o$ ^pypy/module/cppyy/bench/.+\.so$ ^pypy/module/cppyy/bench/.+\.root$ @@ -35,7 +36,6 @@ ^pypy/module/test_lib_pypy/cffi_tests/__pycache__.+$ ^pypy/doc/.+\.html$ ^pypy/doc/config/.+\.rst$ -^pypy/doc/basicblock\.asc$ ^pypy/doc/.+\.svninfo$ ^rpython/translator/c/src/libffi_msvc/.+\.obj$ ^rpython/translator/c/src/libffi_msvc/.+\.dll$ @@ -45,52 +45,33 @@ ^rpython/translator/c/src/cjkcodecs/.+\.obj$ ^rpython/translator/c/src/stacklet/.+\.o$ ^rpython/translator/c/src/.+\.o$ -^rpython/translator/jvm/\.project$ -^rpython/translator/jvm/\.classpath$ -^rpython/translator/jvm/eclipse-bin$ -^rpython/translator/jvm/src/pypy/.+\.class$ -^rpython/translator/benchmark/docutils$ -^rpython/translator/benchmark/templess$ -^rpython/translator/benchmark/gadfly$ -^rpython/translator/benchmark/mako$ -^rpython/translator/benchmark/bench-custom\.benchmark_result$ -^rpython/translator/benchmark/shootout_benchmarks$ +^rpython/translator/llvm/.+\.so$ ^rpython/translator/goal/target.+-c$ ^rpython/translator/goal/.+\.exe$ ^rpython/translator/goal/.+\.dll$ ^pypy/goal/pypy-translation-snapshot$ ^pypy/goal/pypy-c -^pypy/goal/pypy-jvm -^pypy/goal/pypy-jvm.jar ^pypy/goal/.+\.exe$ ^pypy/goal/.+\.dll$ ^pypy/goal/.+\.lib$ ^pypy/_cache$ -^pypy/doc/statistic/.+\.html$ -^pypy/doc/statistic/.+\.eps$ -^pypy/doc/statistic/.+\.pdf$ -^rpython/translator/cli/src/pypylib\.dll$ -^rpython/translator/cli/src/query\.exe$ -^rpython/translator/cli/src/main\.exe$ +^lib-python/2.7/lib2to3/.+\.pickle$ ^lib_pypy/__pycache__$ ^lib_pypy/ctypes_config_cache/_.+_cache\.py$ ^lib_pypy/ctypes_config_cache/_.+_.+_\.py$ -^rpython/translator/cli/query-descriptions$ +^lib_pypy/_libmpdec/.+.o$ ^pypy/doc/discussion/.+\.html$ ^include/.+\.h$ ^include/.+\.inl$ ^pypy/doc/_build/.*$ ^pypy/doc/config/.+\.html$ ^pypy/doc/config/style\.css$ -^pypy/doc/jit/.+\.html$ -^pypy/doc/jit/style\.css$ ^pypy/doc/image/lattice1\.png$ ^pypy/doc/image/lattice2\.png$ ^pypy/doc/image/lattice3\.png$ ^pypy/doc/image/stackless_informal\.png$ ^pypy/doc/image/parsing_example.+\.png$
^rpython/doc/_build/.*$ -^pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test\.o$ ^compiled ^.git/ ^release/ diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -41,29 +41,29 @@ Amaury Forgeot d'Arc Antonio Cuni Samuele Pedroni + Matti Picus Alex Gaynor Brian Kearns - Matti Picus Philip Jenvey Michael Hudson + Ronan Lamy David Schneider + Manuel Jacob Holger Krekel Christian Tismer Hakan Ardo - Manuel Jacob - Ronan Lamy Benjamin Peterson + Richard Plangger Anders Chrigstrom Eric van Riet Paap Wim Lavrijsen - Richard Plangger Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen + Remi Meier Lukas Diekmann Sven Hager Anders Lehmann - Remi Meier Aurelien Campeas Niklaus Haldimann Camillo Bruni @@ -72,8 +72,8 @@ Romain Guillebert Leonardo Santagada Seo Sanghyeon + Ronny Pfannschmidt Justin Peel - Ronny Pfannschmidt David Edelsohn Anders Hammarquist Jakub Gustak @@ -95,6 +95,7 @@ Tyler Wade Michael Foord Stephan Diehl + Vincent Legoll Stefan Schwarzer Valentino Volonghi Tomek Meka @@ -105,9 +106,9 @@ Jean-Paul Calderone Timo Paulssen Squeaky + Marius Gedminas Alexandre Fayolle Simon Burton - Marius Gedminas Martin Matusiak Konstantin Lopuhin Wenzhu Man @@ -116,16 +117,20 @@ Ivan Sichmann Freitas Greg Price Dario Bertini + Stefano Rivera Mark Pearse Simon Cross Andreas Stührk - Stefano Rivera + Edd Barrett Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Jeremy Thurgood Paweł Piotr Przeradowski + Spenser Bauman Paul deGrandis Ilya Osadchiy + marky1991 Tobias Oberstein Adrian Kuhn Boris Feigin @@ -134,14 +139,12 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Edd Barrett + Tobias Pape Wanja Saatkamp Gerald Klix Mike Blume - Tobias Pape Oscar Nierstrasz Stefan H. Muller - Jeremy Thurgood Rami Chowdhury Eugene Oden Henry Mason @@ -153,6 +156,8 @@ Lukas Renggli Guenter Jantzen Ned Batchelder + Tim Felgentreff + Anton Gulenko Amit Regmi Ben Young Nicolas Chauvat @@ -162,12 +167,12 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira - Tim Felgentreff + Yichao Yu Rocco Moretti Gintautas Miliauskas Michael Twomey Lucian Branescu Mihaila - Yichao Yu + Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -191,33 +196,33 @@ Stanislaw Halik Mikael Schönenberg Berkin Ilbeyi - Elmo M?ntynen + Elmo Mäntynen + Faye Zhao Jonathan David Riehl Anders Qvist Corbin Simpson Chirag Jadwani Beatrice During Alex Perry - Vincent Legoll + Vaibhav Sood Alan McIntyre - Spenser Bauman + William Leslie Alexander Sedov Attila Gobi + Jasper.Schulz Christopher Pope - Devin Jeanpierre - Vaibhav Sood Christian Tismer Marc Abramowitz Dan Stromberg Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan + Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang - Anton Gulenko Gabriel Lukas Vacek Andrew Dalke @@ -225,6 +230,7 @@ Jakub Stasiak Nathan Taylor Vladimir Kryachko + Omer Katz Jacek Generowicz Alejandro J. Cura Jacob Oscarson @@ -239,6 +245,7 @@ Lars Wassermann Philipp Rustemeuer Henrik Vendelbo + Richard Lancaster Dan Buch Miguel de Val Borro Artur Lisiecki @@ -250,18 +257,18 @@ Tomo Cocoa Kim Jin Su Toni Mattis + Amber Brown Lucas Stadler Julian Berman Markus Holtermann roberto at goyle Yury V. 
Zaytsev Anna Katrina Dominguez - William Leslie Bobby Impollonia - Faye Zhao timo at eistee.fritz.box Andrew Thompson Yusei Tahara + Aaron Tubbs Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado @@ -273,6 +280,7 @@ Christopher Armstrong Michael Hudson-Doyle Anders Sigfridsson + Nikolay Zinov Yasir Suhail Jason Michalski rafalgalczynski at gmail.com @@ -282,6 +290,7 @@ Gustavo Niemeyer Stephan Busemann Rafał Gałczyński + Matt Bogosian Christian Muirhead Berker Peksag James Lan @@ -316,9 +325,9 @@ Stefan Marr jiaaro Mads Kiilerich - Richard Lancaster opassembler.py Antony Lee + Jason Madden Yaroslav Fedevych Jim Hunziker Markus Unterwaditzer @@ -327,6 +336,7 @@ squeaky Zearin soareschen + Jonas Pfannschmidt Kurt Griffiths Mike Bayer Matthew Miller diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile @@ -39,5 +39,5 @@ # runs. We cannot get their original value either: # http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html -cffi_imports: +cffi_imports: pypy-c PYTHONPATH=. ./pypy-c pypy/tool/build_cffi_imports.py diff --git a/dotviewer/drawgraph.py b/dotviewer/drawgraph.py --- a/dotviewer/drawgraph.py +++ b/dotviewer/drawgraph.py @@ -14,12 +14,661 @@ FONT = os.path.join(this_dir, 'font', 'DroidSans.ttf') FIXEDFONT = os.path.join(this_dir, 'font', 'DroidSansMono.ttf') COLOR = { - 'black': (0,0,0), - 'white': (255,255,255), - 'red': (255,0,0), - 'green': (0,255,0), - 'blue': (0,0,255), - 'yellow': (255,255,0), + 'aliceblue': (240, 248, 255), + 'antiquewhite': (250, 235, 215), + 'antiquewhite1': (255, 239, 219), + 'antiquewhite2': (238, 223, 204), + 'antiquewhite3': (205, 192, 176), + 'antiquewhite4': (139, 131, 120), + 'aquamarine': (127, 255, 212), + 'aquamarine1': (127, 255, 212), + 'aquamarine2': (118, 238, 198), + 'aquamarine3': (102, 205, 170), + 'aquamarine4': (69, 139, 116), + 'azure': (240, 255, 255), + 'azure1': (240, 255, 255), + 'azure2': (224, 238, 238), + 'azure3': (193, 205, 205), + 'azure4': (131, 139, 139), + 'beige': (245, 245, 220), + 'bisque': (255, 228, 196), + 'bisque1': (255, 228, 196), + 'bisque2': (238, 213, 183), + 'bisque3': (205, 183, 158), + 'bisque4': (139, 125, 107), + 'black': (0, 0, 0), + 'blanchedalmond': (255, 235, 205), + 'blue': (0, 0, 255), + 'blue1': (0, 0, 255), + 'blue2': (0, 0, 238), + 'blue3': (0, 0, 205), + 'blue4': (0, 0, 139), + 'blueviolet': (138, 43, 226), + 'brown': (165, 42, 42), + 'brown1': (255, 64, 64), + 'brown2': (238, 59, 59), + 'brown3': (205, 51, 51), + 'brown4': (139, 35, 35), + 'burlywood': (222, 184, 135), + 'burlywood1': (255, 211, 155), + 'burlywood2': (238, 197, 145), + 'burlywood3': (205, 170, 125), + 'burlywood4': (139, 115, 85), + 'cadetblue': (95, 158, 160), + 'cadetblue1': (152, 245, 255), + 'cadetblue2': (142, 229, 238), + 'cadetblue3': (122, 197, 205), + 'cadetblue4': (83, 134, 139), + 'chartreuse': (127, 255, 0), + 'chartreuse1': (127, 255, 0), + 'chartreuse2': (118, 238, 0), + 'chartreuse3': (102, 205, 0), + 'chartreuse4': (69, 139, 0), + 'chocolate': (210, 105, 30), + 'chocolate1': (255, 127, 36), + 'chocolate2': (238, 118, 33), + 'chocolate3': (205, 102, 29), + 'chocolate4': (139, 69, 19), + 'coral': (255, 127, 80), + 'coral1': (255, 114, 86), + 'coral2': (238, 106, 80), + 'coral3': (205, 91, 69), + 'coral4': (139, 62, 47), + 'cornflowerblue': (100, 149, 237), + 'cornsilk': (255, 248, 220), + 'cornsilk1': (255, 248, 220), + 'cornsilk2': (238, 232, 205), + 'cornsilk3': (205, 200, 177), + 'cornsilk4': (139, 136, 120), + 'crimson': (220, 20, 60), + 'cyan': (0, 255, 255), + 'cyan1': (0, 255, 
255), + 'cyan2': (0, 238, 238), + 'cyan3': (0, 205, 205), + 'cyan4': (0, 139, 139), + 'darkgoldenrod': (184, 134, 11), + 'darkgoldenrod1': (255, 185, 15), + 'darkgoldenrod2': (238, 173, 14), + 'darkgoldenrod3': (205, 149, 12), + 'darkgoldenrod4': (139, 101, 8), + 'darkgreen': (0, 100, 0), + 'darkkhaki': (189, 183, 107), + 'darkolivegreen': (85, 107, 47), + 'darkolivegreen1': (202, 255, 112), + 'darkolivegreen2': (188, 238, 104), + 'darkolivegreen3': (162, 205, 90), + 'darkolivegreen4': (110, 139, 61), + 'darkorange': (255, 140, 0), + 'darkorange1': (255, 127, 0), + 'darkorange2': (238, 118, 0), + 'darkorange3': (205, 102, 0), + 'darkorange4': (139, 69, 0), + 'darkorchid': (153, 50, 204), + 'darkorchid1': (191, 62, 255), + 'darkorchid2': (178, 58, 238), + 'darkorchid3': (154, 50, 205), + 'darkorchid4': (104, 34, 139), + 'darksalmon': (233, 150, 122), + 'darkseagreen': (143, 188, 143), + 'darkseagreen1': (193, 255, 193), + 'darkseagreen2': (180, 238, 180), + 'darkseagreen3': (155, 205, 155), + 'darkseagreen4': (105, 139, 105), + 'darkslateblue': (72, 61, 139), + 'darkslategray': (47, 79, 79), + 'darkslategray1': (151, 255, 255), + 'darkslategray2': (141, 238, 238), + 'darkslategray3': (121, 205, 205), + 'darkslategray4': (82, 139, 139), + 'darkslategrey': (47, 79, 79), + 'darkturquoise': (0, 206, 209), + 'darkviolet': (148, 0, 211), + 'deeppink': (255, 20, 147), + 'deeppink1': (255, 20, 147), + 'deeppink2': (238, 18, 137), + 'deeppink3': (205, 16, 118), + 'deeppink4': (139, 10, 80), + 'deepskyblue': (0, 191, 255), + 'deepskyblue1': (0, 191, 255), + 'deepskyblue2': (0, 178, 238), + 'deepskyblue3': (0, 154, 205), + 'deepskyblue4': (0, 104, 139), + 'dimgray': (105, 105, 105), + 'dimgrey': (105, 105, 105), + 'dodgerblue': (30, 144, 255), + 'dodgerblue1': (30, 144, 255), + 'dodgerblue2': (28, 134, 238), + 'dodgerblue3': (24, 116, 205), + 'dodgerblue4': (16, 78, 139), + 'firebrick': (178, 34, 34), + 'firebrick1': (255, 48, 48), + 'firebrick2': (238, 44, 44), + 'firebrick3': (205, 38, 38), + 'firebrick4': (139, 26, 26), + 'floralwhite': (255, 250, 240), + 'forestgreen': (34, 139, 34), + 'gainsboro': (220, 220, 220), + 'ghostwhite': (248, 248, 255), + 'gold': (255, 215, 0), + 'gold1': (255, 215, 0), + 'gold2': (238, 201, 0), + 'gold3': (205, 173, 0), + 'gold4': (139, 117, 0), + 'goldenrod': (218, 165, 32), + 'goldenrod1': (255, 193, 37), + 'goldenrod2': (238, 180, 34), + 'goldenrod3': (205, 155, 29), + 'goldenrod4': (139, 105, 20), + 'gray': (192, 192, 192), + 'gray0': (0, 0, 0), + 'gray1': (3, 3, 3), + 'gray10': (26, 26, 26), + 'gray100': (255, 255, 255), + 'gray11': (28, 28, 28), + 'gray12': (31, 31, 31), + 'gray13': (33, 33, 33), + 'gray14': (36, 36, 36), + 'gray15': (38, 38, 38), + 'gray16': (41, 41, 41), + 'gray17': (43, 43, 43), + 'gray18': (46, 46, 46), + 'gray19': (48, 48, 48), + 'gray2': (5, 5, 5), + 'gray20': (51, 51, 51), + 'gray21': (54, 54, 54), + 'gray22': (56, 56, 56), + 'gray23': (59, 59, 59), + 'gray24': (61, 61, 61), + 'gray25': (64, 64, 64), + 'gray26': (66, 66, 66), + 'gray27': (69, 69, 69), + 'gray28': (71, 71, 71), + 'gray29': (74, 74, 74), + 'gray3': (8, 8, 8), + 'gray30': (77, 77, 77), + 'gray31': (79, 79, 79), + 'gray32': (82, 82, 82), + 'gray33': (84, 84, 84), + 'gray34': (87, 87, 87), + 'gray35': (89, 89, 89), + 'gray36': (92, 92, 92), + 'gray37': (94, 94, 94), + 'gray38': (97, 97, 97), + 'gray39': (99, 99, 99), + 'gray4': (10, 10, 10), + 'gray40': (102, 102, 102), + 'gray41': (105, 105, 105), + 'gray42': (107, 107, 107), + 'gray43': (110, 110, 110), + 'gray44': (112, 112, 
112), + 'gray45': (115, 115, 115), + 'gray46': (117, 117, 117), + 'gray47': (120, 120, 120), + 'gray48': (122, 122, 122), + 'gray49': (125, 125, 125), + 'gray5': (13, 13, 13), + 'gray50': (127, 127, 127), + 'gray51': (130, 130, 130), + 'gray52': (133, 133, 133), + 'gray53': (135, 135, 135), + 'gray54': (138, 138, 138), + 'gray55': (140, 140, 140), + 'gray56': (143, 143, 143), + 'gray57': (145, 145, 145), + 'gray58': (148, 148, 148), + 'gray59': (150, 150, 150), + 'gray6': (15, 15, 15), + 'gray60': (153, 153, 153), + 'gray61': (156, 156, 156), + 'gray62': (158, 158, 158), + 'gray63': (161, 161, 161), + 'gray64': (163, 163, 163), + 'gray65': (166, 166, 166), + 'gray66': (168, 168, 168), + 'gray67': (171, 171, 171), + 'gray68': (173, 173, 173), + 'gray69': (176, 176, 176), + 'gray7': (18, 18, 18), + 'gray70': (179, 179, 179), + 'gray71': (181, 181, 181), + 'gray72': (184, 184, 184), + 'gray73': (186, 186, 186), + 'gray74': (189, 189, 189), + 'gray75': (191, 191, 191), + 'gray76': (194, 194, 194), + 'gray77': (196, 196, 196), + 'gray78': (199, 199, 199), + 'gray79': (201, 201, 201), + 'gray8': (20, 20, 20), + 'gray80': (204, 204, 204), + 'gray81': (207, 207, 207), + 'gray82': (209, 209, 209), + 'gray83': (212, 212, 212), + 'gray84': (214, 214, 214), + 'gray85': (217, 217, 217), + 'gray86': (219, 219, 219), + 'gray87': (222, 222, 222), + 'gray88': (224, 224, 224), + 'gray89': (227, 227, 227), + 'gray9': (23, 23, 23), + 'gray90': (229, 229, 229), + 'gray91': (232, 232, 232), + 'gray92': (235, 235, 235), + 'gray93': (237, 237, 237), + 'gray94': (240, 240, 240), + 'gray95': (242, 242, 242), + 'gray96': (245, 245, 245), + 'gray97': (247, 247, 247), + 'gray98': (250, 250, 250), + 'gray99': (252, 252, 252), + 'green': (0, 255, 0), + 'green1': (0, 255, 0), + 'green2': (0, 238, 0), + 'green3': (0, 205, 0), + 'green4': (0, 139, 0), + 'greenyellow': (173, 255, 47), + 'grey': (192, 192, 192), + 'grey0': (0, 0, 0), + 'grey1': (3, 3, 3), + 'grey10': (26, 26, 26), + 'grey100': (255, 255, 255), + 'grey11': (28, 28, 28), + 'grey12': (31, 31, 31), + 'grey13': (33, 33, 33), + 'grey14': (36, 36, 36), + 'grey15': (38, 38, 38), + 'grey16': (41, 41, 41), + 'grey17': (43, 43, 43), + 'grey18': (46, 46, 46), + 'grey19': (48, 48, 48), + 'grey2': (5, 5, 5), + 'grey20': (51, 51, 51), + 'grey21': (54, 54, 54), + 'grey22': (56, 56, 56), + 'grey23': (59, 59, 59), + 'grey24': (61, 61, 61), + 'grey25': (64, 64, 64), + 'grey26': (66, 66, 66), + 'grey27': (69, 69, 69), + 'grey28': (71, 71, 71), + 'grey29': (74, 74, 74), + 'grey3': (8, 8, 8), + 'grey30': (77, 77, 77), + 'grey31': (79, 79, 79), + 'grey32': (82, 82, 82), + 'grey33': (84, 84, 84), + 'grey34': (87, 87, 87), + 'grey35': (89, 89, 89), + 'grey36': (92, 92, 92), + 'grey37': (94, 94, 94), + 'grey38': (97, 97, 97), + 'grey39': (99, 99, 99), + 'grey4': (10, 10, 10), + 'grey40': (102, 102, 102), + 'grey41': (105, 105, 105), + 'grey42': (107, 107, 107), + 'grey43': (110, 110, 110), + 'grey44': (112, 112, 112), + 'grey45': (115, 115, 115), + 'grey46': (117, 117, 117), + 'grey47': (120, 120, 120), + 'grey48': (122, 122, 122), + 'grey49': (125, 125, 125), + 'grey5': (13, 13, 13), + 'grey50': (127, 127, 127), + 'grey51': (130, 130, 130), + 'grey52': (133, 133, 133), + 'grey53': (135, 135, 135), + 'grey54': (138, 138, 138), + 'grey55': (140, 140, 140), + 'grey56': (143, 143, 143), + 'grey57': (145, 145, 145), + 'grey58': (148, 148, 148), + 'grey59': (150, 150, 150), + 'grey6': (15, 15, 15), + 'grey60': (153, 153, 153), + 'grey61': (156, 156, 156), + 'grey62': (158, 158, 158), + 
'grey63': (161, 161, 161), + 'grey64': (163, 163, 163), + 'grey65': (166, 166, 166), + 'grey66': (168, 168, 168), + 'grey67': (171, 171, 171), + 'grey68': (173, 173, 173), + 'grey69': (176, 176, 176), + 'grey7': (18, 18, 18), + 'grey70': (179, 179, 179), + 'grey71': (181, 181, 181), + 'grey72': (184, 184, 184), + 'grey73': (186, 186, 186), + 'grey74': (189, 189, 189), + 'grey75': (191, 191, 191), + 'grey76': (194, 194, 194), + 'grey77': (196, 196, 196), + 'grey78': (199, 199, 199), + 'grey79': (201, 201, 201), + 'grey8': (20, 20, 20), + 'grey80': (204, 204, 204), + 'grey81': (207, 207, 207), + 'grey82': (209, 209, 209), + 'grey83': (212, 212, 212), + 'grey84': (214, 214, 214), + 'grey85': (217, 217, 217), + 'grey86': (219, 219, 219), + 'grey87': (222, 222, 222), + 'grey88': (224, 224, 224), + 'grey89': (227, 227, 227), + 'grey9': (23, 23, 23), + 'grey90': (229, 229, 229), + 'grey91': (232, 232, 232), + 'grey92': (235, 235, 235), + 'grey93': (237, 237, 237), + 'grey94': (240, 240, 240), + 'grey95': (242, 242, 242), + 'grey96': (245, 245, 245), + 'grey97': (247, 247, 247), + 'grey98': (250, 250, 250), + 'grey99': (252, 252, 252), + 'honeydew': (240, 255, 240), + 'honeydew1': (240, 255, 240), + 'honeydew2': (224, 238, 224), + 'honeydew3': (193, 205, 193), + 'honeydew4': (131, 139, 131), + 'hotpink': (255, 105, 180), + 'hotpink1': (255, 110, 180), + 'hotpink2': (238, 106, 167), + 'hotpink3': (205, 96, 144), + 'hotpink4': (139, 58, 98), + 'indianred': (205, 92, 92), + 'indianred1': (255, 106, 106), + 'indianred2': (238, 99, 99), + 'indianred3': (205, 85, 85), + 'indianred4': (139, 58, 58), + 'indigo': (75, 0, 130), + 'invis': (255, 255, 254), + 'ivory': (255, 255, 240), + 'ivory1': (255, 255, 240), + 'ivory2': (238, 238, 224), + 'ivory3': (205, 205, 193), + 'ivory4': (139, 139, 131), + 'khaki': (240, 230, 140), + 'khaki1': (255, 246, 143), + 'khaki2': (238, 230, 133), + 'khaki3': (205, 198, 115), + 'khaki4': (139, 134, 78), + 'lavender': (230, 230, 250), + 'lavenderblush': (255, 240, 245), + 'lavenderblush1': (255, 240, 245), + 'lavenderblush2': (238, 224, 229), + 'lavenderblush3': (205, 193, 197), + 'lavenderblush4': (139, 131, 134), + 'lawngreen': (124, 252, 0), + 'lemonchiffon': (255, 250, 205), + 'lemonchiffon1': (255, 250, 205), + 'lemonchiffon2': (238, 233, 191), + 'lemonchiffon3': (205, 201, 165), + 'lemonchiffon4': (139, 137, 112), + 'lightblue': (173, 216, 230), + 'lightblue1': (191, 239, 255), + 'lightblue2': (178, 223, 238), + 'lightblue3': (154, 192, 205), + 'lightblue4': (104, 131, 139), + 'lightcoral': (240, 128, 128), + 'lightcyan': (224, 255, 255), + 'lightcyan1': (224, 255, 255), + 'lightcyan2': (209, 238, 238), + 'lightcyan3': (180, 205, 205), + 'lightcyan4': (122, 139, 139), + 'lightgoldenrod': (238, 221, 130), + 'lightgoldenrod1': (255, 236, 139), + 'lightgoldenrod2': (238, 220, 130), + 'lightgoldenrod3': (205, 190, 112), + 'lightgoldenrod4': (139, 129, 76), + 'lightgoldenrodyellow': (250, 250, 210), + 'lightgray': (211, 211, 211), + 'lightgrey': (211, 211, 211), + 'lightpink': (255, 182, 193), + 'lightpink1': (255, 174, 185), + 'lightpink2': (238, 162, 173), + 'lightpink3': (205, 140, 149), + 'lightpink4': (139, 95, 101), + 'lightsalmon': (255, 160, 122), + 'lightsalmon1': (255, 160, 122), + 'lightsalmon2': (238, 149, 114), + 'lightsalmon3': (205, 129, 98), + 'lightsalmon4': (139, 87, 66), + 'lightseagreen': (32, 178, 170), + 'lightskyblue': (135, 206, 250), + 'lightskyblue1': (176, 226, 255), + 'lightskyblue2': (164, 211, 238), + 'lightskyblue3': (141, 182, 205), + 
'lightskyblue4': (96, 123, 139), + 'lightslateblue': (132, 112, 255), + 'lightslategray': (119, 136, 153), + 'lightslategrey': (119, 136, 153), + 'lightsteelblue': (176, 196, 222), + 'lightsteelblue1': (202, 225, 255), + 'lightsteelblue2': (188, 210, 238), + 'lightsteelblue3': (162, 181, 205), + 'lightsteelblue4': (110, 123, 139), + 'lightyellow': (255, 255, 224), + 'lightyellow1': (255, 255, 224), + 'lightyellow2': (238, 238, 209), + 'lightyellow3': (205, 205, 180), + 'lightyellow4': (139, 139, 122), + 'limegreen': (50, 205, 50), + 'linen': (250, 240, 230), + 'magenta': (255, 0, 255), + 'magenta1': (255, 0, 255), + 'magenta2': (238, 0, 238), + 'magenta3': (205, 0, 205), + 'magenta4': (139, 0, 139), + 'maroon': (176, 48, 96), + 'maroon1': (255, 52, 179), + 'maroon2': (238, 48, 167), + 'maroon3': (205, 41, 144), + 'maroon4': (139, 28, 98), + 'mediumaquamarine': (102, 205, 170), + 'mediumblue': (0, 0, 205), + 'mediumorchid': (186, 85, 211), + 'mediumorchid1': (224, 102, 255), + 'mediumorchid2': (209, 95, 238), + 'mediumorchid3': (180, 82, 205), + 'mediumorchid4': (122, 55, 139), + 'mediumpurple': (147, 112, 219), + 'mediumpurple1': (171, 130, 255), + 'mediumpurple2': (159, 121, 238), + 'mediumpurple3': (137, 104, 205), + 'mediumpurple4': (93, 71, 139), + 'mediumseagreen': (60, 179, 113), + 'mediumslateblue': (123, 104, 238), + 'mediumspringgreen': (0, 250, 154), + 'mediumturquoise': (72, 209, 204), + 'mediumvioletred': (199, 21, 133), + 'midnightblue': (25, 25, 112), + 'mintcream': (245, 255, 250), + 'mistyrose': (255, 228, 225), + 'mistyrose1': (255, 228, 225), + 'mistyrose2': (238, 213, 210), + 'mistyrose3': (205, 183, 181), + 'mistyrose4': (139, 125, 123), + 'moccasin': (255, 228, 181), + 'navajowhite': (255, 222, 173), + 'navajowhite1': (255, 222, 173), + 'navajowhite2': (238, 207, 161), + 'navajowhite3': (205, 179, 139), + 'navajowhite4': (139, 121, 94), + 'navy': (0, 0, 128), + 'navyblue': (0, 0, 128), + 'none': (255, 255, 254), + 'oldlace': (253, 245, 230), + 'olivedrab': (107, 142, 35), + 'olivedrab1': (192, 255, 62), + 'olivedrab2': (179, 238, 58), + 'olivedrab3': (154, 205, 50), + 'olivedrab4': (105, 139, 34), + 'orange': (255, 165, 0), + 'orange1': (255, 165, 0), + 'orange2': (238, 154, 0), + 'orange3': (205, 133, 0), + 'orange4': (139, 90, 0), + 'orangered': (255, 69, 0), + 'orangered1': (255, 69, 0), + 'orangered2': (238, 64, 0), + 'orangered3': (205, 55, 0), + 'orangered4': (139, 37, 0), + 'orchid': (218, 112, 214), + 'orchid1': (255, 131, 250), + 'orchid2': (238, 122, 233), + 'orchid3': (205, 105, 201), + 'orchid4': (139, 71, 137), + 'palegoldenrod': (238, 232, 170), + 'palegreen': (152, 251, 152), + 'palegreen1': (154, 255, 154), + 'palegreen2': (144, 238, 144), + 'palegreen3': (124, 205, 124), + 'palegreen4': (84, 139, 84), + 'paleturquoise': (175, 238, 238), + 'paleturquoise1': (187, 255, 255), + 'paleturquoise2': (174, 238, 238), + 'paleturquoise3': (150, 205, 205), + 'paleturquoise4': (102, 139, 139), + 'palevioletred': (219, 112, 147), + 'palevioletred1': (255, 130, 171), + 'palevioletred2': (238, 121, 159), + 'palevioletred3': (205, 104, 137), + 'palevioletred4': (139, 71, 93), + 'papayawhip': (255, 239, 213), + 'peachpuff': (255, 218, 185), + 'peachpuff1': (255, 218, 185), + 'peachpuff2': (238, 203, 173), + 'peachpuff3': (205, 175, 149), + 'peachpuff4': (139, 119, 101), + 'peru': (205, 133, 63), + 'pink': (255, 192, 203), + 'pink1': (255, 181, 197), + 'pink2': (238, 169, 184), + 'pink3': (205, 145, 158), + 'pink4': (139, 99, 108), + 'plum': (221, 160, 221), + 'plum1': 
(255, 187, 255), + 'plum2': (238, 174, 238), + 'plum3': (205, 150, 205), + 'plum4': (139, 102, 139), + 'powderblue': (176, 224, 230), + 'purple': (160, 32, 240), + 'purple1': (155, 48, 255), + 'purple2': (145, 44, 238), + 'purple3': (125, 38, 205), + 'purple4': (85, 26, 139), + 'red': (255, 0, 0), + 'red1': (255, 0, 0), + 'red2': (238, 0, 0), + 'red3': (205, 0, 0), + 'red4': (139, 0, 0), + 'rosybrown': (188, 143, 143), + 'rosybrown1': (255, 193, 193), + 'rosybrown2': (238, 180, 180), + 'rosybrown3': (205, 155, 155), + 'rosybrown4': (139, 105, 105), + 'royalblue': (65, 105, 225), + 'royalblue1': (72, 118, 255), + 'royalblue2': (67, 110, 238), + 'royalblue3': (58, 95, 205), + 'royalblue4': (39, 64, 139), + 'saddlebrown': (139, 69, 19), + 'salmon': (250, 128, 114), + 'salmon1': (255, 140, 105), + 'salmon2': (238, 130, 98), + 'salmon3': (205, 112, 84), + 'salmon4': (139, 76, 57), + 'sandybrown': (244, 164, 96), + 'seagreen': (46, 139, 87), + 'seagreen1': (84, 255, 159), + 'seagreen2': (78, 238, 148), + 'seagreen3': (67, 205, 128), + 'seagreen4': (46, 139, 87), + 'seashell': (255, 245, 238), + 'seashell1': (255, 245, 238), + 'seashell2': (238, 229, 222), + 'seashell3': (205, 197, 191), + 'seashell4': (139, 134, 130), + 'sienna': (160, 82, 45), + 'sienna1': (255, 130, 71), + 'sienna2': (238, 121, 66), + 'sienna3': (205, 104, 57), + 'sienna4': (139, 71, 38), + 'skyblue': (135, 206, 235), + 'skyblue1': (135, 206, 255), + 'skyblue2': (126, 192, 238), + 'skyblue3': (108, 166, 205), + 'skyblue4': (74, 112, 139), + 'slateblue': (106, 90, 205), + 'slateblue1': (131, 111, 255), + 'slateblue2': (122, 103, 238), + 'slateblue3': (105, 89, 205), + 'slateblue4': (71, 60, 139), + 'slategray': (112, 128, 144), + 'slategray1': (198, 226, 255), + 'slategray2': (185, 211, 238), + 'slategray3': (159, 182, 205), + 'slategray4': (108, 123, 139), + 'slategrey': (112, 128, 144), + 'snow': (255, 250, 250), + 'snow1': (255, 250, 250), + 'snow2': (238, 233, 233), + 'snow3': (205, 201, 201), + 'snow4': (139, 137, 137), + 'springgreen': (0, 255, 127), + 'springgreen1': (0, 255, 127), + 'springgreen2': (0, 238, 118), + 'springgreen3': (0, 205, 102), + 'springgreen4': (0, 139, 69), + 'steelblue': (70, 130, 180), + 'steelblue1': (99, 184, 255), + 'steelblue2': (92, 172, 238), + 'steelblue3': (79, 148, 205), + 'steelblue4': (54, 100, 139), + 'tan': (210, 180, 140), + 'tan1': (255, 165, 79), + 'tan2': (238, 154, 73), + 'tan3': (205, 133, 63), + 'tan4': (139, 90, 43), + 'thistle': (216, 191, 216), + 'thistle1': (255, 225, 255), + 'thistle2': (238, 210, 238), + 'thistle3': (205, 181, 205), + 'thistle4': (139, 123, 139), + 'tomato': (255, 99, 71), + 'tomato1': (255, 99, 71), + 'tomato2': (238, 92, 66), + 'tomato3': (205, 79, 57), + 'tomato4': (139, 54, 38), + 'transparent': (255, 255, 254), + 'turquoise': (64, 224, 208), + 'turquoise1': (0, 245, 255), + 'turquoise2': (0, 229, 238), + 'turquoise3': (0, 197, 205), + 'turquoise4': (0, 134, 139), + 'violet': (238, 130, 238), + 'violetred': (208, 32, 144), + 'violetred1': (255, 62, 150), + 'violetred2': (238, 58, 140), + 'violetred3': (205, 50, 120), + 'violetred4': (139, 34, 82), + 'wheat': (245, 222, 179), + 'wheat1': (255, 231, 186), + 'wheat2': (238, 216, 174), + 'wheat3': (205, 186, 150), + 'wheat4': (139, 126, 102), + 'white': (255, 255, 255), + 'whitesmoke': (245, 245, 245), + 'yellow': (255, 255, 0), + 'yellow1': (255, 255, 0), + 'yellow2': (238, 238, 0), + 'yellow3': (205, 205, 0), + 'yellow4': (139, 139, 0), + 'yellowgreen': (154, 205, 50), } 
re_nonword=re.compile(r'([^0-9a-zA-Z_.]+)') re_linewidth=re.compile(r'setlinewidth\((\d+(\.\d*)?|\.\d+)\)') diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -188,7 +188,7 @@ # the 'libs' directory is for binary installs - we assume that # must be the *native* platform. But we don't really support # cross-compiling via a binary install anyway, so we let it go. - self.library_dirs.append(os.path.join(sys.exec_prefix, 'include')) + self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs')) if self.debug: self.build_temp = os.path.join(self.build_temp, "Debug") else: @@ -685,13 +685,17 @@ # the previous version of this code did. This should work for # CPython too. The point is that on PyPy with cpyext, the # config var 'SO' is just ".so" but we want to return - # ".pypy-VERSION.so" instead. - so_ext = _get_c_extension_suffix() + # ".pypy-VERSION.so" instead. Note a further tweak for cffi's + # embedding mode: if EXT_SUFFIX is also defined, use that + # directly. + so_ext = get_config_var('EXT_SUFFIX') if so_ext is None: - so_ext = get_config_var('SO') # fall-back - # extensions in debug_mode are named 'module_d.pyd' under windows - if os.name == 'nt' and self.debug: - so_ext = '_d.pyd' + so_ext = _get_c_extension_suffix() + if so_ext is None: + so_ext = get_config_var('SO') # fall-back + # extensions in debug_mode are named 'module_d.pyd' under windows + if os.name == 'nt' and self.debug: + so_ext = '_d.pyd' return os.path.join(*ext_path) + so_ext def get_export_symbols (self, ext): diff --git a/lib-python/2.7/test/capath/0e4015b9.0 b/lib-python/2.7/test/capath/0e4015b9.0 new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/capath/0e4015b9.0 @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv +bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG +A1UEBhMCWFkxFzAVBgNVBAcMDkNhc3RsZSBBbnRocmF4MSMwIQYDVQQKDBpQeXRo +b24gU29mdHdhcmUgRm91bmRhdGlvbjEjMCEGA1UEAwwac2VsZi1zaWduZWQucHl0 +aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ +Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm +Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= +-----END CERTIFICATE----- diff --git a/lib-python/2.7/test/capath/ce7b8643.0 b/lib-python/2.7/test/capath/ce7b8643.0 new file mode 100644 --- /dev/null +++ b/lib-python/2.7/test/capath/ce7b8643.0 @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE----- +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u +IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv +bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG +A1UEBhMCWFkxFzAVBgNVBAcMDkNhc3RsZSBBbnRocmF4MSMwIQYDVQQKDBpQeXRo +b24gU29mdHdhcmUgRm91bmRhdGlvbjEjMCEGA1UEAwwac2VsZi1zaWduZWQucHl0 +aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ 
+Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm +Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= +-----END CERTIFICATE----- diff --git a/lib-python/2.7/test/https_svn_python_org_root.pem b/lib-python/2.7/test/https_svn_python_org_root.pem deleted file mode 100644 --- a/lib-python/2.7/test/https_svn_python_org_root.pem +++ /dev/null @@ -1,41 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIHPTCCBSWgAwIBAgIBADANBgkqhkiG9w0BAQQFADB5MRAwDgYDVQQKEwdSb290 -IENBMR4wHAYDVQQLExVodHRwOi8vd3d3LmNhY2VydC5vcmcxIjAgBgNVBAMTGUNB -IENlcnQgU2lnbmluZyBBdXRob3JpdHkxITAfBgkqhkiG9w0BCQEWEnN1cHBvcnRA -Y2FjZXJ0Lm9yZzAeFw0wMzAzMzAxMjI5NDlaFw0zMzAzMjkxMjI5NDlaMHkxEDAO -BgNVBAoTB1Jvb3QgQ0ExHjAcBgNVBAsTFWh0dHA6Ly93d3cuY2FjZXJ0Lm9yZzEi -MCAGA1UEAxMZQ0EgQ2VydCBTaWduaW5nIEF1dGhvcml0eTEhMB8GCSqGSIb3DQEJ -ARYSc3VwcG9ydEBjYWNlcnQub3JnMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC -CgKCAgEAziLA4kZ97DYoB1CW8qAzQIxL8TtmPzHlawI229Z89vGIj053NgVBlfkJ -8BLPRoZzYLdufujAWGSuzbCtRRcMY/pnCujW0r8+55jE8Ez64AO7NV1sId6eINm6 -zWYyN3L69wj1x81YyY7nDl7qPv4coRQKFWyGhFtkZip6qUtTefWIonvuLwphK42y -fk1WpRPs6tqSnqxEQR5YYGUFZvjARL3LlPdCfgv3ZWiYUQXw8wWRBB0bF4LsyFe7 -w2t6iPGwcswlWyCR7BYCEo8y6RcYSNDHBS4CMEK4JZwFaz+qOqfrU0j36NK2B5jc -G8Y0f3/JHIJ6BVgrCFvzOKKrF11myZjXnhCLotLddJr3cQxyYN/Nb5gznZY0dj4k -epKwDpUeb+agRThHqtdB7Uq3EvbXG4OKDy7YCbZZ16oE/9KTfWgu3YtLq1i6L43q -laegw1SJpfvbi1EinbLDvhG+LJGGi5Z4rSDTii8aP8bQUWWHIbEZAWV/RRyH9XzQ -QUxPKZgh/TMfdQwEUfoZd9vUFBzugcMd9Zi3aQaRIt0AUMyBMawSB3s42mhb5ivU -fslfrejrckzzAeVLIL+aplfKkQABi6F1ITe1Yw1nPkZPcCBnzsXWWdsC4PDSy826 -YreQQejdIOQpvGQpQsgi3Hia/0PsmBsJUUtaWsJx8cTLc6nloQsCAwEAAaOCAc4w -ggHKMB0GA1UdDgQWBBQWtTIb1Mfz4OaO873SsDrusjkY0TCBowYDVR0jBIGbMIGY -gBQWtTIb1Mfz4OaO873SsDrusjkY0aF9pHsweTEQMA4GA1UEChMHUm9vdCBDQTEe -MBwGA1UECxMVaHR0cDovL3d3dy5jYWNlcnQub3JnMSIwIAYDVQQDExlDQSBDZXJ0 -IFNpZ25pbmcgQXV0aG9yaXR5MSEwHwYJKoZIhvcNAQkBFhJzdXBwb3J0QGNhY2Vy -dC5vcmeCAQAwDwYDVR0TAQH/BAUwAwEB/zAyBgNVHR8EKzApMCegJaAjhiFodHRw -czovL3d3dy5jYWNlcnQub3JnL3Jldm9rZS5jcmwwMAYJYIZIAYb4QgEEBCMWIWh0 -dHBzOi8vd3d3LmNhY2VydC5vcmcvcmV2b2tlLmNybDA0BglghkgBhvhCAQgEJxYl -aHR0cDovL3d3dy5jYWNlcnQub3JnL2luZGV4LnBocD9pZD0xMDBWBglghkgBhvhC -AQ0ESRZHVG8gZ2V0IHlvdXIgb3duIGNlcnRpZmljYXRlIGZvciBGUkVFIGhlYWQg -b3ZlciB0byBodHRwOi8vd3d3LmNhY2VydC5vcmcwDQYJKoZIhvcNAQEEBQADggIB -ACjH7pyCArpcgBLKNQodgW+JapnM8mgPf6fhjViVPr3yBsOQWqy1YPaZQwGjiHCc -nWKdpIevZ1gNMDY75q1I08t0AoZxPuIrA2jxNGJARjtT6ij0rPtmlVOKTV39O9lg -18p5aTuxZZKmxoGCXJzN600BiqXfEVWqFcofN8CCmHBh22p8lqOOLlQ+TyGpkO/c -gr/c6EWtTZBzCDyUZbAEmXZ/4rzCahWqlwQ3JNgelE5tDlG+1sSPypZt90Pf6DBl -Jzt7u0NDY8RD97LsaMzhGY4i+5jhe1o+ATc7iwiwovOVThrLm82asduycPAtStvY -sONvRUgzEv/+PDIqVPfE94rwiCPCR/5kenHA0R6mY7AHfqQv0wGP3J8rtsYIqQ+T -SCX8Ev2fQtzzxD72V7DX3WnRBnc0CkvSyqD/HMaMyRa+xMwyN2hzXwj7UfdJUzYF -CpUCTPJ5GhD22Dp1nPMd8aINcGeGG7MW9S/lpOt5hvk9C8JzC6WZrG/8Z7jlLwum -GCSNe9FINSkYQKyTYOGWhlC0elnYjyELn8+CkcY7v2vcB5G5l1YjqrZslMZIBjzk -zk6q5PYvCdxTby78dOs6Y5nCpqyJvKeyRKANihDjbPIky/qbn3BHLt4Ui9SyIAmW -omTxJBzcoTWcFbLUvFUufQb1nA5V9FrWk9p2rSVzTMVD ------END CERTIFICATE----- diff --git a/lib-python/2.7/test/selfsigned_pythontestdotnet.pem b/lib-python/2.7/test/selfsigned_pythontestdotnet.pem --- a/lib-python/2.7/test/selfsigned_pythontestdotnet.pem +++ b/lib-python/2.7/test/selfsigned_pythontestdotnet.pem 
@@ -1,5 +1,5 @@ -----BEGIN CERTIFICATE----- -MIIChzCCAfCgAwIBAgIJAKGU95wKR8pSMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG @@ -8,9 +8,9 @@ aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv -EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjKTAnMCUGA1UdEQQeMByCGnNl -bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MA0GCSqGSIb3DQEBBQUAA4GBAIOXmdtM -eG9qzP9TiXW/Gc/zI4cBfdCpC+Y4gOfC9bQUC7hefix4iO3+iZjgy3X/FaRxUUoV -HKiXcXIaWqTSUWp45cSh0MbwZXudp6JIAptzdAhvvCrPKeC9i9GvxsPD4LtDAL97 -vSaxQBezA7hdxZd90/EeyMgVZgAnTCnvAWX9 +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515 +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM= -----END CERTIFICATE----- diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py --- a/lib-python/2.7/test/test_ssl.py +++ b/lib-python/2.7/test/test_ssl.py @@ -57,7 +57,8 @@ SIGNED_CERTFILE2 = data_file("keycert4.pem") SIGNING_CA = data_file("pycacert.pem") -SVN_PYTHON_ORG_ROOT_CERT = data_file("https_svn_python_org_root.pem") +REMOTE_HOST = "self-signed.pythontest.net" +REMOTE_ROOT_CERT = data_file("selfsigned_pythontestdotnet.pem") EMPTYCERT = data_file("nullcert.pem") BADCERT = data_file("badcert.pem") @@ -244,7 +245,7 @@ self.assertEqual(p['subjectAltName'], san) def test_DER_to_PEM(self): - with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f: + with open(CAFILE_CACERT, 'r') as f: pem = f.read() d1 = ssl.PEM_cert_to_DER_cert(pem) p2 = ssl.DER_cert_to_PEM_cert(d1) @@ -792,7 +793,7 @@ # Mismatching key and cert ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) with self.assertRaisesRegexp(ssl.SSLError, "key values mismatch"): - ctx.load_cert_chain(SVN_PYTHON_ORG_ROOT_CERT, ONLYKEY) + ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY) # Password protected key and cert ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD) ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode()) @@ -1013,7 +1014,7 @@ ctx.load_verify_locations(CERTFILE) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 0, 'crl': 0, 'x509': 1}) - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + ctx.load_verify_locations(CAFILE_CACERT) self.assertEqual(ctx.cert_store_stats(), {'x509_ca': 1, 'crl': 0, 'x509': 2}) @@ -1023,8 +1024,8 @@ # CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE ctx.load_verify_locations(CERTFILE) self.assertEqual(ctx.get_ca_certs(), []) - # but SVN_PYTHON_ORG_ROOT_CERT is a CA cert - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + # but CAFILE_CACERT is a CA cert + ctx.load_verify_locations(CAFILE_CACERT) self.assertEqual(ctx.get_ca_certs(), [{'issuer': ((('organizationName', 'Root CA'),), (('organizationalUnitName', 'http://www.cacert.org'),), @@ -1040,7 +1041,7 @@ (('emailAddress', 'support at cacert.org'),)), 'version': 3}]) - with open(SVN_PYTHON_ORG_ROOT_CERT) as f: + with open(CAFILE_CACERT) as f: pem = f.read() der = ssl.PEM_cert_to_DER_cert(pem) self.assertEqual(ctx.get_ca_certs(True), [der]) @@ -1215,11 +1216,11 @@ class NetworkedTests(unittest.TestCase): def 
test_connect(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE) try: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) self.assertEqual({}, s.getpeercert()) finally: s.close() @@ -1228,27 +1229,27 @@ s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED) self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed", - s.connect, ("svn.python.org", 443)) + s.connect, (REMOTE_HOST, 443)) s.close() # this should succeed because we specify the root cert s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) self.assertTrue(s.getpeercert()) finally: s.close() def test_connect_ex(self): # Issue #11326: check connect_ex() implementation - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - self.assertEqual(0, s.connect_ex(("svn.python.org", 443))) + self.assertEqual(0, s.connect_ex((REMOTE_HOST, 443))) self.assertTrue(s.getpeercert()) finally: s.close() @@ -1256,14 +1257,14 @@ def test_non_blocking_connect_ex(self): # Issue #11326: non-blocking connect_ex() should allow handshake # to proceed after the socket gets ready. - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT, + ca_certs=REMOTE_ROOT_CERT, do_handshake_on_connect=False) try: s.setblocking(False) - rc = s.connect_ex(('svn.python.org', 443)) + rc = s.connect_ex((REMOTE_HOST, 443)) # EWOULDBLOCK under Windows, EINPROGRESS elsewhere self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK)) # Wait for connect to finish @@ -1285,58 +1286,62 @@ def test_timeout_connect_ex(self): # Issue #12065: on a timeout, connect_ex() should return the original # errno (mimicking the behaviour of non-SSL sockets). - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT, + ca_certs=REMOTE_ROOT_CERT, do_handshake_on_connect=False) try: s.settimeout(0.0000001) - rc = s.connect_ex(('svn.python.org', 443)) + rc = s.connect_ex((REMOTE_HOST, 443)) if rc == 0: - self.skipTest("svn.python.org responded too quickly") + self.skipTest("REMOTE_HOST responded too quickly") self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK)) finally: s.close() def test_connect_ex_error(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, - ca_certs=SVN_PYTHON_ORG_ROOT_CERT) + ca_certs=REMOTE_ROOT_CERT) try: - rc = s.connect_ex(("svn.python.org", 444)) + rc = s.connect_ex((REMOTE_HOST, 444)) # Issue #19919: Windows machines or VMs hosted on Windows # machines sometimes return EWOULDBLOCK. 
- self.assertIn(rc, (errno.ECONNREFUSED, errno.EWOULDBLOCK)) + errors = ( + errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT, + errno.EWOULDBLOCK, + ) + self.assertIn(rc, errors) finally: s.close() def test_connect_with_context(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): # Same as test_connect, but with a separately created context ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: self.assertEqual({}, s.getpeercert()) finally: s.close() # Same with a server hostname s = ctx.wrap_socket(socket.socket(socket.AF_INET), - server_hostname="svn.python.org") - s.connect(("svn.python.org", 443)) + server_hostname=REMOTE_HOST) + s.connect((REMOTE_HOST, 443)) s.close() # This should fail because we have no verification certs ctx.verify_mode = ssl.CERT_REQUIRED s = ctx.wrap_socket(socket.socket(socket.AF_INET)) self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed", - s.connect, ("svn.python.org", 443)) + s.connect, (REMOTE_HOST, 443)) s.close() # This should succeed because we specify the root cert - ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT) + ctx.load_verify_locations(REMOTE_ROOT_CERT) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1349,12 +1354,12 @@ # OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must # contain both versions of each certificate (same content, different # filename) for this test to be portable across OpenSSL releases. - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=CAPATH) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1365,7 +1370,7 @@ ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=BYTES_CAPATH) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1373,15 +1378,15 @@ s.close() def test_connect_cadata(self): - with open(CAFILE_CACERT) as f: + with open(REMOTE_ROOT_CERT) as f: pem = f.read().decode('ascii') der = ssl.PEM_cert_to_DER_cert(pem) - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(cadata=pem) with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) cert = s.getpeercert() self.assertTrue(cert) @@ -1390,7 +1395,7 @@ ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(cadata=der) with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s: - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) cert = s.getpeercert() self.assertTrue(cert) @@ -1399,9 +1404,9 @@ # Issue #5238: creating a file-like object with makefile() shouldn't # delay closing the underlying "real socket" (here tested with its # file descriptor, hence skipping the test under Windows). 
- with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ss = ssl.wrap_socket(socket.socket(socket.AF_INET)) - ss.connect(("svn.python.org", 443)) + ss.connect((REMOTE_HOST, 443)) fd = ss.fileno() f = ss.makefile() f.close() @@ -1415,9 +1420,9 @@ self.assertEqual(e.exception.errno, errno.EBADF) def test_non_blocking_handshake(self): - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): s = socket.socket(socket.AF_INET) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) s.setblocking(False) s = ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE, @@ -1460,12 +1465,12 @@ if support.verbose: sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem)) - _test_get_server_certificate('svn.python.org', 443, SVN_PYTHON_ORG_ROOT_CERT) + _test_get_server_certificate(REMOTE_HOST, 443, REMOTE_ROOT_CERT) if support.IPV6_ENABLED: _test_get_server_certificate('ipv6.google.com', 443) def test_ciphers(self): - remote = ("svn.python.org", 443) + remote = (REMOTE_HOST, 443) with support.transient_internet(remote[0]): with closing(ssl.wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_NONE, ciphers="ALL")) as s: @@ -1510,13 +1515,13 @@ def test_get_ca_certs_capath(self): # capath certs are loaded on request - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=CAPATH) self.assertEqual(ctx.get_ca_certs(), []) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) - s.connect(("svn.python.org", 443)) + s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) @@ -1527,12 +1532,12 @@ @needs_sni def test_context_setget(self): # Check that the context of a connected socket can be replaced. - with support.transient_internet("svn.python.org"): + with support.transient_internet(REMOTE_HOST): ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23) s = socket.socket(socket.AF_INET) with closing(ctx1.wrap_socket(s)) as ss: - ss.connect(("svn.python.org", 443)) + ss.connect((REMOTE_HOST, 443)) self.assertIs(ss.context, ctx1) self.assertIs(ss._sslobj.context, ctx1) ss.context = ctx2 @@ -3026,7 +3031,7 @@ pass for filename in [ - CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, BYTES_CERTFILE, + CERTFILE, REMOTE_ROOT_CERT, BYTES_CERTFILE, ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY, SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA, BADCERT, BADKEY, EMPTYCERT]: diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -7,6 +7,7 @@ content = fid.read() # from cffi's Verifier() key = '\x00'.join([sys.version[:3], content]) + key += 'cpyext-gc-support-2' # this branch requires recompilation! 
if sys.version_info >= (3,): key = key.encode('utf-8') k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) @@ -62,7 +63,7 @@ if sys.platform == 'win32': # XXX pyconfig.h uses a pragma to link to the import library, # which is currently python27.lib - library = os.path.join(thisdir, '..', 'include', 'python27') + library = os.path.join(thisdir, '..', 'libs', 'python27') if not os.path.exists(library + '.lib'): # For a local translation or nightly build library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python27') diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO --- a/lib_pypy/cffi.egg-info/PKG-INFO +++ b/lib_pypy/cffi.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: cffi -Version: 1.4.2 +Version: 1.5.2 Summary: Foreign Function Interface for Python calling C code. Home-page: http://cffi.readthedocs.org Author: Armin Rigo, Maciej Fijalkowski diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py --- a/lib_pypy/cffi/__init__.py +++ b/lib_pypy/cffi/__init__.py @@ -4,8 +4,8 @@ from .api import FFI, CDefError, FFIError from .ffiplatform import VerificationError, VerificationMissing -__version__ = "1.4.2" -__version_info__ = (1, 4, 2) +__version__ = "1.5.2" +__version_info__ = (1, 5, 2) # The verifier module file names are based on the CRC32 of a string that # contains the following version number. It may be older than __version__ diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h --- a/lib_pypy/cffi/_cffi_include.h +++ b/lib_pypy/cffi/_cffi_include.h @@ -146,8 +146,9 @@ ((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23]) #define _cffi_convert_array_from_object \ ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24]) +#define _CFFI_CPIDX 25 #define _cffi_call_python \ - ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[25]) + ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[_CFFI_CPIDX]) #define _CFFI_NUM_EXPORTS 26 typedef struct _ctypedescr CTypeDescrObject; @@ -206,7 +207,8 @@ /********** end CPython-specific section **********/ #else _CFFI_UNUSED_FN -static void (*_cffi_call_python)(struct _cffi_externpy_s *, char *); +static void (*_cffi_call_python_org)(struct _cffi_externpy_s *, char *); +# define _cffi_call_python _cffi_call_python_org #endif @@ -229,6 +231,12 @@ ((got_nonpos) == (expected <= 0) && \ (got) == (unsigned long long)expected) +#ifdef MS_WIN32 +# define _cffi_stdcall __stdcall +#else +# define _cffi_stdcall /* nothing */ +#endif + #ifdef __cplusplus } #endif diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h new file mode 100644 --- /dev/null +++ b/lib_pypy/cffi/_embedding.h @@ -0,0 +1,517 @@ + +/***** Support code for embedding *****/ + +#if defined(_MSC_VER) +# define CFFI_DLLEXPORT __declspec(dllexport) +#elif defined(__GNUC__) +# define CFFI_DLLEXPORT __attribute__((visibility("default"))) +#else +# define CFFI_DLLEXPORT /* nothing */ +#endif + + +/* There are two global variables of type _cffi_call_python_fnptr: + + * _cffi_call_python, which we declare just below, is the one called + by ``extern "Python"`` implementations. + + * _cffi_call_python_org, which on CPython is actually part of the + _cffi_exports[] array, is the function pointer copied from + _cffi_backend. + + After initialization is complete, both are equal. 
However, the + first one remains equal to &_cffi_start_and_call_python until the + very end of initialization, when we are (or should be) sure that + concurrent threads also see a completely initialized world, and + only then is it changed. +*/ +#undef _cffi_call_python +typedef void (*_cffi_call_python_fnptr)(struct _cffi_externpy_s *, char *); +static void _cffi_start_and_call_python(struct _cffi_externpy_s *, char *); +static _cffi_call_python_fnptr _cffi_call_python = &_cffi_start_and_call_python; + + +#ifndef _MSC_VER + /* --- Assuming a GCC not infinitely old --- */ +# define cffi_compare_and_swap(l,o,n) __sync_bool_compare_and_swap(l,o,n) +# define cffi_write_barrier() __sync_synchronize() +# if !defined(__amd64__) && !defined(__x86_64__) && \ + !defined(__i386__) && !defined(__i386) +# define cffi_read_barrier() __sync_synchronize() +# else +# define cffi_read_barrier() (void)0 +# endif +#else + /* --- Windows threads version --- */ +# include +# define cffi_compare_and_swap(l,o,n) \ + (InterlockedCompareExchangePointer(l,n,o) == (o)) +# define cffi_write_barrier() InterlockedCompareExchange(&_cffi_dummy,0,0) +# define cffi_read_barrier() (void)0 +static volatile LONG _cffi_dummy; +#endif + +#ifdef WITH_THREAD +# ifndef _MSC_VER +# include + static pthread_mutex_t _cffi_embed_startup_lock; +# else + static CRITICAL_SECTION _cffi_embed_startup_lock; +# endif + static char _cffi_embed_startup_lock_ready = 0; +#endif + +static void _cffi_acquire_reentrant_mutex(void) +{ + static void *volatile lock = NULL; + + while (!cffi_compare_and_swap(&lock, NULL, (void *)1)) { + /* should ideally do a spin loop instruction here, but + hard to do it portably and doesn't really matter I + think: pthread_mutex_init() should be very fast, and + this is only run at start-up anyway. */ + } + +#ifdef WITH_THREAD + if (!_cffi_embed_startup_lock_ready) { +# ifndef _MSC_VER + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); + pthread_mutex_init(&_cffi_embed_startup_lock, &attr); +# else + InitializeCriticalSection(&_cffi_embed_startup_lock); +# endif + _cffi_embed_startup_lock_ready = 1; + } +#endif + + while (!cffi_compare_and_swap(&lock, (void *)1, NULL)) + ; + +#ifndef _MSC_VER + pthread_mutex_lock(&_cffi_embed_startup_lock); +#else + EnterCriticalSection(&_cffi_embed_startup_lock); +#endif +} + +static void _cffi_release_reentrant_mutex(void) +{ +#ifndef _MSC_VER + pthread_mutex_unlock(&_cffi_embed_startup_lock); +#else + LeaveCriticalSection(&_cffi_embed_startup_lock); +#endif +} + + +/********** CPython-specific section **********/ +#ifndef PYPY_VERSION + + +#define _cffi_call_python_org _cffi_exports[_CFFI_CPIDX] + +PyMODINIT_FUNC _CFFI_PYTHON_STARTUP_FUNC(void); /* forward */ + +static void _cffi_py_initialize(void) +{ + /* XXX use initsigs=0, which "skips initialization registration of + signal handlers, which might be useful when Python is + embedded" according to the Python docs. But review and think + if it should be a user-controllable setting. + + XXX we should also give a way to write errors to a buffer + instead of to stderr. + + XXX if importing 'site' fails, CPython (any version) calls + exit(). Should we try to work around this behavior here? + */ + Py_InitializeEx(0); +} + +static int _cffi_initialize_python(void) +{ + /* This initializes Python, imports _cffi_backend, and then the + present .dll/.so is set up as a CPython C extension module. 
+ */ + int result; + PyGILState_STATE state; + PyObject *pycode=NULL, *global_dict=NULL, *x; + +#if PY_MAJOR_VERSION >= 3 + /* see comments in _cffi_carefully_make_gil() about the + Python2/Python3 difference + */ +#else + /* Acquire the GIL. We have no threadstate here. If Python is + already initialized, it is possible that there is already one + existing for this thread, but it is not made current now. + */ + PyEval_AcquireLock(); + + _cffi_py_initialize(); + + /* The Py_InitializeEx() sometimes made a threadstate for us, but + not always. Indeed Py_InitializeEx() could be called and do + nothing. So do we have a threadstate, or not? We don't know, + but we can replace it with NULL in all cases. + */ + (void)PyThreadState_Swap(NULL); + + /* Now we can release the GIL and re-acquire immediately using the + logic of PyGILState(), which handles making or installing the + correct threadstate. + */ + PyEval_ReleaseLock(); +#endif + state = PyGILState_Ensure(); + + /* Call the initxxx() function from the present module. It will + create and initialize us as a CPython extension module, instead + of letting the startup Python code do it---it might reimport + the same .dll/.so and get maybe confused on some platforms. + It might also have troubles locating the .dll/.so again for all + I know. + */ + (void)_CFFI_PYTHON_STARTUP_FUNC(); + if (PyErr_Occurred()) + goto error; + + /* Now run the Python code provided to ffi.embedding_init_code(). + */ + pycode = Py_CompileString(_CFFI_PYTHON_STARTUP_CODE, + "", + Py_file_input); + if (pycode == NULL) + goto error; + global_dict = PyDict_New(); + if (global_dict == NULL) + goto error; + if (PyDict_SetItemString(global_dict, "__builtins__", + PyThreadState_GET()->interp->builtins) < 0) + goto error; + x = PyEval_EvalCode( +#if PY_MAJOR_VERSION < 3 + (PyCodeObject *) +#endif + pycode, global_dict, global_dict); + if (x == NULL) + goto error; + Py_DECREF(x); + + /* Done! Now if we've been called from + _cffi_start_and_call_python() in an ``extern "Python"``, we can + only hope that the Python code did correctly set up the + corresponding @ffi.def_extern() function. Otherwise, the + general logic of ``extern "Python"`` functions (inside the + _cffi_backend module) will find that the reference is still + missing and print an error. + */ + result = 0; + done: + Py_XDECREF(pycode); + Py_XDECREF(global_dict); + PyGILState_Release(state); + return result; + + error:; + { + /* Print as much information as potentially useful. 
+ Debugging load-time failures with embedding is not fun + */ + PyObject *exception, *v, *tb, *f, *modules, *mod; + PyErr_Fetch(&exception, &v, &tb); + if (exception != NULL) { + PyErr_NormalizeException(&exception, &v, &tb); + PyErr_Display(exception, v, tb); + } + Py_XDECREF(exception); + Py_XDECREF(v); + Py_XDECREF(tb); + + f = PySys_GetObject((char *)"stderr"); + if (f != NULL && f != Py_None) { + PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME + "\ncompiled with cffi version: 1.5.2" + "\n_cffi_backend module: ", f); + modules = PyImport_GetModuleDict(); + mod = PyDict_GetItemString(modules, "_cffi_backend"); + if (mod == NULL) { + PyFile_WriteString("not loaded", f); + } + else { + v = PyObject_GetAttrString(mod, "__file__"); + PyFile_WriteObject(v, f, 0); + Py_XDECREF(v); + } + PyFile_WriteString("\nsys.path: ", f); + PyFile_WriteObject(PySys_GetObject((char *)"path"), f, 0); + PyFile_WriteString("\n\n", f); + } + } + result = -1; + goto done; +} + +PyAPI_DATA(char *) _PyParser_TokenNames[]; /* from CPython */ + +static int _cffi_carefully_make_gil(void) +{ + /* This does the basic initialization of Python. It can be called + completely concurrently from unrelated threads. It assumes + that we don't hold the GIL before (if it exists), and we don't + hold it afterwards. + + What it really does is completely different in Python 2 and + Python 3. + + Python 2 + ======== + + Initialize the GIL, without initializing the rest of Python, + by calling PyEval_InitThreads(). + + PyEval_InitThreads() must not be called concurrently at all. + So we use a global variable as a simple spin lock. This global + variable must be from 'libpythonX.Y.so', not from this + cffi-based extension module, because it must be shared from + different cffi-based extension modules. We choose + _PyParser_TokenNames[0] as a completely arbitrary pointer value + that is never written to. The default is to point to the + string "ENDMARKER". We change it temporarily to point to the + next character in that string. (Yes, I know it's REALLY + obscure.) + + Python 3 + ======== + + In Python 3, PyEval_InitThreads() cannot be called before + Py_InitializeEx() any more. So this function calls + Py_InitializeEx() first. It uses the same obscure logic to + make sure we never call it concurrently. + + Arguably, this is less good on the spinlock, because + Py_InitializeEx() takes much longer to run than + PyEval_InitThreads(). But I didn't find a way around it. + */ + +#ifdef WITH_THREAD + char *volatile *lock = (char *volatile *)_PyParser_TokenNames; + char *old_value; + + while (1) { /* spin loop */ + old_value = *lock; + if (old_value[0] == 'E') { + assert(old_value[1] == 'N'); + if (cffi_compare_and_swap(lock, old_value, old_value + 1)) + break; + } + else { + assert(old_value[0] == 'N'); + /* should ideally do a spin loop instruction here, but + hard to do it portably and doesn't really matter I + think: PyEval_InitThreads() should be very fast, and + this is only run at start-up anyway. 
*/ + } + } +#endif + +#if PY_MAJOR_VERSION >= 3 + /* Python 3: call Py_InitializeEx() */ + { + PyGILState_STATE state = PyGILState_UNLOCKED; + if (!Py_IsInitialized()) + _cffi_py_initialize(); + else + state = PyGILState_Ensure(); + + PyEval_InitThreads(); + PyGILState_Release(state); + } +#else + /* Python 2: call PyEval_InitThreads() */ +# ifdef WITH_THREAD + if (!PyEval_ThreadsInitialized()) { + PyEval_InitThreads(); /* makes the GIL */ + PyEval_ReleaseLock(); /* then release it */ + } + /* else: there is already a GIL, but we still needed to do the + spinlock dance to make sure that we see it as fully ready */ +# endif +#endif + +#ifdef WITH_THREAD + /* release the lock */ + while (!cffi_compare_and_swap(lock, old_value + 1, old_value)) + ; +#endif + + return 0; +} + +/********** end CPython-specific section **********/ + + +#else + + +/********** PyPy-specific section **********/ + +PyMODINIT_FUNC _CFFI_PYTHON_STARTUP_FUNC(const void *[]); /* forward */ + +static struct _cffi_pypy_init_s { + const char *name; + void (*func)(const void *[]); + const char *code; +} _cffi_pypy_init = { + _CFFI_MODULE_NAME, + _CFFI_PYTHON_STARTUP_FUNC, + _CFFI_PYTHON_STARTUP_CODE, +}; + +extern int pypy_carefully_make_gil(const char *); +extern int pypy_init_embedded_cffi_module(int, struct _cffi_pypy_init_s *); + +static int _cffi_carefully_make_gil(void) +{ + return pypy_carefully_make_gil(_CFFI_MODULE_NAME); +} + +static int _cffi_initialize_python(void) +{ + return pypy_init_embedded_cffi_module(0xB011, &_cffi_pypy_init); +} + +/********** end PyPy-specific section **********/ + + +#endif + + +#ifdef __GNUC__ +__attribute__((noinline)) +#endif +static _cffi_call_python_fnptr _cffi_start_python(void) +{ + /* Delicate logic to initialize Python. This function can be + called multiple times concurrently, e.g. when the process calls + its first ``extern "Python"`` functions in multiple threads at + once. It can also be called recursively, in which case we must + ignore it. We also have to consider what occurs if several + different cffi-based extensions reach this code in parallel + threads---it is a different copy of the code, then, and we + can't have any shared global variable unless it comes from + 'libpythonX.Y.so'. + + Idea: + + * _cffi_carefully_make_gil(): "carefully" call + PyEval_InitThreads() (possibly with Py_InitializeEx() first). + + * then we use a (local) custom lock to make sure that a call to this + cffi-based extension will wait if another call to the *same* + extension is running the initialization in another thread. + It is reentrant, so that a recursive call will not block, but + only one from a different thread. + + * then we grab the GIL and (Python 2) we call Py_InitializeEx(). + At this point, concurrent calls to Py_InitializeEx() are not + possible: we have the GIL. + + * do the rest of the specific initialization, which may + temporarily release the GIL but not the custom lock. + Only release the custom lock when we are done. 
+ */ + static char called = 0; + + if (_cffi_carefully_make_gil() != 0) + return NULL; + + _cffi_acquire_reentrant_mutex(); + From pypy.commits at gmail.com Tue Mar 1 02:35:32 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 29 Feb 2016 23:35:32 -0800 (PST) Subject: [pypy-commit] pypy memop-simplify3: calculate index value (const scale, const offset) before emiting the load for ConstPtrs Message-ID: <56d54644.03321c0a.d22c2.ffffbf9e@mx.google.com> Author: Richard Plangger Branch: memop-simplify3 Changeset: r82623:6f7d32f89861 Date: 2016-03-01 08:32 +0100 http://bitbucket.org/pypy/pypy/changeset/6f7d32f89861/ Log: calculate index value (const scale, const offset) before emiting the load for ConstPtrs diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -164,13 +164,11 @@ array_index = moving_obj_tracker.get_array_index(v) size, offset, _ = unpack_arraydescr(moving_obj_tracker.ptr_array_descr) - scale = size + array_index = array_index * size + offset args = [moving_obj_tracker.const_ptr_gcref_array, ConstInt(array_index), - ConstInt(scale), - ConstInt(offset), ConstInt(size)] - load_op = ResOperation(rop.GC_LOAD_INDEXED_R, args) + load_op = ResOperation(rop.GC_LOAD_R, args) newops.append(load_op) op.setarg(arg_i, load_op) # From pypy.commits at gmail.com Tue Mar 1 02:35:34 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 29 Feb 2016 23:35:34 -0800 (PST) Subject: [pypy-commit] pypy memop-simplify3: removed getfield_gc_pure_* which where removed on default some time ago Message-ID: <56d54646.838d1c0a.834a8.ffffc36e@mx.google.com> Author: Richard Plangger Branch: memop-simplify3 Changeset: r82624:08f7c1c3571d Date: 2016-03-01 08:34 +0100 http://bitbucket.org/pypy/pypy/changeset/08f7c1c3571d/ Log: removed getfield_gc_pure_* which where removed on default some time ago diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -243,7 +243,6 @@ self.emit_gc_store_or_indexed(op, ptr_box, index_box, value_box, fieldsize, itemsize, ofs) elif opnum in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R, - rop.GETFIELD_GC_PURE_I, rop.GETFIELD_GC_PURE_F, rop.GETFIELD_GC_PURE_R, rop.GETFIELD_RAW_I, rop.GETFIELD_RAW_F, rop.GETFIELD_RAW_R): ofs, itemsize, sign = unpack_fielddescr(op.getdescr()) ptr_box = op.getarg(0) @@ -534,8 +533,6 @@ # See emit_pending_zeros(). (This optimization is done by # hacking the object 'o' in-place: e.g., o.getarg(1) may be # replaced with another constant greater than 0.) 
- #o = ResOperation(rop.ZERO_ARRAY, [v_arr, self.c_zero, v_length], - # descr=arraydescr) assert isinstance(arraydescr, ArrayDescr) scale = arraydescr.itemsize v_length_scaled = v_length From pypy.commits at gmail.com Tue Mar 1 03:53:39 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 01 Mar 2016 00:53:39 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: assembly instructions now check the immediate values, asserting if a value too big/small is passed Message-ID: <56d55893.4412c30a.60bf3.ffffb625@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82625:1e7875e46f1c Date: 2016-03-01 09:47 +0100 http://bitbucket.org/pypy/pypy/changeset/1e7875e46f1c/ Log: assembly instructions now check the immediate values, asserting if a value too big/small is passed diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -1369,6 +1369,7 @@ assert lengthloc is not r.RES and lengthloc is not r.RSZ assert lengthloc.is_reg() + assert maxlength >= 0 if maxlength > 2**16-1: maxlength = 2**16-1 # makes things easier mc = self.mc diff --git a/rpython/jit/backend/zarch/instruction_builder.py b/rpython/jit/backend/zarch/instruction_builder.py --- a/rpython/jit/backend/zarch/instruction_builder.py +++ b/rpython/jit/backend/zarch/instruction_builder.py @@ -461,7 +461,45 @@ def build_unpack_func(mnemonic, func): @always_inline + def check_arg_type(arg, type): + #iX - immediate X bits (signed) + if type.startswith('i'): + value = arg.value + if type == 'i8': assert -2**7 <= value <= 2**7-1 + if type == 'i12': assert -2**11 <= value <= 2**11-1 + if type == 'i16': assert -2**15 <= value <= 2**15-1 + if type == 'i20': assert -2**19 <= value <= 2**19-1 + if type == 'i32': assert -2**31 <= value <= 2**31-1 + #uX - immediate X bits (unsigend) + if type.startswith('u'): + value = arg.value + if type == 'u8': assert 0 <= value <= 2**8-1 + if type == 'u12': assert 0 <= value <= 2**12-1 + if type == 'u16': assert 0 <= value <= 2**16-1 + if type == 'u20': assert 0 <= value <= 2**20-1 + if type == 'u32': assert 0 <= value <= 2**32-1 + #bd - base displacement (unsigned 12 bit) + #bid - index base displacement (unsigned 12 bit) + if type == 'bd' or type == 'bid': + value = arg.displace + assert 0 <= value <= 2**12-1 + #bdl - base displacement long (20 bit) + #bidl - index base displacement (20 bit) + if type == 'bdl' or type == 'bidl': + value = arg.displace + assert -2**19 <= value <= 2**19-1 + #l4bd - length base displacement (4 bit) + if type == 'l4db': + value = arg.displace + assert 0 <= value <= 2**4-1 + #h32 - halfwords 32 bit (e.g. LARL, or other relative instr.) 
+ if type == 'h32': + value = arg.value + assert -2**31 <= value <= 2**31-1 + assert value & 0x1 == 0 + @always_inline def unpack_arg(arg, argtype): + check_arg_type(arg, argtype) if argtype == '-': return 0 elif argtype == 'r' or argtype == 'r/m' or \ @@ -565,3 +603,4 @@ setattr(clazz, mnemonic, build_unpack_func(mnemonic, func)) setattr(clazz, mnemonic + '_byte_count', func._byte_count) del func._byte_count + del func._arguments_ diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py --- a/rpython/jit/backend/zarch/instructions.py +++ b/rpython/jit/backend/zarch/instructions.py @@ -182,6 +182,7 @@ 'STE': ('rx', ['\x70']), # note displacement is UNsigned 12 bit 'STD': ('rx', ['\x60']), + # here it is 20 bit signed 'STDY': ('rxy', ['\xED','\x67']), 'SPM': ('rr', ['\x04'], 'r,-'), diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -454,7 +454,8 @@ [lengthloc] = arglocs arraydescr = op.getdescr() itemsize = op.getarg(1).getint() - maxlength = (gc_ll_descr.max_size_of_young_obj - WORD * 2) / itemsize + assert itemsize == 1 + maxlength = (gc_ll_descr.max_size_of_young_obj - WORD * 2) gcmap = regalloc.get_gcmap([r.RES, r.RSZ]) self.malloc_cond_varsize( op.getarg(0).getint(), diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -803,6 +803,7 @@ # sure it is in a register different from r.RES and r.RSZ. (It # should not be a ConstInt at all.) length_box = op.getarg(2) + assert not isinstance(length_box, Const) lengthloc = self.ensure_reg(length_box) return [lengthloc] From pypy.commits at gmail.com Tue Mar 1 04:04:07 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 01 Mar 2016 01:04:07 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: RISBGN use RISBG (does alter the cc), changed parameter of build_rie_f (signed to unsigned). removed some unnecessay asserts (check now for each parameter) Message-ID: <56d55b07.6718c20a.34fb4.1410@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82626:9fc4da160aeb Date: 2016-03-01 09:56 +0100 http://bitbucket.org/pypy/pypy/changeset/9fc4da160aeb/ Log: RISBGN use RISBG (does alter the cc), changed parameter of build_rie_f (signed to unsigned). 
removed some unnecessay asserts (check now for each parameter) diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -1158,8 +1158,6 @@ for reg in includes: v = r.ALL_REG_INDEXES[reg] offset = base_ofs + v * WORD - assert offset >= 0 - assert offset <= 2**16-1 mc.STD(reg, l.addr(offset, r.SPP)) def _pop_fp_regs_from_jitframe(self, mc, includes=r.MANAGED_FP_REGS): @@ -1167,8 +1165,6 @@ for reg in includes: v = r.ALL_REG_INDEXES[reg] offset = base_ofs + v * WORD - assert offset >= 0 - assert offset <= 2**16-1 mc.LD(reg, l.addr(offset, r.SPP)) @@ -1399,7 +1395,7 @@ mc.AGHIK(r.RSZ, lengthloc, l.imm(constsize)) if force_realignment: # "& ~(WORD-1)" - mc.RISBGN(r.RSZ, r.RSZ, l.imm(0), l.imm(0x80 | 60), l.imm(0)) + mc.RISBG(r.RSZ, r.RSZ, l.imm(0), l.imm(0x80 | 60), l.imm(0)) mc.AGRK(r.RSZ, r.RES, r.RSZ) # now RSZ contains the total size in bytes, rounded up to a multiple diff --git a/rpython/jit/backend/zarch/instruction_builder.py b/rpython/jit/backend/zarch/instruction_builder.py --- a/rpython/jit/backend/zarch/instruction_builder.py +++ b/rpython/jit/backend/zarch/instruction_builder.py @@ -341,7 +341,7 @@ return encode_rie_e def build_rie_f(mnemonic, (opcode1,opcode2)): - @builder.arguments('r,r,i8,i8,i8') + @builder.arguments('r,r,u8,u8,u8') def encode_rie_f(self, reg1, reg2, i1, i2, i3): self.writechar(opcode1) byte = (reg1 & BIT_MASK_4) << 4 | (reg2 & BIT_MASK_4) @@ -496,7 +496,6 @@ if type == 'h32': value = arg.value assert -2**31 <= value <= 2**31-1 - assert value & 0x1 == 0 @always_inline def unpack_arg(arg, argtype): check_arg_type(arg, argtype) diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py --- a/rpython/jit/backend/zarch/instructions.py +++ b/rpython/jit/backend/zarch/instructions.py @@ -42,11 +42,6 @@ 'SLLG': ('rsy_a', ['\xEB','\x0D']), # rotating - # rotate, then insert selected bits - # on the VM the miscellaneous-instruction-extensions - # does not seem to be installed - # cpu fails at this instruction, and gnu assembler - # does not recognize mnemonic 'RISBG': ('rie_f', ['\xEC','\x55']), 'RISBGN': ('rie_f', ['\xEC','\x59']), diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -568,14 +568,13 @@ # compute in SCRATCH the index of the bit inside the byte: # scratch = (index >> card_page_shift) & 7 # 0x80 sets zero flag. 
will store 0 into all not selected bits - mc.RISBGN(r.SCRATCH, loc_index, l.imm(61), l.imm(0x80 | 63), l.imm(64-n)) + mc.RISBG(r.SCRATCH, loc_index, l.imm(61), l.imm(0x80 | 63), l.imm(64-n)) mc.XG(tmp_loc, l.pool(self.pool.constant_64_ones)) # set SCRATCH2 to 1 << r1 mc.LGHI(r.SCRATCH2, l.imm(1)) mc.SLLG(r.SCRATCH2, r.SCRATCH2, l.addr(0,r.SCRATCH)) - # set this bit inside the byte of interest addr = l.addr(0, loc_base, tmp_loc) mc.LLGC(r.SCRATCH, addr) From pypy.commits at gmail.com Tue Mar 1 04:18:55 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 01 Mar 2016 01:18:55 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <56d55e7f.c3e01c0a.40ffb.ffffe02e@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r713:0d8788f07836 Date: 2016-03-01 10:18 +0100 http://bitbucket.org/pypy/pypy.org/changeset/0d8788f07836/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $62984 of $105000 (60.0%) + $63003 of $105000 (60.0%)
@@ -23,7 +23,7 @@
diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -9,7 +9,7 @@ @@ -17,7 +17,7 @@ 2nd call: - $30423 of $80000 (38.0%) + $30524 of $80000 (38.2%)
@@ -25,7 +25,7 @@
  • From pypy.commits at gmail.com Tue Mar 1 04:35:49 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 01 Mar 2016 01:35:49 -0800 (PST) Subject: [pypy-commit] pypy default: hg backout 38deea741bed Message-ID: <56d56275.034cc20a.d4ccb.337f@mx.google.com> Author: Armin Rigo Branch: Changeset: r82627:2523cc45c5d9 Date: 2016-03-01 10:27 +0100 http://bitbucket.org/pypy/pypy/changeset/2523cc45c5d9/ Log: hg backout 38deea741bed Causes failures on Linux. Looks easy to fix, but first backing this out, because fixes should go to cffi/cffi in the testing/embedding directory and then be copied here (there is the script pypy/tool/import_cffi.py for that). diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c @@ -1,12 +1,10 @@ #include #include -#include #include -#include #ifdef PTEST_USE_THREAD # include # include -sem_t *done; +static sem_t done; #endif @@ -56,7 +54,7 @@ printf("time per call: %.3g\n", t); #ifdef PTEST_USE_THREAD - int status = sem_post(done); + int status = sem_post(&done); assert(status == 0); #endif @@ -70,8 +68,8 @@ start_routine(0); #else pthread_t th; - done = sem_open("perf-test", O_CREAT, 0777, 0); - int i, status; + int i, status = sem_init(&done, 0, 0); + assert(status == 0); add1(0, 0); /* this is the main thread */ @@ -80,9 +78,7 @@ assert(status == 0); } for (i = 0; i < PTEST_USE_THREAD; i++) { - status = sem_wait(done); - if (status) - fprintf(stderr, "%s\n", strerror(errno)); + status = sem_wait(&done); assert(status == 0); } #endif diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c @@ -7,7 +7,7 @@ extern int add1(int, int); -static sem_t* done; +static sem_t done; static void *start_routine(void *arg) @@ -16,7 +16,7 @@ x = add1(40, 2); assert(x == 42); - status = sem_post(done); + status = sem_post(&done); assert(status == 0); return arg; @@ -25,8 +25,8 @@ int main(void) { pthread_t th; - int i, status; - done = sem_open("thread1-test", O_CREAT, 0777, 0); + int i, status = sem_init(&done, 0, 0); + assert(status == 0); printf("starting\n"); fflush(stdout); @@ -35,7 +35,7 @@ assert(status == 0); } for (i = 0; i < NTHREADS; i++) { - status = sem_wait(done); + status = sem_wait(&done); assert(status == 0); } printf("done\n"); diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c @@ -5,7 +5,7 @@ extern int add1(int, int); extern int add2(int, int, int); -static sem_t* done; +static sem_t done; static void *start_routine_1(void *arg) @@ -14,7 +14,7 @@ x = add1(40, 2); assert(x == 42); - status = sem_post(done); + status = sem_post(&done); assert(status == 0); return arg; @@ -29,7 +29,7 @@ x = add2(1000, 200, 30); assert(x == 1230); - status = sem_post(done); + status = sem_post(&done); assert(status == 0); return arg; @@ -38,8 +38,8 @@ int main(void) { pthread_t th; - int i, status; - done = sem_open("thread2-test", O_CREAT, 0777, 0); + int i, status = sem_init(&done, 0, 0); + assert(status == 0); printf("starting\n"); 
fflush(stdout); @@ -49,7 +49,7 @@ assert(status == 0); for (i = 0; i < 2; i++) { - status = sem_wait(done); + status = sem_wait(&done); assert(status == 0); } printf("done\n"); diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c @@ -5,7 +5,7 @@ extern int add2(int, int, int); extern int add3(int, int, int, int); -static sem_t* done; +static sem_t done; static void *start_routine_2(void *arg) @@ -14,7 +14,7 @@ x = add2(40, 2, 100); assert(x == 142); - status = sem_post(done); + status = sem_post(&done); assert(status == 0); return arg; @@ -26,7 +26,7 @@ x = add3(1000, 200, 30, 4); assert(x == 1234); - status = sem_post(done); + status = sem_post(&done); assert(status == 0); return arg; @@ -35,8 +35,8 @@ int main(void) { pthread_t th; - int i, status; - done = sem_open("thread-test3", O_CREAT, 0777, 0); + int i, status = sem_init(&done, 0, 0); + assert(status == 0); printf("starting\n"); fflush(stdout); @@ -47,7 +47,7 @@ assert(status == 0); } for (i = 0; i < 20; i++) { - status = sem_wait(done); + status = sem_wait(&done); assert(status == 0); } printf("done\n"); diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c @@ -7,7 +7,7 @@ extern int add1(int, int); -static sem_t *done; +static sem_t done; static void *start_routine(void *arg) @@ -22,7 +22,7 @@ assert(x == expected + 8 + i); } - status = sem_post(done); + status = sem_post(&done); assert(status == 0); return arg; @@ -31,8 +31,7 @@ int main(void) { pthread_t th; - int i, status; - done = sem_open("tlocal-test", O_CREAT, 0777, 0); + int i, status = sem_init(&done, 0, 0); assert(status == 0); for (i = 0; i < NTHREADS; i++) { @@ -40,7 +39,7 @@ assert(status == 0); } for (i = 0; i < NTHREADS; i++) { - status = sem_wait(done); + status = sem_wait(&done); assert(status == 0); } printf("done\n"); From pypy.commits at gmail.com Tue Mar 1 04:35:51 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 01 Mar 2016 01:35:51 -0800 (PST) Subject: [pypy-commit] pypy default: Fix import_cffi to also copy the .c and .h files. Re-run it to import Message-ID: <56d56277.e6ebc20a.a372c.2c0a@mx.google.com> Author: Armin Rigo Branch: Changeset: r82628:cca076442762 Date: 2016-03-01 10:35 +0100 http://bitbucket.org/pypy/pypy/changeset/cca076442762/ Log: Fix import_cffi to also copy the .c and .h files. Re-run it to import cffi/5d4960993342. 
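For reference, the per-extension dispatch that this change introduces boils down to the following standalone restatement of the mangle() helper (see the diff below for the real version inside pypy/tool/import_cffi.py; the tool itself is run with a single argument, the path of a cffi checkout):

    def mangle(lines, ext):
        # .py files get a Python comment marker and have their test imports
        # retargeted to pypy's copy; .c/.h files only get a C comment marker.
        if ext == '.py':
            yield "# Generated by pypy/tool/import_cffi.py\n"
            for line in lines:
                yield line.replace('from testing',
                                   'from pypy.module.test_lib_pypy.cffi_tests')
        elif ext in ('.c', '.h'):
            yield "/* Generated by pypy/tool/import_cffi.py */\n"
            for line in lines:
                yield line
        else:
            raise AssertionError(ext)
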
diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py @@ -12,7 +12,7 @@ def create_venv(name): tmpdir = udir.join(name) try: - subprocess.check_call(['virtualenv', '--distribute', + subprocess.check_call(['virtualenv', '--never-download', '-p', os.path.abspath(sys.executable), str(tmpdir)]) except OSError as e: diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c @@ -1,3 +1,4 @@ +/* Generated by pypy/tool/import_cffi.py */ #include extern int add1(int, int); diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c @@ -1,3 +1,4 @@ +/* Generated by pypy/tool/import_cffi.py */ #include extern int add1(int, int); diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c @@ -1,3 +1,4 @@ +/* Generated by pypy/tool/import_cffi.py */ #include #ifdef _MSC_VER diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c @@ -1,10 +1,12 @@ +/* Generated by pypy/tool/import_cffi.py */ #include #include #include #ifdef PTEST_USE_THREAD # include -# include -static sem_t done; +static pthread_mutex_t mutex1 = PTHREAD_MUTEX_INITIALIZER; +static pthread_cond_t cond1 = PTHREAD_COND_INITIALIZER; +static int remaining; #endif @@ -54,8 +56,11 @@ printf("time per call: %.3g\n", t); #ifdef PTEST_USE_THREAD - int status = sem_post(&done); - assert(status == 0); + pthread_mutex_lock(&mutex1); + remaining -= 1; + if (!remaining) + pthread_cond_signal(&cond1); + pthread_mutex_unlock(&mutex1); #endif return arg; @@ -68,19 +73,19 @@ start_routine(0); #else pthread_t th; - int i, status = sem_init(&done, 0, 0); - assert(status == 0); + int i, status; add1(0, 0); /* this is the main thread */ + remaining = PTEST_USE_THREAD; for (i = 0; i < PTEST_USE_THREAD; i++) { status = pthread_create(&th, NULL, start_routine, NULL); assert(status == 0); } - for (i = 0; i < PTEST_USE_THREAD; i++) { - status = sem_wait(&done); - assert(status == 0); - } + pthread_mutex_lock(&mutex1); + while (remaining) + pthread_cond_wait(&cond1, &mutex1); + pthread_mutex_unlock(&mutex1); #endif return 0; } diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h @@ -1,10 +1,45 @@ +/* Generated by pypy/tool/import_cffi.py */ /************************************************************/ #ifndef _MSC_VER /************************************************************/ #include 
-#include + +/* don't include , it is not available on OS/X */ + +typedef struct { + pthread_mutex_t mutex1; + pthread_cond_t cond1; + unsigned int value; +} sem_t; + +static int sem_init(sem_t *sem, int pshared, unsigned int value) +{ + assert(pshared == 0); + sem->value = value; + return (pthread_mutex_init(&sem->mutex1, NULL) || + pthread_cond_init(&sem->cond1, NULL)); +} + +static int sem_post(sem_t *sem) +{ + pthread_mutex_lock(&sem->mutex1); + sem->value += 1; + pthread_cond_signal(&sem->cond1); + pthread_mutex_unlock(&sem->mutex1); + return 0; +} + +static int sem_wait(sem_t *sem) +{ + pthread_mutex_lock(&sem->mutex1); + while (sem->value == 0) + pthread_cond_wait(&sem->cond1, &sem->mutex1); + sem->value -= 1; + pthread_mutex_unlock(&sem->mutex1); + return 0; +} /************************************************************/ @@ -22,7 +57,7 @@ typedef HANDLE sem_t; typedef HANDLE pthread_t; -int sem_init(sem_t *sem, int pshared, unsigned int value) +static int sem_init(sem_t *sem, int pshared, unsigned int value) { assert(pshared == 0); assert(value == 0); @@ -30,26 +65,26 @@ return *sem ? 0 : -1; } -int sem_post(sem_t *sem) +static int sem_post(sem_t *sem) { return ReleaseSemaphore(*sem, 1, NULL) ? 0 : -1; } -int sem_wait(sem_t *sem) +static int sem_wait(sem_t *sem) { WaitForSingleObject(*sem, INFINITE); return 0; } -DWORD WINAPI myThreadProc(LPVOID lpParameter) +static DWORD WINAPI myThreadProc(LPVOID lpParameter) { void *(* start_routine)(void *) = (void *(*)(void *))lpParameter; start_routine(NULL); return 0; } -int pthread_create(pthread_t *thread, void *attr, - void *start_routine(void *), void *arg) +static int pthread_create(pthread_t *thread, void *attr, + void *start_routine(void *), void *arg) { assert(arg == NULL); *thread = CreateThread(NULL, 0, myThreadProc, start_routine, 0, NULL); diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c @@ -1,3 +1,4 @@ +/* Generated by pypy/tool/import_cffi.py */ #include #include #include "thread-test.h" diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c @@ -1,3 +1,4 @@ +/* Generated by pypy/tool/import_cffi.py */ #include #include #include "thread-test.h" diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c @@ -1,3 +1,4 @@ +/* Generated by pypy/tool/import_cffi.py */ #include #include #include "thread-test.h" diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c @@ -1,3 +1,4 @@ +/* Generated by pypy/tool/import_cffi.py */ #include #include #include "thread-test.h" diff --git a/pypy/tool/import_cffi.py b/pypy/tool/import_cffi.py --- a/pypy/tool/import_cffi.py +++ b/pypy/tool/import_cffi.py @@ -7,11 +7,18 @@ import sys, py -def mangle(lines): - yield "# Generated by 
pypy/tool/import_cffi.py\n" - for line in lines: - line = line.replace('from testing', 'from pypy.module.test_lib_pypy.cffi_tests') - yield line +def mangle(lines, ext): + if ext == '.py': + yield "# Generated by pypy/tool/import_cffi.py\n" + for line in lines: + line = line.replace('from testing', 'from pypy.module.test_lib_pypy.cffi_tests') + yield line + elif ext in ('.c', '.h'): + yield "/* Generated by pypy/tool/import_cffi.py */\n" + for line in lines: + yield line + else: + raise AssertionError(ext) def main(cffi_dir): cffi_dir = py.path.local(cffi_dir) @@ -23,10 +30,12 @@ for p in (list(cffi_dir.join('cffi').visit(fil='*.py')) + list(cffi_dir.join('cffi').visit(fil='*.h'))): cffi_dest.join('..', p.relto(cffi_dir)).write(p.read()) - for p in cffi_dir.join('testing').visit(fil='*.py'): + for p in (list(cffi_dir.join('testing').visit(fil='*.py')) + + list(cffi_dir.join('testing').visit(fil='*.h')) + + list(cffi_dir.join('testing').visit(fil='*.c'))): path = test_dest.join(p.relto(cffi_dir.join('testing'))) path.join('..').ensure(dir=1) - path.write(''.join(mangle(p.readlines()))) + path.write(''.join(mangle(p.readlines(), p.ext))) if __name__ == '__main__': if len(sys.argv) != 2: From pypy.commits at gmail.com Tue Mar 1 04:39:11 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 01 Mar 2016 01:39:11 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: start writing a test Message-ID: <56d5633f.a2afc20a.98948.3261@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82629:aaad0add05ac Date: 2016-03-01 10:38 +0100 http://bitbucket.org/pypy/pypy/changeset/aaad0add05ac/ Log: start writing a test diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -28,6 +28,22 @@ assert l[0].getarg(1) is i1 def test_rd_snapshot(self): + class JitCode(object): + def __init__(self, index): + self.index = index + + class FakeFrame(object): + parent_resumedata_frame_info_list = None + parent_resumedata_snapshot = None + + def __init__(self, pc, jitcode, boxes): + self.pc = pc + self.jitcode = jitcode + self.boxes = boxes + + def get_list_of_active_boxes(self, flag): + return self.boxes + i0, i1 = InputArgInt(), InputArgInt() t = Trace([i0, i1]) add = t.record_op(rop.INT_ADD, [i0, i1]) @@ -35,10 +51,9 @@ # now we write rd_snapshot and friends virtualizable_boxes = [] virutalref_boxes = [] - framestack = [] - framestack.xxx + framestack = [FakeFrame(1, JitCode(2), [i0, i1])] resume.capture_resumedata(framestack, virtualizable_boxes, virutalref_boxes, t) (i0, i1), l = self.unpack(t) assert l[1].opnum == rop.GUARD_FALSE - assert l[1].rd_snapshot == [i0, i1] \ No newline at end of file + assert l[1].rd_snapshot == [i0, i1] From pypy.commits at gmail.com Tue Mar 1 06:44:05 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 01 Mar 2016 03:44:05 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: undo some of the changes of memop-simplify3 and pass length to the length parameter instead of byte size Message-ID: <56d58085.86351c0a.4fbf3.0617@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82630:2ea190038b88 Date: 2016-03-01 12:43 +0100 http://bitbucket.org/pypy/pypy/changeset/2ea190038b88/ Log: undo some of the changes of memop-simplify3 and pass length to the length parameter instead of byte size diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py 
--- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -204,15 +204,15 @@ NOT_SIGNED = 0 CINT_ZERO = ConstInt(0) opnum = op.getopnum() - if opnum == rop.CALL_MALLOC_NURSERY_VARSIZE: - v_length = op.getarg(2) - scale = op.getarg(1).getint() - if scale not in self.cpu.load_supported_factors: - scale, offset, v_length = \ - self._emit_mul_if_factor_offset_not_supported(v_length, scale, 0) - op.setarg(1, ConstInt(scale)) - op.setarg(2, v_length) - elif op.is_getarrayitem() or \ + #if opnum == rop.CALL_MALLOC_NURSERY_VARSIZE: + # v_length = op.getarg(2) + # scale = op.getarg(1).getint() + # if scale not in self.cpu.load_supported_factors: + # scale, offset, v_length = \ + # self._emit_mul_if_factor_offset_not_supported(v_length, scale, 0) + # op.setarg(1, ConstInt(scale)) + # op.setarg(2, v_length) + if op.is_getarrayitem() or \ opnum in (rop.GETARRAYITEM_RAW_I, rop.GETARRAYITEM_RAW_F): self.handle_getarrayitem(op) @@ -793,12 +793,12 @@ arraydescr.lendescr.offset != gc_descr.standard_array_length_ofs)): return False self.emitting_an_operation_that_can_collect() - scale = itemsize - if scale not in self.cpu.load_supported_factors: - scale, offset, v_length = \ - self._emit_mul_if_factor_offset_not_supported(v_length, scale, 0) + #scale = itemsize + #if scale not in self.cpu.load_supported_factors: + # scale, offset, v_length = \ + # self._emit_mul_if_factor_offset_not_supported(v_length, scale, 0) op = ResOperation(rop.CALL_MALLOC_NURSERY_VARSIZE, - [ConstInt(kind), ConstInt(scale), v_length], + [ConstInt(kind), ConstInt(itemsize), v_length], descr=arraydescr) self.replace_op_with(v_result, op) self.emit_op(op) diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -432,7 +432,7 @@ return mc.materialize(self.cpu, []) def _build_malloc_slowpath(self, kind): - """ While arriving on slowpath, we have a gcmap in SCRATCH. + """ While arriving on slowpath, we have a gcmap in r1. The arguments are passed in r.RES and r.RSZ, as follows: kind == 'fixed': nursery_head in r.RES and the size in r.RSZ - r.RES. @@ -440,7 +440,7 @@ kind == 'str/unicode': length of the string to allocate in r.RES. kind == 'var': itemsize in r.RES, length to allocate in r.RSZ, - and tid in r.SCRATCH2. + and tid in r.r0. This function must preserve all registers apart from r.RES and r.RSZ. On return, SCRATCH must contain the address of nursery_free. 
@@ -480,7 +480,7 @@ # arguments to the called function are [itemsize, tid, length] # itemsize is already in r2 mc.LGR(r.r4, r.RSZ) # length - mc.LGR(r.r3, r.SCRATCH2) # tid + mc.LGR(r.r3, r.r0) # tid # Do the call addr = rffi.cast(lltype.Signed, addr) @@ -1355,6 +1355,26 @@ mc.STG(r.RSZ, l.addr(0, r.r1)) # store into nursery_free + SIZE2SCALE = dict([(1<<_i, _i) for _i in range(32)]) + def _multiply_by_constant(self, loc, multiply_by, scratch_loc): + # XXX should die together with _apply_scale() but can't because + # of emit_zero_array() and malloc_cond_varsize() at the moment + assert loc.is_reg() + if multiply_by == 1: + return loc + try: + scale = self.SIZE2SCALE[multiply_by] + except KeyError: + if check_imm_value(multiply_by, lower_bound=-2**31, upper_bound=2**31-1): + self.mc.LGR(scratch_loc, loc) + self.mc.MSGFI(scratch_loc, l.imm(multiply_by)) + else: + self.mc.load_imm(scratch_loc, multiply_by) + self.mc.MSGR(scratch_loc, loc) + else: + self.mc.SLLG(scratch_loc, loc, l.addr(scale)) + return scratch_loc + def malloc_cond_varsize(self, kind, nursery_free_adr, nursery_top_adr, lengthloc, itemsize, maxlength, gcmap, arraydescr): @@ -1381,8 +1401,11 @@ assert check_imm_value(diff) mc.load_imm(r.r1, nursery_free_adr) - # no shifting needed, lengthloc is already multiplied by the - # item size + varsizeloc = self._multiply_by_constant(lengthloc, itemsize, r.RSZ) + + # varsizeloc is either RSZ here, or equal to lengthloc if + # itemsize == 1. It is the size of the variable part of the + # array, in bytes. mc.load(r.RES, r.r1, 0) # load nursery_free mc.load(r.SCRATCH2, r.r1, diff) # load nursery_top @@ -1392,7 +1415,7 @@ force_realignment = (itemsize % WORD) != 0 if force_realignment: constsize += WORD - 1 - mc.AGHIK(r.RSZ, lengthloc, l.imm(constsize)) + mc.AGHIK(r.RSZ, varsizeloc, l.imm(constsize)) if force_realignment: # "& ~(WORD-1)" mc.RISBG(r.RSZ, r.RSZ, l.imm(0), l.imm(0x80 | 60), l.imm(0)) diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -454,8 +454,7 @@ [lengthloc] = arglocs arraydescr = op.getdescr() itemsize = op.getarg(1).getint() - assert itemsize == 1 - maxlength = (gc_ll_descr.max_size_of_young_obj - WORD * 2) + maxlength = (gc_ll_descr.max_size_of_young_obj - WORD * 2) // itemsize gcmap = regalloc.get_gcmap([r.RES, r.RSZ]) self.malloc_cond_varsize( op.getarg(0).getint(), From pypy.commits at gmail.com Tue Mar 1 07:33:48 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 01 Mar 2016 04:33:48 -0800 (PST) Subject: [pypy-commit] pypy default: fix typo Message-ID: <56d58c2c.0775c20a.81e6.68e8@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82631:8855d18e3957 Date: 2016-02-27 11:37 +0100 http://bitbucket.org/pypy/pypy/changeset/8855d18e3957/ Log: fix typo diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -1076,7 +1076,7 @@ if self is w_other.strategy: strategy = self if w_set.length() > w_other.length(): - # swap operants + # swap operands storage = self._intersect_unwrapped(w_other, w_set) else: storage = self._intersect_unwrapped(w_set, w_other) @@ -1086,7 +1086,7 @@ else: strategy = self.space.fromcache(ObjectSetStrategy) if w_set.length() > w_other.length(): - # swap operants + # swap operands storage = w_other.strategy._intersect_wrapped(w_other, w_set) else: storage = self._intersect_wrapped(w_set, w_other) 
From pypy.commits at gmail.com Tue Mar 1 07:33:51 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 01 Mar 2016 04:33:51 -0800 (PST) Subject: [pypy-commit] pypy default: remove a guard from str.count Message-ID: <56d58c2f.6718c20a.34fb4.6a26@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82632:8d422c65fe9a Date: 2016-02-27 16:52 +0100 http://bitbucket.org/pypy/pypy/changeset/8d422c65fe9a/ Log: remove a guard from str.count diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -717,10 +717,7 @@ return cls.ll_count_char(s1, s2.chars[0], start, end) res = cls.ll_search(s1, s2, start, end, FAST_COUNT) - # For a few cases ll_search can return -1 to indicate an "impossible" - # condition for a string match, count just returns 0 in these cases. - if res < 0: - res = 0 + assert res >= 0 return res @staticmethod @@ -741,6 +738,8 @@ w = n - m if w < 0: + if mode == FAST_COUNT: + return 0 return -1 mlast = m - 1 From pypy.commits at gmail.com Tue Mar 1 07:33:52 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 01 Mar 2016 04:33:52 -0800 (PST) Subject: [pypy-commit] pypy default: merge Message-ID: <56d58c30.e6bbc20a.c6f05.7a6f@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82633:cb435db64836 Date: 2016-03-01 13:33 +0100 http://bitbucket.org/pypy/pypy/changeset/cb435db64836/ Log: merge diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -1076,7 +1076,7 @@ if self is w_other.strategy: strategy = self if w_set.length() > w_other.length(): - # swap operants + # swap operands storage = self._intersect_unwrapped(w_other, w_set) else: storage = self._intersect_unwrapped(w_set, w_other) @@ -1086,7 +1086,7 @@ else: strategy = self.space.fromcache(ObjectSetStrategy) if w_set.length() > w_other.length(): - # swap operants + # swap operands storage = w_other.strategy._intersect_wrapped(w_other, w_set) else: storage = self._intersect_wrapped(w_set, w_other) diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -717,10 +717,7 @@ return cls.ll_count_char(s1, s2.chars[0], start, end) res = cls.ll_search(s1, s2, start, end, FAST_COUNT) - # For a few cases ll_search can return -1 to indicate an "impossible" - # condition for a string match, count just returns 0 in these cases. 
- if res < 0: - res = 0 + assert res >= 0 return res @staticmethod @@ -741,6 +738,8 @@ w = n - m if w < 0: + if mode == FAST_COUNT: + return 0 return -1 mlast = m - 1 From pypy.commits at gmail.com Tue Mar 1 08:05:52 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 01 Mar 2016 05:05:52 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: merge default Message-ID: <56d593b0.e853c20a.e2a2a.ffff858e@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82635:3d2c4e4fc169 Date: 2016-03-01 14:04 +0100 http://bitbucket.org/pypy/pypy/changeset/3d2c4e4fc169/ Log: merge default diff too long, truncating to 2000 out of 12385 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -22,6 +22,7 @@ ^pypy/module/cpyext/test/.+\.obj$ ^pypy/module/cpyext/test/.+\.manifest$ ^pypy/module/test_lib_pypy/ctypes_tests/.+\.o$ +^pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test\.o$ ^pypy/module/cppyy/src/.+\.o$ ^pypy/module/cppyy/bench/.+\.so$ ^pypy/module/cppyy/bench/.+\.root$ @@ -35,7 +36,6 @@ ^pypy/module/test_lib_pypy/cffi_tests/__pycache__.+$ ^pypy/doc/.+\.html$ ^pypy/doc/config/.+\.rst$ -^pypy/doc/basicblock\.asc$ ^pypy/doc/.+\.svninfo$ ^rpython/translator/c/src/libffi_msvc/.+\.obj$ ^rpython/translator/c/src/libffi_msvc/.+\.dll$ @@ -45,53 +45,33 @@ ^rpython/translator/c/src/cjkcodecs/.+\.obj$ ^rpython/translator/c/src/stacklet/.+\.o$ ^rpython/translator/c/src/.+\.o$ -^rpython/translator/jvm/\.project$ -^rpython/translator/jvm/\.classpath$ -^rpython/translator/jvm/eclipse-bin$ -^rpython/translator/jvm/src/pypy/.+\.class$ -^rpython/translator/benchmark/docutils$ -^rpython/translator/benchmark/templess$ -^rpython/translator/benchmark/gadfly$ -^rpython/translator/benchmark/mako$ -^rpython/translator/benchmark/bench-custom\.benchmark_result$ -^rpython/translator/benchmark/shootout_benchmarks$ +^rpython/translator/llvm/.+\.so$ ^rpython/translator/goal/target.+-c$ ^rpython/translator/goal/.+\.exe$ ^rpython/translator/goal/.+\.dll$ ^pypy/goal/pypy-translation-snapshot$ ^pypy/goal/pypy-c -^pypy/goal/pypy-jvm -^pypy/goal/pypy-jvm.jar ^pypy/goal/.+\.exe$ ^pypy/goal/.+\.dll$ ^pypy/goal/.+\.lib$ ^pypy/_cache$ -^pypy/doc/statistic/.+\.html$ -^pypy/doc/statistic/.+\.eps$ -^pypy/doc/statistic/.+\.pdf$ -^rpython/translator/cli/src/pypylib\.dll$ -^rpython/translator/cli/src/query\.exe$ -^rpython/translator/cli/src/main\.exe$ +^lib-python/2.7/lib2to3/.+\.pickle$ ^lib_pypy/__pycache__$ ^lib_pypy/ctypes_config_cache/_.+_cache\.py$ ^lib_pypy/ctypes_config_cache/_.+_.+_\.py$ ^lib_pypy/_libmpdec/.+.o$ -^rpython/translator/cli/query-descriptions$ ^pypy/doc/discussion/.+\.html$ ^include/.+\.h$ ^include/.+\.inl$ ^pypy/doc/_build/.*$ ^pypy/doc/config/.+\.html$ ^pypy/doc/config/style\.css$ -^pypy/doc/jit/.+\.html$ -^pypy/doc/jit/style\.css$ ^pypy/doc/image/lattice1\.png$ ^pypy/doc/image/lattice2\.png$ ^pypy/doc/image/lattice3\.png$ ^pypy/doc/image/stackless_informal\.png$ ^pypy/doc/image/parsing_example.+\.png$ ^rpython/doc/_build/.*$ -^pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test\.o$ ^compiled ^.git/ ^release/ diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -41,29 +41,29 @@ Amaury Forgeot d'Arc Antonio Cuni Samuele Pedroni + Matti Picus Alex Gaynor Brian Kearns - Matti Picus Philip Jenvey Michael Hudson + Ronan Lamy David Schneider + Manuel Jacob Holger Krekel Christian Tismer Hakan Ardo - Manuel Jacob - Ronan Lamy Benjamin Peterson + Richard Plangger Anders Chrigstrom Eric van Riet Paap Wim Lavrijsen - Richard Plangger Richard Emslie Alexander Schremmer Dan 
Villiom Podlaski Christiansen + Remi Meier Lukas Diekmann Sven Hager Anders Lehmann - Remi Meier Aurelien Campeas Niklaus Haldimann Camillo Bruni @@ -72,8 +72,8 @@ Romain Guillebert Leonardo Santagada Seo Sanghyeon + Ronny Pfannschmidt Justin Peel - Ronny Pfannschmidt David Edelsohn Anders Hammarquist Jakub Gustak @@ -95,6 +95,7 @@ Tyler Wade Michael Foord Stephan Diehl + Vincent Legoll Stefan Schwarzer Valentino Volonghi Tomek Meka @@ -105,9 +106,9 @@ Jean-Paul Calderone Timo Paulssen Squeaky + Marius Gedminas Alexandre Fayolle Simon Burton - Marius Gedminas Martin Matusiak Konstantin Lopuhin Wenzhu Man @@ -116,16 +117,20 @@ Ivan Sichmann Freitas Greg Price Dario Bertini + Stefano Rivera Mark Pearse Simon Cross Andreas Stührk - Stefano Rivera + Edd Barrett Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Jeremy Thurgood Paweł Piotr Przeradowski + Spenser Bauman Paul deGrandis Ilya Osadchiy + marky1991 Tobias Oberstein Adrian Kuhn Boris Feigin @@ -134,14 +139,12 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Edd Barrett + Tobias Pape Wanja Saatkamp Gerald Klix Mike Blume - Tobias Pape Oscar Nierstrasz Stefan H. Muller - Jeremy Thurgood Rami Chowdhury Eugene Oden Henry Mason @@ -153,6 +156,8 @@ Lukas Renggli Guenter Jantzen Ned Batchelder + Tim Felgentreff + Anton Gulenko Amit Regmi Ben Young Nicolas Chauvat @@ -162,12 +167,12 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira - Tim Felgentreff + Yichao Yu Rocco Moretti Gintautas Miliauskas Michael Twomey Lucian Branescu Mihaila - Yichao Yu + Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -191,33 +196,33 @@ Stanislaw Halik Mikael Schönenberg Berkin Ilbeyi - Elmo M?ntynen + Elmo Mäntynen + Faye Zhao Jonathan David Riehl Anders Qvist Corbin Simpson Chirag Jadwani Beatrice During Alex Perry - Vincent Legoll + Vaibhav Sood Alan McIntyre - Spenser Bauman + William Leslie Alexander Sedov Attila Gobi + Jasper.Schulz Christopher Pope - Devin Jeanpierre - Vaibhav Sood Christian Tismer Marc Abramowitz Dan Stromberg Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan + Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang - Anton Gulenko Gabriel Lukas Vacek Andrew Dalke @@ -225,6 +230,7 @@ Jakub Stasiak Nathan Taylor Vladimir Kryachko + Omer Katz Jacek Generowicz Alejandro J. Cura Jacob Oscarson @@ -239,6 +245,7 @@ Lars Wassermann Philipp Rustemeuer Henrik Vendelbo + Richard Lancaster Dan Buch Miguel de Val Borro Artur Lisiecki @@ -250,18 +257,18 @@ Tomo Cocoa Kim Jin Su Toni Mattis + Amber Brown Lucas Stadler Julian Berman Markus Holtermann roberto at goyle Yury V. 
Zaytsev Anna Katrina Dominguez - William Leslie Bobby Impollonia - Faye Zhao timo at eistee.fritz.box Andrew Thompson Yusei Tahara + Aaron Tubbs Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado @@ -273,6 +280,7 @@ Christopher Armstrong Michael Hudson-Doyle Anders Sigfridsson + Nikolay Zinov Yasir Suhail Jason Michalski rafalgalczynski at gmail.com @@ -282,6 +290,7 @@ Gustavo Niemeyer Stephan Busemann Rafał Gałczyński + Matt Bogosian Christian Muirhead Berker Peksag James Lan @@ -316,9 +325,9 @@ Stefan Marr jiaaro Mads Kiilerich - Richard Lancaster opassembler.py Antony Lee + Jason Madden Yaroslav Fedevych Jim Hunziker Markus Unterwaditzer @@ -327,6 +336,7 @@ squeaky Zearin soareschen + Jonas Pfannschmidt Kurt Griffiths Mike Bayer Matthew Miller diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py --- a/lib-python/2.7/distutils/command/build_ext.py +++ b/lib-python/2.7/distutils/command/build_ext.py @@ -188,7 +188,7 @@ # the 'libs' directory is for binary installs - we assume that # must be the *native* platform. But we don't really support # cross-compiling via a binary install anyway, so we let it go. - self.library_dirs.append(os.path.join(sys.exec_prefix, 'include')) + self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs')) if self.debug: self.build_temp = os.path.join(self.build_temp, "Debug") else: diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -7,6 +7,7 @@ content = fid.read() # from cffi's Verifier() key = '\x00'.join([sys.version[:3], content]) + key += 'cpyext-gc-support-2' # this branch requires recompilation! if sys.version_info >= (3,): key = key.encode('utf-8') k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) @@ -62,7 +63,7 @@ if sys.platform == 'win32': # XXX pyconfig.h uses a pragma to link to the import library, # which is currently python27.lib - library = os.path.join(thisdir, '..', 'include', 'python27') + library = os.path.join(thisdir, '..', 'libs', 'python27') if not os.path.exists(library + '.lib'): # For a local translation or nightly build library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python27') diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -550,21 +550,24 @@ lst.append(value) # if '__pypy__' in sys.builtin_module_names: + import os if sys.platform == "win32": - # we need 'libpypy-c.lib'. Right now, distributions of - # pypy contain it as 'include/python27.lib'. You need - # to manually copy it back to 'libpypy-c.lib'. XXX Will - # be fixed in the next pypy release. - pythonlib = "libpypy-c" + # we need 'libpypy-c.lib'. Current distributions of + # pypy (>= 4.1) contain it as 'libs/python27.lib'. + pythonlib = "python27" if hasattr(sys, 'prefix'): - ensure('library_dirs', sys.prefix) + ensure('library_dirs', os.path.join(sys.prefix, 'libs')) else: # we need 'libpypy-c.{so,dylib}', which should be by - # default located in 'sys.prefix/bin' + # default located in 'sys.prefix/bin' for installed + # systems. pythonlib = "pypy-c" if hasattr(sys, 'prefix'): - import os ensure('library_dirs', os.path.join(sys.prefix, 'bin')) + # On uninstalled pypy's, the libpypy-c is typically found in + # .../pypy/goal/. 
+ if hasattr(sys, 'prefix'): + ensure('library_dirs', os.path.join(sys.prefix, 'pypy', 'goal')) else: if sys.platform == "win32": template = "python%d%d" diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -36,9 +36,15 @@ "cStringIO", "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "micronumpy", "_continuation", "_cffi_backend", - "_csv", "cppyy", "_pypyjson", "_vmprof", + "_csv", "cppyy", "_pypyjson", ]) +from rpython.jit.backend import detect_cpu +try: + if detect_cpu.autodetect().startswith('x86'): + working_modules.add('_vmprof') +except detect_cpu.ProcessorAutodetectError: + pass translation_modules = default_modules.copy() translation_modules.update([ @@ -163,12 +169,8 @@ cmdline="--translationmodules", suggests=[("objspace.allworkingmodules", False)]), - BoolOption("usepycfiles", "Write and read pyc files when importing", - default=True), - BoolOption("lonepycfiles", "Import pyc files with no matching py file", - default=False, - requires=[("objspace.usepycfiles", True)]), + default=False), StrOption("soabi", "Tag to differentiate extension modules built for different Python interpreters", diff --git a/pypy/doc/discussion/rawrefcount.rst b/pypy/doc/discussion/rawrefcount.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/discussion/rawrefcount.rst @@ -0,0 +1,158 @@ +====================== +Rawrefcount and the GC +====================== + + +GC Interface +------------ + +"PyObject" is a raw structure with at least two fields, ob_refcnt and +ob_pypy_link. The ob_refcnt is the reference counter as used on +CPython. If the PyObject structure is linked to a live PyPy object, +its current address is stored in ob_pypy_link and ob_refcnt is bumped +by either the constant REFCNT_FROM_PYPY, or the constant +REFCNT_FROM_PYPY_LIGHT (== REFCNT_FROM_PYPY + SOME_HUGE_VALUE) +(to mean "light finalizer"). + +Most PyPy objects exist outside cpyext, and conversely in cpyext it is +possible that a lot of PyObjects exist without being seen by the rest +of PyPy. At the interface, however, we can "link" a PyPy object and a +PyObject. There are two kinds of link: + +rawrefcount.create_link_pypy(p, ob) + + Makes a link between an exising object gcref 'p' and a newly + allocated PyObject structure 'ob'. ob->ob_refcnt must be + initialized to either REFCNT_FROM_PYPY, or + REFCNT_FROM_PYPY_LIGHT. (The second case is an optimization: + when the GC finds the PyPy object and PyObject no longer + referenced, it can just free() the PyObject.) + +rawrefcount.create_link_pyobj(p, ob) + + Makes a link from an existing PyObject structure 'ob' to a newly + allocated W_CPyExtPlaceHolderObject 'p'. You must also add + REFCNT_FROM_PYPY to ob->ob_refcnt. For cases where the PyObject + contains all the data, and the PyPy object is just a proxy. The + W_CPyExtPlaceHolderObject should have only a field that contains + the address of the PyObject, but that's outside the scope of the + GC. + +rawrefcount.from_obj(p) + + If there is a link from object 'p' made with create_link_pypy(), + returns the corresponding 'ob'. Otherwise, returns NULL. + +rawrefcount.to_obj(Class, ob) + + Returns ob->ob_pypy_link, cast to an instance of 'Class'. + + +Collection logic +---------------- + +Objects existing purely on the C side have ob->ob_pypy_link == 0; +these are purely reference counted. 
On the other hand, if +ob->ob_pypy_link != 0, then ob->ob_refcnt is at least REFCNT_FROM_PYPY +and the object is part of a "link". + +The idea is that links whose 'p' is not reachable from other PyPy +objects *and* whose 'ob->ob_refcnt' is REFCNT_FROM_PYPY or +REFCNT_FROM_PYPY_LIGHT are the ones who die. But it is more messy +because PyObjects still (usually) need to have a tp_dealloc called, +and this cannot occur immediately (and can do random things like +accessing other references this object points to, or resurrecting the +object). + +Let P = list of links created with rawrefcount.create_link_pypy() +and O = list of links created with rawrefcount.create_link_pyobj(). +The PyPy objects in the list O are all W_CPyExtPlaceHolderObject: all +the data is in the PyObjects, and all outside references (if any) are +in C, as "PyObject *" fields. + +So, during the collection we do this about P links: + + for (p, ob) in P: + if ob->ob_refcnt != REFCNT_FROM_PYPY + and ob->ob_refcnt != REFCNT_FROM_PYPY_LIGHT: + mark 'p' as surviving, as well as all its dependencies + +At the end of the collection, the P and O links are both handled like +this: + + for (p, ob) in P + O: + if p is not surviving: # even if 'ob' might be surviving + unlink p and ob + if ob->ob_refcnt == REFCNT_FROM_PYPY_LIGHT: + free(ob) + elif ob->ob_refcnt > REFCNT_FROM_PYPY_LIGHT: + ob->ob_refcnt -= REFCNT_FROM_PYPY_LIGHT + else: + ob->ob_refcnt -= REFCNT_FROM_PYPY + if ob->ob_refcnt == 0: + invoke _Py_Dealloc(ob) later, outside the GC + + +GC Implementation +----------------- + +We need two copies of both the P list and O list, for young or old +objects. All four lists can be regular AddressLists of 'ob' objects. + +We also need an AddressDict mapping 'p' to 'ob' for all links in the P +list, and update it when PyPy objects move. + + +Further notes +------------- + +XXX +XXX the rest is the ideal world, but as a first step, we'll look +XXX for the minimal tweaks needed to adapt the existing cpyext +XXX + +For objects that are opaque in CPython, like , we always create +a PyPy object, and then when needed we make an empty PyObject and +attach it with create_link_pypy()/REFCNT_FROM_PYPY_LIGHT. + +For int and float objects, the corresponding PyObjects contain a +"long" or "double" field too. We link them with create_link_pypy() +and we can use REFCNT_FROM_PYPY_LIGHT too: 'tp_dealloc' doesn't +need to be called, and instead just calling free() is fine. + +For objects, we need both a PyPy and a PyObject side. These +are made with create_link_pypy()/REFCNT_FROM_PYPY. + +For custom PyXxxObjects allocated from the C extension module, we +need create_link_pyobj(). + +For str or unicode objects coming from PyPy, we use +create_link_pypy()/REFCNT_FROM_PYPY_LIGHT with a PyObject +preallocated with the size of the string. We copy the string +lazily into that area if PyString_AS_STRING() is called. + +For , , or objects in the C extension +module, we first allocate it as only a PyObject, which supports +mutation of the data from C, like CPython. When it is exported to +PyPy we could make a W_CPyExtPlaceHolderObject with +create_link_pyobj(). + +For tuple objects coming from PyPy, if they are not specialized, +then the PyPy side holds a regular reference to the items. Then we +can allocate a PyTupleObject and store in it borrowed PyObject +pointers to the items. Such a case is created with +create_link_pypy()/REFCNT_FROM_PYPY_LIGHT. If it is specialized, +then it doesn't work because the items are created just-in-time on the +PyPy side.
In this case, the PyTupleObject needs to hold real +references to the PyObject items, and we use create_link_pypy()/ +REFCNT_FROM_PYPY. In all cases, we have a C array of PyObjects +that we can directly return from PySequence_Fast_ITEMS, PyTuple_ITEMS, +PyTuple_GetItem, and so on. + +For objects coming from PyPy, we can use a cpyext list +strategy. The list turns into a PyListObject, as if it had been +allocated from C in the first place. The special strategy can hold +(only) a direct reference to the PyListObject, and we can use either +create_link_pyobj() or create_link_pypy() (to be decided). +PySequence_Fast_ITEMS then works for lists too, and PyList_GetItem +can return a borrowed reference, and so on. diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -1,5 +1,20 @@ -Making a PyPy Release -===================== +The PyPy Release Process +======================== + +Release Policy +++++++++++++++ + +We try to create a stable release a few times a year. These are released on +a branch named like release-2.x or release-4.x, and each release is tagged, +for instance release-4.0.1. + +After release, inevitably there are bug fixes. It is the responsibility of +the commiter who fixes a bug to make sure this fix is on the release branch, +so that we can then create a tagged bug-fix release, which will hopefully +happen more often than stable releases. + +How to Create a PyPy Release +++++++++++++++++++++++++++++ Overview -------- diff --git a/pypy/doc/tool/makecontributor.py b/pypy/doc/tool/makecontributor.py --- a/pypy/doc/tool/makecontributor.py +++ b/pypy/doc/tool/makecontributor.py @@ -72,6 +72,7 @@ 'Anton Gulenko':['anton gulenko', 'anton_gulenko'], 'Richard Lancaster':['richardlancaster'], 'William Leslie':['William ML Leslie'], + 'Spenser Bauman':['Spenser Andrew Bauman'], } alias_map = {} diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -128,6 +128,7 @@ Fix SSL tests by importing cpython's patch + .. branch: remove-getfield-pure Remove pure variants of ``getfield_gc_*`` operations from the JIT. Relevant @@ -152,3 +153,34 @@ Seperate structmember.h from Python.h Also enhance creating api functions to specify which header file they appear in (previously only pypy_decl.h) + +.. branch: llimpl + +Refactor register_external(), remove running_on_llinterp mechanism and +apply sandbox transform on externals at the end of annotation. + +.. branch: cffi-embedding-win32 + +.. branch: windows-vmprof-support + +vmprof should work on Windows. + + +.. branch: reorder-map-attributes + +When creating instances and adding attributes in several different orders +depending on some condition, the JIT would create too much code. This is now +fixed. + +.. branch: cpyext-gc-support-2 + +Improve CPython C API support, which means lxml now runs unmodified +(after removing pypy hacks, pending pull request) + +.. branch: look-inside-tuple-hash + +Look inside tuple hash, improving mdp benchmark + +.. branch: vlen-resume + +Compress resume data, saving 10-20% of memory consumed by the JIT \ No newline at end of file diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -239,6 +239,9 @@ raise Exception("Cannot use the --output option with PyPy " "when --shared is on (it is by default). 
" "See issue #1971.") + if sys.platform == 'win32': + config.translation.libname = '..\\..\\libs\\python27.lib' + thisdir.join('..', '..', 'libs').ensure(dir=1) if config.translation.thread: config.objspace.usemodules.thread = True @@ -274,7 +277,6 @@ if config.translation.sandbox: config.objspace.lonepycfiles = False - config.objspace.usepycfiles = False config.translating = True diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -27,7 +27,7 @@ class W_Root(object): """This is the abstract root class of all wrapped objects that live in a 'normal' object space like StdObjSpace.""" - __slots__ = () + __slots__ = ('__weakref__',) user_overridden_class = False def getdict(self, space): diff --git a/pypy/interpreter/pyparser/pytokenizer.py b/pypy/interpreter/pyparser/pytokenizer.py --- a/pypy/interpreter/pyparser/pytokenizer.py +++ b/pypy/interpreter/pyparser/pytokenizer.py @@ -91,6 +91,7 @@ strstart = (0, 0, "") for line in lines: lnum = lnum + 1 + line = universal_newline(line) pos, max = 0, len(line) if contstr: @@ -259,3 +260,14 @@ token_list.append((tokens.ENDMARKER, '', lnum, pos, line)) return token_list + + +def universal_newline(line): + # show annotator that indexes below are non-negative + line_len_m2 = len(line) - 2 + if line_len_m2 >= 0 and line[-2] == '\r' and line[-1] == '\n': + return line[:line_len_m2] + '\n' + line_len_m1 = len(line) - 1 + if line_len_m1 >= 0 and line[-1] == '\r': + return line[:line_len_m1] + '\n' + return line diff --git a/pypy/interpreter/pyparser/test/test_pyparse.py b/pypy/interpreter/pyparser/test/test_pyparse.py --- a/pypy/interpreter/pyparser/test/test_pyparse.py +++ b/pypy/interpreter/pyparser/test/test_pyparse.py @@ -158,3 +158,10 @@ def test_print_function(self): self.parse("from __future__ import print_function\nx = print\n") + + def test_universal_newlines(self): + fmt = 'stuff = """hello%sworld"""' + expected_tree = self.parse(fmt % '\n') + for linefeed in ["\r\n","\r"]: + tree = self.parse(fmt % linefeed) + assert expected_tree == tree diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -156,20 +156,6 @@ get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo" _subclass_cache = {} -def enum_interplevel_subclasses(config, cls): - """Return a list of all the extra interp-level subclasses of 'cls' that - can be built by get_unique_interplevel_subclass().""" - result = [] - for flag1 in (False, True): - for flag2 in (False, True): - for flag3 in (False, True): - for flag4 in (False, True): - result.append(get_unique_interplevel_subclass( - config, cls, flag1, flag2, flag3, flag4)) - result = dict.fromkeys(result) - assert len(result) <= 6 - return result.keys() - def _getusercls(config, cls, wants_dict, wants_slots, wants_del, weakrefable): typedef = cls.typedef if wants_dict and typedef.hasdict: @@ -262,7 +248,7 @@ def user_setup(self, space, w_subtype): self.space = space self.w__class__ = w_subtype - self.user_setup_slots(w_subtype.nslots) + self.user_setup_slots(w_subtype.layout.nslots) def user_setup_slots(self, nslots): assert nslots == 0 diff --git a/pypy/module/_cffi_backend/embedding.py b/pypy/module/_cffi_backend/embedding.py --- a/pypy/module/_cffi_backend/embedding.py +++ b/pypy/module/_cffi_backend/embedding.py @@ -57,7 +57,7 @@ # pypy_init_embedded_cffi_module(). 
if not glob.patched_sys: space.appexec([], """(): - import os + import os, sys sys.stdin = sys.__stdin__ = os.fdopen(0, 'rb', 0) sys.stdout = sys.__stdout__ = os.fdopen(1, 'wb', 0) sys.stderr = sys.__stderr__ = os.fdopen(2, 'wb', 0) diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py --- a/pypy/module/_vmprof/test/test__vmprof.py +++ b/pypy/module/_vmprof/test/test__vmprof.py @@ -5,14 +5,15 @@ class AppTestVMProf(object): def setup_class(cls): cls.space = gettestobjspace(usemodules=['_vmprof', 'struct']) - cls.tmpfile = udir.join('test__vmprof.1').open('wb') - cls.w_tmpfileno = cls.space.wrap(cls.tmpfile.fileno()) - cls.w_tmpfilename = cls.space.wrap(cls.tmpfile.name) - cls.tmpfile2 = udir.join('test__vmprof.2').open('wb') - cls.w_tmpfileno2 = cls.space.wrap(cls.tmpfile2.fileno()) - cls.w_tmpfilename2 = cls.space.wrap(cls.tmpfile2.name) + cls.w_tmpfilename = cls.space.wrap(str(udir.join('test__vmprof.1'))) + cls.w_tmpfilename2 = cls.space.wrap(str(udir.join('test__vmprof.2'))) def test_import_vmprof(self): + tmpfile = open(self.tmpfilename, 'wb') + tmpfileno = tmpfile.fileno() + tmpfile2 = open(self.tmpfilename2, 'wb') + tmpfileno2 = tmpfile2.fileno() + import struct, sys WORD = struct.calcsize('l') @@ -45,7 +46,7 @@ return count import _vmprof - _vmprof.enable(self.tmpfileno, 0.01) + _vmprof.enable(tmpfileno, 0.01) _vmprof.disable() s = open(self.tmpfilename, 'rb').read() no_of_codes = count(s) @@ -56,7 +57,7 @@ pass """ in d - _vmprof.enable(self.tmpfileno2, 0.01) + _vmprof.enable(tmpfileno2, 0.01) exec """def foo2(): pass diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -34,7 +34,7 @@ import pypy.module.cpyext.pyerrors import pypy.module.cpyext.typeobject import pypy.module.cpyext.object -import pypy.module.cpyext.stringobject +import pypy.module.cpyext.bytesobject import pypy.module.cpyext.tupleobject import pypy.module.cpyext.setobject import pypy.module.cpyext.dictobject @@ -60,7 +60,6 @@ import pypy.module.cpyext.funcobject import pypy.module.cpyext.frameobject import pypy.module.cpyext.classobject -import pypy.module.cpyext.pypyintf import pypy.module.cpyext.memoryobject import pypy.module.cpyext.codecs import pypy.module.cpyext.pyfile diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -9,7 +9,7 @@ from rpython.rtyper.tool import rffi_platform from rpython.rtyper.lltypesystem import ll2ctypes from rpython.rtyper.annlowlevel import llhelper -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, keepalive_until_here from rpython.translator import cdir from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.gensupp import NameManager @@ -30,13 +30,13 @@ from rpython.rlib.rposix import is_valid_fd, validate_fd from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.objectmodel import specialize -from rpython.rlib.exports import export_struct from pypy.module import exceptions from pypy.module.exceptions import interp_exceptions # CPython 2.4 compatibility from py.builtin import BaseException from rpython.tool.sourcetools import func_with_new_name from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rlib import rawrefcount DEBUG_WRAPPER = True @@ -194,7 +194,7 @@ class ApiFunction: def __init__(self, argtypes, restype, callable, 
error=_NOT_SPECIFIED, - c_name=None, gil=None): + c_name=None, gil=None, result_borrowed=False): self.argtypes = argtypes self.restype = restype self.functype = lltype.Ptr(lltype.FuncType(argtypes, restype)) @@ -211,17 +211,15 @@ self.argnames = argnames[1:] assert len(self.argnames) == len(self.argtypes) self.gil = gil + self.result_borrowed = result_borrowed + # + def get_llhelper(space): + return llhelper(self.functype, self.get_wrapper(space)) + self.get_llhelper = get_llhelper def _freeze_(self): return True - def get_llhelper(self, space): - llh = getattr(self, '_llhelper', None) - if llh is None: - llh = llhelper(self.functype, self.get_wrapper(space)) - self._llhelper = llh - return llh - @specialize.memo() def get_wrapper(self, space): wrapper = getattr(self, '_wrapper', None) @@ -234,7 +232,7 @@ return wrapper def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, header='pypy_decl.h', - gil=None): + gil=None, result_borrowed=False): """ Declares a function to be exported. - `argtypes`, `restype` are lltypes and describe the function signature. @@ -263,13 +261,15 @@ rffi.cast(restype, 0) == 0) def decorate(func): + func._always_inline_ = 'try' func_name = func.func_name if header is not None: c_name = None else: c_name = func_name api_function = ApiFunction(argtypes, restype, func, error, - c_name=c_name, gil=gil) + c_name=c_name, gil=gil, + result_borrowed=result_borrowed) func.api_func = api_function if header is not None: @@ -280,6 +280,10 @@ raise ValueError("function %s has no return value for exceptions" % func) def make_unwrapper(catch_exception): + # ZZZ is this whole logic really needed??? It seems to be only + # for RPython code calling PyXxx() functions directly. I would + # think that usually directly calling the function is clean + # enough now names = api_function.argnames types_names_enum_ui = unrolling_iterable(enumerate( zip(api_function.argtypes, @@ -287,56 +291,58 @@ @specialize.ll() def unwrapper(space, *args): - from pypy.module.cpyext.pyobject import Py_DecRef - from pypy.module.cpyext.pyobject import make_ref, from_ref - from pypy.module.cpyext.pyobject import Reference + from pypy.module.cpyext.pyobject import Py_DecRef, is_pyobj + from pypy.module.cpyext.pyobject import from_ref, as_pyobj newargs = () - to_decref = [] + keepalives = () assert len(args) == len(api_function.argtypes) for i, (ARG, is_wrapped) in types_names_enum_ui: input_arg = args[i] if is_PyObject(ARG) and not is_wrapped: - # build a reference - if input_arg is None: - arg = lltype.nullptr(PyObject.TO) - elif isinstance(input_arg, W_Root): - ref = make_ref(space, input_arg) - to_decref.append(ref) - arg = rffi.cast(ARG, ref) + # build a 'PyObject *' (not holding a reference) + if not is_pyobj(input_arg): + keepalives += (input_arg,) + arg = rffi.cast(ARG, as_pyobj(space, input_arg)) + else: + arg = rffi.cast(ARG, input_arg) + elif is_PyObject(ARG) and is_wrapped: + # build a W_Root, possibly from a 'PyObject *' + if is_pyobj(input_arg): + arg = from_ref(space, input_arg) else: arg = input_arg - elif is_PyObject(ARG) and is_wrapped: - # convert to a wrapped object - if input_arg is None: - arg = input_arg - elif isinstance(input_arg, W_Root): - arg = input_arg - else: - try: - arg = from_ref(space, - rffi.cast(PyObject, input_arg)) - except TypeError, e: - err = OperationError(space.w_TypeError, - space.wrap( - "could not cast arg to PyObject")) - if not catch_exception: - raise err - state = space.fromcache(State) - state.set_exception(err) - if is_PyObject(restype): - return None - 
else: - return api_function.error_value + + ## ZZZ: for is_pyobj: + ## try: + ## arg = from_ref(space, + ## rffi.cast(PyObject, input_arg)) + ## except TypeError, e: + ## err = OperationError(space.w_TypeError, + ## space.wrap( + ## "could not cast arg to PyObject")) + ## if not catch_exception: + ## raise err + ## state = space.fromcache(State) + ## state.set_exception(err) + ## if is_PyObject(restype): + ## return None + ## else: + ## return api_function.error_value else: - # convert to a wrapped object + # arg is not declared as PyObject, no magic arg = input_arg newargs += (arg, ) - try: + if not catch_exception: + try: + res = func(space, *newargs) + finally: + keepalive_until_here(*keepalives) + else: + # non-rpython variant + assert not we_are_translated() try: res = func(space, *newargs) except OperationError, e: - if not catch_exception: - raise if not hasattr(api_function, "error_value"): raise state = space.fromcache(State) @@ -345,21 +351,13 @@ return None else: return api_function.error_value - if not we_are_translated(): - got_integer = isinstance(res, (int, long, float)) - assert got_integer == expect_integer,'got %r not integer' % res - if res is None: - return None - elif isinstance(res, Reference): - return res.get_wrapped(space) - else: - return res - finally: - for arg in to_decref: - Py_DecRef(space, arg) + # 'keepalives' is alive here (it's not rpython) + got_integer = isinstance(res, (int, long, float)) + assert got_integer == expect_integer, ( + 'got %r not integer' % (res,)) + return res unwrapper.func = func unwrapper.api_func = api_function - unwrapper._always_inline_ = 'try' return unwrapper unwrapper_catch = make_unwrapper(True) @@ -501,7 +499,7 @@ GLOBALS['%s#' % (cpyname, )] = ('PyTypeObject*', pypyexpr) for cpyname in '''PyMethodObject PyListObject PyLongObject - PyDictObject PyTupleObject PyClassObject'''.split(): + PyDictObject PyClassObject'''.split(): FORWARD_DECLS.append('typedef struct { PyObject_HEAD } %s' % (cpyname, )) build_exported_objects() @@ -514,14 +512,16 @@ "PyIntObject*": PyIntObject, "PyDateTime_CAPI*": lltype.Ptr(PyDateTime_CAPI)}[ctype] +# Note: as a special case, "PyObject" is the pointer type in RPython, +# corresponding to "PyObject *" in C. We do that only for PyObject. +# For example, "PyTypeObject" is the struct type even in RPython. 
PyTypeObject = lltype.ForwardReference() PyTypeObjectPtr = lltype.Ptr(PyTypeObject) -# It is important that these PyObjects are allocated in a raw fashion -# Thus we cannot save a forward pointer to the wrapped object -# So we need a forward and backward mapping in our State instance PyObjectStruct = lltype.ForwardReference() PyObject = lltype.Ptr(PyObjectStruct) -PyObjectFields = (("ob_refcnt", lltype.Signed), ("ob_type", PyTypeObjectPtr)) +PyObjectFields = (("ob_refcnt", lltype.Signed), + ("ob_pypy_link", lltype.Signed), + ("ob_type", PyTypeObjectPtr)) PyVarObjectFields = PyObjectFields + (("ob_size", Py_ssize_t), ) cpython_struct('PyObject', PyObjectFields, PyObjectStruct) PyVarObjectStruct = cpython_struct("PyVarObject", PyVarObjectFields) @@ -618,8 +618,8 @@ @specialize.ll() def wrapper(*args): - from pypy.module.cpyext.pyobject import make_ref, from_ref - from pypy.module.cpyext.pyobject import Reference + from pypy.module.cpyext.pyobject import make_ref, from_ref, is_pyobj + from pypy.module.cpyext.pyobject import as_pyobj # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer if gil_acquire: @@ -628,6 +628,7 @@ llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py retval = fatal_value boxed_args = () + tb = None try: if not we_are_translated() and DEBUG_WRAPPER: print >>sys.stderr, callable, @@ -635,10 +636,8 @@ for i, (typ, is_wrapped) in argtypes_enum_ui: arg = args[i] if is_PyObject(typ) and is_wrapped: - if arg: - arg_conv = from_ref(space, rffi.cast(PyObject, arg)) - else: - arg_conv = None + assert is_pyobj(arg) + arg_conv = from_ref(space, rffi.cast(PyObject, arg)) else: arg_conv = arg boxed_args += (arg_conv, ) @@ -653,6 +652,7 @@ except BaseException, e: failed = True if not we_are_translated(): + tb = sys.exc_info()[2] message = repr(e) import traceback traceback.print_exc() @@ -671,29 +671,34 @@ retval = error_value elif is_PyObject(callable.api_func.restype): - if result is None: - retval = rffi.cast(callable.api_func.restype, - make_ref(space, None)) - elif isinstance(result, Reference): - retval = result.get_ref(space) - elif not rffi._isllptr(result): - retval = rffi.cast(callable.api_func.restype, - make_ref(space, result)) + if is_pyobj(result): + retval = result else: - retval = result + if result is not None: + if callable.api_func.result_borrowed: + retval = as_pyobj(space, result) + else: + retval = make_ref(space, result) + retval = rffi.cast(callable.api_func.restype, retval) + else: + retval = lltype.nullptr(PyObject.TO) elif callable.api_func.restype is not lltype.Void: retval = rffi.cast(callable.api_func.restype, result) except Exception, e: print 'Fatal error in cpyext, CPython compatibility layer, calling', callable.__name__ print 'Either report a bug or consider not using this particular extension' if not we_are_translated(): + if tb is None: + tb = sys.exc_info()[2] import traceback traceback.print_exc() - print str(e) + if sys.stdout == sys.__stdout__: + import pdb; pdb.post_mortem(tb) # we can't do much here, since we're in ctypes, swallow else: print str(e) pypy_debug_catch_fatal_exception() + assert False rffi.stackcounter.stacks_counter -= 1 if gil_release: rgil.release() @@ -827,6 +832,19 @@ outputfilename=str(udir / "module_cache" / "pypyapi")) modulename = py.path.local(eci.libraries[-1]) + def dealloc_trigger(): + from pypy.module.cpyext.pyobject import _Py_Dealloc + print 'dealloc_trigger...' 
+ while True: + ob = rawrefcount.next_dead(PyObject) + if not ob: + break + print ob + _Py_Dealloc(space, ob) + print 'dealloc_trigger DONE' + return "RETRY" + rawrefcount.init(dealloc_trigger) + run_bootstrap_functions(space) # load the bridge, and init structure @@ -836,9 +854,9 @@ space.fromcache(State).install_dll(eci) # populate static data - builder = StaticObjectBuilder(space) + builder = space.fromcache(StaticObjectBuilder) for name, (typ, expr) in GLOBALS.iteritems(): - from pypy.module import cpyext + from pypy.module import cpyext # for the eval() below w_obj = eval(expr) if name.endswith('#'): name = name[:-1] @@ -894,27 +912,44 @@ class StaticObjectBuilder: def __init__(self, space): self.space = space - self.to_attach = [] + self.static_pyobjs = [] + self.static_objs_w = [] + self.cpyext_type_init = None + # + # add a "method" that is overridden in setup_library() + # ('self.static_pyobjs' is completely ignored in that case) + self.get_static_pyobjs = lambda: self.static_pyobjs def prepare(self, py_obj, w_obj): - from pypy.module.cpyext.pyobject import track_reference - py_obj.c_ob_refcnt = 1 - track_reference(self.space, py_obj, w_obj) - self.to_attach.append((py_obj, w_obj)) + "NOT_RPYTHON" + if py_obj: + py_obj.c_ob_refcnt = 1 # 1 for kept immortal + self.static_pyobjs.append(py_obj) + self.static_objs_w.append(w_obj) def attach_all(self): + # this is RPython, called once in pypy-c when it imports cpyext from pypy.module.cpyext.pyobject import get_typedescr, make_ref from pypy.module.cpyext.typeobject import finish_type_1, finish_type_2 + from pypy.module.cpyext.pyobject import track_reference + # space = self.space - space._cpyext_type_init = [] - for py_obj, w_obj in self.to_attach: + static_pyobjs = self.get_static_pyobjs() + static_objs_w = self.static_objs_w + for i in range(len(static_objs_w)): + track_reference(space, static_pyobjs[i], static_objs_w[i]) + # + self.cpyext_type_init = [] + for i in range(len(static_objs_w)): + py_obj = static_pyobjs[i] + w_obj = static_objs_w[i] w_type = space.type(w_obj) - typedescr = get_typedescr(w_type.instancetypedef) + typedescr = get_typedescr(w_type.layout.typedef) py_obj.c_ob_type = rffi.cast(PyTypeObjectPtr, make_ref(space, w_type)) typedescr.attach(space, py_obj, w_obj) - cpyext_type_init = space._cpyext_type_init - del space._cpyext_type_init + cpyext_type_init = self.cpyext_type_init + self.cpyext_type_init = None for pto, w_type in cpyext_type_init: finish_type_1(space, pto) finish_type_2(space, pto, w_type) @@ -1067,7 +1102,7 @@ if name.endswith('#'): structs.append('%s %s;' % (typ[:-1], name[:-1])) elif name.startswith('PyExc_'): - structs.append('extern PyTypeObject _%s;' % (name,)) + structs.append('PyTypeObject _%s;' % (name,)) structs.append('PyObject* %s = (PyObject*)&_%s;' % (name, name)) elif typ == 'PyDateTime_CAPI*': structs.append('%s %s = NULL;' % (typ, name)) @@ -1107,7 +1142,7 @@ if not use_micronumpy: return use_micronumpy # import to register api functions by side-effect - import pypy.module.cpyext.ndarrayobject + import pypy.module.cpyext.ndarrayobject global GLOBALS, SYMBOLS_C, separate_module_files GLOBALS["PyArray_Type#"]= ('PyTypeObject*', "space.gettypeobject(W_NDimArray.typedef)") SYMBOLS_C += ['PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS'] @@ -1116,10 +1151,8 @@ def setup_library(space): "NOT_RPYTHON" - from pypy.module.cpyext.pyobject import make_ref use_micronumpy = setup_micronumpy(space) - - export_symbols = list(FUNCTIONS) + SYMBOLS_C + list(GLOBALS) + export_symbols = 
sorted(FUNCTIONS) + sorted(SYMBOLS_C) + sorted(GLOBALS) from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() @@ -1135,41 +1168,37 @@ run_bootstrap_functions(space) setup_va_functions(eci) - from pypy.module import cpyext # for eval() below - - # Set up the types. Needs a special case, because of the - # immediate cycle involving 'c_ob_type', and because we don't - # want these types to be Py_TPFLAGS_HEAPTYPE. - static_types = {} - for name, (typ, expr) in GLOBALS.items(): - if typ == 'PyTypeObject*': - pto = lltype.malloc(PyTypeObject, immortal=True, - zero=True, flavor='raw') - pto.c_ob_refcnt = 1 - pto.c_tp_basicsize = -1 - static_types[name] = pto - builder = StaticObjectBuilder(space) - for name, pto in static_types.items(): - pto.c_ob_type = static_types['PyType_Type#'] - w_type = eval(GLOBALS[name][1]) - builder.prepare(rffi.cast(PyObject, pto), w_type) - builder.attach_all() - - # populate static data - for name, (typ, expr) in GLOBALS.iteritems(): - name = name.replace("#", "") - if name.startswith('PyExc_'): + # emit uninitialized static data + builder = space.fromcache(StaticObjectBuilder) + lines = ['PyObject *pypy_static_pyobjs[] = {\n'] + include_lines = ['RPY_EXTERN PyObject *pypy_static_pyobjs[];\n'] + for name, (typ, expr) in sorted(GLOBALS.items()): + if name.endswith('#'): + assert typ in ('PyObject*', 'PyTypeObject*', 'PyIntObject*') + typ, name = typ[:-1], name[:-1] + elif name.startswith('PyExc_'): + typ = 'PyTypeObject' name = '_' + name - w_obj = eval(expr) - if typ in ('PyObject*', 'PyTypeObject*', 'PyIntObject*'): - struct_ptr = make_ref(space, w_obj) elif typ == 'PyDateTime_CAPI*': continue else: assert False, "Unknown static data: %s %s" % (typ, name) - struct = rffi.cast(get_structtype_for_ctype(typ), struct_ptr)._obj - struct._compilation_info = eci - export_struct(name, struct) + + from pypy.module import cpyext # for the eval() below + w_obj = eval(expr) + builder.prepare(None, w_obj) + lines.append('\t(PyObject *)&%s,\n' % (name,)) + include_lines.append('RPY_EXPORTED %s %s;\n' % (typ, name)) + + lines.append('};\n') + eci2 = CConfig._compilation_info_.merge(ExternalCompilationInfo( + separate_module_sources = [''.join(lines)], + post_include_bits = [''.join(include_lines)], + )) + # override this method to return a pointer to this C array directly + builder.get_static_pyobjs = rffi.CExternVariable( + PyObjectP, 'pypy_static_pyobjs', eci2, c_type='PyObject **', + getter_only=True, declare_as_extern=False) for name, func in FUNCTIONS.iteritems(): newname = mangle_name('PyPy', name) or name @@ -1180,6 +1209,10 @@ trunk_include = pypydir.dirpath() / 'include' copy_header_files(trunk_include, use_micronumpy) +def init_static_data_translated(space): + builder = space.fromcache(StaticObjectBuilder) + builder.attach_all() + def _load_from_cffi(space, name, path, initptr): from pypy.module._cffi_backend import cffi1_module cffi1_module.load_cffi1_module(space, name, path, initptr) @@ -1262,22 +1295,18 @@ @specialize.ll() def generic_cpy_call(space, func, *args): FT = lltype.typeOf(func).TO - return make_generic_cpy_call(FT, True, False)(space, func, *args) - - at specialize.ll() -def generic_cpy_call_dont_decref(space, func, *args): - FT = lltype.typeOf(func).TO - return make_generic_cpy_call(FT, False, False)(space, func, *args) + return make_generic_cpy_call(FT, False)(space, func, *args) @specialize.ll() def generic_cpy_call_expect_null(space, func, *args): FT = lltype.typeOf(func).TO - return make_generic_cpy_call(FT, True, 
True)(space, func, *args) + return make_generic_cpy_call(FT, True)(space, func, *args) @specialize.memo() -def make_generic_cpy_call(FT, decref_args, expect_null): +def make_generic_cpy_call(FT, expect_null): from pypy.module.cpyext.pyobject import make_ref, from_ref, Py_DecRef - from pypy.module.cpyext.pyobject import RefcountState + from pypy.module.cpyext.pyobject import is_pyobj, as_pyobj + from pypy.module.cpyext.pyobject import get_w_obj_and_decref from pypy.module.cpyext.pyerrors import PyErr_Occurred unrolling_arg_types = unrolling_iterable(enumerate(FT.ARGS)) RESULT_TYPE = FT.RESULT @@ -1305,65 +1334,49 @@ @specialize.ll() def generic_cpy_call(space, func, *args): boxed_args = () - to_decref = [] + keepalives = () assert len(args) == len(FT.ARGS) for i, ARG in unrolling_arg_types: arg = args[i] if is_PyObject(ARG): - if arg is None: - boxed_args += (lltype.nullptr(PyObject.TO),) - elif isinstance(arg, W_Root): - ref = make_ref(space, arg) - boxed_args += (ref,) - if decref_args: - to_decref.append(ref) - else: - boxed_args += (arg,) - else: - boxed_args += (arg,) + if not is_pyobj(arg): + keepalives += (arg,) + arg = as_pyobj(space, arg) + boxed_args += (arg,) try: - # create a new container for borrowed references - state = space.fromcache(RefcountState) - old_container = state.swap_borrow_container(None) - try: - # Call the function - result = call_external_function(func, *boxed_args) - finally: - state.swap_borrow_container(old_container) + # Call the function + result = call_external_function(func, *boxed_args) + finally: + keepalive_until_here(*keepalives) - if is_PyObject(RESULT_TYPE): - if result is None: - ret = result - elif isinstance(result, W_Root): - ret = result + if is_PyObject(RESULT_TYPE): + if not is_pyobj(result): + ret = result + else: + # The object reference returned from a C function + # that is called from Python must be an owned reference + # - ownership is transferred from the function to its caller. + if result: + ret = get_w_obj_and_decref(space, result) else: - ret = from_ref(space, result) - # The object reference returned from a C function - # that is called from Python must be an owned reference - # - ownership is transferred from the function to its caller. 
- if result: - Py_DecRef(space, result) + ret = None - # Check for exception consistency - has_error = PyErr_Occurred(space) is not None - has_result = ret is not None - if has_error and has_result: - raise OperationError(space.w_SystemError, space.wrap( - "An exception was set, but function returned a value")) - elif not expect_null and not has_error and not has_result: - raise OperationError(space.w_SystemError, space.wrap( - "Function returned a NULL result without setting an exception")) + # Check for exception consistency + has_error = PyErr_Occurred(space) is not None + has_result = ret is not None + if has_error and has_result: + raise OperationError(space.w_SystemError, space.wrap( + "An exception was set, but function returned a value")) + elif not expect_null and not has_error and not has_result: + raise OperationError(space.w_SystemError, space.wrap( + "Function returned a NULL result without setting an exception")) - if has_error: - state = space.fromcache(State) - state.check_and_raise_exception() + if has_error: + state = space.fromcache(State) + state.check_and_raise_exception() - return ret - return result - finally: - if decref_args: - for ref in to_decref: - Py_DecRef(space, ref) + return ret + return result + return generic_cpy_call - diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -25,7 +25,7 @@ @bootstrap_function def init_bufferobject(space): "Type description of PyBufferObject" - make_typedescr(space.w_buffer.instancetypedef, + make_typedescr(space.w_buffer.layout.typedef, basestruct=PyBufferObject.TO, attach=buffer_attach, dealloc=buffer_dealloc, diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/bytesobject.py @@ -0,0 +1,319 @@ +from pypy.interpreter.error import OperationError +from rpython.rtyper.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, cpython_struct, bootstrap_function, build_type_checkers, + PyObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL) +from pypy.module.cpyext.pyerrors import PyErr_BadArgument +from pypy.module.cpyext.pyobject import ( + PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, + make_typedescr, get_typedescr) + +## +## Implementation of PyStringObject +## ================================ +## +## The problem +## ----------- +## +## PyString_AsString() must return a (non-movable) pointer to the underlying +## buffer, whereas pypy strings are movable. C code may temporarily store +## this address and use it, as long as it owns a reference to the PyObject. +## There is no "release" function to specify that the pointer is not needed +## any more. +## +## Also, the pointer may be used to fill the initial value of string. This is +## valid only when the string was just allocated, and is not used elsewhere. +## +## Solution +## -------- +## +## PyStringObject contains two additional members: the size and a pointer to a +## char buffer; it may be NULL. +## +## - A string allocated by pypy will be converted into a PyStringObject with a +## NULL buffer. The first time PyString_AsString() is called, memory is +## allocated (with flavor='raw') and content is copied. 
+## +## - A string allocated with PyString_FromStringAndSize(NULL, size) will +## allocate a PyStringObject structure, and a buffer with the specified +## size, but the reference won't be stored in the global map; there is no +## corresponding object in pypy. When from_ref() or Py_INCREF() is called, +## the pypy string is created, and added to the global map of tracked +## objects. The buffer is then supposed to be immutable. +## +## - _PyString_Resize() works only on not-yet-pypy'd strings, and returns a +## similar object. +## +## - PyString_Size() doesn't need to force the object. +## +## - There could be an (expensive!) check in from_ref() that the buffer still +## corresponds to the pypy gc-managed string. +## + +PyStringObjectStruct = lltype.ForwardReference() +PyStringObject = lltype.Ptr(PyStringObjectStruct) +PyStringObjectFields = PyObjectFields + \ + (("buffer", rffi.CCHARP), ("size", Py_ssize_t)) +cpython_struct("PyStringObject", PyStringObjectFields, PyStringObjectStruct) + + at bootstrap_function +def init_stringobject(space): + "Type description of PyStringObject" + make_typedescr(space.w_str.layout.typedef, + basestruct=PyStringObject.TO, + attach=string_attach, + dealloc=string_dealloc, + realize=string_realize) + +PyString_Check, PyString_CheckExact = build_type_checkers("String", "w_str") + +def new_empty_str(space, length): + """ + Allocate a PyStringObject and its buffer, but without a corresponding + interpreter object. The buffer may be mutated, until string_realize() is + called. Refcount of the result is 1. + """ + typedescr = get_typedescr(space.w_str.layout.typedef) + py_obj = typedescr.allocate(space, space.w_str) + py_str = rffi.cast(PyStringObject, py_obj) + + buflen = length + 1 + py_str.c_size = length + py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, buflen, + flavor='raw', zero=True) + return py_str + +def string_attach(space, py_obj, w_obj): + """ + Fills a newly allocated PyStringObject with the given string object. The + buffer must not be modified. + """ + py_str = rffi.cast(PyStringObject, py_obj) + py_str.c_size = len(space.str_w(w_obj)) + py_str.c_buffer = lltype.nullptr(rffi.CCHARP.TO) + +def string_realize(space, py_obj): + """ + Creates the string in the interpreter. The PyStringObject buffer must not + be modified after this call. + """ + py_str = rffi.cast(PyStringObject, py_obj) + s = rffi.charpsize2str(py_str.c_buffer, py_str.c_size) + w_obj = space.wrap(s) + track_reference(space, py_obj, w_obj) + return w_obj + + at cpython_api([PyObject], lltype.Void, header=None) +def string_dealloc(space, py_obj): + """Frees allocated PyStringObject resources. 
+ """ + py_str = rffi.cast(PyStringObject, py_obj) + if py_str.c_buffer: + lltype.free(py_str.c_buffer, flavor="raw") + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) + +#_______________________________________________________________________ + + at cpython_api([CONST_STRING, Py_ssize_t], PyObject) +def PyString_FromStringAndSize(space, char_p, length): + if char_p: + s = rffi.charpsize2str(char_p, length) + return make_ref(space, space.wrap(s)) + else: + return rffi.cast(PyObject, new_empty_str(space, length)) + + at cpython_api([CONST_STRING], PyObject) +def PyString_FromString(space, char_p): + s = rffi.charp2str(char_p) + return space.wrap(s) + + at cpython_api([PyObject], rffi.CCHARP, error=0) +def PyString_AsString(space, ref): + if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: + pass # typecheck returned "ok" without forcing 'ref' at all + elif not PyString_Check(space, ref): # otherwise, use the alternate way + raise OperationError(space.w_TypeError, space.wrap( + "PyString_AsString only support strings")) + ref_str = rffi.cast(PyStringObject, ref) + if not ref_str.c_buffer: + # copy string buffer + w_str = from_ref(space, ref) + s = space.str_w(w_str) + ref_str.c_buffer = rffi.str2charp(s) + return ref_str.c_buffer + + at cpython_api([PyObject, rffi.CCHARPP, rffi.CArrayPtr(Py_ssize_t)], rffi.INT_real, error=-1) +def PyString_AsStringAndSize(space, ref, buffer, length): + if not PyString_Check(space, ref): + raise OperationError(space.w_TypeError, space.wrap( + "PyString_AsStringAndSize only support strings")) + ref_str = rffi.cast(PyStringObject, ref) + if not ref_str.c_buffer: + # copy string buffer + w_str = from_ref(space, ref) + s = space.str_w(w_str) + ref_str.c_buffer = rffi.str2charp(s) + buffer[0] = ref_str.c_buffer + if length: + length[0] = ref_str.c_size + else: + i = 0 + while ref_str.c_buffer[i] != '\0': + i += 1 + if i != ref_str.c_size: + raise OperationError(space.w_TypeError, space.wrap( + "expected string without null bytes")) + return 0 + + at cpython_api([PyObject], Py_ssize_t, error=-1) +def PyString_Size(space, ref): + if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: + ref = rffi.cast(PyStringObject, ref) + return ref.c_size + else: + w_obj = from_ref(space, ref) + return space.len_w(w_obj) + + at cpython_api([PyObjectP, Py_ssize_t], rffi.INT_real, error=-1) +def _PyString_Resize(space, ref, newsize): + """A way to resize a string object even though it is "immutable". Only use this to + build up a brand new string object; don't use this if the string may already be + known in other parts of the code. It is an error to call this function if the + refcount on the input string object is not one. Pass the address of an existing + string object as an lvalue (it may be written into), and the new size desired. + On success, *string holds the resized string object and 0 is returned; + the address in *string may differ from its input value. If the reallocation + fails, the original string object at *string is deallocated, *string is + set to NULL, a memory exception is set, and -1 is returned. 
+ """ + # XXX always create a new string so far + py_str = rffi.cast(PyStringObject, ref[0]) + if not py_str.c_buffer: + raise OperationError(space.w_SystemError, space.wrap( + "_PyString_Resize called on already created string")) + try: + py_newstr = new_empty_str(space, newsize) + except MemoryError: + Py_DecRef(space, ref[0]) + ref[0] = lltype.nullptr(PyObject.TO) + raise + to_cp = newsize + oldsize = py_str.c_size + if oldsize < newsize: + to_cp = oldsize + for i in range(to_cp): + py_newstr.c_buffer[i] = py_str.c_buffer[i] + Py_DecRef(space, ref[0]) + ref[0] = rffi.cast(PyObject, py_newstr) + return 0 + + at cpython_api([PyObject, PyObject], rffi.INT, error=CANNOT_FAIL) +def _PyString_Eq(space, w_str1, w_str2): + return space.eq_w(w_str1, w_str2) + + at cpython_api([PyObjectP, PyObject], lltype.Void) +def PyString_Concat(space, ref, w_newpart): + """Create a new string object in *string containing the contents of newpart + appended to string; the caller will own the new reference. The reference to + the old value of string will be stolen. If the new string cannot be created, + the old reference to string will still be discarded and the value of + *string will be set to NULL; the appropriate exception will be set.""" + + if not ref[0]: + return + + if w_newpart is None or not PyString_Check(space, ref[0]) or \ + not PyString_Check(space, w_newpart): + Py_DecRef(space, ref[0]) + ref[0] = lltype.nullptr(PyObject.TO) + return + w_str = from_ref(space, ref[0]) + w_newstr = space.add(w_str, w_newpart) + Py_DecRef(space, ref[0]) + ref[0] = make_ref(space, w_newstr) + + at cpython_api([PyObjectP, PyObject], lltype.Void) +def PyString_ConcatAndDel(space, ref, newpart): + """Create a new string object in *string containing the contents of newpart + appended to string. This version decrements the reference count of newpart.""" + PyString_Concat(space, ref, newpart) + Py_DecRef(space, newpart) + + at cpython_api([PyObject, PyObject], PyObject) +def PyString_Format(space, w_format, w_args): + """Return a new string object from format and args. Analogous to format % + args. The args argument must be a tuple.""" + return space.mod(w_format, w_args) + + at cpython_api([CONST_STRING], PyObject) +def PyString_InternFromString(space, string): + """A combination of PyString_FromString() and + PyString_InternInPlace(), returning either a new string object that has + been interned, or a new ("owned") reference to an earlier interned string + object with the same value.""" + s = rffi.charp2str(string) + return space.new_interned_str(s) + + at cpython_api([PyObjectP], lltype.Void) +def PyString_InternInPlace(space, string): + """Intern the argument *string in place. The argument must be the + address of a pointer variable pointing to a Python string object. + If there is an existing interned string that is the same as + *string, it sets *string to it (decrementing the reference count + of the old string object and incrementing the reference count of + the interned string object), otherwise it leaves *string alone and + interns it (incrementing its reference count). (Clarification: + even though there is a lot of talk about reference counts, think + of this function as reference-count-neutral; you own the object + after the call if and only if you owned it before the call.) 
+ + This function is not available in 3.x and does not have a PyBytes + alias.""" + w_str = from_ref(space, string[0]) + w_str = space.new_interned_w_str(w_str) + Py_DecRef(space, string[0]) + string[0] = make_ref(space, w_str) + + at cpython_api([PyObject, CONST_STRING, CONST_STRING], PyObject) +def PyString_AsEncodedObject(space, w_str, encoding, errors): + """Encode a string object using the codec registered for encoding and return + the result as Python object. encoding and errors have the same meaning as + the parameters of the same name in the string encode() method. The codec to + be used is looked up using the Python codec registry. Return NULL if an + exception was raised by the codec. + + This function is not available in 3.x and does not have a PyBytes alias.""" + if not PyString_Check(space, w_str): + PyErr_BadArgument(space) + + w_encoding = w_errors = None + if encoding: + w_encoding = space.wrap(rffi.charp2str(encoding)) + if errors: + w_errors = space.wrap(rffi.charp2str(errors)) + return space.call_method(w_str, 'encode', w_encoding, w_errors) + + at cpython_api([PyObject, CONST_STRING, CONST_STRING], PyObject) +def PyString_AsDecodedObject(space, w_str, encoding, errors): + """Decode a string object by passing it to the codec registered + for encoding and return the result as Python object. encoding and + errors have the same meaning as the parameters of the same name in + the string encode() method. The codec to be used is looked up + using the Python codec registry. Return NULL if an exception was + raised by the codec. + + This function is not available in 3.x and does not have a PyBytes alias.""" + if not PyString_Check(space, w_str): + PyErr_BadArgument(space) + + w_encoding = w_errors = None + if encoding: + w_encoding = space.wrap(rffi.charp2str(encoding)) + if errors: + w_errors = space.wrap(rffi.charp2str(errors)) + return space.call_method(w_str, "decode", w_encoding, w_errors) + + at cpython_api([PyObject, PyObject], PyObject) +def _PyString_Join(space, w_sep, w_seq): + return space.call_method(w_sep, 'join', w_seq) diff --git a/pypy/module/cpyext/complexobject.py b/pypy/module/cpyext/complexobject.py --- a/pypy/module/cpyext/complexobject.py +++ b/pypy/module/cpyext/complexobject.py @@ -43,7 +43,7 @@ # lltype does not handle functions returning a structure. This implements a # helper function, which takes as argument a reference to the return value. - at cpython_api([PyObject, Py_complex_ptr], lltype.Void) + at cpython_api([PyObject, Py_complex_ptr], rffi.INT_real, error=-1) def _PyComplex_AsCComplex(space, w_obj, result): """Return the Py_complex value of the complex number op. @@ -60,7 +60,7 @@ # if the above did not work, interpret obj as a float giving the # real part of the result, and fill in the imaginary part as 0. 
result.c_real = PyFloat_AsDouble(space, w_obj) # -1 on failure - return + return 0 if not PyComplex_Check(space, w_obj): raise OperationError(space.w_TypeError, space.wrap( @@ -69,3 +69,4 @@ assert isinstance(w_obj, W_ComplexObject) result.c_real = w_obj.realval result.c_imag = w_obj.imagval + return 0 diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -2,8 +2,7 @@ from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, build_type_checkers, Py_ssize_t, Py_ssize_tP, CONST_STRING) -from pypy.module.cpyext.pyobject import PyObject, PyObjectP, borrow_from -from pypy.module.cpyext.pyobject import RefcountState +from pypy.module.cpyext.pyobject import PyObject, PyObjectP, as_pyobj from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.interpreter.error import OperationError from rpython.rlib.objectmodel import specialize @@ -14,13 +13,17 @@ PyDict_Check, PyDict_CheckExact = build_type_checkers("Dict") - at cpython_api([PyObject, PyObject], PyObject, error=CANNOT_FAIL) + at cpython_api([PyObject, PyObject], PyObject, error=CANNOT_FAIL, + result_borrowed=True) def PyDict_GetItem(space, w_dict, w_key): try: w_res = space.getitem(w_dict, w_key) except: return None - return borrow_from(w_dict, w_res) + # NOTE: this works so far because all our dict strategies store + # *values* as full objects, which stay alive as long as the dict is + # alive and not modified. So we can return a borrowed ref. + return w_res @cpython_api([PyObject, PyObject, PyObject], rffi.INT_real, error=-1) def PyDict_SetItem(space, w_dict, w_key, w_obj): @@ -47,7 +50,8 @@ else: PyErr_BadInternalCall(space) - at cpython_api([PyObject, CONST_STRING], PyObject, error=CANNOT_FAIL) + at cpython_api([PyObject, CONST_STRING], PyObject, error=CANNOT_FAIL, + result_borrowed=True) def PyDict_GetItemString(space, w_dict, key): """This is the same as PyDict_GetItem(), but key is specified as a char*, rather than a PyObject*.""" @@ -55,9 +59,10 @@ w_res = space.finditem_str(w_dict, rffi.charp2str(key)) except: w_res = None - if w_res is None: - return None - return borrow_from(w_dict, w_res) + # NOTE: this works so far because all our dict strategies store + # *values* as full objects, which stay alive as long as the dict is + # alive and not modified. So we can return a borrowed ref. + return w_res @cpython_api([PyObject, CONST_STRING], rffi.INT_real, error=-1) def PyDict_DelItemString(space, w_dict, key_ptr): @@ -170,10 +175,13 @@ if w_dict is None: return 0 - # Note: this is not efficient. Storing an iterator would probably + # XXX XXX PyDict_Next is not efficient. Storing an iterator would probably # work, but we can't work out how to not leak it if iteration does - # not complete. + # not complete. Alternatively, we could add some RPython-only + # dict-iterator method to move forward by N steps. 
+ w_dict.ensure_object_strategy() # make sure both keys and values can + # be borrwed try: w_iter = space.call_method(space.w_dict, "iteritems", w_dict) pos = ppos[0] @@ -183,11 +191,10 @@ w_item = space.call_method(w_iter, "next") w_key, w_value = space.fixedview(w_item, 2) - state = space.fromcache(RefcountState) if pkey: - pkey[0] = state.make_borrowed(w_dict, w_key) + pkey[0] = as_pyobj(space, w_key) if pvalue: - pvalue[0] = state.make_borrowed(w_dict, w_value) + pvalue[0] = as_pyobj(space, w_value) ppos[0] += 1 except OperationError, e: if not e.match(space, space.w_StopIteration): diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -4,7 +4,7 @@ from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, fread, feof, Py_ssize_tP, cpython_struct, is_valid_fp) -from pypy.module.cpyext.pyobject import PyObject, borrow_from +from pypy.module.cpyext.pyobject import PyObject from pypy.module.cpyext.pyerrors import PyErr_SetFromErrno from pypy.module.cpyext.funcobject import PyCodeObject from pypy.module.__builtin__ import compiling @@ -23,7 +23,7 @@ def PyEval_CallObjectWithKeywords(space, w_obj, w_arg, w_kwds): return space.call(w_obj, w_arg, w_kwds) - at cpython_api([], PyObject) + at cpython_api([], PyObject, result_borrowed=True) def PyEval_GetBuiltins(space): """Return a dictionary of the builtins in the current execution frame, or the interpreter of the thread state if no frame is @@ -36,25 +36,25 @@ w_builtins = w_builtins.getdict(space) else: w_builtins = space.builtin.getdict(space) - return borrow_from(None, w_builtins) + return w_builtins # borrowed ref in all cases - at cpython_api([], PyObject, error=CANNOT_FAIL) + at cpython_api([], PyObject, error=CANNOT_FAIL, result_borrowed=True) def PyEval_GetLocals(space): """Return a dictionary of the local variables in the current execution frame, or NULL if no frame is currently executing.""" caller = space.getexecutioncontext().gettopframe_nohidden() if caller is None: return None - return borrow_from(None, caller.getdictscope()) + return caller.getdictscope() # borrowed ref - at cpython_api([], PyObject, error=CANNOT_FAIL) + at cpython_api([], PyObject, error=CANNOT_FAIL, result_borrowed=True) def PyEval_GetGlobals(space): """Return a dictionary of the global variables in the current execution frame, or NULL if no frame is currently executing.""" caller = space.getexecutioncontext().gettopframe_nohidden() if caller is None: return None From pypy.commits at gmail.com Tue Mar 1 08:05:50 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 01 Mar 2016 05:05:50 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: progress, pass the first own test of opencoder Message-ID: <56d593ae.c3e01c0a.40ffb.3dbe@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82634:ae45cb0da255 Date: 2016-03-01 14:02 +0100 http://bitbucket.org/pypy/pypy/changeset/ae45cb0da255/ Log: progress, pass the first own test of opencoder diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -4,6 +4,7 @@ from rpython.jit.metainterp.resoperation import AbstractResOp, AbstractInputArg,\ ResOperation, oparity, opname, rop, ResOperation, opwithdescr from rpython.rlib.rarithmetic import intmask +from rpython.jit.metainterp import resume TAGINT, TAGCONST, TAGBOX = range(3) TAGMASK = 0x3 @@ -46,8 +47,20 @@ else: yyyy + def 
read_resume(self, op): + jc_index = self._next() + pc = self._next() + f = resume.FrameInfo(None, jc_index, pc) + op.rd_frame_info_list = f + lgt = self._next() + box_list = [] + for i in range(lgt): + box = self._get(self._next()) + assert box + box_list.append(box) + op.rd_snapshot = resume.Snapshot(None, box_list) + def next(self): - pos = self.pos opnum = self._next() if oparity[opnum] == -1: argnum = self._next() @@ -65,6 +78,8 @@ else: descr = None res = ResOperation(opnum, args, -1, descr=descr) + if rop.is_guard(opnum): + self.read_resume(res) self._cache[self._count] = res self._count += 1 return res @@ -138,6 +153,14 @@ def record_op_tag(self, opnum, tagged_args, descr=None): return tag(TAGBOX, self._record_raw(opnum, tagged_args, descr)) + def record_snapshot(self, jitcode, pc, active_boxes): + self._ops.append(jitcode.index) + self._ops.append(pc) + self._ops.append(len(active_boxes)) # unnecessary, can be read from + # jitcode + for box in active_boxes: + self._ops.append(box.position) # not tagged, as it must be boxes + def get_iter(self): return TraceIterator(self, len(self._ops)) diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -39,9 +39,9 @@ class FrameInfo(object): __slots__ = ('prev', 'packed_jitcode_pc') - def __init__(self, prev, jitcode, pc): + def __init__(self, prev, jitcode_index, pc): self.prev = prev - self.packed_jitcode_pc = combine_uint(jitcode.index, pc) + self.packed_jitcode_pc = combine_uint(jitcode_index, pc) class VectorInfo(object): """ @@ -123,8 +123,7 @@ back.parent_resumedata_snapshot, back.get_list_of_active_boxes(True)) -def capture_resumedata(framestack, virtualizable_boxes, virtualref_boxes, - snapshot_storage): +def capture_resumedata(framestack, virtualizable_boxes, virtualref_boxes, t): n = len(framestack) - 1 if virtualizable_boxes is not None: boxes = virtualref_boxes + virtualizable_boxes @@ -132,15 +131,19 @@ boxes = virtualref_boxes[:] if n >= 0: top = framestack[n] - _ensure_parent_resumedata(framestack, n) - frame_info_list = FrameInfo(top.parent_resumedata_frame_info_list, - top.jitcode, top.pc) - snapshot_storage.rd_frame_info_list = frame_info_list - snapshot = Snapshot(top.parent_resumedata_snapshot, - top.get_list_of_active_boxes(False)) - snapshot = Snapshot(snapshot, boxes) - snapshot_storage.rd_snapshot = snapshot + #_ensure_parent_resumedata(framestack, n) + t.record_snapshot(top.jitcode, top.pc, + top.get_list_of_active_boxes(False)) + #XXX + #frame_info_list = FrameInfo(top.parent_resumedata_frame_info_list, + # top.jitcode, top.pc) + #snapshot_storage.rd_frame_info_list = frame_info_list + #snapshot = Snapshot(top.parent_resumedata_snapshot, + # top.get_list_of_active_boxes(False)) + #snapshot = Snapshot(snapshot, boxes) + #snapshot_storage.rd_snapshot = snapshot else: + yyy snapshot_storage.rd_frame_info_list = None snapshot_storage.rd_snapshot = Snapshot(None, boxes) diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -56,4 +56,4 @@ virutalref_boxes, t) (i0, i1), l = self.unpack(t) assert l[1].opnum == rop.GUARD_FALSE - assert l[1].rd_snapshot == [i0, i1] + assert l[1].rd_snapshot.boxes == [i0, i1] From pypy.commits at gmail.com Tue Mar 1 09:03:37 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 01 Mar 2016 06:03:37 -0800 (PST) Subject: 
[pypy-commit] pypy s390x-backend: removed the function field _arguments_, a new function rebuilds the information needed for the auto encoding test Message-ID: <56d5a139.02931c0a.4907.567d@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82636:a5e2222f58cc Date: 2016-03-01 15:02 +0100 http://bitbucket.org/pypy/pypy/changeset/a5e2222f58cc/ Log: removed the function field _arguments_, a new function rebuilds the information needed for the auto encoding test diff --git a/rpython/jit/backend/zarch/instruction_builder.py b/rpython/jit/backend/zarch/instruction_builder.py --- a/rpython/jit/backend/zarch/instruction_builder.py +++ b/rpython/jit/backend/zarch/instruction_builder.py @@ -585,6 +585,21 @@ def is_branch_relative(name): return name.startswith('BR') or name.endswith('J') +def get_arg_types_of(mnemonic): + """ NOT_RPYTHON """ + params = all_mnemonic_codes[mnemonic.split("_")[0]] + if len(params) == 2: + argtypes = None + (instrtype, args) = params + else: + (instrtype, args, argtypes) = params + builder = globals()['build_' + instrtype] + if argtypes: + func = builder(mnemonic, args, argtypes) + else: + func = builder(mnemonic, args) + return func._arguments_ + def build_instr_codes(clazz): for mnemonic, params in all_mnemonic_codes.items(): argtypes = None diff --git a/rpython/jit/backend/zarch/test/test_auto_encoding.py b/rpython/jit/backend/zarch/test/test_auto_encoding.py --- a/rpython/jit/backend/zarch/test/test_auto_encoding.py +++ b/rpython/jit/backend/zarch/test/test_auto_encoding.py @@ -167,9 +167,8 @@ methname = '?' def get_func_arg_types(self, methodname): - from rpython.jit.backend.zarch.codebuilder import AbstractZARCHBuilder - func = getattr(AbstractZARCHBuilder, methodname) - return func._arguments_ + from rpython.jit.backend.zarch.instruction_builder import get_arg_types_of + return get_arg_types_of(methodname) def operand_combinations(self, methodname, modes, arguments): mapping = { From pypy.commits at gmail.com Tue Mar 1 09:46:37 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 01 Mar 2016 06:46:37 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: trying to translate the current interpreter in a test and later feed it with hypothesis. Message-ID: <56d5ab4d.4577c20a.7d582.ffffabbb@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82637:9eab49cb6677 Date: 2016-03-01 15:45 +0100 http://bitbucket.org/pypy/pypy/changeset/9eab49cb6677/ Log: trying to translate the current interpreter in a test and later feed it with hypothesis. in addition fixed a bug that occurs while emitting jump. the assembled loop is too long, and BRC could not reach the top. 
thus BRCL is emitted if needed diff --git a/rpython/jit/backend/llsupport/tl/interp.py b/rpython/jit/backend/llsupport/tl/interp.py --- a/rpython/jit/backend/llsupport/tl/interp.py +++ b/rpython/jit/backend/llsupport/tl/interp.py @@ -1,6 +1,7 @@ from rpython.rlib.rstruct.runpack import runpack from rpython.rlib.objectmodel import specialize, always_inline -from rpython.jit.backend.llsupport.tl import code, stack +from rpython.jit.backend.llsupport.tl import code +from rpython.jit.backend.llsupport.tl.stack import Stack class W_Root(object): pass @@ -9,25 +10,28 @@ def __init__(self, items): self.items = items - def concat(self, w_lst): + def concat(self, space, w_lst): assert isinstance(w_lst, W_ListObject) - return self.items + w_lst.items + return space.wrap(self.items + w_lst.items) class W_IntObject(W_Root): def __init__(self, value): self.value = value - def compare(self, w_int): + def compare(self, space, w_int): assert isinstance(w_int, W_IntObject) - return cmp(self.value, w_int.value) + return space.wrap(self.value - w_int.value) + + def concat(self, space, w_obj): + raise NotImplementedError("cannot concat int with object") class W_StrObject(W_Root): def __init__(self, value): self.value = value - def concat(self, w_str): + def concat(self, space, w_str): assert isinstance(w_str, W_StrObject) - return self.value + w_str.value + return space.wrap(self.value + w_str.value) class Space(object): @specialize.argtype(1) @@ -42,17 +46,18 @@ return W_StrObject(val.encode('utf-8')) if isinstance(val, list): return W_ListObject(val) - raise NotImplementedError("cannot handle: " + str(val) + str(type(val))) + raise NotImplementedError("cannot handle: " + str(val)) def entry_point(argv): bytecode = argv[0] pc = 0 end = len(bytecode) stack = Stack(16) - space = space.Space() - consts = [] - while i < end: - i = dispatch_once(space, i, bytecode, consts, stack) + space = Space() + consts = ["hello"] * 100 + consts[0] = "world" + while pc < end: + pc = dispatch_once(space, pc, bytecode, consts, stack) return 0 @always_inline @@ -65,8 +70,7 @@ elif opcode == code.CompareInt.BYTE_CODE: w_int2 = stack.pop() w_int1 = stack.pop() - w_int3 = space.wrap(w_int1.compare(w_int2)) - stack.append(w_int3) + stack.append(w_int1.compare(space, w_int2)) elif opcode == code.LoadStr.BYTE_CODE: pos = runpack('h', bytecode[i+1:i+3]) w_str = space.wrap(consts[pos]) @@ -75,11 +79,11 @@ elif opcode == code.AddStr.BYTE_CODE: w_str2 = stack.pop() w_str1 = stack.pop() - stack.append(space.wrap(w_str1.concat(w_str2))) + stack.append(w_str1.concat(space, w_str2)) elif opcode == code.AddList.BYTE_CODE: w_lst2 = stack.pop() w_lst1 = stack.pop() - stack.append(space.wrap(w_lst1.concat(w_lst2))) + stack.append(w_lst1.concat(space, w_lst2)) elif opcode == code.CreateList.BYTE_CODE: size = runpack('h', bytecode[i+1:i+3]) stack.append(space.wrap([None] * size)) @@ -91,11 +95,13 @@ elif opcode == code.InsertList.BYTE_CODE: w_val = stack.pop() w_idx = stack.pop() + assert isinstance(w_idx, W_IntObject) w_lst = stack.peek(0) w_lst.items[w_idx.value] = w_val # index error, just crash here! elif opcode == code.DelList.BYTE_CODE: w_idx = stack.pop() + assert isinstance(w_idx, W_IntObject) w_lst = stack.peek(0) del w_lst.items[w_idx.value] # index error, just crash the machine!! 
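The three new list opcodes only touch the top of the operand stack. As a rough plain-Python sketch of their stack effects (illustrative helper names, not the RPython interpreter above):

    def append_list(stack):      # [.., lst, val]      -> [.., lst]
        val = stack.pop()
        stack[-1].append(val)

    def insert_list(stack):      # [.., lst, idx, val] -> [.., lst]
        val = stack.pop()
        idx = stack.pop()
        stack[-1][idx] = val     # an out-of-range idx raises IndexError,
                                 # i.e. "just crash here"

    def del_list(stack):         # [.., lst, idx]      -> [.., lst]
        idx = stack.pop()
        del stack[-1][idx]

    s = [[1, 2, 3], 0, 99]
    insert_list(s)
    assert s == [[99, 2, 3]]

In all three cases the list itself stays on the stack, which matches the peek(0) calls in the dispatch loop above.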
diff --git a/rpython/jit/backend/llsupport/tl/stack.py b/rpython/jit/backend/llsupport/tl/stack.py --- a/rpython/jit/backend/llsupport/tl/stack.py +++ b/rpython/jit/backend/llsupport/tl/stack.py @@ -5,7 +5,7 @@ def __init__(self, size): self = hint(self, access_directly=True, fresh_virtualizable=True) - self.stack = [0] * size + self.stack = [None] * size self.stackpos = 0 # always store a known-nonneg integer here def size(self): diff --git a/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py b/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py --- a/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py +++ b/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py @@ -10,7 +10,7 @@ from rpython.translator.unsimplify import call_initial_function from rpython.translator.translator import TranslationContext from rpython.translator.c import genc -from rpython.jit.backend.llsupport.gcstress import interp +from rpython.jit.backend.llsupport.tl import interp class GCHypothesis(object): def setup_class(self): diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -128,7 +128,11 @@ def b_offset(self, reladdr): offset = reladdr - self.get_relative_pos() - self.BRC(c.ANY, l.imm(offset)) + if -2**15 <= offset <= 2**15-1: + self.BRC(c.ANY, l.imm(offset)) + else: + # we have big loops! + self.BRCL(c.ANY, l.imm(offset)) def reserve_guard_branch(self): self.BRCL(l.imm(0x0), l.imm(0)) diff --git a/rpython/jit/backend/zarch/test/test_zrpy_gc_hypo.py b/rpython/jit/backend/zarch/test/test_zrpy_gc_hypo.py --- a/rpython/jit/backend/zarch/test/test_zrpy_gc_hypo.py +++ b/rpython/jit/backend/zarch/test/test_zrpy_gc_hypo.py @@ -1,7 +1,6 @@ -from rpython.jit.backend.llsupport.gcstress.test.zrpy_gc_hypo_test import GCHypothesis - +from rpython.jit.backend.llsupport.tl.test.zrpy_gc_hypo_test import GCHypothesis class TestGCHypothesis(GCHypothesis): - # runs ../../llsupport/gcstress/test/zrpy_gc_hypo_test.py + # runs ../../llsupport/tl/test/zrpy_gc_hypo_test.py gcrootfinder = "shadowstack" gc = "incminimark" From pypy.commits at gmail.com Tue Mar 1 11:26:46 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 01 Mar 2016 08:26:46 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: translating the interpreter and feeding it with hypothesis, it compiles but does not correctly enter the dispatch loop Message-ID: <56d5c2c6.e6ebc20a.a372c.ffffd332@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82638:ae5c221a741c Date: 2016-03-01 17:25 +0100 http://bitbucket.org/pypy/pypy/changeset/ae5c221a741c/ Log: translating the interpreter and feeding it with hypothesis, it compiles but does not correctly enter the dispatch loop diff --git a/rpython/jit/backend/llsupport/tl/interp.py b/rpython/jit/backend/llsupport/tl/interp.py --- a/rpython/jit/backend/llsupport/tl/interp.py +++ b/rpython/jit/backend/llsupport/tl/interp.py @@ -2,6 +2,7 @@ from rpython.rlib.objectmodel import specialize, always_inline from rpython.jit.backend.llsupport.tl import code from rpython.jit.backend.llsupport.tl.stack import Stack +from rpython.rlib import rstring class W_Root(object): pass @@ -48,14 +49,28 @@ return W_ListObject(val) raise NotImplementedError("cannot handle: " + str(val)) +def _read_all_from_file(file): + with open(file, 'rb') as fd: + return fd.read() + +_read_bytecode_from_file = _read_all_from_file + +def _read_consts_from_file(file): + consts 
= [] + bytestring = _read_all_from_file(file) + for line in bytestring.splitlines(): + consts.append(rstring.replace(line, "\\n", "\n")) + return consts + def entry_point(argv): - bytecode = argv[0] + bytecode = _read_bytecode_from_file(argv[0]) + consts = _read_consts_from_file(argv[1]) + print(bytecode) + print(consts) pc = 0 end = len(bytecode) stack = Stack(16) space = Space() - consts = ["hello"] * 100 - consts[0] = "world" while pc < end: pc = dispatch_once(space, pc, bytecode, consts, stack) return 0 @@ -106,5 +121,6 @@ del w_lst.items[w_idx.value] # index error, just crash the machine!! else: + print("opcode %d is not implemented" % opcode) raise NotImplementedError return i + 1 diff --git a/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py b/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py --- a/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py +++ b/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py @@ -1,19 +1,35 @@ -from rpython.jit.backend.detect_cpu import getcpuclass -from rpython.jit.tool.oparser import parse -from rpython.jit.metainterp.history import JitCellToken, NoStats -from rpython.jit.metainterp.history import BasicFinalDescr, BasicFailDescr -from rpython.jit.metainterp.gc import get_description +import py +from hypothesis import given +from rpython.tool.udir import udir from rpython.jit.metainterp.optimize import SpeculativeError from rpython.annotator.listdef import s_list_of_strings -from rpython.rtyper.lltypesystem import lltype, llmemory, rffi -from rpython.rtyper.rclass import getclassrepr, getinstancerepr -from rpython.translator.unsimplify import call_initial_function from rpython.translator.translator import TranslationContext from rpython.translator.c import genc from rpython.jit.backend.llsupport.tl import interp +from rpython.jit.backend.llsupport.tl.test import code_strategies as st + +def persist(type, contents): + dir = udir.ensure(type) + print "written", type, "to", dir + with open(dir.strpath, 'wb') as fd: + fd.write(contents) + return dir.strpath + +def persist_constants(consts): + contents = "" + for string in consts: + contents += string.replace("\n", "\\n") + "\n" + return persist('constants', contents) + +def persist_bytecode(bc): + return persist('bytecode', bc) class GCHypothesis(object): - def setup_class(self): + builder = None + def setup_method(self, name): + if self.builder: + return + t = TranslationContext() t.config.translation.gc = "incminimark" t.config.translation.gcremovetypeptr = True @@ -22,12 +38,23 @@ rtyper = t.buildrtyper() rtyper.specialize() - cbuilder = genc.CStandaloneBuilder(t, f, t.config) + cbuilder = genc.CStandaloneBuilder(t, interp.entry_point, t.config) cbuilder.generate_source(defines=cbuilder.DEBUG_DEFINES) cbuilder.compile() + # prevent from rebuilding the c object! 
+ self.builder = cbuilder - import pdb; pdb.set_trace() + def execute(self, bytecode, consts): + exe = self.builder.executable_name + bc_file = persist_bytecode(bytecode) + consts_file = persist_constants(consts) + args = [bc_file, consts_file] + env = {} + res = self.builder.translator.platform.execute(exe, args, env=env) + return res.returncode, res.out, res.err - - def test_void(self): - pass + @given(st.single_bytecode()) + def test_execute_single_bytecode(self, program): + clazz, bytecode, consts, stack = program + result, out, err = self.execute(bytecode, consts) + assert result == 0 diff --git a/rpython/jit/backend/x86/test/test_zrpy_gc_hypo.py b/rpython/jit/backend/x86/test/test_zrpy_gc_hypo.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/x86/test/test_zrpy_gc_hypo.py @@ -0,0 +1,6 @@ +from rpython.jit.backend.llsupport.tl.test.zrpy_gc_hypo_test import GCHypothesis + +class TestGCHypothesis(GCHypothesis): + # runs ../../llsupport/tl/test/zrpy_gc_hypo_test.py + gcrootfinder = "shadowstack" + gc = "incminimark" From pypy.commits at gmail.com Tue Mar 1 11:53:30 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 01 Mar 2016 08:53:30 -0800 (PST) Subject: [pypy-commit] pypy default: Test rdict also with char, unicode, unichar Message-ID: <56d5c90a.89bd1c0a.45f78.00d0@mx.google.com> Author: Ronan Lamy Branch: Changeset: r82639:b8922598b1c9 Date: 2016-03-01 16:52 +0000 http://bitbucket.org/pypy/pypy/changeset/b8922598b1c9/ Log: Test rdict also with char, unicode, unichar diff --git a/rpython/rtyper/test/test_rdict.py b/rpython/rtyper/test/test_rdict.py --- a/rpython/rtyper/test/test_rdict.py +++ b/rpython/rtyper/test/test_rdict.py @@ -3,26 +3,35 @@ import signal from rpython.translator.translator import TranslationContext -from rpython.annotator.model import SomeInteger, SomeString +from rpython.annotator.model import ( + SomeInteger, SomeString, SomeChar, SomeUnicodeString, SomeUnicodeCodePoint) from rpython.annotator.dictdef import DictKey, DictValue from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rtyper.lltypesystem.rstr import string_repr -from rpython.rtyper import rint -from rpython.rtyper.lltypesystem import rdict, rstr +from rpython.rtyper.lltypesystem import rdict from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rlib.objectmodel import r_dict from rpython.rlib.rarithmetic import r_int, r_uint, r_longlong, r_ulonglong import py -from hypothesis.strategies import builds, sampled_from, binary, just, integers +from hypothesis.strategies import ( + builds, sampled_from, binary, just, integers, text, characters) from hypothesis.stateful import GenericStateMachine, run_state_machine_as_test def ann2strategy(s_value): - if isinstance(s_value, SomeString): + if isinstance(s_value, SomeChar): + return builds(chr, integers(min_value=0, max_value=255)) + elif isinstance(s_value, SomeString): if s_value.can_be_None: return binary() | just(None) else: return binary() + elif isinstance(s_value, SomeUnicodeCodePoint): + return characters() + elif isinstance(s_value, SomeUnicodeString): + if s_value.can_be_None: + return text() | just(None) + else: + return text() elif isinstance(s_value, SomeInteger): return integers(min_value=~sys.maxint, max_value=sys.maxint) else: @@ -239,9 +248,8 @@ def test_dict_copy(self): def func(): - # XXX this does not work if we use chars, only! 
dic = self.newdict() - dic['ab'] = 1 + dic['a'] = 1 dic['b'] = 2 d2 = dic.copy() ok = 1 @@ -1146,9 +1154,9 @@ # XXX: None keys crash the test, but translation sort-of allows it @py.test.mark.parametrize('s_key', - [SomeString(), SomeInteger()]) + [SomeString(), SomeInteger(), SomeChar(), SomeUnicodeString(), SomeUnicodeCodePoint()]) @py.test.mark.parametrize('s_value', - [SomeString(can_be_None=True), SomeString(), SomeInteger()]) + [SomeString(can_be_None=True), SomeString(), SomeChar(), SomeInteger(), SomeUnicodeString(), SomeUnicodeCodePoint()]) def test_hypothesis(s_key, s_value): rtyper = PseudoRTyper() r_key = s_key.rtyper_makerepr(rtyper) From pypy.commits at gmail.com Tue Mar 1 11:57:55 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 01 Mar 2016 08:57:55 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: bytecode and constants are correctly passed, need to modify hypothesis to generate correct programs Message-ID: <56d5ca13.703dc20a.b310.760a@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82640:b9cd7126874f Date: 2016-03-01 17:57 +0100 http://bitbucket.org/pypy/pypy/changeset/b9cd7126874f/ Log: bytecode and constants are correctly passed, need to modify hypothesis to generate correct programs diff --git a/rpython/jit/backend/llsupport/tl/interp.py b/rpython/jit/backend/llsupport/tl/interp.py --- a/rpython/jit/backend/llsupport/tl/interp.py +++ b/rpython/jit/backend/llsupport/tl/interp.py @@ -63,8 +63,8 @@ return consts def entry_point(argv): - bytecode = _read_bytecode_from_file(argv[0]) - consts = _read_consts_from_file(argv[1]) + bytecode = _read_bytecode_from_file(argv[1]) + consts = _read_consts_from_file(argv[2]) print(bytecode) print(consts) pc = 0 diff --git a/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py b/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py --- a/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py +++ b/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py @@ -57,4 +57,6 @@ def test_execute_single_bytecode(self, program): clazz, bytecode, consts, stack = program result, out, err = self.execute(bytecode, consts) - assert result == 0 + if result != 0: + raise Exception(("could not run program. 
returned %d" + " stderr:\n%s\nstdout:\n%s\n") % (result, err, out)) From pypy.commits at gmail.com Tue Mar 1 14:16:40 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 01 Mar 2016 11:16:40 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: Add an XXX Message-ID: <56d5ea98.86e31c0a.a4196.417d@mx.google.com> Author: Armin Rigo Branch: cpyext-ext Changeset: r82642:d290ba429283 Date: 2016-03-01 20:04 +0100 http://bitbucket.org/pypy/pypy/changeset/d290ba429283/ Log: Add an XXX diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -517,6 +517,10 @@ assert float(F(10.5)) == -66.66 assert module.nb_float(int, I(10)) == 10.0 assert module.nb_float(float, F(10.5)) == 10.5 + # XXX but the subtype's tp_as_number->nb_float(x) should really invoke + # the user-defined __float__(); it doesn't so far + #assert module.nb_float(I, I(10)) == -55.55 + #assert module.nb_float(F, F(10.5)) == -66.66 def test_tp_call(self): module = self.import_extension('foo', [ From pypy.commits at gmail.com Tue Mar 1 14:16:38 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 01 Mar 2016 11:16:38 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: Test and fix (for nb_float only for now) Message-ID: <56d5ea96.465ec20a.98f88.ffffebc1@mx.google.com> Author: Armin Rigo Branch: cpyext-ext Changeset: r82641:359d2065421a Date: 2016-03-01 20:03 +0100 http://bitbucket.org/pypy/pypy/changeset/359d2065421a/ Log: Test and fix (for nb_float only for now) diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -349,10 +349,6 @@ return space.int(w_self) @cpython_api([PyObject], PyObject, header=None) -def slot_nb_float(space, w_self): - return space.float(w_self) - - at cpython_api([PyObject], PyObject, header=None) def slot_tp_iter(space, w_self): return space.iter(w_self) @@ -406,6 +402,16 @@ def slot_tp_getattro(space, w_self, w_name): return space.call_function(getattr_fn, w_self, w_name) api_func = slot_tp_getattro.api_func + elif name == 'tp_as_number.c_nb_float': + float_fn = w_type.getdictvalue(space, '__float__') + if float_fn is None: + return + + @cpython_api([PyObject], PyObject, header=header) + @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) + def slot_nb_float(space, w_self): + return space.call_function(float_fn, w_self) + api_func = slot_nb_float.api_func else: return diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -487,21 +487,36 @@ def test_nb_float(self): module = self.import_extension('foo', [ - ("nb_float", "METH_O", + ("nb_float", "METH_VARARGS", ''' - if (!args->ob_type->tp_as_number || - !args->ob_type->tp_as_number->nb_float) + PyTypeObject *type = (PyTypeObject *)PyTuple_GET_ITEM(args, 0); + PyObject *obj = PyTuple_GET_ITEM(args, 1); + if (!type->tp_as_number || + !type->tp_as_number->nb_float) { PyErr_SetNone(PyExc_ValueError); return NULL; } - return args->ob_type->tp_as_number->nb_float(args); + return type->tp_as_number->nb_float(obj); ''' ) ]) - assert module.nb_float(10) == 10.0 - assert module.nb_float(-12.3) == -12.3 - raises(ValueError, module.nb_float, "123") + assert module.nb_float(int, 10) == 10.0 + assert module.nb_float(float, -12.3) == -12.3 + raises(ValueError, 
module.nb_float, str, "123") + # + # check that calling PyInt_Type->tp_as_number->nb_float(x) + # does not invoke a user-defined __float__() + class I(int): + def __float__(self): + return -55.55 + class F(float): + def __float__(self): + return -66.66 + assert float(I(10)) == -55.55 + assert float(F(10.5)) == -66.66 + assert module.nb_float(int, I(10)) == 10.0 + assert module.nb_float(float, F(10.5)) == 10.5 def test_tp_call(self): module = self.import_extension('foo', [ From pypy.commits at gmail.com Tue Mar 1 14:49:56 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 01 Mar 2016 11:49:56 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: test and fix: slot_tp_call Message-ID: <56d5f264.86351c0a.2db31.4b62@mx.google.com> Author: Armin Rigo Branch: cpyext-ext Changeset: r82643:df19cf072629 Date: 2016-03-01 20:49 +0100 http://bitbucket.org/pypy/pypy/changeset/df19cf072629/ Log: test and fix: slot_tp_call diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -336,10 +336,6 @@ space.get_and_call_args(w_descr, w_self, args) return 0 - at cpython_api([PyObject, PyObject, PyObject], PyObject, header=None) -def slot_tp_call(space, w_self, w_args, w_kwds): - return space.call(w_self, w_args, w_kwds) - @cpython_api([PyObject], PyObject, header=None) def slot_tp_str(space, w_self): return space.str(w_self) @@ -402,6 +398,7 @@ def slot_tp_getattro(space, w_self, w_name): return space.call_function(getattr_fn, w_self, w_name) api_func = slot_tp_getattro.api_func + elif name == 'tp_as_number.c_nb_float': float_fn = w_type.getdictvalue(space, '__float__') if float_fn is None: @@ -412,6 +409,20 @@ def slot_nb_float(space, w_self): return space.call_function(float_fn, w_self) api_func = slot_nb_float.api_func + + elif name == 'tp_call': + call_fn = w_type.getdictvalue(space, '__call__') + if call_fn is None: + return + + @cpython_api([PyObject, PyObject, PyObject], PyObject, header=header) + @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) + def slot_tp_call(space, w_self, w_args, w_kwds): + args = Arguments(space, [w_self], + w_stararg=w_args, w_starstararg=w_kwds) + return space.call_args(call_fn, args) + api_func = slot_tp_call.api_func + else: return diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -526,21 +526,28 @@ module = self.import_extension('foo', [ ("tp_call", "METH_VARARGS", ''' - PyObject *obj = PyTuple_GET_ITEM(args, 0); - PyObject *c_args = PyTuple_GET_ITEM(args, 1); - if (!obj->ob_type->tp_call) + PyTypeObject *type = (PyTypeObject *)PyTuple_GET_ITEM(args, 0); + PyObject *obj = PyTuple_GET_ITEM(args, 1); + PyObject *c_args = PyTuple_GET_ITEM(args, 2); + if (!type->tp_call) { PyErr_SetNone(PyExc_ValueError); return NULL; } - return obj->ob_type->tp_call(obj, c_args, NULL); + return type->tp_call(obj, c_args, NULL); ''' ) ]) class C: def __call__(self, *args): return args - assert module.tp_call(C(), ('x', 2)) == ('x', 2) + assert module.tp_call(type(C()), C(), ('x', 2)) == ('x', 2) + class D(type): + def __call__(self, *args): + return "foo! %r" % (args,) + typ1 = D('d', (), {}) + #assert module.tp_call(D, typ1, ()) == "foo! 
()" XXX not working so far + assert isinstance(module.tp_call(type, typ1, ()), typ1) def test_tp_str(self): module = self.import_extension('foo', [ From pypy.commits at gmail.com Tue Mar 1 15:08:17 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 01 Mar 2016 12:08:17 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: fix slot_nb_int, and attempt a test, but blocked by issue 2248 Message-ID: <56d5f6b1.06b01c0a.3dc1a.56a5@mx.google.com> Author: Armin Rigo Branch: cpyext-ext Changeset: r82645:11338c928fad Date: 2016-03-01 21:07 +0100 http://bitbucket.org/pypy/pypy/changeset/11338c928fad/ Log: fix slot_nb_int, and attempt a test, but blocked by issue 2248 diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -337,10 +337,6 @@ return 0 @cpython_api([PyObject], PyObject, header=None) -def slot_nb_int(space, w_self): - return space.int(w_self) - - at cpython_api([PyObject], PyObject, header=None) def slot_tp_iter(space, w_self): return space.iter(w_self) @@ -395,6 +391,17 @@ return space.call_function(getattr_fn, w_self, w_name) api_func = slot_tp_getattro.api_func + elif name == 'tp_as_number.c_nb_int': + int_fn = w_type.getdictvalue(space, '__int__') + if int_fn is None: + return + + @cpython_api([PyObject], PyObject, header=header) + @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) + def slot_nb_int(space, w_self): + return space.call_function(int_fn, w_self) + api_func = slot_nb_int.api_func + elif name == 'tp_as_number.c_nb_float': float_fn = w_type.getdictvalue(space, '__float__') if float_fn is None: diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -469,21 +469,28 @@ def test_nb_int(self): module = self.import_extension('foo', [ - ("nb_int", "METH_O", + ("nb_int", "METH_VARARGS", ''' - if (!args->ob_type->tp_as_number || - !args->ob_type->tp_as_number->nb_int) + PyTypeObject *type = (PyTypeObject *)PyTuple_GET_ITEM(args, 0); + PyObject *obj = PyTuple_GET_ITEM(args, 1); + if (!type->tp_as_number || + !type->tp_as_number->nb_int) { PyErr_SetNone(PyExc_ValueError); return NULL; } - return args->ob_type->tp_as_number->nb_int(args); + return type->tp_as_number->nb_int(obj); ''' ) ]) - assert module.nb_int(10) == 10 - assert module.nb_int(-12.3) == -12 - raises(ValueError, module.nb_int, "123") + assert module.nb_int(int, 10) == 10 + assert module.nb_int(float, -12.3) == -12 + raises(ValueError, module.nb_int, str, "123") + class F(float): + def __int__(self): + return 666 + skip("XXX fix issue 2248 first") + assert module.nb_int(float, F(-12.3)) == -12 def test_nb_float(self): module = self.import_extension('foo', [ From pypy.commits at gmail.com Tue Mar 1 15:08:16 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 01 Mar 2016 12:08:16 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: test and fix: slot_tp_str Message-ID: <56d5f6b0.a3f6c20a.d9d04.2c49@mx.google.com> Author: Armin Rigo Branch: cpyext-ext Changeset: r82644:b530a83f16e8 Date: 2016-03-01 20:53 +0100 http://bitbucket.org/pypy/pypy/changeset/b530a83f16e8/ Log: test and fix: slot_tp_str diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -337,10 +337,6 @@ return 0 @cpython_api([PyObject], PyObject, header=None) -def slot_tp_str(space, w_self): - 
return space.str(w_self) - - at cpython_api([PyObject], PyObject, header=None) def slot_nb_int(space, w_self): return space.int(w_self) @@ -423,6 +419,17 @@ return space.call_args(call_fn, args) api_func = slot_tp_call.api_func + elif name == 'tp_str': + str_fn = w_type.getdictvalue(space, '__str__') + if str_fn is None: + return + + @cpython_api([PyObject], PyObject, header=header) + @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) + def slot_tp_str(space, w_self): + return space.call_function(str_fn, w_self) + api_func = slot_tp_str.api_func + else: return diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -551,21 +551,27 @@ def test_tp_str(self): module = self.import_extension('foo', [ - ("tp_str", "METH_O", + ("tp_str", "METH_VARARGS", ''' - if (!args->ob_type->tp_str) + PyTypeObject *type = (PyTypeObject *)PyTuple_GET_ITEM(args, 0); + PyObject *obj = PyTuple_GET_ITEM(args, 1); + if (!type->tp_str) { PyErr_SetNone(PyExc_ValueError); return NULL; } - return args->ob_type->tp_str(args); + return type->tp_str(obj); ''' ) ]) class C: def __str__(self): return "text" - assert module.tp_str(C()) == "text" + assert module.tp_str(type(C()), C()) == "text" + class D(int): + def __str__(self): + return "more text" + assert module.tp_str(int, D(42)) == "42" def test_mp_ass_subscript(self): module = self.import_extension('foo', [ From pypy.commits at gmail.com Tue Mar 1 15:16:23 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 01 Mar 2016 12:16:23 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: Test and fix for tp_iter and tp_iternext (also fixes the latter to not Message-ID: <56d5f897.6bb8c20a.36ad2.3153@mx.google.com> Author: Armin Rigo Branch: cpyext-ext Changeset: r82646:3af62800d459 Date: 2016-03-01 21:15 +0100 http://bitbucket.org/pypy/pypy/changeset/3af62800d459/ Log: Test and fix for tp_iter and tp_iternext (also fixes the latter to not raise StopIteration but simply return NULL in case of exhaustion) diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -336,14 +336,6 @@ space.get_and_call_args(w_descr, w_self, args) return 0 - at cpython_api([PyObject], PyObject, header=None) -def slot_tp_iter(space, w_self): - return space.iter(w_self) - - at cpython_api([PyObject], PyObject, header=None) -def slot_tp_iternext(space, w_self): - return space.next(w_self) - from rpython.rlib.nonconst import NonConstant SLOTS = {} @@ -437,6 +429,33 @@ return space.call_function(str_fn, w_self) api_func = slot_tp_str.api_func + elif name == 'tp_iter': + iter_fn = w_type.getdictvalue(space, '__iter__') + if iter_fn is None: + return + + @cpython_api([PyObject], PyObject, header=header) + @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) + def slot_tp_iter(space, w_self): + return space.call_function(iter_fn, w_self) + api_func = slot_tp_iter.api_func + + elif name == 'tp_iternext': + iternext_fn = w_type.getdictvalue(space, 'next') + if iternext_fn is None: + return + + @cpython_api([PyObject], PyObject, header=header) + @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) + def slot_tp_iternext(space, w_self): + try: + return space.call_function(iternext_fn, w_self) + except OperationError, e: + if not e.match(space, space.w_StopIteration): + raise + return None + api_func = 
slot_tp_iternext.api_func + else: return diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -645,32 +645,49 @@ def test_tp_iter(self): module = self.import_extension('foo', [ - ("tp_iter", "METH_O", + ("tp_iter", "METH_VARARGS", ''' - if (!args->ob_type->tp_iter) + PyTypeObject *type = (PyTypeObject *)PyTuple_GET_ITEM(args, 0); + PyObject *obj = PyTuple_GET_ITEM(args, 1); + if (!type->tp_iter) { PyErr_SetNone(PyExc_ValueError); return NULL; } - return args->ob_type->tp_iter(args); + return type->tp_iter(obj); ''' ), - ("tp_iternext", "METH_O", + ("tp_iternext", "METH_VARARGS", ''' - if (!args->ob_type->tp_iternext) + PyTypeObject *type = (PyTypeObject *)PyTuple_GET_ITEM(args, 0); + PyObject *obj = PyTuple_GET_ITEM(args, 1); + PyObject *result; + if (!type->tp_iternext) { PyErr_SetNone(PyExc_ValueError); return NULL; } - return args->ob_type->tp_iternext(args); + result = type->tp_iternext(obj); + if (!result && !PyErr_Occurred()) + result = PyString_FromString("stop!"); + return result; ''' ) ]) l = [1] - it = module.tp_iter(l) + it = module.tp_iter(list, l) assert type(it) is type(iter([])) - assert module.tp_iternext(it) == 1 - raises(StopIteration, module.tp_iternext, it) + assert module.tp_iternext(type(it), it) == 1 + assert module.tp_iternext(type(it), it) == "stop!" + # + class LL(list): + def __iter__(self): + return iter(()) + ll = LL([1]) + it = module.tp_iter(list, ll) + assert type(it) is type(iter([])) + x = list(it) + assert x == [1] def test_bool(self): module = self.import_extension('foo', [ From pypy.commits at gmail.com Tue Mar 1 15:21:37 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 01 Mar 2016 12:21:37 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: Rewrite the test in a way independent of issue 2248 Message-ID: <56d5f9d1.11301c0a.922d9.5bd7@mx.google.com> Author: Armin Rigo Branch: cpyext-ext Changeset: r82647:1c037e8cb40d Date: 2016-03-01 21:20 +0100 http://bitbucket.org/pypy/pypy/changeset/1c037e8cb40d/ Log: Rewrite the test in a way independent of issue 2248 diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -489,8 +489,11 @@ class F(float): def __int__(self): return 666 - skip("XXX fix issue 2248 first") - assert module.nb_int(float, F(-12.3)) == -12 + # as long as issue 2248 is not fixed, 'expected' is 666 on pypy, + # but it should be -12. This test is not concerned about that, + # but only about getting the same answer with module.nb_int(). 
+ expected = float.__int__(F(-12.3)) + assert module.nb_int(float, F(-12.3)) == expected def test_nb_float(self): module = self.import_extension('foo', [ From pypy.commits at gmail.com Tue Mar 1 16:16:57 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 01 Mar 2016 13:16:57 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: test and fix: slot_tp_init Message-ID: <56d606c9.49f9c20a.c7eb3.4307@mx.google.com> Author: Armin Rigo Branch: cpyext-ext Changeset: r82648:51eac7129726 Date: 2016-03-01 22:15 +0100 http://bitbucket.org/pypy/pypy/changeset/51eac7129726/ Log: test and fix: slot_tp_init diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -329,13 +329,6 @@ w_args_new = space.newtuple(args_w) return space.call(w_func, w_args_new, w_kwds) - at cpython_api([PyObject, PyObject, PyObject], rffi.INT_real, error=-1, header=None) -def slot_tp_init(space, w_self, w_args, w_kwds): - w_descr = space.lookup(w_self, '__init__') - args = Arguments.frompacked(space, w_args, w_kwds) - space.get_and_call_args(w_descr, w_self, args) - return 0 - from rpython.rlib.nonconst import NonConstant SLOTS = {} @@ -456,6 +449,21 @@ return None api_func = slot_tp_iternext.api_func + elif name == 'tp_init': + init_fn = w_type.getdictvalue(space, '__init__') + if init_fn is None: + return + + @cpython_api([PyObject, PyObject, PyObject], rffi.INT_real, error=-1, + header=header) + @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) + def slot_tp_init(space, w_self, w_args, w_kwds): + args = Arguments(space, [w_self], + w_stararg=w_args, w_starstararg=w_kwds) + space.call_args(init_fn, args) + return 0 + api_func = slot_tp_init.api_func + else: return diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -559,6 +559,35 @@ #assert module.tp_call(D, typ1, ()) == "foo! 
()" XXX not working so far assert isinstance(module.tp_call(type, typ1, ()), typ1) + def test_tp_init(self): + module = self.import_extension('foo', [ + ("tp_init", "METH_VARARGS", + ''' + PyTypeObject *type = (PyTypeObject *)PyTuple_GET_ITEM(args, 0); + PyObject *obj = PyTuple_GET_ITEM(args, 1); + PyObject *c_args = PyTuple_GET_ITEM(args, 2); + if (!type->tp_init) + { + PyErr_SetNone(PyExc_ValueError); + return NULL; + } + if (type->tp_init(obj, c_args, NULL) < 0) + return NULL; + Py_INCREF(Py_None); + return Py_None; + ''' + ) + ]) + x = [42] + assert module.tp_init(list, x, ("hi",)) is None + assert x == ["h", "i"] + class LL(list): + def __init__(self, *ignored): + raise Exception + x = LL.__new__(LL) + assert module.tp_init(list, x, ("hi",)) is None + assert x == ["h", "i"] + def test_tp_str(self): module = self.import_extension('foo', [ ("tp_str", "METH_VARARGS", From pypy.commits at gmail.com Tue Mar 1 21:59:22 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 01 Mar 2016 18:59:22 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: fix fix Message-ID: <56d6570a.86e31c0a.a4196.ffffb5fe@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r82649:d34b903fd57b Date: 2016-03-01 21:45 -0500 http://bitbucket.org/pypy/pypy/changeset/d34b903fd57b/ Log: fix fix diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -8,7 +8,7 @@ from pypy.module.cpyext.pyerrors import PyErr_BadArgument from pypy.module.cpyext.pyobject import ( PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, - make_typedescr, get_typedescr) + make_typedescr, get_typedescr, as_pyobj) from pypy.module.cpyext.stringobject import PyString_Check from pypy.module.sys.interp_encoding import setdefaultencoding from pypy.module._codecs.interp_codecs import CodecState @@ -59,14 +59,14 @@ py_uni.c_ob_refcnt = 1 py_uni.c_ob_type = pytype if length > 0: - py_uni.c_str = lltype.malloc(rffi.CCHARP.TO, length+1, + py_uni.c_str = lltype.malloc(rffi.CWCHARP.TO, length+1, flavor='raw', zero=True) - py_str.c_length = length + py_uni.c_length = length s = rffi.wcharpsize2unicode(py_uni.c_str, py_uni.c_length) w_obj = space.wrap(s) - py_str.c_ob_shash = space.hash_w(w_obj) - track_reference(space, rffi.cast(PyObject, py_str), w_obj) - return rffi.cast(PyObject, py_str) + py_uni.c_hash = space.hash_w(w_obj) + track_reference(space, rffi.cast(PyObject, py_uni), w_obj) + return rffi.cast(PyObject, py_uni) def new_empty_unicode(space, length): """ From pypy.commits at gmail.com Wed Mar 2 00:30:00 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 01 Mar 2016 21:30:00 -0800 (PST) Subject: [pypy-commit] pypy issue-2248: fix for issue #2248, can W_Float.int() be removed? Message-ID: <56d67a58.8e301c0a.161b8.ffffd20a@mx.google.com> Author: mattip Branch: issue-2248 Changeset: r82650:64144f654a33 Date: 2016-03-02 00:27 -0500 http://bitbucket.org/pypy/pypy/changeset/64144f654a33/ Log: fix for issue #2248, can W_Float.int() be removed? 
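The behaviour this fix targets, in CPython terms and as the new test below asserts (a minimal illustration, not PyPy internals):

    class F(float):
        def __int__(self):
            return 666

    f = F(-12.3)
    assert int(f) == 666             # int() dispatches to the override
    assert float.__int__(f) == -12   # the base slot truncates the value and
                                     # must not re-dispatch to F.__int__

Binding float's __int__ to descr_trunc (in the floatobject.py hunk below) gives the second behaviour.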
diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -452,7 +452,6 @@ assert a + 1 == 2 assert a + 1.1 == 2 - def test_binaryop_calls_coerce_always(self): l = [] class A: @@ -1076,6 +1075,16 @@ assert (D() > A()) == 'D:A.gt' assert (D() >= A()) == 'D:A.ge' + def test_override___int__(self): + class F(float): + def __int__(self): + return 666 + f = F(-12.3) + assert int(f) == 666 + # on cpython, this calls float_trunc() in floatobject.c + # which ends up calling PyFloat_AS_DOUBLE((PyFloatObject*) f) + assert float.__int__(f) == -12 + class AppTestOldStyleClassBytesDict(object): def setup_class(cls): diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -661,7 +661,7 @@ __format__ = interp2app(W_FloatObject.descr_format), __coerce__ = interp2app(W_FloatObject.descr_coerce), __nonzero__ = interp2app(W_FloatObject.descr_nonzero), - __int__ = interp2app(W_FloatObject.int), + __int__ = interp2app(W_FloatObject.descr_trunc), __float__ = interp2app(W_FloatObject.descr_float), __long__ = interp2app(W_FloatObject.descr_long), __trunc__ = interp2app(W_FloatObject.descr_trunc), From pypy.commits at gmail.com Wed Mar 2 06:34:35 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 02 Mar 2016 03:34:35 -0800 (PST) Subject: [pypy-commit] pypy default: kill two tests that were important for an older module dict implementation Message-ID: <56d6cfcb.a2afc20a.98948.3798@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82651:61022b28fd0f Date: 2016-03-01 15:27 +0100 http://bitbucket.org/pypy/pypy/changeset/61022b28fd0f/ Log: kill two tests that were important for an older module dict implementation approach diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ b/pypy/objspace/std/test/test_celldict.py @@ -109,22 +109,10 @@ class TestModuleDictImplementation(BaseTestRDictImplementation): StrategyClass = ModuleDictStrategy -class TestModuleDictImplementationWithBuiltinNames(BaseTestRDictImplementation): - StrategyClass = ModuleDictStrategy - - string = "int" - string2 = "isinstance" - class TestDevolvedModuleDictImplementation(BaseTestDevolvedDictImplementation): StrategyClass = ModuleDictStrategy -class TestDevolvedModuleDictImplementationWithBuiltinNames(BaseTestDevolvedDictImplementation): - StrategyClass = ModuleDictStrategy - - string = "int" - string2 = "isinstance" - class AppTestCellDict(object): spaceconfig = {"objspace.std.withcelldict": True} From pypy.commits at gmail.com Wed Mar 2 06:34:37 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 02 Mar 2016 03:34:37 -0800 (PST) Subject: [pypy-commit] pypy default: make test_setdefault_fast pass with celldicts on pypy Message-ID: <56d6cfcd.2953c20a.9a073.30e7@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82652:5213e8be2304 Date: 2016-03-02 12:31 +0100 http://bitbucket.org/pypy/pypy/changeset/5213e8be2304/ Log: make test_setdefault_fast pass with celldicts on pypy - reduce the nubmer of hash calls from 3 to 2 - since reducing it to 1 is impractical, adapt the test diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -64,6 +64,9 @@ def setitem_str(self, w_dict, key, 
w_value): cell = self.getdictvalue_no_unwrapping(w_dict, key) + return self._setitem_str_cell_known(cell, w_dict, key, w_value) + + def _setitem_str_cell_known(self, cell, w_dict, key, w_value): w_value = write_cell(self.space, cell, w_value) if w_value is None: return @@ -74,10 +77,11 @@ space = self.space if space.is_w(space.type(w_key), space.w_str): key = space.str_w(w_key) - w_result = self.getitem_str(w_dict, key) + cell = self.getdictvalue_no_unwrapping(w_dict, key) + w_result = unwrap_cell(self.space, cell) if w_result is not None: return w_result - self.setitem_str(w_dict, key, w_default) + self._setitem_str_cell_known(cell, w_dict, key, w_default) return w_default else: self.switch_to_object_strategy(w_dict) diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ b/pypy/objspace/std/test/test_celldict.py @@ -108,10 +108,11 @@ class TestModuleDictImplementation(BaseTestRDictImplementation): StrategyClass = ModuleDictStrategy - + setdefault_hash_count = 2 class TestDevolvedModuleDictImplementation(BaseTestDevolvedDictImplementation): StrategyClass = ModuleDictStrategy + setdefault_hash_count = 2 class AppTestCellDict(object): diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1248,6 +1248,9 @@ impl.setitem(x, x) assert type(impl.get_strategy()) is ObjectDictStrategy + + setdefault_hash_count = 1 + def test_setdefault_fast(self): on_pypy = "__pypy__" in sys.builtin_module_names impl = self.impl @@ -1255,11 +1258,11 @@ x = impl.setdefault(key, 1) assert x == 1 if on_pypy: - assert key.hash_count == 1 + assert key.hash_count == self.setdefault_hash_count x = impl.setdefault(key, 2) assert x == 1 if on_pypy: - assert key.hash_count == 2 + assert key.hash_count == self.setdefault_hash_count + 1 def test_fallback_evil_key(self): class F(object): From pypy.commits at gmail.com Wed Mar 2 07:05:34 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 02 Mar 2016 04:05:34 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <56d6d70e.aa0ac20a.a021f.40d6@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r714:82f6f24d6fe5 Date: 2016-03-02 13:05 +0100 http://bitbucket.org/pypy/pypy.org/changeset/82f6f24d6fe5/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $63003 of $105000 (60.0%) + $63060 of $105000 (60.1%)
    @@ -23,7 +23,7 @@
  • From pypy.commits at gmail.com Wed Mar 2 08:04:06 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 02 Mar 2016 05:04:06 -0800 (PST) Subject: [pypy-commit] pypy default: update cffi/cc2d534f1ed7 Message-ID: <56d6e4c6.49f9c20a.c7eb3.595a@mx.google.com> Author: Armin Rigo Branch: Changeset: r82653:7159d98fd574 Date: 2016-03-02 14:03 +0100 http://bitbucket.org/pypy/pypy/changeset/7159d98fd574/ Log: update cffi/cc2d534f1ed7 diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py @@ -12,7 +12,9 @@ def create_venv(name): tmpdir = udir.join(name) try: - subprocess.check_call(['virtualenv', '--never-download', + subprocess.check_call(['virtualenv', + #'--never-download', <= could be added, but causes failures + # in random cases on random machines '-p', os.path.abspath(sys.executable), str(tmpdir)]) except OSError as e: From pypy.commits at gmail.com Wed Mar 2 08:59:06 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 02 Mar 2016 05:59:06 -0800 (PST) Subject: [pypy-commit] pypy issue-2248: remove unused function Message-ID: <56d6f1aa.a3f6c20a.d9d04.70bf@mx.google.com> Author: mattip Branch: issue-2248 Changeset: r82654:a6507c5a6623 Date: 2016-03-02 08:58 -0500 http://bitbucket.org/pypy/pypy/changeset/a6507c5a6623/ Log: remove unused function diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -159,17 +159,6 @@ def _float_w(self, space): return self.floatval - def int(self, space): - if (type(self) is not W_FloatObject and - space.is_overloaded(self, space.w_float, '__int__')): - return W_Root.int(self, space) - try: - value = ovfcheck_float_to_int(self.floatval) - except OverflowError: - return space.long(self) - else: - return space.newint(value) - def is_w(self, space, w_other): from rpython.rlib.longlong2float import float2longlong if not isinstance(w_other, W_FloatObject): From pypy.commits at gmail.com Wed Mar 2 10:03:29 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 02 Mar 2016 07:03:29 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: created a test that tries to execute a byte code block (stack is not prepared) Message-ID: <56d700c1.c96cc20a.f363c.ffff88a7@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82655:2c9ed008895a Date: 2016-03-01 18:48 +0100 http://bitbucket.org/pypy/pypy/changeset/2c9ed008895a/ Log: created a test that tries to execute a byte code block (stack is not prepared) diff --git a/rpython/jit/backend/llsupport/tl/test/code_strategies.py b/rpython/jit/backend/llsupport/tl/test/code_strategies.py --- a/rpython/jit/backend/llsupport/tl/test/code_strategies.py +++ b/rpython/jit/backend/llsupport/tl/test/code_strategies.py @@ -49,6 +49,12 @@ if hasattr(clazz, 'BYTE_CODE'): yield clazz +def get_byte_code_class(num): + for clazz in byte_code_classes(): + if clazz.BYTE_CODE == num: + return clazz + return None + @composite def single_bytecode(draw, clazzes=st.sampled_from(byte_code_classes()), @@ -57,5 +63,14 @@ inst = clazz.create_from(draw, get_strategy_for) bytecode, consts = code.Context().transform([inst]) _stack = draw(runtime_stack(clazz)) - return clazz, bytecode, consts, _stack + return bytecode, consts, _stack + at composite +def bytecode_block(draw, + 
clazzes=st.sampled_from(byte_code_classes()), + integrals=st.integers(), texts=st.text()): + clazz = draw(clazzes) + inst = clazz.create_from(draw, get_strategy_for) + bytecode, consts = code.Context().transform([inst]) + return bytecode, consts + diff --git a/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py b/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py --- a/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py +++ b/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py @@ -1,6 +1,7 @@ import py from hypothesis import given -from rpython.jit.backend.llsupport.tl import code, stack, interp +from rpython.jit.backend.llsupport.tl import code, interp +from rpython.jit.backend.llsupport.tl.stack import Stack from rpython.jit.backend.llsupport.tl.test import code_strategies as st class TestByteCode(object): @@ -23,8 +24,20 @@ class TestInterp(object): @given(st.single_bytecode()) def test_consume_stack(self, args): - clazz, bytecode, consts, stack = args + bytecode, consts, stack = args space = interp.Space() i = interp.dispatch_once(space, 0, bytecode, consts, stack) assert i == len(bytecode) + clazz = code.get_byte_code_class(ord(bytecode[0])) assert stack.size() == len(clazz._return_on_stack_types) + + @given(st.bytecode_block()) + def test_execute_bytecode_block(self, args): + bytecode, consts = args + space = interp.Space() + stack = Stack(16) + pc = 0 + end = len(bytecode) + while pc < end: + pc = interp.dispatch_once(space, pc, bytecode, consts, stack) + assert pc == len(bytecode) diff --git a/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py b/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py --- a/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py +++ b/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py @@ -53,7 +53,7 @@ res = self.builder.translator.platform.execute(exe, args, env=env) return res.returncode, res.out, res.err - @given(st.single_bytecode()) + @given(st.bytecode_block()) def test_execute_single_bytecode(self, program): clazz, bytecode, consts, stack = program result, out, err = self.execute(bytecode, consts) From pypy.commits at gmail.com Wed Mar 2 10:03:31 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 02 Mar 2016 07:03:31 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: removed pool and loading constant using immediate values, test_runner passes already Message-ID: <56d700c3.4577c20a.7d582.ffff9240@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82656:62b48d6dd7ca Date: 2016-03-02 14:41 +0100 http://bitbucket.org/pypy/pypy/changeset/62b48d6dd7ca/ Log: removed pool and loading constant using immediate values, test_runner passes already diff --git a/rpython/jit/backend/llsupport/tl/test/code_strategies.py b/rpython/jit/backend/llsupport/tl/test/code_strategies.py --- a/rpython/jit/backend/llsupport/tl/test/code_strategies.py +++ b/rpython/jit/backend/llsupport/tl/test/code_strategies.py @@ -73,4 +73,3 @@ inst = clazz.create_from(draw, get_strategy_for) bytecode, consts = code.Context().transform([inst]) return bytecode, consts - diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -50,7 +50,7 @@ self.gcrootmap_retaddr_forced = 0 self.failure_recovery_code = [0, 0, 0, 0] self.wb_slowpath = [0,0,0,0,0] - self.pool = None + # self.pool = None def setup(self, looptoken): BaseAssembler.setup(self, looptoken) @@ -58,8 +58,8 @@ if 
we_are_translated(): self.debug = False self.current_clt = looptoken.compiled_loop_token - self.pool = LiteralPool() - self.mc = InstrBuilder(self.pool) + # POOL self.pool = LiteralPool() + self.mc = InstrBuilder(None) self.pending_guard_tokens = [] self.pending_guard_tokens_recovered = 0 #assert self.datablockwrapper is None --- but obscure case @@ -76,7 +76,8 @@ self.current_clt = None self._regalloc = None self.mc = None - self.pool = None + # self.pool = None + def target_arglocs(self, looptoken): return looptoken._zarch_arglocs @@ -92,7 +93,7 @@ self.mc.BCR_rr(0xf, register.value) def _build_failure_recovery(self, exc, withfloats=False): - mc = InstrBuilder(self.pool) + mc = InstrBuilder(None) self.mc = mc # fill in the jf_descr and jf_gcmap fields of the frame according # to which failure we are resuming from. These are set before @@ -132,20 +133,23 @@ startpos = self.mc.currpos() fail_descr, target = self.store_info_on_descr(startpos, guardtok) assert target != 0 - pool_offset = guardtok._pool_offset - assert pool_offset != -1 + # POOL + #pool_offset = guardtok._pool_offset + #assert pool_offset != -1 # overwrite the gcmap in the jitframe - offset = pool_offset + RECOVERY_GCMAP_POOL_OFFSET - self.mc.LG(r.SCRATCH2, l.pool(offset)) + #offset = pool_offset + RECOVERY_GCMAP_POOL_OFFSET + #self.mc.LG(r.SCRATCH2, l.pool(offset)) + ## overwrite the target in pool + #offset = pool_offset + RECOVERY_TARGET_POOL_OFFSET + ## overwrite!! + #self.pool.overwrite_64(self.mc, offset, target) + #self.mc.LG(r.r14, l.pool(offset)) - # overwrite the target in pool - offset = pool_offset + RECOVERY_TARGET_POOL_OFFSET - self.pool.overwrite_64(self.mc, offset, target) - self.mc.LG(r.r14, l.pool(offset)) - + self.load_gcmap(self.mc, r.SCRATCH2, gcmap=guardtok.gcmap) + self.mc.load_imm(r.r14, target) self.mc.load_imm(r.SCRATCH, fail_descr) - self.mc.BCR(l.imm(0xf), r.r14) + self.mc.BCR(c.ANY, r.r14) return startpos @@ -632,7 +636,7 @@ # operations = regalloc.prepare_loop(inputargs, operations, looptoken, clt.allgcrefs) - self.pool.pre_assemble(self, operations) + # POOL self.pool.pre_assemble(self, operations) entrypos = self.mc.get_relative_pos() self._call_header_with_stack_check() looppos = self.mc.get_relative_pos() @@ -641,7 +645,7 @@ self.update_frame_depth(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) # size_excluding_failure_stuff = self.mc.get_relative_pos() - self.pool.post_assemble(self) + # POOL self.pool.post_assemble(self) self.write_pending_failure_recoveries() full_size = self.mc.get_relative_pos() # @@ -700,13 +704,13 @@ operations, self.current_clt.allgcrefs, self.current_clt.frame_info) - self.pool.pre_assemble(self, operations, bridge=True) + # POOL self.pool.pre_assemble(self, operations, bridge=True) startpos = self.mc.get_relative_pos() - self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - startpos)) + # POOL self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - startpos)) self._check_frame_depth(self.mc, regalloc.get_gcmap()) frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) codeendpos = self.mc.get_relative_pos() - self.pool.post_assemble(self) + # POOL self.pool.post_assemble(self) self.write_pending_failure_recoveries() fullsize = self.mc.get_relative_pos() # @@ -733,7 +737,7 @@ # to 'adr_new_target'. 
# Updates the pool address mc = InstrBuilder() - mc.write_i64(adr_new_target) + mc.b_abs(adr_new_target) mc.copy_to_raw_memory(faildescr.adr_jump_offset) assert faildescr.adr_jump_offset != 0 faildescr.adr_jump_offset = 0 # means "patched" @@ -878,14 +882,16 @@ self.mc.STG(r.SCRATCH, l.addr(offset, r.SPP)) return assert 0, "not supported location" - elif prev_loc.is_in_pool(): - if loc.is_reg(): - self.mc.LG(loc, prev_loc) + elif prev_loc.is_imm_float(): + self.mc.load_imm(r.SCRATCH, prev_loc.value) + if loc.is_fp_reg(): + self.mc.LDY(loc, l.addr(0, r.SCRATCH)) return - elif loc.is_fp_reg(): - self.mc.LDY(loc, prev_loc) + elif loc.is_stack(): + src_adr = l.addr(0, r.SCRATCH) + tgt_adr = l.AddressLocation(r.SPP, None, loc.value, l.imm(7)) + self.mc.MVC(tgt_adr, src_adr) return - assert 0, "not supported location (previous is pool loc)" elif prev_loc.is_stack(): offset = prev_loc.value # move from memory to register @@ -989,9 +995,11 @@ for tok in self.pending_guard_tokens: addr = rawstart + tok.pos_jump_offset # - tok.faildescr.adr_jump_offset = rawstart + \ - self.pool.pool_start + tok._pool_offset + \ - RECOVERY_TARGET_POOL_OFFSET + # POOL + #tok.faildescr.adr_jump_offset = rawstart + \ + # self.pool.pool_start + tok._pool_offset + \ + # RECOVERY_TARGET_POOL_OFFSET + tok.faildescr.adr_jump_offset = rawstart + tok.pos_recovery_stub relative_target = tok.pos_recovery_stub - tok.pos_jump_offset # if not tok.guard_not_invalidated(): @@ -1011,7 +1019,7 @@ # Build a new stackframe of size STD_FRAME_SIZE_IN_BYTES fpoff = JIT_ENTER_EXTRA_STACK_SPACE self.mc.STMG(r.r6, r.r15, l.addr(-fpoff+6*WORD, r.SP)) - self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - self.mc.get_relative_pos())) + # POOL self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - self.mc.get_relative_pos())) # f8 through f15 are saved registers (= non volatile) # TODO it would be good to detect if any float is used in the loop # and to skip this push/pop whenever no float operation occurs @@ -1172,9 +1180,11 @@ # ASSEMBLER EMISSION def emit_label(self, op, arglocs, regalloc): - offset = self.pool.pool_start - self.mc.get_relative_pos() + pass + # POOL + #offset = self.pool.pool_start - self.mc.get_relative_pos() # load the pool address at each label - self.mc.LARL(r.POOL, l.halfword(offset)) + #self.mc.LARL(r.POOL, l.halfword(offset)) def emit_jump(self, op, arglocs, regalloc): # The backend's logic assumes that the target code is in a piece of @@ -1191,14 +1201,16 @@ if descr in self.target_tokens_currently_compiling: # a label has a LARL instruction that does not need # to be executed, thus remove the first opcode - self.mc.b_offset(descr._ll_loop_code + self.mc.LARL_byte_count) + self.mc.b_offset(descr._ll_loop_code) # POOL + self.mc.LARL_byte_count) else: - offset = self.pool.get_descr_offset(descr) + \ - JUMPABS_TARGET_ADDR__POOL_OFFSET - self.mc.LG(r.SCRATCH, l.pool(offset)) + # POOL + #offset = self.pool.get_descr_offset(descr) + \ + # JUMPABS_TARGET_ADDR__POOL_OFFSET + #self.mc.LG(r.SCRATCH, l.pool(offset)) + #self.pool.overwrite_64(self.mc, offset, descr._ll_loop_code) + self.mc.load_imm(r.SCRATCH, descr._ll_loop_code) self.mc.BCR(c.ANY, r.SCRATCH) - self.pool.overwrite_64(self.mc, offset, descr._ll_loop_code) def emit_finish(self, op, arglocs, regalloc): diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -35,7 +35,7 @@ GuardToken.__init__(self, cpu, gcmap, descr, failargs, 
faillocs, guard_opnum, frame_depth) self.fcond = fcond - self._pool_offset = -1 + # POOL self._pool_offset = -1 class AbstractZARCHBuilder(object): @@ -122,6 +122,10 @@ def currpos(self): return self.get_relative_pos() + def b_abs(self, addr): + self.load_imm(r.r14, addr) + self.BCR(c.ANY, r.r14) + def b_cond_offset(self, offset, condition): assert condition != c.cond_none self.BRCL(condition, l.imm(offset)) @@ -171,7 +175,6 @@ # 64 bit unsigned self.CLGR(a, b) - def load_imm(self, dest_reg, word): if -2**15 <= word <= 2**15-1: self.LGHI(dest_reg, l.imm(word)) @@ -181,8 +184,6 @@ if self.pool and self.pool.contains_constant(word): self.LG(dest_reg, l.pool(self.pool.get_direct_offset(word))) return - # this is not put into the constant pool, because it - # is an immediate value that cannot easily be forseen self.IILF(dest_reg, l.imm(word & 0xFFFFffff)) self.IIHF(dest_reg, l.imm((word >> 32) & 0xFFFFffff)) diff --git a/rpython/jit/backend/zarch/helper/assembler.py b/rpython/jit/backend/zarch/helper/assembler.py --- a/rpython/jit/backend/zarch/helper/assembler.py +++ b/rpython/jit/backend/zarch/helper/assembler.py @@ -2,6 +2,7 @@ import rpython.jit.backend.zarch.registers as r from rpython.rlib.rarithmetic import intmask from rpython.jit.backend.zarch.arch import WORD +from rpython.jit.backend.zarch.helper.regalloc import check_imm_value from rpython.jit.metainterp.history import FLOAT from rpython.jit.metainterp.resoperation import rop from rpython.rtyper.lltypesystem import rffi, lltype @@ -11,7 +12,8 @@ l1 = arglocs[1] assert not l0.is_imm() # do the comparison - self.mc.cmp_op(l0, l1, pool=l1.is_in_pool(), imm=l1.is_imm(), signed=signed, fp=fp) + # POOL self.mc.cmp_op(l0, l1, pool=l1.is_in_pool(), imm=l1.is_imm(), signed=signed, fp=fp) + self.mc.cmp_op(l0, l1, imm=l1.is_imm(), signed=signed, fp=fp) self.flush_cc(condition, arglocs[2]) @@ -28,43 +30,60 @@ f.name = 'emit_shift_' + func return f -def gen_emit_rr_or_rpool(rr_func, rp_func): - """ the parameters can either be both in registers or - the first is in the register, second in literal pool. - """ +def gen_emit_rr(rr_func): def f(self, op, arglocs, regalloc): l0, l1 = arglocs - if l1.is_imm() and not l1.is_in_pool(): - assert 0, "logical imm must reside in pool!" - if l1.is_in_pool(): - getattr(self.mc, rp_func)(l0, l1) - else: - getattr(self.mc, rr_func)(l0, l1) + getattr(self.mc, rr_func)(l0, l1) return f -def gen_emit_imm_pool_rr(imm_func, pool_func, rr_func): +# POOL +#def gen_emit_rr_or_rpool(rr_func, rp_func): +# """ the parameters can either be both in registers or +# the first is in the register, second in literal pool. +# """ +# def f(self, op, arglocs, regalloc): +# l0, l1 = arglocs +# if l1.is_imm() and not l1.is_in_pool(): +# assert 0, "logical imm must reside in pool!" 
+# if l1.is_in_pool(): +# getattr(self.mc, rp_func)(l0, l1) +# else: +# getattr(self.mc, rr_func)(l0, l1) +# return f + +def gen_emit_rr_rh_ri(rr_func, rh_func, ri_func): def emit(self, op, arglocs, regalloc): l0, l1 = arglocs - if l1.is_in_pool(): - getattr(self.mc, pool_func)(l0, l1) - elif l1.is_imm(): - getattr(self.mc, imm_func)(l0, l1) + if l1.is_imm(): + if check_imm_value(l1.value): + getattr(self.mc, rh_func)(l0, l1) + else: + getattr(self.mc, ri_func)(l0, l1) else: getattr(self.mc, rr_func)(l0, l1) return emit -def gen_emit_pool_or_rr_evenodd(pool_func, rr_func): +# POOL +#def gen_emit_imm_pool_rr(imm_func, pool_func, rr_func): +# def emit(self, op, arglocs, regalloc): +# l0, l1 = arglocs +# if l1.is_in_pool(): +# getattr(self.mc, pool_func)(l0, l1) +# elif l1.is_imm(): +# getattr(self.mc, imm_func)(l0, l1) +# else: +# getattr(self.mc, rr_func)(l0, l1) +# return emit + +def gen_emit_div_mod(rr_func): def emit(self, op, arglocs, regalloc): lr, lq, l1 = arglocs # lr == remainer, lq == quotient # when entering the function lr contains the dividend # after this operation either lr or lq is used further - assert l1.is_in_pool() or not l1.is_imm() , "imm divider not supported" + assert not l1.is_imm(), "imm divider not supported" # remainer is always a even register r0, r2, ... , r14 assert lr.is_even() assert lq.is_odd() self.mc.XGR(lr, lr) - if l1.is_in_pool(): - getattr(self.mc,pool_func)(lr, l1) - else: - getattr(self.mc,rr_func)(lr, l1) + getattr(self.mc,rr_func)(lr, l1) return emit diff --git a/rpython/jit/backend/zarch/helper/regalloc.py b/rpython/jit/backend/zarch/helper/regalloc.py --- a/rpython/jit/backend/zarch/helper/regalloc.py +++ b/rpython/jit/backend/zarch/helper/regalloc.py @@ -26,7 +26,8 @@ if check_imm32(a1): l1 = imm(a1.getint()) else: - l1 = self.ensure_reg_or_pool(a1) + # POOL l1 = self.ensure_reg_or_pool(a1) + l1 = self.ensure_reg(a1) l0 = self.force_result_in_reg(op, a0) return [l0, l1] @@ -38,7 +39,7 @@ if check_imm32(a1): l1 = imm(a1.getint()) else: - l1 = self.ensure_reg_or_pool(a1) + l1 = self.ensure_reg(a1) l0 = self.force_result_in_reg(op, a0) return [l0, l1] @@ -50,7 +51,7 @@ if check_imm32(a1): l1 = imm(a1.getint()) else: - l1 = self.ensure_reg_or_pool(a1) + l1 = self.ensure_reg(a1) lr,lq = self.rm.ensure_even_odd_pair(a0, op, bind_first=False) return [lr, lq, l1] @@ -60,11 +61,11 @@ a1 = op.getarg(1) l1 = self.ensure_reg(a1) if isinstance(a0, Const): - poolloc = self.ensure_reg_or_pool(a0) + loc = self.ensure_reg(a0) lr,lq = self.rm.ensure_even_odd_pair(a0, op, bind_first=modulus, must_exist=False, move_regs=False) - self.assembler.regalloc_mov(poolloc, lq) + self.assembler.regalloc_mov(loc, lq) else: lr,lq = self.rm.ensure_even_odd_pair(a0, op, bind_first=modulus) return [lr, lq, l1] @@ -77,16 +78,18 @@ a0 = op.getarg(0) a1 = op.getarg(1) # sub is not commotative, thus cannot swap operands - l1 = self.ensure_reg_or_pool(a1) - l0 = self.force_result_in_reg(op, a0) - return [l0, l1] + # POOL l1 = self.ensure_reg_or_pool(a1) + l0 = self.ensure_reg(a0) + l1 = self.ensure_reg(a1) + res = self.force_allocate_reg(op) + return [res, l0, l1] def prepare_int_logic(self, op): a0 = op.getarg(0) a1 = op.getarg(1) if a0.is_constant(): a0, a1 = a1, a0 - l1 = self.ensure_reg_or_pool(a1) + l1 = self.ensure_reg(a1) l0 = self.force_result_in_reg(op, a0) return [l0, l1] @@ -120,7 +123,7 @@ def prepare_float_cmp_op(self, op): l0 = self.ensure_reg(op.getarg(0)) - l1 = self.ensure_reg_or_pool(op.getarg(1)) + l1 = self.ensure_reg(op.getarg(1)) res = 
self.force_allocate_reg_or_cc(op) return [l0, l1, res] @@ -139,7 +142,7 @@ if allow_swap: if isinstance(a0, Const): a0,a1 = a1,a0 - l1 = self.ensure_reg_or_pool(a1) + l1 = self.ensure_reg(a1) l0 = self.force_result_in_reg(op, a0) return [l0, l1] return prepare_float_binary_op diff --git a/rpython/jit/backend/zarch/instructions.py b/rpython/jit/backend/zarch/instructions.py --- a/rpython/jit/backend/zarch/instructions.py +++ b/rpython/jit/backend/zarch/instructions.py @@ -26,6 +26,7 @@ # mul 'MSGR': ('rre', ['\xB9','\x0C']), 'MSG': ('rxy', ['\xE3','\x0C']), + 'MGHI': ('ri', ['\xA7','\x0D']), 'MSGFI': ('ril', ['\xC2','\x00']), 'MLGR': ('rre', ['\xB9','\x86'], 'eo,r'), # div/mod @@ -131,6 +132,7 @@ # move 'MVCLE': ('rs', ['\xA8'], 'eo,eo,bd'), + 'MVC': ('ssa', ['\xD2']), # load memory diff --git a/rpython/jit/backend/zarch/locations.py b/rpython/jit/backend/zarch/locations.py --- a/rpython/jit/backend/zarch/locations.py +++ b/rpython/jit/backend/zarch/locations.py @@ -1,6 +1,8 @@ from rpython.jit.metainterp.history import INT, FLOAT from rpython.jit.backend.zarch.arch import WORD, DOUBLE_WORD +FWORD = 8 + class AssemblerLocation(object): _immutable_ = True type = INT @@ -60,6 +62,30 @@ def as_key(self): # 0 <= as_key <= 15 return self.value +class ConstFloatLoc(AssemblerLocation): + """This class represents an imm float value which is stored in memory at + the address stored in the field value""" + _immutable_ = True + width = FWORD + type = FLOAT + + def __init__(self, value): + self.value = value + + def getint(self): + return self.value + + def __repr__(self): + return "imm_float(stored at %d)" % (self.value) + + def is_imm_float(self): + return True + + def is_float(self): + return True + + def as_key(self): + return self.value class FloatRegisterLocation(RegisterLocation): _immutable_ = True diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -3,8 +3,7 @@ STD_FRAME_SIZE_IN_BYTES) from rpython.jit.backend.zarch.arch import THREADLOCAL_ADDR_OFFSET from rpython.jit.backend.zarch.helper.assembler import (gen_emit_cmp_op, - gen_emit_rr_or_rpool, gen_emit_shift, gen_emit_pool_or_rr_evenodd, - gen_emit_imm_pool_rr) + gen_emit_rr, gen_emit_shift, gen_emit_rr_rh_ri, gen_emit_div_mod) from rpython.jit.backend.zarch.helper.regalloc import (check_imm, check_imm_value) from rpython.jit.metainterp.history import (ConstInt) @@ -29,29 +28,33 @@ class IntOpAssembler(object): _mixin_ = True - emit_int_add = gen_emit_imm_pool_rr('AGFI','AG','AGR') + emit_int_add = gen_emit_rr_rh_ri('AGR', 'AGHI', 'AGFI') emit_int_add_ovf = emit_int_add emit_nursery_ptr_increment = emit_int_add def emit_int_sub(self, op, arglocs, regalloc): - l0, l1 = arglocs - if l1.is_imm() and not l1.is_in_pool(): - assert 0, "logical imm must reside in pool!" - if l1.is_in_pool(): - self.mc.SG(l0, l1) - else: - self.mc.SGR(l0, l1) + res, l0, l1 = arglocs + self.mc.SGRK(res, l0, l1) + # POOL + #if l1.is_imm() and not l1.is_in_pool(): + # assert 0, "logical imm must reside in pool!" 
+ #if l1.is_in_pool(): + # self.mc.SG(l0, l1) + #else: + # self.mc.SGR(l0, l1) emit_int_sub_ovf = emit_int_sub - emit_int_mul = gen_emit_imm_pool_rr('MSGFI', 'MSG', 'MSGR') + emit_int_mul = gen_emit_rr_rh_ri('MSGR', 'MGHI', 'MSGFI') def emit_int_mul_ovf(self, op, arglocs, regalloc): lr, lq, l1 = arglocs - if l1.is_in_pool(): - self.mc.LG(r.SCRATCH, l1) - l1 = r.SCRATCH - elif l1.is_imm(): + # POOL + # if l1.is_in_pool(): + # self.mc.LG(r.SCRATCH, l1) + # l1 = r.SCRATCH + # elif + if l1.is_imm(): self.mc.LGFI(r.SCRATCH, l1) l1 = r.SCRATCH else: @@ -77,7 +80,8 @@ mc.LPGR(lq, lq) mc.LPGR(l1, l1) mc.MLGR(lr, l1) - mc.LG(r.SCRATCH, l.pool(self.pool.constant_64_sign_bit)) + mc.LGHI(r.SCRATCH, l.imm(-1)) + mc.RISBG(r.SCRATCH, r.SCRATCH, l.imm(0), l.imm(0x80 | 0), l.imm(0)) # is the value greater than 2**63 ? then an overflow occured jmp_xor_lq_overflow = mc.get_relative_pos() mc.reserve_cond_jump() # CLGRJ lq > 0x8000 ... 00 -> (label_overflow) @@ -94,7 +98,9 @@ mc.LPGR(lq, lq) mc.LPGR(l1, l1) mc.MLGR(lr, l1) - mc.LG(r.SCRATCH, l.pool(self.pool.constant_max_64_positive)) + mc.LGHI(r.SCRATCH, l.imm(-1)) + # 0xff -> shift 0 -> 0xff set MSB on pos 0 to zero -> 7f + mc.RISBG(r.SCRATCH, r.SCRATCH, l.imm(1), l.imm(0x80 | 63), l.imm(0)) jmp_lq_overflow = mc.get_relative_pos() mc.reserve_cond_jump() # CLGRJ lq > 0x7fff ... ff -> (label_overflow) jmp_lr_overflow = mc.get_relative_pos() @@ -163,16 +169,17 @@ omc.BRC(c.ANY, l.imm(label_end - jmp_neither_lqlr_overflow)) omc.overwrite() - emit_int_floordiv = gen_emit_pool_or_rr_evenodd('DSG','DSGR') - emit_uint_floordiv = gen_emit_pool_or_rr_evenodd('DLG','DLGR') + emit_int_floordiv = gen_emit_div_mod('DSGR') + emit_uint_floordiv = gen_emit_div_mod('DLGR') # NOTE division sets one register with the modulo value, thus # the regalloc ensures the right register survives. - emit_int_mod = gen_emit_pool_or_rr_evenodd('DSG','DSGR') + emit_int_mod = gen_emit_div_mod('DSGR') def emit_int_invert(self, op, arglocs, regalloc): l0, = arglocs assert not l0.is_imm() - self.mc.XG(l0, l.pool(self.pool.constant_64_ones)) + self.mc.LGHI(r.SCRATCH, l.imm(-1)) + self.mc.XGR(l0, r.SCRATCH) def emit_int_neg(self, op, arglocs, regalloc): l0, = arglocs @@ -206,9 +213,9 @@ self.mc.CGHI(l0, l.imm(0)) self.flush_cc(c.NE, res) - emit_int_and = gen_emit_rr_or_rpool("NGR", "NG") - emit_int_or = gen_emit_rr_or_rpool("OGR", "OG") - emit_int_xor = gen_emit_rr_or_rpool("XGR", "XG") + emit_int_and = gen_emit_rr("NGR") + emit_int_or = gen_emit_rr("OGR") + emit_int_xor = gen_emit_rr("XGR") emit_int_rshift = gen_emit_shift("SRAG") emit_int_lshift = gen_emit_shift("SLLG") @@ -235,10 +242,10 @@ class FloatOpAssembler(object): _mixin_ = True - emit_float_add = gen_emit_rr_or_rpool('ADBR','ADB') - emit_float_sub = gen_emit_rr_or_rpool('SDBR','SDB') - emit_float_mul = gen_emit_rr_or_rpool('MDBR','MDB') - emit_float_truediv = gen_emit_rr_or_rpool('DDBR','DDB') + emit_float_add = gen_emit_rr('ADBR') + emit_float_sub = gen_emit_rr('SDBR') + emit_float_mul = gen_emit_rr('MDBR') + emit_float_truediv = gen_emit_rr('DDBR') # Support for NaNs: S390X sets condition code to 0x3 (unordered) # whenever any operand is nan. @@ -568,7 +575,8 @@ # scratch = (index >> card_page_shift) & 7 # 0x80 sets zero flag. 
will store 0 into all not selected bits mc.RISBG(r.SCRATCH, loc_index, l.imm(61), l.imm(0x80 | 63), l.imm(64-n)) - mc.XG(tmp_loc, l.pool(self.pool.constant_64_ones)) + mc.LGHI(r.SCRATCH2, l.imm(-1)) + mc.XGR(tmp_loc, r.SCRATCH2) # set SCRATCH2 to 1 << r1 mc.LGHI(r.SCRATCH2, l.imm(1)) @@ -636,7 +644,7 @@ token = ZARCHGuardToken(self.cpu, gcmap, descr, op.getfailargs(), arglocs, op.getopnum(), frame_depth, fcond) - token._pool_offset = self.pool.get_descr_offset(descr) + #token._pool_offset = self.pool.get_descr_offset(descr) return token def emit_guard_true(self, op, arglocs, regalloc): @@ -901,9 +909,9 @@ def _emit_gc_load(self, op, arglocs, regalloc): result_loc, base_loc, ofs_loc, size_loc, sign_loc = arglocs - assert not result_loc.is_in_pool() - assert not base_loc.is_in_pool() - assert not ofs_loc.is_in_pool() + # POOL assert not result_loc.is_in_pool() + # POOL assert not base_loc.is_in_pool() + # POOL assert not ofs_loc.is_in_pool() if ofs_loc.is_imm(): assert self._mem_offset_supported(ofs_loc.value) src_addr = l.addr(ofs_loc.value, base_loc) @@ -917,15 +925,14 @@ def _emit_gc_load_indexed(self, op, arglocs, regalloc): result_loc, base_loc, index_loc, offset_loc, size_loc, sign_loc=arglocs - assert not result_loc.is_in_pool() - assert not base_loc.is_in_pool() - assert not index_loc.is_in_pool() - assert not offset_loc.is_in_pool() + # POOL assert not result_loc.is_in_pool() + # POOL assert not base_loc.is_in_pool() + # POOL assert not index_loc.is_in_pool() + # POOL assert not offset_loc.is_in_pool() if offset_loc.is_imm() and self._mem_offset_supported(offset_loc.value): addr_loc = l.addr(offset_loc.value, base_loc, index_loc) else: - self.mc.LGR(r.SCRATCH, index_loc) - self.mc.AGR(r.SCRATCH, offset_loc) + self.mc.AGRK(r.SCRATCH, index_loc, offset_loc) addr_loc = l.addr(0, base_loc, r.SCRATCH) self._memory_read(result_loc, addr_loc, size_loc.value, sign_loc.value) @@ -935,9 +942,9 @@ def emit_gc_store(self, op, arglocs, regalloc): (base_loc, index_loc, value_loc, size_loc) = arglocs - assert not base_loc.is_in_pool() - assert not index_loc.is_in_pool() - assert not value_loc.is_in_pool() + # POOL assert not base_loc.is_in_pool() + # POOL assert not index_loc.is_in_pool() + # POOL assert not value_loc.is_in_pool() if index_loc.is_imm() and self._mem_offset_supported(index_loc.value): addr_loc = l.addr(index_loc.value, base_loc) else: @@ -947,9 +954,9 @@ def emit_gc_store_indexed(self, op, arglocs, regalloc): (base_loc, index_loc, value_loc, offset_loc, size_loc) = arglocs - assert not base_loc.is_in_pool() - assert not index_loc.is_in_pool() - assert not value_loc.is_in_pool() + # POOL assert not base_loc.is_in_pool() + # POOL assert not index_loc.is_in_pool() + # POOL assert not value_loc.is_in_pool() addr_loc = self._load_address(base_loc, index_loc, offset_loc, r.SCRATCH) self._memory_store(value_loc, addr_loc, size_loc) @@ -962,8 +969,7 @@ assert index_loc.is_core_reg() addr_loc = l.addr(offset_loc.value, base_loc, index_loc) else: - self.mc.LGR(helper_reg, index_loc) - self.mc.AGR(helper_reg, offset_loc) + self.mc.AGRK(helper_reg, index_loc, offset_loc) addr_loc = l.addr(0, base_loc, helper_reg) return addr_loc @@ -1088,7 +1094,7 @@ self._store_force_index(self._find_nearby_operation(regalloc, +1)) # 'result_loc' is either r2, f0 or None self.call_assembler(op, argloc, vloc, result_loc, r.r2) - self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - self.mc.get_relative_pos())) + # POOL self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - self.mc.get_relative_pos())) 
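[illustration -- not part of the changeset]
The LGHI(-1) + RISBG sequences in the hunks above replace constants that the old code
fetched from the literal pool (all ones, the sign bit 2**63, and the largest positive
value 2**63-1).  A rough pure-Python model of RISBG when the zero-remaining-bits flag
(0x80) is set and destination equals source, using IBM bit numbering where bit 0 is the
most significant bit; this is a simplified sketch, not the real encoder:

    MASK64 = (1 << 64) - 1

    def risbg_zero(value, start, end, rot=0):
        # rotate `value` left by `rot`, keep only bits start..end
        # (inclusive, bit 0 = MSB), zero every other bit
        value &= MASK64
        if rot % 64:
            value = ((value << rot) | (value >> (64 - rot))) & MASK64
        mask = 0
        for bit in range(start, end + 1):
            mask |= 1 << (63 - bit)
        return value & mask

    ones = MASK64
    assert risbg_zero(ones, 1, 63) == 2**63 - 1   # max positive 64-bit value
    assert risbg_zero(ones, 0, 0) == 2**63        # sign bit (-2**63 when read as signed)
    # the card-marking hunk: keep bits 61..63 after rotating right by n,
    # i.e. (index >> n) & 7
    n, index = 7, 0b10101101010
    assert risbg_zero(index, 61, 63, rot=64 - n) == (index >> n) & 7

[end of illustration]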
emit_call_assembler_i = _genop_call_assembler emit_call_assembler_r = _genop_call_assembler diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -70,8 +70,13 @@ return adr def convert_to_imm(self, c): - off = self.pool.get_offset(c) - return l.pool(off, float=True) + adr = self.convert_to_adr(c) + return l.ConstFloatLoc(adr) + + # POOL + #def convert_to_imm(self, c): + # off = self.pool.get_offset(c) + # return l.pool(off, float=True) def __init__(self, longevity, frame_manager=None, assembler=None): RegisterManager.__init__(self, longevity, frame_manager, assembler) @@ -79,31 +84,46 @@ def call_result_location(self, v): return r.FPR_RETURN - def place_in_pool(self, var): - offset = self.assembler.pool.get_offset(var) - return l.pool(offset, float=True) + # POOL + # def place_in_pool(self, var): + # offset = self.assembler.pool.get_offset(var) + # return l.pool(offset, float=True) - def ensure_reg_or_pool(self, box): - if isinstance(box, Const): - return self.place_in_pool(box) - else: - assert box in self.temp_boxes - loc = self.make_sure_var_in_reg(box, - forbidden_vars=self.temp_boxes) - return loc + # POOL + #def ensure_reg_or_pool(self, box): + # if isinstance(box, Const): + # loc = self.get_scratch_reg() + # immvalue = self.convert_to_int(box) + # self.assembler.mc.load_imm(loc, immvalue) + # else: + # assert box in self.temp_boxes + # loc = self.make_sure_var_in_reg(box, + # forbidden_vars=self.temp_boxes) + # return loc + + def get_scratch_reg(self): + box = TempVar() + reg = self.force_allocate_reg(box, forbidden_vars=self.temp_boxes) + self.temp_boxes.append(box) + return reg def ensure_reg(self, box): if isinstance(box, Const): - poolloc = self.place_in_pool(box) - tmp = TempVar() - reg = self.force_allocate_reg(tmp, self.temp_boxes) - self.temp_boxes.append(tmp) - assert poolloc.displace >= 0 - if poolloc.displace <= 2**16-1: - self.assembler.mc.LD(reg, poolloc) - else: - self.assembler.mc.LDY(reg, poolloc) - return reg + # POOL + #poolloc = self.place_in_pool(box) + #tmp = TempVar() + #reg = self.force_allocate_reg(tmp, self.temp_boxes) + #self.temp_boxes.append(tmp) + #assert poolloc.displace >= 0 + #if poolloc.displace <= 2**12-1: + # self.assembler.mc.LD(reg, poolloc) + #else: + # self.assembler.mc.LDY(reg, poolloc) + loc = self.get_scratch_reg() + immadrvalue = self.convert_to_adr(box) + mc = self.assembler.mc + mc.load_imm(r.SCRATCH, immadrvalue) + mc.LD(loc, l.addr(0, r.SCRATCH)) else: assert box in self.temp_boxes loc = self.make_sure_var_in_reg(box, @@ -140,28 +160,36 @@ return rffi.cast(lltype.Signed, c.value) def convert_to_imm(self, c): - off = self.pool.get_offset(c) - return l.pool(off) + val = self.convert_to_int(c) + return l.imm(val) - def ensure_reg_or_pool(self, box): - if isinstance(box, Const): - offset = self.assembler.pool.get_offset(box) - return l.pool(offset) - else: - assert box in self.temp_boxes - loc = self.make_sure_var_in_reg(box, - forbidden_vars=self.temp_boxes) - return loc + # POOL + #def convert_to_imm(self, c): + # off = self.pool.get_offset(c) + # return l.pool(off) + #def ensure_reg_or_pool(self, box): + # if isinstance(box, Const): + # offset = self.assembler.pool.get_offset(box) + # return l.pool(offset) + # else: + # assert box in self.temp_boxes + # loc = self.make_sure_var_in_reg(box, + # forbidden_vars=self.temp_boxes) + # return loc + + # POOL + #offset = self.assembler.pool.get_offset(box) + #poolloc = 
l.pool(offset) + #tmp = TempInt() + #reg = self.force_allocate_reg(tmp, forbidden_vars=self.temp_boxes) + #self.temp_boxes.append(tmp) + #self.assembler.mc.LG(reg, poolloc) def ensure_reg(self, box): if isinstance(box, Const): - offset = self.assembler.pool.get_offset(box) - poolloc = l.pool(offset) - tmp = TempInt() - reg = self.force_allocate_reg(tmp, forbidden_vars=self.temp_boxes) - self.temp_boxes.append(tmp) - self.assembler.mc.LG(reg, poolloc) - return reg + loc = self.get_scratch_reg() + immvalue = self.convert_to_int(box) + self.assembler.mc.load_imm(loc, immvalue) else: assert box in self.temp_boxes loc = self.make_sure_var_in_reg(box, @@ -357,10 +385,10 @@ self.rm = ZARCHRegisterManager(self.longevity, frame_manager = self.fm, assembler = self.assembler) - self.rm.pool = self.assembler.pool + #self.rm.pool = self.assembler.pool self.fprm = FPRegisterManager(self.longevity, frame_manager = self.fm, assembler = self.assembler) - self.fprm.pool = self.assembler.pool + #self.fprm.pool = self.assembler.pool return operations def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): @@ -576,11 +604,12 @@ else: return self.rm.call_result_location(v) - def ensure_reg_or_pool(self, box): - if box.type == FLOAT: - return self.fprm.ensure_reg_or_pool(box) - else: - return self.rm.ensure_reg_or_pool(box) + # POOL + #def ensure_reg_or_pool(self, box): + # if box.type == FLOAT: + # return self.fprm.ensure_reg_or_pool(box) + # else: + # return self.rm.ensure_reg_or_pool(box) def ensure_reg(self, box): if box.type == FLOAT: diff --git a/rpython/jit/backend/zarch/registers.py b/rpython/jit/backend/zarch/registers.py --- a/rpython/jit/backend/zarch/registers.py +++ b/rpython/jit/backend/zarch/registers.py @@ -7,7 +7,7 @@ [r0,r1,r2,r3,r4,r5,r6,r7,r8, r9,r10,r11,r12,r13,r14,r15] = registers -MANAGED_REGS = [r2,r3,r4,r5,r6,r7,r8,r9,r10,r11] # keep this list sorted (asc)! +MANAGED_REGS = [r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r13] # keep this list sorted (asc)! 
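[illustration -- not part of the changeset]
r13 joins the managed registers here but stays out of MANAGED_REG_PAIRS, and the
follow-up changeset below ("ignore register 13 in pair allocation") makes the even/odd
pair allocator skip it explicitly.  One visible consequence is in test_assembler further
down: the managed registers no longer form a single contiguous run, so the expected
push/pop pattern becomes [(r2, r11), (r13,)] instead of [(r2, r11)].  A small
illustrative helper (hypothetical name) for grouping a sorted register list into the
contiguous runs that a single store-multiple can presumably cover:

    def contiguous_runs(regs):
        # [2, 3, ..., 11, 13] -> [(2, 11), (13,)]
        runs = []
        start = prev = regs[0]
        for n in regs[1:]:
            if n == prev + 1:
                prev = n
                continue
            runs.append((start, prev) if start != prev else (start,))
            start = prev = n
        runs.append((start, prev) if start != prev else (start,))
        return runs

    assert contiguous_runs([2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13]) == [(2, 11), (13,)]

[end of illustration]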
MANAGED_REG_PAIRS = [(r2,r3), (r4,r5), (r6,r7), (r8,r9), (r10,r11)] VOLATILES = [r2,r3,r4,r5,r6] SP = r15 @@ -39,7 +39,6 @@ for _r in MANAGED_FP_REGS: ALL_REG_INDEXES[_r] = len(ALL_REG_INDEXES) # NOT used, but keeps JITFRAME_FIXED_SIZE even -ALL_REG_INDEXES[f15] = len(ALL_REG_INDEXES) JITFRAME_FIXED_SIZE = len(ALL_REG_INDEXES) def odd_reg(r): diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -325,11 +325,23 @@ self.a.mc.LGFI(r.r5, loc.imm(63)) self.a.mc.NGR(r.r4, r.r5) self.a.mc.LGFI(r.r3, loc.imm(18)) - self.a.mc.LGFI(r.r2, loc.imm(0xffffffff)) + self.a.mc.LGFI(r.r2, loc.imm(-1)) self.a.mc.SRLG(r.r2, r.r3, loc.addr(18)) self.a.jmpto(r.r14) assert run_asm(self.a) == 0 + def test_generate_max_integral_64bit(self): + self.a.mc.LGHI(r.r2, loc.imm(-1)) + self.a.mc.RISBG(r.r2, r.r2, loc.imm(1), loc.imm(0x80 | 63), loc.imm(0)) + self.a.jmpto(r.r14) + assert run_asm(self.a) == 2**63-1 + + def test_generate_sign_bit(self): + self.a.mc.LGHI(r.r2, loc.imm(-1)) + self.a.mc.RISBG(r.r2, r.r2, loc.imm(0), loc.imm(0x80 | 0), loc.imm(0)) + self.a.jmpto(r.r14) + assert run_asm(self.a) == -2**63 + def test_ag_overflow(self): self.a.mc.BRC(con.ANY, loc.imm(4+8+8)) self.a.mc.write('\x7f' + '\xff' * 7) @@ -593,7 +605,7 @@ # ensure there is just on instruction for the 'best case' self.pushpop_jitframe(r.MANAGED_REGS) - assert stored == [(r.r2, r.r11)] + assert stored == [(r.r2, r.r11), (r.r13,)] assert stored == loaded stored = [] loaded = [] diff --git a/rpython/jit/backend/zarch/test/test_runner.py b/rpython/jit/backend/zarch/test/test_runner.py --- a/rpython/jit/backend/zarch/test/test_runner.py +++ b/rpython/jit/backend/zarch/test/test_runner.py @@ -24,7 +24,6 @@ cpu.setup_once() return cpu - add_loop_instructions = "lg; lgr; larl; agr; cgfi; jge; j;$" - # realloc frame takes the most space (from just after larl, to lay) - bridge_loop_instructions = "larl; lg; cgfi; jnl; lghi; " \ - "iilf;( iihf;)? iilf;( iihf;)? basr; lg; br;$" + add_loop_instructions = "lg; lgr; agr; cgfi; jge; j;$" + bridge_loop_instructions = "lg; cgfi; jnl; lghi; " \ + "iilf;( iihf;)? iilf;( iihf;)? basr; iilf;( iihf;)? br;$" From pypy.commits at gmail.com Wed Mar 2 10:03:33 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 02 Mar 2016 07:03:33 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: ignore register 13 in pair allocation Message-ID: <56d700c5.e853c20a.e2a2a.ffff8ff0@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82657:3e0b424d69d1 Date: 2016-03-02 16:02 +0100 http://bitbucket.org/pypy/pypy/changeset/3e0b424d69d1/ Log: ignore register 13 in pair allocation diff --git a/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py b/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py --- a/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py +++ b/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py @@ -55,7 +55,7 @@ @given(st.bytecode_block()) def test_execute_single_bytecode(self, program): - clazz, bytecode, consts, stack = program + bytecode, consts = program result, out, err = self.execute(bytecode, consts) if result != 0: raise Exception(("could not run program. 
returned %d" diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -261,6 +261,9 @@ i = len(self.free_regs)-1 while i >= 0: even = self.free_regs[i] + if even.value == 13: + i -= 1 + continue if even.is_even(): # found an even registers that is actually free odd = r.odd_reg(even) diff --git a/rpython/jit/backend/zarch/test/test_zrpy_gc_hypo.py b/rpython/jit/backend/zarch/test/test_zrpy_gc_hypo.py --- a/rpython/jit/backend/zarch/test/test_zrpy_gc_hypo.py +++ b/rpython/jit/backend/zarch/test/test_zrpy_gc_hypo.py @@ -1,5 +1,9 @@ from rpython.jit.backend.llsupport.tl.test.zrpy_gc_hypo_test import GCHypothesis +import py + +py.test.skip("not yet working") + class TestGCHypothesis(GCHypothesis): # runs ../../llsupport/tl/test/zrpy_gc_hypo_test.py gcrootfinder = "shadowstack" From pypy.commits at gmail.com Wed Mar 2 10:21:35 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 02 Mar 2016 07:21:35 -0800 (PST) Subject: [pypy-commit] pypy default: Refactor parametric test into a single stateful test with increased run-time Message-ID: <56d704ff.88c8c20a.80ee0.ffff95d7@mx.google.com> Author: Ronan Lamy Branch: Changeset: r82658:eeb057746657 Date: 2016-03-02 15:02 +0000 http://bitbucket.org/pypy/pypy/changeset/eeb057746657/ Log: Refactor parametric test into a single stateful test with increased run-time diff --git a/rpython/rtyper/test/test_rdict.py b/rpython/rtyper/test/test_rdict.py --- a/rpython/rtyper/test/test_rdict.py +++ b/rpython/rtyper/test/test_rdict.py @@ -13,8 +13,9 @@ from rpython.rlib.rarithmetic import r_int, r_uint, r_longlong, r_ulonglong import py +from hypothesis import given, settings from hypothesis.strategies import ( - builds, sampled_from, binary, just, integers, text, characters) + builds, sampled_from, binary, just, integers, text, characters, tuples) from hypothesis.stateful import GenericStateMachine, run_state_machine_as_test def ann2strategy(s_value): @@ -1152,92 +1153,123 @@ class PseudoRTyper: cache_dummy_values = {} + # XXX: None keys crash the test, but translation sort-of allows it - at py.test.mark.parametrize('s_key', - [SomeString(), SomeInteger(), SomeChar(), SomeUnicodeString(), SomeUnicodeCodePoint()]) - at py.test.mark.parametrize('s_value', - [SomeString(can_be_None=True), SomeString(), SomeChar(), SomeInteger(), SomeUnicodeString(), SomeUnicodeCodePoint()]) -def test_hypothesis(s_key, s_value): - rtyper = PseudoRTyper() - r_key = s_key.rtyper_makerepr(rtyper) - r_value = s_value.rtyper_makerepr(rtyper) - dictrepr = rdict.DictRepr(rtyper, r_key, r_value, - DictKey(None, s_key), - DictValue(None, s_value)) - dictrepr.setup() +keytypes_s = [ + SomeString(), SomeInteger(), SomeChar(), + SomeUnicodeString(), SomeUnicodeCodePoint()] +st_keys = sampled_from(keytypes_s) +st_values = sampled_from(keytypes_s + [SomeString(can_be_None=True)]) - _ll_key = r_key.convert_const - _ll_value = r_value.convert_const +class Space(object): + def __init__(self, s_key, s_value): + self.s_key = s_key + self.s_value = s_value + rtyper = PseudoRTyper() + r_key = s_key.rtyper_makerepr(rtyper) + r_value = s_value.rtyper_makerepr(rtyper) + dictrepr = rdict.DictRepr(rtyper, r_key, r_value, + DictKey(None, s_key), + DictValue(None, s_value)) + dictrepr.setup() + self.l_dict = rdict.ll_newdict(dictrepr.DICT) + self.reference = {} + self.ll_key = r_key.convert_const + self.ll_value = r_value.convert_const - class SetItem(Action): - def __init__(self, key, 
value): - self.key = key - self.value = value + def setitem(self, key, value): + ll_key = self.ll_key(key) + ll_value = self.ll_value(value) + rdict.ll_dict_setitem(self.l_dict, ll_key, ll_value) + self.reference[key] = value + assert rdict.ll_contains(self.l_dict, ll_key) - def __repr__(self): - return 'SetItem(%r, %r)' % (self.key, self.value) + def delitem(self, key): + ll_key = self.ll_key(key) + rdict.ll_dict_delitem(self.l_dict, ll_key) + del self.reference[key] + assert not rdict.ll_contains(self.l_dict, ll_key) - def execute(self, state): - ll_key = _ll_key(self.key) - ll_value = _ll_value(self.value) - rdict.ll_dict_setitem(state.l_dict, ll_key, ll_value) - state.reference[self.key] = self.value - assert rdict.ll_contains(state.l_dict, ll_key) + def copydict(self): + self.l_dict = rdict.ll_copy(self.l_dict) - class DelItem(Action): - def __init__(self, key): - self.key = key + def cleardict(self): + rdict.ll_clear(self.l_dict) + self.reference.clear() + assert rdict.ll_dict_len(self.l_dict) == 0 - def __repr__(self): - return 'DelItem(%r)' % (self.key) + def fullcheck(self): + assert rdict.ll_dict_len(self.l_dict) == len(self.reference) + for key, value in self.reference.iteritems(): + assert (rdict.ll_dict_getitem(self.l_dict, self.ll_key(key)) == + self.ll_value(value)) - def execute(self, state): - ll_key = _ll_key(self.key) - rdict.ll_dict_delitem(state.l_dict, ll_key) - del state.reference[self.key] - assert not rdict.ll_contains(state.l_dict, ll_key) +class SetItem(Action): + def __init__(self, key, value): + self.key = key + self.value = value - class CopyDict(Action): - def execute(self, state): - state.l_dict = rdict.ll_copy(state.l_dict) + def __repr__(self): + return 'SetItem(%r, %r)' % (self.key, self.value) - class ClearDict(Action): - def execute(self, state): - rdict.ll_clear(state.l_dict) - state.reference.clear() + def execute(self, space): + space.setitem(self.key, self.value) - st_keys = ann2strategy(s_key) - st_values = ann2strategy(s_value) - st_setitem = builds(SetItem, st_keys, st_values) +class DelItem(Action): + def __init__(self, key): + self.key = key - def st_delitem(keys): - return builds(DelItem, sampled_from(keys)) + def __repr__(self): + return 'DelItem(%r)' % (self.key) - def st_updateitem(keys): - return builds(SetItem, sampled_from(keys), st_values) + def execute(self, space): + space.delitem(self.key) - class StressTest(GenericStateMachine): - def __init__(self): - self.l_dict = rdict.ll_newdict(dictrepr.DICT) - self.reference = {} +class CopyDict(Action): + def execute(self, space): + space.copydict() - def steps(self): - global_actions = [CopyDict(), ClearDict()] - if self.reference: - return ( - st_setitem | sampled_from(global_actions) | - st_updateitem(self.reference) | st_delitem(self.reference)) - else: - return (st_setitem | sampled_from(global_actions)) +class ClearDict(Action): + def execute(self, space): + space.cleardict() - def execute_step(self, action): - with signal_timeout(1): # catches infinite loops - action.execute(self) +class StressTest(GenericStateMachine): + def __init__(self): + self.space = None - def teardown(self): - assert rdict.ll_dict_len(self.l_dict) == len(self.reference) - for key, value in self.reference.iteritems(): - assert (rdict.ll_dict_getitem(self.l_dict, _ll_key(key)) == - _ll_value(value)) + def st_setitem(self): + return builds(SetItem, self.st_keys, self.st_values) - run_state_machine_as_test(StressTest) + def st_updateitem(self): + return builds(SetItem, sampled_from(self.space.reference), + 
self.st_values) + + def st_delitem(self): + return builds(DelItem, sampled_from(self.space.reference)) + + def steps(self): + if not self.space: + return builds(Space, st_keys, st_values) + global_actions = [CopyDict(), ClearDict()] + if self.space.reference: + return ( + self.st_setitem() | sampled_from(global_actions) | + self.st_updateitem() | self.st_delitem()) + else: + return (self.st_setitem() | sampled_from(global_actions)) + + def execute_step(self, action): + if isinstance(action, Space): + self.space = action + self.st_keys = ann2strategy(self.space.s_key) + self.st_values = ann2strategy(self.space.s_value) + return + with signal_timeout(1): # catches infinite loops + action.execute(self.space) + + def teardown(self): + if self.space: + self.space.fullcheck() + +def test_hypothesis(): + run_state_machine_as_test(StressTest, settings(max_examples=500, stateful_step_count=100)) From pypy.commits at gmail.com Wed Mar 2 10:35:14 2016 From: pypy.commits at gmail.com (fijal) Date: Wed, 02 Mar 2016 07:35:14 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: fix the merge Message-ID: <56d70832.88c8c20a.80ee0.ffff9b59@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82659:0c113541c860 Date: 2016-03-01 14:07 +0100 http://bitbucket.org/pypy/pypy/changeset/0c113541c860/ Log: fix the merge diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -58,7 +58,7 @@ box = self._get(self._next()) assert box box_list.append(box) - op.rd_snapshot = resume.Snapshot(None, box_list) + op.rd_snapshot = resume.TopSnapshot(resume.Snapshot(None, box_list), [], []) def next(self): opnum = self._next() diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -49,11 +49,11 @@ add = t.record_op(rop.INT_ADD, [i0, i1]) t.record_op(rop.GUARD_FALSE, [add]) # now we write rd_snapshot and friends - virtualizable_boxes = [] + virtualizable_boxes = None virutalref_boxes = [] framestack = [FakeFrame(1, JitCode(2), [i0, i1])] resume.capture_resumedata(framestack, virtualizable_boxes, virutalref_boxes, t) (i0, i1), l = self.unpack(t) assert l[1].opnum == rop.GUARD_FALSE - assert l[1].rd_snapshot.boxes == [i0, i1] + assert l[1].rd_snapshot.prev.boxes == [i0, i1] From pypy.commits at gmail.com Wed Mar 2 10:35:15 2016 From: pypy.commits at gmail.com (fijal) Date: Wed, 02 Mar 2016 07:35:15 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: progress, pass the first snapshot test Message-ID: <56d70833.d30e1c0a.af5cc.ffffc480@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82660:0077f0c7f4ec Date: 2016-03-02 16:33 +0100 http://bitbucket.org/pypy/pypy/changeset/0077f0c7f4ec/ Log: progress, pass the first snapshot test diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -5,12 +5,16 @@ ResOperation, oparity, opname, rop, ResOperation, opwithdescr from rpython.rlib.rarithmetic import intmask from rpython.jit.metainterp import resume +from rpython.rlib.objectmodel import we_are_translated TAGINT, TAGCONST, TAGBOX = range(3) TAGMASK = 0x3 TAGSHIFT = 2 MAXINT = 65536 +class Sentinel(object): + pass + class TraceIterator(object): def __init__(self, trace, end): self.trace = trace @@ 
-47,18 +51,10 @@ else: yyyy - def read_resume(self, op): - jc_index = self._next() - pc = self._next() - f = resume.FrameInfo(None, jc_index, pc) - op.rd_frame_info_list = f - lgt = self._next() - box_list = [] - for i in range(lgt): - box = self._get(self._next()) - assert box - box_list.append(box) - op.rd_snapshot = resume.TopSnapshot(resume.Snapshot(None, box_list), [], []) + def skip_resume_data(self): + pos = self.pos + self.pos = self._next() + return pos def next(self): opnum = self._next() @@ -79,7 +75,7 @@ descr = None res = ResOperation(opnum, args, -1, descr=descr) if rop.is_guard(opnum): - self.read_resume(res) + res.rd_snapshot_position = self.skip_resume_data() self._cache[self._count] = res self._count += 1 return res @@ -145,6 +141,9 @@ index = op._pos self._ops[index] = -newtag - 1 + def record_snapshot_link(self, pos): + self._ops.append(-pos - 1) + def record_op(self, opnum, argboxes, descr=None): # return an ResOperation instance, ideally die in hell pos = self._record_op(opnum, argboxes, descr) @@ -154,12 +153,29 @@ return tag(TAGBOX, self._record_raw(opnum, tagged_args, descr)) def record_snapshot(self, jitcode, pc, active_boxes): + pos = len(self._ops) + self._ops.append(len(active_boxes)) # unnecessary, can be read from self._ops.append(jitcode.index) self._ops.append(pc) - self._ops.append(len(active_boxes)) # unnecessary, can be read from - # jitcode for box in active_boxes: self._ops.append(box.position) # not tagged, as it must be boxes + return pos + + def get_patchable_position(self): + p = len(self._ops) + if not we_are_translated(): + self._ops.append(Sentinel()) + else: + self._ops.append(-1) + return p + + def patch_position_to_current(self, p): + prev = self._ops[p] + if we_are_translated(): + assert prev == -1 + else: + assert isinstance(prev, Sentinel) + self._ops[p] = len(self._ops) def get_iter(self): return TraceIterator(self, len(self._ops)) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -502,13 +502,10 @@ return self.oparse.parse() def postprocess(self, op): - class FakeJitCode(object): - index = 0 - if OpHelpers.is_guard(op.getopnum()): - op.rd_snapshot = resume.TopSnapshot(None, + op.rd_snapshot = resume.TopSnapshot( resume.Snapshot(None, op.getfailargs()), [], []) - op.rd_frame_info_list = resume.FrameInfo(None, FakeJitCode(), 11) + op.rd_frame_info_list = resume.FrameInfo(None, 0, 11) def add_guard_future_condition(self, res): # invent a GUARD_FUTURE_CONDITION to not have to change all tests diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -70,8 +70,7 @@ self.copy_constants(self.registers_f, jitcode.constants_f, ConstFloat) self._result_argcode = 'v' # for resume.py operation - self.parent_resumedata_snapshot = None - self.parent_resumedata_frame_info_list = None + self.parent_resumedata_position = -1 # counter for unrolling inlined loops self.unroll_iterations = 1 @@ -2061,7 +2060,8 @@ else: guard_op = self.history.record(opnum, moreargs, None) assert isinstance(guard_op, GuardResOp) - self.capture_resumedata(resumepc) # <- records extra to history + self.capture_resumedata(resumepc) + # ^^^ records extra to history self.staticdata.profiler.count_ops(opnum, Counters.GUARDS) # count self.attach_debug_info(guard_op) diff --git 
a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -112,23 +112,20 @@ self.variable, self.location) -def _ensure_parent_resumedata(framestack, n): - target = framestack[n] +def _ensure_parent_resumedata(framestack, n, t): if n == 0: return + target = framestack[n] back = framestack[n - 1] - if target.parent_resumedata_frame_info_list is not None: - _, pc = unpack_uint(target.parent_resumedata_frame_info_list.packed_jitcode_pc) - assert pc == back.pc + if target.parent_resumedata_position != -1: + t.check_snapshot_jitcode_pc(back.jitcode, back.pc, + target.parent_resumedata_position) + t.record_snapshot_link(target.parent_resumedata_position) return - _ensure_parent_resumedata(framestack, n - 1) - target.parent_resumedata_frame_info_list = FrameInfo( - back.parent_resumedata_frame_info_list, - back.jitcode, - back.pc) - target.parent_resumedata_snapshot = Snapshot( - back.parent_resumedata_snapshot, - back.get_list_of_active_boxes(True)) + pos = t.record_snapshot(back.jitcode, back.pc, + back.get_list_of_active_boxes(True)) + _ensure_parent_resumedata(framestack, n - 1, t) + target.parent_resumedata_position = pos def capture_resumedata(framestack, virtualizable_boxes, virtualref_boxes, t): n = len(framestack) - 1 @@ -140,16 +137,11 @@ virtualref_boxes = virtualref_boxes[:] if n >= 0: top = framestack[n] + pos = t.get_patchable_position() t.record_snapshot(top.jitcode, top.pc, top.get_list_of_active_boxes(False)) - #_ensure_parent_resumedata(framestack, n) - #frame_info_list = FrameInfo(top.parent_resumedata_frame_info_list, - # top.jitcode, top.pc) - #snapshot_storage.rd_frame_info_list = frame_info_list - #snapshot = Snapshot(top.parent_resumedata_snapshot, - # top.get_list_of_active_boxes(False)) - #snapshot = TopSnapshot(snapshot, virtualref_boxes, virtualizable_boxes) - #snapshot_storage.rd_snapshot = snapshot + _ensure_parent_resumedata(framestack, n, t) + t.patch_position_to_current(pos) else: yyy snapshot_storage.rd_frame_info_list = None diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -11,14 +11,14 @@ l = [] while not iter.done(): l.append(iter.next()) - return iter.inputargs, l + return iter.inputargs, l, iter def test_simple_iterator(self): i0, i1 = InputArgInt(), InputArgInt() t = Trace([i0, i1]) add = t.record_op(rop.INT_ADD, [i0, i1]) t.record_op(rop.INT_ADD, [add, ConstInt(1)]) - (i0, i1), l = self.unpack(t) + (i0, i1), l, _ = self.unpack(t) assert len(l) == 2 assert l[0].opnum == rop.INT_ADD assert l[1].opnum == rop.INT_ADD @@ -27,14 +27,26 @@ assert l[0].getarg(0) is i0 assert l[0].getarg(1) is i1 + def unpack_snapshot(self, t, pos): + trace = t.trace + first = trace._ops[pos] # this is the size + pos += 1 + boxes = [] + while first > pos + 1: + snapshot_size = trace._ops[pos] + # 2 for jitcode and pc + pos += 1 + 2 + boxes += [t._get(trace._ops[i + pos]) for i in range(snapshot_size)] + pos += len(boxes) + return boxes + def test_rd_snapshot(self): class JitCode(object): def __init__(self, index): self.index = index class FakeFrame(object): - parent_resumedata_frame_info_list = None - parent_resumedata_snapshot = None + parent_resumedata_position = -1 def __init__(self, pc, jitcode, boxes): self.pc = pc @@ -49,11 +61,20 @@ add = t.record_op(rop.INT_ADD, [i0, i1]) t.record_op(rop.GUARD_FALSE, [add]) # now we 
write rd_snapshot and friends - virtualizable_boxes = None - virutalref_boxes = [] - framestack = [FakeFrame(1, JitCode(2), [i0, i1])] - resume.capture_resumedata(framestack, virtualizable_boxes, - virutalref_boxes, t) - (i0, i1), l = self.unpack(t) + frame0 = FakeFrame(1, JitCode(2), [i0, i1]) + frame1 = FakeFrame(3, JitCode(4), [i0, i0, add]) + framestack = [frame0] + resume.capture_resumedata(framestack, None, [], t) + (i0, i1), l, iter = self.unpack(t) assert l[1].opnum == rop.GUARD_FALSE - assert l[1].rd_snapshot.prev.boxes == [i0, i1] + boxes = self.unpack_snapshot(iter, l[1].rd_snapshot_position) + assert boxes == [i0, i1] + t.record_op(rop.GUARD_FALSE, [add]) + resume.capture_resumedata([frame0, frame1], None, [], t) + (i0, i1), l, iter = self.unpack(t) + assert l[1].opnum == rop.GUARD_FALSE + boxes = self.unpack_snapshot(iter, l[1].rd_snapshot_position) + assert boxes == [i0, i1] + assert l[2].opnum == rop.GUARD_FALSE + boxes = self.unpack_snapshot(iter, l[2].rd_snapshot_position) + assert boxes == [i0, i0, l[0], i0, i1] From pypy.commits at gmail.com Wed Mar 2 11:47:53 2016 From: pypy.commits at gmail.com (fijal) Date: Wed, 02 Mar 2016 08:47:53 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: revert the changes to oparser, will try to hack differently Message-ID: <56d71939.86351c0a.2db31.ffffd963@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82661:23fc5965ce9d Date: 2016-03-02 17:47 +0100 http://bitbucket.org/pypy/pypy/changeset/23fc5965ce9d/ Log: revert the changes to oparser, will try to hack differently diff --git a/rpython/jit/tool/oparser.py b/rpython/jit/tool/oparser.py --- a/rpython/jit/tool/oparser.py +++ b/rpython/jit/tool/oparser.py @@ -9,9 +9,8 @@ from rpython.jit.metainterp.resoperation import rop, ResOperation, \ InputArgInt, InputArgRef, InputArgFloat, InputArgVector, \ - ResOpWithDescr, N_aryOp, UnaryOp, PlainResOp, optypes, OpHelpers,\ + ResOpWithDescr, N_aryOp, UnaryOp, PlainResOp, optypes, OpHelpers, \ VectorizationInfo -from rpython.jit.metainterp.opencoder import Trace class ParseError(Exception): pass @@ -325,7 +324,7 @@ assert descr is None return op else: - res = self.record(opnum, args, descr) + res = ResOperation(opnum, args, -1, descr) if fail_args is not None: res.setfailargs(fail_args) if self._postproces: @@ -392,6 +391,7 @@ def parse(self): lines = self.input.splitlines() + ops = [] newlines = [] first_comment = None for line in lines: @@ -411,25 +411,26 @@ continue # a comment or empty line newlines.append(line) base_indent, inpargs, newlines = self.parse_inpargs(newlines) - self.trace = self.model.Trace(inpargs) - num, last_offset = self.parse_ops(base_indent, newlines, 0) + num, ops, last_offset = self.parse_ops(base_indent, newlines, 0) if num < len(newlines): raise ParseError("unexpected dedent at line: %s" % newlines[num]) - self.trace.comment = first_comment - self.trace.original_jitcell_token = self.original_jitcell_token - return self.trace - - def record(self, opnum, args, descr): - return self.trace.record_op(opnum, args, descr) + loop = self.model.ExtendedTreeLoop("loop") + loop.comment = first_comment + loop.original_jitcell_token = self.original_jitcell_token + loop.operations = ops + loop.inputargs = inpargs + loop.last_offset = last_offset + return loop def parse_ops(self, indent, lines, start): num = start + ops = [] last_offset = None while num < len(lines): line = lines[num] if not line.startswith(" " * indent): # dedent - return num, None + return num, ops elif line.startswith(" "*(indent + 1)): 
raise ParseError("indentation not valid any more") elif line.startswith(" " * indent + "#"): @@ -444,8 +445,9 @@ op = self.parse_next_op(line) if offset: op.offset = offset + ops.append(op) num += 1 - return num, last_offset + return num, ops, last_offset def postprocess(self, loop): """ A hook that can be overloaded to do some postprocessing diff --git a/rpython/jit/tool/test/test_oparser.py b/rpython/jit/tool/test/test_oparser.py --- a/rpython/jit/tool/test/test_oparser.py +++ b/rpython/jit/tool/test/test_oparser.py @@ -24,12 +24,11 @@ finish() # (tricky) """ loop = self.parse(x) - ops = loop._get_operations() - assert len(ops) == 3 - assert [op.getopnum() for op in ops] == [rop.INT_ADD, rop.INT_SUB, + assert len(loop.operations) == 3 + assert [op.getopnum() for op in loop.operations] == [rop.INT_ADD, rop.INT_SUB, rop.FINISH] assert len(loop.inputargs) == 2 - assert ops[-1].getdescr() + assert loop.operations[-1].getdescr() def test_const_ptr_subops(self): x = """ @@ -39,10 +38,9 @@ S = lltype.Struct('S') vtable = lltype.nullptr(S) loop = self.parse(x, None, locals()) - ops = loop._get_operations() - assert len(ops) == 1 - assert ops[0].getdescr() - assert not ops[0].getfailargs() + assert len(loop.operations) == 1 + assert loop.operations[0].getdescr() + assert loop.operations[0].getfailargs() == [] def test_descr(self): class Xyz(AbstractDescr): @@ -54,8 +52,7 @@ """ stuff = Xyz() loop = self.parse(x, None, locals()) - ops = loop._get_operations() - assert ops[0].getdescr() is stuff + assert loop.operations[0].getdescr() is stuff def test_after_fail(self): x = """ @@ -64,7 +61,7 @@ i1 = int_add(1, 2) """ loop = self.parse(x, None, {}) - assert len(loop._get_operations()) == 2 + assert len(loop.operations) == 2 def test_descr_setfield(self): class Xyz(AbstractDescr): @@ -76,7 +73,7 @@ """ stuff = Xyz() loop = self.parse(x, None, locals()) - assert loop._get_operations()[0].getdescr() is stuff + assert loop.operations[0].getdescr() is stuff def test_getvar_const_ptr(self): x = ''' @@ -86,7 +83,7 @@ TP = lltype.GcArray(lltype.Signed) NULL = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.nullptr(TP)) loop = self.parse(x, None, {'func_ptr' : NULL}) - assert loop._get_operations()[0].getarg(0).value == NULL + assert loop.operations[0].getarg(0).value == NULL def test_jump_target(self): x = ''' @@ -94,7 +91,7 @@ jump() ''' loop = self.parse(x) - assert loop._get_operations()[0].getdescr() is loop.original_jitcell_token + assert loop.operations[0].getdescr() is loop.original_jitcell_token def test_jump_target_other(self): looptoken = JitCellToken() @@ -104,7 +101,7 @@ jump(descr=looptoken) ''' loop = self.parse(x, namespace=locals()) - assert loop._get_operations()[0].getdescr() is looptoken + assert loop.operations[0].getdescr() is looptoken def test_floats(self): x = ''' @@ -112,7 +109,7 @@ f1 = float_add(f0, 3.5) ''' loop = self.parse(x) - box = loop._get_operations()[0].getarg(0) + box = loop.operations[0].getarg(0) # we cannot use isinstance, because in case of mock the class will be # constructed on the fly assert box.__class__.__name__ == 'InputArgFloat' @@ -126,13 +123,12 @@ debug_merge_point(0, 0, '(stuff) #1') ''' loop = self.parse(x) - ops = loop._get_operations() - assert ops[0].getarg(2)._get_str() == 'info' - assert ops[0].getarg(1).value == 0 - assert ops[1].getarg(2)._get_str() == 'info' - assert ops[2].getarg(2)._get_str() == " info" - assert ops[2].getarg(1).value == 1 - assert ops[3].getarg(2)._get_str() == "(stuff) #1" + assert loop.operations[0].getarg(2)._get_str() == 
'info' + assert loop.operations[0].getarg(1).value == 0 + assert loop.operations[1].getarg(2)._get_str() == 'info' + assert loop.operations[2].getarg(2)._get_str() == " info" + assert loop.operations[2].getarg(1).value == 1 + assert loop.operations[3].getarg(2)._get_str() == "(stuff) #1" def test_descr_with_obj_print(self): @@ -168,7 +164,7 @@ p0 = new(, descr=) ''' loop = self.parse(x) - assert loop._get_operations()[0].getopname() == 'new' + assert loop.operations[0].getopname() == 'new' def test_no_fail_args(self): x = ''' @@ -176,7 +172,7 @@ guard_true(i0, descr=) ''' loop = self.parse(x, nonstrict=True) - assert not loop._get_operations()[0].getfailargs() + assert loop.operations[0].getfailargs() == [] def test_offsets(self): x = """ @@ -212,9 +208,8 @@ jump(i0, descr=1) """ loop = self.parse(x) - ops = loop._get_operations() - assert ops[0].getdescr() is ops[1].getdescr() - assert isinstance(ops[0].getdescr(), TargetToken) + assert loop.operations[0].getdescr() is loop.operations[1].getdescr() + assert isinstance(loop.operations[0].getdescr(), TargetToken) class ForbiddenModule(object): From pypy.commits at gmail.com Wed Mar 2 14:31:49 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 02 Mar 2016 11:31:49 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: unskip and start to fix test for tp_new (issue #1703) Message-ID: <56d73fa5.12871c0a.e3639.2258@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r82662:2279834c7e56 Date: 2016-03-02 14:31 -0500 http://bitbucket.org/pypy/pypy/changeset/2279834c7e56/ Log: unskip and start to fix test for tp_new (issue #1703) diff --git a/pypy/module/cpyext/test/foo3.c b/pypy/module/cpyext/test/foo3.c --- a/pypy/module/cpyext/test/foo3.c +++ b/pypy/module/cpyext/test/foo3.c @@ -59,11 +59,14 @@ static PyMethodDef sbkMethods[] = {{NULL, NULL, 0, NULL}}; -#ifdef _WIN32 - __declspec(dllexport) void // PyModINIT_FUNC is broken on PyPy/Windows +/* Initialize this module. 
*/ +#ifdef __GNUC__ +extern __attribute__((visibility("default"))) #else - PyMODINIT_FUNC +extern __declspec(dllexport) #endif + +PyMODINIT_FUNC initfoo3(void) { PyObject* mod = Py_InitModule("Foo3", sbkMethods); diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -865,7 +865,6 @@ assert (d + a) == 5 def test_tp_new_in_subclass_of_type(self): - skip("BROKEN") module = self.import_module(name='foo3') print('calling module.Type()...') module.Type("X", (object,), {}) From pypy.commits at gmail.com Wed Mar 2 15:17:43 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 02 Mar 2016 12:17:43 -0800 (PST) Subject: [pypy-commit] pypy default: cleanup Message-ID: <56d74a67.03321c0a.632bc.2ef5@mx.google.com> Author: Ronan Lamy Branch: Changeset: r82663:db9d97b06d3a Date: 2016-03-02 20:16 +0000 http://bitbucket.org/pypy/pypy/changeset/db9d97b06d3a/ Log: cleanup diff --git a/rpython/rtyper/test/test_rdict.py b/rpython/rtyper/test/test_rdict.py --- a/rpython/rtyper/test/test_rdict.py +++ b/rpython/rtyper/test/test_rdict.py @@ -13,7 +13,7 @@ from rpython.rlib.rarithmetic import r_int, r_uint, r_longlong, r_ulonglong import py -from hypothesis import given, settings +from hypothesis import settings from hypothesis.strategies import ( builds, sampled_from, binary, just, integers, text, characters, tuples) from hypothesis.stateful import GenericStateMachine, run_state_machine_as_test @@ -1145,15 +1145,20 @@ assert sorted(DICT.TO.entries.TO.OF._flds) == ['f_hash', 'key', 'value'] +class Action(object): + def __init__(self, method, args): + self.method = method + self.args = args -class Action(object): + def execute(self, space): + getattr(space, self.method)(*self.args) + def __repr__(self): - return "%s()" % self.__class__.__name__ + return "space.%s(%s)" % (self.method, ', '.join(map(repr, self.args))) class PseudoRTyper: cache_dummy_values = {} - # XXX: None keys crash the test, but translation sort-of allows it keytypes_s = [ SomeString(), SomeInteger(), SomeChar(), @@ -1204,53 +1209,27 @@ assert (rdict.ll_dict_getitem(self.l_dict, self.ll_key(key)) == self.ll_value(value)) -class SetItem(Action): - def __init__(self, key, value): - self.key = key - self.value = value - - def __repr__(self): - return 'SetItem(%r, %r)' % (self.key, self.value) - - def execute(self, space): - space.setitem(self.key, self.value) - -class DelItem(Action): - def __init__(self, key): - self.key = key - - def __repr__(self): - return 'DelItem(%r)' % (self.key) - - def execute(self, space): - space.delitem(self.key) - -class CopyDict(Action): - def execute(self, space): - space.copydict() - -class ClearDict(Action): - def execute(self, space): - space.cleardict() - class StressTest(GenericStateMachine): def __init__(self): self.space = None def st_setitem(self): - return builds(SetItem, self.st_keys, self.st_values) + return builds(Action, + just('setitem'), tuples(self.st_keys, self.st_values)) def st_updateitem(self): - return builds(SetItem, sampled_from(self.space.reference), - self.st_values) + return builds(Action, + just('setitem'), + tuples(sampled_from(self.space.reference), self.st_values)) def st_delitem(self): - return builds(DelItem, sampled_from(self.space.reference)) + return builds(Action, + just('delitem'), tuples(sampled_from(self.space.reference))) def steps(self): if not self.space: - return builds(Space, st_keys, st_values) - global_actions = [CopyDict(), 
ClearDict()] + return builds(Action, just('setup'), tuples(st_keys, st_values)) + global_actions = [Action('copydict', ()), Action('cleardict', ())] if self.space.reference: return ( self.st_setitem() | sampled_from(global_actions) | @@ -1259,8 +1238,8 @@ return (self.st_setitem() | sampled_from(global_actions)) def execute_step(self, action): - if isinstance(action, Space): - self.space = action + if action.method == 'setup': + self.space = Space(*action.args) self.st_keys = ann2strategy(self.space.s_key) self.st_values = ann2strategy(self.space.s_value) return From pypy.commits at gmail.com Wed Mar 2 15:22:43 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 02 Mar 2016 12:22:43 -0800 (PST) Subject: [pypy-commit] cffi default: Oops, tests fail if we run them one by one Message-ID: <56d74b93.84b61c0a.47a3f.2f38@mx.google.com> Author: Armin Rigo Branch: Changeset: r2646:d7ec0dceb9ed Date: 2016-03-02 21:22 +0100 http://bitbucket.org/cffi/cffi/changeset/d7ec0dceb9ed/ Log: Oops, tests fail if we run them one by one diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -32,8 +32,12 @@ pythonpath.insert(0, cffi_base) return os.pathsep.join(pythonpath) -def setup_module(mod): - mod.org_env = os.environ.copy() +def copy_away_env(): + global org_env + try: + org_env + except NameError: + org_env = os.environ.copy() class EmbeddingTests: @@ -121,6 +125,7 @@ os.chdir(curdir) def patch_environment(self): + copy_away_env() path = self.get_path() # for libpypy-c.dll or Python27.dll path = os.path.split(sys.executable)[0] + os.path.pathsep + path From pypy.commits at gmail.com Wed Mar 2 15:45:39 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 02 Mar 2016 12:45:39 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: fix in assembly. 1 func for addr generation of gc_load/gc_store(_indexed) instead of 4 Message-ID: <56d750f3.472f1c0a.75880.3bcf@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82664:7ec50a9c18a6 Date: 2016-03-02 21:44 +0100 http://bitbucket.org/pypy/pypy/changeset/7ec50a9c18a6/ Log: fix in assembly. 
1 func for addr generation of gc_load/gc_store(_indexed) instead of 4 diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -908,16 +908,9 @@ def _emit_gc_load(self, op, arglocs, regalloc): - result_loc, base_loc, ofs_loc, size_loc, sign_loc = arglocs - # POOL assert not result_loc.is_in_pool() - # POOL assert not base_loc.is_in_pool() - # POOL assert not ofs_loc.is_in_pool() - if ofs_loc.is_imm(): - assert self._mem_offset_supported(ofs_loc.value) - src_addr = l.addr(ofs_loc.value, base_loc) - else: - src_addr = l.addr(0, base_loc, ofs_loc) - self._memory_read(result_loc, src_addr, size_loc.value, sign_loc.value) + result_loc, base_loc, index_loc, size_loc, sign_loc = arglocs + addr_loc = self._load_address(base_loc, index_loc, l.imm0) + self._memory_read(result_loc, addr_loc, size_loc.value, sign_loc.value) emit_gc_load_i = _emit_gc_load emit_gc_load_f = _emit_gc_load @@ -925,15 +918,7 @@ def _emit_gc_load_indexed(self, op, arglocs, regalloc): result_loc, base_loc, index_loc, offset_loc, size_loc, sign_loc=arglocs - # POOL assert not result_loc.is_in_pool() - # POOL assert not base_loc.is_in_pool() - # POOL assert not index_loc.is_in_pool() - # POOL assert not offset_loc.is_in_pool() - if offset_loc.is_imm() and self._mem_offset_supported(offset_loc.value): - addr_loc = l.addr(offset_loc.value, base_loc, index_loc) - else: - self.mc.AGRK(r.SCRATCH, index_loc, offset_loc) - addr_loc = l.addr(0, base_loc, r.SCRATCH) + addr_loc = self._load_address(base_loc, index_loc, offset_loc) self._memory_read(result_loc, addr_loc, size_loc.value, sign_loc.value) emit_gc_load_indexed_i = _emit_gc_load_indexed @@ -942,37 +927,30 @@ def emit_gc_store(self, op, arglocs, regalloc): (base_loc, index_loc, value_loc, size_loc) = arglocs - # POOL assert not base_loc.is_in_pool() - # POOL assert not index_loc.is_in_pool() - # POOL assert not value_loc.is_in_pool() - if index_loc.is_imm() and self._mem_offset_supported(index_loc.value): - addr_loc = l.addr(index_loc.value, base_loc) - else: - self.mc.LGR(r.SCRATCH, index_loc) - addr_loc = l.addr(0, base_loc, r.SCRATCH) + addr_loc = self._load_address(base_loc, index_loc, l.imm0) self._memory_store(value_loc, addr_loc, size_loc) def emit_gc_store_indexed(self, op, arglocs, regalloc): (base_loc, index_loc, value_loc, offset_loc, size_loc) = arglocs - # POOL assert not base_loc.is_in_pool() - # POOL assert not index_loc.is_in_pool() - # POOL assert not value_loc.is_in_pool() - addr_loc = self._load_address(base_loc, index_loc, offset_loc, r.SCRATCH) + addr_loc = self._load_address(base_loc, index_loc, offset_loc) self._memory_store(value_loc, addr_loc, size_loc) - def _load_address(self, base_loc, index_loc, offset_loc, helper_reg): - if index_loc.is_imm() and offset_loc.is_imm(): - const = offset_loc.value + index_loc.value - assert self._mem_offset_supported(const) - addr_loc = l.addr(const, base_loc) - elif offset_loc.is_imm() and self._mem_offset_supported(offset_loc.value): - assert index_loc.is_core_reg() - addr_loc = l.addr(offset_loc.value, base_loc, index_loc) + def _load_address(self, base_loc, index_loc, offset_imm): + assert offset_imm.is_imm() + offset = offset_imm.value + if index_loc.is_imm(): + offset = index_loc.value + offset + if self._mem_offset_supported(offset): + addr_loc = l.addr(offset, base_loc) + else: + self.mc.load_imm(r.SCRATCH, offset) + addr_loc = l.addr(0, base_loc, r.SCRATCH) else: - 
self.mc.AGRK(helper_reg, index_loc, offset_loc) - addr_loc = l.addr(0, base_loc, helper_reg) + assert self._mem_offset_supported(offset) + addr_loc = l.addr(offset, base_loc, index_loc) return addr_loc + def _mem_offset_supported(self, value): return -2**19 <= value < 2**19 diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -849,8 +849,8 @@ sign_loc = imm0 if size_box.value < 0: sign_loc = imm1 + result_loc = self.force_allocate_reg(op) self.free_op_vars() - result_loc = self.force_allocate_reg(op) return [result_loc, base_loc, index_loc, imm(size), sign_loc] prepare_gc_load_i = _prepare_gc_load From pypy.commits at gmail.com Wed Mar 2 15:58:17 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 02 Mar 2016 12:58:17 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: catchup default Message-ID: <56d753e9.034cc20a.d4ccb.218a@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82665:90730c0a4880 Date: 2016-03-02 21:57 +0100 http://bitbucket.org/pypy/pypy/changeset/90730c0a4880/ Log: catchup default diff too long, truncating to 2000 out of 12259 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -22,6 +22,7 @@ ^pypy/module/cpyext/test/.+\.obj$ ^pypy/module/cpyext/test/.+\.manifest$ ^pypy/module/test_lib_pypy/ctypes_tests/.+\.o$ +^pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test\.o$ ^pypy/module/cppyy/src/.+\.o$ ^pypy/module/cppyy/bench/.+\.so$ ^pypy/module/cppyy/bench/.+\.root$ @@ -35,7 +36,6 @@ ^pypy/module/test_lib_pypy/cffi_tests/__pycache__.+$ ^pypy/doc/.+\.html$ ^pypy/doc/config/.+\.rst$ -^pypy/doc/basicblock\.asc$ ^pypy/doc/.+\.svninfo$ ^rpython/translator/c/src/libffi_msvc/.+\.obj$ ^rpython/translator/c/src/libffi_msvc/.+\.dll$ @@ -45,53 +45,33 @@ ^rpython/translator/c/src/cjkcodecs/.+\.obj$ ^rpython/translator/c/src/stacklet/.+\.o$ ^rpython/translator/c/src/.+\.o$ -^rpython/translator/jvm/\.project$ -^rpython/translator/jvm/\.classpath$ -^rpython/translator/jvm/eclipse-bin$ -^rpython/translator/jvm/src/pypy/.+\.class$ -^rpython/translator/benchmark/docutils$ -^rpython/translator/benchmark/templess$ -^rpython/translator/benchmark/gadfly$ -^rpython/translator/benchmark/mako$ -^rpython/translator/benchmark/bench-custom\.benchmark_result$ -^rpython/translator/benchmark/shootout_benchmarks$ +^rpython/translator/llvm/.+\.so$ ^rpython/translator/goal/target.+-c$ ^rpython/translator/goal/.+\.exe$ ^rpython/translator/goal/.+\.dll$ ^pypy/goal/pypy-translation-snapshot$ ^pypy/goal/pypy-c -^pypy/goal/pypy-jvm -^pypy/goal/pypy-jvm.jar ^pypy/goal/.+\.exe$ ^pypy/goal/.+\.dll$ ^pypy/goal/.+\.lib$ ^pypy/_cache$ -^pypy/doc/statistic/.+\.html$ -^pypy/doc/statistic/.+\.eps$ -^pypy/doc/statistic/.+\.pdf$ -^rpython/translator/cli/src/pypylib\.dll$ -^rpython/translator/cli/src/query\.exe$ -^rpython/translator/cli/src/main\.exe$ +^lib-python/2.7/lib2to3/.+\.pickle$ ^lib_pypy/__pycache__$ ^lib_pypy/ctypes_config_cache/_.+_cache\.py$ ^lib_pypy/ctypes_config_cache/_.+_.+_\.py$ ^lib_pypy/_libmpdec/.+.o$ -^rpython/translator/cli/query-descriptions$ ^pypy/doc/discussion/.+\.html$ ^include/.+\.h$ ^include/.+\.inl$ ^pypy/doc/_build/.*$ ^pypy/doc/config/.+\.html$ ^pypy/doc/config/style\.css$ -^pypy/doc/jit/.+\.html$ -^pypy/doc/jit/style\.css$ ^pypy/doc/image/lattice1\.png$ ^pypy/doc/image/lattice2\.png$ ^pypy/doc/image/lattice3\.png$ ^pypy/doc/image/stackless_informal\.png$ ^pypy/doc/image/parsing_example.+\.png$ 
^rpython/doc/_build/.*$ -^pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test\.o$ ^compiled ^.git/ ^.hypothesis/ diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -41,29 +41,29 @@ Amaury Forgeot d'Arc Antonio Cuni Samuele Pedroni + Matti Picus Alex Gaynor Brian Kearns - Matti Picus Philip Jenvey Michael Hudson + Ronan Lamy David Schneider + Manuel Jacob Holger Krekel Christian Tismer Hakan Ardo - Manuel Jacob - Ronan Lamy Benjamin Peterson + Richard Plangger Anders Chrigstrom Eric van Riet Paap Wim Lavrijsen - Richard Plangger Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen + Remi Meier Lukas Diekmann Sven Hager Anders Lehmann - Remi Meier Aurelien Campeas Niklaus Haldimann Camillo Bruni @@ -72,8 +72,8 @@ Romain Guillebert Leonardo Santagada Seo Sanghyeon + Ronny Pfannschmidt Justin Peel - Ronny Pfannschmidt David Edelsohn Anders Hammarquist Jakub Gustak @@ -95,6 +95,7 @@ Tyler Wade Michael Foord Stephan Diehl + Vincent Legoll Stefan Schwarzer Valentino Volonghi Tomek Meka @@ -105,9 +106,9 @@ Jean-Paul Calderone Timo Paulssen Squeaky + Marius Gedminas Alexandre Fayolle Simon Burton - Marius Gedminas Martin Matusiak Konstantin Lopuhin Wenzhu Man @@ -116,16 +117,20 @@ Ivan Sichmann Freitas Greg Price Dario Bertini + Stefano Rivera Mark Pearse Simon Cross Andreas Stührk - Stefano Rivera + Edd Barrett Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Jeremy Thurgood Paweł Piotr Przeradowski + Spenser Bauman Paul deGrandis Ilya Osadchiy + marky1991 Tobias Oberstein Adrian Kuhn Boris Feigin @@ -134,14 +139,12 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Edd Barrett + Tobias Pape Wanja Saatkamp Gerald Klix Mike Blume - Tobias Pape Oscar Nierstrasz Stefan H. Muller - Jeremy Thurgood Rami Chowdhury Eugene Oden Henry Mason @@ -153,6 +156,8 @@ Lukas Renggli Guenter Jantzen Ned Batchelder + Tim Felgentreff + Anton Gulenko Amit Regmi Ben Young Nicolas Chauvat @@ -162,12 +167,12 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira - Tim Felgentreff + Yichao Yu Rocco Moretti Gintautas Miliauskas Michael Twomey Lucian Branescu Mihaila - Yichao Yu + Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -191,33 +196,33 @@ Stanislaw Halik Mikael Schönenberg Berkin Ilbeyi - Elmo M?ntynen + Elmo Mäntynen + Faye Zhao Jonathan David Riehl Anders Qvist Corbin Simpson Chirag Jadwani Beatrice During Alex Perry - Vincent Legoll + Vaibhav Sood Alan McIntyre - Spenser Bauman + William Leslie Alexander Sedov Attila Gobi + Jasper.Schulz Christopher Pope - Devin Jeanpierre - Vaibhav Sood Christian Tismer Marc Abramowitz Dan Stromberg Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan + Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang - Anton Gulenko Gabriel Lukas Vacek Andrew Dalke @@ -225,6 +230,7 @@ Jakub Stasiak Nathan Taylor Vladimir Kryachko + Omer Katz Jacek Generowicz Alejandro J. Cura Jacob Oscarson @@ -239,6 +245,7 @@ Lars Wassermann Philipp Rustemeuer Henrik Vendelbo + Richard Lancaster Dan Buch Miguel de Val Borro Artur Lisiecki @@ -250,18 +257,18 @@ Tomo Cocoa Kim Jin Su Toni Mattis + Amber Brown Lucas Stadler Julian Berman Markus Holtermann roberto at goyle Yury V. 
Zaytsev Anna Katrina Dominguez - William Leslie Bobby Impollonia - Faye Zhao timo at eistee.fritz.box Andrew Thompson Yusei Tahara + Aaron Tubbs Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado @@ -273,6 +280,7 @@ Christopher Armstrong Michael Hudson-Doyle Anders Sigfridsson + Nikolay Zinov Yasir Suhail Jason Michalski rafalgalczynski at gmail.com @@ -282,6 +290,7 @@ Gustavo Niemeyer Stephan Busemann Rafał Gałczyński + Matt Bogosian Christian Muirhead Berker Peksag James Lan @@ -316,9 +325,9 @@ Stefan Marr jiaaro Mads Kiilerich - Richard Lancaster opassembler.py Antony Lee + Jason Madden Yaroslav Fedevych Jim Hunziker Markus Unterwaditzer @@ -327,6 +336,7 @@ squeaky Zearin soareschen + Jonas Pfannschmidt Kurt Griffiths Mike Bayer Matthew Miller diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py --- a/lib_pypy/_pypy_testcapi.py +++ b/lib_pypy/_pypy_testcapi.py @@ -7,6 +7,7 @@ content = fid.read() # from cffi's Verifier() key = '\x00'.join([sys.version[:3], content]) + key += 'cpyext-gc-support-2' # this branch requires recompilation! if sys.version_info >= (3,): key = key.encode('utf-8') k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) @@ -62,7 +63,7 @@ if sys.platform == 'win32': # XXX pyconfig.h uses a pragma to link to the import library, # which is currently python27.lib - library = os.path.join(thisdir, '..', 'include', 'python27') + library = os.path.join(thisdir, '..', 'libs', 'python27') if not os.path.exists(library + '.lib'): # For a local translation or nightly build library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python27') diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py --- a/lib_pypy/cffi/api.py +++ b/lib_pypy/cffi/api.py @@ -550,6 +550,7 @@ lst.append(value) # if '__pypy__' in sys.builtin_module_names: + import os if sys.platform == "win32": # we need 'libpypy-c.lib'. Current distributions of # pypy (>= 4.1) contain it as 'libs/python27.lib'. @@ -558,11 +559,15 @@ ensure('library_dirs', os.path.join(sys.prefix, 'libs')) else: # we need 'libpypy-c.{so,dylib}', which should be by - # default located in 'sys.prefix/bin' + # default located in 'sys.prefix/bin' for installed + # systems. pythonlib = "pypy-c" if hasattr(sys, 'prefix'): - import os ensure('library_dirs', os.path.join(sys.prefix, 'bin')) + # On uninstalled pypy's, the libpypy-c is typically found in + # .../pypy/goal/. 
+ if hasattr(sys, 'prefix'): + ensure('library_dirs', os.path.join(sys.prefix, 'pypy', 'goal')) else: if sys.platform == "win32": template = "python%d%d" diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -36,11 +36,16 @@ "cStringIO", "thread", "itertools", "pyexpat", "_ssl", "cpyext", "array", "binascii", "_multiprocessing", '_warnings', "_collections", "_multibytecodec", "micronumpy", "_continuation", "_cffi_backend", - "_csv", "cppyy", "_pypyjson", "_vmprof", + "_csv", "cppyy", "_pypyjson", ]) -if os.uname()[4] == 's390x': - working_modules.remove("_vmprof") +from rpython.jit.backend import detect_cpu +try: + if detect_cpu.autodetect().startswith('x86'): + working_modules.add('_vmprof') +except detect_cpu.ProcessorAutodetectError: + pass + translation_modules = default_modules.copy() translation_modules.update([ @@ -165,12 +170,8 @@ cmdline="--translationmodules", suggests=[("objspace.allworkingmodules", False)]), - BoolOption("usepycfiles", "Write and read pyc files when importing", - default=True), - BoolOption("lonepycfiles", "Import pyc files with no matching py file", - default=False, - requires=[("objspace.usepycfiles", True)]), + default=False), StrOption("soabi", "Tag to differentiate extension modules built for different Python interpreters", diff --git a/pypy/doc/discussion/rawrefcount.rst b/pypy/doc/discussion/rawrefcount.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/discussion/rawrefcount.rst @@ -0,0 +1,158 @@ +====================== +Rawrefcount and the GC +====================== + + +GC Interface +------------ + +"PyObject" is a raw structure with at least two fields, ob_refcnt and +ob_pypy_link. The ob_refcnt is the reference counter as used on +CPython. If the PyObject structure is linked to a live PyPy object, +its current address is stored in ob_pypy_link and ob_refcnt is bumped +by either the constant REFCNT_FROM_PYPY, or the constant +REFCNT_FROM_PYPY_LIGHT (== REFCNT_FROM_PYPY + SOME_HUGE_VALUE) +(to mean "light finalizer"). + +Most PyPy objects exist outside cpyext, and conversely in cpyext it is +possible that a lot of PyObjects exist without being seen by the rest +of PyPy. At the interface, however, we can "link" a PyPy object and a +PyObject. There are two kinds of link: + +rawrefcount.create_link_pypy(p, ob) + + Makes a link between an exising object gcref 'p' and a newly + allocated PyObject structure 'ob'. ob->ob_refcnt must be + initialized to either REFCNT_FROM_PYPY, or + REFCNT_FROM_PYPY_LIGHT. (The second case is an optimization: + when the GC finds the PyPy object and PyObject no longer + referenced, it can just free() the PyObject.) + +rawrefcount.create_link_pyobj(p, ob) + + Makes a link from an existing PyObject structure 'ob' to a newly + allocated W_CPyExtPlaceHolderObject 'p'. You must also add + REFCNT_FROM_PYPY to ob->ob_refcnt. For cases where the PyObject + contains all the data, and the PyPy object is just a proxy. The + W_CPyExtPlaceHolderObject should have only a field that contains + the address of the PyObject, but that's outside the scope of the + GC. + +rawrefcount.from_obj(p) + + If there is a link from object 'p' made with create_link_pypy(), + returns the corresponding 'ob'. Otherwise, returns NULL. + +rawrefcount.to_obj(Class, ob) + + Returns ob->ob_pypy_link, cast to an instance of 'Class'. + + +Collection logic +---------------- + +Objects existing purely on the C side have ob->ob_pypy_link == 0; +these are purely reference counted. 
On the other hand, if +ob->ob_pypy_link != 0, then ob->ob_refcnt is at least REFCNT_FROM_PYPY +and the object is part of a "link". + +The idea is that links whose 'p' is not reachable from other PyPy +objects *and* whose 'ob->ob_refcnt' is REFCNT_FROM_PYPY or +REFCNT_FROM_PYPY_LIGHT are the ones who die. But it is more messy +because PyObjects still (usually) need to have a tp_dealloc called, +and this cannot occur immediately (and can do random things like +accessing other references this object points to, or resurrecting the +object). + +Let P = list of links created with rawrefcount.create_link_pypy() +and O = list of links created with rawrefcount.create_link_pyobj(). +The PyPy objects in the list O are all W_CPyExtPlaceHolderObject: all +the data is in the PyObjects, and all outsite references (if any) are +in C, as "PyObject *" fields. + +So, during the collection we do this about P links: + + for (p, ob) in P: + if ob->ob_refcnt != REFCNT_FROM_PYPY + and ob->ob_refcnt != REFCNT_FROM_PYPY_LIGHT: + mark 'p' as surviving, as well as all its dependencies + +At the end of the collection, the P and O links are both handled like +this: + + for (p, ob) in P + O: + if p is not surviving: # even if 'ob' might be surviving + unlink p and ob + if ob->ob_refcnt == REFCNT_FROM_PYPY_LIGHT: + free(ob) + elif ob->ob_refcnt > REFCNT_FROM_PYPY_LIGHT: + ob->ob_refcnt -= REFCNT_FROM_PYPY_LIGHT + else: + ob->ob_refcnt -= REFCNT_FROM_PYPY + if ob->ob_refcnt == 0: + invoke _Py_Dealloc(ob) later, outside the GC + + +GC Implementation +----------------- + +We need two copies of both the P list and O list, for young or old +objects. All four lists can be regular AddressLists of 'ob' objects. + +We also need an AddressDict mapping 'p' to 'ob' for all links in the P +list, and update it when PyPy objects move. + + +Further notes +------------- + +XXX +XXX the rest is the ideal world, but as a first step, we'll look +XXX for the minimal tweaks needed to adapt the existing cpyext +XXX + +For objects that are opaque in CPython, like , we always create +a PyPy object, and then when needed we make an empty PyObject and +attach it with create_link_pypy()/REFCNT_FROM_PYPY_LIGHT. + +For and objects, the corresponding PyObjects contain a +"long" or "double" field too. We link them with create_link_pypy() +and we can use REFCNT_FROM_PYPY_LIGHT too: 'tp_dealloc' doesn't +need to be called, and instead just calling free() is fine. + +For objects, we need both a PyPy and a PyObject side. These +are made with create_link_pypy()/REFCNT_FROM_PYPY. + +For custom PyXxxObjects allocated from the C extension module, we +need create_link_pyobj(). + +For or objects coming from PyPy, we use +create_link_pypy()/REFCNT_FROM_PYPY_LIGHT with a PyObject +preallocated with the size of the string. We copy the string +lazily into that area if PyString_AS_STRING() is called. + +For , , or objects in the C extension +module, we first allocate it as only a PyObject, which supports +mutation of the data from C, like CPython. When it is exported to +PyPy we could make a W_CPyExtPlaceHolderObject with +create_link_pyobj(). + +For objects coming from PyPy, if they are not specialized, +then the PyPy side holds a regular reference to the items. Then we +can allocate a PyTupleObject and store in it borrowed PyObject +pointers to the items. Such a case is created with +create_link_pypy()/REFCNT_FROM_PYPY_LIGHT. If it is specialized, +then it doesn't work because the items are created just-in-time on the +PyPy side. 
In this case, the PyTupleObject needs to hold real +references to the PyObject items, and we use create_link_pypy()/ +REFCNT_FROM_PYPY. In all cases, we have a C array of PyObjects +that we can directly return from PySequence_Fast_ITEMS, PyTuple_ITEMS, +PyTuple_GetItem, and so on. + +For objects coming from PyPy, we can use a cpyext list +strategy. The list turns into a PyListObject, as if it had been +allocated from C in the first place. The special strategy can hold +(only) a direct reference to the PyListObject, and we can use either +create_link_pyobj() or create_link_pypy() (to be decided). +PySequence_Fast_ITEMS then works for lists too, and PyList_GetItem +can return a borrowed reference, and so on. diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -1,5 +1,20 @@ -Making a PyPy Release -===================== +The PyPy Release Process +======================== + +Release Policy +++++++++++++++ + +We try to create a stable release a few times a year. These are released on +a branch named like release-2.x or release-4.x, and each release is tagged, +for instance release-4.0.1. + +After release, inevitably there are bug fixes. It is the responsibility of +the commiter who fixes a bug to make sure this fix is on the release branch, +so that we can then create a tagged bug-fix release, which will hopefully +happen more often than stable releases. + +How to Create a PyPy Release +++++++++++++++++++++++++++++ Overview -------- diff --git a/pypy/doc/tool/makecontributor.py b/pypy/doc/tool/makecontributor.py --- a/pypy/doc/tool/makecontributor.py +++ b/pypy/doc/tool/makecontributor.py @@ -72,6 +72,7 @@ 'Anton Gulenko':['anton gulenko', 'anton_gulenko'], 'Richard Lancaster':['richardlancaster'], 'William Leslie':['William ML Leslie'], + 'Spenser Bauman':['Spenser Andrew Bauman'], } alias_map = {} diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -153,6 +153,37 @@ Seperate structmember.h from Python.h Also enhance creating api functions to specify which header file they appear in (previously only pypy_decl.h) +.. branch: llimpl + +Refactor register_external(), remove running_on_llinterp mechanism and +apply sandbox transform on externals at the end of annotation. + +.. branch: cffi-embedding-win32 + +.. branch: windows-vmprof-support + +vmprof should work on Windows. + + +.. branch: reorder-map-attributes + +When creating instances and adding attributes in several different orders +depending on some condition, the JIT would create too much code. This is now +fixed. + +.. branch: cpyext-gc-support-2 + +Improve CPython C API support, which means lxml now runs unmodified +(after removing pypy hacks, pending pull request) + +.. branch: look-inside-tuple-hash + +Look inside tuple hash, improving mdp benchmark + +.. branch: vlen-resume + +Compress resume data, saving 10-20% of memory consumed by the JIT + .. branch: memop-simplify3 Further simplifying the backend operations malloc_cond_varsize and zero_array. 
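The collection rule in the rawrefcount design notes added above can be read as a small pure-Python model. The sketch below only illustrates that rule under simplified assumptions: a single list of P links, and an is_reachable() oracle standing in for the GC's marking phase. The names ToyPyObject, collect_p_links, free_ob and dealloc_later are invented for the example and are not the real rpython.rlib.rawrefcount API; the numeric constants are placeholders.

    REFCNT_FROM_PYPY = 1 << 30                               # placeholder value
    REFCNT_FROM_PYPY_LIGHT = REFCNT_FROM_PYPY + (1 << 40)    # "light finalizer" variant

    class ToyPyObject(object):
        """Stands in for the raw PyObject struct of the design notes."""
        def __init__(self):
            self.ob_refcnt = 0
            self.ob_pypy_link = None            # linked PyPy object, or None

    def collect_p_links(p_links, is_reachable, free_ob, dealloc_later):
        """One collection pass over (p, ob) pairs made by create_link_pypy().
        'is_reachable(p)' is the GC's verdict after marking; links whose PyPy
        side is dead and whose refcount came only from PyPy are dropped."""
        surviving = []
        for p, ob in p_links:
            if is_reachable(p):
                surviving.append((p, ob))
                continue
            ob.ob_pypy_link = None              # unlink p and ob
            if ob.ob_refcnt == REFCNT_FROM_PYPY_LIGHT:
                free_ob(ob)                     # no tp_dealloc needed, just free()
            elif ob.ob_refcnt > REFCNT_FROM_PYPY_LIGHT:
                ob.ob_refcnt -= REFCNT_FROM_PYPY_LIGHT
            else:
                ob.ob_refcnt -= REFCNT_FROM_PYPY
                if ob.ob_refcnt == 0:
                    dealloc_later(ob)           # _Py_Dealloc(ob), outside the GC
        return surviving

In the real system the deferred deallocations are what the dealloc_trigger() callback registered in cpyext's api.py later in this changeset drains via rawrefcount.next_dead().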
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -277,7 +277,6 @@ if config.translation.sandbox: config.objspace.lonepycfiles = False - config.objspace.usepycfiles = False config.translating = True diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -27,7 +27,7 @@ class W_Root(object): """This is the abstract root class of all wrapped objects that live in a 'normal' object space like StdObjSpace.""" - __slots__ = () + __slots__ = ('__weakref__',) user_overridden_class = False def getdict(self, space): diff --git a/pypy/interpreter/pyparser/pytokenizer.py b/pypy/interpreter/pyparser/pytokenizer.py --- a/pypy/interpreter/pyparser/pytokenizer.py +++ b/pypy/interpreter/pyparser/pytokenizer.py @@ -91,6 +91,7 @@ strstart = (0, 0, "") for line in lines: lnum = lnum + 1 + line = universal_newline(line) pos, max = 0, len(line) if contstr: @@ -259,3 +260,14 @@ token_list.append((tokens.ENDMARKER, '', lnum, pos, line)) return token_list + + +def universal_newline(line): + # show annotator that indexes below are non-negative + line_len_m2 = len(line) - 2 + if line_len_m2 >= 0 and line[-2] == '\r' and line[-1] == '\n': + return line[:line_len_m2] + '\n' + line_len_m1 = len(line) - 1 + if line_len_m1 >= 0 and line[-1] == '\r': + return line[:line_len_m1] + '\n' + return line diff --git a/pypy/interpreter/pyparser/test/test_pyparse.py b/pypy/interpreter/pyparser/test/test_pyparse.py --- a/pypy/interpreter/pyparser/test/test_pyparse.py +++ b/pypy/interpreter/pyparser/test/test_pyparse.py @@ -158,3 +158,10 @@ def test_print_function(self): self.parse("from __future__ import print_function\nx = print\n") + + def test_universal_newlines(self): + fmt = 'stuff = """hello%sworld"""' + expected_tree = self.parse(fmt % '\n') + for linefeed in ["\r\n","\r"]: + tree = self.parse(fmt % linefeed) + assert expected_tree == tree diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py --- a/pypy/interpreter/typedef.py +++ b/pypy/interpreter/typedef.py @@ -156,20 +156,6 @@ get_unique_interplevel_subclass._annspecialcase_ = "specialize:memo" _subclass_cache = {} -def enum_interplevel_subclasses(config, cls): - """Return a list of all the extra interp-level subclasses of 'cls' that - can be built by get_unique_interplevel_subclass().""" - result = [] - for flag1 in (False, True): - for flag2 in (False, True): - for flag3 in (False, True): - for flag4 in (False, True): - result.append(get_unique_interplevel_subclass( - config, cls, flag1, flag2, flag3, flag4)) - result = dict.fromkeys(result) - assert len(result) <= 6 - return result.keys() - def _getusercls(config, cls, wants_dict, wants_slots, wants_del, weakrefable): typedef = cls.typedef if wants_dict and typedef.hasdict: @@ -262,7 +248,7 @@ def user_setup(self, space, w_subtype): self.space = space self.w__class__ = w_subtype - self.user_setup_slots(w_subtype.nslots) + self.user_setup_slots(w_subtype.layout.nslots) def user_setup_slots(self, nslots): assert nslots == 0 diff --git a/pypy/module/_cffi_backend/embedding.py b/pypy/module/_cffi_backend/embedding.py --- a/pypy/module/_cffi_backend/embedding.py +++ b/pypy/module/_cffi_backend/embedding.py @@ -57,7 +57,7 @@ # pypy_init_embedded_cffi_module(). 
if not glob.patched_sys: space.appexec([], """(): - import os + import os, sys sys.stdin = sys.__stdin__ = os.fdopen(0, 'rb', 0) sys.stdout = sys.__stdout__ = os.fdopen(1, 'wb', 0) sys.stderr = sys.__stderr__ = os.fdopen(2, 'wb', 0) diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py --- a/pypy/module/_vmprof/test/test__vmprof.py +++ b/pypy/module/_vmprof/test/test__vmprof.py @@ -5,14 +5,15 @@ class AppTestVMProf(object): def setup_class(cls): cls.space = gettestobjspace(usemodules=['_vmprof', 'struct']) - cls.tmpfile = udir.join('test__vmprof.1').open('wb') - cls.w_tmpfileno = cls.space.wrap(cls.tmpfile.fileno()) - cls.w_tmpfilename = cls.space.wrap(cls.tmpfile.name) - cls.tmpfile2 = udir.join('test__vmprof.2').open('wb') - cls.w_tmpfileno2 = cls.space.wrap(cls.tmpfile2.fileno()) - cls.w_tmpfilename2 = cls.space.wrap(cls.tmpfile2.name) + cls.w_tmpfilename = cls.space.wrap(str(udir.join('test__vmprof.1'))) + cls.w_tmpfilename2 = cls.space.wrap(str(udir.join('test__vmprof.2'))) def test_import_vmprof(self): + tmpfile = open(self.tmpfilename, 'wb') + tmpfileno = tmpfile.fileno() + tmpfile2 = open(self.tmpfilename2, 'wb') + tmpfileno2 = tmpfile2.fileno() + import struct, sys WORD = struct.calcsize('l') @@ -45,7 +46,7 @@ return count import _vmprof - _vmprof.enable(self.tmpfileno, 0.01) + _vmprof.enable(tmpfileno, 0.01) _vmprof.disable() s = open(self.tmpfilename, 'rb').read() no_of_codes = count(s) @@ -56,7 +57,7 @@ pass """ in d - _vmprof.enable(self.tmpfileno2, 0.01) + _vmprof.enable(tmpfileno2, 0.01) exec """def foo2(): pass diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -34,7 +34,7 @@ import pypy.module.cpyext.pyerrors import pypy.module.cpyext.typeobject import pypy.module.cpyext.object -import pypy.module.cpyext.stringobject +import pypy.module.cpyext.bytesobject import pypy.module.cpyext.tupleobject import pypy.module.cpyext.setobject import pypy.module.cpyext.dictobject @@ -60,7 +60,6 @@ import pypy.module.cpyext.funcobject import pypy.module.cpyext.frameobject import pypy.module.cpyext.classobject -import pypy.module.cpyext.pypyintf import pypy.module.cpyext.memoryobject import pypy.module.cpyext.codecs import pypy.module.cpyext.pyfile diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -9,7 +9,7 @@ from rpython.rtyper.tool import rffi_platform from rpython.rtyper.lltypesystem import ll2ctypes from rpython.rtyper.annlowlevel import llhelper -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, keepalive_until_here from rpython.translator import cdir from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.gensupp import NameManager @@ -30,13 +30,13 @@ from rpython.rlib.rposix import is_valid_fd, validate_fd from rpython.rlib.unroll import unrolling_iterable from rpython.rlib.objectmodel import specialize -from rpython.rlib.exports import export_struct from pypy.module import exceptions from pypy.module.exceptions import interp_exceptions # CPython 2.4 compatibility from py.builtin import BaseException from rpython.tool.sourcetools import func_with_new_name from rpython.rtyper.lltypesystem.lloperation import llop +from rpython.rlib import rawrefcount DEBUG_WRAPPER = True @@ -194,7 +194,7 @@ class ApiFunction: def __init__(self, argtypes, restype, callable, 
error=_NOT_SPECIFIED, - c_name=None, gil=None): + c_name=None, gil=None, result_borrowed=False): self.argtypes = argtypes self.restype = restype self.functype = lltype.Ptr(lltype.FuncType(argtypes, restype)) @@ -211,17 +211,15 @@ self.argnames = argnames[1:] assert len(self.argnames) == len(self.argtypes) self.gil = gil + self.result_borrowed = result_borrowed + # + def get_llhelper(space): + return llhelper(self.functype, self.get_wrapper(space)) + self.get_llhelper = get_llhelper def _freeze_(self): return True - def get_llhelper(self, space): - llh = getattr(self, '_llhelper', None) - if llh is None: - llh = llhelper(self.functype, self.get_wrapper(space)) - self._llhelper = llh - return llh - @specialize.memo() def get_wrapper(self, space): wrapper = getattr(self, '_wrapper', None) @@ -234,7 +232,7 @@ return wrapper def cpython_api(argtypes, restype, error=_NOT_SPECIFIED, header='pypy_decl.h', - gil=None): + gil=None, result_borrowed=False): """ Declares a function to be exported. - `argtypes`, `restype` are lltypes and describe the function signature. @@ -263,13 +261,15 @@ rffi.cast(restype, 0) == 0) def decorate(func): + func._always_inline_ = 'try' func_name = func.func_name if header is not None: c_name = None else: c_name = func_name api_function = ApiFunction(argtypes, restype, func, error, - c_name=c_name, gil=gil) + c_name=c_name, gil=gil, + result_borrowed=result_borrowed) func.api_func = api_function if header is not None: @@ -280,6 +280,10 @@ raise ValueError("function %s has no return value for exceptions" % func) def make_unwrapper(catch_exception): + # ZZZ is this whole logic really needed??? It seems to be only + # for RPython code calling PyXxx() functions directly. I would + # think that usually directly calling the function is clean + # enough now names = api_function.argnames types_names_enum_ui = unrolling_iterable(enumerate( zip(api_function.argtypes, @@ -287,56 +291,58 @@ @specialize.ll() def unwrapper(space, *args): - from pypy.module.cpyext.pyobject import Py_DecRef - from pypy.module.cpyext.pyobject import make_ref, from_ref - from pypy.module.cpyext.pyobject import Reference + from pypy.module.cpyext.pyobject import Py_DecRef, is_pyobj + from pypy.module.cpyext.pyobject import from_ref, as_pyobj newargs = () - to_decref = [] + keepalives = () assert len(args) == len(api_function.argtypes) for i, (ARG, is_wrapped) in types_names_enum_ui: input_arg = args[i] if is_PyObject(ARG) and not is_wrapped: - # build a reference - if input_arg is None: - arg = lltype.nullptr(PyObject.TO) - elif isinstance(input_arg, W_Root): - ref = make_ref(space, input_arg) - to_decref.append(ref) - arg = rffi.cast(ARG, ref) + # build a 'PyObject *' (not holding a reference) + if not is_pyobj(input_arg): + keepalives += (input_arg,) + arg = rffi.cast(ARG, as_pyobj(space, input_arg)) + else: + arg = rffi.cast(ARG, input_arg) + elif is_PyObject(ARG) and is_wrapped: + # build a W_Root, possibly from a 'PyObject *' + if is_pyobj(input_arg): + arg = from_ref(space, input_arg) else: arg = input_arg - elif is_PyObject(ARG) and is_wrapped: - # convert to a wrapped object - if input_arg is None: - arg = input_arg - elif isinstance(input_arg, W_Root): - arg = input_arg - else: - try: - arg = from_ref(space, - rffi.cast(PyObject, input_arg)) - except TypeError, e: - err = OperationError(space.w_TypeError, - space.wrap( - "could not cast arg to PyObject")) - if not catch_exception: - raise err - state = space.fromcache(State) - state.set_exception(err) - if is_PyObject(restype): - return None - 
else: - return api_function.error_value + + ## ZZZ: for is_pyobj: + ## try: + ## arg = from_ref(space, + ## rffi.cast(PyObject, input_arg)) + ## except TypeError, e: + ## err = OperationError(space.w_TypeError, + ## space.wrap( + ## "could not cast arg to PyObject")) + ## if not catch_exception: + ## raise err + ## state = space.fromcache(State) + ## state.set_exception(err) + ## if is_PyObject(restype): + ## return None + ## else: + ## return api_function.error_value else: - # convert to a wrapped object + # arg is not declared as PyObject, no magic arg = input_arg newargs += (arg, ) - try: + if not catch_exception: + try: + res = func(space, *newargs) + finally: + keepalive_until_here(*keepalives) + else: + # non-rpython variant + assert not we_are_translated() try: res = func(space, *newargs) except OperationError, e: - if not catch_exception: - raise if not hasattr(api_function, "error_value"): raise state = space.fromcache(State) @@ -345,21 +351,13 @@ return None else: return api_function.error_value - if not we_are_translated(): - got_integer = isinstance(res, (int, long, float)) - assert got_integer == expect_integer,'got %r not integer' % res - if res is None: - return None - elif isinstance(res, Reference): - return res.get_wrapped(space) - else: - return res - finally: - for arg in to_decref: - Py_DecRef(space, arg) + # 'keepalives' is alive here (it's not rpython) + got_integer = isinstance(res, (int, long, float)) + assert got_integer == expect_integer, ( + 'got %r not integer' % (res,)) + return res unwrapper.func = func unwrapper.api_func = api_function - unwrapper._always_inline_ = 'try' return unwrapper unwrapper_catch = make_unwrapper(True) @@ -501,7 +499,7 @@ GLOBALS['%s#' % (cpyname, )] = ('PyTypeObject*', pypyexpr) for cpyname in '''PyMethodObject PyListObject PyLongObject - PyDictObject PyTupleObject PyClassObject'''.split(): + PyDictObject PyClassObject'''.split(): FORWARD_DECLS.append('typedef struct { PyObject_HEAD } %s' % (cpyname, )) build_exported_objects() @@ -514,14 +512,16 @@ "PyIntObject*": PyIntObject, "PyDateTime_CAPI*": lltype.Ptr(PyDateTime_CAPI)}[ctype] +# Note: as a special case, "PyObject" is the pointer type in RPython, +# corresponding to "PyObject *" in C. We do that only for PyObject. +# For example, "PyTypeObject" is the struct type even in RPython. 
PyTypeObject = lltype.ForwardReference() PyTypeObjectPtr = lltype.Ptr(PyTypeObject) -# It is important that these PyObjects are allocated in a raw fashion -# Thus we cannot save a forward pointer to the wrapped object -# So we need a forward and backward mapping in our State instance PyObjectStruct = lltype.ForwardReference() PyObject = lltype.Ptr(PyObjectStruct) -PyObjectFields = (("ob_refcnt", lltype.Signed), ("ob_type", PyTypeObjectPtr)) +PyObjectFields = (("ob_refcnt", lltype.Signed), + ("ob_pypy_link", lltype.Signed), + ("ob_type", PyTypeObjectPtr)) PyVarObjectFields = PyObjectFields + (("ob_size", Py_ssize_t), ) cpython_struct('PyObject', PyObjectFields, PyObjectStruct) PyVarObjectStruct = cpython_struct("PyVarObject", PyVarObjectFields) @@ -618,8 +618,8 @@ @specialize.ll() def wrapper(*args): - from pypy.module.cpyext.pyobject import make_ref, from_ref - from pypy.module.cpyext.pyobject import Reference + from pypy.module.cpyext.pyobject import make_ref, from_ref, is_pyobj + from pypy.module.cpyext.pyobject import as_pyobj # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer if gil_acquire: @@ -628,6 +628,7 @@ llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py retval = fatal_value boxed_args = () + tb = None try: if not we_are_translated() and DEBUG_WRAPPER: print >>sys.stderr, callable, @@ -635,10 +636,8 @@ for i, (typ, is_wrapped) in argtypes_enum_ui: arg = args[i] if is_PyObject(typ) and is_wrapped: - if arg: - arg_conv = from_ref(space, rffi.cast(PyObject, arg)) - else: - arg_conv = None + assert is_pyobj(arg) + arg_conv = from_ref(space, rffi.cast(PyObject, arg)) else: arg_conv = arg boxed_args += (arg_conv, ) @@ -653,6 +652,7 @@ except BaseException, e: failed = True if not we_are_translated(): + tb = sys.exc_info()[2] message = repr(e) import traceback traceback.print_exc() @@ -671,29 +671,34 @@ retval = error_value elif is_PyObject(callable.api_func.restype): - if result is None: - retval = rffi.cast(callable.api_func.restype, - make_ref(space, None)) - elif isinstance(result, Reference): - retval = result.get_ref(space) - elif not rffi._isllptr(result): - retval = rffi.cast(callable.api_func.restype, - make_ref(space, result)) + if is_pyobj(result): + retval = result else: - retval = result + if result is not None: + if callable.api_func.result_borrowed: + retval = as_pyobj(space, result) + else: + retval = make_ref(space, result) + retval = rffi.cast(callable.api_func.restype, retval) + else: + retval = lltype.nullptr(PyObject.TO) elif callable.api_func.restype is not lltype.Void: retval = rffi.cast(callable.api_func.restype, result) except Exception, e: print 'Fatal error in cpyext, CPython compatibility layer, calling', callable.__name__ print 'Either report a bug or consider not using this particular extension' if not we_are_translated(): + if tb is None: + tb = sys.exc_info()[2] import traceback traceback.print_exc() - print str(e) + if sys.stdout == sys.__stdout__: + import pdb; pdb.post_mortem(tb) # we can't do much here, since we're in ctypes, swallow else: print str(e) pypy_debug_catch_fatal_exception() + assert False rffi.stackcounter.stacks_counter -= 1 if gil_release: rgil.release() @@ -827,6 +832,19 @@ outputfilename=str(udir / "module_cache" / "pypyapi")) modulename = py.path.local(eci.libraries[-1]) + def dealloc_trigger(): + from pypy.module.cpyext.pyobject import _Py_Dealloc + print 'dealloc_trigger...' 
+ while True: + ob = rawrefcount.next_dead(PyObject) + if not ob: + break + print ob + _Py_Dealloc(space, ob) + print 'dealloc_trigger DONE' + return "RETRY" + rawrefcount.init(dealloc_trigger) + run_bootstrap_functions(space) # load the bridge, and init structure @@ -836,9 +854,9 @@ space.fromcache(State).install_dll(eci) # populate static data - builder = StaticObjectBuilder(space) + builder = space.fromcache(StaticObjectBuilder) for name, (typ, expr) in GLOBALS.iteritems(): - from pypy.module import cpyext + from pypy.module import cpyext # for the eval() below w_obj = eval(expr) if name.endswith('#'): name = name[:-1] @@ -894,27 +912,44 @@ class StaticObjectBuilder: def __init__(self, space): self.space = space - self.to_attach = [] + self.static_pyobjs = [] + self.static_objs_w = [] + self.cpyext_type_init = None + # + # add a "method" that is overridden in setup_library() + # ('self.static_pyobjs' is completely ignored in that case) + self.get_static_pyobjs = lambda: self.static_pyobjs def prepare(self, py_obj, w_obj): - from pypy.module.cpyext.pyobject import track_reference - py_obj.c_ob_refcnt = 1 - track_reference(self.space, py_obj, w_obj) - self.to_attach.append((py_obj, w_obj)) + "NOT_RPYTHON" + if py_obj: + py_obj.c_ob_refcnt = 1 # 1 for kept immortal + self.static_pyobjs.append(py_obj) + self.static_objs_w.append(w_obj) def attach_all(self): + # this is RPython, called once in pypy-c when it imports cpyext from pypy.module.cpyext.pyobject import get_typedescr, make_ref from pypy.module.cpyext.typeobject import finish_type_1, finish_type_2 + from pypy.module.cpyext.pyobject import track_reference + # space = self.space - space._cpyext_type_init = [] - for py_obj, w_obj in self.to_attach: + static_pyobjs = self.get_static_pyobjs() + static_objs_w = self.static_objs_w + for i in range(len(static_objs_w)): + track_reference(space, static_pyobjs[i], static_objs_w[i]) + # + self.cpyext_type_init = [] + for i in range(len(static_objs_w)): + py_obj = static_pyobjs[i] + w_obj = static_objs_w[i] w_type = space.type(w_obj) - typedescr = get_typedescr(w_type.instancetypedef) + typedescr = get_typedescr(w_type.layout.typedef) py_obj.c_ob_type = rffi.cast(PyTypeObjectPtr, make_ref(space, w_type)) typedescr.attach(space, py_obj, w_obj) - cpyext_type_init = space._cpyext_type_init - del space._cpyext_type_init + cpyext_type_init = self.cpyext_type_init + self.cpyext_type_init = None for pto, w_type in cpyext_type_init: finish_type_1(space, pto) finish_type_2(space, pto, w_type) @@ -1067,7 +1102,7 @@ if name.endswith('#'): structs.append('%s %s;' % (typ[:-1], name[:-1])) elif name.startswith('PyExc_'): - structs.append('extern PyTypeObject _%s;' % (name,)) + structs.append('PyTypeObject _%s;' % (name,)) structs.append('PyObject* %s = (PyObject*)&_%s;' % (name, name)) elif typ == 'PyDateTime_CAPI*': structs.append('%s %s = NULL;' % (typ, name)) @@ -1107,7 +1142,7 @@ if not use_micronumpy: return use_micronumpy # import to register api functions by side-effect - import pypy.module.cpyext.ndarrayobject + import pypy.module.cpyext.ndarrayobject global GLOBALS, SYMBOLS_C, separate_module_files GLOBALS["PyArray_Type#"]= ('PyTypeObject*', "space.gettypeobject(W_NDimArray.typedef)") SYMBOLS_C += ['PyArray_Type', '_PyArray_FILLWBYTE', '_PyArray_ZEROS'] @@ -1116,10 +1151,8 @@ def setup_library(space): "NOT_RPYTHON" - from pypy.module.cpyext.pyobject import make_ref use_micronumpy = setup_micronumpy(space) - - export_symbols = list(FUNCTIONS) + SYMBOLS_C + list(GLOBALS) + export_symbols = 
sorted(FUNCTIONS) + sorted(SYMBOLS_C) + sorted(GLOBALS) from rpython.translator.c.database import LowLevelDatabase db = LowLevelDatabase() @@ -1135,41 +1168,37 @@ run_bootstrap_functions(space) setup_va_functions(eci) - from pypy.module import cpyext # for eval() below - - # Set up the types. Needs a special case, because of the - # immediate cycle involving 'c_ob_type', and because we don't - # want these types to be Py_TPFLAGS_HEAPTYPE. - static_types = {} - for name, (typ, expr) in GLOBALS.items(): - if typ == 'PyTypeObject*': - pto = lltype.malloc(PyTypeObject, immortal=True, - zero=True, flavor='raw') - pto.c_ob_refcnt = 1 - pto.c_tp_basicsize = -1 - static_types[name] = pto - builder = StaticObjectBuilder(space) - for name, pto in static_types.items(): - pto.c_ob_type = static_types['PyType_Type#'] - w_type = eval(GLOBALS[name][1]) - builder.prepare(rffi.cast(PyObject, pto), w_type) - builder.attach_all() - - # populate static data - for name, (typ, expr) in GLOBALS.iteritems(): - name = name.replace("#", "") - if name.startswith('PyExc_'): + # emit uninitialized static data + builder = space.fromcache(StaticObjectBuilder) + lines = ['PyObject *pypy_static_pyobjs[] = {\n'] + include_lines = ['RPY_EXTERN PyObject *pypy_static_pyobjs[];\n'] + for name, (typ, expr) in sorted(GLOBALS.items()): + if name.endswith('#'): + assert typ in ('PyObject*', 'PyTypeObject*', 'PyIntObject*') + typ, name = typ[:-1], name[:-1] + elif name.startswith('PyExc_'): + typ = 'PyTypeObject' name = '_' + name - w_obj = eval(expr) - if typ in ('PyObject*', 'PyTypeObject*', 'PyIntObject*'): - struct_ptr = make_ref(space, w_obj) elif typ == 'PyDateTime_CAPI*': continue else: assert False, "Unknown static data: %s %s" % (typ, name) - struct = rffi.cast(get_structtype_for_ctype(typ), struct_ptr)._obj - struct._compilation_info = eci - export_struct(name, struct) + + from pypy.module import cpyext # for the eval() below + w_obj = eval(expr) + builder.prepare(None, w_obj) + lines.append('\t(PyObject *)&%s,\n' % (name,)) + include_lines.append('RPY_EXPORTED %s %s;\n' % (typ, name)) + + lines.append('};\n') + eci2 = CConfig._compilation_info_.merge(ExternalCompilationInfo( + separate_module_sources = [''.join(lines)], + post_include_bits = [''.join(include_lines)], + )) + # override this method to return a pointer to this C array directly + builder.get_static_pyobjs = rffi.CExternVariable( + PyObjectP, 'pypy_static_pyobjs', eci2, c_type='PyObject **', + getter_only=True, declare_as_extern=False) for name, func in FUNCTIONS.iteritems(): newname = mangle_name('PyPy', name) or name @@ -1180,6 +1209,10 @@ trunk_include = pypydir.dirpath() / 'include' copy_header_files(trunk_include, use_micronumpy) +def init_static_data_translated(space): + builder = space.fromcache(StaticObjectBuilder) + builder.attach_all() + def _load_from_cffi(space, name, path, initptr): from pypy.module._cffi_backend import cffi1_module cffi1_module.load_cffi1_module(space, name, path, initptr) @@ -1262,22 +1295,18 @@ @specialize.ll() def generic_cpy_call(space, func, *args): FT = lltype.typeOf(func).TO - return make_generic_cpy_call(FT, True, False)(space, func, *args) - - at specialize.ll() -def generic_cpy_call_dont_decref(space, func, *args): - FT = lltype.typeOf(func).TO - return make_generic_cpy_call(FT, False, False)(space, func, *args) + return make_generic_cpy_call(FT, False)(space, func, *args) @specialize.ll() def generic_cpy_call_expect_null(space, func, *args): FT = lltype.typeOf(func).TO - return make_generic_cpy_call(FT, True, 
True)(space, func, *args) + return make_generic_cpy_call(FT, True)(space, func, *args) @specialize.memo() -def make_generic_cpy_call(FT, decref_args, expect_null): +def make_generic_cpy_call(FT, expect_null): from pypy.module.cpyext.pyobject import make_ref, from_ref, Py_DecRef - from pypy.module.cpyext.pyobject import RefcountState + from pypy.module.cpyext.pyobject import is_pyobj, as_pyobj + from pypy.module.cpyext.pyobject import get_w_obj_and_decref from pypy.module.cpyext.pyerrors import PyErr_Occurred unrolling_arg_types = unrolling_iterable(enumerate(FT.ARGS)) RESULT_TYPE = FT.RESULT @@ -1305,65 +1334,49 @@ @specialize.ll() def generic_cpy_call(space, func, *args): boxed_args = () - to_decref = [] + keepalives = () assert len(args) == len(FT.ARGS) for i, ARG in unrolling_arg_types: arg = args[i] if is_PyObject(ARG): - if arg is None: - boxed_args += (lltype.nullptr(PyObject.TO),) - elif isinstance(arg, W_Root): - ref = make_ref(space, arg) - boxed_args += (ref,) - if decref_args: - to_decref.append(ref) - else: - boxed_args += (arg,) - else: - boxed_args += (arg,) + if not is_pyobj(arg): + keepalives += (arg,) + arg = as_pyobj(space, arg) + boxed_args += (arg,) try: - # create a new container for borrowed references - state = space.fromcache(RefcountState) - old_container = state.swap_borrow_container(None) - try: - # Call the function - result = call_external_function(func, *boxed_args) - finally: - state.swap_borrow_container(old_container) + # Call the function + result = call_external_function(func, *boxed_args) + finally: + keepalive_until_here(*keepalives) - if is_PyObject(RESULT_TYPE): - if result is None: - ret = result - elif isinstance(result, W_Root): - ret = result + if is_PyObject(RESULT_TYPE): + if not is_pyobj(result): + ret = result + else: + # The object reference returned from a C function + # that is called from Python must be an owned reference + # - ownership is transferred from the function to its caller. + if result: + ret = get_w_obj_and_decref(space, result) else: - ret = from_ref(space, result) - # The object reference returned from a C function - # that is called from Python must be an owned reference - # - ownership is transferred from the function to its caller. 
- if result: - Py_DecRef(space, result) + ret = None - # Check for exception consistency - has_error = PyErr_Occurred(space) is not None - has_result = ret is not None - if has_error and has_result: - raise OperationError(space.w_SystemError, space.wrap( - "An exception was set, but function returned a value")) - elif not expect_null and not has_error and not has_result: - raise OperationError(space.w_SystemError, space.wrap( - "Function returned a NULL result without setting an exception")) + # Check for exception consistency + has_error = PyErr_Occurred(space) is not None + has_result = ret is not None + if has_error and has_result: + raise OperationError(space.w_SystemError, space.wrap( + "An exception was set, but function returned a value")) + elif not expect_null and not has_error and not has_result: + raise OperationError(space.w_SystemError, space.wrap( + "Function returned a NULL result without setting an exception")) - if has_error: - state = space.fromcache(State) - state.check_and_raise_exception() + if has_error: + state = space.fromcache(State) + state.check_and_raise_exception() - return ret - return result - finally: - if decref_args: - for ref in to_decref: - Py_DecRef(space, ref) + return ret + return result + return generic_cpy_call - diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py --- a/pypy/module/cpyext/bufferobject.py +++ b/pypy/module/cpyext/bufferobject.py @@ -25,7 +25,7 @@ @bootstrap_function def init_bufferobject(space): "Type description of PyBufferObject" - make_typedescr(space.w_buffer.instancetypedef, + make_typedescr(space.w_buffer.layout.typedef, basestruct=PyBufferObject.TO, attach=buffer_attach, dealloc=buffer_dealloc, diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py new file mode 100644 --- /dev/null +++ b/pypy/module/cpyext/bytesobject.py @@ -0,0 +1,319 @@ +from pypy.interpreter.error import OperationError +from rpython.rtyper.lltypesystem import rffi, lltype +from pypy.module.cpyext.api import ( + cpython_api, cpython_struct, bootstrap_function, build_type_checkers, + PyObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL) +from pypy.module.cpyext.pyerrors import PyErr_BadArgument +from pypy.module.cpyext.pyobject import ( + PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, + make_typedescr, get_typedescr) + +## +## Implementation of PyStringObject +## ================================ +## +## The problem +## ----------- +## +## PyString_AsString() must return a (non-movable) pointer to the underlying +## buffer, whereas pypy strings are movable. C code may temporarily store +## this address and use it, as long as it owns a reference to the PyObject. +## There is no "release" function to specify that the pointer is not needed +## any more. +## +## Also, the pointer may be used to fill the initial value of string. This is +## valid only when the string was just allocated, and is not used elsewhere. +## +## Solution +## -------- +## +## PyStringObject contains two additional members: the size and a pointer to a +## char buffer; it may be NULL. +## +## - A string allocated by pypy will be converted into a PyStringObject with a +## NULL buffer. The first time PyString_AsString() is called, memory is +## allocated (with flavor='raw') and content is copied. 
+## +## - A string allocated with PyString_FromStringAndSize(NULL, size) will +## allocate a PyStringObject structure, and a buffer with the specified +## size, but the reference won't be stored in the global map; there is no +## corresponding object in pypy. When from_ref() or Py_INCREF() is called, +## the pypy string is created, and added to the global map of tracked +## objects. The buffer is then supposed to be immutable. +## +## - _PyString_Resize() works only on not-yet-pypy'd strings, and returns a +## similar object. +## +## - PyString_Size() doesn't need to force the object. +## +## - There could be an (expensive!) check in from_ref() that the buffer still +## corresponds to the pypy gc-managed string. +## + +PyStringObjectStruct = lltype.ForwardReference() +PyStringObject = lltype.Ptr(PyStringObjectStruct) +PyStringObjectFields = PyObjectFields + \ + (("buffer", rffi.CCHARP), ("size", Py_ssize_t)) +cpython_struct("PyStringObject", PyStringObjectFields, PyStringObjectStruct) + + at bootstrap_function +def init_stringobject(space): + "Type description of PyStringObject" + make_typedescr(space.w_str.layout.typedef, + basestruct=PyStringObject.TO, + attach=string_attach, + dealloc=string_dealloc, + realize=string_realize) + +PyString_Check, PyString_CheckExact = build_type_checkers("String", "w_str") + +def new_empty_str(space, length): + """ + Allocate a PyStringObject and its buffer, but without a corresponding + interpreter object. The buffer may be mutated, until string_realize() is + called. Refcount of the result is 1. + """ + typedescr = get_typedescr(space.w_str.layout.typedef) + py_obj = typedescr.allocate(space, space.w_str) + py_str = rffi.cast(PyStringObject, py_obj) + + buflen = length + 1 + py_str.c_size = length + py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, buflen, + flavor='raw', zero=True) + return py_str + +def string_attach(space, py_obj, w_obj): + """ + Fills a newly allocated PyStringObject with the given string object. The + buffer must not be modified. + """ + py_str = rffi.cast(PyStringObject, py_obj) + py_str.c_size = len(space.str_w(w_obj)) + py_str.c_buffer = lltype.nullptr(rffi.CCHARP.TO) + +def string_realize(space, py_obj): + """ + Creates the string in the interpreter. The PyStringObject buffer must not + be modified after this call. + """ + py_str = rffi.cast(PyStringObject, py_obj) + s = rffi.charpsize2str(py_str.c_buffer, py_str.c_size) + w_obj = space.wrap(s) + track_reference(space, py_obj, w_obj) + return w_obj + + at cpython_api([PyObject], lltype.Void, header=None) +def string_dealloc(space, py_obj): + """Frees allocated PyStringObject resources. 
+ """ + py_str = rffi.cast(PyStringObject, py_obj) + if py_str.c_buffer: + lltype.free(py_str.c_buffer, flavor="raw") + from pypy.module.cpyext.object import PyObject_dealloc + PyObject_dealloc(space, py_obj) + +#_______________________________________________________________________ + + at cpython_api([CONST_STRING, Py_ssize_t], PyObject) +def PyString_FromStringAndSize(space, char_p, length): + if char_p: + s = rffi.charpsize2str(char_p, length) + return make_ref(space, space.wrap(s)) + else: + return rffi.cast(PyObject, new_empty_str(space, length)) + + at cpython_api([CONST_STRING], PyObject) +def PyString_FromString(space, char_p): + s = rffi.charp2str(char_p) + return space.wrap(s) + + at cpython_api([PyObject], rffi.CCHARP, error=0) +def PyString_AsString(space, ref): + if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: + pass # typecheck returned "ok" without forcing 'ref' at all + elif not PyString_Check(space, ref): # otherwise, use the alternate way + raise OperationError(space.w_TypeError, space.wrap( + "PyString_AsString only support strings")) + ref_str = rffi.cast(PyStringObject, ref) + if not ref_str.c_buffer: + # copy string buffer + w_str = from_ref(space, ref) + s = space.str_w(w_str) + ref_str.c_buffer = rffi.str2charp(s) + return ref_str.c_buffer + + at cpython_api([PyObject, rffi.CCHARPP, rffi.CArrayPtr(Py_ssize_t)], rffi.INT_real, error=-1) +def PyString_AsStringAndSize(space, ref, buffer, length): + if not PyString_Check(space, ref): + raise OperationError(space.w_TypeError, space.wrap( + "PyString_AsStringAndSize only support strings")) + ref_str = rffi.cast(PyStringObject, ref) + if not ref_str.c_buffer: + # copy string buffer + w_str = from_ref(space, ref) + s = space.str_w(w_str) + ref_str.c_buffer = rffi.str2charp(s) + buffer[0] = ref_str.c_buffer + if length: + length[0] = ref_str.c_size + else: + i = 0 + while ref_str.c_buffer[i] != '\0': + i += 1 + if i != ref_str.c_size: + raise OperationError(space.w_TypeError, space.wrap( + "expected string without null bytes")) + return 0 + + at cpython_api([PyObject], Py_ssize_t, error=-1) +def PyString_Size(space, ref): + if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: + ref = rffi.cast(PyStringObject, ref) + return ref.c_size + else: + w_obj = from_ref(space, ref) + return space.len_w(w_obj) + + at cpython_api([PyObjectP, Py_ssize_t], rffi.INT_real, error=-1) +def _PyString_Resize(space, ref, newsize): + """A way to resize a string object even though it is "immutable". Only use this to + build up a brand new string object; don't use this if the string may already be + known in other parts of the code. It is an error to call this function if the + refcount on the input string object is not one. Pass the address of an existing + string object as an lvalue (it may be written into), and the new size desired. + On success, *string holds the resized string object and 0 is returned; + the address in *string may differ from its input value. If the reallocation + fails, the original string object at *string is deallocated, *string is + set to NULL, a memory exception is set, and -1 is returned. 
+ """ + # XXX always create a new string so far + py_str = rffi.cast(PyStringObject, ref[0]) + if not py_str.c_buffer: + raise OperationError(space.w_SystemError, space.wrap( + "_PyString_Resize called on already created string")) + try: + py_newstr = new_empty_str(space, newsize) + except MemoryError: + Py_DecRef(space, ref[0]) + ref[0] = lltype.nullptr(PyObject.TO) + raise + to_cp = newsize + oldsize = py_str.c_size + if oldsize < newsize: + to_cp = oldsize + for i in range(to_cp): + py_newstr.c_buffer[i] = py_str.c_buffer[i] + Py_DecRef(space, ref[0]) + ref[0] = rffi.cast(PyObject, py_newstr) + return 0 + + at cpython_api([PyObject, PyObject], rffi.INT, error=CANNOT_FAIL) +def _PyString_Eq(space, w_str1, w_str2): + return space.eq_w(w_str1, w_str2) + + at cpython_api([PyObjectP, PyObject], lltype.Void) +def PyString_Concat(space, ref, w_newpart): + """Create a new string object in *string containing the contents of newpart + appended to string; the caller will own the new reference. The reference to + the old value of string will be stolen. If the new string cannot be created, + the old reference to string will still be discarded and the value of + *string will be set to NULL; the appropriate exception will be set.""" + + if not ref[0]: + return + + if w_newpart is None or not PyString_Check(space, ref[0]) or \ + not PyString_Check(space, w_newpart): + Py_DecRef(space, ref[0]) + ref[0] = lltype.nullptr(PyObject.TO) + return + w_str = from_ref(space, ref[0]) + w_newstr = space.add(w_str, w_newpart) + Py_DecRef(space, ref[0]) + ref[0] = make_ref(space, w_newstr) + + at cpython_api([PyObjectP, PyObject], lltype.Void) +def PyString_ConcatAndDel(space, ref, newpart): + """Create a new string object in *string containing the contents of newpart + appended to string. This version decrements the reference count of newpart.""" + PyString_Concat(space, ref, newpart) + Py_DecRef(space, newpart) + + at cpython_api([PyObject, PyObject], PyObject) +def PyString_Format(space, w_format, w_args): + """Return a new string object from format and args. Analogous to format % + args. The args argument must be a tuple.""" + return space.mod(w_format, w_args) + + at cpython_api([CONST_STRING], PyObject) +def PyString_InternFromString(space, string): + """A combination of PyString_FromString() and + PyString_InternInPlace(), returning either a new string object that has + been interned, or a new ("owned") reference to an earlier interned string + object with the same value.""" + s = rffi.charp2str(string) + return space.new_interned_str(s) + + at cpython_api([PyObjectP], lltype.Void) +def PyString_InternInPlace(space, string): + """Intern the argument *string in place. The argument must be the + address of a pointer variable pointing to a Python string object. + If there is an existing interned string that is the same as + *string, it sets *string to it (decrementing the reference count + of the old string object and incrementing the reference count of + the interned string object), otherwise it leaves *string alone and + interns it (incrementing its reference count). (Clarification: + even though there is a lot of talk about reference counts, think + of this function as reference-count-neutral; you own the object + after the call if and only if you owned it before the call.) 
+ + This function is not available in 3.x and does not have a PyBytes + alias.""" + w_str = from_ref(space, string[0]) + w_str = space.new_interned_w_str(w_str) + Py_DecRef(space, string[0]) + string[0] = make_ref(space, w_str) + + at cpython_api([PyObject, CONST_STRING, CONST_STRING], PyObject) +def PyString_AsEncodedObject(space, w_str, encoding, errors): + """Encode a string object using the codec registered for encoding and return + the result as Python object. encoding and errors have the same meaning as + the parameters of the same name in the string encode() method. The codec to + be used is looked up using the Python codec registry. Return NULL if an + exception was raised by the codec. + + This function is not available in 3.x and does not have a PyBytes alias.""" + if not PyString_Check(space, w_str): + PyErr_BadArgument(space) + + w_encoding = w_errors = None + if encoding: + w_encoding = space.wrap(rffi.charp2str(encoding)) + if errors: + w_errors = space.wrap(rffi.charp2str(errors)) + return space.call_method(w_str, 'encode', w_encoding, w_errors) + + at cpython_api([PyObject, CONST_STRING, CONST_STRING], PyObject) +def PyString_AsDecodedObject(space, w_str, encoding, errors): + """Decode a string object by passing it to the codec registered + for encoding and return the result as Python object. encoding and + errors have the same meaning as the parameters of the same name in + the string encode() method. The codec to be used is looked up + using the Python codec registry. Return NULL if an exception was + raised by the codec. + + This function is not available in 3.x and does not have a PyBytes alias.""" + if not PyString_Check(space, w_str): + PyErr_BadArgument(space) + + w_encoding = w_errors = None + if encoding: + w_encoding = space.wrap(rffi.charp2str(encoding)) + if errors: + w_errors = space.wrap(rffi.charp2str(errors)) + return space.call_method(w_str, "decode", w_encoding, w_errors) + + at cpython_api([PyObject, PyObject], PyObject) +def _PyString_Join(space, w_sep, w_seq): + return space.call_method(w_sep, 'join', w_seq) diff --git a/pypy/module/cpyext/complexobject.py b/pypy/module/cpyext/complexobject.py --- a/pypy/module/cpyext/complexobject.py +++ b/pypy/module/cpyext/complexobject.py @@ -43,7 +43,7 @@ # lltype does not handle functions returning a structure. This implements a # helper function, which takes as argument a reference to the return value. - at cpython_api([PyObject, Py_complex_ptr], lltype.Void) + at cpython_api([PyObject, Py_complex_ptr], rffi.INT_real, error=-1) def _PyComplex_AsCComplex(space, w_obj, result): """Return the Py_complex value of the complex number op. @@ -60,7 +60,7 @@ # if the above did not work, interpret obj as a float giving the # real part of the result, and fill in the imaginary part as 0. 
result.c_real = PyFloat_AsDouble(space, w_obj) # -1 on failure - return + return 0 if not PyComplex_Check(space, w_obj): raise OperationError(space.w_TypeError, space.wrap( @@ -69,3 +69,4 @@ assert isinstance(w_obj, W_ComplexObject) result.c_real = w_obj.realval result.c_imag = w_obj.imagval + return 0 diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py --- a/pypy/module/cpyext/dictobject.py +++ b/pypy/module/cpyext/dictobject.py @@ -2,8 +2,7 @@ from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, build_type_checkers, Py_ssize_t, Py_ssize_tP, CONST_STRING) -from pypy.module.cpyext.pyobject import PyObject, PyObjectP, borrow_from -from pypy.module.cpyext.pyobject import RefcountState +from pypy.module.cpyext.pyobject import PyObject, PyObjectP, as_pyobj from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall from pypy.interpreter.error import OperationError from rpython.rlib.objectmodel import specialize @@ -14,13 +13,17 @@ PyDict_Check, PyDict_CheckExact = build_type_checkers("Dict") - at cpython_api([PyObject, PyObject], PyObject, error=CANNOT_FAIL) + at cpython_api([PyObject, PyObject], PyObject, error=CANNOT_FAIL, + result_borrowed=True) def PyDict_GetItem(space, w_dict, w_key): try: w_res = space.getitem(w_dict, w_key) except: return None - return borrow_from(w_dict, w_res) + # NOTE: this works so far because all our dict strategies store + # *values* as full objects, which stay alive as long as the dict is + # alive and not modified. So we can return a borrowed ref. + return w_res @cpython_api([PyObject, PyObject, PyObject], rffi.INT_real, error=-1) def PyDict_SetItem(space, w_dict, w_key, w_obj): @@ -47,7 +50,8 @@ else: PyErr_BadInternalCall(space) - at cpython_api([PyObject, CONST_STRING], PyObject, error=CANNOT_FAIL) + at cpython_api([PyObject, CONST_STRING], PyObject, error=CANNOT_FAIL, + result_borrowed=True) def PyDict_GetItemString(space, w_dict, key): """This is the same as PyDict_GetItem(), but key is specified as a char*, rather than a PyObject*.""" @@ -55,9 +59,10 @@ w_res = space.finditem_str(w_dict, rffi.charp2str(key)) except: w_res = None - if w_res is None: - return None - return borrow_from(w_dict, w_res) + # NOTE: this works so far because all our dict strategies store + # *values* as full objects, which stay alive as long as the dict is + # alive and not modified. So we can return a borrowed ref. + return w_res @cpython_api([PyObject, CONST_STRING], rffi.INT_real, error=-1) def PyDict_DelItemString(space, w_dict, key_ptr): @@ -170,10 +175,13 @@ if w_dict is None: return 0 - # Note: this is not efficient. Storing an iterator would probably + # XXX XXX PyDict_Next is not efficient. Storing an iterator would probably # work, but we can't work out how to not leak it if iteration does - # not complete. + # not complete. Alternatively, we could add some RPython-only + # dict-iterator method to move forward by N steps. 
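# Illustrative sketch, not part of the changeset above: the fallback below
# re-creates an "iteritems" iterator on every PyDict_Next call and skips the
# first `pos` entries again, so a full C-level
#     while (PyDict_Next(d, &pos, &key, &value)) { ... }
# loop over a dict of size n performs about n*(n+1)/2 iterator steps.
# A self-contained (plain CPython) count of that cost:
def _pydict_next_total_steps(n):
    d = dict.fromkeys(range(n))
    pos = 0
    steps = 0
    while pos < len(d):
        it = iter(d.items())        # fresh iterator, as in the code below
        for _ in range(pos):        # re-skip everything already returned
            next(it)
            steps += 1
        next(it)                    # the key/value pair returned this call
        steps += 1
        pos += 1
    return steps                    # n*(n+1)/2, i.e. quadratic in n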
+ w_dict.ensure_object_strategy() # make sure both keys and values can + # be borrwed try: w_iter = space.call_method(space.w_dict, "iteritems", w_dict) pos = ppos[0] @@ -183,11 +191,10 @@ w_item = space.call_method(w_iter, "next") w_key, w_value = space.fixedview(w_item, 2) - state = space.fromcache(RefcountState) if pkey: - pkey[0] = state.make_borrowed(w_dict, w_key) + pkey[0] = as_pyobj(space, w_key) if pvalue: - pvalue[0] = state.make_borrowed(w_dict, w_value) + pvalue[0] = as_pyobj(space, w_value) ppos[0] += 1 except OperationError, e: if not e.match(space, space.w_StopIteration): diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py --- a/pypy/module/cpyext/eval.py +++ b/pypy/module/cpyext/eval.py @@ -4,7 +4,7 @@ from pypy.module.cpyext.api import ( cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, fread, feof, Py_ssize_tP, cpython_struct, is_valid_fp) -from pypy.module.cpyext.pyobject import PyObject, borrow_from +from pypy.module.cpyext.pyobject import PyObject from pypy.module.cpyext.pyerrors import PyErr_SetFromErrno from pypy.module.cpyext.funcobject import PyCodeObject from pypy.module.__builtin__ import compiling @@ -23,7 +23,7 @@ def PyEval_CallObjectWithKeywords(space, w_obj, w_arg, w_kwds): return space.call(w_obj, w_arg, w_kwds) - at cpython_api([], PyObject) + at cpython_api([], PyObject, result_borrowed=True) def PyEval_GetBuiltins(space): """Return a dictionary of the builtins in the current execution frame, or the interpreter of the thread state if no frame is @@ -36,25 +36,25 @@ w_builtins = w_builtins.getdict(space) else: w_builtins = space.builtin.getdict(space) - return borrow_from(None, w_builtins) + return w_builtins # borrowed ref in all cases - at cpython_api([], PyObject, error=CANNOT_FAIL) + at cpython_api([], PyObject, error=CANNOT_FAIL, result_borrowed=True) def PyEval_GetLocals(space): """Return a dictionary of the local variables in the current execution frame, or NULL if no frame is currently executing.""" caller = space.getexecutioncontext().gettopframe_nohidden() if caller is None: return None - return borrow_from(None, caller.getdictscope()) + return caller.getdictscope() # borrowed ref - at cpython_api([], PyObject, error=CANNOT_FAIL) + at cpython_api([], PyObject, error=CANNOT_FAIL, result_borrowed=True) def PyEval_GetGlobals(space): """Return a dictionary of the global variables in the current execution frame, or NULL if no frame is currently executing.""" caller = space.getexecutioncontext().gettopframe_nohidden() if caller is None: return None - return borrow_from(None, caller.get_w_globals()) + return caller.get_w_globals() # borrowed ref @cpython_api([PyCodeObject, PyObject, PyObject], PyObject) def PyEval_EvalCode(space, w_code, w_globals, w_locals): diff --git a/pypy/module/cpyext/funcobject.py b/pypy/module/cpyext/funcobject.py --- a/pypy/module/cpyext/funcobject.py +++ b/pypy/module/cpyext/funcobject.py @@ -3,7 +3,7 @@ PyObjectFields, generic_cpy_call, CONST_STRING, CANNOT_FAIL, Py_ssize_t, cpython_api, bootstrap_function, cpython_struct, build_type_checkers) from pypy.module.cpyext.pyobject import ( - PyObject, make_ref, from_ref, Py_DecRef, make_typedescr, borrow_from) + PyObject, make_ref, from_ref, Py_DecRef, make_typedescr) from rpython.rlib.unroll import unrolling_iterable from pypy.interpreter.error import OperationError from pypy.interpreter.function import Function, Method @@ -83,12 +83,12 @@ from pypy.module.cpyext.object import PyObject_dealloc PyObject_dealloc(space, py_obj) - at cpython_api([PyObject], PyObject) + 
at cpython_api([PyObject], PyObject, result_borrowed=True) def PyFunction_GetCode(space, w_func): """Return the code object associated with the function object op.""" func = space.interp_w(Function, w_func) w_code = space.wrap(func.code) - return borrow_from(w_func, w_code) + return w_code # borrowed ref @cpython_api([PyObject, PyObject, PyObject], PyObject) def PyMethod_New(space, w_func, w_self, w_cls): From pypy.commits at gmail.com Wed Mar 2 16:12:20 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 02 Mar 2016 13:12:20 -0800 (PST) Subject: [pypy-commit] pypy gcstress-hypothesis: moved out test test into new branch Message-ID: <56d75734.6718c20a.34fb4.1b64@mx.google.com> Author: Richard Plangger Branch: gcstress-hypothesis Changeset: r82666:f2615fd00d55 Date: 2016-03-02 22:11 +0100 http://bitbucket.org/pypy/pypy/changeset/f2615fd00d55/ Log: moved out test test into new branch diff --git a/rpython/jit/backend/llsupport/tl/__init__.py b/rpython/jit/backend/llsupport/tl/__init__.py new file mode 100644 diff --git a/rpython/jit/backend/llsupport/tl/code.py b/rpython/jit/backend/llsupport/tl/code.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/llsupport/tl/code.py @@ -0,0 +1,216 @@ + +import struct + +class ByteCode(object): + def encode(self, ctx): + ctx.append_byte(self.BYTE_CODE) + + @classmethod + def create_from(self, draw, get_strategy_for): + pt = getattr(self.__init__, '_param_types', []) + return self(*[draw(get_strategy_for(t)) for t in pt]) + +_c = 0 + +LIST_TYP = 'l' +INT_TYP = 'i' +OBJ_TYP = 'o' +STR_TYP = 's' +VAL_TYP = 'v' # either one of the earlier + +all_types = [INT_TYP, LIST_TYP, STR_TYP] # TODO OBJ_TYP + +SHORT_TYP = 'h' +BYTE_TYP = 'b' +COND_TYP = 'c' +IDX_TYP = 'x' + + +def unique_code(): + global _c + v = _c + _c = v + 1 + return v + +class Context(object): + def __init__(self): + self.consts = {} + self.const_idx = 0 + self.bytecode = [] + + def append_byte(self, byte): + self.bytecode.append(('b', byte)) + + def get_byte(self, i): + typ, byte = self.bytecode[i] + assert typ == 'b' + return byte + + def get_short(self, i): + typ, int = self.bytecode[i] + assert typ == 'h' + return int + + def append_short(self, byte): + self.bytecode.append(('h', byte)) + + def append_int(self, byte): + self.bytecode.append(('i', byte)) + + def const_str(self, str): + self.consts[self.const_idx] = str + self.append_short(self.const_idx) + self.const_idx += 1 + + def to_string(self): + code = [] + for typ, nmr in self.bytecode: + code.append(struct.pack(typ, nmr)) + return ''.join(code) + + def transform(self, code_objs): + for code_obj in code_objs: + code_obj.encode(self) + + return self.to_string(), self.consts + + +def requires_stack(*types): + def method(clazz): + clazz._stack_types = tuple(types) + return clazz + return method + +def leaves_on_stack(*types): + def method(clazz): + clazz._return_on_stack_types = tuple(types) + return clazz + return method + + +def requires_param(*types): + def method(m): + m._param_types = tuple(types) + return m + return method + + at requires_stack() + at leaves_on_stack(INT_TYP) +class PutInt(ByteCode): + BYTE_CODE = unique_code() + @requires_param(INT_TYP) + def __init__(self, value): + self.integral = value + def encode(self, ctx): + ctx.append_byte(self.BYTE_CODE) + ctx.append_int(self.integral) + + at requires_stack(INT_TYP, INT_TYP) + at leaves_on_stack(INT_TYP) +class CompareInt(ByteCode): + BYTE_CODE = unique_code() + def __init__(self): + pass + + at requires_stack() + at leaves_on_stack(STR_TYP) +class 
LoadStr(ByteCode): + BYTE_CODE = unique_code() + @requires_param(STR_TYP) + def __init__(self, string): + self.string = string + def encode(self, ctx): + ctx.append_byte(self.BYTE_CODE) + ctx.const_str(self.string) + + at requires_stack(STR_TYP, STR_TYP) + at leaves_on_stack(STR_TYP) +class AddStr(ByteCode): + BYTE_CODE = unique_code() + def __init__(self): + pass + + at requires_stack(LIST_TYP, LIST_TYP) + at leaves_on_stack(LIST_TYP) +class AddList(ByteCode): + BYTE_CODE = unique_code() + def __init__(self): + pass + + at requires_stack() + at leaves_on_stack(LIST_TYP) +class CreateList(ByteCode): + BYTE_CODE = unique_code() + @requires_param(BYTE_TYP) + def __init__(self, size=8): + self.size = size + def encode(self, ctx): + ctx.append_byte(self.BYTE_CODE) + ctx.append_short(self.size) + + at requires_stack(LIST_TYP, IDX_TYP, INT_TYP) # TODO VAL_TYP + at leaves_on_stack(LIST_TYP) +class InsertList(ByteCode): + BYTE_CODE = unique_code() + def __init__(self): + pass + + at requires_stack(LIST_TYP, IDX_TYP) + at leaves_on_stack(LIST_TYP) +class DelList(ByteCode): + BYTE_CODE = unique_code() + def __init__(self): + pass + + at requires_stack(LIST_TYP, INT_TYP) # TODO VAL_TYP) + at leaves_on_stack(LIST_TYP) +class AppendList(ByteCode): + BYTE_CODE = unique_code() + def __init__(self): + pass + + +# remove comment one by one! + +#@requires_stack() +#@leaves_on_stack(INT_TYP) +#class CondJump(ByteCode): +# BYTE_CODE = unique_code() +# +# COND_EQ = 0 +# COND_LT = 1 +# COND_GT = 2 +# COND_LE = 3 +# COND_GE = 4 +# +# @requires_param(COND_TYP) +# def __init__(self, cond): +# self.cond = cond +# +# def encode(self, ctx): +# ctx.append_byte(self.BYTE_CODE) +# ctx.append_byte(self.cond) +# +#@requires_stack() +#@leaves_on_stack() +#class Jump(ByteCode): +# BYTE_CODE = unique_code() +# def __init__(self): +# pass +# + +#@requires_stack(LIST_TYP) +#@leaves_on_stack(LIST_TYP, INT_TYP) +#class LenList(ByteCode): +# BYTE_CODE = unique_code() +# def __init__(self): +# pass +# +# +#@requires_stack(INT_TYP) # TODO VAL_TYP) +#@leaves_on_stack() +#class ReturnFrame(ByteCode): +# BYTE_CODE = unique_code() +# def __init__(self): +# pass +# diff --git a/rpython/jit/backend/llsupport/tl/interp.py b/rpython/jit/backend/llsupport/tl/interp.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/llsupport/tl/interp.py @@ -0,0 +1,126 @@ +from rpython.rlib.rstruct.runpack import runpack +from rpython.rlib.objectmodel import specialize, always_inline +from rpython.jit.backend.llsupport.tl import code +from rpython.jit.backend.llsupport.tl.stack import Stack +from rpython.rlib import rstring + +class W_Root(object): + pass + +class W_ListObject(W_Root): + def __init__(self, items): + self.items = items + + def concat(self, space, w_lst): + assert isinstance(w_lst, W_ListObject) + return space.wrap(self.items + w_lst.items) + +class W_IntObject(W_Root): + def __init__(self, value): + self.value = value + + def compare(self, space, w_int): + assert isinstance(w_int, W_IntObject) + return space.wrap(self.value - w_int.value) + + def concat(self, space, w_obj): + raise NotImplementedError("cannot concat int with object") + +class W_StrObject(W_Root): + def __init__(self, value): + self.value = value + + def concat(self, space, w_str): + assert isinstance(w_str, W_StrObject) + return space.wrap(self.value + w_str.value) + +class Space(object): + @specialize.argtype(1) + def wrap(self, val): + if isinstance(val, W_Root): + return val + if isinstance(val, int): + return W_IntObject(val) + if isinstance(val, str): + 
return W_StrObject(val) + if isinstance(val, unicode): + return W_StrObject(val.encode('utf-8')) + if isinstance(val, list): + return W_ListObject(val) + raise NotImplementedError("cannot handle: " + str(val)) + +def _read_all_from_file(file): + with open(file, 'rb') as fd: + return fd.read() + +_read_bytecode_from_file = _read_all_from_file + +def _read_consts_from_file(file): + consts = [] + bytestring = _read_all_from_file(file) + for line in bytestring.splitlines(): + consts.append(rstring.replace(line, "\\n", "\n")) + return consts + +def entry_point(argv): + bytecode = _read_bytecode_from_file(argv[1]) + consts = _read_consts_from_file(argv[2]) + print(bytecode) + print(consts) + pc = 0 + end = len(bytecode) + stack = Stack(16) + space = Space() + while pc < end: + pc = dispatch_once(space, pc, bytecode, consts, stack) + return 0 + + at always_inline +def dispatch_once(space, i, bytecode, consts, stack): + opcode = ord(bytecode[i]) + if opcode == code.PutInt.BYTE_CODE: + integral = runpack('i', bytecode[i+1:i+5]) + stack.append(space.wrap(integral)) + i += 4 + elif opcode == code.CompareInt.BYTE_CODE: + w_int2 = stack.pop() + w_int1 = stack.pop() + stack.append(w_int1.compare(space, w_int2)) + elif opcode == code.LoadStr.BYTE_CODE: + pos = runpack('h', bytecode[i+1:i+3]) + w_str = space.wrap(consts[pos]) + stack.append(w_str) + i += 2 + elif opcode == code.AddStr.BYTE_CODE: + w_str2 = stack.pop() + w_str1 = stack.pop() + stack.append(w_str1.concat(space, w_str2)) + elif opcode == code.AddList.BYTE_CODE: + w_lst2 = stack.pop() + w_lst1 = stack.pop() + stack.append(w_lst1.concat(space, w_lst2)) + elif opcode == code.CreateList.BYTE_CODE: + size = runpack('h', bytecode[i+1:i+3]) + stack.append(space.wrap([None] * size)) + i += 2 + elif opcode == code.AppendList.BYTE_CODE: + w_val = stack.pop() + w_lst = stack.peek(0) + w_lst.items.append(w_val) + elif opcode == code.InsertList.BYTE_CODE: + w_val = stack.pop() + w_idx = stack.pop() + assert isinstance(w_idx, W_IntObject) + w_lst = stack.peek(0) + w_lst.items[w_idx.value] = w_val + # index error, just crash here! + elif opcode == code.DelList.BYTE_CODE: + w_idx = stack.pop() + assert isinstance(w_idx, W_IntObject) + w_lst = stack.peek(0) + del w_lst.items[w_idx.value] + # index error, just crash the machine!! 
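# Illustrative round-trip, not part of the changeset: Context (code.py) packs
# each opcode/operand with struct format characters ('b', 'h', 'i') and
# dispatch_once above unpacks them with runpack using the same formats, so a
# small list program can be encoded and run untranslated, much like the tests
# added in this changeset do (assumes the rpython tree is on sys.path):
from rpython.jit.backend.llsupport.tl import code, interp
from rpython.jit.backend.llsupport.tl.stack import Stack

prog, consts = code.Context().transform(
    [code.CreateList(2), code.PutInt(7), code.AppendList()])
space, stack, pc = interp.Space(), Stack(16), 0
while pc < len(prog):
    pc = interp.dispatch_once(space, pc, prog, consts, stack)
assert stack.peek(0).items[-1].value == 7   # the appended W_IntObject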
+ else: + print("opcode %d is not implemented" % opcode) + raise NotImplementedError + return i + 1 diff --git a/rpython/jit/backend/llsupport/tl/stack.py b/rpython/jit/backend/llsupport/tl/stack.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/llsupport/tl/stack.py @@ -0,0 +1,66 @@ +from rpython.rlib.jit import JitDriver, hint, dont_look_inside, promote + +class Stack(object): + _virtualizable_ = ['stackpos', 'stack[*]'] + + def __init__(self, size): + self = hint(self, access_directly=True, fresh_virtualizable=True) + self.stack = [None] * size + self.stackpos = 0 # always store a known-nonneg integer here + + def size(self): + return self.stackpos + + def append(self, elem): + while len(self.stack) <= self.stackpos: + self.stack.append(None) + self.stack[self.stackpos] = elem + self.stackpos += 1 + + def peek(self, i): + stackpos = self.stackpos - i - 1 + if stackpos < 0: + raise IndexError + return self.stack[stackpos] + + def pop(self): + stackpos = self.stackpos - 1 + if stackpos < 0: + raise IndexError + self.stackpos = stackpos # always store a known-nonneg integer here + return self.stack[stackpos] + + def pick(self, i): + n = self.stackpos - i - 1 + assert n >= 0 + self.append(self.stack[n]) + + def put(self, i): + elem = self.pop() + n = self.stackpos - i - 1 + assert n >= 0 + self.stack[n] = elem + + @dont_look_inside + def roll(self, r): + if r < -1: + i = self.stackpos + r + if i < 0: + raise IndexError + n = self.stackpos - 1 + assert n >= 0 + elem = self.stack[n] + for j in range(self.stackpos - 2, i - 1, -1): + assert j >= 0 + self.stack[j + 1] = self.stack[j] + self.stack[i] = elem + elif r > 1: + i = self.stackpos - r + if i < 0: + raise IndexError + elem = self.stack[i] + for j in range(i, self.stackpos - 1): + self.stack[j] = self.stack[j + 1] + n = self.stackpos - 1 + assert n >= 0 + self.stack[n] = elem diff --git a/rpython/jit/backend/llsupport/tl/test/__init__.py b/rpython/jit/backend/llsupport/tl/test/__init__.py new file mode 100644 diff --git a/rpython/jit/backend/llsupport/tl/test/code_strategies.py b/rpython/jit/backend/llsupport/tl/test/code_strategies.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/llsupport/tl/test/code_strategies.py @@ -0,0 +1,75 @@ +from hypothesis import strategies as st +from hypothesis.control import assume +from hypothesis.strategies import defines_strategy, composite +from rpython.jit.backend.llsupport.tl import code, interp, stack +from rpython.jit.backend.llsupport.tl.code import (all_types, + INT_TYP, STR_TYP, LIST_TYP, SHORT_TYP, BYTE_TYP, + COND_TYP, IDX_TYP) +from hypothesis.searchstrategy.strategies import OneOfStrategy +from hypothesis.searchstrategy.collections import TupleStrategy + +def get_strategy_for(typ): + if typ == INT_TYP: + return st.integers(min_value=-2**31, max_value=2**31-1) + elif typ == IDX_TYP: + return st.integers(min_value=-2**31, max_value=2**31-1) + elif typ == SHORT_TYP: + return st.integers(min_value=-2**15, max_value=2**15-1) + elif typ == BYTE_TYP: + return st.integers(min_value=-2**7, max_value=2**7-1) + elif typ == COND_TYP: + return st.integers(min_value=0, max_value=4) + elif typ == STR_TYP: + return st.text() + elif typ == LIST_TYP: + return st.lists(elements=st.one_of(st.integers())) # TODO must be recursive + else: + raise NotImplementedError("type: " + str(typ)) + +STD_SPACE = interp.Space() + + at composite +def runtime_stack(draw, clazz): + strats = [get_strategy_for(t) for t in clazz._stack_types] + stack_obj = stack.Stack(len(strats)) + for i,strat in 
enumerate(strats): + if clazz._stack_types[i] == IDX_TYP: + # it is only valid to access a list with a valid index! + w_list = stack_obj.peek(i-1) + l = len(w_list.items) + assume(l > 0) + integrals = st.integers(min_value=0, max_value=l-1) + stack_obj.append(STD_SPACE.wrap(draw(integrals))) + continue + stack_obj.append(STD_SPACE.wrap(draw(strat))) + return stack_obj + +def byte_code_classes(): + for name, clazz in code.__dict__.items(): + if hasattr(clazz, 'BYTE_CODE'): + yield clazz + +def get_byte_code_class(num): + for clazz in byte_code_classes(): + if clazz.BYTE_CODE == num: + return clazz + return None + + at composite +def single_bytecode(draw, + clazzes=st.sampled_from(byte_code_classes()), + integrals=st.integers(), texts=st.text()): + clazz = draw(clazzes) + inst = clazz.create_from(draw, get_strategy_for) + bytecode, consts = code.Context().transform([inst]) + _stack = draw(runtime_stack(clazz)) + return bytecode, consts, _stack + + at composite +def bytecode_block(draw, + clazzes=st.sampled_from(byte_code_classes()), + integrals=st.integers(), texts=st.text()): + clazz = draw(clazzes) + inst = clazz.create_from(draw, get_strategy_for) + bytecode, consts = code.Context().transform([inst]) + return bytecode, consts diff --git a/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py b/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py @@ -0,0 +1,43 @@ +import py +from hypothesis import given +from rpython.jit.backend.llsupport.tl import code, interp +from rpython.jit.backend.llsupport.tl.stack import Stack +from rpython.jit.backend.llsupport.tl.test import code_strategies as st + +class TestByteCode(object): + def test_load_str(self): + c = code.Context() + code.LoadStr("hello world").encode(c) + assert c.consts[0] == "hello world" + assert c.get_byte(0) == code.LoadStr.BYTE_CODE + assert c.get_short(1) == 0 + + def test_str_add(self): + c = code.Context() + code.LoadStr("hello").encode(c) + code.LoadStr("world").encode(c) + code.AddStr().encode(c) + assert len(c.consts) == 2 + assert c.get_byte(4) == code.AddStr.BYTE_CODE + assert c.get_short(3) == 1 + +class TestInterp(object): + @given(st.single_bytecode()) + def test_consume_stack(self, args): + bytecode, consts, stack = args + space = interp.Space() + i = interp.dispatch_once(space, 0, bytecode, consts, stack) + assert i == len(bytecode) + clazz = st.get_byte_code_class(ord(bytecode[0])) + assert stack.size() == len(clazz._return_on_stack_types) + + @given(st.bytecode_block()) + def test_execute_bytecode_block(self, args): + bytecode, consts = args + space = interp.Space() + stack = Stack(16) + pc = 0 + end = len(bytecode) + while pc < end: + pc = interp.dispatch_once(space, pc, bytecode, consts, stack) + assert pc == len(bytecode) diff --git a/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py b/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py @@ -0,0 +1,62 @@ +import py +from hypothesis import given +from rpython.tool.udir import udir +from rpython.jit.metainterp.optimize import SpeculativeError +from rpython.annotator.listdef import s_list_of_strings +from rpython.translator.translator import TranslationContext +from rpython.translator.c import genc +from rpython.jit.backend.llsupport.tl import interp +from rpython.jit.backend.llsupport.tl.test import code_strategies as st + +def persist(type, contents): 
+ dir = udir.ensure(type) + print "written", type, "to", dir + with open(dir.strpath, 'wb') as fd: + fd.write(contents) + return dir.strpath + +def persist_constants(consts): + contents = "" + for string in consts: + contents += string.replace("\n", "\\n") + "\n" + return persist('constants', contents) + +def persist_bytecode(bc): + return persist('bytecode', bc) + +class GCHypothesis(object): + builder = None + def setup_method(self, name): + if self.builder: + return + + t = TranslationContext() + t.config.translation.gc = "incminimark" + t.config.translation.gcremovetypeptr = True + ann = t.buildannotator() + ann.build_types(interp.entry_point, [s_list_of_strings], main_entry_point=True) + rtyper = t.buildrtyper() + rtyper.specialize() + + cbuilder = genc.CStandaloneBuilder(t, interp.entry_point, t.config) + cbuilder.generate_source(defines=cbuilder.DEBUG_DEFINES) + cbuilder.compile() + # prevent from rebuilding the c object! + self.builder = cbuilder + + def execute(self, bytecode, consts): + exe = self.builder.executable_name + bc_file = persist_bytecode(bytecode) + consts_file = persist_constants(consts) + args = [bc_file, consts_file] + env = {} + res = self.builder.translator.platform.execute(exe, args, env=env) + return res.returncode, res.out, res.err + + @given(st.bytecode_block()) + def test_execute_single_bytecode(self, program): + bytecode, consts = program + result, out, err = self.execute(bytecode, consts) + if result != 0: + raise Exception(("could not run program. returned %d" + " stderr:\n%s\nstdout:\n%s\n") % (result, err, out)) diff --git a/rpython/jit/backend/x86/test/test_zrpy_gc_hypo.py b/rpython/jit/backend/x86/test/test_zrpy_gc_hypo.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/x86/test/test_zrpy_gc_hypo.py @@ -0,0 +1,6 @@ +from rpython.jit.backend.llsupport.tl.test.zrpy_gc_hypo_test import GCHypothesis + +class TestGCHypothesis(GCHypothesis): + # runs ../../llsupport/tl/test/zrpy_gc_hypo_test.py + gcrootfinder = "shadowstack" + gc = "incminimark" From pypy.commits at gmail.com Wed Mar 2 16:14:34 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 02 Mar 2016 13:14:34 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: removed test files that where moved to gcstress-hypothesis Message-ID: <56d757ba.8e811c0a.48822.3f5c@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82667:178b2f4db5aa Date: 2016-03-02 22:13 +0100 http://bitbucket.org/pypy/pypy/changeset/178b2f4db5aa/ Log: removed test files that where moved to gcstress-hypothesis diff --git a/rpython/jit/backend/llsupport/tl/__init__.py b/rpython/jit/backend/llsupport/tl/__init__.py deleted file mode 100644 diff --git a/rpython/jit/backend/llsupport/tl/code.py b/rpython/jit/backend/llsupport/tl/code.py deleted file mode 100644 --- a/rpython/jit/backend/llsupport/tl/code.py +++ /dev/null @@ -1,216 +0,0 @@ - -import struct - -class ByteCode(object): - def encode(self, ctx): - ctx.append_byte(self.BYTE_CODE) - - @classmethod - def create_from(self, draw, get_strategy_for): - pt = getattr(self.__init__, '_param_types', []) - return self(*[draw(get_strategy_for(t)) for t in pt]) - -_c = 0 - -LIST_TYP = 'l' -INT_TYP = 'i' -OBJ_TYP = 'o' -STR_TYP = 's' -VAL_TYP = 'v' # either one of the earlier - -all_types = [INT_TYP, LIST_TYP, STR_TYP] # TODO OBJ_TYP - -SHORT_TYP = 'h' -BYTE_TYP = 'b' -COND_TYP = 'c' -IDX_TYP = 'x' - - -def unique_code(): - global _c - v = _c - _c = v + 1 - return v - -class Context(object): - def __init__(self): - self.consts = {} - self.const_idx 
= 0 - self.bytecode = [] - - def append_byte(self, byte): - self.bytecode.append(('b', byte)) - - def get_byte(self, i): - typ, byte = self.bytecode[i] - assert typ == 'b' - return byte - - def get_short(self, i): - typ, int = self.bytecode[i] - assert typ == 'h' - return int - - def append_short(self, byte): - self.bytecode.append(('h', byte)) - - def append_int(self, byte): - self.bytecode.append(('i', byte)) - - def const_str(self, str): - self.consts[self.const_idx] = str - self.append_short(self.const_idx) - self.const_idx += 1 - - def to_string(self): - code = [] - for typ, nmr in self.bytecode: - code.append(struct.pack(typ, nmr)) - return ''.join(code) - - def transform(self, code_objs): - for code_obj in code_objs: - code_obj.encode(self) - - return self.to_string(), self.consts - - -def requires_stack(*types): - def method(clazz): - clazz._stack_types = tuple(types) - return clazz - return method - -def leaves_on_stack(*types): - def method(clazz): - clazz._return_on_stack_types = tuple(types) - return clazz - return method - - -def requires_param(*types): - def method(m): - m._param_types = tuple(types) - return m - return method - - at requires_stack() - at leaves_on_stack(INT_TYP) -class PutInt(ByteCode): - BYTE_CODE = unique_code() - @requires_param(INT_TYP) - def __init__(self, value): - self.integral = value - def encode(self, ctx): - ctx.append_byte(self.BYTE_CODE) - ctx.append_int(self.integral) - - at requires_stack(INT_TYP, INT_TYP) - at leaves_on_stack(INT_TYP) -class CompareInt(ByteCode): - BYTE_CODE = unique_code() - def __init__(self): - pass - - at requires_stack() - at leaves_on_stack(STR_TYP) -class LoadStr(ByteCode): - BYTE_CODE = unique_code() - @requires_param(STR_TYP) - def __init__(self, string): - self.string = string - def encode(self, ctx): - ctx.append_byte(self.BYTE_CODE) - ctx.const_str(self.string) - - at requires_stack(STR_TYP, STR_TYP) - at leaves_on_stack(STR_TYP) -class AddStr(ByteCode): - BYTE_CODE = unique_code() - def __init__(self): - pass - - at requires_stack(LIST_TYP, LIST_TYP) - at leaves_on_stack(LIST_TYP) -class AddList(ByteCode): - BYTE_CODE = unique_code() - def __init__(self): - pass - - at requires_stack() - at leaves_on_stack(LIST_TYP) -class CreateList(ByteCode): - BYTE_CODE = unique_code() - @requires_param(BYTE_TYP) - def __init__(self, size=8): - self.size = size - def encode(self, ctx): - ctx.append_byte(self.BYTE_CODE) - ctx.append_short(self.size) - - at requires_stack(LIST_TYP, IDX_TYP, INT_TYP) # TODO VAL_TYP - at leaves_on_stack(LIST_TYP) -class InsertList(ByteCode): - BYTE_CODE = unique_code() - def __init__(self): - pass - - at requires_stack(LIST_TYP, IDX_TYP) - at leaves_on_stack(LIST_TYP) -class DelList(ByteCode): - BYTE_CODE = unique_code() - def __init__(self): - pass - - at requires_stack(LIST_TYP, INT_TYP) # TODO VAL_TYP) - at leaves_on_stack(LIST_TYP) -class AppendList(ByteCode): - BYTE_CODE = unique_code() - def __init__(self): - pass - - -# remove comment one by one! 
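# Illustrative note, not part of the diff: the @requires_stack /
# @leaves_on_stack / @requires_param decorators above only attach tuples of
# type tags to each bytecode class, and code_strategies.py reads them back
# (clazz._stack_types, clazz._return_on_stack_types, __init__._param_types)
# to generate a matching runtime stack and constructor arguments.  With the
# surviving gcstress-hypothesis copy of code.py importable, untranslated
# Python sees, for example:
from rpython.jit.backend.llsupport.tl import code
assert code.PutInt._stack_types == ()
assert code.PutInt._return_on_stack_types == (code.INT_TYP,)
assert code.PutInt.__init__._param_types == (code.INT_TYP,)
assert code.InsertList._stack_types == (
    code.LIST_TYP, code.IDX_TYP, code.INT_TYP)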
- -#@requires_stack() -#@leaves_on_stack(INT_TYP) -#class CondJump(ByteCode): -# BYTE_CODE = unique_code() -# -# COND_EQ = 0 -# COND_LT = 1 -# COND_GT = 2 -# COND_LE = 3 -# COND_GE = 4 -# -# @requires_param(COND_TYP) -# def __init__(self, cond): -# self.cond = cond -# -# def encode(self, ctx): -# ctx.append_byte(self.BYTE_CODE) -# ctx.append_byte(self.cond) -# -#@requires_stack() -#@leaves_on_stack() -#class Jump(ByteCode): -# BYTE_CODE = unique_code() -# def __init__(self): -# pass -# - -#@requires_stack(LIST_TYP) -#@leaves_on_stack(LIST_TYP, INT_TYP) -#class LenList(ByteCode): -# BYTE_CODE = unique_code() -# def __init__(self): -# pass -# -# -#@requires_stack(INT_TYP) # TODO VAL_TYP) -#@leaves_on_stack() -#class ReturnFrame(ByteCode): -# BYTE_CODE = unique_code() -# def __init__(self): -# pass -# diff --git a/rpython/jit/backend/llsupport/tl/interp.py b/rpython/jit/backend/llsupport/tl/interp.py deleted file mode 100644 --- a/rpython/jit/backend/llsupport/tl/interp.py +++ /dev/null @@ -1,126 +0,0 @@ -from rpython.rlib.rstruct.runpack import runpack -from rpython.rlib.objectmodel import specialize, always_inline -from rpython.jit.backend.llsupport.tl import code -from rpython.jit.backend.llsupport.tl.stack import Stack -from rpython.rlib import rstring - -class W_Root(object): - pass - -class W_ListObject(W_Root): - def __init__(self, items): - self.items = items - - def concat(self, space, w_lst): - assert isinstance(w_lst, W_ListObject) - return space.wrap(self.items + w_lst.items) - -class W_IntObject(W_Root): - def __init__(self, value): - self.value = value - - def compare(self, space, w_int): - assert isinstance(w_int, W_IntObject) - return space.wrap(self.value - w_int.value) - - def concat(self, space, w_obj): - raise NotImplementedError("cannot concat int with object") - -class W_StrObject(W_Root): - def __init__(self, value): - self.value = value - - def concat(self, space, w_str): - assert isinstance(w_str, W_StrObject) - return space.wrap(self.value + w_str.value) - -class Space(object): - @specialize.argtype(1) - def wrap(self, val): - if isinstance(val, W_Root): - return val - if isinstance(val, int): - return W_IntObject(val) - if isinstance(val, str): - return W_StrObject(val) - if isinstance(val, unicode): - return W_StrObject(val.encode('utf-8')) - if isinstance(val, list): - return W_ListObject(val) - raise NotImplementedError("cannot handle: " + str(val)) - -def _read_all_from_file(file): - with open(file, 'rb') as fd: - return fd.read() - -_read_bytecode_from_file = _read_all_from_file - -def _read_consts_from_file(file): - consts = [] - bytestring = _read_all_from_file(file) - for line in bytestring.splitlines(): - consts.append(rstring.replace(line, "\\n", "\n")) - return consts - -def entry_point(argv): - bytecode = _read_bytecode_from_file(argv[1]) - consts = _read_consts_from_file(argv[2]) - print(bytecode) - print(consts) - pc = 0 - end = len(bytecode) - stack = Stack(16) - space = Space() - while pc < end: - pc = dispatch_once(space, pc, bytecode, consts, stack) - return 0 - - at always_inline -def dispatch_once(space, i, bytecode, consts, stack): - opcode = ord(bytecode[i]) - if opcode == code.PutInt.BYTE_CODE: - integral = runpack('i', bytecode[i+1:i+5]) - stack.append(space.wrap(integral)) - i += 4 - elif opcode == code.CompareInt.BYTE_CODE: - w_int2 = stack.pop() - w_int1 = stack.pop() - stack.append(w_int1.compare(space, w_int2)) - elif opcode == code.LoadStr.BYTE_CODE: - pos = runpack('h', bytecode[i+1:i+3]) - w_str = space.wrap(consts[pos]) - 
stack.append(w_str) - i += 2 - elif opcode == code.AddStr.BYTE_CODE: - w_str2 = stack.pop() - w_str1 = stack.pop() - stack.append(w_str1.concat(space, w_str2)) - elif opcode == code.AddList.BYTE_CODE: - w_lst2 = stack.pop() - w_lst1 = stack.pop() - stack.append(w_lst1.concat(space, w_lst2)) - elif opcode == code.CreateList.BYTE_CODE: - size = runpack('h', bytecode[i+1:i+3]) - stack.append(space.wrap([None] * size)) - i += 2 - elif opcode == code.AppendList.BYTE_CODE: - w_val = stack.pop() - w_lst = stack.peek(0) - w_lst.items.append(w_val) - elif opcode == code.InsertList.BYTE_CODE: - w_val = stack.pop() - w_idx = stack.pop() - assert isinstance(w_idx, W_IntObject) - w_lst = stack.peek(0) - w_lst.items[w_idx.value] = w_val - # index error, just crash here! - elif opcode == code.DelList.BYTE_CODE: - w_idx = stack.pop() - assert isinstance(w_idx, W_IntObject) - w_lst = stack.peek(0) - del w_lst.items[w_idx.value] - # index error, just crash the machine!! - else: - print("opcode %d is not implemented" % opcode) - raise NotImplementedError - return i + 1 diff --git a/rpython/jit/backend/llsupport/tl/stack.py b/rpython/jit/backend/llsupport/tl/stack.py deleted file mode 100644 --- a/rpython/jit/backend/llsupport/tl/stack.py +++ /dev/null @@ -1,66 +0,0 @@ -from rpython.rlib.jit import JitDriver, hint, dont_look_inside, promote - -class Stack(object): - _virtualizable_ = ['stackpos', 'stack[*]'] - - def __init__(self, size): - self = hint(self, access_directly=True, fresh_virtualizable=True) - self.stack = [None] * size - self.stackpos = 0 # always store a known-nonneg integer here - - def size(self): - return self.stackpos - - def append(self, elem): - while len(self.stack) <= self.stackpos: - self.stack.append(None) - self.stack[self.stackpos] = elem - self.stackpos += 1 - - def peek(self, i): - stackpos = self.stackpos - i - 1 - if stackpos < 0: - raise IndexError - return self.stack[stackpos] - - def pop(self): - stackpos = self.stackpos - 1 - if stackpos < 0: - raise IndexError - self.stackpos = stackpos # always store a known-nonneg integer here - return self.stack[stackpos] - - def pick(self, i): - n = self.stackpos - i - 1 - assert n >= 0 - self.append(self.stack[n]) - - def put(self, i): - elem = self.pop() - n = self.stackpos - i - 1 - assert n >= 0 - self.stack[n] = elem - - @dont_look_inside - def roll(self, r): - if r < -1: - i = self.stackpos + r - if i < 0: - raise IndexError - n = self.stackpos - 1 - assert n >= 0 - elem = self.stack[n] - for j in range(self.stackpos - 2, i - 1, -1): - assert j >= 0 - self.stack[j + 1] = self.stack[j] - self.stack[i] = elem - elif r > 1: - i = self.stackpos - r - if i < 0: - raise IndexError - elem = self.stack[i] - for j in range(i, self.stackpos - 1): - self.stack[j] = self.stack[j + 1] - n = self.stackpos - 1 - assert n >= 0 - self.stack[n] = elem diff --git a/rpython/jit/backend/llsupport/tl/test/__init__.py b/rpython/jit/backend/llsupport/tl/test/__init__.py deleted file mode 100644 diff --git a/rpython/jit/backend/llsupport/tl/test/code_strategies.py b/rpython/jit/backend/llsupport/tl/test/code_strategies.py deleted file mode 100644 --- a/rpython/jit/backend/llsupport/tl/test/code_strategies.py +++ /dev/null @@ -1,75 +0,0 @@ -from hypothesis import strategies as st -from hypothesis.control import assume -from hypothesis.strategies import defines_strategy, composite -from rpython.jit.backend.llsupport.tl import code, interp, stack -from rpython.jit.backend.llsupport.tl.code import (all_types, - INT_TYP, STR_TYP, LIST_TYP, SHORT_TYP, BYTE_TYP, 
- COND_TYP, IDX_TYP) -from hypothesis.searchstrategy.strategies import OneOfStrategy -from hypothesis.searchstrategy.collections import TupleStrategy - -def get_strategy_for(typ): - if typ == INT_TYP: - return st.integers(min_value=-2**31, max_value=2**31-1) - elif typ == IDX_TYP: - return st.integers(min_value=-2**31, max_value=2**31-1) - elif typ == SHORT_TYP: - return st.integers(min_value=-2**15, max_value=2**15-1) - elif typ == BYTE_TYP: - return st.integers(min_value=-2**7, max_value=2**7-1) - elif typ == COND_TYP: - return st.integers(min_value=0, max_value=4) - elif typ == STR_TYP: - return st.text() - elif typ == LIST_TYP: - return st.lists(elements=st.one_of(st.integers())) # TODO must be recursive - else: - raise NotImplementedError("type: " + str(typ)) - -STD_SPACE = interp.Space() - - at composite -def runtime_stack(draw, clazz): - strats = [get_strategy_for(t) for t in clazz._stack_types] - stack_obj = stack.Stack(len(strats)) - for i,strat in enumerate(strats): - if clazz._stack_types[i] == IDX_TYP: - # it is only valid to access a list with a valid index! - w_list = stack_obj.peek(i-1) - l = len(w_list.items) - assume(l > 0) - integrals = st.integers(min_value=0, max_value=l-1) - stack_obj.append(STD_SPACE.wrap(draw(integrals))) - continue - stack_obj.append(STD_SPACE.wrap(draw(strat))) - return stack_obj - -def byte_code_classes(): - for name, clazz in code.__dict__.items(): - if hasattr(clazz, 'BYTE_CODE'): - yield clazz - -def get_byte_code_class(num): - for clazz in byte_code_classes(): - if clazz.BYTE_CODE == num: - return clazz - return None - - at composite -def single_bytecode(draw, - clazzes=st.sampled_from(byte_code_classes()), - integrals=st.integers(), texts=st.text()): - clazz = draw(clazzes) - inst = clazz.create_from(draw, get_strategy_for) - bytecode, consts = code.Context().transform([inst]) - _stack = draw(runtime_stack(clazz)) - return bytecode, consts, _stack - - at composite -def bytecode_block(draw, - clazzes=st.sampled_from(byte_code_classes()), - integrals=st.integers(), texts=st.text()): - clazz = draw(clazzes) - inst = clazz.create_from(draw, get_strategy_for) - bytecode, consts = code.Context().transform([inst]) - return bytecode, consts diff --git a/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py b/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py deleted file mode 100644 --- a/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py +++ /dev/null @@ -1,43 +0,0 @@ -import py -from hypothesis import given -from rpython.jit.backend.llsupport.tl import code, interp -from rpython.jit.backend.llsupport.tl.stack import Stack -from rpython.jit.backend.llsupport.tl.test import code_strategies as st - -class TestByteCode(object): - def test_load_str(self): - c = code.Context() - code.LoadStr("hello world").encode(c) - assert c.consts[0] == "hello world" - assert c.get_byte(0) == code.LoadStr.BYTE_CODE - assert c.get_short(1) == 0 - - def test_str_add(self): - c = code.Context() - code.LoadStr("hello").encode(c) - code.LoadStr("world").encode(c) - code.AddStr().encode(c) - assert len(c.consts) == 2 - assert c.get_byte(4) == code.AddStr.BYTE_CODE - assert c.get_short(3) == 1 - -class TestInterp(object): - @given(st.single_bytecode()) - def test_consume_stack(self, args): - bytecode, consts, stack = args - space = interp.Space() - i = interp.dispatch_once(space, 0, bytecode, consts, stack) - assert i == len(bytecode) - clazz = code.get_byte_code_class(ord(bytecode[0])) - assert stack.size() == len(clazz._return_on_stack_types) - - 
@given(st.bytecode_block()) - def test_execute_bytecode_block(self, args): - bytecode, consts = args - space = interp.Space() - stack = Stack(16) - pc = 0 - end = len(bytecode) - while pc < end: - pc = interp.dispatch_once(space, pc, bytecode, consts, stack) - assert pc == len(bytecode) diff --git a/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py b/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py deleted file mode 100644 --- a/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py +++ /dev/null @@ -1,62 +0,0 @@ -import py -from hypothesis import given -from rpython.tool.udir import udir -from rpython.jit.metainterp.optimize import SpeculativeError -from rpython.annotator.listdef import s_list_of_strings -from rpython.translator.translator import TranslationContext -from rpython.translator.c import genc -from rpython.jit.backend.llsupport.tl import interp -from rpython.jit.backend.llsupport.tl.test import code_strategies as st - -def persist(type, contents): - dir = udir.ensure(type) - print "written", type, "to", dir - with open(dir.strpath, 'wb') as fd: - fd.write(contents) - return dir.strpath - -def persist_constants(consts): - contents = "" - for string in consts: - contents += string.replace("\n", "\\n") + "\n" - return persist('constants', contents) - -def persist_bytecode(bc): - return persist('bytecode', bc) - -class GCHypothesis(object): - builder = None - def setup_method(self, name): - if self.builder: - return - - t = TranslationContext() - t.config.translation.gc = "incminimark" - t.config.translation.gcremovetypeptr = True - ann = t.buildannotator() - ann.build_types(interp.entry_point, [s_list_of_strings], main_entry_point=True) - rtyper = t.buildrtyper() - rtyper.specialize() - - cbuilder = genc.CStandaloneBuilder(t, interp.entry_point, t.config) - cbuilder.generate_source(defines=cbuilder.DEBUG_DEFINES) - cbuilder.compile() - # prevent from rebuilding the c object! - self.builder = cbuilder - - def execute(self, bytecode, consts): - exe = self.builder.executable_name - bc_file = persist_bytecode(bytecode) - consts_file = persist_constants(consts) - args = [bc_file, consts_file] - env = {} - res = self.builder.translator.platform.execute(exe, args, env=env) - return res.returncode, res.out, res.err - - @given(st.bytecode_block()) - def test_execute_single_bytecode(self, program): - bytecode, consts = program - result, out, err = self.execute(bytecode, consts) - if result != 0: - raise Exception(("could not run program. 
returned %d" - " stderr:\n%s\nstdout:\n%s\n") % (result, err, out)) diff --git a/rpython/jit/backend/x86/test/test_zrpy_gc_hypo.py b/rpython/jit/backend/x86/test/test_zrpy_gc_hypo.py deleted file mode 100644 --- a/rpython/jit/backend/x86/test/test_zrpy_gc_hypo.py +++ /dev/null @@ -1,6 +0,0 @@ -from rpython.jit.backend.llsupport.tl.test.zrpy_gc_hypo_test import GCHypothesis - -class TestGCHypothesis(GCHypothesis): - # runs ../../llsupport/tl/test/zrpy_gc_hypo_test.py - gcrootfinder = "shadowstack" - gc = "incminimark" From pypy.commits at gmail.com Thu Mar 3 05:52:37 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 03 Mar 2016 02:52:37 -0800 (PST) Subject: [pypy-commit] pypy default: disable the setdefault test on kwargs dicts, it doesn't make sense there Message-ID: <56d81775.d4e41c0a.e0c02.1a84@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82670:618d06ca2419 Date: 2016-03-02 12:41 +0100 http://bitbucket.org/pypy/pypy/changeset/618d06ca2419/ Log: disable the setdefault test on kwargs dicts, it doesn't make sense there diff --git a/pypy/objspace/std/test/test_kwargsdict.py b/pypy/objspace/std/test/test_kwargsdict.py --- a/pypy/objspace/std/test/test_kwargsdict.py +++ b/pypy/objspace/std/test/test_kwargsdict.py @@ -118,10 +118,16 @@ def test_delitem(self): pass # delitem devolves for now + def test_setdefault_fast(self): + pass # not based on hashing at all + class TestDevolvedKwargsDictImplementation(BaseTestDevolvedDictImplementation): get_impl = get_impl StrategyClass = KwargsDictStrategy + def test_setdefault_fast(self): + pass # not based on hashing at all + class AppTestKwargsDictStrategy(object): def setup_class(cls): From pypy.commits at gmail.com Thu Mar 3 05:52:39 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 03 Mar 2016 02:52:39 -0800 (PST) Subject: [pypy-commit] pypy default: test a sensible argument to the --jit option Message-ID: <56d81777.11301c0a.922d9.1b4b@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82671:634581f24c02 Date: 2016-03-02 17:21 +0100 http://bitbucket.org/pypy/pypy/changeset/634581f24c02/ Log: test a sensible argument to the --jit option diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -133,7 +133,7 @@ self.check(['-S', '-tO', '--info'], {}, output_contains='translation') self.check(['-S', '-tO', '--version'], {}, output_contains='Python') self.check(['-S', '-tOV'], {}, output_contains='Python') - self.check(['--jit', 'foobar', '-S'], {}, sys_argv=[''], + self.check(['--jit', 'off', '-S'], {}, sys_argv=[''], run_stdin=True, no_site=1) self.check(['-c', 'pass'], {}, sys_argv=['-c'], run_command='pass') self.check(['-cpass'], {}, sys_argv=['-c'], run_command='pass') From pypy.commits at gmail.com Thu Mar 3 05:53:57 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 03 Mar 2016 02:53:57 -0800 (PST) Subject: [pypy-commit] pypy speed-up-stringsearch: try to split ll_search into two elidable functions, one of which only depends on the search string Message-ID: <56d817c5.c711c30a.329b7.fffff102@mx.google.com> Author: Carl Friedrich Bolz Branch: speed-up-stringsearch Changeset: r82672:37cc19e3dcb4 Date: 2016-02-27 17:01 +0100 http://bitbucket.org/pypy/pypy/changeset/37cc19e3dcb4/ Log: try to split ll_search into two elidable functions, one of which only depends on the search string diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- 
a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -721,12 +721,9 @@ return res @staticmethod - @jit.elidable def ll_search(s1, s2, start, end, mode): - count = 0 n = end - start m = len(s2.chars) - if m == 0: if mode == FAST_COUNT: return end - start + 1 @@ -743,74 +740,112 @@ return -1 mlast = m - 1 + + if mode != FAST_RFIND: + skip, mask = LLHelpers._precompute_skip_mask_forward(s2) + return LLHelpers._str_search_forward(s1, s2, start, end, mode, skip, mask) + else: + skip, mask = LLHelpers._precompute_skip_mask_backward(s2) + return LLHelpers._str_search_backward(s1, s2, start, end, skip, mask) + + @staticmethod + @jit.elidable + def _precompute_skip_mask_forward(s2): + mlast = len(s2.chars) - 1 skip = mlast - 1 mask = 0 + lastchar = s2.chars[mlast] + for i in range(mlast): + mask = bloom_add(mask, s2.chars[i]) + if s2.chars[i] == lastchar: + skip = mlast - i - 1 + mask = bloom_add(mask, lastchar) + return skip, mask - if mode != FAST_RFIND: - for i in range(mlast): - mask = bloom_add(mask, s2.chars[i]) - if s2.chars[i] == s2.chars[mlast]: - skip = mlast - i - 1 - mask = bloom_add(mask, s2.chars[mlast]) + @staticmethod + @jit.elidable + def _precompute_skip_mask_backward(s2): + mlast = len(s2.chars) - 1 + skip = mlast - 1 + firstchar = s2.chars[0] + mask = bloom_add(0, firstchar) + for i in range(mlast, 0, -1): + mask = bloom_add(mask, s2.chars[i]) + if s2.chars[i] == firstchar: + skip = i - 1 + return skip, mask - i = start - 1 - while i + 1 <= start + w: - i += 1 - if s1.chars[i + m - 1] == s2.chars[m - 1]: - for j in range(mlast): - if s1.chars[i + j] != s2.chars[j]: - break - else: - if mode != FAST_COUNT: - return i - count += 1 - i += mlast - continue + @staticmethod + @jit.elidable + def _str_search_forward(s1, s2, start, end, mode, skip, mask): + count = 0 + n = end - start + m = len(s2.chars) - if i + m < len(s1.chars): - c = s1.chars[i + m] - else: - c = '\0' - if not bloom(mask, c): - i += m - else: - i += skip + w = n - m + mlast = m - 1 + i = start - 1 + lastchar = s2.chars[mlast] + while i + 1 <= start + w: + i += 1 + if s1.chars[i + m - 1] == lastchar: + for j in range(mlast): + if s1.chars[i + j] != s2.chars[j]: + break else: - if i + m < len(s1.chars): - c = s1.chars[i + m] - else: - c = '\0' - if not bloom(mask, c): - i += m - else: - mask = bloom_add(mask, s2.chars[0]) - for i in range(mlast, 0, -1): - mask = bloom_add(mask, s2.chars[i]) - if s2.chars[i] == s2.chars[0]: - skip = i - 1 + if mode != FAST_COUNT: + return i + count += 1 + i += mlast + continue - i = start + w + 1 - while i - 1 >= start: - i -= 1 - if s1.chars[i] == s2.chars[0]: - for j in xrange(mlast, 0, -1): - if s1.chars[i + j] != s2.chars[j]: - break - else: - return i - if i - 1 >= 0 and not bloom(mask, s1.chars[i - 1]): - i -= m - else: - i -= skip + if i + m < len(s1.chars): + c = s1.chars[i + m] else: - if i - 1 >= 0 and not bloom(mask, s1.chars[i - 1]): - i -= m - + c = '\0' + if not bloom(mask, c): + i += m + else: + i += skip + else: + if i + m < len(s1.chars): + c = s1.chars[i + m] + else: + c = '\0' + if not bloom(mask, c): + i += m if mode != FAST_COUNT: return -1 return count @staticmethod + @jit.elidable + def _str_search_backward(s1, s2, start, end, skip, mask): + n = end - start + m = len(s2.chars) + + w = n - m + mlast = m - 1 + i = start + w + 1 + firstchar = s2.chars[0] + while i - 1 >= start: + i -= 1 + if s1.chars[i] == firstchar: + for j in xrange(mlast, 0, -1): + if s1.chars[i + j] != s2.chars[j]: + break + else: + return i + if i - 1 >= 0 and not 
bloom(mask, s1.chars[i - 1]): + i -= m + else: + i -= skip + else: + if i - 1 >= 0 and not bloom(mask, s1.chars[i - 1]): + i -= m + return -1 + + @staticmethod @signature(types.int(), types.any(), returns=types.any()) @jit.look_inside_iff(lambda length, items: jit.loop_unrolling_heuristic( items, length)) From pypy.commits at gmail.com Thu Mar 3 09:13:26 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 03 Mar 2016 06:13:26 -0800 (PST) Subject: [pypy-commit] pypy default: Fix typo that made a test useless Message-ID: <56d84686.6718c20a.34fb4.4297@mx.google.com> Author: Ronan Lamy Branch: Changeset: r82673:4d2c1de4fbff Date: 2016-03-03 14:12 +0000 http://bitbucket.org/pypy/pypy/changeset/4d2c1de4fbff/ Log: Fix typo that made a test useless diff --git a/rpython/rtyper/test/test_rdict.py b/rpython/rtyper/test/test_rdict.py --- a/rpython/rtyper/test/test_rdict.py +++ b/rpython/rtyper/test/test_rdict.py @@ -1048,7 +1048,7 @@ s_BA_dic = s.items[1] r_AB_dic = rtyper.getrepr(s_AB_dic) - r_BA_dic = rtyper.getrepr(s_AB_dic) + r_BA_dic = rtyper.getrepr(s_BA_dic) assert r_AB_dic.lowleveltype == r_BA_dic.lowleveltype From pypy.commits at gmail.com Thu Mar 3 10:14:44 2016 From: pypy.commits at gmail.com (mattip) Date: Thu, 03 Mar 2016 07:14:44 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: fix uppercase/lowercase issues till test passes -A, exceeds recursion depth untranslated Message-ID: <56d854e4.080a1c0a.d66b7.ffff8042@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r82674:c95852e53e64 Date: 2016-03-02 18:28 -0500 http://bitbucket.org/pypy/pypy/changeset/c95852e53e64/ Log: fix uppercase/lowercase issues till test passes -A, exceeds recursion depth untranslated diff --git a/pypy/module/cpyext/test/foo3.c b/pypy/module/cpyext/test/foo3.c --- a/pypy/module/cpyext/test/foo3.c +++ b/pypy/module/cpyext/test/foo3.c @@ -9,9 +9,9 @@ return newType; } -PyTypeObject Foo3Type_Type = { - PyVarObject_HEAD_INIT(0, 0) - /*tp_name*/ "Foo3.Type", +PyTypeObject footype = { + PyVarObject_HEAD_INIT(NULL, 0) + /*tp_name*/ "foo3.footype", /*tp_basicsize*/ sizeof(PyTypeObject), /*tp_itemsize*/ 0, /*tp_dealloc*/ 0, @@ -40,7 +40,7 @@ /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, - /*tp_base*/ 0, // set to &PyType_Type in module init function (why can it not be done here?) + /*tp_base*/ 0, // set to &PyType_Type in module init function (why can it not be done here?) 
/*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, @@ -69,8 +69,15 @@ PyMODINIT_FUNC initfoo3(void) { - PyObject* mod = Py_InitModule("Foo3", sbkMethods); - Foo3Type_Type.tp_base = &PyType_Type; - PyType_Ready(&Foo3Type_Type); - PyModule_AddObject(mod, "Type", (PyObject*)&Foo3Type_Type); + PyObject *mod, *d; + footype.tp_base = &PyType_Type; + PyType_Ready(&footype); + mod = Py_InitModule("foo3", sbkMethods); + if (mod == NULL) + return; + d = PyModule_GetDict(mod); + if (d == NULL) + return; + if (PyDict_SetItemString(d, "footype", (PyObject *)&footype) < 0) + return; } diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -866,8 +866,8 @@ def test_tp_new_in_subclass_of_type(self): module = self.import_module(name='foo3') - print('calling module.Type()...') - module.Type("X", (object,), {}) + print('calling module.footype()...') + module.footype("X", (object,), {}) def test_app_subclass_of_c_type(self): module = self.import_module(name='foo') From pypy.commits at gmail.com Thu Mar 3 10:14:46 2016 From: pypy.commits at gmail.com (mattip) Date: Thu, 03 Mar 2016 07:14:46 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: fix for tp_new slot Message-ID: <56d854e6.06b01c0a.3dc1a.ffff8de6@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r82675:3e9fab0f7ce6 Date: 2016-03-02 23:25 -0500 http://bitbucket.org/pypy/pypy/changeset/3e9fab0f7ce6/ Log: fix for tp_new slot diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -318,17 +318,6 @@ return space.wrap(generic_cpy_call(space, func_target, w_self, w_other)) - at cpython_api([PyTypeObjectPtr, PyObject, PyObject], PyObject, header=None) -def slot_tp_new(space, type, w_args, w_kwds): - from pypy.module.cpyext.tupleobject import PyTuple_Check - pyo = rffi.cast(PyObject, type) - w_type = from_ref(space, pyo) - w_func = space.getattr(w_type, space.wrap("__new__")) - assert PyTuple_Check(space, w_args) - args_w = [w_type] + space.fixedview(w_args) - w_args_new = space.newtuple(args_w) - return space.call(w_func, w_args_new, w_kwds) - from rpython.rlib.nonconst import NonConstant SLOTS = {} @@ -463,7 +452,16 @@ space.call_args(init_fn, args) return 0 api_func = slot_tp_init.api_func + elif name == 'tp_new': + new_fn = w_type.getdictvalue(space, '__new__') + if new_fn is None: + return + @cpython_api([PyTypeObjectPtr, PyObject, PyObject], PyObject, header=None) + @func_renamer("cpyext_%s_%s" % (name.replace('.', '_'), typedef.name)) + def slot_tp_new(space, type, w_args, w_kwds): + return space.call(w_type, w_args, w_kwds) + api_func = slot_tp_new.api_func else: return From pypy.commits at gmail.com Thu Mar 3 10:14:48 2016 From: pypy.commits at gmail.com (mattip) Date: Thu, 03 Mar 2016 07:14:48 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: PyNumber_Check accepts int as well as float Message-ID: <56d854e8.455e1c0a.fcc49.ffff88f7@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r82676:4d2743ceeb86 Date: 2016-03-03 02:24 -0500 http://bitbucket.org/pypy/pypy/changeset/4d2743ceeb86/ Log: PyNumber_Check accepts int as well as float diff --git a/pypy/module/cpyext/number.py b/pypy/module/cpyext/number.py --- a/pypy/module/cpyext/number.py +++ b/pypy/module/cpyext/number.py @@ -24,7 +24,13 @@ space.float_w(w_obj) return 1 except OperationError: - return 0 + pass + try: + 
space.int_w(w_obj) + return 1 + except OperationError: + pass + return 0 @cpython_api([PyObject, PyObject], Py_ssize_t, error=-1) def PyNumber_AsSsize_t(space, w_obj, w_exc): From pypy.commits at gmail.com Thu Mar 3 10:14:50 2016 From: pypy.commits at gmail.com (mattip) Date: Thu, 03 Mar 2016 07:14:50 -0800 (PST) Subject: [pypy-commit] pypy ndarray-setitem-filtered: fix setitem Message-ID: <56d854ea.d3921c0a.3ab93.184b@mx.google.com> Author: mattip Branch: ndarray-setitem-filtered Changeset: r82677:abeee2a7ee62 Date: 2016-03-03 15:45 +0200 http://bitbucket.org/pypy/pypy/changeset/abeee2a7ee62/ Log: fix setitem diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -22,7 +22,8 @@ from pypy.module.micronumpy.flagsobj import W_FlagsObject from pypy.module.micronumpy.strides import ( get_shape_from_iterable, shape_agreement, shape_agreement_multiple, - is_c_contiguous, is_f_contiguous, calc_strides, new_view, BooleanChunk) + is_c_contiguous, is_f_contiguous, calc_strides, new_view, BooleanChunk, + SliceChunk) from pypy.module.micronumpy.casting import can_cast_array from pypy.module.micronumpy.descriptor import get_dtype_cache @@ -226,8 +227,24 @@ if iter_shape is None: # w_index is a list of slices chunks = self.implementation._prepare_slice_args(space, w_index) - view = new_view(space, self, chunks) - view.implementation.setslice(space, val_arr) + dim = -1 + view = self + for i, c in enumerate(chunks): + if isinstance(c, BooleanChunk): + dim = i + idx = c.w_idx + chunks.pop(i) + chunks.insert(0, SliceChunk(space.newslice(space.wrap(0), + space.w_None, space.w_None))) + break + if dim > 0: + view = self.implementation.swapaxes(space, self, 0, dim) + if dim >= 0: + view = new_view(space, self, chunks) + view.setitem_filter(space, idx, val_arr) + else: + view = new_view(space, self, chunks) + view.implementation.setslice(space, val_arr) return if support.product(iter_shape) == 0: return From pypy.commits at gmail.com Thu Mar 3 10:53:28 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 03 Mar 2016 07:53:28 -0800 (PST) Subject: [pypy-commit] pypy default: Reuse rdict hypothesis test in test_rordereddict.py Message-ID: <56d85df8.12871c0a.e3639.ffff9101@mx.google.com> Author: Ronan Lamy Branch: Changeset: r82678:67aa41de326b Date: 2016-03-03 15:52 +0000 http://bitbucket.org/pypy/pypy/changeset/67aa41de326b/ Log: Reuse rdict hypothesis test in test_rordereddict.py diff --git a/rpython/rtyper/test/test_rdict.py b/rpython/rtyper/test/test_rdict.py --- a/rpython/rtyper/test/test_rdict.py +++ b/rpython/rtyper/test/test_rdict.py @@ -1166,50 +1166,51 @@ st_keys = sampled_from(keytypes_s) st_values = sampled_from(keytypes_s + [SomeString(can_be_None=True)]) -class Space(object): +class MappingSpace(object): def __init__(self, s_key, s_value): self.s_key = s_key self.s_value = s_value rtyper = PseudoRTyper() r_key = s_key.rtyper_makerepr(rtyper) r_value = s_value.rtyper_makerepr(rtyper) - dictrepr = rdict.DictRepr(rtyper, r_key, r_value, + dictrepr = self.MappingRepr(rtyper, r_key, r_value, DictKey(None, s_key), DictValue(None, s_value)) dictrepr.setup() - self.l_dict = rdict.ll_newdict(dictrepr.DICT) - self.reference = {} + self.l_dict = self.newdict(dictrepr) + self.reference = self.new_reference() self.ll_key = r_key.convert_const self.ll_value = r_value.convert_const def setitem(self, key, value): ll_key = self.ll_key(key) ll_value = self.ll_value(value) - 
rdict.ll_dict_setitem(self.l_dict, ll_key, ll_value) + self.ll_setitem(self.l_dict, ll_key, ll_value) self.reference[key] = value - assert rdict.ll_contains(self.l_dict, ll_key) + assert self.ll_contains(self.l_dict, ll_key) def delitem(self, key): ll_key = self.ll_key(key) - rdict.ll_dict_delitem(self.l_dict, ll_key) + self.ll_delitem(self.l_dict, ll_key) del self.reference[key] - assert not rdict.ll_contains(self.l_dict, ll_key) + assert not self.ll_contains(self.l_dict, ll_key) def copydict(self): - self.l_dict = rdict.ll_copy(self.l_dict) + self.l_dict = self.ll_copy(self.l_dict) + assert self.ll_len(self.l_dict) == len(self.reference) def cleardict(self): - rdict.ll_clear(self.l_dict) + self.ll_clear(self.l_dict) self.reference.clear() - assert rdict.ll_dict_len(self.l_dict) == 0 + assert self.ll_len(self.l_dict) == 0 def fullcheck(self): - assert rdict.ll_dict_len(self.l_dict) == len(self.reference) + assert self.ll_len(self.l_dict) == len(self.reference) for key, value in self.reference.iteritems(): - assert (rdict.ll_dict_getitem(self.l_dict, self.ll_key(key)) == + assert (self.ll_getitem(self.l_dict, self.ll_key(key)) == self.ll_value(value)) -class StressTest(GenericStateMachine): +class MappingSM(GenericStateMachine): def __init__(self): self.space = None @@ -1239,7 +1240,7 @@ def execute_step(self, action): if action.method == 'setup': - self.space = Space(*action.args) + self.space = self.Space(*action.args) self.st_keys = ann2strategy(self.space.s_key) self.st_values = ann2strategy(self.space.s_value) return @@ -1250,5 +1251,24 @@ if self.space: self.space.fullcheck() + +class DictSpace(MappingSpace): + MappingRepr = rdict.DictRepr + new_reference = dict + ll_getitem = staticmethod(rdict.ll_dict_getitem) + ll_setitem = staticmethod(rdict.ll_dict_setitem) + ll_delitem = staticmethod(rdict.ll_dict_delitem) + ll_len = staticmethod(rdict.ll_dict_len) + ll_contains = staticmethod(rdict.ll_contains) + ll_copy = staticmethod(rdict.ll_copy) + ll_clear = staticmethod(rdict.ll_clear) + + def newdict(self, repr): + return rdict.ll_newdict(repr.DICT) + +class DictSM(MappingSM): + Space = DictSpace + def test_hypothesis(): - run_state_machine_as_test(StressTest, settings(max_examples=500, stateful_step_count=100)) + run_state_machine_as_test( + DictSM, settings(max_examples=500, stateful_step_count=100)) diff --git a/rpython/rtyper/test/test_rordereddict.py b/rpython/rtyper/test/test_rordereddict.py --- a/rpython/rtyper/test/test_rordereddict.py +++ b/rpython/rtyper/test/test_rordereddict.py @@ -1,14 +1,18 @@ - import py from collections import OrderedDict +from hypothesis import settings +from hypothesis.stateful import run_state_machine_as_test + from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem import rordereddict, rstr from rpython.rlib.rarithmetic import intmask from rpython.rtyper.annlowlevel import llstr, hlstr -from rpython.rtyper.test.test_rdict import BaseTestRDict +from rpython.rtyper.test.test_rdict import ( + BaseTestRDict, MappingSpace, MappingSM) from rpython.rlib import objectmodel +rodct = rordereddict def get_indexes(ll_d): return ll_d.indexes._obj.container._as_ptr() @@ -451,3 +455,50 @@ print 'current dict length:', referencelength assert l_dict.num_live_items == referencelength complete_check() + + +class ODictSpace(MappingSpace): + MappingRepr = rodct.OrderedDictRepr + new_reference = OrderedDict + ll_getitem = staticmethod(rodct.ll_dict_getitem) + ll_setitem = staticmethod(rodct.ll_dict_setitem) + ll_delitem = 
staticmethod(rodct.ll_dict_delitem) + ll_len = staticmethod(rodct.ll_dict_len) + ll_contains = staticmethod(rodct.ll_dict_contains) + ll_copy = staticmethod(rodct.ll_dict_copy) + ll_clear = staticmethod(rodct.ll_dict_clear) + + def newdict(self, repr): + return rodct.ll_newdict(repr.DICT) + + def get_keys(self): + DICT = lltype.typeOf(self.l_dict).TO + ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT)) + ll_iter = rordereddict.ll_dictiter(ITER, self.l_dict) + ll_dictnext = rordereddict._ll_dictnext + keys_ll = [] + while True: + try: + num = ll_dictnext(ll_iter) + keys_ll.append(self.l_dict.entries[num].key) + except StopIteration: + break + return keys_ll + + def fullcheck(self): + # overridden to also check key order + assert self.ll_len(self.l_dict) == len(self.reference) + keys_ll = self.get_keys() + assert len(keys_ll) == len(self.reference) + for key, ll_key in zip(self.reference, keys_ll): + assert self.ll_key(key) == ll_key + assert (self.ll_getitem(self.l_dict, self.ll_key(key)) == + self.ll_value(self.reference[key])) + + +class ODictSM(MappingSM): + Space = ODictSpace + +def test_hypothesis(): + run_state_machine_as_test( + ODictSM, settings(max_examples=500, stateful_step_count=100)) From pypy.commits at gmail.com Thu Mar 3 11:26:15 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 03 Mar 2016 08:26:15 -0800 (PST) Subject: [pypy-commit] pypy default: kill obsolete tests Message-ID: <56d865a7.89bd1c0a.45f78.ffff97a5@mx.google.com> Author: Ronan Lamy Branch: Changeset: r82679:9db8617310dc Date: 2016-03-03 16:25 +0000 http://bitbucket.org/pypy/pypy/changeset/9db8617310dc/ Log: kill obsolete tests diff --git a/rpython/rtyper/test/test_rdict.py b/rpython/rtyper/test/test_rdict.py --- a/rpython/rtyper/test/test_rdict.py +++ b/rpython/rtyper/test/test_rdict.py @@ -59,19 +59,6 @@ yield -def not_really_random(): - """A random-ish generator, which also generates nice patterns from time to time. 
- Could be useful to detect problems associated with specific usage patterns.""" - import random - x = random.random() - print 'random seed: %r' % (x,) - for i in range(12000): - r = 3.4 + i/20000.0 - x = r*x - x*x - assert 0 <= x < 4 - yield x - - class BaseTestRDict(BaseRtypingTest): def test_dict_creation(self): def createdict(i): diff --git a/rpython/rtyper/test/test_rordereddict.py b/rpython/rtyper/test/test_rordereddict.py --- a/rpython/rtyper/test/test_rordereddict.py +++ b/rpython/rtyper/test/test_rordereddict.py @@ -334,129 +334,6 @@ assert res == 6 -class TestStress: - - def test_stress(self): - from rpython.annotator.dictdef import DictKey, DictValue - from rpython.annotator import model as annmodel - from rpython.rtyper import rint - from rpython.rtyper.test.test_rdict import not_really_random - rodct = rordereddict - dictrepr = rodct.OrderedDictRepr( - None, rint.signed_repr, rint.signed_repr, - DictKey(None, annmodel.SomeInteger()), - DictValue(None, annmodel.SomeInteger())) - dictrepr.setup() - l_dict = rodct.ll_newdict(dictrepr.DICT) - referencetable = [None] * 400 - referencelength = 0 - value = 0 - - def complete_check(): - for n, refvalue in zip(range(len(referencetable)), referencetable): - try: - gotvalue = rodct.ll_dict_getitem(l_dict, n) - except KeyError: - assert refvalue is None - else: - assert gotvalue == refvalue - - for x in not_really_random(): - n = int(x*100.0) # 0 <= x < 400 - op = repr(x)[-1] - if op <= '2' and referencetable[n] is not None: - rodct.ll_dict_delitem(l_dict, n) - referencetable[n] = None - referencelength -= 1 - elif op <= '6': - rodct.ll_dict_setitem(l_dict, n, value) - if referencetable[n] is None: - referencelength += 1 - referencetable[n] = value - value += 1 - else: - try: - gotvalue = rodct.ll_dict_getitem(l_dict, n) - except KeyError: - assert referencetable[n] is None - else: - assert gotvalue == referencetable[n] - if 1.38 <= x <= 1.39: - complete_check() - print 'current dict length:', referencelength - assert l_dict.num_live_items == referencelength - complete_check() - - def test_stress_2(self): - yield self.stress_combination, True, False - yield self.stress_combination, False, True - yield self.stress_combination, False, False - yield self.stress_combination, True, True - - def stress_combination(self, key_can_be_none, value_can_be_none): - from rpython.rtyper.lltypesystem.rstr import string_repr - from rpython.annotator.dictdef import DictKey, DictValue - from rpython.annotator import model as annmodel - from rpython.rtyper.test.test_rdict import not_really_random - rodct = rordereddict - - print - print "Testing combination with can_be_None: keys %s, values %s" % ( - key_can_be_none, value_can_be_none) - - class PseudoRTyper: - cache_dummy_values = {} - dictrepr = rodct.OrderedDictRepr( - PseudoRTyper(), string_repr, string_repr, - DictKey(None, annmodel.SomeString(key_can_be_none)), - DictValue(None, annmodel.SomeString(value_can_be_none))) - dictrepr.setup() - print dictrepr.lowleveltype - #for key, value in dictrepr.DICTENTRY._adtmeths.items(): - # print ' %s = %s' % (key, value) - l_dict = rodct.ll_newdict(dictrepr.DICT) - referencetable = [None] * 400 - referencelength = 0 - values = not_really_random() - keytable = [string_repr.convert_const("foo%d" % n) - for n in range(len(referencetable))] - - def complete_check(): - for n, refvalue in zip(range(len(referencetable)), referencetable): - try: - gotvalue = rodct.ll_dict_getitem(l_dict, keytable[n]) - except KeyError: - assert refvalue is None - else: - assert gotvalue 
== refvalue - - for x in not_really_random(): - n = int(x*100.0) # 0 <= x < 400 - op = repr(x)[-1] - if op <= '2' and referencetable[n] is not None: - rodct.ll_dict_delitem(l_dict, keytable[n]) - referencetable[n] = None - referencelength -= 1 - elif op <= '6': - ll_value = string_repr.convert_const(str(values.next())) - rodct.ll_dict_setitem(l_dict, keytable[n], ll_value) - if referencetable[n] is None: - referencelength += 1 - referencetable[n] = ll_value - else: - try: - gotvalue = rodct.ll_dict_getitem(l_dict, keytable[n]) - except KeyError: - assert referencetable[n] is None - else: - assert gotvalue == referencetable[n] - if 1.38 <= x <= 1.39: - complete_check() - print 'current dict length:', referencelength - assert l_dict.num_live_items == referencelength - complete_check() - - class ODictSpace(MappingSpace): MappingRepr = rodct.OrderedDictRepr new_reference = OrderedDict From pypy.commits at gmail.com Thu Mar 3 13:10:19 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 03 Mar 2016 10:10:19 -0800 (PST) Subject: [pypy-commit] pypy default: update cffi/d7ec0dceb9ed Message-ID: <56d87e0b.49f9c20a.c7eb3.ffffa1b9@mx.google.com> Author: Armin Rigo Branch: Changeset: r82680:95d497c4b701 Date: 2016-03-03 18:13 +0100 http://bitbucket.org/pypy/pypy/changeset/95d497c4b701/ Log: update cffi/d7ec0dceb9ed diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py @@ -33,8 +33,12 @@ pythonpath.insert(0, cffi_base) return os.pathsep.join(pythonpath) -def setup_module(mod): - mod.org_env = os.environ.copy() +def copy_away_env(): + global org_env + try: + org_env + except NameError: + org_env = os.environ.copy() class EmbeddingTests: @@ -122,6 +126,7 @@ os.chdir(curdir) def patch_environment(self): + copy_away_env() path = self.get_path() # for libpypy-c.dll or Python27.dll path = os.path.split(sys.executable)[0] + os.path.pathsep + path From pypy.commits at gmail.com Thu Mar 3 13:10:21 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 03 Mar 2016 10:10:21 -0800 (PST) Subject: [pypy-commit] pypy default: Print stderr of the subprocess, as an attempt to gain more info from buildbot Message-ID: <56d87e0d.a3f6c20a.d9d04.ffffa881@mx.google.com> Author: Armin Rigo Branch: Changeset: r82681:084d80e5669d Date: 2016-03-03 18:18 +0100 http://bitbucket.org/pypy/pypy/changeset/084d80e5669d/ Log: Print stderr of the subprocess, as an attempt to gain more info from buildbot diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -68,9 +68,12 @@ pipe.returncode,)) if stderr.startswith('SKIP:'): py.test.skip(stderr) - if stderr.startswith('debug_alloc.h:'): # lldebug builds - stderr = '' + #if stderr.startswith('debug_alloc.h:'): # lldebug builds + # stderr = '' #assert not stderr + if stderr: + print '*** stderr of the subprocess: ***' + print stderr # if discard_stdout_before_last_line: stdout = stdout.splitlines(True)[-1] From pypy.commits at gmail.com Thu Mar 3 13:10:24 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 03 Mar 2016 10:10:24 -0800 (PST) Subject: [pypy-commit] pypy default: Improve the test to really check all arguments and, in case of mismatch, Message-ID: 
<56d87e10.83561c0a.e4d27.ffffbda4@mx.google.com> Author: Armin Rigo Branch: Changeset: r82682:8f3c22550f1a Date: 2016-03-03 18:26 +0100 http://bitbucket.org/pypy/pypy/changeset/8f3c22550f1a/ Log: Improve the test to really check all arguments and, in case of mismatch, to have a more precise error message on buildbot diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -548,7 +548,9 @@ if cpu.supports_floats: def func(f0, f1, f2, f3, f4, f5, f6, i0, f7, i1, f8, f9): + seen.append((f0, f1, f2, f3, f4, f5, f6, i0, f7, i1, f8, f9)) return f0 + f1 + f2 + f3 + f4 + f5 + f6 + float(i0 + i1) + f7 + f8 + f9 + seen = [] F = lltype.Float I = lltype.Signed FUNC = self.FuncType([F] * 7 + [I] + [F] + [I] + [F]* 2, F) @@ -557,13 +559,15 @@ calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo.MOST_GENERAL) funcbox = self.get_funcbox(cpu, func_ptr) - args = ([boxfloat(.1) for i in range(7)] + - [InputArgInt(1), boxfloat(.2), InputArgInt(2), boxfloat(.3), - boxfloat(.4)]) + args = ([boxfloat(.0), boxfloat(.1), boxfloat(.2), boxfloat(.3), + boxfloat(.4), boxfloat(.5), boxfloat(.6), + InputArgInt(1), boxfloat(.7), InputArgInt(2), boxfloat(.8), + boxfloat(.9)]) res = self.execute_operation(rop.CALL_F, [funcbox] + args, 'float', descr=calldescr) - assert abs(longlong.getrealfloat(res) - 4.6) < 0.0001 + assert seen == [(.0, .1, .2, .3, .4, .5, .6, 1, .7, 2, .8, .9)] + assert abs(longlong.getrealfloat(res) - 7.5) < 0.0001 def test_call_many_arguments(self): # Test calling a function with a large number of arguments (more than From pypy.commits at gmail.com Thu Mar 3 13:10:26 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 03 Mar 2016 10:10:26 -0800 (PST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <56d87e12.500f1c0a.66c95.ffffc553@mx.google.com> Author: Armin Rigo Branch: Changeset: r82683:bc2523a1a870 Date: 2016-03-03 19:09 +0100 http://bitbucket.org/pypy/pypy/changeset/bc2523a1a870/ Log: merge heads diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -133,7 +133,7 @@ self.check(['-S', '-tO', '--info'], {}, output_contains='translation') self.check(['-S', '-tO', '--version'], {}, output_contains='Python') self.check(['-S', '-tOV'], {}, output_contains='Python') - self.check(['--jit', 'foobar', '-S'], {}, sys_argv=[''], + self.check(['--jit', 'off', '-S'], {}, sys_argv=[''], run_stdin=True, no_site=1) self.check(['-c', 'pass'], {}, sys_argv=['-c'], run_command='pass') self.check(['-cpass'], {}, sys_argv=['-c'], run_command='pass') diff --git a/pypy/objspace/std/test/test_kwargsdict.py b/pypy/objspace/std/test/test_kwargsdict.py --- a/pypy/objspace/std/test/test_kwargsdict.py +++ b/pypy/objspace/std/test/test_kwargsdict.py @@ -118,10 +118,16 @@ def test_delitem(self): pass # delitem devolves for now + def test_setdefault_fast(self): + pass # not based on hashing at all + class TestDevolvedKwargsDictImplementation(BaseTestDevolvedDictImplementation): get_impl = get_impl StrategyClass = KwargsDictStrategy + def test_setdefault_fast(self): + pass # not based on hashing at all + class AppTestKwargsDictStrategy(object): def setup_class(cls): diff --git a/rpython/rtyper/test/test_rdict.py b/rpython/rtyper/test/test_rdict.py --- a/rpython/rtyper/test/test_rdict.py +++ b/rpython/rtyper/test/test_rdict.py @@ -59,19 
+59,6 @@ yield -def not_really_random(): - """A random-ish generator, which also generates nice patterns from time to time. - Could be useful to detect problems associated with specific usage patterns.""" - import random - x = random.random() - print 'random seed: %r' % (x,) - for i in range(12000): - r = 3.4 + i/20000.0 - x = r*x - x*x - assert 0 <= x < 4 - yield x - - class BaseTestRDict(BaseRtypingTest): def test_dict_creation(self): def createdict(i): @@ -1048,7 +1035,7 @@ s_BA_dic = s.items[1] r_AB_dic = rtyper.getrepr(s_AB_dic) - r_BA_dic = rtyper.getrepr(s_AB_dic) + r_BA_dic = rtyper.getrepr(s_BA_dic) assert r_AB_dic.lowleveltype == r_BA_dic.lowleveltype @@ -1166,50 +1153,51 @@ st_keys = sampled_from(keytypes_s) st_values = sampled_from(keytypes_s + [SomeString(can_be_None=True)]) -class Space(object): +class MappingSpace(object): def __init__(self, s_key, s_value): self.s_key = s_key self.s_value = s_value rtyper = PseudoRTyper() r_key = s_key.rtyper_makerepr(rtyper) r_value = s_value.rtyper_makerepr(rtyper) - dictrepr = rdict.DictRepr(rtyper, r_key, r_value, + dictrepr = self.MappingRepr(rtyper, r_key, r_value, DictKey(None, s_key), DictValue(None, s_value)) dictrepr.setup() - self.l_dict = rdict.ll_newdict(dictrepr.DICT) - self.reference = {} + self.l_dict = self.newdict(dictrepr) + self.reference = self.new_reference() self.ll_key = r_key.convert_const self.ll_value = r_value.convert_const def setitem(self, key, value): ll_key = self.ll_key(key) ll_value = self.ll_value(value) - rdict.ll_dict_setitem(self.l_dict, ll_key, ll_value) + self.ll_setitem(self.l_dict, ll_key, ll_value) self.reference[key] = value - assert rdict.ll_contains(self.l_dict, ll_key) + assert self.ll_contains(self.l_dict, ll_key) def delitem(self, key): ll_key = self.ll_key(key) - rdict.ll_dict_delitem(self.l_dict, ll_key) + self.ll_delitem(self.l_dict, ll_key) del self.reference[key] - assert not rdict.ll_contains(self.l_dict, ll_key) + assert not self.ll_contains(self.l_dict, ll_key) def copydict(self): - self.l_dict = rdict.ll_copy(self.l_dict) + self.l_dict = self.ll_copy(self.l_dict) + assert self.ll_len(self.l_dict) == len(self.reference) def cleardict(self): - rdict.ll_clear(self.l_dict) + self.ll_clear(self.l_dict) self.reference.clear() - assert rdict.ll_dict_len(self.l_dict) == 0 + assert self.ll_len(self.l_dict) == 0 def fullcheck(self): - assert rdict.ll_dict_len(self.l_dict) == len(self.reference) + assert self.ll_len(self.l_dict) == len(self.reference) for key, value in self.reference.iteritems(): - assert (rdict.ll_dict_getitem(self.l_dict, self.ll_key(key)) == + assert (self.ll_getitem(self.l_dict, self.ll_key(key)) == self.ll_value(value)) -class StressTest(GenericStateMachine): +class MappingSM(GenericStateMachine): def __init__(self): self.space = None @@ -1239,7 +1227,7 @@ def execute_step(self, action): if action.method == 'setup': - self.space = Space(*action.args) + self.space = self.Space(*action.args) self.st_keys = ann2strategy(self.space.s_key) self.st_values = ann2strategy(self.space.s_value) return @@ -1250,5 +1238,24 @@ if self.space: self.space.fullcheck() + +class DictSpace(MappingSpace): + MappingRepr = rdict.DictRepr + new_reference = dict + ll_getitem = staticmethod(rdict.ll_dict_getitem) + ll_setitem = staticmethod(rdict.ll_dict_setitem) + ll_delitem = staticmethod(rdict.ll_dict_delitem) + ll_len = staticmethod(rdict.ll_dict_len) + ll_contains = staticmethod(rdict.ll_contains) + ll_copy = staticmethod(rdict.ll_copy) + ll_clear = staticmethod(rdict.ll_clear) + + def 
newdict(self, repr): + return rdict.ll_newdict(repr.DICT) + +class DictSM(MappingSM): + Space = DictSpace + def test_hypothesis(): - run_state_machine_as_test(StressTest, settings(max_examples=500, stateful_step_count=100)) + run_state_machine_as_test( + DictSM, settings(max_examples=500, stateful_step_count=100)) diff --git a/rpython/rtyper/test/test_rordereddict.py b/rpython/rtyper/test/test_rordereddict.py --- a/rpython/rtyper/test/test_rordereddict.py +++ b/rpython/rtyper/test/test_rordereddict.py @@ -1,14 +1,18 @@ - import py from collections import OrderedDict +from hypothesis import settings +from hypothesis.stateful import run_state_machine_as_test + from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem import rordereddict, rstr from rpython.rlib.rarithmetic import intmask from rpython.rtyper.annlowlevel import llstr, hlstr -from rpython.rtyper.test.test_rdict import BaseTestRDict +from rpython.rtyper.test.test_rdict import ( + BaseTestRDict, MappingSpace, MappingSM) from rpython.rlib import objectmodel +rodct = rordereddict def get_indexes(ll_d): return ll_d.indexes._obj.container._as_ptr() @@ -330,124 +334,48 @@ assert res == 6 -class TestStress: +class ODictSpace(MappingSpace): + MappingRepr = rodct.OrderedDictRepr + new_reference = OrderedDict + ll_getitem = staticmethod(rodct.ll_dict_getitem) + ll_setitem = staticmethod(rodct.ll_dict_setitem) + ll_delitem = staticmethod(rodct.ll_dict_delitem) + ll_len = staticmethod(rodct.ll_dict_len) + ll_contains = staticmethod(rodct.ll_dict_contains) + ll_copy = staticmethod(rodct.ll_dict_copy) + ll_clear = staticmethod(rodct.ll_dict_clear) - def test_stress(self): - from rpython.annotator.dictdef import DictKey, DictValue - from rpython.annotator import model as annmodel - from rpython.rtyper import rint - from rpython.rtyper.test.test_rdict import not_really_random - rodct = rordereddict - dictrepr = rodct.OrderedDictRepr( - None, rint.signed_repr, rint.signed_repr, - DictKey(None, annmodel.SomeInteger()), - DictValue(None, annmodel.SomeInteger())) - dictrepr.setup() - l_dict = rodct.ll_newdict(dictrepr.DICT) - referencetable = [None] * 400 - referencelength = 0 - value = 0 + def newdict(self, repr): + return rodct.ll_newdict(repr.DICT) - def complete_check(): - for n, refvalue in zip(range(len(referencetable)), referencetable): - try: - gotvalue = rodct.ll_dict_getitem(l_dict, n) - except KeyError: - assert refvalue is None - else: - assert gotvalue == refvalue + def get_keys(self): + DICT = lltype.typeOf(self.l_dict).TO + ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT)) + ll_iter = rordereddict.ll_dictiter(ITER, self.l_dict) + ll_dictnext = rordereddict._ll_dictnext + keys_ll = [] + while True: + try: + num = ll_dictnext(ll_iter) + keys_ll.append(self.l_dict.entries[num].key) + except StopIteration: + break + return keys_ll - for x in not_really_random(): - n = int(x*100.0) # 0 <= x < 400 - op = repr(x)[-1] - if op <= '2' and referencetable[n] is not None: - rodct.ll_dict_delitem(l_dict, n) - referencetable[n] = None - referencelength -= 1 - elif op <= '6': - rodct.ll_dict_setitem(l_dict, n, value) - if referencetable[n] is None: - referencelength += 1 - referencetable[n] = value - value += 1 - else: - try: - gotvalue = rodct.ll_dict_getitem(l_dict, n) - except KeyError: - assert referencetable[n] is None - else: - assert gotvalue == referencetable[n] - if 1.38 <= x <= 1.39: - complete_check() - print 'current dict length:', referencelength - assert l_dict.num_live_items == referencelength - 
complete_check() + def fullcheck(self): + # overridden to also check key order + assert self.ll_len(self.l_dict) == len(self.reference) + keys_ll = self.get_keys() + assert len(keys_ll) == len(self.reference) + for key, ll_key in zip(self.reference, keys_ll): + assert self.ll_key(key) == ll_key + assert (self.ll_getitem(self.l_dict, self.ll_key(key)) == + self.ll_value(self.reference[key])) - def test_stress_2(self): - yield self.stress_combination, True, False - yield self.stress_combination, False, True - yield self.stress_combination, False, False - yield self.stress_combination, True, True - def stress_combination(self, key_can_be_none, value_can_be_none): - from rpython.rtyper.lltypesystem.rstr import string_repr - from rpython.annotator.dictdef import DictKey, DictValue - from rpython.annotator import model as annmodel - from rpython.rtyper.test.test_rdict import not_really_random - rodct = rordereddict +class ODictSM(MappingSM): + Space = ODictSpace - print - print "Testing combination with can_be_None: keys %s, values %s" % ( - key_can_be_none, value_can_be_none) - - class PseudoRTyper: - cache_dummy_values = {} - dictrepr = rodct.OrderedDictRepr( - PseudoRTyper(), string_repr, string_repr, - DictKey(None, annmodel.SomeString(key_can_be_none)), - DictValue(None, annmodel.SomeString(value_can_be_none))) - dictrepr.setup() - print dictrepr.lowleveltype - #for key, value in dictrepr.DICTENTRY._adtmeths.items(): - # print ' %s = %s' % (key, value) - l_dict = rodct.ll_newdict(dictrepr.DICT) - referencetable = [None] * 400 - referencelength = 0 - values = not_really_random() - keytable = [string_repr.convert_const("foo%d" % n) - for n in range(len(referencetable))] - - def complete_check(): - for n, refvalue in zip(range(len(referencetable)), referencetable): - try: - gotvalue = rodct.ll_dict_getitem(l_dict, keytable[n]) - except KeyError: - assert refvalue is None - else: - assert gotvalue == refvalue - - for x in not_really_random(): - n = int(x*100.0) # 0 <= x < 400 - op = repr(x)[-1] - if op <= '2' and referencetable[n] is not None: - rodct.ll_dict_delitem(l_dict, keytable[n]) - referencetable[n] = None - referencelength -= 1 - elif op <= '6': - ll_value = string_repr.convert_const(str(values.next())) - rodct.ll_dict_setitem(l_dict, keytable[n], ll_value) - if referencetable[n] is None: - referencelength += 1 - referencetable[n] = ll_value - else: - try: - gotvalue = rodct.ll_dict_getitem(l_dict, keytable[n]) - except KeyError: - assert referencetable[n] is None - else: - assert gotvalue == referencetable[n] - if 1.38 <= x <= 1.39: - complete_check() - print 'current dict length:', referencelength - assert l_dict.num_live_items == referencelength - complete_check() +def test_hypothesis(): + run_state_machine_as_test( + ODictSM, settings(max_examples=500, stateful_step_count=100)) From pypy.commits at gmail.com Fri Mar 4 05:15:16 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 04 Mar 2016 02:15:16 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: write snapshot iterator Message-ID: <56d96034.86b71c0a.491ac.ffffdeb0@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82684:b00c49bd7e47 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/b00c49bd7e47/ Log: write snapshot iterator diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -15,6 +15,29 @@ class Sentinel(object): pass +class SnapshotIterator(object): + def 
__init__(self, main_iter, pos, end_pos): + self.trace = main_iter.trace + self.main_iter = main_iter + self.end = end_pos + self.pos = pos + + def done(self): + return self.pos >= self.end + + def _next(self): + res = self.trace._ops[self.pos] + self.pos += 1 + return res + + def next(self): + r = self.main_iter._get(self._next()) + assert r + return r + + def get_size_jitcode_pc(self): + return self._next(), self._next(), self._next() + class TraceIterator(object): def __init__(self, trace, end): self.trace = trace @@ -56,6 +79,10 @@ self.pos = self._next() return pos + def get_snapshot_iter(self, pos): + end = self.trace._ops[pos] + return SnapshotIterator(self, pos + 1, end) + def next(self): opnum = self._next() if oparity[opnum] == -1: @@ -75,7 +102,7 @@ descr = None res = ResOperation(opnum, args, -1, descr=descr) if rop.is_guard(opnum): - res.rd_snapshot_position = self.skip_resume_data() + res.rd_resume_position = self.skip_resume_data() self._cache[self._count] = res self._count += 1 return res diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -507,6 +507,7 @@ def propagate_all_forward(self, trace, call_pure_results=None, flush=True): trace = trace.get_iter() + self.trace = trace self.call_pure_results = call_pure_results while not trace.done(): self._really_emitted_operation = None @@ -691,7 +692,7 @@ op.setdescr(descr) assert isinstance(descr, compile.ResumeGuardDescr) assert isinstance(op, GuardResOp) - modifier = resume.ResumeDataVirtualAdder(self, descr, op, + modifier = resume.ResumeDataVirtualAdder(self, descr, op, self.trace, self.resumedata_memo) try: newboxes = modifier.finish(self, pendingfields) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -14,62 +14,6 @@ from rpython.rlib.rarithmetic import LONG_BIT from rpython.jit.tool.oparser import parse -class FakeJitCode(object): - index = 0 - -def test_store_final_boxes_in_guard(): - py.test.skip("needs to be rewritten") - from rpython.jit.metainterp.compile import ResumeGuardDescr - from rpython.jit.metainterp.resume import tag, TAGBOX - b0 = InputArgInt() - b1 = InputArgInt() - opt = optimizeopt.Optimizer(FakeMetaInterpStaticData(LLtypeMixin.cpu), - None, None) - op = ResOperation(rop.GUARD_TRUE, [ConstInt(1)], None) - # setup rd data - fi0 = resume.FrameInfo(None, FakeJitCode(), 11) - snapshot0 = resume.Snapshot(None, [b0]) - op.rd_snapshot = resume.TopSnapshot(snapshot0, [], [b1]) - op.rd_frame_info_list = resume.FrameInfo(fi0, FakeJitCode(), 33) - # - opt.store_final_boxes_in_guard(op, []) - fdescr = op.getdescr() - if op.getfailargs() == [b0, b1]: - assert list(fdescr.rd_numb.nums) == [tag(1, TAGBOX)] - assert list(fdescr.rd_numb.prev.nums) == [tag(0, TAGBOX)] - else: - assert op.getfailargs() == [b1, b0] - assert list(fdescr.rd_numb.nums) == [tag(0, TAGBOX)] - assert list(fdescr.rd_numb.prev.nums) == [tag(1, TAGBOX)] - assert fdescr.rd_virtuals is None - assert fdescr.rd_consts == [] - -def test_sharing_field_lists_of_virtual(): - py.test.skip("needs to be rewritten") - class FakeOptimizer(object): - class optimizer(object): - class cpu(object): - pass - opt = FakeOptimizer() - virt1 = 
virtualize.AbstractVirtualStructValue(opt, None) - lst1 = virt1._get_field_descr_list() - assert lst1 == [] - lst2 = virt1._get_field_descr_list() - assert lst1 is lst2 - virt1.setfield(LLtypeMixin.valuedescr, optimizeopt.OptValue(None)) - lst3 = virt1._get_field_descr_list() - assert lst3 == [LLtypeMixin.valuedescr] - lst4 = virt1._get_field_descr_list() - assert lst3 is lst4 - - virt2 = virtualize.AbstractVirtualStructValue(opt, None) - lst5 = virt2._get_field_descr_list() - assert lst5 is lst1 - virt2.setfield(LLtypeMixin.valuedescr, optimizeopt.OptValue(None)) - lst6 = virt1._get_field_descr_list() - assert lst6 is lst3 - - # ____________________________________________________________ @@ -78,16 +22,17 @@ enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap" def optimize_loop(self, ops, optops, call_pure_results=None): - loop = self.parse(ops, postprocess=self.postprocess) + loop = self.parse(ops) token = JitCellToken() - label_op = None # loop.record_op(rop.LABEL, loop.inputargs, - # descr=TargetToken(token)) - #if loop.operations[-1].getopnum() == rop.JUMP: - # loop.operations[-1].setdescr(token) + label_op = ResOperation(rop.LABEL, loop.inputargs, -1, + descr=TargetToken(token)) + if loop.operations[-1].getopnum() == rop.JUMP: + loop.operations[-1].setdescr(token) exp = parse(optops, namespace=self.namespace.copy()) - #expected = convert_old_style_to_targets(exp, jump=True) + expected = convert_old_style_to_targets(exp, jump=True) call_pure_results = self._convert_call_pure_results(call_pure_results) - compile_data = compile.SimpleCompileData(label_op, loop, + trace = self.convert_loop_to_packed(loop) + compile_data = compile.SimpleCompileData(label_op, trace, call_pure_results) info, ops = self._do_optimize_loop(compile_data) label_op = ResOperation(rop.LABEL, info.inputargs) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -493,6 +493,18 @@ final_descr = history.BasicFinalDescr() +class FakeFrame(object): + pc = 0 + + class jitcode: + index = 0 + + def __init__(self, boxes): + self.boxes = boxes + + def get_list_of_active_boxes(self, flag): + return self.boxes + class BaseTest(object): def parse(self, s, boxkinds=None, want_fail_descr=True, postprocess=None): @@ -501,12 +513,6 @@ None, False, postprocess) return self.oparse.parse() - def postprocess(self, op): - if OpHelpers.is_guard(op.getopnum()): - op.rd_snapshot = resume.TopSnapshot( - resume.Snapshot(None, op.getfailargs()), [], []) - op.rd_frame_info_list = resume.FrameInfo(None, 0, 11) - def add_guard_future_condition(self, res): # invent a GUARD_FUTURE_CONDITION to not have to change all tests if res.operations[-1].getopnum() == rop.JUMP: @@ -546,6 +552,17 @@ call_pure_results[list(k)] = v return call_pure_results + def convert_loop_to_packed(self, loop): + from rpython.jit.metainterp.opencoder import Trace + trace = Trace(loop.inputargs) + for op in loop.operations: + newop = trace.record_op(op.getopnum(), op.getarglist()) + if rop.is_guard(op.getopnum()): + frame = FakeFrame(op.getfailargs()) + resume.capture_resumedata([frame], None, [], trace) + op.position = newop.position + return trace + def unroll_and_optimize(self, loop, call_pure_results=None): self.add_guard_future_condition(loop) jump_op = loop.operations[-1] @@ -601,7 +618,7 @@ def convert_old_style_to_targets(loop, jump): newloop = TreeLoop(loop.name) 
newloop.inputargs = loop.inputargs - newloop.operations = [ResOperation(rop.LABEL, loop.inputargs, descr=FakeDescr())] + \ + newloop.operations = [ResOperation(rop.LABEL, loop.inputargs, -1, descr=FakeDescr())] + \ loop.operations if not jump: assert newloop.operations[-1].getopnum() == rop.JUMP diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -581,9 +581,7 @@ class GuardResOp(ResOpWithDescr): _fail_args = None - - rd_snapshot = None - rd_frame_info_list = None + rd_resume_position = -1 def getfailargs(self): return self._fail_args @@ -598,8 +596,7 @@ newop = AbstractResOp.copy_and_change(self, opnum, args, descr) assert isinstance(newop, GuardResOp) newop.setfailargs(self.getfailargs()) - newop.rd_snapshot = self.rd_snapshot - newop.rd_frame_info_list = self.rd_frame_info_list + newop.rd_resume_position = self.rd_resume_position return newop class VectorGuardOp(GuardResOp): diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -301,16 +301,7 @@ state.n = n state.v = v - def number(self, optimizer, topsnapshot, frameinfo): - # flatten the list - cur = topsnapshot.prev - snapshot_list = [topsnapshot] - framestack_list = [] - while cur: - framestack_list.append(frameinfo) - frameinfo = frameinfo.prev - snapshot_list.append(cur) - cur = cur.prev + def number(self, optimizer, position, trace): state = NumberingState(snapshot_list) # we want to number snapshots starting from the back, but ending @@ -387,10 +378,11 @@ class ResumeDataVirtualAdder(VirtualVisitor): - def __init__(self, optimizer, storage, snapshot_storage, memo): + def __init__(self, optimizer, storage, guard_op, trace, memo): self.optimizer = optimizer + self.trace = trace self.storage = storage - self.snapshot_storage = snapshot_storage + self.guard_op = guard_op self.memo = memo def make_virtual_info(self, info, fieldnums): @@ -480,11 +472,11 @@ storage = self.storage # make sure that nobody attached resume data to this guard yet assert not storage.rd_numb - snapshot = self.snapshot_storage.rd_snapshot - assert snapshot is not None # is that true? 
+ resume_position = self.guard_op.rd_resume_position + assert resume_position > 0 # count stack depth - numb, liveboxes_from_env, v = self.memo.number(optimizer, snapshot, - self.snapshot_storage.rd_frame_info_list) + numb, liveboxes_from_env, v = self.memo.number(optimizer, + resume_position, self.optimize.trace) self.liveboxes_from_env = liveboxes_from_env self.liveboxes = {} storage.rd_numb = numb diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -5,6 +5,21 @@ from rpython.jit.metainterp.optimizeopt.optimizer import Optimizer from rpython.jit.metainterp import resume +class JitCode(object): + def __init__(self, index): + self.index = index + +class FakeFrame(object): + parent_resumedata_position = -1 + + def __init__(self, pc, jitcode, boxes): + self.pc = pc + self.jitcode = jitcode + self.boxes = boxes + + def get_list_of_active_boxes(self, flag): + return self.boxes + class TestOpencoder(object): def unpack(self, t): iter = t.get_iter() @@ -41,21 +56,6 @@ return boxes def test_rd_snapshot(self): - class JitCode(object): - def __init__(self, index): - self.index = index - - class FakeFrame(object): - parent_resumedata_position = -1 - - def __init__(self, pc, jitcode, boxes): - self.pc = pc - self.jitcode = jitcode - self.boxes = boxes - - def get_list_of_active_boxes(self, flag): - return self.boxes - i0, i1 = InputArgInt(), InputArgInt() t = Trace([i0, i1]) add = t.record_op(rop.INT_ADD, [i0, i1]) @@ -67,14 +67,35 @@ resume.capture_resumedata(framestack, None, [], t) (i0, i1), l, iter = self.unpack(t) assert l[1].opnum == rop.GUARD_FALSE - boxes = self.unpack_snapshot(iter, l[1].rd_snapshot_position) + boxes = self.unpack_snapshot(iter, l[1].rd_resume_position) assert boxes == [i0, i1] t.record_op(rop.GUARD_FALSE, [add]) resume.capture_resumedata([frame0, frame1], None, [], t) (i0, i1), l, iter = self.unpack(t) assert l[1].opnum == rop.GUARD_FALSE - boxes = self.unpack_snapshot(iter, l[1].rd_snapshot_position) + boxes = self.unpack_snapshot(iter, l[1].rd_resume_position) assert boxes == [i0, i1] assert l[2].opnum == rop.GUARD_FALSE - boxes = self.unpack_snapshot(iter, l[2].rd_snapshot_position) + boxes = self.unpack_snapshot(iter, l[2].rd_resume_position) assert boxes == [i0, i0, l[0], i0, i1] + + def test_read_snapshot_interface(self): + i0, i1, i2 = InputArgInt(), InputArgInt(), InputArgInt() + t = Trace([i0, i1, i2]) + t.record_op(rop.GUARD_TRUE, [i1]) + frame0 = FakeFrame(1, JitCode(2), [i0, i1]) + frame1 = FakeFrame(3, JitCode(4), [i2, i2]) + resume.capture_resumedata([frame0, frame1], None, [], t) + (i0, i1, i2), l, iter = self.unpack(t) + pos = l[0].rd_resume_position + snapshot_iter = iter.get_snapshot_iter(pos) + size, jc_index, pc = snapshot_iter.get_size_jitcode_pc() + assert size == 2 + assert jc_index == 4 + assert pc == 3 + assert [snapshot_iter.next() for i in range(2)] == [i2, i2] + size, jc_index, pc = snapshot_iter.get_size_jitcode_pc() + assert size == 2 + assert jc_index == 2 + assert pc == 1 + assert [snapshot_iter.next() for i in range(2)] == [i0, i1] From pypy.commits at gmail.com Fri Mar 4 05:29:46 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:29:46 -0800 (PST) Subject: [pypy-commit] pypy issue-2248: close branch to be merged Message-ID: <56d9639a.10921c0a.861ac.ffffe8d2@mx.google.com> Author: mattip Branch: issue-2248 Changeset: r82685:771022ef9e37 Date: 2016-03-04 11:52 
+0200 http://bitbucket.org/pypy/pypy/changeset/771022ef9e37/ Log: close branch to be merged From pypy.commits at gmail.com Fri Mar 4 05:29:50 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:29:50 -0800 (PST) Subject: [pypy-commit] pypy ndarray-setitem-filtered: close branch to be merged Message-ID: <56d9639e.080a1c0a.6f783.ffffe3e6@mx.google.com> Author: mattip Branch: ndarray-setitem-filtered Changeset: r82687:3269540e9cfd Date: 2016-03-04 11:54 +0200 http://bitbucket.org/pypy/pypy/changeset/3269540e9cfd/ Log: close branch to be merged From pypy.commits at gmail.com Fri Mar 4 05:29:48 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:29:48 -0800 (PST) Subject: [pypy-commit] pypy default: merge issue-2248, which fixes float.__int__() Message-ID: <56d9639c.88c8c20a.8eb98.ffffe4ef@mx.google.com> Author: mattip Branch: Changeset: r82686:7ac45ccc8658 Date: 2016-03-04 11:53 +0200 http://bitbucket.org/pypy/pypy/changeset/7ac45ccc8658/ Log: merge issue-2248, which fixes float.__int__() diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -452,7 +452,6 @@ assert a + 1 == 2 assert a + 1.1 == 2 - def test_binaryop_calls_coerce_always(self): l = [] class A: @@ -1076,6 +1075,16 @@ assert (D() > A()) == 'D:A.gt' assert (D() >= A()) == 'D:A.ge' + def test_override___int__(self): + class F(float): + def __int__(self): + return 666 + f = F(-12.3) + assert int(f) == 666 + # on cpython, this calls float_trunc() in floatobject.c + # which ends up calling PyFloat_AS_DOUBLE((PyFloatObject*) f) + assert float.__int__(f) == -12 + class AppTestOldStyleClassBytesDict(object): def setup_class(cls): diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -160,15 +160,11 @@ return self.floatval def int(self, space): + # this is a speed-up only, for space.int(w_float). 
if (type(self) is not W_FloatObject and space.is_overloaded(self, space.w_float, '__int__')): return W_Root.int(self, space) - try: - value = ovfcheck_float_to_int(self.floatval) - except OverflowError: - return space.long(self) - else: - return space.newint(value) + return self.descr_trunc(space) def is_w(self, space, w_other): from rpython.rlib.longlong2float import float2longlong @@ -424,9 +420,8 @@ "cannot convert float NaN to integer") def descr_trunc(self, space): - whole = math.modf(self.floatval)[1] try: - value = ovfcheck_float_to_int(whole) + value = ovfcheck_float_to_int(self.floatval) except OverflowError: return self.descr_long(space) else: @@ -661,7 +656,7 @@ __format__ = interp2app(W_FloatObject.descr_format), __coerce__ = interp2app(W_FloatObject.descr_coerce), __nonzero__ = interp2app(W_FloatObject.descr_nonzero), - __int__ = interp2app(W_FloatObject.int), + __int__ = interp2app(W_FloatObject.descr_trunc), __float__ = interp2app(W_FloatObject.descr_float), __long__ = interp2app(W_FloatObject.descr_long), __trunc__ = interp2app(W_FloatObject.descr_trunc), From pypy.commits at gmail.com Fri Mar 4 05:29:54 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:29:54 -0800 (PST) Subject: [pypy-commit] pypy default: document merged branches Message-ID: <56d963a2.4412c30a.afb7.ffffece5@mx.google.com> Author: mattip Branch: Changeset: r82689:1491ec62293d Date: 2016-03-04 12:04 +0200 http://bitbucket.org/pypy/pypy/changeset/1491ec62293d/ Log: document merged branches diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -183,4 +183,11 @@ .. branch: vlen-resume -Compress resume data, saving 10-20% of memory consumed by the JIT \ No newline at end of file +Compress resume data, saving 10-20% of memory consumed by the JIT + +.. branch: issue-2248 + +.. 
branch: ndarray-setitem-filtered + +Fix boolean-array indexing in micronumpy + From pypy.commits at gmail.com Fri Mar 4 05:29:56 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:29:56 -0800 (PST) Subject: [pypy-commit] pypy release-1.7.x: close unsupported release branch Message-ID: <56d963a4.2968c20a.9b496.ffffe59a@mx.google.com> Author: mattip Branch: release-1.7.x Changeset: r82690:192b87cf01e5 Date: 2016-03-04 12:06 +0200 http://bitbucket.org/pypy/pypy/changeset/192b87cf01e5/ Log: close unsupported release branch From pypy.commits at gmail.com Fri Mar 4 05:29:52 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:29:52 -0800 (PST) Subject: [pypy-commit] pypy default: merge ndarray-setitem-filtered, which fixes issue #1674, issue #1717 Message-ID: <56d963a0.2a6ec20a.dabb2.ffffea59@mx.google.com> Author: mattip Branch: Changeset: r82688:403a0931e0bc Date: 2016-03-04 11:58 +0200 http://bitbucket.org/pypy/pypy/changeset/403a0931e0bc/ Log: merge ndarray-setitem-filtered, which fixes issue #1674, issue #1717 diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -298,7 +298,14 @@ except IndexError: # not a single result chunks = self._prepare_slice_args(space, w_index) - return new_view(space, orig_arr, chunks) + copy = False + if isinstance(chunks[0], BooleanChunk): + # numpy compatibility + copy = True + w_ret = new_view(space, orig_arr, chunks) + if copy: + w_ret = w_ret.descr_copy(space, space.wrap(w_ret.get_order())) + return w_ret def descr_setitem(self, space, orig_arr, w_index, w_value): try: diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -22,7 +22,8 @@ from pypy.module.micronumpy.flagsobj import W_FlagsObject from pypy.module.micronumpy.strides import ( get_shape_from_iterable, shape_agreement, shape_agreement_multiple, - is_c_contiguous, is_f_contiguous, calc_strides, new_view) + is_c_contiguous, is_f_contiguous, calc_strides, new_view, BooleanChunk, + SliceChunk) from pypy.module.micronumpy.casting import can_cast_array from pypy.module.micronumpy.descriptor import get_dtype_cache @@ -204,7 +205,13 @@ if iter_shape is None: # w_index is a list of slices, return a view chunks = self.implementation._prepare_slice_args(space, w_index) - return new_view(space, self, chunks) + copy = False + if isinstance(chunks[0], BooleanChunk): + copy = True + w_ret = new_view(space, self, chunks) + if copy: + w_ret = w_ret.descr_copy(space, space.wrap(w_ret.get_order())) + return w_ret shape = res_shape + self.get_shape()[len(indexes):] w_res = W_NDimArray.from_shape(space, shape, self.get_dtype(), self.get_order(), w_instance=self) @@ -220,8 +227,24 @@ if iter_shape is None: # w_index is a list of slices chunks = self.implementation._prepare_slice_args(space, w_index) - view = new_view(space, self, chunks) - view.implementation.setslice(space, val_arr) + dim = -1 + view = self + for i, c in enumerate(chunks): + if isinstance(c, BooleanChunk): + dim = i + idx = c.w_idx + chunks.pop(i) + chunks.insert(0, SliceChunk(space.newslice(space.wrap(0), + space.w_None, space.w_None))) + break + if dim > 0: + view = self.implementation.swapaxes(space, self, 0, dim) + if dim >= 0: + view = new_view(space, self, chunks) + view.setitem_filter(space, idx, val_arr) + else: + view = new_view(space, self, chunks) + 
view.implementation.setslice(space, val_arr) return if support.product(iter_shape) == 0: return diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -97,22 +97,19 @@ # filter by axis dim filtr = chunks[dim] assert isinstance(filtr, BooleanChunk) + # XXX this creates a new array, and fails in setitem w_arr = w_arr.getitem_filter(space, filtr.w_idx, axis=dim) arr = w_arr.implementation chunks[dim] = SliceChunk(space.newslice(space.wrap(0), - space.wrap(-1), space.w_None)) + space.w_None, space.w_None)) r = calculate_slice_strides(space, arr.shape, arr.start, arr.get_strides(), arr.get_backstrides(), chunks) else: r = calculate_slice_strides(space, arr.shape, arr.start, arr.get_strides(), arr.get_backstrides(), chunks) shape, start, strides, backstrides = r - w_ret = W_NDimArray.new_slice(space, start, strides[:], backstrides[:], + return W_NDimArray.new_slice(space, start, strides[:], backstrides[:], shape[:], arr, w_arr) - if dim == 0: - # Do not return a view - return w_ret.descr_copy(space, space.wrap(w_ret.get_order())) - return w_ret @jit.unroll_safe def _extend_shape(old_shape, chunks): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2550,8 +2550,10 @@ assert b.base is None b = a[:, np.array([True, False, True])] assert b.base is not None + a[np.array([True, False]), 0] = 100 b = a[np.array([True, False]), 0] - assert (b ==[0]).all() + assert b.shape == (1,) + assert (b ==[100]).all() def test_scalar_indexing(self): import numpy as np From pypy.commits at gmail.com Fri Mar 4 05:29:58 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:29:58 -0800 (PST) Subject: [pypy-commit] pypy release-2.0-beta2: close unsupported release branch Message-ID: <56d963a6.96941c0a.2397a.ffffe4ee@mx.google.com> Author: mattip Branch: release-2.0-beta2 Changeset: r82691:b030b1d607ea Date: 2016-03-04 12:07 +0200 http://bitbucket.org/pypy/pypy/changeset/b030b1d607ea/ Log: close unsupported release branch From pypy.commits at gmail.com Fri Mar 4 05:30:00 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:30:00 -0800 (PST) Subject: [pypy-commit] pypy release-2.0.x: close unsupported release branch Message-ID: <56d963a8.2457c20a.61023.ffffeb75@mx.google.com> Author: mattip Branch: release-2.0.x Changeset: r82692:e0575fdcfb2a Date: 2016-03-04 12:07 +0200 http://bitbucket.org/pypy/pypy/changeset/e0575fdcfb2a/ Log: close unsupported release branch From pypy.commits at gmail.com Fri Mar 4 05:30:02 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:30:02 -0800 (PST) Subject: [pypy-commit] pypy release-2.1.x: close unsupported release branch Message-ID: <56d963aa.a118c20a.2e706.ffffe605@mx.google.com> Author: mattip Branch: release-2.1.x Changeset: r82693:9eb8b9771f92 Date: 2016-03-04 12:08 +0200 http://bitbucket.org/pypy/pypy/changeset/9eb8b9771f92/ Log: close unsupported release branch From pypy.commits at gmail.com Fri Mar 4 05:30:04 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:30:04 -0800 (PST) Subject: [pypy-commit] pypy release-2.2.x: close unsupported release branch Message-ID: <56d963ac.aa0ac20a.e7976.ffffe244@mx.google.com> Author: mattip Branch: release-2.2.x Changeset: r82694:d532e10b0752 Date: 2016-03-04 12:08 +0200 
http://bitbucket.org/pypy/pypy/changeset/d532e10b0752/ Log: close unsupported release branch From pypy.commits at gmail.com Fri Mar 4 05:30:06 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:30:06 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 40d0d90f6ece on branch osx-eci-frameworks-makefile Message-ID: <56d963ae.4c181c0a.722c9.ffffe342@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82695:d9364dd273b9 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/d9364dd273b9/ Log: Merge closed head 40d0d90f6ece on branch osx-eci-frameworks-makefile From pypy.commits at gmail.com Fri Mar 4 05:30:10 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:30:10 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 94a579477ef0 on branch better_ftime_detect2 Message-ID: <56d963b2.c9161c0a.3ce6a.ffffedbc@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82696:2cfb04d22801 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/2cfb04d22801/ Log: Merge closed head 94a579477ef0 on branch better_ftime_detect2 From pypy.commits at gmail.com Fri Mar 4 05:30:25 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:30:25 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head b8f603e27cae on branch timeb_h Message-ID: <56d963c1.c85b1c0a.db10b.ffffd3ca@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82697:d5a85cd5327b Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/d5a85cd5327b/ Log: Merge closed head b8f603e27cae on branch timeb_h From pypy.commits at gmail.com Fri Mar 4 05:30:27 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:30:27 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 17a61e37733e on branch OlivierBlanvillain/fix-3-broken-links-on-pypy-published-pap-1386250839215 Message-ID: <56d963c3.e6ebc20a.51bce.ffffebb9@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82698:c79472432dae Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/c79472432dae/ Log: Merge closed head 17a61e37733e on branch OlivierBlanvillain/fix-3 -broken-links-on-pypy-published-pap-1386250839215 From pypy.commits at gmail.com Fri Mar 4 05:30:42 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:30:42 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head aabd62322443 on branch test_SetFromErrnoWithFilename__tweaks Message-ID: <56d963d2.07811c0a.aa47a.ffffeb9e@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82700:1bf86aef5ba0 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/1bf86aef5ba0/ Log: Merge closed head aabd62322443 on branch test_SetFromErrnoWithFilename__tweaks From pypy.commits at gmail.com Fri Mar 4 05:30:44 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:30:44 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head cf1cb893229a on branch add_PyErr_SetFromErrnoWithFilenameObject_try_2 Message-ID: <56d963d4.10921c0a.861ac.ffffe945@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82701:ff58cc0ac36c Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/ff58cc0ac36c/ Log: Merge closed head cf1cb893229a on branch add_PyErr_SetFromErrnoWithFilenameObject_try_2 From pypy.commits at gmail.com Fri Mar 4 05:30:46 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 
04 Mar 2016 02:30:46 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head bbdd34440e0e on branch test_SetFromErrnoWithFilename_NULL Message-ID: <56d963d6.e213c20a.3514f.ffffea05@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82702:6434bce4640f Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/6434bce4640f/ Log: Merge closed head bbdd34440e0e on branch test_SetFromErrnoWithFilename_NULL From pypy.commits at gmail.com Fri Mar 4 05:30:40 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:30:40 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 317499aea22b on branch popen-pclose Message-ID: <56d963d0.c65b1c0a.671ec.3431@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82699:f288134e3b10 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/f288134e3b10/ Log: Merge closed head 317499aea22b on branch popen-pclose From pypy.commits at gmail.com Fri Mar 4 05:30:47 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:30:47 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 04584fc796b0 on branch refactor_PyErr_SetFromErrnoWithFilename Message-ID: <56d963d7.42711c0a.1ebd2.ffffeacb@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82703:bb02a4d18ebb Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/bb02a4d18ebb/ Log: Merge closed head 04584fc796b0 on branch refactor_PyErr_SetFromErrnoWithFilename From pypy.commits at gmail.com Fri Mar 4 05:30:49 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:30:49 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 887a48ed959b on branch lexer_token_position_class Message-ID: <56d963d9.838d1c0a.20973.ffffe73a@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82704:eeff0fe9524f Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/eeff0fe9524f/ Log: Merge closed head 887a48ed959b on branch lexer_token_position_class From pypy.commits at gmail.com Fri Mar 4 05:30:51 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:30:51 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 82fa12110c8f on branch scalar-operations Message-ID: <56d963db.8e811c0a.6a073.ffffe5aa@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82705:3195c23f335e Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/3195c23f335e/ Log: Merge closed head 82fa12110c8f on branch scalar-operations From pypy.commits at gmail.com Fri Mar 4 05:30:54 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:30:54 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 42120ecaf5ae on branch py3k-posix-decode Message-ID: <56d963de.a2afc20a.c00e8.ffffe93a@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82707:b16a501feb63 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/b16a501feb63/ Log: Merge closed head 42120ecaf5ae on branch py3k-posix-decode From pypy.commits at gmail.com Fri Mar 4 05:30:56 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:30:56 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 61b902f1f07b on branch improve-docs-fixes Message-ID: <56d963e0.0775c20a.45ba5.ffffeb54@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82708:3375c78ccaf0 Date: 2016-03-04 12:14 +0200 
http://bitbucket.org/pypy/pypy/changeset/3375c78ccaf0/ Log: Merge closed head 61b902f1f07b on branch improve-docs-fixes From pypy.commits at gmail.com Fri Mar 4 05:30:53 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:30:53 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head d577539c2307 on branch py3k-reset-locale Message-ID: <56d963dd.e83cc20a.8b5fd.ffffe398@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82706:f52800a195d5 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/f52800a195d5/ Log: Merge closed head d577539c2307 on branch py3k-reset-locale From pypy.commits at gmail.com Fri Mar 4 05:30:58 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:30:58 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head fdf329dca5f0 on branch improve-docs-fixes Message-ID: <56d963e2.0bdf1c0a.36e46.ffffe205@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82709:d7b3cbdc9fc4 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/d7b3cbdc9fc4/ Log: Merge closed head fdf329dca5f0 on branch improve-docs-fixes From pypy.commits at gmail.com Fri Mar 4 05:31:00 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:00 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 1441eb71f9d9 on branch None-consistency Message-ID: <56d963e4.080a1c0a.6f783.ffffe466@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82710:67eaaf820c0e Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/67eaaf820c0e/ Log: Merge closed head 1441eb71f9d9 on branch None-consistency From pypy.commits at gmail.com Fri Mar 4 05:31:02 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:02 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 08b71058baa1 on branch gc-incminimark-pinning-improve Message-ID: <56d963e6.0357c20a.3f91d.ffffe8a8@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82711:ace15dd75551 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/ace15dd75551/ Log: Merge closed head 08b71058baa1 on branch gc-incminimark-pinning- improve From pypy.commits at gmail.com Fri Mar 4 05:31:03 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:03 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head e040414ce026 on branch vecopt-merge Message-ID: <56d963e7.42121c0a.1215e.ffffe958@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82712:8c7029f406cb Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/8c7029f406cb/ Log: Merge closed head e040414ce026 on branch vecopt-merge From pypy.commits at gmail.com Fri Mar 4 05:31:06 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:06 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 5548644da690 on branch vecopt Message-ID: <56d963ea.6614c20a.b770a.ffffe5b7@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82713:99fcf78c0f4a Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/99fcf78c0f4a/ Log: Merge closed head 5548644da690 on branch vecopt From pypy.commits at gmail.com Fri Mar 4 05:31:07 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:07 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head bc6d1dab0c3b on branch release-15.11 Message-ID: <56d963eb.657bc20a.9b8e8.ffffe949@mx.google.com> 
Author: mattip Branch: closed-branches Changeset: r82714:54716e4d3687 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/54716e4d3687/ Log: Merge closed head bc6d1dab0c3b on branch release-15.11 From pypy.commits at gmail.com Fri Mar 4 05:31:09 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:09 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head c081afd4ac18 on branch memop-simplify Message-ID: <56d963ed.0775c20a.45ba5.ffffeb6e@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82715:c570b441b2c1 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/c570b441b2c1/ Log: Merge closed head c081afd4ac18 on branch memop-simplify From pypy.commits at gmail.com Fri Mar 4 05:31:11 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:11 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head de58d64bcb59 on branch osx-vmprof-support Message-ID: <56d963ef.6672c20a.68db4.ffffeba4@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82716:fc3b9721f914 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/fc3b9721f914/ Log: Merge closed head de58d64bcb59 on branch osx-vmprof-support From pypy.commits at gmail.com Fri Mar 4 05:31:12 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:12 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 8abc47287812 on branch py3.3 Message-ID: <56d963f0.6672c20a.68db4.ffffeba9@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82717:2b135a489870 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/2b135a489870/ Log: Merge closed head 8abc47287812 on branch py3.3 From pypy.commits at gmail.com Fri Mar 4 05:31:14 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:14 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head c7df87defdb6 on branch refactor-translator Message-ID: <56d963f2.a151c20a.73e93.ffffeef3@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82718:c1cedee31ba7 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/c1cedee31ba7/ Log: Merge closed head c7df87defdb6 on branch refactor-translator From pypy.commits at gmail.com Fri Mar 4 05:31:18 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:18 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 80e69caefe31 on branch jit-threshold-hooks Message-ID: <56d963f6.45d61c0a.313c8.439a@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82720:08ef3483ce89 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/08ef3483ce89/ Log: Merge closed head 80e69caefe31 on branch jit-threshold-hooks From pypy.commits at gmail.com Fri Mar 4 05:31:21 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:21 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 0b45d26b252c on branch gc-counters Message-ID: <56d963f9.838d1c0a.20973.ffffe76d@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82722:e3f1bd9714dc Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/e3f1bd9714dc/ Log: Merge closed head 0b45d26b252c on branch gc-counters From pypy.commits at gmail.com Fri Mar 4 05:31:16 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:16 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 3edacc8819f9 on branch 
kill-running_on_llinterp Message-ID: <56d963f4.c711c30a.49b7.ffffeab2@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82719:f09d47f86a0c Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/f09d47f86a0c/ Log: Merge closed head 3edacc8819f9 on branch kill-running_on_llinterp From pypy.commits at gmail.com Fri Mar 4 05:31:23 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:23 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 4796ade52de3 on branch numpy-reintroduce-zjit-tests Message-ID: <56d963fb.857ac20a.c9638.ffffe764@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82723:08fc11067e27 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/08fc11067e27/ Log: Merge closed head 4796ade52de3 on branch numpy-reintroduce-zjit- tests From pypy.commits at gmail.com Fri Mar 4 05:31:20 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:20 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head d920fee2333f on branch jitframe-offset Message-ID: <56d963f8.42121c0a.1215e.ffffe97a@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82721:710a6c573849 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/710a6c573849/ Log: Merge closed head d920fee2333f on branch jitframe-offset From pypy.commits at gmail.com Fri Mar 4 05:31:25 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:25 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 15773be8bbc7 on branch release-2.0-beta1 Message-ID: <56d963fd.576f1c0a.2185a.ffffe4ec@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82724:4a6c7fe51f10 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/4a6c7fe51f10/ Log: Merge closed head 15773be8bbc7 on branch release-2.0-beta1 From pypy.commits at gmail.com Fri Mar 4 05:31:27 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:27 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 0baec005ac6f on branch look-into-thread Message-ID: <56d963ff.8ee61c0a.36800.ffffe83e@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82725:5b418b1c49a4 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/5b418b1c49a4/ Log: Merge closed head 0baec005ac6f on branch look-into-thread From pypy.commits at gmail.com Fri Mar 4 05:31:29 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:29 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 61b4f2b3a808 on branch rdict-experiments-3 Message-ID: <56d96401.2a6ec20a.dabb2.ffffeb12@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82726:ecbc7b202ef1 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/ecbc7b202ef1/ Log: Merge closed head 61b4f2b3a808 on branch rdict-experiments-3 From pypy.commits at gmail.com Fri Mar 4 05:31:30 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:30 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head c3d7ed5d1fb9 on branch string-char-concat Message-ID: <56d96402.865a1c0a.20816.ffffe6f0@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82727:b9c795652053 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/b9c795652053/ Log: Merge closed head c3d7ed5d1fb9 on branch string-char-concat From pypy.commits at gmail.com Fri Mar 4 05:31:32 2016 From: pypy.commits at 
gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:32 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head e49e1d8eda30 on branch rpython-bytearray Message-ID: <56d96404.03321c0a.e6476.ffffe1aa@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82728:2d0b14fa746e Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/2d0b14fa746e/ Log: Merge closed head e49e1d8eda30 on branch rpython-bytearray From pypy.commits at gmail.com Fri Mar 4 05:31:34 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:34 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head cbaff8720ab5 on branch rewrite-unrolling Message-ID: <56d96406.0bdf1c0a.36e46.ffffe249@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82729:9d7e8d085710 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/9d7e8d085710/ Log: Merge closed head cbaff8720ab5 on branch rewrite-unrolling From pypy.commits at gmail.com Fri Mar 4 05:31:38 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:38 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 345ab9700a5e on branch imrpove-custom-gc-tracing Message-ID: <56d9640a.aa0ac20a.e7976.ffffe2f6@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82731:a59fb9eb0488 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/a59fb9eb0488/ Log: Merge closed head 345ab9700a5e on branch imrpove-custom-gc-tracing From pypy.commits at gmail.com Fri Mar 4 05:31:39 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:39 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 84f522ad074f on branch dtrace-support Message-ID: <56d9640b.0775c20a.45ba5.ffffeba0@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82732:c55b25e50d3d Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/c55b25e50d3d/ Log: Merge closed head 84f522ad074f on branch dtrace-support From pypy.commits at gmail.com Fri Mar 4 05:31:36 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:36 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 5baf4767a38a on branch numpy-indexing-by-arrays-2 Message-ID: <56d96408.e83cc20a.8b5fd.ffffe3e3@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82730:2740cfaee70c Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/2740cfaee70c/ Log: Merge closed head 5baf4767a38a on branch numpy-indexing-by-arrays-2 From pypy.commits at gmail.com Fri Mar 4 05:31:41 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:41 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head c4178dc96d81 on branch better-log-parser Message-ID: <56d9640d.c13fc20a.4392a.ffffe337@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82733:e3ea2c8fc056 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/e3ea2c8fc056/ Log: Merge closed head c4178dc96d81 on branch better-log-parser From pypy.commits at gmail.com Fri Mar 4 05:31:43 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:43 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head d2bec3ce42fc on branch release-1.8.x Message-ID: <56d9640f.06b01c0a.1c3eb.ffffea50@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82734:8e002f80ef03 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/8e002f80ef03/ Log: Merge 
closed head d2bec3ce42fc on branch release-1.8.x From pypy.commits at gmail.com Fri Mar 4 05:31:45 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:45 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 1cae4524495b on branch shared-by-default Message-ID: <56d96411.c3e01c0a.27d4f.ffffe379@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82735:3ba07c026e2f Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/3ba07c026e2f/ Log: Merge closed head 1cae4524495b on branch shared-by-default From pypy.commits at gmail.com Fri Mar 4 05:31:47 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:47 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 222a0672c649 on branch backend-vector-ops Message-ID: <56d96413.13821c0a.a9acb.ffffe9a3@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82736:dba064ceb7ba Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/dba064ceb7ba/ Log: Merge closed head 222a0672c649 on branch backend-vector-ops From pypy.commits at gmail.com Fri Mar 4 05:31:48 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:48 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 11d22b777219 on branch dead-code-optimization Message-ID: <56d96414.03321c0a.e6476.ffffe1cb@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82737:187602515a23 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/187602515a23/ Log: Merge closed head 11d22b777219 on branch dead-code-optimization From pypy.commits at gmail.com Fri Mar 4 05:31:50 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:50 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head ab28953f1a27 on branch elidable-canfold-exception Message-ID: <56d96416.55031c0a.f199e.ffffe686@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82738:c45bad179662 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/c45bad179662/ Log: Merge closed head ab28953f1a27 on branch elidable-canfold-exception From pypy.commits at gmail.com Fri Mar 4 05:31:52 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:52 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 8e349db6774b on branch resume-refactor Message-ID: <56d96418.838d1c0a.20973.ffffe7a8@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82739:1efd9dc0eaef Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/1efd9dc0eaef/ Log: Merge closed head 8e349db6774b on branch resume-refactor From pypy.commits at gmail.com Fri Mar 4 05:31:54 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:54 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head cbb7ae222f5f on branch gc-minimark-pinning Message-ID: <56d9641a.030f1c0a.9bcf9.fffff016@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82740:cc75cd4d7052 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/cc75cd4d7052/ Log: Merge closed head cbb7ae222f5f on branch gc-minimark-pinning From pypy.commits at gmail.com Fri Mar 4 05:31:55 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:55 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head be1ce0174d9d on branch remove-translator-platform Message-ID: <56d9641b.c96cc20a.89e0a.ffffeabb@mx.google.com> Author: mattip Branch: 
closed-branches Changeset: r82741:a62bb8281979 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/a62bb8281979/ Log: Merge closed head be1ce0174d9d on branch remove-translator-platform From pypy.commits at gmail.com Fri Mar 4 05:31:57 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:57 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head e38d26c3eb0f on branch separate-jit-compilation Message-ID: <56d9641d.aa17c20a.29a87.ffffec32@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82742:5633a5ffd6c1 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/5633a5ffd6c1/ Log: Merge closed head e38d26c3eb0f on branch separate-jit-compilation From pypy.commits at gmail.com Fri Mar 4 05:31:59 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:31:59 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 6cd7a5566fa0 on branch parallel-c-compilation Message-ID: <56d9641f.c74fc20a.1869a.ffffebb5@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82743:b01357cad4ee Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/b01357cad4ee/ Log: Merge closed head 6cd7a5566fa0 on branch parallel-c-compilation From pypy.commits at gmail.com Fri Mar 4 05:32:01 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:32:01 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head bd9f7013f90a on branch share-resume-info-frontend Message-ID: <56d96421.13821c0a.a9acb.ffffe9b7@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82744:53709ebd2cbd Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/53709ebd2cbd/ Log: Merge closed head bd9f7013f90a on branch share-resume-info-frontend From pypy.commits at gmail.com Fri Mar 4 05:32:02 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:32:02 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head e10d5fca406c on branch more-pending-setfields Message-ID: <56d96422.576f1c0a.2185a.ffffe52d@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82745:2bbbbce7fd68 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/2bbbbce7fd68/ Log: Merge closed head e10d5fca406c on branch more-pending-setfields From pypy.commits at gmail.com Fri Mar 4 05:32:04 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:32:04 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head ec377e09522a on branch release-1.9.x Message-ID: <56d96424.03dd1c0a.79ad4.ffffe54e@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82746:7839c8f070fb Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/7839c8f070fb/ Log: Merge closed head ec377e09522a on branch release-1.9.x From pypy.commits at gmail.com Fri Mar 4 05:32:06 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:32:06 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 1af05a376142 on branch result-in-resops Message-ID: <56d96426.c96cc20a.89e0a.ffffeaca@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82747:fe78d1ad4264 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/fe78d1ad4264/ Log: Merge closed head 1af05a376142 on branch result-in-resops From pypy.commits at gmail.com Fri Mar 4 05:32:08 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:32:08 -0800 (PST) Subject: [pypy-commit] pypy 
closed-branches: Merge closed head 65d75e316ae1 on branch faster-blackhole Message-ID: <56d96428.4577c20a.149e9.ffffea2a@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82748:512a5b88f202 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/512a5b88f202/ Log: Merge closed head 65d75e316ae1 on branch faster-blackhole From pypy.commits at gmail.com Fri Mar 4 05:32:09 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:32:09 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 73ac001a5cf3 on branch fix-megamorphic-calls Message-ID: <56d96429.44e21c0a.2a76.69e3@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82749:a1fdb980d355 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/a1fdb980d355/ Log: Merge closed head 73ac001a5cf3 on branch fix-megamorphic-calls From pypy.commits at gmail.com Fri Mar 4 05:32:11 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:32:11 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head c16b22bbf31f on branch more-reusal-of-structures Message-ID: <56d9642b.4577c20a.149e9.ffffea31@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82750:a269165e4a08 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/a269165e4a08/ Log: Merge closed head c16b22bbf31f on branch more-reusal-of-structures From pypy.commits at gmail.com Fri Mar 4 05:32:13 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:32:13 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 192b87cf01e5 on branch release-1.7.x Message-ID: <56d9642d.455e1c0a.9583e.ffffe932@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82751:c9b8cdbdbfa6 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/c9b8cdbdbfa6/ Log: Merge closed head 192b87cf01e5 on branch release-1.7.x From pypy.commits at gmail.com Fri Mar 4 05:32:15 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:32:15 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head b030b1d607ea on branch release-2.0-beta2 Message-ID: <56d9642f.2179c20a.69458.ffffe96f@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82752:434461f43aa6 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/434461f43aa6/ Log: Merge closed head b030b1d607ea on branch release-2.0-beta2 From pypy.commits at gmail.com Fri Mar 4 05:32:16 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:32:16 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head e0575fdcfb2a on branch release-2.0.x Message-ID: <56d96430.d4e41c0a.26ddf.ffffe85c@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82753:4f5470c88bcc Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/4f5470c88bcc/ Log: Merge closed head e0575fdcfb2a on branch release-2.0.x From pypy.commits at gmail.com Fri Mar 4 05:32:18 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:32:18 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head 9eb8b9771f92 on branch release-2.1.x Message-ID: <56d96432.8e811c0a.6a073.ffffe642@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82754:e42f94c26142 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/e42f94c26142/ Log: Merge closed head 9eb8b9771f92 on branch release-2.1.x From pypy.commits at gmail.com Fri Mar 4 05:32:20 2016 From: 
pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:32:20 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: Merge closed head d532e10b0752 on branch release-2.2.x Message-ID: <56d96434.703dc20a.8ac20.ffffe98c@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82755:29996420adc4 Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/29996420adc4/ Log: Merge closed head d532e10b0752 on branch release-2.2.x From pypy.commits at gmail.com Fri Mar 4 05:32:22 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 02:32:22 -0800 (PST) Subject: [pypy-commit] pypy closed-branches: re-close this branch Message-ID: <56d96436.aa17c20a.29a87.ffffec60@mx.google.com> Author: mattip Branch: closed-branches Changeset: r82756:751ba6212e7c Date: 2016-03-04 12:14 +0200 http://bitbucket.org/pypy/pypy/changeset/751ba6212e7c/ Log: re-close this branch From pypy.commits at gmail.com Fri Mar 4 06:53:17 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 03:53:17 -0800 (PST) Subject: [pypy-commit] pypy release-5.x: update versioning Message-ID: <56d9772d.01adc20a.ae5b7.0863@mx.google.com> Author: mattip Branch: release-5.x Changeset: r82757:e52afe5b234f Date: 2016-03-04 13:09 +0200 http://bitbucket.org/pypy/pypy/changeset/e52afe5b234f/ Log: update versioning diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,8 +29,8 @@ #define PY_VERSION "2.7.10" /* PyPy version as a string */ -#define PYPY_VERSION "4.1.0-alpha0" -#define PYPY_VERSION_NUM 0x04010000 +#define PYPY_VERSION "5.0.0" +#define PYPY_VERSION_NUM 0x05000000 /* Defined to mean a PyPy where cpyext holds more regular references to PyObjects, e.g. staying alive as long as the internal PyPy object diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (4, 1, 0, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (5, 0, 0, "final", 0) #XXX # sync patchlevel.h import pypy From pypy.commits at gmail.com Fri Mar 4 06:53:25 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 03:53:25 -0800 (PST) Subject: [pypy-commit] pypy default: add next release to indices Message-ID: <56d97735.d3921c0a.7d988.073f@mx.google.com> Author: mattip Branch: Changeset: r82761:b68cfadb2cb8 Date: 2016-03-04 13:42 +0200 http://bitbucket.org/pypy/pypy/changeset/b68cfadb2cb8/ Log: add next release to indices diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-5.0.0.rst release-4.0.1.rst release-4.0.0.rst release-2.6.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. 
toctree:: whatsnew-head.rst + whatsnew-5.0.0.rst whatsnew-4.0.1.rst whatsnew-4.0.0.rst whatsnew-2.6.1.rst From pypy.commits at gmail.com Fri Mar 4 06:53:21 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 03:53:21 -0800 (PST) Subject: [pypy-commit] pypy default: restart whatsnew, add draft release doc Message-ID: <56d97731.654fc20a.1121.09b0@mx.google.com> Author: mattip Branch: Changeset: r82759:2b76eedfa3f5 Date: 2016-03-04 13:27 +0200 http://bitbucket.org/pypy/pypy/changeset/2b76eedfa3f5/ Log: restart whatsnew, add draft release doc diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-5.0.0.rst @@ -0,0 +1,100 @@ +========== +PyPy 5.0.0 +========== + +We have released PyPy 5.0.0, about three months after PyPy 4.0.0. +We encourage all users of PyPy to update to this version. There are +bug fixes and a major upgrade to our c-api layer (cpyext) + +You can download the PyPy 5.0.0 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + +CFFI +==== + +While not applicable only to PyPy, `cffi`_ is arguably our most significant +contribution to the python ecosystem. PyPy 5.0.0 ships with +`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a c program. + +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`cffi`: https://cffi.readthedocs.org +.. _`cffi-1.5.2`: http://cffi.readthedocs.org/en/latest/whatsnew.html#v1-5-2 +.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html +.. _`numpy`: https://bitbucket.org/pypy/numpy + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. + +This release supports **x86** machines on most common operating systems +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, freebsd), +newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the +big- and little-endian variants of **ppc64** running Linux. + +.. _`pypy and cpython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Other Highlights (since 4.0.1 released in November 2015) +======================================================= + +* Bug Fixes + + * + + * + + * + + * Issues reported with our previous release were resolved_ after reports from users on + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy + +* New features: + + * + + * + + * + +* Numpy: + + * + + * + + +* Performance improvements and refactorings: + + * + + * + + * + +.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html + +Please update, and continue to help us make PyPy better. 
+ +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-5.0.0.rst b/pypy/doc/whatsnew-5.0.0.rst --- a/pypy/doc/whatsnew-5.0.0.rst +++ b/pypy/doc/whatsnew-5.0.0.rst @@ -1,6 +1,6 @@ -========================= -What's new in PyPy 4.1.+ -========================= +======================== +What's new in PyPy 5.0.0 +======================== .. this is a revision shortly after release-4.0.1 .. startrev: 4b5c840d0da2 diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-head.rst @@ -0,0 +1,8 @@ +========================= +What's new in PyPy 5.0.+ +========================= + +.. this is a revision shortly after release-5.0.0 +.. startrev: 6d13e55b962a + + From pypy.commits at gmail.com Fri Mar 4 06:53:23 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 03:53:23 -0800 (PST) Subject: [pypy-commit] pypy default: update contributors - 10 more joined the list Message-ID: <56d97733.455e1c0a.9583e.0a4e@mx.google.com> Author: mattip Branch: Changeset: r82760:5f74174d25dc Date: 2016-03-04 13:38 +0200 http://bitbucket.org/pypy/pypy/changeset/5f74174d25dc/ Log: update contributors - 10 more joined the list diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -11,29 +11,29 @@ Amaury Forgeot d'Arc Antonio Cuni Samuele Pedroni + Matti Picus Alex Gaynor Brian Kearns - Matti Picus Philip Jenvey Michael Hudson + Ronan Lamy David Schneider + Manuel Jacob Holger Krekel Christian Tismer Hakan Ardo - Manuel Jacob - Ronan Lamy Benjamin Peterson + Richard Plangger Anders Chrigstrom Eric van Riet Paap Wim Lavrijsen - Richard Plangger Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen + Remi Meier Lukas Diekmann Sven Hager Anders Lehmann - Remi Meier Aurelien Campeas Niklaus Haldimann Camillo Bruni @@ -42,8 +42,8 @@ Romain Guillebert Leonardo Santagada Seo Sanghyeon + Ronny Pfannschmidt Justin Peel - Ronny Pfannschmidt David Edelsohn Anders Hammarquist Jakub Gustak @@ -65,6 +65,7 @@ Tyler Wade Michael Foord Stephan Diehl + Vincent Legoll Stefan Schwarzer Valentino Volonghi Tomek Meka @@ -75,9 +76,9 @@ Jean-Paul Calderone Timo Paulssen Squeaky + Marius Gedminas Alexandre Fayolle Simon Burton - Marius Gedminas Martin Matusiak Konstantin Lopuhin Wenzhu Man @@ -86,16 +87,20 @@ Ivan Sichmann Freitas Greg Price Dario Bertini + Stefano Rivera Mark Pearse Simon Cross Andreas Stührk - Stefano Rivera + Edd Barrett Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Jeremy Thurgood Paweł Piotr Przeradowski + Spenser Bauman Paul deGrandis Ilya Osadchiy + marky1991 Tobias Oberstein Adrian Kuhn Boris Feigin @@ -104,14 +109,12 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Edd Barrett + Tobias Pape Wanja Saatkamp Gerald Klix Mike Blume - Tobias Pape Oscar Nierstrasz Stefan H. 
Muller - Jeremy Thurgood Rami Chowdhury Eugene Oden Henry Mason @@ -123,6 +126,8 @@ Lukas Renggli Guenter Jantzen Ned Batchelder + Tim Felgentreff + Anton Gulenko Amit Regmi Ben Young Nicolas Chauvat @@ -132,12 +137,12 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira - Tim Felgentreff + Yichao Yu Rocco Moretti Gintautas Miliauskas Michael Twomey Lucian Branescu Mihaila - Yichao Yu + Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -161,33 +166,33 @@ Stanislaw Halik Mikael Schönenberg Berkin Ilbeyi - Elmo M?ntynen + Elmo Mäntynen + Faye Zhao Jonathan David Riehl Anders Qvist Corbin Simpson Chirag Jadwani Beatrice During Alex Perry - Vincent Legoll + Vaibhav Sood Alan McIntyre - Spenser Bauman + William Leslie Alexander Sedov Attila Gobi + Jasper.Schulz Christopher Pope - Devin Jeanpierre - Vaibhav Sood Christian Tismer Marc Abramowitz Dan Stromberg Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan + Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang - Anton Gulenko Gabriel Lukas Vacek Andrew Dalke @@ -195,6 +200,7 @@ Jakub Stasiak Nathan Taylor Vladimir Kryachko + Omer Katz Jacek Generowicz Alejandro J. Cura Jacob Oscarson @@ -209,6 +215,7 @@ Lars Wassermann Philipp Rustemeuer Henrik Vendelbo + Richard Lancaster Dan Buch Miguel de Val Borro Artur Lisiecki @@ -220,18 +227,18 @@ Tomo Cocoa Kim Jin Su Toni Mattis + Amber Brown Lucas Stadler Julian Berman Markus Holtermann roberto at goyle Yury V. Zaytsev Anna Katrina Dominguez - William Leslie Bobby Impollonia - Faye Zhao timo at eistee.fritz.box Andrew Thompson Yusei Tahara + Aaron Tubbs Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado @@ -243,6 +250,7 @@ Christopher Armstrong Michael Hudson-Doyle Anders Sigfridsson + Nikolay Zinov Yasir Suhail Jason Michalski rafalgalczynski at gmail.com @@ -252,6 +260,7 @@ Gustavo Niemeyer Stephan Busemann Rafał Gałczyński + Matt Bogosian Christian Muirhead Berker Peksag James Lan @@ -286,9 +295,9 @@ Stefan Marr jiaaro Mads Kiilerich - Richard Lancaster opassembler.py Antony Lee + Jason Madden Yaroslav Fedevych Jim Hunziker Markus Unterwaditzer @@ -297,6 +306,7 @@ squeaky Zearin soareschen + Jonas Pfannschmidt Kurt Griffiths Mike Bayer Matthew Miller @@ -311,4 +321,3 @@ Julien Phalip Roman Podoliaka Dan Loewenherz - From pypy.commits at gmail.com Fri Mar 4 06:53:19 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 03:53:19 -0800 (PST) Subject: [pypy-commit] pypy default: move whatsnew Message-ID: <56d9772f.c16dc20a.d91b7.036f@mx.google.com> Author: mattip Branch: Changeset: r82758:6d13e55b962a Date: 2016-03-04 13:15 +0200 http://bitbucket.org/pypy/pypy/changeset/6d13e55b962a/ Log: move whatsnew diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-5.0.0.rst rename from pypy/doc/whatsnew-head.rst rename to pypy/doc/whatsnew-5.0.0.rst diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,8 +29,8 @@ #define PY_VERSION "2.7.10" /* PyPy version as a string */ -#define PYPY_VERSION "4.1.0-alpha0" -#define PYPY_VERSION_NUM 0x04010000 +#define PYPY_VERSION "5.1.0-alpha0" +#define PYPY_VERSION_NUM 0x05010000 /* Defined to mean a PyPy where cpyext holds more regular references to PyObjects, e.g. 
staying alive as long as the internal PyPy object diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (4, 1, 0, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (5, 1, 0, "alpha", 0) #XXX # sync patchlevel.h import pypy From pypy.commits at gmail.com Fri Mar 4 06:53:27 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 04 Mar 2016 03:53:27 -0800 (PST) Subject: [pypy-commit] pypy release-5.x: merge default into branch Message-ID: <56d97737.d3921c0a.7d988.0740@mx.google.com> Author: mattip Branch: release-5.x Changeset: r82762:b9d4d54170cf Date: 2016-03-04 13:51 +0200 http://bitbucket.org/pypy/pypy/changeset/b9d4d54170cf/ Log: merge default into branch diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -11,29 +11,29 @@ Amaury Forgeot d'Arc Antonio Cuni Samuele Pedroni + Matti Picus Alex Gaynor Brian Kearns - Matti Picus Philip Jenvey Michael Hudson + Ronan Lamy David Schneider + Manuel Jacob Holger Krekel Christian Tismer Hakan Ardo - Manuel Jacob - Ronan Lamy Benjamin Peterson + Richard Plangger Anders Chrigstrom Eric van Riet Paap Wim Lavrijsen - Richard Plangger Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen + Remi Meier Lukas Diekmann Sven Hager Anders Lehmann - Remi Meier Aurelien Campeas Niklaus Haldimann Camillo Bruni @@ -42,8 +42,8 @@ Romain Guillebert Leonardo Santagada Seo Sanghyeon + Ronny Pfannschmidt Justin Peel - Ronny Pfannschmidt David Edelsohn Anders Hammarquist Jakub Gustak @@ -65,6 +65,7 @@ Tyler Wade Michael Foord Stephan Diehl + Vincent Legoll Stefan Schwarzer Valentino Volonghi Tomek Meka @@ -75,9 +76,9 @@ Jean-Paul Calderone Timo Paulssen Squeaky + Marius Gedminas Alexandre Fayolle Simon Burton - Marius Gedminas Martin Matusiak Konstantin Lopuhin Wenzhu Man @@ -86,16 +87,20 @@ Ivan Sichmann Freitas Greg Price Dario Bertini + Stefano Rivera Mark Pearse Simon Cross Andreas Stührk - Stefano Rivera + Edd Barrett Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Jeremy Thurgood Paweł Piotr Przeradowski + Spenser Bauman Paul deGrandis Ilya Osadchiy + marky1991 Tobias Oberstein Adrian Kuhn Boris Feigin @@ -104,14 +109,12 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Edd Barrett + Tobias Pape Wanja Saatkamp Gerald Klix Mike Blume - Tobias Pape Oscar Nierstrasz Stefan H. 
Muller - Jeremy Thurgood Rami Chowdhury Eugene Oden Henry Mason @@ -123,6 +126,8 @@ Lukas Renggli Guenter Jantzen Ned Batchelder + Tim Felgentreff + Anton Gulenko Amit Regmi Ben Young Nicolas Chauvat @@ -132,12 +137,12 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira - Tim Felgentreff + Yichao Yu Rocco Moretti Gintautas Miliauskas Michael Twomey Lucian Branescu Mihaila - Yichao Yu + Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -161,33 +166,33 @@ Stanislaw Halik Mikael Schönenberg Berkin Ilbeyi - Elmo M?ntynen + Elmo Mäntynen + Faye Zhao Jonathan David Riehl Anders Qvist Corbin Simpson Chirag Jadwani Beatrice During Alex Perry - Vincent Legoll + Vaibhav Sood Alan McIntyre - Spenser Bauman + William Leslie Alexander Sedov Attila Gobi + Jasper.Schulz Christopher Pope - Devin Jeanpierre - Vaibhav Sood Christian Tismer Marc Abramowitz Dan Stromberg Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan + Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang - Anton Gulenko Gabriel Lukas Vacek Andrew Dalke @@ -195,6 +200,7 @@ Jakub Stasiak Nathan Taylor Vladimir Kryachko + Omer Katz Jacek Generowicz Alejandro J. Cura Jacob Oscarson @@ -209,6 +215,7 @@ Lars Wassermann Philipp Rustemeuer Henrik Vendelbo + Richard Lancaster Dan Buch Miguel de Val Borro Artur Lisiecki @@ -220,18 +227,18 @@ Tomo Cocoa Kim Jin Su Toni Mattis + Amber Brown Lucas Stadler Julian Berman Markus Holtermann roberto at goyle Yury V. Zaytsev Anna Katrina Dominguez - William Leslie Bobby Impollonia - Faye Zhao timo at eistee.fritz.box Andrew Thompson Yusei Tahara + Aaron Tubbs Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado @@ -243,6 +250,7 @@ Christopher Armstrong Michael Hudson-Doyle Anders Sigfridsson + Nikolay Zinov Yasir Suhail Jason Michalski rafalgalczynski at gmail.com @@ -252,6 +260,7 @@ Gustavo Niemeyer Stephan Busemann Rafał Gałczyński + Matt Bogosian Christian Muirhead Berker Peksag James Lan @@ -286,9 +295,9 @@ Stefan Marr jiaaro Mads Kiilerich - Richard Lancaster opassembler.py Antony Lee + Jason Madden Yaroslav Fedevych Jim Hunziker Markus Unterwaditzer @@ -297,6 +306,7 @@ squeaky Zearin soareschen + Jonas Pfannschmidt Kurt Griffiths Mike Bayer Matthew Miller @@ -311,4 +321,3 @@ Julien Phalip Roman Podoliaka Dan Loewenherz - diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-5.0.0.rst release-4.0.1.rst release-4.0.0.rst release-2.6.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. toctree:: whatsnew-head.rst + whatsnew-5.0.0.rst whatsnew-4.0.1.rst whatsnew-4.0.0.rst whatsnew-2.6.1.rst diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-5.0.0.rst @@ -0,0 +1,100 @@ +========== +PyPy 5.0.0 +========== + +We have released PyPy 5.0.0, about three months after PyPy 4.0.0. +We encourage all users of PyPy to update to this version. There are +bug fixes and a major upgrade to our c-api layer (cpyext) + +You can download the PyPy 5.0.0 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors and +encourage new people to join the project. 
PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + +CFFI +==== + +While not applicable only to PyPy, `cffi`_ is arguably our most significant +contribution to the python ecosystem. PyPy 5.0.0 ships with +`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a c program. + +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`cffi`: https://cffi.readthedocs.org +.. _`cffi-1.5.2`: http://cffi.readthedocs.org/en/latest/whatsnew.html#v1-5-2 +.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html +.. _`numpy`: https://bitbucket.org/pypy/numpy + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. + +This release supports **x86** machines on most common operating systems +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, freebsd), +newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the +big- and little-endian variants of **ppc64** running Linux. + +.. _`pypy and cpython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Other Highlights (since 4.0.1 released in November 2015) +======================================================= + +* Bug Fixes + + * + + * + + * + + * Issues reported with our previous release were resolved_ after reports from users on + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy + +* New features: + + * + + * + + * + +* Numpy: + + * + + * + + +* Performance improvements and refactorings: + + * + + * + + * + +.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html + +Please update, and continue to help us make PyPy better. + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-5.0.0.rst copy from pypy/doc/whatsnew-head.rst copy to pypy/doc/whatsnew-5.0.0.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-5.0.0.rst @@ -1,6 +1,6 @@ -========================= -What's new in PyPy 4.1.+ -========================= +======================== +What's new in PyPy 5.0.0 +======================== .. this is a revision shortly after release-4.0.1 .. startrev: 4b5c840d0da2 diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,193 +1,8 @@ ========================= -What's new in PyPy 4.1.+ +What's new in PyPy 5.0.+ ========================= -.. this is a revision shortly after release-4.0.1 -.. startrev: 4b5c840d0da2 +.. this is a revision shortly after release-5.0.0 +.. startrev: 6d13e55b962a -Fixed ``_PyLong_FromByteArray()``, which was buggy. -Fixed a crash with stacklets (or greenlets) on non-Linux machines -which showed up if you forget stacklets without resuming them. - -.. branch: numpy-1.10 - -Fix tests to run cleanly with -A and start to fix micronumpy for upstream numpy -which is now 1.10.2 - -.. branch: osx-flat-namespace - -Fix the cpyext tests on OSX by linking with -flat_namespace - -.. branch: anntype - -Refactor and improve exception analysis in the annotator. - -.. 
branch: posita/2193-datetime-timedelta-integrals - -Fix issue #2193. ``isinstance(..., int)`` => ``isinstance(..., numbers.Integral)`` -to allow for alternate ``int``-like implementations (e.g., ``future.types.newint``) - -.. branch: faster-rstruct - -Improve the performace of struct.unpack, which now directly reads inside the -string buffer and directly casts the bytes to the appropriate type, when -allowed. Unpacking of floats and doubles is about 15 times faster now, while -for integer types it's up to ~50% faster for 64bit integers. - -.. branch: wrap-specialisation - -Remove unnecessary special handling of space.wrap(). - -.. branch: compress-numbering - -Improve the memory signature of numbering instances in the JIT. This should massively -decrease the amount of memory consumed by the JIT, which is significant for most programs. - -.. branch: fix-trace-too-long-heuristic - -Improve the heuristic when disable trace-too-long - -.. branch: fix-setslice-can-resize - -Make rlist's ll_listsetslice() able to resize the target list to help -simplify objspace/std/listobject.py. Was issue #2196. - -.. branch: anntype2 - -A somewhat random bunch of changes and fixes following up on branch 'anntype'. Highlights: - -- Implement @doubledispatch decorator and use it for intersection() and difference(). - -- Turn isinstance into a SpaceOperation - -- Create a few direct tests of the fundamental annotation invariant in test_model.py - -- Remove bookkeeper attribute from DictDef and ListDef. - -.. branch: cffi-static-callback - -.. branch: vecopt-absvalue - -- Enhancement. Removed vector fields from AbstractValue. - -.. branch: memop-simplify2 - -Simplification. Backends implement too many loading instructions, only having a slightly different interface. -Four new operations (gc_load/gc_load_indexed, gc_store/gc_store_indexed) replace all the -commonly known loading operations - -.. branch: more-rposix - -Move wrappers for OS functions from `rpython/rtyper` to `rpython/rlib` and -turn them into regular RPython functions. Most RPython-compatible `os.*` -functions are now directly accessible as `rpython.rposix.*`. - -.. branch: always-enable-gil - -Simplify a bit the GIL handling in non-jitted code. Fixes issue #2205. - -.. branch: flowspace-cleanups - -Trivial cleanups in flowspace.operation : fix comment & duplicated method - -.. branch: test-AF_NETLINK - -Add a test for pre-existing AF_NETLINK support. Was part of issue #1942. - -.. branch: small-cleanups-misc - -Trivial misc cleanups: typo, whitespace, obsolete comments - -.. branch: cpyext-slotdefs -.. branch: fix-missing-canraise -.. branch: whatsnew - -.. branch: fix-2211 - -Fix the cryptic exception message when attempting to use extended slicing -in rpython. Was issue #2211. - -.. branch: ec-keepalive - -Optimize the case where, in a new C-created thread, we keep invoking -short-running Python callbacks. (CFFI on CPython has a hack to achieve -the same result.) This can also be seen as a bug fix: previously, -thread-local objects would be reset between two such calls. - -.. branch: globals-quasiimmut - -Optimize global lookups. - -.. branch: cffi-static-callback-embedding - -Updated to CFFI 1.5, which supports a new way to do embedding. -Deprecates http://pypy.readthedocs.org/en/latest/embedding.html. - -.. branch: fix-cpython-ssl-tests-2.7 - -Fix SSL tests by importing cpython's patch - - -.. branch: remove-getfield-pure - -Remove pure variants of ``getfield_gc_*`` operations from the JIT. 
Relevant -optimizations instead consult the field descriptor to determine the purity of -the operation. Additionally, pure ``getfield`` operations are now handled -entirely by `rpython/jit/metainterp/optimizeopt/heap.py` rather than -`rpython/jit/metainterp/optimizeopt/pure.py`, which can result in better codegen -for traces containing a large number of pure getfield operations. - -.. branch: exctrans - -Try to ensure that no new functions get annotated during the 'source_c' phase. -Refactor sandboxing to operate at a higher level. - -.. branch: cpyext-bootstrap - -.. branch: vmprof-newstack - -Refactor vmprof to work cross-operating-system. - -.. branch: seperate-strucmember_h - -Seperate structmember.h from Python.h Also enhance creating api functions -to specify which header file they appear in (previously only pypy_decl.h) - -.. branch: llimpl - -Refactor register_external(), remove running_on_llinterp mechanism and -apply sandbox transform on externals at the end of annotation. - -.. branch: cffi-embedding-win32 - -.. branch: windows-vmprof-support - -vmprof should work on Windows. - - -.. branch: reorder-map-attributes - -When creating instances and adding attributes in several different orders -depending on some condition, the JIT would create too much code. This is now -fixed. - -.. branch: cpyext-gc-support-2 - -Improve CPython C API support, which means lxml now runs unmodified -(after removing pypy hacks, pending pull request) - -.. branch: look-inside-tuple-hash - -Look inside tuple hash, improving mdp benchmark - -.. branch: vlen-resume - -Compress resume data, saving 10-20% of memory consumed by the JIT - -.. branch: issue-2248 - -.. branch: ndarray-setitem-filtered - -Fix boolean-array indexing in micronumpy - From pypy.commits at gmail.com Fri Mar 4 08:14:02 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 04 Mar 2016 05:14:02 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: pass the first tests of optimizeopt, yay! Message-ID: <56d98a1a.2a6ec20a.dabb2.2a9d@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82763:b36f4652488a Date: 2016-03-04 15:13 +0200 http://bitbucket.org/pypy/pypy/changeset/b36f4652488a/ Log: pass the first tests of optimizeopt, yay! 
diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -1,10 +1,11 @@ -from rpython.jit.metainterp.history import ConstInt, Const, AbstractDescr,\ - AbstractValue +""" Storage format: +""" + +from rpython.jit.metainterp.history import ConstInt, Const from rpython.jit.metainterp.resoperation import AbstractResOp, AbstractInputArg,\ - ResOperation, oparity, opname, rop, ResOperation, opwithdescr + ResOperation, oparity, rop, opwithdescr from rpython.rlib.rarithmetic import intmask -from rpython.jit.metainterp import resume from rpython.rlib.objectmodel import we_are_translated TAGINT, TAGCONST, TAGBOX = range(3) @@ -20,7 +21,12 @@ self.trace = main_iter.trace self.main_iter = main_iter self.end = end_pos + self.start = pos self.pos = pos + self.save_pos = -1 + + def length(self): + return self.end - self.start def done(self): return self.pos >= self.end @@ -36,7 +42,16 @@ return r def get_size_jitcode_pc(self): - return self._next(), self._next(), self._next() + if self.save_pos >= 0: + self.pos = self.save_pos + size = self._next() + if size < 0: + self.save_pos = self.pos + self.pos = -size - 1 + assert self.pos >= 0 + size = self._next() + assert size >= 0 + return size, self._next(), self._next() class TraceIterator(object): def __init__(self, trace, end): @@ -204,6 +219,10 @@ assert isinstance(prev, Sentinel) self._ops[p] = len(self._ops) + def check_snapshot_jitcode_pc(self, jitcode, pc, resumedata_pos): + assert self._ops[resumedata_pos + 1] == jitcode.index + assert self._ops[resumedata_pos + 2] == pc + def get_iter(self): return TraceIterator(self, len(self._ops)) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -27,7 +27,7 @@ class BasicLoopInfo(LoopInfo): def __init__(self, inputargs, quasi_immutable_deps): self.inputargs = inputargs - self.label_op = ResOperation(rop.LABEL, inputargs) + self.label_op = ResOperation(rop.LABEL, inputargs, -1) self.quasi_immutable_deps = quasi_immutable_deps self.extra_same_as = [] @@ -509,30 +509,24 @@ trace = trace.get_iter() self.trace = trace self.call_pure_results = call_pure_results + last_op = None + i = 0 while not trace.done(): self._really_emitted_operation = None op = trace.next() if op.getopnum() in (rop.FINISH, rop.JUMP): - xxx - self.first_optimization.propagate_forward(trace.next()) - xxxx - if ops[-1].getopnum() in (rop.FINISH, rop.JUMP): - last = len(ops) - 1 - extra_jump = True - else: - extra_jump = False - last = len(ops) - for i in range(last): - self._really_emitted_operation = None - self.first_optimization.propagate_forward(ops[i]) + last_op = op + break + self.first_optimization.propagate_forward(op) + i += 1 # accumulate counters if flush: self.flush() - if extra_jump: - self.first_optimization.propagate_forward(ops[-1]) + if last_op: + self.first_optimization.propagate_forward(last_op) self.resumedata_memo.update_counters(self.metainterp_sd.profiler) - return (BasicLoopInfo(newargs, self.quasi_immutable_deps), + return (BasicLoopInfo(trace.inputargs, self.quasi_immutable_deps), self._newoperations) def _clean_optimization_info(self, lst): diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ 
-49,7 +49,6 @@ return None def lookup(self, optimizer, op): - return None numargs = op.numargs() if numargs == 1: return self.lookup1(optimizer, @@ -76,8 +75,8 @@ dispatch_opt(self, op) def optimize_default(self, op): - canfold = OpHelpers.is_always_pure(op.opnum) - if OpHelpers.is_ovf(op.opnum): + canfold = op.is_always_pure() + if op.is_ovf(): self.postponed_op = op return if self.postponed_op: @@ -91,7 +90,7 @@ save = False if canfold: for i in range(op.numargs()): - if not self.optimizer.is_constant(op.getarg(i)): + if self.get_constant_box(op.getarg(i)) is None: break else: # all constant arguments: constant-fold away @@ -207,7 +206,7 @@ def pure_from_args(self, opnum, args, op, descr=None): newop = ResOperation(opnum, [self.get_box_replacement(arg) for arg in args], - descr=descr) + -1, descr=descr) newop.set_forwarded(op) self.pure(opnum, newop) @@ -222,7 +221,7 @@ def produce_potential_short_preamble_ops(self, sb): ops = self.optimizer._newoperations for i, op in enumerate(ops): - if OpHelpers.is_always_pure(op.opnum): + if op.is_always_pure(): sb.add_pure_op(op) if op.is_ovf() and ops[i + 1].getopnum() == rop.GUARD_NO_OVERFLOW: sb.add_pure_op(op) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -35,7 +35,7 @@ compile_data = compile.SimpleCompileData(label_op, trace, call_pure_results) info, ops = self._do_optimize_loop(compile_data) - label_op = ResOperation(rop.LABEL, info.inputargs) + label_op = ResOperation(rop.LABEL, info.inputargs, -1) loop.inputargs = info.inputargs loop.operations = [label_op] + ops #print '\n'.join([str(o) for o in loop.operations]) diff --git a/rpython/jit/metainterp/optimizeopt/util.py b/rpython/jit/metainterp/optimizeopt/util.py --- a/rpython/jit/metainterp/optimizeopt/util.py +++ b/rpython/jit/metainterp/optimizeopt/util.py @@ -154,7 +154,7 @@ if op1.type != 'v': remap[op2] = op1 if (op1.getopnum() not in [rop.JUMP, rop.LABEL, rop.FINISH] and - not OpHelpers.is_guard(op1.getopnum())): + not rop.is_guard(op1.getopnum())): assert op1.getdescr() == op2.getdescr() if op1.getfailargs() or op2.getfailargs(): assert len(op1.getfailargs()) == len(op2.getfailargs()) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -396,7 +396,16 @@ except KeyError: return '<%d>' % self.getopnum() - # XXX kill all those in favor of ophelpers + def is_guard(self): + return rop.is_guard(self.getopnum()) + + def is_ovf(self): + return rop.is_ovf(self.getopnum()) + + def can_raise(self): + return rop.can_raise(self.getopnum()) + + # XXX fix def is_foldable_guard(self): return rop._GUARD_FOLDABLE_FIRST <= self.getopnum() <= rop._GUARD_FOLDABLE_LAST @@ -1430,6 +1439,14 @@ xxxx @staticmethod + def is_pure_getfield(opnum, descr): + if (opnum == rop.GETFIELD_GC_I or + opnum == rop.GETFIELD_GC_F or + opnum == rop.GETFIELD_GC_R): + return descr is not None and descr.is_always_pure() + return False + + @staticmethod def has_no_side_effect(opnum): return rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -115,6 +115,7 @@ def _ensure_parent_resumedata(framestack, n, 
t): if n == 0: return + _ensure_parent_resumedata(framestack, n - 1, t) target = framestack[n] back = framestack[n - 1] if target.parent_resumedata_position != -1: @@ -124,7 +125,6 @@ return pos = t.record_snapshot(back.jitcode, back.pc, back.get_list_of_active_boxes(True)) - _ensure_parent_resumedata(framestack, n - 1, t) target.parent_resumedata_position = pos def capture_resumedata(framestack, virtualizable_boxes, virtualref_boxes, t): @@ -138,9 +138,9 @@ if n >= 0: top = framestack[n] pos = t.get_patchable_position() + _ensure_parent_resumedata(framestack, n, t) t.record_snapshot(top.jitcode, top.pc, top.get_list_of_active_boxes(False)) - _ensure_parent_resumedata(framestack, n, t) t.patch_position_to_current(pos) else: yyy @@ -196,22 +196,13 @@ TAG_CONST_OFFSET = 0 class NumberingState(object): - def __init__(self, snapshot_list): + def __init__(self, size): self.liveboxes = {} - self.current = [rffi.cast(rffi.SHORT, 0)] * self.count_boxes(snapshot_list) - self.position = len(self.current) + self.current = [rffi.cast(rffi.SHORT, 0)] * (size + 2) + self.position = 0 self.n = 0 self.v = 0 - def count_boxes(self, lst): - snapshot = lst[0] - assert isinstance(snapshot, TopSnapshot) - c = len(snapshot.vable_boxes) - for snapshot in lst: - c += len(snapshot.boxes) - c += 2 * (len(lst) - 1) + 1 + 1 - return c - def append(self, item): self.current[self.position] = item self.position += 1 @@ -267,15 +258,14 @@ # env numbering - def _number_boxes(self, boxes, optimizer, state): + def _number_boxes(self, iter, length, optimizer, state): """ Number boxes from one snapshot """ n = state.n v = state.v liveboxes = state.liveboxes - length = len(boxes) for i in range(length): - box = boxes[i] + box = iter.next() box = optimizer.get_box_replacement(box) if isinstance(box, Const): @@ -302,32 +292,33 @@ state.v = v def number(self, optimizer, position, trace): - state = NumberingState(snapshot_list) + snapshot_iter = trace.get_snapshot_iter(position) + state = NumberingState(snapshot_iter.length()) + while not snapshot_iter.done(): + size, jitcode_index, pc = snapshot_iter.get_size_jitcode_pc() + state.append(rffi.cast(rffi.SHORT, jitcode_index)) + state.append(rffi.cast(rffi.SHORT, pc)) + self._number_boxes(snapshot_iter, size, optimizer, state) - # we want to number snapshots starting from the back, but ending - # with a forward list - for i in range(len(snapshot_list) - 1, 0, -1): - state.position -= len(snapshot_list[i].boxes) + 2 - frameinfo = framestack_list[i - 1] - jitcode_pos, pc = unpack_uint(frameinfo.packed_jitcode_pc) - state.append(rffi.cast(rffi.SHORT, jitcode_pos)) - state.append(rffi.cast(rffi.SHORT, pc)) - self._number_boxes(snapshot_list[i].boxes, optimizer, state) - state.position -= len(snapshot_list[i].boxes) + 2 - - assert isinstance(topsnapshot, TopSnapshot) - special_boxes_size = (1 + len(topsnapshot.vable_boxes) + - 1 + len(topsnapshot.boxes)) - assert state.position == special_boxes_size - - state.position = 0 - state.append(rffi.cast(rffi.SHORT, len(topsnapshot.vable_boxes))) - self._number_boxes(topsnapshot.vable_boxes, optimizer, state) - n = len(topsnapshot.boxes) + state.append(rffi.cast(rffi.SHORT, 0)) + n = 0 # len(topsnapshot.boxes) assert not (n & 1) state.append(rffi.cast(rffi.SHORT, n >> 1)) - self._number_boxes(topsnapshot.boxes, optimizer, state) - assert state.position == special_boxes_size + # + # XXX ignore vables and virtualrefs for now + #assert isinstance(topsnapshot, TopSnapshot) + #special_boxes_size = (1 + len(topsnapshot.vable_boxes) + + # 1 + 
len(topsnapshot.boxes)) + #assert state.position == special_boxes_size + + #state.position = 0 + #state.append(rffi.cast(rffi.SHORT, len(topsnapshot.vable_boxes))) + #self._number_boxes(topsnapshot.vable_boxes, optimizer, state) + #n = len(topsnapshot.boxes) + #assert not (n & 1) + #state.append(rffi.cast(rffi.SHORT, n >> 1)) + #self._number_boxes(topsnapshot.boxes, optimizer, state) + #assert state.position == special_boxes_size numb = resumecode.create_numbering(state.current) return numb, state.liveboxes, state.v @@ -476,11 +467,10 @@ assert resume_position > 0 # count stack depth numb, liveboxes_from_env, v = self.memo.number(optimizer, - resume_position, self.optimize.trace) + resume_position, self.optimizer.trace) self.liveboxes_from_env = liveboxes_from_env self.liveboxes = {} storage.rd_numb = numb - self.snapshot_storage.rd_snapshot = None # collect liveboxes and virtuals n = len(liveboxes_from_env) - v diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -77,7 +77,7 @@ assert boxes == [i0, i1] assert l[2].opnum == rop.GUARD_FALSE boxes = self.unpack_snapshot(iter, l[2].rd_resume_position) - assert boxes == [i0, i0, l[0], i0, i1] + assert boxes == [i0, i1, i0, i0, l[0]] def test_read_snapshot_interface(self): i0, i1, i2 = InputArgInt(), InputArgInt(), InputArgInt() @@ -86,16 +86,26 @@ frame0 = FakeFrame(1, JitCode(2), [i0, i1]) frame1 = FakeFrame(3, JitCode(4), [i2, i2]) resume.capture_resumedata([frame0, frame1], None, [], t) + t.record_op(rop.GUARD_TRUE, [i1]) + resume.capture_resumedata([frame0, frame1], None, [], t) (i0, i1, i2), l, iter = self.unpack(t) pos = l[0].rd_resume_position snapshot_iter = iter.get_snapshot_iter(pos) size, jc_index, pc = snapshot_iter.get_size_jitcode_pc() assert size == 2 + assert jc_index == 2 + assert pc == 1 + assert [snapshot_iter.next() for i in range(2)] == [i0, i1] + size, jc_index, pc = snapshot_iter.get_size_jitcode_pc() + assert size == 2 assert jc_index == 4 assert pc == 3 assert [snapshot_iter.next() for i in range(2)] == [i2, i2] + pos = l[1].rd_resume_position + snapshot_iter = iter.get_snapshot_iter(pos) size, jc_index, pc = snapshot_iter.get_size_jitcode_pc() assert size == 2 assert jc_index == 2 assert pc == 1 assert [snapshot_iter.next() for i in range(2)] == [i0, i1] + \ No newline at end of file From pypy.commits at gmail.com Fri Mar 4 09:41:30 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 04 Mar 2016 06:41:30 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: an example simple strategy of list of operations Message-ID: <56d99e9a.046f1c0a.68c06.4850@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82764:9877c87c497d Date: 2016-03-04 16:40 +0200 http://bitbucket.org/pypy/pypy/changeset/9877c87c497d/ Log: an example simple strategy of list of operations diff --git a/rpython/jit/metainterp/test/strategies.py b/rpython/jit/metainterp/test/strategies.py --- a/rpython/jit/metainterp/test/strategies.py +++ b/rpython/jit/metainterp/test/strategies.py @@ -1,7 +1,7 @@ import sys from hypothesis import strategies -from rpython.jit.metainterp.resoperation import InputArgInt +from rpython.jit.metainterp.resoperation import InputArgInt, ResOperation, rop from rpython.jit.metainterp.history import ConstInt machine_ints = strategies.integers(min_value=-sys.maxint - 1, @@ -10,4 +10,28 @@ intconsts = strategies.builds(ConstInt, machine_ints) boxes = 
intboxes | intconsts boxlists = strategies.lists(boxes, min_size=1).flatmap( - lambda cis: strategies.lists(strategies.sampled_from(cis))) \ No newline at end of file + lambda cis: strategies.lists(strategies.sampled_from(cis))) + + at strategies.composite +def lists_of_operations(draw, inputboxes): + def get(l1, l2, index): + if index < len(l1): + return l1[index] + return l2[index - len(l1)] + + size = draw(strategies.integers(min_value=1, max_value=100)) + inputargs = [] + for i in range(size): + inputargs.append(draw(inputboxes)) + size = draw(strategies.integers(min_value=1, max_value=100)) + ops = [] + for i in range(size): + s = strategies.integers(min_value=0, max_value=len(inputargs) + len(ops) - 1) + arg0 = get(inputargs, ops, draw(s)) + arg1 = get(inputargs, ops, draw(s)) + ops.append(ResOperation(rop.INT_ADD, [arg0, arg1], -1)) + return ops + +if __name__ == '__main__': + import pprint + pprint.pprint(lists_of_operations(intboxes).example()) \ No newline at end of file diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -108,4 +108,3 @@ assert jc_index == 2 assert pc == 1 assert [snapshot_iter.next() for i in range(2)] == [i0, i1] - \ No newline at end of file From pypy.commits at gmail.com Fri Mar 4 09:45:25 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 04 Mar 2016 06:45:25 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: add consts Message-ID: <56d99f85.02931c0a.c76a.4abf@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82765:eb646706d3c4 Date: 2016-03-04 16:43 +0200 http://bitbucket.org/pypy/pypy/changeset/eb646706d3c4/ Log: add consts diff --git a/rpython/jit/metainterp/test/strategies.py b/rpython/jit/metainterp/test/strategies.py --- a/rpython/jit/metainterp/test/strategies.py +++ b/rpython/jit/metainterp/test/strategies.py @@ -14,10 +14,13 @@ @strategies.composite def lists_of_operations(draw, inputboxes): - def get(l1, l2, index): + def get(draw, l1, l2, index): if index < len(l1): return l1[index] - return l2[index - len(l1)] + index -= len(l1) + if index >= len(l2): + return draw(intconsts) + return l2[index] size = draw(strategies.integers(min_value=1, max_value=100)) inputargs = [] @@ -26,9 +29,9 @@ size = draw(strategies.integers(min_value=1, max_value=100)) ops = [] for i in range(size): - s = strategies.integers(min_value=0, max_value=len(inputargs) + len(ops) - 1) - arg0 = get(inputargs, ops, draw(s)) - arg1 = get(inputargs, ops, draw(s)) + s = strategies.integers(min_value=0, max_value=len(inputargs) + 2 * len(ops)) + arg0 = get(draw, inputargs, ops, draw(s)) + arg1 = get(draw, inputargs, ops, draw(s)) ops.append(ResOperation(rop.INT_ADD, [arg0, arg1], -1)) return ops From pypy.commits at gmail.com Fri Mar 4 09:51:54 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 04 Mar 2016 06:51:54 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: use hypothesis and find a bug Message-ID: <56d9a10a.42121c0a.1215e.4d7a@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82766:9030d27af66e Date: 2016-03-04 16:51 +0200 http://bitbucket.org/pypy/pypy/changeset/9030d27af66e/ Log: use hypothesis and find a bug diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py 
@@ -520,7 +520,8 @@ guard.rd_snapshot = resume.TopSnapshot(None, [], []) res.operations.insert(-1, guard) - def assert_equal(self, optimized, expected, text_right=None): + @staticmethod + def assert_equal(optimized, expected, text_right=None): from rpython.jit.metainterp.optimizeopt.util import equaloplists assert len(optimized.inputargs) == len(expected.inputargs) remap = {} diff --git a/rpython/jit/metainterp/test/strategies.py b/rpython/jit/metainterp/test/strategies.py --- a/rpython/jit/metainterp/test/strategies.py +++ b/rpython/jit/metainterp/test/strategies.py @@ -13,7 +13,7 @@ lambda cis: strategies.lists(strategies.sampled_from(cis))) @strategies.composite -def lists_of_operations(draw, inputboxes): +def lists_of_operations(draw, inputboxes=intboxes): def get(draw, l1, l2, index): if index < len(l1): return l1[index] @@ -33,7 +33,7 @@ arg0 = get(draw, inputargs, ops, draw(s)) arg1 = get(draw, inputargs, ops, draw(s)) ops.append(ResOperation(rop.INT_ADD, [arg0, arg1], -1)) - return ops + return inputargs, ops if __name__ == '__main__': import pprint diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -4,6 +4,10 @@ from rpython.jit.metainterp.history import ConstInt from rpython.jit.metainterp.optimizeopt.optimizer import Optimizer from rpython.jit.metainterp import resume +from rpython.jit.metainterp.test.strategies import lists_of_operations +from rpython.jit.metainterp.optimizeopt.test.test_util import BaseTest +from rpython.jit.metainterp.history import TreeLoop +from hypothesis import given class JitCode(object): def __init__(self, index): @@ -108,3 +112,18 @@ assert jc_index == 2 assert pc == 1 assert [snapshot_iter.next() for i in range(2)] == [i0, i1] + + @given(lists_of_operations()) + def test_random_snapshot(self, lst): + inputargs, ops = lst + t = Trace(inputargs) + for op in ops: + t.record_op(op.getopnum(), op.getarglist()) + inpargs, l, iter = self.unpack(t) + loop1 = TreeLoop("loop1") + loop1.inputargs = inputargs + loop1.operations = ops + loop2 = TreeLoop("loop2") + loop2.inputargs = inpargs + loop2.operations = l + BaseTest.assert_equal(loop1, loop2) \ No newline at end of file From pypy.commits at gmail.com Fri Mar 4 09:53:52 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 04 Mar 2016 06:53:52 -0800 (PST) Subject: [pypy-commit] pypy bigint-with-int-ops: close branch that was merged long ago Message-ID: <56d9a180.a3f6c20a.82b86.520b@mx.google.com> Author: Carl Friedrich Bolz Branch: bigint-with-int-ops Changeset: r82767:23e336d3e045 Date: 2016-03-04 15:53 +0100 http://bitbucket.org/pypy/pypy/changeset/23e336d3e045/ Log: close branch that was merged long ago From pypy.commits at gmail.com Fri Mar 4 09:59:09 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 04 Mar 2016 06:59:09 -0800 (PST) Subject: [pypy-commit] pypy gcstress-hypothesis: added some tests to ensure bytecode generation considers the top elements on the stack Message-ID: <56d9a2bd.918e1c0a.199e5.52e6@mx.google.com> Author: Richard Plangger Branch: gcstress-hypothesis Changeset: r82769:13234c967ffc Date: 2016-03-04 10:40 +0100 http://bitbucket.org/pypy/pypy/changeset/13234c967ffc/ Log: added some tests to ensure bytecode generation considers the top elements on the stack diff --git a/rpython/jit/backend/llsupport/tl/code.py b/rpython/jit/backend/llsupport/tl/code.py --- a/rpython/jit/backend/llsupport/tl/code.py +++ 
b/rpython/jit/backend/llsupport/tl/code.py @@ -10,6 +10,14 @@ pt = getattr(self.__init__, '_param_types', []) return self(*[draw(get_strategy_for(t)) for t in pt]) + def filter_bytecode(self, stack): + """ filter this byte code if the stack does + not contain the right values on the stack. + This should only be used for values hypothesis + cannot forsee (like list manipulation) + """ + return False + _c = 0 LIST_TYP = 'l' @@ -154,6 +162,12 @@ BYTE_CODE = unique_code() def __init__(self): pass + def filter_bytecode(self, stack): + w_idx = stack.peek(1) + w_list = stack.peek(2) + if w_idx.value >= len(w_list.items): + return True + return False @requires_stack(LIST_TYP, IDX_TYP) @leaves_on_stack(LIST_TYP) @@ -161,6 +175,12 @@ BYTE_CODE = unique_code() def __init__(self): pass + def filter_bytecode(self, stack): + w_idx = stack.peek(0) + w_list = stack.peek(1) + if w_idx.value >= len(w_list.items): + return True + return False @requires_stack(LIST_TYP, INT_TYP) # TODO VAL_TYP) @leaves_on_stack(LIST_TYP) @@ -169,6 +189,8 @@ def __init__(self): pass +def op_modifies_list(clazz): + return clazz in (DelList, InsertList) # remove comment one by one! diff --git a/rpython/jit/backend/llsupport/tl/interp.py b/rpython/jit/backend/llsupport/tl/interp.py --- a/rpython/jit/backend/llsupport/tl/interp.py +++ b/rpython/jit/backend/llsupport/tl/interp.py @@ -8,6 +8,8 @@ pass class W_ListObject(W_Root): + TYPE = code.LIST_TYP + def __init__(self, items): self.items = items @@ -17,9 +19,11 @@ def is_of_type(self, type): """ NOT_RPYTHON """ - return type in (LIST_TYP,) + return type in (code.LIST_TYP,) class W_IntObject(W_Root): + TYPE = code.INT_TYP + def __init__(self, value): self.value = value @@ -37,6 +41,8 @@ code.BYTE_TYP) class W_StrObject(W_Root): + TYPE = code.STR_TYP + def __init__(self, value): self.value = value @@ -79,8 +85,6 @@ def entry_point(argv): bytecode = _read_bytecode_from_file(argv[1]) consts = _read_consts_from_file(argv[2]) - print(bytecode) - print(consts) pc = 0 end = len(bytecode) stack = Stack(16) diff --git a/rpython/jit/backend/llsupport/tl/stack.py b/rpython/jit/backend/llsupport/tl/stack.py --- a/rpython/jit/backend/llsupport/tl/stack.py +++ b/rpython/jit/backend/llsupport/tl/stack.py @@ -19,6 +19,13 @@ def size(self): return self.stackpos + def copy(self): + """ NOT_RPYTHON """ + copy = Stack(self.size()) + for item in self.stack: + copy.append(item) + return copy + def append(self, elem): while len(self.stack) <= self.stackpos: self.stack.append(None) @@ -72,3 +79,9 @@ n = self.stackpos - 1 assert n >= 0 self.stack[n] = elem + + def __repr__(self): + """ NOT_RPYTHON """ + entry_types = [e.TYPE for e in self.stack] + return "Stack(%s)" % ','.join(entry_types) + diff --git a/rpython/jit/backend/llsupport/tl/test/code_strategies.py b/rpython/jit/backend/llsupport/tl/test/code_strategies.py --- a/rpython/jit/backend/llsupport/tl/test/code_strategies.py +++ b/rpython/jit/backend/llsupport/tl/test/code_strategies.py @@ -28,29 +28,15 @@ STD_SPACE = interp.Space() -#@composite -#def runtime_stack(draw, clazz): -# strats = [get_strategy_for(t) for t in clazz._stack_types] -# stack_obj = stack.Stack(len(strats)) -# for i,strat in enumerate(strats): -# if clazz._stack_types[i] == IDX_TYP: -# # it is only valid to access a list with a valid index! 
-# w_list = stack_obj.peek(i-1) -# l = len(w_list.items) -# assume(l > 0) -# integrals = st.integers(min_value=0, max_value=l-1) -# stack_obj.append(STD_SPACE.wrap(draw(integrals))) -# continue -# stack_obj.append(STD_SPACE.wrap(draw(strat))) -# return stack_obj - @defines_strategy def stack_entry(types=all_types): - return st.sampled_from([get_strategy_for(t) for t in types]) + return st.one_of(*[get_strategy_for(t) for t in types]) @defines_strategy def runtime_stack(min_size=0, average_size=5, max_size=4096, types=all_types): + if max_size < average_size: + average_size = max_size // 2 stack_entries = st.lists(stack_entry(all_types), min_size, average_size, max_size) return stack_entries.map(lambda elems: \ @@ -67,30 +53,50 @@ return clazz return None +def find_next(stack, type, off=0): + i = off + while i < stack.size(): + if stack.peek(i).is_of_type(LIST_TYP): + break + i += 1 + else: + return None + return stack.peek(i) @defines_strategy def bytecode_class(stack): def filter_using_stack(bytecode_class): - required_types = bytecode_class.requires_stack - if len(required_types) < stack.size(): + required_types = bytecode_class._stack_types + if len(required_types) > stack.size(): return False - j = len(required_types)-1 - for i in range(stack.size()): + for i in range(len(required_types)): item = stack.peek(i) - if not item.is_of_type(required_types[j]): + j = len(required_types) - i - 1 + rt = required_types[j] + if not item.is_of_type(rt): return False - j -= 1 - if j < 0: - break + if code.op_modifies_list(bytecode_class): + w_list = find_next(stack, LIST_TYP) + if w_list is None or len(w_list.items) == 0: + # on an empty list we cannot insert or delete + return False return True - return st.sampled_from(byte_code_classes()).filter(filter_using_stack) + clazzes = filter(filter_using_stack, byte_code_classes()) + return st.sampled_from(clazzes) @composite def bytecode(draw, max_stack_size=4096): # get a stack that is the same for one test run - rs = runtime_stack(max_size=max_stack_size) - stack = draw(st.shared(rs, 'stack')) - clazz = draw(bytecode_class(stack)) + stack_strat = runtime_stack(max_size=max_stack_size) + run_stack = draw(st.shared(stack_strat, 'stack')) + + # get a byte code class + clazz = draw(bytecode_class(run_stack)) inst = clazz.create_from(draw, get_strategy_for) + assume(not inst.filter_bytecode(run_stack)) bytecode, consts = code.Context().transform([inst]) - return bytecode, consts, stack + + # propagate the changes to the stack + orig_stack = run_stack.copy() + interp.dispatch_once(STD_SPACE, 0, bytecode, consts, run_stack) + return inst, orig_stack diff --git a/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py b/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py --- a/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py +++ b/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py @@ -1,6 +1,6 @@ import py from hypothesis import given -from hypothesis.strategies import lists +from hypothesis.strategies import lists, data from rpython.jit.backend.llsupport.tl import code, interp from rpython.jit.backend.llsupport.tl.stack import Stack from rpython.jit.backend.llsupport.tl.test import code_strategies as st @@ -22,19 +22,88 @@ assert c.get_byte(4) == code.AddStr.BYTE_CODE assert c.get_short(3) == 1 +class TestCodeStrategies(object): + + DEFAULT_ACTION_CLASSES = (code.CreateList, code.PutInt, + code.LoadStr) + + @given(data()) + def test_bytecode_class_generation(self, data): + space = interp.Space() + stack = Stack(0) + for i in range(10): + 
clazz = data.draw(st.bytecode_class(stack)) + assert(clazz in self.DEFAULT_ACTION_CLASSES) + + @given(data()) + def test_bytecode_class_generation_int(self, data): + space = interp.Space() + stack = Stack(0) + stack.append(space.wrap(0)) + for i in range(10): + clazz = data.draw(st.bytecode_class(stack)) + assert(clazz in self.DEFAULT_ACTION_CLASSES) + stack.append(space.wrap(0)) + for i in range(10): + clazz = data.draw(st.bytecode_class(stack)) + assert(clazz in self.DEFAULT_ACTION_CLASSES + \ + (code.CompareInt,)) + + @given(data()) + def test_bytecode_class_generation_str(self, data): + space = interp.Space() + stack = Stack(0) + stack.append(space.wrap("hello")) + for i in range(10): + clazz = data.draw(st.bytecode_class(stack)) + assert(clazz in self.DEFAULT_ACTION_CLASSES) + stack.append(space.wrap("world")) + for i in range(10): + clazz = data.draw(st.bytecode_class(stack)) + assert(clazz in self.DEFAULT_ACTION_CLASSES + \ + (code.AddStr,)) + + @given(data()) + def test_bytecode_class_generation_list(self, data): + space = interp.Space() + stack = Stack(0) + stack.append(space.wrap([])) + stack.append(space.wrap(0)) + for i in range(10): + clazz = data.draw(st.bytecode_class(stack)) + assert(clazz not in (code.InsertList, code.DelList)) + stack.append(space.wrap([space.wrap(1)])) + stack.append(space.wrap(0)) + for i in range(10): + clazz = data.draw(st.bytecode_class(stack)) + assert(clazz in self.DEFAULT_ACTION_CLASSES + \ + (code.DelList, code.AppendList)) + stack.append(space.wrap("haskell")) + for i in range(10): + clazz = data.draw(st.bytecode_class(stack)) + assert(clazz in self.DEFAULT_ACTION_CLASSES + \ + (code.InsertList, code.AppendList)) + + class TestInterp(object): @given(st.bytecode()) def test_consume_stack(self, args): - bytecode, consts, stack = args + bc_obj, stack = args + bytecode, consts = code.Context().transform([bc_obj]) space = interp.Space() i = interp.dispatch_once(space, 0, bytecode, consts, stack) assert i == len(bytecode) clazz = st.get_byte_code_class(ord(bytecode[0])) - assert stack.size() == len(clazz._return_on_stack_types) + assert stack.size() >= len(clazz._return_on_stack_types) + for i,type in enumerate(clazz._return_on_stack_types): + j = len(clazz._return_on_stack_types) - i - 1 + assert stack.peek(j).is_of_type(type) - @given(lists(st.bytecode(max_stack_size=0))) - def test_execute_bytecode_block(self, args): - bytecode, consts, _ = args + @given(lists(st.bytecode(max_stack_size=0), min_size=1)) + def test_execute_bytecode_block(self, codes): + bc_obj_list = [bc for bc,stack in codes] + _, stack = codes[0] + bytecode, consts = code.Context().transform(bc_obj_list) space = interp.Space() stack = Stack(16) pc = 0 From pypy.commits at gmail.com Fri Mar 4 09:59:11 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 04 Mar 2016 06:59:11 -0800 (PST) Subject: [pypy-commit] pypy gcstress-hypothesis: block of bytecodes is now supported, it seems that some cases are generated that trash the constraint max_size of the initial stack is 0 Message-ID: <56d9a2bf.465ec20a.b4ac0.541d@mx.google.com> Author: Richard Plangger Branch: gcstress-hypothesis Changeset: r82770:4dbf54a3736b Date: 2016-03-04 15:58 +0100 http://bitbucket.org/pypy/pypy/changeset/4dbf54a3736b/ Log: block of bytecodes is now supported, it seems that some cases are generated that trash the constraint max_size of the initial stack is 0 diff --git a/rpython/jit/backend/llsupport/tl/code.py b/rpython/jit/backend/llsupport/tl/code.py --- a/rpython/jit/backend/llsupport/tl/code.py +++ 
b/rpython/jit/backend/llsupport/tl/code.py @@ -165,7 +165,8 @@ def filter_bytecode(self, stack): w_idx = stack.peek(1) w_list = stack.peek(2) - if w_idx.value >= len(w_list.items): + if w_idx.value >= len(w_list.items) or \ + w_idx.value < 0: return True return False @@ -178,7 +179,8 @@ def filter_bytecode(self, stack): w_idx = stack.peek(0) w_list = stack.peek(1) - if w_idx.value >= len(w_list.items): + if w_idx.value >= len(w_list.items) or \ + w_idx.value < 0: return True return False diff --git a/rpython/jit/backend/llsupport/tl/interp.py b/rpython/jit/backend/llsupport/tl/interp.py --- a/rpython/jit/backend/llsupport/tl/interp.py +++ b/rpython/jit/backend/llsupport/tl/interp.py @@ -17,6 +17,15 @@ assert isinstance(w_lst, W_ListObject) return space.wrap(self.items + w_lst.items) + def copy(self): + newlist = [] + for item in self.items: + if item is None: + newlist.append(None) + else: + newlist.append(item.copy()) + return W_ListObject(newlist) + def is_of_type(self, type): """ NOT_RPYTHON """ return type in (code.LIST_TYP,) @@ -31,6 +40,9 @@ assert isinstance(w_int, W_IntObject) return space.wrap(self.value - w_int.value) + def copy(self): + return W_IntObject(self.value) + def concat(self, space, w_obj): raise NotImplementedError("cannot concat int with object") @@ -50,6 +62,9 @@ assert isinstance(w_str, W_StrObject) return space.wrap(self.value + w_str.value) + def copy(self): + return W_StrObject(self.value) + def is_of_type(self, type): """ NOT_RPYTHON """ return type in (code.STR_TYP,) diff --git a/rpython/jit/backend/llsupport/tl/stack.py b/rpython/jit/backend/llsupport/tl/stack.py --- a/rpython/jit/backend/llsupport/tl/stack.py +++ b/rpython/jit/backend/llsupport/tl/stack.py @@ -7,6 +7,8 @@ def from_items(space, elems): s = Stack(len(elems)) for elem in elems: + if isinstance(elem, list): + elem = [space.wrap(e) for e in elem] s.append(space.wrap(elem)) return s @@ -19,11 +21,14 @@ def size(self): return self.stackpos - def copy(self): + def copy(self, values=False): """ NOT_RPYTHON """ copy = Stack(self.size()) for item in self.stack: - copy.append(item) + if values: + copy.append(item.copy()) + else: + copy.append(item) return copy def append(self, elem): @@ -80,6 +85,10 @@ assert n >= 0 self.stack[n] = elem + def reset(self): + self.stack = [None] * self.size() + self.stackpos = 0 + def __repr__(self): """ NOT_RPYTHON """ entry_types = [e.TYPE for e in self.stack] diff --git a/rpython/jit/backend/llsupport/tl/test/code_strategies.py b/rpython/jit/backend/llsupport/tl/test/code_strategies.py --- a/rpython/jit/backend/llsupport/tl/test/code_strategies.py +++ b/rpython/jit/backend/llsupport/tl/test/code_strategies.py @@ -20,9 +20,11 @@ elif typ == COND_TYP: return st.integers(min_value=0, max_value=4) elif typ == STR_TYP: - return st.text() + return st.text().filter(lambda x: x is not None) elif typ == LIST_TYP: - return st.lists(elements=st.one_of(st.integers())) # TODO must be recursive + # TODO recursive + result = st.lists(elements=st.one_of(get_strategy_for('i'))) + return result.filter(lambda x: x is not None) else: raise NotImplementedError("type: " + str(typ)) @@ -35,10 +37,11 @@ @defines_strategy def runtime_stack(min_size=0, average_size=5, max_size=4096, types=all_types): - if max_size < average_size: - average_size = max_size // 2 - stack_entries = st.lists(stack_entry(all_types), min_size, - average_size, max_size) + if max_size == 0: + return st.just(stack.Stack(0)) + stack_entries = st.lists(stack_entry(all_types), min_size=min_size, + average_size=average_size, 
+ max_size=max_size) return stack_entries.map(lambda elems: \ stack.Stack.from_items(STD_SPACE, elems)) @@ -89,6 +92,9 @@ # get a stack that is the same for one test run stack_strat = runtime_stack(max_size=max_stack_size) run_stack = draw(st.shared(stack_strat, 'stack')) + # propagate the changes to the stack + orig_stack = run_stack.copy(values=True) + assert orig_stack is not run_stack # get a byte code class clazz = draw(bytecode_class(run_stack)) @@ -96,7 +102,5 @@ assume(not inst.filter_bytecode(run_stack)) bytecode, consts = code.Context().transform([inst]) - # propagate the changes to the stack - orig_stack = run_stack.copy() interp.dispatch_once(STD_SPACE, 0, bytecode, consts, run_stack) return inst, orig_stack diff --git a/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py b/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py --- a/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py +++ b/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py @@ -105,7 +105,6 @@ _, stack = codes[0] bytecode, consts = code.Context().transform(bc_obj_list) space = interp.Space() - stack = Stack(16) pc = 0 end = len(bytecode) while pc < end: diff --git a/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py b/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py --- a/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py +++ b/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py @@ -1,35 +1,33 @@ import py from hypothesis import given +from hypothesis.strategies import lists from rpython.tool.udir import udir from rpython.jit.metainterp.optimize import SpeculativeError from rpython.annotator.listdef import s_list_of_strings from rpython.translator.translator import TranslationContext from rpython.translator.c import genc -from rpython.jit.backend.llsupport.tl import interp +from rpython.jit.backend.llsupport.tl import interp, code from rpython.jit.backend.llsupport.tl.test import code_strategies as st def persist(type, contents): dir = udir.ensure(type) - print "written", type, "to", dir with open(dir.strpath, 'wb') as fd: fd.write(contents) return dir.strpath def persist_constants(consts): contents = "" - for string in consts: + for key, string in sorted(consts.items()): contents += string.replace("\n", "\\n") + "\n" - return persist('constants', contents) + return persist('constants', contents.encode('utf-8')) def persist_bytecode(bc): return persist('bytecode', bc) + class GCHypothesis(object): - builder = None - def setup_method(self, name): - if self.builder: - return + def setup_class(cls): t = TranslationContext() t.config.translation.gc = "incminimark" t.config.translation.gcremovetypeptr = True @@ -42,7 +40,7 @@ cbuilder.generate_source(defines=cbuilder.DEBUG_DEFINES) cbuilder.compile() # prevent from rebuilding the c object! - self.builder = cbuilder + cls.builder = cbuilder def execute(self, bytecode, consts): exe = self.builder.executable_name @@ -53,10 +51,25 @@ res = self.builder.translator.platform.execute(exe, args, env=env) return res.returncode, res.out, res.err - @given(st.bytecode_block()) + # cannot have a non empty stack, cannot pass stack to executable! + @given(st.bytecode(max_stack_size=0)) def test_execute_single_bytecode(self, program): - bytecode, consts = program + bc_obj, stack = program + assert stack.size() == 0 + bytecode, consts = code.Context().transform([bc_obj]) result, out, err = self.execute(bytecode, consts) if result != 0: raise Exception(("could not run program. 
returned %d" " stderr:\n%s\nstdout:\n%s\n") % (result, err, out)) + + # cannot have a non empty stack, cannot pass stack to executable! + @given(lists(st.bytecode(max_stack_size=0), min_size=1, average_size=24)) + def test_execute_bytecodes(self, args): + _, stack = args[0] + assert stack.size() == 0 + bc_objs = [bc for bc, _ in args] + bytecode, consts = code.Context().transform(bc_objs) + result, out, err = self.execute(bytecode, consts) + if result != 0: + raise Exception(("could not run program. returned %d" + " stderr:\n%s\nstdout:\n%s\n") % (result, err, out)) From pypy.commits at gmail.com Fri Mar 4 09:59:07 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 04 Mar 2016 06:59:07 -0800 (PST) Subject: [pypy-commit] pypy gcstress-hypothesis: creating a list of byte codes using hypothesis. the state along each instruction is passed using shared Message-ID: <56d9a2bb.500f1c0a.baefc.5100@mx.google.com> Author: Richard Plangger Branch: gcstress-hypothesis Changeset: r82768:5ae35a1cd368 Date: 2016-03-03 10:52 +0100 http://bitbucket.org/pypy/pypy/changeset/5ae35a1cd368/ Log: creating a list of byte codes using hypothesis. the state along each instruction is passed using shared diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -76,3 +76,4 @@ ^.git/ ^release/ ^rpython/_cache$ +^.hypothesis/ diff --git a/rpython/jit/backend/llsupport/tl/interp.py b/rpython/jit/backend/llsupport/tl/interp.py --- a/rpython/jit/backend/llsupport/tl/interp.py +++ b/rpython/jit/backend/llsupport/tl/interp.py @@ -15,6 +15,10 @@ assert isinstance(w_lst, W_ListObject) return space.wrap(self.items + w_lst.items) + def is_of_type(self, type): + """ NOT_RPYTHON """ + return type in (LIST_TYP,) + class W_IntObject(W_Root): def __init__(self, value): self.value = value @@ -26,6 +30,12 @@ def concat(self, space, w_obj): raise NotImplementedError("cannot concat int with object") + def is_of_type(self, type): + """ NOT_RPYTHON """ + return type in (code.INT_TYP,code.IDX_TYP, + code.COND_TYP,code.SHORT_TYP, + code.BYTE_TYP) + class W_StrObject(W_Root): def __init__(self, value): self.value = value @@ -34,6 +44,10 @@ assert isinstance(w_str, W_StrObject) return space.wrap(self.value + w_str.value) + def is_of_type(self, type): + """ NOT_RPYTHON """ + return type in (code.STR_TYP,) + class Space(object): @specialize.argtype(1) def wrap(self, val): diff --git a/rpython/jit/backend/llsupport/tl/stack.py b/rpython/jit/backend/llsupport/tl/stack.py --- a/rpython/jit/backend/llsupport/tl/stack.py +++ b/rpython/jit/backend/llsupport/tl/stack.py @@ -3,6 +3,14 @@ class Stack(object): _virtualizable_ = ['stackpos', 'stack[*]'] + @staticmethod + def from_items(space, elems): + s = Stack(len(elems)) + for elem in elems: + s.append(space.wrap(elem)) + return s + + def __init__(self, size): self = hint(self, access_directly=True, fresh_virtualizable=True) self.stack = [None] * size diff --git a/rpython/jit/backend/llsupport/tl/test/code_strategies.py b/rpython/jit/backend/llsupport/tl/test/code_strategies.py --- a/rpython/jit/backend/llsupport/tl/test/code_strategies.py +++ b/rpython/jit/backend/llsupport/tl/test/code_strategies.py @@ -28,21 +28,33 @@ STD_SPACE = interp.Space() - at composite -def runtime_stack(draw, clazz): - strats = [get_strategy_for(t) for t in clazz._stack_types] - stack_obj = stack.Stack(len(strats)) - for i,strat in enumerate(strats): - if clazz._stack_types[i] == IDX_TYP: - # it is only valid to access a list with a valid index! 
- w_list = stack_obj.peek(i-1) - l = len(w_list.items) - assume(l > 0) - integrals = st.integers(min_value=0, max_value=l-1) - stack_obj.append(STD_SPACE.wrap(draw(integrals))) - continue - stack_obj.append(STD_SPACE.wrap(draw(strat))) - return stack_obj +#@composite +#def runtime_stack(draw, clazz): +# strats = [get_strategy_for(t) for t in clazz._stack_types] +# stack_obj = stack.Stack(len(strats)) +# for i,strat in enumerate(strats): +# if clazz._stack_types[i] == IDX_TYP: +# # it is only valid to access a list with a valid index! +# w_list = stack_obj.peek(i-1) +# l = len(w_list.items) +# assume(l > 0) +# integrals = st.integers(min_value=0, max_value=l-1) +# stack_obj.append(STD_SPACE.wrap(draw(integrals))) +# continue +# stack_obj.append(STD_SPACE.wrap(draw(strat))) +# return stack_obj + + at defines_strategy +def stack_entry(types=all_types): + return st.sampled_from([get_strategy_for(t) for t in types]) + + at defines_strategy +def runtime_stack(min_size=0, average_size=5, max_size=4096, + types=all_types): + stack_entries = st.lists(stack_entry(all_types), min_size, + average_size, max_size) + return stack_entries.map(lambda elems: \ + stack.Stack.from_items(STD_SPACE, elems)) def byte_code_classes(): for name, clazz in code.__dict__.items(): @@ -55,21 +67,30 @@ return clazz return None + + at defines_strategy +def bytecode_class(stack): + def filter_using_stack(bytecode_class): + required_types = bytecode_class.requires_stack + if len(required_types) < stack.size(): + return False + j = len(required_types)-1 + for i in range(stack.size()): + item = stack.peek(i) + if not item.is_of_type(required_types[j]): + return False + j -= 1 + if j < 0: + break + return True + return st.sampled_from(byte_code_classes()).filter(filter_using_stack) + @composite -def single_bytecode(draw, - clazzes=st.sampled_from(byte_code_classes()), - integrals=st.integers(), texts=st.text()): - clazz = draw(clazzes) +def bytecode(draw, max_stack_size=4096): + # get a stack that is the same for one test run + rs = runtime_stack(max_size=max_stack_size) + stack = draw(st.shared(rs, 'stack')) + clazz = draw(bytecode_class(stack)) inst = clazz.create_from(draw, get_strategy_for) bytecode, consts = code.Context().transform([inst]) - _stack = draw(runtime_stack(clazz)) - return bytecode, consts, _stack - - at composite -def bytecode_block(draw, - clazzes=st.sampled_from(byte_code_classes()), - integrals=st.integers(), texts=st.text()): - clazz = draw(clazzes) - inst = clazz.create_from(draw, get_strategy_for) - bytecode, consts = code.Context().transform([inst]) - return bytecode, consts + return bytecode, consts, stack diff --git a/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py b/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py --- a/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py +++ b/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py @@ -1,5 +1,6 @@ import py from hypothesis import given +from hypothesis.strategies import lists from rpython.jit.backend.llsupport.tl import code, interp from rpython.jit.backend.llsupport.tl.stack import Stack from rpython.jit.backend.llsupport.tl.test import code_strategies as st @@ -22,7 +23,7 @@ assert c.get_short(3) == 1 class TestInterp(object): - @given(st.single_bytecode()) + @given(st.bytecode()) def test_consume_stack(self, args): bytecode, consts, stack = args space = interp.Space() @@ -31,9 +32,9 @@ clazz = st.get_byte_code_class(ord(bytecode[0])) assert stack.size() == len(clazz._return_on_stack_types) - @given(st.bytecode_block()) + 
@given(lists(st.bytecode(max_stack_size=0))) def test_execute_bytecode_block(self, args): - bytecode, consts = args + bytecode, consts, _ = args space = interp.Space() stack = Stack(16) pc = 0 From pypy.commits at gmail.com Fri Mar 4 11:07:27 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 04 Mar 2016 08:07:27 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: fix actual bugs, thank you hypothesis Message-ID: <56d9b2bf.06b01c0a.1c3eb.6bd8@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82771:80df6e4dfd56 Date: 2016-03-04 18:06 +0200 http://bitbucket.org/pypy/pypy/changeset/80df6e4dfd56/ Log: fix actual bugs, thank you hypothesis diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -134,7 +134,7 @@ def _encode(self, box): if isinstance(box, Const): - if isinstance(box, ConstInt) and box.getint() < MAXINT: + if isinstance(box, ConstInt) and 0 <= box.getint() < MAXINT: return tag(TAGINT, box.getint()) else: self._consts.append(box) @@ -148,7 +148,7 @@ def _record_op(self, opnum, argboxes, descr=None): operations = self._ops - pos = len(operations) + pos = self._count operations.append(opnum) if oparity[opnum] == -1: operations.append(len(argboxes)) @@ -163,7 +163,7 @@ def _record_raw(self, opnum, tagged_args, tagged_descr=-1): operations = self._ops - pos = len(operations) + pos = self._count operations.append(opnum) if oparity[opnum] == -1: operations.append(len(tagged_args)) @@ -179,9 +179,9 @@ self._descrs.append(descr) return len(self._descrs) - 1 - def record_forwarding(self, op, newtag): - index = op._pos - self._ops[index] = -newtag - 1 +# def record_forwarding(self, op, newtag): +# index = op._pos +# self._ops[index] = -newtag - 1 def record_snapshot_link(self, pos): self._ops.append(-pos - 1) diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -118,7 +118,8 @@ inputargs, ops = lst t = Trace(inputargs) for op in ops: - t.record_op(op.getopnum(), op.getarglist()) + newop = t.record_op(op.getopnum(), op.getarglist()) + op.position = newop.position inpargs, l, iter = self.unpack(t) loop1 = TreeLoop("loop1") loop1.inputargs = inputargs @@ -126,4 +127,5 @@ loop2 = TreeLoop("loop2") loop2.inputargs = inpargs loop2.operations = l - BaseTest.assert_equal(loop1, loop2) \ No newline at end of file + BaseTest.assert_equal(loop1, loop2) + print "success" \ No newline at end of file From pypy.commits at gmail.com Fri Mar 4 11:47:07 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 04 Mar 2016 08:47:07 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: kill special casing of ESCAPE and FORCE_SPILL and make them normal classes, pass most of test_optimizebasic Message-ID: <56d9bc0b.88c8c20a.8eb98.779c@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82772:1d9927b12845 Date: 2016-03-04 18:46 +0200 http://bitbucket.org/pypy/pypy/changeset/1d9927b12845/ Log: kill special casing of ESCAPE and FORCE_SPILL and make them normal classes, pass most of test_optimizebasic diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -385,6 +385,11 @@ rop.CALL_MALLOC_NURSERY_VARSIZE_FRAME, rop.NURSERY_PTR_INCREMENT, rop.LABEL, + rop.ESCAPE_I, + 
rop.ESCAPE_N, + rop.ESCAPE_R, + rop.ESCAPE_F, + rop.FORCE_SPILL, rop.SAVE_EXC_CLASS, rop.SAVE_EXCEPTION, rop.RESTORE_EXCEPTION, diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -134,7 +134,9 @@ def _encode(self, box): if isinstance(box, Const): - if isinstance(box, ConstInt) and 0 <= box.getint() < MAXINT: + if (isinstance(box, ConstInt) and + isinstance(box.getint(), int) and # symbolics + 0 <= box.getint() < MAXINT): return tag(TAGINT, box.getint()) else: self._consts.append(box) @@ -189,6 +191,7 @@ def record_op(self, opnum, argboxes, descr=None): # return an ResOperation instance, ideally die in hell pos = self._record_op(opnum, argboxes, descr) + assert opnum >= 0 return ResOperation(opnum, argboxes, pos, descr) def record_op_tag(self, opnum, tagged_args, descr=None): diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -56,13 +56,13 @@ arg0 = op.getarg(0) arg1 = op.getarg(1) if oldopnum != -1: - top = ResOperation(oldopnum, [arg0, arg1], None) + top = ResOperation(oldopnum, [arg0, arg1], -1) if self.try_boolinvers(op, top): return True oldopnum = op.boolreflex # FIXME: add INT_ADD, INT_MUL if oldopnum != -1: - top = ResOperation(oldopnum, [arg1, arg0], None) + top = ResOperation(oldopnum, [arg1, arg0], -1) oldop = self.get_pure_result(top) if oldop is not None: self.optimizer.make_equal_to(op, oldop) @@ -72,7 +72,7 @@ return False oldopnum = opclasses[op.boolreflex].boolinverse if oldopnum != -1: - top = ResOperation(oldopnum, [arg1, arg0], None) + top = ResOperation(oldopnum, [arg1, arg0], -1) if self.try_boolinvers(op, top): return True diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -557,7 +557,7 @@ from rpython.jit.metainterp.opencoder import Trace trace = Trace(loop.inputargs) for op in loop.operations: - newop = trace.record_op(op.getopnum(), op.getarglist()) + newop = trace.record_op(op.getopnum(), op.getarglist(), op.getdescr()) if rop.is_guard(op.getopnum()): frame = FakeFrame(op.getfailargs()) resume.capture_resumedata([frame], None, [], trace) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -81,7 +81,7 @@ return False -def ResOperation(opnum, args, position, descr=None): +def ResOperation(opnum, args, position=-1, descr=None): cls = opclasses[opnum] op = cls() op.initarglist(args) @@ -1153,8 +1153,6 @@ 'STRLEN/1/i', 'STRGETITEM/2/i', 'GETARRAYITEM_GC_PURE/2d/rfi', - #'GETFIELD_RAW_PURE/1d/rfi', these two operations not useful and - #'GETARRAYITEM_RAW_PURE/2d/fi', dangerous when unrolling speculatively 'UNICODELEN/1/i', 'UNICODEGETITEM/2/i', # @@ -1235,6 +1233,8 @@ 'LEAVE_PORTAL_FRAME/1/n', # debugging only 'JIT_DEBUG/*/n', # debugging only '_JIT_DEBUG_LAST', + 'ESCAPE/*/rfin', # tests only + 'FORCE_SPILL/1/n', # tests only 'VIRTUAL_REF_FINISH/2/n', # removed before it's passed to the backend 'COPYSTRCONTENT/5/n', # src, dst, srcstart, dststart, length 'COPYUNICODECONTENT/5/n', diff --git a/rpython/jit/tool/oparser.py b/rpython/jit/tool/oparser.py --- 
a/rpython/jit/tool/oparser.py +++ b/rpython/jit/tool/oparser.py @@ -15,67 +15,6 @@ class ParseError(Exception): pass -class ESCAPE_OP(N_aryOp, ResOpWithDescr): - - is_source_op = True - - def getopnum(self): - return self.OPNUM - - def getopname(self): - return 'escape' - - def copy_and_change(self, opnum, args=None, descr=None): - assert opnum == self.OPNUM - op = self.__class__() - if args is not None: - op.initarglist(args) - else: - op.initarglist(self._args[:]) - assert descr is None - return op - - -class ESCAPE_OP_I(ESCAPE_OP): - type = 'i' - OPNUM = -123 - -class ESCAPE_OP_F(ESCAPE_OP): - type = 'f' - OPNUM = -124 - -class ESCAPE_OP_N(ESCAPE_OP): - type = 'v' - OPNUM = -125 - -class ESCAPE_OP_R(ESCAPE_OP): - type = 'r' - OPNUM = -126 - -ALL_ESCAPE_OPS = { - ESCAPE_OP_I.OPNUM: ESCAPE_OP_I, - ESCAPE_OP_F.OPNUM: ESCAPE_OP_F, - ESCAPE_OP_N.OPNUM: ESCAPE_OP_N, - ESCAPE_OP_R.OPNUM: ESCAPE_OP_R -} - -class FORCE_SPILL(UnaryOp, PlainResOp): - - OPNUM = -127 - is_source_op = True - - def getopnum(self): - return self.OPNUM - - def getopname(self): - return 'force_spill' - - def copy_and_change(self, opnum, args=None, descr=None): - assert opnum == self.OPNUM - newop = FORCE_SPILL() - newop.initarglist(args or self.getarglist()) - return newop - def default_fail_descr(model, opnum, fail_args=None): if opnum == rop.FINISH: @@ -313,23 +252,12 @@ return opnum, args, descr, fail_args def create_op(self, opnum, args, res, descr, fail_args): - if opnum in ALL_ESCAPE_OPS: - op = ALL_ESCAPE_OPS[opnum]() - op.initarglist(args) - assert descr is None - return op - if opnum == FORCE_SPILL.OPNUM: - op = FORCE_SPILL() - op.initarglist(args) - assert descr is None - return op - else: - res = ResOperation(opnum, args, -1, descr) - if fail_args is not None: - res.setfailargs(fail_args) - if self._postproces: - self._postproces(res) - return res + res = ResOperation(opnum, args, -1, descr) + if fail_args is not None: + res.setfailargs(fail_args) + if self._postproces: + self._postproces(res) + return res def parse_result_op(self, line): res, op = line.split("=", 1) From pypy.commits at gmail.com Fri Mar 4 11:49:48 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 04 Mar 2016 08:49:48 -0800 (PST) Subject: [pypy-commit] pypy rawrefcount-review: A branch to remove the conceptual dependency of rpython.rlib.rawrefcount on PyPy specifics Message-ID: <56d9bcac.463f1c0a.5acf6.76da@mx.google.com> Author: Ronan Lamy Branch: rawrefcount-review Changeset: r82773:aeab1067f34a Date: 2016-03-04 15:18 +0000 http://bitbucket.org/pypy/pypy/changeset/aeab1067f34a/ Log: A branch to remove the conceptual dependency of rpython.rlib.rawrefcount on PyPy specifics From pypy.commits at gmail.com Fri Mar 4 11:49:50 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 04 Mar 2016 08:49:50 -0800 (PST) Subject: [pypy-commit] pypy rawrefcount-review: Rename some parameters for clarity Message-ID: <56d9bcae.657bc20a.9b8e8.78ee@mx.google.com> Author: Ronan Lamy Branch: rawrefcount-review Changeset: r82774:111c68c340a7 Date: 2016-03-04 16:48 +0000 http://bitbucket.org/pypy/pypy/changeset/111c68c340a7/ Log: Rename some parameters for clarity diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ b/rpython/rlib/rawrefcount.py @@ -6,7 +6,6 @@ # import sys, weakref from rpython.rtyper.lltypesystem import lltype, llmemory -from rpython.rlib.objectmodel import we_are_translated, specialize from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rlib import rgc @@ -36,27 
+35,23 @@ _d_list = [] _dealloc_trigger_callback = dealloc_trigger_callback -def create_link_pypy(p, ob): +def create_link_pypy(gcobj, ob): "NOT_RPYTHON: a link where the PyPy object contains some or all the data" - #print 'create_link_pypy\n\t%s\n\t%s' % (p, ob) - assert p not in _pypy2ob - #assert not ob.c_ob_pypy_link - ob.c_ob_pypy_link = _build_pypy_link(p) - _pypy2ob[p] = ob + assert gcobj not in _pypy2ob + ob.c_ob_pypy_link = _build_pypy_link(gcobj) + _pypy2ob[gcobj] = ob _p_list.append(ob) -def create_link_pyobj(p, ob): +def create_link_pyobj(gcobj, ob): """NOT_RPYTHON: a link where the PyObject contains all the data. - from_obj() will not work on this 'p'.""" - #print 'create_link_pyobj\n\t%s\n\t%s' % (p, ob) - assert p not in _pypy2ob - #assert not ob.c_ob_pypy_link - ob.c_ob_pypy_link = _build_pypy_link(p) + from_obj() will not work on this 'gcobj'.""" + assert gcobj not in _pypy2ob + ob.c_ob_pypy_link = _build_pypy_link(gcobj) _o_list.append(ob) -def from_obj(OB_PTR_TYPE, p): +def from_obj(OB_PTR_TYPE, gcobj): "NOT_RPYTHON" - ob = _pypy2ob.get(p) + ob = _pypy2ob.get(gcobj) if ob is None: return lltype.nullptr(OB_PTR_TYPE.TO) assert lltype.typeOf(ob) == OB_PTR_TYPE @@ -67,9 +62,9 @@ link = ob.c_ob_pypy_link if link == 0: return None - p = _adr2pypy[link] - assert isinstance(p, Class) - return p + gcobj = _adr2pypy[link] + assert isinstance(gcobj, Class) + return gcobj def next_dead(OB_PTR_TYPE): if len(_d_list) == 0: @@ -163,24 +158,24 @@ # ____________________________________________________________ -def _unspec_p(hop, v_p): - assert isinstance(v_p.concretetype, lltype.Ptr) - assert v_p.concretetype.TO._gckind == 'gc' - return hop.genop('cast_opaque_ptr', [v_p], resulttype=llmemory.GCREF) +def _unspec_gc(hop, v_gcobj): + assert isinstance(v_gcobj.concretetype, lltype.Ptr) + assert v_gcobj.concretetype.TO._gckind == 'gc' + return hop.genop('cast_opaque_ptr', [v_gcobj], resulttype=llmemory.GCREF) def _unspec_ob(hop, v_ob): assert isinstance(v_ob.concretetype, lltype.Ptr) assert v_ob.concretetype.TO._gckind == 'raw' return hop.genop('cast_ptr_to_adr', [v_ob], resulttype=llmemory.Address) -def _spec_p(hop, v_p): - assert v_p.concretetype == llmemory.GCREF - return hop.genop('cast_opaque_ptr', [v_p], +def _spec_gc(hop, v_gcref): + assert v_gcref.concretetype == llmemory.GCREF + return hop.genop('cast_opaque_ptr', [v_gcref], resulttype=hop.r_result.lowleveltype) -def _spec_ob(hop, v_ob): - assert v_ob.concretetype == llmemory.Address - return hop.genop('cast_adr_to_ptr', [v_ob], +def _spec_ob(hop, v_adr): + assert v_adr.concretetype == llmemory.Address + return hop.genop('cast_adr_to_ptr', [v_adr], resulttype=hop.r_result.lowleveltype) @@ -200,7 +195,7 @@ class Entry(ExtRegistryEntry): _about_ = (create_link_pypy, create_link_pyobj) - def compute_result_annotation(self, s_p, s_ob): + def compute_result_annotation(self, s_gcobj, s_ob): pass def specialize_call(self, hop): @@ -208,28 +203,28 @@ name = 'gc_rawrefcount_create_link_pypy' elif self.instance is create_link_pyobj: name = 'gc_rawrefcount_create_link_pyobj' - v_p, v_ob = hop.inputargs(*hop.args_r) + v_gcobj, v_ob = hop.inputargs(*hop.args_r) hop.exception_cannot_occur() - hop.genop(name, [_unspec_p(hop, v_p), _unspec_ob(hop, v_ob)]) + hop.genop(name, [_unspec_gc(hop, v_gcobj), _unspec_ob(hop, v_ob)]) class Entry(ExtRegistryEntry): _about_ = from_obj - def compute_result_annotation(self, s_OB_PTR_TYPE, s_p): + def compute_result_annotation(self, s_OB_PTR_TYPE, s_gcobj): from rpython.annotator import model as annmodel from 
rpython.rtyper.llannotation import lltype_to_annotation - assert (isinstance(s_p, annmodel.SomeInstance) or - annmodel.s_None.contains(s_p)) + assert (isinstance(s_gcobj, annmodel.SomeInstance) or + annmodel.s_None.contains(s_gcobj)) assert s_OB_PTR_TYPE.is_constant() return lltype_to_annotation(s_OB_PTR_TYPE.const) def specialize_call(self, hop): hop.exception_cannot_occur() - v_p = hop.inputarg(hop.args_r[1], arg=1) - v_ob = hop.genop('gc_rawrefcount_from_obj', [_unspec_p(hop, v_p)], - resulttype = llmemory.Address) - return _spec_ob(hop, v_ob) + v_gcobj = hop.inputarg(hop.args_r[1], arg=1) + v_adr = hop.genop('gc_rawrefcount_from_obj', [_unspec_gc(hop, v_gcobj)], + resulttype=llmemory.Address) + return _spec_ob(hop, v_adr) class Entry(ExtRegistryEntry): _about_ = to_obj @@ -245,21 +240,20 @@ def specialize_call(self, hop): hop.exception_cannot_occur() v_ob = hop.inputarg(hop.args_r[1], arg=1) - v_p = hop.genop('gc_rawrefcount_to_obj', [_unspec_ob(hop, v_ob)], - resulttype = llmemory.GCREF) - return _spec_p(hop, v_p) + v_gcobj = hop.genop('gc_rawrefcount_to_obj', [_unspec_ob(hop, v_ob)], + resulttype=llmemory.GCREF) + return _spec_gc(hop, v_gcobj) class Entry(ExtRegistryEntry): _about_ = next_dead def compute_result_annotation(self, s_OB_PTR_TYPE): - from rpython.annotator import model as annmodel from rpython.rtyper.llannotation import lltype_to_annotation assert s_OB_PTR_TYPE.is_constant() return lltype_to_annotation(s_OB_PTR_TYPE.const) def specialize_call(self, hop): hop.exception_cannot_occur() - v_ob = hop.genop('gc_rawrefcount_next_dead', [], - resulttype = llmemory.Address) - return _spec_ob(hop, v_ob) + v_rawaddr = hop.genop('gc_rawrefcount_next_dead', [], + resulttype=llmemory.Address) + return _spec_ob(hop, v_rawaddr) From pypy.commits at gmail.com Fri Mar 4 12:00:01 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 04 Mar 2016 09:00:01 -0800 (PST) Subject: [pypy-commit] pypy rawrefcount-review: Move rpython implementations next to their functions Message-ID: <56d9bf11.86b71c0a.491ac.7c02@mx.google.com> Author: Ronan Lamy Branch: rawrefcount-review Changeset: r82775:267edcb35bc5 Date: 2016-03-04 16:59 +0000 http://bitbucket.org/pypy/pypy/changeset/267edcb35bc5/ Log: Move rpython implementations next to their functions diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ b/rpython/rlib/rawrefcount.py @@ -35,6 +35,19 @@ _d_list = [] _dealloc_trigger_callback = dealloc_trigger_callback +class Entry(ExtRegistryEntry): + _about_ = init + + def compute_result_annotation(self, s_dealloc_callback): + from rpython.rtyper.llannotation import SomePtr + assert isinstance(s_dealloc_callback, SomePtr) # ll-ptr-to-function + + def specialize_call(self, hop): + hop.exception_cannot_occur() + [v_dealloc_callback] = hop.inputargs(hop.args_r[0]) + hop.genop('gc_rawrefcount_init', [v_dealloc_callback]) + + def create_link_pypy(gcobj, ob): "NOT_RPYTHON: a link where the PyPy object contains some or all the data" assert gcobj not in _pypy2ob @@ -49,6 +62,22 @@ ob.c_ob_pypy_link = _build_pypy_link(gcobj) _o_list.append(ob) +class Entry(ExtRegistryEntry): + _about_ = (create_link_pypy, create_link_pyobj) + + def compute_result_annotation(self, s_gcobj, s_ob): + pass + + def specialize_call(self, hop): + if self.instance is create_link_pypy: + name = 'gc_rawrefcount_create_link_pypy' + elif self.instance is create_link_pyobj: + name = 'gc_rawrefcount_create_link_pyobj' + v_gcobj, v_ob = hop.inputargs(*hop.args_r) + 
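The Entry classes being reshuffled in these rawrefcount changesets all follow the same RPython extension-registry pattern: a helper keeps a plain untranslated implementation, and a registry entry tells the annotator how to type the call and the rtyper how to lower it. A toy registry with made-up names, purely to show the shape, not the real ExtRegistryEntry machinery:

    REGISTRY = {}

    def register(func, annotate, specialize):
        REGISTRY[func] = (annotate, specialize)

    def create_link(gcobj, ob):
        "untranslated implementation, used directly by tests"
        ob['pypy_link'] = id(gcobj)

    register(create_link,
             annotate=lambda *args: None,      # the call returns nothing
             specialize=lambda hop: 'gc_rawrefcount_create_link_pypy')

    ob = {}
    create_link(object(), ob)
    assert 'pypy_link' in ob
    assert REGISTRY[create_link][1](None) == 'gc_rawrefcount_create_link_pypy'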
hop.exception_cannot_occur() + hop.genop(name, [_unspec_gc(hop, v_gcobj), _unspec_ob(hop, v_ob)]) + + def from_obj(OB_PTR_TYPE, gcobj): "NOT_RPYTHON" ob = _pypy2ob.get(gcobj) @@ -57,6 +86,25 @@ assert lltype.typeOf(ob) == OB_PTR_TYPE return ob +class Entry(ExtRegistryEntry): + _about_ = from_obj + + def compute_result_annotation(self, s_OB_PTR_TYPE, s_gcobj): + from rpython.annotator import model as annmodel + from rpython.rtyper.llannotation import lltype_to_annotation + assert (isinstance(s_gcobj, annmodel.SomeInstance) or + annmodel.s_None.contains(s_gcobj)) + assert s_OB_PTR_TYPE.is_constant() + return lltype_to_annotation(s_OB_PTR_TYPE.const) + + def specialize_call(self, hop): + hop.exception_cannot_occur() + v_gcobj = hop.inputarg(hop.args_r[1], arg=1) + v_adr = hop.genop('gc_rawrefcount_from_obj', [_unspec_gc(hop, v_gcobj)], + resulttype=llmemory.Address) + return _spec_ob(hop, v_adr) + + def to_obj(Class, ob): "NOT_RPYTHON" link = ob.c_ob_pypy_link @@ -66,6 +114,25 @@ assert isinstance(gcobj, Class) return gcobj +class Entry(ExtRegistryEntry): + _about_ = to_obj + + def compute_result_annotation(self, s_Class, s_ob): + from rpython.annotator import model as annmodel + from rpython.rtyper.llannotation import SomePtr + assert isinstance(s_ob, SomePtr) + assert s_Class.is_constant() + classdef = self.bookkeeper.getuniqueclassdef(s_Class.const) + return annmodel.SomeInstance(classdef, can_be_None=True) + + def specialize_call(self, hop): + hop.exception_cannot_occur() + v_ob = hop.inputarg(hop.args_r[1], arg=1) + v_gcobj = hop.genop('gc_rawrefcount_to_obj', [_unspec_ob(hop, v_ob)], + resulttype=llmemory.GCREF) + return _spec_gc(hop, v_gcobj) + + def next_dead(OB_PTR_TYPE): if len(_d_list) == 0: return lltype.nullptr(OB_PTR_TYPE.TO) @@ -73,6 +140,21 @@ assert lltype.typeOf(ob) == OB_PTR_TYPE return ob +class Entry(ExtRegistryEntry): + _about_ = next_dead + + def compute_result_annotation(self, s_OB_PTR_TYPE): + from rpython.rtyper.llannotation import lltype_to_annotation + assert s_OB_PTR_TYPE.is_constant() + return lltype_to_annotation(s_OB_PTR_TYPE.const) + + def specialize_call(self, hop): + hop.exception_cannot_occur() + v_rawaddr = hop.genop('gc_rawrefcount_next_dead', [], + resulttype=llmemory.Address) + return _spec_ob(hop, v_rawaddr) + + def _collect(track_allocation=True): """NOT_RPYTHON: for tests only. Emulates a GC collection. 
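The _collect() emulation whose docstring continues below follows a simple protocol: a collection moves PyObjects whose PyPy partner died onto a dead list and fires the registered trigger once, and the consumer then drains that list via next_dead(). A minimal pure-Python stand-in for that protocol, not the rawrefcount code itself:

    class MiniRefcountState(object):
        def __init__(self, trigger):
            self.dead = []
            self.trigger = trigger

        def collect(self, dying_pyobjs):
            # queue the casualties and fire the callback once per collection
            if dying_pyobjs:
                self.dead.extend(dying_pyobjs)
                self.trigger()

        def next_dead(self):
            return self.dead.pop() if self.dead else None

    fired = []
    state = MiniRefcountState(lambda: fired.append(True))
    state.collect(['ob1', 'ob2'])
    assert fired == [True]
    assert state.next_dead() == 'ob2'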
Will invoke dealloc_trigger_callback() once if there are objects @@ -177,83 +259,3 @@ assert v_adr.concretetype == llmemory.Address return hop.genop('cast_adr_to_ptr', [v_adr], resulttype=hop.r_result.lowleveltype) - - -class Entry(ExtRegistryEntry): - _about_ = init - - def compute_result_annotation(self, s_dealloc_callback): - from rpython.rtyper.llannotation import SomePtr - assert isinstance(s_dealloc_callback, SomePtr) # ll-ptr-to-function - - def specialize_call(self, hop): - hop.exception_cannot_occur() - [v_dealloc_callback] = hop.inputargs(hop.args_r[0]) - hop.genop('gc_rawrefcount_init', [v_dealloc_callback]) - - -class Entry(ExtRegistryEntry): - _about_ = (create_link_pypy, create_link_pyobj) - - def compute_result_annotation(self, s_gcobj, s_ob): - pass - - def specialize_call(self, hop): - if self.instance is create_link_pypy: - name = 'gc_rawrefcount_create_link_pypy' - elif self.instance is create_link_pyobj: - name = 'gc_rawrefcount_create_link_pyobj' - v_gcobj, v_ob = hop.inputargs(*hop.args_r) - hop.exception_cannot_occur() - hop.genop(name, [_unspec_gc(hop, v_gcobj), _unspec_ob(hop, v_ob)]) - - -class Entry(ExtRegistryEntry): - _about_ = from_obj - - def compute_result_annotation(self, s_OB_PTR_TYPE, s_gcobj): - from rpython.annotator import model as annmodel - from rpython.rtyper.llannotation import lltype_to_annotation - assert (isinstance(s_gcobj, annmodel.SomeInstance) or - annmodel.s_None.contains(s_gcobj)) - assert s_OB_PTR_TYPE.is_constant() - return lltype_to_annotation(s_OB_PTR_TYPE.const) - - def specialize_call(self, hop): - hop.exception_cannot_occur() - v_gcobj = hop.inputarg(hop.args_r[1], arg=1) - v_adr = hop.genop('gc_rawrefcount_from_obj', [_unspec_gc(hop, v_gcobj)], - resulttype=llmemory.Address) - return _spec_ob(hop, v_adr) - -class Entry(ExtRegistryEntry): - _about_ = to_obj - - def compute_result_annotation(self, s_Class, s_ob): - from rpython.annotator import model as annmodel - from rpython.rtyper.llannotation import SomePtr - assert isinstance(s_ob, SomePtr) - assert s_Class.is_constant() - classdef = self.bookkeeper.getuniqueclassdef(s_Class.const) - return annmodel.SomeInstance(classdef, can_be_None=True) - - def specialize_call(self, hop): - hop.exception_cannot_occur() - v_ob = hop.inputarg(hop.args_r[1], arg=1) - v_gcobj = hop.genop('gc_rawrefcount_to_obj', [_unspec_ob(hop, v_ob)], - resulttype=llmemory.GCREF) - return _spec_gc(hop, v_gcobj) - -class Entry(ExtRegistryEntry): - _about_ = next_dead - - def compute_result_annotation(self, s_OB_PTR_TYPE): - from rpython.rtyper.llannotation import lltype_to_annotation - assert s_OB_PTR_TYPE.is_constant() - return lltype_to_annotation(s_OB_PTR_TYPE.const) - - def specialize_call(self, hop): - hop.exception_cannot_occur() - v_rawaddr = hop.genop('gc_rawrefcount_next_dead', [], - resulttype=llmemory.Address) - return _spec_ob(hop, v_rawaddr) From pypy.commits at gmail.com Fri Mar 4 12:03:49 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 04 Mar 2016 09:03:49 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: enough to start passing some frontend tests Message-ID: <56d9bff5.a3f6c20a.82b86.ffff823d@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82776:cd6efbc4d6b1 Date: 2016-03-04 19:03 +0200 http://bitbucket.org/pypy/pypy/changeset/cd6efbc4d6b1/ Log: enough to start passing some frontend tests diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ 
-1037,20 +1037,20 @@ else: inline_short_preamble = True inputargs = metainterp.history.inputargs[:] - operations = metainterp.history.operations + trace = metainterp.history.trace label = ResOperation(rop.LABEL, inputargs) jitdriver_sd = metainterp.jitdriver_sd enable_opts = jitdriver_sd.warmstate.enable_opts call_pure_results = metainterp.call_pure_results - if operations[-1].getopnum() == rop.JUMP: + if metainterp.history.ends_with_jump: data = BridgeCompileData(label, operations[:], call_pure_results=call_pure_results, enable_opts=enable_opts, inline_short_preamble=inline_short_preamble) else: - data = SimpleCompileData(label, operations[:], + data = SimpleCompileData(label, trace, call_pure_results=call_pure_results, enable_opts=enable_opts) try: diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -641,8 +641,9 @@ class History(object): + ends_with_jump = False + def __init__(self): - self.inputargs = None self.descr_cache = {} self.descrs = {} self.consts = [] @@ -651,6 +652,7 @@ from rpython.jit.metainterp.opencoder import Trace self.trace = Trace(inpargs) + self.inputargs = inpargs def any_operation(self): return self.trace._count > 0 diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py --- a/rpython/jit/metainterp/optimizeopt/__init__.py +++ b/rpython/jit/metainterp/optimizeopt/__init__.py @@ -53,7 +53,7 @@ """ debug_start("jit-optimize") try: - metainterp_sd.logger_noopt.log_loop(compile_data.trace, memo=memo) + #metainterp_sd.logger_noopt.log_loop(compile_data.trace, memo=memo) if memo is None: memo = {} compile_data.box_names_memo = memo diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2608,6 +2608,7 @@ self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None, descr=target_jitcell_token) + self.history.ends_with_jump = True try: target_token = compile.compile_trace(self, self.resumekey) finally: From pypy.commits at gmail.com Fri Mar 4 12:23:55 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 04 Mar 2016 09:23:55 -0800 (PST) Subject: [pypy-commit] pypy default: don't go via the less efficient BufMatchContext if the string is simply a str Message-ID: <56d9c4ab.d3921c0a.eb707.00d2@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82777:2fa325cdc340 Date: 2016-03-03 21:32 +0100 http://bitbucket.org/pypy/pypy/changeset/2fa325cdc340/ Log: don't go via the less efficient BufMatchContext if the string is simply a str diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -36,6 +36,8 @@ if 0 <= start <= end: if isinstance(ctx, rsre_core.BufMatchContext): return space.wrap(ctx._buffer.getslice(start, end, 1, end-start)) + if isinstance(ctx, rsre_core.StrMatchContext): + return space.wrap(ctx._string[start:end]) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr[start:end]) else: @@ -98,8 +100,8 @@ space.wrap("cannot copy this pattern object")) def make_ctx(self, w_string, pos=0, endpos=sys.maxint): - """Make a BufMatchContext or a UnicodeMatchContext for searching - in the given w_string object.""" + """Make a StrMatchContext, BufMatchContext or a UnicodeMatchContext for + searching in the given w_string object.""" space = self.space if pos 
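The make_ctx() change in this _sre commit is a type dispatch: plain strings get their own StrMatchContext instead of being routed through the generic buffer interface. The same idea in stand-alone Python 2 form, with toy classes rather than the rsre ones:

    class StrCtx(object):
        def __init__(self, s): self._string = s

    class UnicodeCtx(object):
        def __init__(self, u): self._unicodestr = u

    class BufCtx(object):
        def __init__(self, buf): self._buffer = buf

    def make_ctx(obj):
        # cheapest specialized context first, generic buffer as the fallback
        if isinstance(obj, str):
            return StrCtx(obj)
        if isinstance(obj, unicode):
            return UnicodeCtx(obj)
        return BufCtx(obj)

    assert isinstance(make_ctx("abc"), StrCtx)
    assert isinstance(make_ctx(u"abc"), UnicodeCtx)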
< 0: pos = 0 @@ -113,6 +115,14 @@ endpos = len(unicodestr) return rsre_core.UnicodeMatchContext(self.code, unicodestr, pos, endpos, self.flags) + elif space.isinstance_w(w_string, space.w_str): + str = space.str_w(w_string) + if pos > len(str): + pos = len(str) + if endpos > len(str): + endpos = len(str) + return rsre_core.StrMatchContext(self.code, str, + pos, endpos, self.flags) else: buf = space.readbuf_w(w_string) size = buf.getlength() @@ -482,6 +492,8 @@ ctx = self.ctx if isinstance(ctx, rsre_core.BufMatchContext): return space.wrap(ctx._buffer.as_str()) + elif isinstance(ctx, rsre_core.StrMatchContext): + return space.wrap(ctx._string) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr) else: From pypy.commits at gmail.com Fri Mar 4 12:23:57 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 04 Mar 2016 09:23:57 -0800 (PST) Subject: [pypy-commit] pypy default: a somewhat messy improvement of re.sub. makes simple things 3x faster (not Message-ID: <56d9c4ad.e6ebc20a.51bce.ffff8a38@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82778:a4e20be9d315 Date: 2016-03-04 10:27 +0100 http://bitbucket.org/pypy/pypy/changeset/a4e20be9d315/ Log: a somewhat messy improvement of re.sub. makes simple things 3x faster (not quite beating CPython) diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -7,6 +7,7 @@ from pypy.interpreter.error import OperationError from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit +from rpython.rlib.rstring import StringBuilder, UnicodeBuilder # ____________________________________________________________ # @@ -226,6 +227,11 @@ def subx(self, w_ptemplate, w_string, count): space = self.space + # use a (much faster) string/unicode builder if w_ptemplate and + # w_string are both string or both unicode objects, and if w_ptemplate + # is a literal + use_builder = False + filter_as_unicode = filter_as_string = None if space.is_true(space.callable(w_ptemplate)): w_filter = w_ptemplate filter_is_callable = True @@ -233,6 +239,8 @@ if space.isinstance_w(w_ptemplate, space.w_unicode): filter_as_unicode = space.unicode_w(w_ptemplate) literal = u'\\' not in filter_as_unicode + use_builder = ( + space.isinstance_w(w_string, space.w_unicode) and literal) else: try: filter_as_string = space.str_w(w_ptemplate) @@ -242,6 +250,8 @@ literal = False else: literal = '\\' not in filter_as_string + use_builder = ( + space.isinstance_w(w_string, space.w_str) and literal) if literal: w_filter = w_ptemplate filter_is_callable = False @@ -252,19 +262,28 @@ space.wrap(self), w_ptemplate) filter_is_callable = space.is_true(space.callable(w_filter)) # + # XXX this is a bit of a mess, but it improves performance a lot ctx = self.make_ctx(w_string) - sublist_w = [] + sublist_w = strbuilder = unicodebuilder = None + if use_builder: + if filter_as_unicode is not None: + unicodebuilder = UnicodeBuilder(ctx.end) + else: + assert filter_as_string is not None + strbuilder = StringBuilder(ctx.end) + else: + sublist_w = [] n = last_pos = 0 while not count or n < count: if not searchcontext(space, ctx): break if last_pos < ctx.match_start: - sublist_w.append(slice_w(space, ctx, last_pos, - ctx.match_start, space.w_None)) + _sub_append_slice( + ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, last_pos, ctx.match_start) start = ctx.match_end if start == ctx.match_start: start += 1 - nextctx = ctx.fresh_copy(start) if 
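The re.sub() speedup being introduced here rests on one observation: when the replacement is a literal with no backreferences, the result can be appended piece by piece into a single builder instead of collecting wrapped fragments and joining them afterwards. A plain-Python version of that shape (the real code works on wrapped objects and uses rlib StringBuilder/UnicodeBuilder):

    def sub_literal(finditer, string, replacement):
        pieces = []                      # stand-in for StringBuilder
        last = 0
        for start, end in finditer(string):
            pieces.append(string[last:start])
            pieces.append(replacement)
            last = end
        pieces.append(string[last:])
        return ''.join(pieces)

    def find_aa(s):
        # toy "pattern": every non-overlapping occurrence of "aa"
        i = s.find("aa")
        while i != -1:
            yield i, i + 2
            i = s.find("aa", i + 2)

    assert sub_literal(find_aa, "xaayaa", "-") == "x-y-"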
not (last_pos == ctx.match_start == ctx.match_end and n > 0): # the above ignores empty matches on latest position @@ -272,27 +291,60 @@ w_match = self.getmatch(ctx, True) w_piece = space.call_function(w_filter, w_match) if not space.is_w(w_piece, space.w_None): + assert strbuilder is None and unicodebuilder is None + assert not use_builder sublist_w.append(w_piece) else: - sublist_w.append(w_filter) + if use_builder: + if strbuilder is not None: + assert filter_as_string is not None + strbuilder.append(filter_as_string) + else: + assert unicodebuilder is not None + assert filter_as_unicode is not None + unicodebuilder.append(filter_as_unicode) + else: + sublist_w.append(w_filter) last_pos = ctx.match_end n += 1 elif last_pos >= ctx.end: break # empty match at the end: finished - ctx = nextctx + ctx.reset(start) if last_pos < ctx.end: - sublist_w.append(slice_w(space, ctx, last_pos, ctx.end, - space.w_None)) + _sub_append_slice(ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, last_pos, ctx.end) + if use_builder: + if strbuilder is not None: + return space.wrap(strbuilder.build()), n + else: + assert unicodebuilder is not None + return space.wrap(unicodebuilder.build()), n + else: + if space.isinstance_w(w_string, space.w_unicode): + w_emptystr = space.wrap(u'') + else: + w_emptystr = space.wrap('') + w_item = space.call_method(w_emptystr, 'join', + space.newlist(sublist_w)) + return w_item, n - if space.isinstance_w(w_string, space.w_unicode): - w_emptystr = space.wrap(u'') - else: - w_emptystr = space.wrap('') - w_item = space.call_method(w_emptystr, 'join', - space.newlist(sublist_w)) - return w_item, n +def _sub_append_slice(ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, start, end): + if use_builder: + if isinstance(ctx, rsre_core.BufMatchContext): + assert strbuilder is not None + return strbuilder.append(ctx._buffer.getslice(start, end, 1, end-start)) + if isinstance(ctx, rsre_core.StrMatchContext): + assert strbuilder is not None + return strbuilder.append_slice(ctx._string, start, end) + elif isinstance(ctx, rsre_core.UnicodeMatchContext): + assert unicodebuilder is not None + return unicodebuilder.append_slice(ctx._unicodestr, start, end) + assert 0, "unreachable" + else: + sublist_w.append(slice_w(space, ctx, start, end, space.w_None)) @unwrap_spec(flags=int, groups=int, w_groupindex=WrappedDefault(None), w_indexgroup=WrappedDefault(None)) From pypy.commits at gmail.com Fri Mar 4 12:23:59 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 04 Mar 2016 09:23:59 -0800 (PST) Subject: [pypy-commit] pypy default: add a jit driver (unfortunately I can't use autoreds because of the green field Message-ID: <56d9c4af.e6ebc20a.51bce.ffff8a3c@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82779:b068f288af89 Date: 2016-03-04 15:43 +0100 http://bitbucket.org/pypy/pypy/changeset/b068f288af89/ Log: add a jit driver (unfortunately I can't use autoreds because of the green field usage :-( ) diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -275,6 +275,22 @@ sublist_w = [] n = last_pos = 0 while not count or n < count: + sub_jitdriver.jit_merge_point( + self=self, + use_builder=use_builder, + filter_is_callable=filter_is_callable, + filter_type=type(w_filter), + ctx=ctx, + w_filter=w_filter, + strbuilder=strbuilder, + unicodebuilder=unicodebuilder, + filter_as_string=filter_as_string, + filter_as_unicode=filter_as_unicode, + count=count, + 
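For readers who have not met JitDriver before: the greens name the position in the user program (constant for one compiled loop, here the regexp pattern plus a few flags), while the reds are everything that changes between iterations and must be listed at jit_merge_point. A minimal driver in the same style, assuming the rpython package is importable:

    from rpython.rlib import jit

    driver = jit.JitDriver(greens=['pattern'], reds=['n', 'total'])

    def count(pattern, n):
        total = 0
        while n > 0:
            # one merge point per iteration, naming every green and red
            driver.jit_merge_point(pattern=pattern, n=n, total=total)
            if pattern == 'aa':
                total += 1
            n -= 1
        return total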
w_string=w_string, + n=n, last_pos=last_pos, sublist_w=sublist_w + ) + space = self.space if not searchcontext(space, ctx): break if last_pos < ctx.match_start: @@ -329,6 +345,16 @@ space.newlist(sublist_w)) return w_item, n +sub_jitdriver = jit.JitDriver( + reds="""count n last_pos + ctx w_filter + strbuilder unicodebuilder + filter_as_string + filter_as_unicode + w_string sublist_w + self""".split(), + greens=["filter_is_callable", "use_builder", "filter_type", "ctx.pattern"]) + def _sub_append_slice(ctx, space, use_builder, sublist_w, strbuilder, unicodebuilder, start, end): From pypy.commits at gmail.com Fri Mar 4 14:15:46 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 04 Mar 2016 11:15:46 -0800 (PST) Subject: [pypy-commit] pypy rawrefcount-review: Factor out some repeated code Message-ID: <56d9dee2.c96cc20a.89e0a.ffffb0ed@mx.google.com> Author: Ronan Lamy Branch: rawrefcount-review Changeset: r82780:0d0ac385be9c Date: 2016-03-04 19:14 +0000 http://bitbucket.org/pypy/pypy/changeset/0d0ac385be9c/ Log: Factor out some repeated code diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -285,7 +285,7 @@ assert small_request_threshold % WORD == 0 self.read_from_env = read_from_env self.nursery_size = nursery_size - + self.small_request_threshold = small_request_threshold self.major_collection_threshold = major_collection_threshold self.growth_rate_max = growth_rate_max @@ -729,7 +729,7 @@ # nursery. "Next area" in this case is the space between the # pinned object in front of nusery_top and the pinned object # after that. Graphically explained: - # + # # |- allocating totalsize failed in this area # | |- nursery_top # | | |- pinned object in front of nursery_top, @@ -774,7 +774,7 @@ # true after that. In that case we do a second step. # The goal is to avoid too high memory peaks if the # program allocates a lot of surviving objects. 
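The repetition this incminimark changeset factors out is the by-hand casting around ob_pypy_link: the raw PyObject header stores its garbage-collected partner as an integer, so every call site converted address to int and back. Reduced to a helper pair it looks roughly like this, using pure-Python stand-ins for the incminimark types:

    class FakePyObj(object):
        ob_pypy_link = 0

    _handles = {}        # int handle -> gc object, emulating the address cast

    def set_gc_partner(pyobj, gcobj):
        handle = id(gcobj)
        _handles[handle] = gcobj
        pyobj.ob_pypy_link = handle

    def get_gc_partner(pyobj):
        return _handles.get(pyobj.ob_pypy_link)

    ob, partner = FakePyObj(), object()
    set_gc_partner(ob, partner)
    assert get_gc_partner(ob) is partner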
- # + # if (self.gc_state != STATE_SCANNING or self.threshold_reached()): @@ -2767,6 +2767,14 @@ def _pyobj(self, pyobjaddr): return llmemory.cast_adr_to_ptr(pyobjaddr, self.PYOBJ_HDR_PTR) + def _rrc_set_gc_partner(self, adr_rawobj, adr_gcobj): + int_gcobj = llmemory.cast_adr_to_int(adr_gcobj, "symbolic") + self._pyobj(adr_rawobj).ob_pypy_link = int_gcobj + + def _rrc_get_gc_partner(self, adr_rawobj): + int_gcobj = self._pyobj(adr_rawobj).ob_pypy_link + return llmemory.cast_int_to_adr(int_gcobj) + def rawrefcount_init(self, dealloc_trigger_callback): # see pypy/doc/discussion/rawrefcount.rst if not self.rrc_enabled: @@ -2797,8 +2805,7 @@ def rawrefcount_create_link_pypy(self, gcobj, pyobject): ll_assert(self.rrc_enabled, "rawrefcount.init not called") obj = llmemory.cast_ptr_to_adr(gcobj) - objint = llmemory.cast_adr_to_int(obj, "symbolic") - self._pyobj(pyobject).ob_pypy_link = objint + self._rrc_set_gc_partner(pyobject, obj) # lst = self.rrc_p_list_young if self.is_in_nursery(obj): @@ -2813,12 +2820,11 @@ def rawrefcount_create_link_pyobj(self, gcobj, pyobject): ll_assert(self.rrc_enabled, "rawrefcount.init not called") obj = llmemory.cast_ptr_to_adr(gcobj) + self._rrc_set_gc_partner(pyobject, obj) if self.is_young_object(obj): self.rrc_o_list_young.append(pyobject) else: self.rrc_o_list_old.append(pyobject) - objint = llmemory.cast_adr_to_int(obj, "symbolic") - self._pyobj(pyobject).ob_pypy_link = objint # there is no rrc_o_dict def rawrefcount_from_obj(self, gcobj): @@ -2830,7 +2836,7 @@ return dct.get(obj) def rawrefcount_to_obj(self, pyobject): - obj = llmemory.cast_int_to_adr(self._pyobj(pyobject).ob_pypy_link) + obj = self._rrc_get_gc_partner(pyobject) return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF) def rawrefcount_next_dead(self): @@ -2859,8 +2865,7 @@ pass # the corresponding object may die else: # force the corresponding object to be alive - intobj = self._pyobj(pyobject).ob_pypy_link - singleaddr.address[0] = llmemory.cast_int_to_adr(intobj) + singleaddr.address[0] = self._rrc_get_gc_partner(pyobject) self._trace_drag_out(singleaddr, llmemory.NULL) def rrc_minor_collection_free(self): @@ -2876,14 +2881,12 @@ no_o_dict) def _rrc_minor_free(self, pyobject, surviving_list, surviving_dict): - intobj = self._pyobj(pyobject).ob_pypy_link - obj = llmemory.cast_int_to_adr(intobj) + obj = self._rrc_get_gc_partner(pyobject) if self.is_in_nursery(obj): if self.is_forwarded(obj): # Common case: survives and moves obj = self.get_forwarding_address(obj) - intobj = llmemory.cast_adr_to_int(obj, "symbolic") - self._pyobj(pyobject).ob_pypy_link = intobj + self._rrc_set_gc_partner(pyobject, obj) surviving = True if surviving_dict: # Surviving nursery object: was originally in @@ -2947,8 +2950,7 @@ pass # the corresponding object may die else: # force the corresponding object to be alive - intobj = self._pyobj(pyobject).ob_pypy_link - obj = llmemory.cast_int_to_adr(intobj) + obj = self._rrc_get_gc_partner(pyobject) self.objects_to_trace.append(obj) self.visit_all_objects() @@ -2977,8 +2979,7 @@ # This is true if the obj has one of the following two flags: # * GCFLAG_VISITED: was seen during tracing # * GCFLAG_NO_HEAP_PTRS: immortal object never traced (so far) - intobj = self._pyobj(pyobject).ob_pypy_link - obj = llmemory.cast_int_to_adr(intobj) + obj = self._rrc_get_gc_partner(pyobject) if self.header(obj).tid & (GCFLAG_VISITED | GCFLAG_NO_HEAP_PTRS): surviving_list.append(pyobject) if surviving_dict: From pypy.commits at gmail.com Fri Mar 4 15:58:28 2016 From: pypy.commits at gmail.com 
(rlamy) Date: Fri, 04 Mar 2016 12:58:28 -0800 (PST) Subject: [pypy-commit] pypy rawrefcount-review: Use @py.test.mark.parametrize Message-ID: <56d9f6f4.85371c0a.c5e82.4caf@mx.google.com> Author: Ronan Lamy Branch: rawrefcount-review Changeset: r82783:73435fbb4416 Date: 2016-03-04 20:57 +0000 http://bitbucket.org/pypy/pypy/changeset/73435fbb4416/ Log: Use @py.test.mark.parametrize diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py --- a/rpython/memory/gc/test/test_rawrefcount.py +++ b/rpython/memory/gc/test/test_rawrefcount.py @@ -74,7 +74,8 @@ return p1 return p1, p1ref, r1, r1addr, check_alive - def test_rawrefcount_objects_basic(self, old=False): + @py.test.mark.parametrize('old', [True, False]) + def test_rawrefcount_objects_basic(self, old): p1, p1ref, r1, r1addr, check_alive = ( self._rawrefcount_pair(42, is_light=True, create_old=old)) p2 = self.malloc(S) @@ -95,7 +96,8 @@ lltype.free(r1, flavor='raw') lltype.free(r2, flavor='raw') - def test_rawrefcount_objects_collection_survives_from_raw(self, old=False): + @py.test.mark.parametrize('old', [True, False]) + def test_rawrefcount_objects_collection_survives_from_raw(self, old): p1, p1ref, r1, r1addr, check_alive = ( self._rawrefcount_pair(42, is_light=True, create_old=old)) check_alive(0) @@ -114,7 +116,8 @@ assert self.trigger == [] assert self.gc.rawrefcount_next_dead() == llmemory.NULL - def test_rawrefcount_dies_quickly(self, old=False): + @py.test.mark.parametrize('old', [True, False]) + def test_rawrefcount_dies_quickly(self, old): p1, p1ref, r1, r1addr, check_alive = ( self._rawrefcount_pair(42, is_light=True, create_old=old)) check_alive(0) @@ -126,7 +129,8 @@ py.test.raises(RuntimeError, "p1.x") # dead self.gc.check_no_more_rawrefcount_state() - def test_rawrefcount_objects_collection_survives_from_obj(self, old=False): + @py.test.mark.parametrize('old', [True, False]) + def test_rawrefcount_objects_collection_survives_from_obj(self, old): p1, p1ref, r1, r1addr, check_alive = ( self._rawrefcount_pair(42, is_light=True, create_old=old)) check_alive(0) @@ -144,16 +148,8 @@ py.test.raises(RuntimeError, "p1.x") # dead self.gc.check_no_more_rawrefcount_state() - def test_rawrefcount_objects_basic_old(self): - self.test_rawrefcount_objects_basic(old=True) - def test_rawrefcount_objects_collection_survives_from_raw_old(self): - self.test_rawrefcount_objects_collection_survives_from_raw(old=True) - def test_rawrefcount_dies_quickly_old(self): - self.test_rawrefcount_dies_quickly(old=True) - def test_rawrefcount_objects_collection_survives_from_obj_old(self): - self.test_rawrefcount_objects_collection_survives_from_obj(old=True) - - def test_pypy_nonlight_survives_from_raw(self, old=False): + @py.test.mark.parametrize('old', [True, False]) + def test_pypy_nonlight_survives_from_raw(self, old): p1, p1ref, r1, r1addr, check_alive = ( self._rawrefcount_pair(42, is_light=False, create_old=old)) check_alive(0) @@ -175,7 +171,8 @@ self.gc.check_no_more_rawrefcount_state() lltype.free(r1, flavor='raw') - def test_pypy_nonlight_survives_from_obj(self, old=False): + @py.test.mark.parametrize('old', [True, False]) + def test_pypy_nonlight_survives_from_obj(self, old): p1, p1ref, r1, r1addr, check_alive = ( self._rawrefcount_pair(42, is_light=False, create_old=old)) check_alive(0) @@ -196,7 +193,8 @@ self.gc.check_no_more_rawrefcount_state() lltype.free(r1, flavor='raw') - def test_pypy_nonlight_dies_quickly(self, old=False): + @py.test.mark.parametrize('old', [True, False]) + def 
test_pypy_nonlight_dies_quickly(self, old): p1, p1ref, r1, r1addr, check_alive = ( self._rawrefcount_pair(42, is_light=False, create_old=old)) check_alive(0) @@ -213,13 +211,6 @@ self.gc.check_no_more_rawrefcount_state() lltype.free(r1, flavor='raw') - def test_pypy_nonlight_survives_from_raw_old(self): - self.test_pypy_nonlight_survives_from_raw(old=True) - def test_pypy_nonlight_survives_from_obj_old(self): - self.test_pypy_nonlight_survives_from_obj(old=True) - def test_pypy_nonlight_dies_quickly_old(self): - self.test_pypy_nonlight_dies_quickly(old=True) - def test_pyobject_pypy_link_dies_on_minor_collection(self): p1, p1ref, r1, r1addr, check_alive = ( self._rawrefcount_pair(42, is_pyobj=True)) @@ -231,7 +222,8 @@ self.gc.check_no_more_rawrefcount_state() lltype.free(r1, flavor='raw') - def test_pyobject_dies(self, old=False): + @py.test.mark.parametrize('old', [True, False]) + def test_pyobject_dies(self, old): p1, p1ref, r1, r1addr, check_alive = ( self._rawrefcount_pair(42, is_pyobj=True, create_old=old)) check_alive(0) @@ -247,7 +239,8 @@ self.gc.check_no_more_rawrefcount_state() lltype.free(r1, flavor='raw') - def test_pyobject_survives_from_obj(self, old=False): + @py.test.mark.parametrize('old', [True, False]) + def test_pyobject_survives_from_obj(self, old): p1, p1ref, r1, r1addr, check_alive = ( self._rawrefcount_pair(42, is_pyobj=True, create_old=old)) check_alive(0) @@ -269,11 +262,6 @@ self.gc.check_no_more_rawrefcount_state() lltype.free(r1, flavor='raw') - def test_pyobject_dies_old(self): - self.test_pyobject_dies(old=True) - def test_pyobject_survives_from_obj_old(self): - self.test_pyobject_survives_from_obj(old=True) - def test_pyobject_attached_to_prebuilt_obj(self): p1, p1ref, r1, r1addr, check_alive = ( self._rawrefcount_pair(42, create_immortal=True)) From pypy.commits at gmail.com Sat Mar 5 07:40:46 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sat, 05 Mar 2016 04:40:46 -0800 (PST) Subject: [pypy-commit] pypy global-cell-cache: another one Message-ID: <56dad3ce.6672c20a.68db4.ffffb469@mx.google.com> Author: Carl Friedrich Bolz Branch: global-cell-cache Changeset: r82784:b51b594a5d13 Date: 2016-03-05 13:32 +0100 http://bitbucket.org/pypy/pypy/changeset/b51b594a5d13/ Log: another one diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -244,7 +244,7 @@ def STORE_GLOBAL_celldict(space, frame, nameindex, w_value): pycode = frame.getcode() - w_globals = frame.w_globals + w_globals = frame.get_w_globals() entry = pycode._celldict_cache[nameindex] cell = _finditem_with_cache(space, frame, nameindex, pycode, w_globals, entry.version, entry.value) From pypy.commits at gmail.com Sat Mar 5 07:40:48 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sat, 05 Mar 2016 04:40:48 -0800 (PST) Subject: [pypy-commit] pypy default: implement super.__thisclass__ Message-ID: <56dad3d0.d4e41c0a.bc3bf.2fee@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82785:03f7d9255635 Date: 2016-03-05 13:37 +0100 http://bitbucket.org/pypy/pypy/changeset/03f7d9255635/ Log: implement super.__thisclass__ diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py --- a/pypy/module/__builtin__/descriptor.py +++ b/pypy/module/__builtin__/descriptor.py @@ -79,6 +79,7 @@ W_Super.typedef = TypeDef( 'super', __new__ = interp2app(descr_new_super), + __thisclass__ = interp_attrproperty_w("w_starttype", W_Super), __getattribute__ = 
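The parametrize conversion above is purely mechanical: one decorated test replaces each hand-written pair of old/young variants, and the test runner generates both cases itself. A self-contained equivalent that pytest would collect as four tests:

    import pytest

    @pytest.mark.parametrize('old', [True, False])
    def test_link_survives(old):
        generation = 'old' if old else 'young'
        assert generation in ('old', 'young')

    @pytest.mark.parametrize('old', [True, False])
    def test_link_dies_quickly(old):
        assert isinstance(old, bool)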
interp2app(W_Super.getattribute), __get__ = interp2app(W_Super.get), __doc__ = """super(type) -> unbound super object diff --git a/pypy/module/__builtin__/test/test_descriptor.py b/pypy/module/__builtin__/test/test_descriptor.py --- a/pypy/module/__builtin__/test/test_descriptor.py +++ b/pypy/module/__builtin__/test/test_descriptor.py @@ -214,7 +214,7 @@ c = C() assert C.goo(1) == (C, 1) assert c.goo(1) == (C, 1) - + assert c.foo(1) == (c, 1) class D(C): pass @@ -238,6 +238,12 @@ meth = classmethod(1).__get__(1) raises(TypeError, meth) + def test_super_thisclass(self): + class A(object): + pass + + assert super(A, A()).__thisclass__ is A + def test_property_docstring(self): assert property.__doc__.startswith('property') From pypy.commits at gmail.com Sat Mar 5 08:30:33 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sat, 05 Mar 2016 05:30:33 -0800 (PST) Subject: [pypy-commit] pypy default: skip a few things when running under appdirect that CPython does differently Message-ID: <56dadf79.0775c20a.45ba5.ffffc255@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82786:9195aec38e3f Date: 2016-03-05 13:56 +0100 http://bitbucket.org/pypy/pypy/changeset/9195aec38e3f/ Log: skip a few things when running under appdirect that CPython does differently (error messages) or does not support diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -1,5 +1,4 @@ - -import unittest +import pytest from pypy.interpreter import eval from pypy.interpreter.function import Function, Method, descr_function_get from pypy.interpreter.pycode import PyCode @@ -187,6 +186,7 @@ raises( TypeError, func, 42, {'arg1': 23}) + @pytest.mark.skipif("config.option.runappdirect") def test_kwargs_nondict_mapping(self): class Mapping: def keys(self): @@ -284,6 +284,7 @@ raises(TypeError, len, s, some_unknown_keyword=s) raises(TypeError, len, s, s, some_unknown_keyword=s) + @pytest.mark.skipif("config.option.runappdirect") def test_call_error_message(self): try: len() @@ -325,6 +326,7 @@ f = lambda: 42 assert f.func_doc is None + @pytest.mark.skipif("config.option.runappdirect") def test_setstate_called_with_wrong_args(self): f = lambda: 42 # not sure what it should raise, since CPython doesn't have setstate From pypy.commits at gmail.com Sat Mar 5 08:30:35 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sat, 05 Mar 2016 05:30:35 -0800 (PST) Subject: [pypy-commit] pypy default: define a constant for the shift amount of the id tagging Message-ID: <56dadf7b.02931c0a.63606.3ca1@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82787:73814b195ba5 Date: 2016-03-05 14:29 +0100 http://bitbucket.org/pypy/pypy/changeset/73814b195ba5/ Log: define a constant for the shift amount of the id tagging diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -266,11 +266,12 @@ return None from rpython.rlib.longlong2float import float2longlong from pypy.objspace.std.util import IDTAG_COMPLEX as tag + from pypy.objspace.std.util import IDTAG_SHIFT real = space.float_w(space.getattr(self, space.wrap("real"))) imag = space.float_w(space.getattr(self, space.wrap("imag"))) real_b = rbigint.fromrarith_int(float2longlong(real)) imag_b = rbigint.fromrarith_int(r_ulonglong(float2longlong(imag))) - val = real_b.lshift(64).or_(imag_b).lshift(3).int_or_(tag) + val = 
real_b.lshift(64).or_(imag_b).lshift(IDTAG_SHIFT).int_or_(tag) return space.newlong_from_rbigint(val) def int(self, space): diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -181,9 +181,10 @@ return None from rpython.rlib.longlong2float import float2longlong from pypy.objspace.std.util import IDTAG_FLOAT as tag + from pypy.objspace.std.util import IDTAG_SHIFT val = float2longlong(space.float_w(self)) b = rbigint.fromrarith_int(val) - b = b.lshift(3).int_or_(tag) + b = b.lshift(IDTAG_SHIFT).int_or_(tag) return space.newlong_from_rbigint(b) def __repr__(self): diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -24,7 +24,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.objspace.std import newformat from pypy.objspace.std.util import ( - BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_INT, wrap_parsestringerror) + BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_INT, IDTAG_SHIFT, wrap_parsestringerror) SENTINEL = object() @@ -46,7 +46,7 @@ if self.user_overridden_class: return None b = space.bigint_w(self) - b = b.lshift(3).int_or_(IDTAG_INT) + b = b.lshift(IDTAG_SHIFT).int_or_(IDTAG_INT) return space.newlong_from_rbigint(b) def int(self, space): diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -16,7 +16,7 @@ from pypy.objspace.std import newformat from pypy.objspace.std.intobject import W_AbstractIntObject from pypy.objspace.std.util import ( - BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_LONG, wrap_parsestringerror) + BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_LONG, IDTAG_SHIFT, wrap_parsestringerror) def delegate_other(func): @@ -45,7 +45,7 @@ if self.user_overridden_class: return None b = space.bigint_w(self) - b = b.lshift(3).int_or_(IDTAG_LONG) + b = b.lshift(IDTAG_SHIFT).int_or_(IDTAG_LONG) return space.newlong_from_rbigint(b) def unwrap(self, space): diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -2,6 +2,7 @@ from pypy.interpreter.error import OperationError, oefmt +IDTAG_SHIFT = 3 IDTAG_INT = 1 IDTAG_LONG = 3 From pypy.commits at gmail.com Sat Mar 5 11:02:21 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 05 Mar 2016 08:02:21 -0800 (PST) Subject: [pypy-commit] pypy default: improve the test to check that __thisclass__ returns the first argument, Message-ID: <56db030d.85371c0a.c5e82.71e5@mx.google.com> Author: Armin Rigo Branch: Changeset: r82788:72ff295d1274 Date: 2016-03-05 17:01 +0100 http://bitbucket.org/pypy/pypy/changeset/72ff295d1274/ Log: improve the test to check that __thisclass__ returns the first argument, and not e.g. 
the class of the instance diff --git a/pypy/module/__builtin__/test/test_descriptor.py b/pypy/module/__builtin__/test/test_descriptor.py --- a/pypy/module/__builtin__/test/test_descriptor.py +++ b/pypy/module/__builtin__/test/test_descriptor.py @@ -244,6 +244,11 @@ assert super(A, A()).__thisclass__ is A + class B(A): + pass + + assert super(B, B()).__thisclass__ is B + assert super(A, B()).__thisclass__ is A def test_property_docstring(self): assert property.__doc__.startswith('property') From pypy.commits at gmail.com Sat Mar 5 11:07:00 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 05 Mar 2016 08:07:00 -0800 (PST) Subject: [pypy-commit] pypy default: Fix defaultdict.__module__ Message-ID: <56db0424.82561c0a.51f04.6ef9@mx.google.com> Author: Armin Rigo Branch: Changeset: r82789:478732783f5a Date: 2016-03-05 17:06 +0100 http://bitbucket.org/pypy/pypy/changeset/478732783f5a/ Log: Fix defaultdict.__module__ diff --git a/pypy/module/_collections/app_defaultdict.py b/pypy/module/_collections/app_defaultdict.py --- a/pypy/module/_collections/app_defaultdict.py +++ b/pypy/module/_collections/app_defaultdict.py @@ -12,6 +12,7 @@ class defaultdict(dict): __slots__ = ['default_factory'] + __module__ = 'collections' def __init__(self, *args, **kwds): if len(args) > 0: diff --git a/pypy/module/_collections/test/test_defaultdict.py b/pypy/module/_collections/test/test_defaultdict.py --- a/pypy/module/_collections/test/test_defaultdict.py +++ b/pypy/module/_collections/test/test_defaultdict.py @@ -14,6 +14,12 @@ d[5].append(44) assert l == [42, 43] and l2 == [44] + def test_module(self): + from _collections import defaultdict + assert repr(defaultdict) in ( + "", # on PyPy + "") # on CPython + def test_keyerror_without_factory(self): from _collections import defaultdict for d1 in [defaultdict(), defaultdict(None)]: From pypy.commits at gmail.com Sat Mar 5 11:11:01 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 05 Mar 2016 08:11:01 -0800 (PST) Subject: [pypy-commit] pypy default: Merged in loganchien/pypy (pull request #408) Message-ID: <56db0515.03dd1c0a.62e34.6d64@mx.google.com> Author: Armin Rigo Branch: Changeset: r82791:f8d22385b6a6 Date: 2016-03-05 17:10 +0100 http://bitbucket.org/pypy/pypy/changeset/f8d22385b6a6/ Log: Merged in loganchien/pypy (pull request #408) update byte interpreter link in rpython docs diff --git a/rpython/doc/translation.rst b/rpython/doc/translation.rst --- a/rpython/doc/translation.rst +++ b/rpython/doc/translation.rst @@ -85,7 +85,7 @@ .. _PDF color version: _static/translation.pdf -.. _bytecode evaluator: interpreter.html +.. _bytecode evaluator: http://pypy.readthedocs.org/en/latest/interpreter.html .. _abstract interpretation: http://en.wikipedia.org/wiki/Abstract_interpretation From pypy.commits at gmail.com Sat Mar 5 11:11:12 2016 From: pypy.commits at gmail.com (loganchien) Date: Sat, 05 Mar 2016 08:11:12 -0800 (PST) Subject: [pypy-commit] pypy default: update byte interpreter link in rpython docs Message-ID: <56db0520.6718c20a.5e729.fffff549@mx.google.com> Author: Logan Chien Branch: Changeset: r82790:def5114a5b9a Date: 2016-03-05 22:15 +0800 http://bitbucket.org/pypy/pypy/changeset/def5114a5b9a/ Log: update byte interpreter link in rpython docs diff --git a/rpython/doc/translation.rst b/rpython/doc/translation.rst --- a/rpython/doc/translation.rst +++ b/rpython/doc/translation.rst @@ -85,7 +85,7 @@ .. _PDF color version: _static/translation.pdf -.. _bytecode evaluator: interpreter.html +.. 
_bytecode evaluator: http://pypy.readthedocs.org/en/latest/interpreter.html .. _abstract interpretation: http://en.wikipedia.org/wiki/Abstract_interpretation From pypy.commits at gmail.com Sat Mar 5 11:16:37 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 05 Mar 2016 08:16:37 -0800 (PST) Subject: [pypy-commit] pypy default: Actually, this link should go away nowadays Message-ID: <56db0665.cf0b1c0a.f2da7.6dd3@mx.google.com> Author: Armin Rigo Branch: Changeset: r82792:472c371018f4 Date: 2016-03-05 17:15 +0100 http://bitbucket.org/pypy/pypy/changeset/472c371018f4/ Log: Actually, this link should go away nowadays diff --git a/rpython/doc/translation.rst b/rpython/doc/translation.rst --- a/rpython/doc/translation.rst +++ b/rpython/doc/translation.rst @@ -33,7 +33,7 @@ The RPython translation toolchain never sees Python source code or syntax trees, but rather starts with the *code objects* that define the behaviour of the function objects one gives it as input. The -`bytecode evaluator`_ and the :ref:`flow graph builder` work through these +:ref:`flow graph builder` works through these code objects using `abstract interpretation`_ to produce a control flow graph (one per function): yet another representation of the source program, but one which is suitable for applying type inference @@ -85,7 +85,6 @@ .. _PDF color version: _static/translation.pdf -.. _bytecode evaluator: http://pypy.readthedocs.org/en/latest/interpreter.html .. _abstract interpretation: http://en.wikipedia.org/wiki/Abstract_interpretation From pypy.commits at gmail.com Sat Mar 5 11:59:11 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sat, 05 Mar 2016 08:59:11 -0800 (PST) Subject: [pypy-commit] pypy default: make method identity the same as equality (since people seem to do stuff like Message-ID: <56db105f.c85b1c0a.db10b.ffff8d27@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82793:666871b885d9 Date: 2016-03-05 17:58 +0100 http://bitbucket.org/pypy/pypy/changeset/666871b885d9/ Log: make method identity the same as equality (since people seem to do stuff like cls.__repr__ is object.__repr__, see issue #2253) diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -13,6 +13,9 @@ from pypy.interpreter.argument import Arguments from rpython.rlib import jit +from rpython.rlib.rarithmetic import LONG_BIT +from rpython.rlib.rbigint import rbigint + funccallunrolling = unrolling_iterable(range(4)) @@ -557,6 +560,26 @@ return space.w_False return space.eq(self.w_function, w_other.w_function) + def is_w(self, space, other): + if not isinstance(other, Method): + return False + return (self.w_instance is other.w_instance and + self.w_function is other.w_function and + self.w_class is other.w_class) + + def immutable_unique_id(self, space): + from pypy.objspace.std.util import IDTAG_METHOD as tag + from pypy.objspace.std.util import IDTAG_SHIFT + if self.w_instance is not None: + id = space.bigint_w(space.id(self.w_instance)) + id.lshift(LONG_BIT) + else: + id = rbigint.fromint(0) + id = id.or_(space.bigint_w(space.id(self.w_function))) + id = id.lshift(LONG_BIT).or_(space.bigint_w(space.id(self.w_class))) + id = id.lshift(IDTAG_SHIFT).int_or_(tag) + return space.newlong_from_rbigint(id) + def descr_method_hash(self): space = self.space w_result = space.hash(self.w_function) diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ 
b/pypy/interpreter/test/test_function.py @@ -552,6 +552,37 @@ assert A().m == X() assert X() == A().m + @pytest.mark.skipif("config.option.runappdirect") + def test_method_identity(self): + class A(object): + def m(self): + pass + def n(self): + pass + + class B(A): + pass + + class X(object): + def __eq__(self, other): + return True + + a = A() + a2 = A() + assert a.m is a.m + assert id(a.m) == id(a.m) + assert a.m is not a.n + assert id(a.m) != id(a.n) + assert a.m is not a2.m + assert id(a.m) != id(a2.m) + + assert A.m is A.m + assert id(A.m) == id(A.m) + assert A.m is not A.n + assert id(A.m) != id(A.n) + assert A.m is not B.m + assert id(A.m) != id(B.m) + class TestMethod: def setup_method(self, method): diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -2,12 +2,13 @@ from pypy.interpreter.error import OperationError, oefmt -IDTAG_SHIFT = 3 +IDTAG_SHIFT = 4 IDTAG_INT = 1 IDTAG_LONG = 3 IDTAG_FLOAT = 5 IDTAG_COMPLEX = 7 +IDTAG_METHOD = 9 CMP_OPS = dict(lt='<', le='<=', eq='==', ne='!=', gt='>', ge='>=') BINARY_BITWISE_OPS = {'and': '&', 'lshift': '<<', 'or': '|', 'rshift': '>>', From pypy.commits at gmail.com Sat Mar 5 15:55:42 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 05 Mar 2016 12:55:42 -0800 (PST) Subject: [pypy-commit] pypy default: flesh out release notice Message-ID: <56db47ce.d3921c0a.eb707.ffffc6a8@mx.google.com> Author: mattip Branch: Changeset: r82794:0cbb7492c49a Date: 2016-03-05 22:54 +0200 http://bitbucket.org/pypy/pypy/changeset/0cbb7492c49a/ Log: flesh out release notice diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -3,8 +3,20 @@ ========== We have released PyPy 5.0.0, about three months after PyPy 4.0.0. -We encourage all users of PyPy to update to this version. There are -bug fixes and a major upgrade to our c-api layer (cpyext) +We encourage all users of PyPy to update to this version. Apart from the usual +bug fixes, there is an ongoing effort to improve the warmup time and memory +usage of JIT-related metadata. The exact effects depend vastly on the program +you're running and can range from insignificant to warmup being up to 30% +faster and memory dropping by about 30%. + +We also merged a major upgrade to our C-API layer (cpyext), simplifying the +interaction between c-level objects and PyPy interpreter level objects. As a +result, lxml with its cython compiled component `passes all tests`_ on PyPy + +Users who have gotten used to vmprof_ on Linux, and those on other platforms +who have not yet tried it's awesomeness, will be happy to hear that vmprof +now just works on MacOS and Windows too, in both PyPy (built-in support) and +CPython (as an installed module). You can download the PyPy 5.0.0 release here: @@ -33,6 +45,8 @@ .. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly .. _`help`: http://doc.pypy.org/en/latest/project-ideas.html .. _`numpy`: https://bitbucket.org/pypy/numpy +.. _`passes all tests`: https://bitbucket.org/pypy/compatibility/wiki/lxml +.. _vmprof: http://vmprof.readthedocs.org What is PyPy? ============= @@ -55,42 +69,153 @@ Other Highlights (since 4.0.1 released in November 2015) ======================================================= +* New features: + + * Support embedding PyPy in a C-program via cffi and static callbacks in cffi. 
+ This deprecates the old method of embedding PyPy + + * Refactor vmprof to work cross-operating-system, deprecate using buggy + libunwind on Linux platforms. Vmprof even works on Windows now. + + * Support more of the C-API type slots, like tp_getattro, and fix C-API + macros, functions, and structs such as _PyLong_FromByteArray(), + PyString_GET_SIZE, f_locals in PyFrameObject, Py_NAN, co_filename in + PyCodeObject + + * Use a more stable approach for allocating PyObjects in cpyext. (see + `blog post`_). Once the PyObject corresponding to a PyPy object is created, + it stays around at the same location until the death of the PyPy object. + Done with a little bit of custom GC support. It allows us to kill the + notion of "borrowing" inside cpyext, reduces 4 dictionaries down to 1, and + significantly simplifies the whole approach (which is why it is a new + feature while technically a refactoring) and allows PyPy to support the + populart lxml module (as of the *next* release) with no PyPy specific + patches needed + + * Make the default filesystem encoding ASCII, like CPython + + * Use `hypothesis`_ in test creation, which is great for randomizing tests + * Bug Fixes - * + * Backport always using os.urandom for uuid4 from cpython - * + * More completely support datetime, optimize timedelta creation - * + * Fix for issue 2185 which caused an inconsistent list of operations to be + generated by the unroller, appeared in a complicated DJango app + + * Fix an elusive issue with stacklets on shadowstack which showed up when + forgetting stacklets without resuming them + + * Fix entrypoint() which now acquires the GIL + + * Fix direct_ffi_call() so failure does not bail out before setting CALL_MAY_FORCE + + * Fix (de)pickling long values by simplifying the implementation + + * Fix RPython rthread so that objects stored as threadlocal do not force minor + GC collection and are kept alive automatically. This improves perfomance of + short-running Python callbacks and prevents resetting such object between + calls + + * Support floats as parameters to itertools.isslice() + + * Check for the existence of CODESET, ignoring it should have prevented PyPy + from working on FreeBSD + + * Fix for corner case (likely shown by Krakatau) for consecutive guards with + interdependencies + + * Fix applevel bare class method comparisons which should fix pretty printing + in IPython * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy -* New features: - - * - - * - - * - * Numpy: - * + * Updates to numpy 1.10.2 (incompatibilities and not-implemented features + still exist) - * + * Support dtype=(('O', spec)) union while disallowing record arrays with + mixed object, non-object values + * Remove all traces of micronumpy from cpyext if --withoutmod-micronumpy option used + + * Support indexing filtering with a boolean ndarray * Performance improvements and refactorings: - * + * Refactor and improve exception analysis in the annotator - * + * Improve the performace of struct.unpack; unpacking of floats and doubles + is now about 15 times faster and 64 bit integers faster by a factor of 2 - * + * Remove unnecessary special handling of space.wrap(). + + * Improve the memory signature of numbering instances in the JIT. This should + massively decrease the amount of memory consumed by the JIT, which is + significant for most programs. 
Also compress the numberings using variable- + size encoding + + * Support list-resizing setslice operations in RPython + + * Tweak the trace-too-long heuristic for multiple jit drivers + + * Refactor bookkeeping (such a cool word - three double letters) in the + annotater + + * Refactor wrappers for OS functions from rtyper to rlib and simplify them + + * Simplify backend loading instructions to only use four variants + + * Optimize string concatination + + * Simplify GIL handling in non-jitted code + + * Use INT_LSHIFT instead of INT_MUL when possible + + * Improve struct.unpack by casting directly from the underlying buffer. + Unpacking floats and doubles is about 15 times faster, and integer types + about 50% faster (on 64 bit integers). This was then subsequently + improved further in optimizeopt.py. + + * Refactor naming in optimizeopt + + * Change GraphAnalyzer to use a more precise way to recognize external + functions and fix null pointer handling, generally clean up external + function handling + + * Optimize global lookups + + * Optimize two-tuple lookups in mapdict + + * Remove pure variants of ``getfield_gc_*`` operations from the JIT by + determining purity while tracing + + * Refactor databasing + + * Simplify bootstrapping in cpyext + + * Refactor rtyper debug code into python.rtyper.debug + + * Seperate structmember.h from Python.h Also enhance creating api functions + to specify which header file they appear in (previously only pypy_decl.h) + + * Reduce all guards from int_floordiv_ovf if one of the arguments is constant + + * Fix tokenizer to enforce universal newlines, needed for Python 3 support + + * Identify permutations of attributes at instance creation, reducing the + number of bridges created + + * Greatly improve re.sub() performance .. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html +.. _`hypothesis`: http://hypothesis.readthedocs.org +.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html Please update, and continue to help us make PyPy better. 
From pypy.commits at gmail.com Sat Mar 5 15:55:44 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 05 Mar 2016 12:55:44 -0800 (PST) Subject: [pypy-commit] pypy default: check that branch actually exists Message-ID: <56db47d0.d3921c0a.eb707.ffffc6ab@mx.google.com> Author: mattip Branch: Changeset: r82795:65217c37cf95 Date: 2016-03-04 14:05 +0200 http://bitbucket.org/pypy/pypy/changeset/65217c37cf95/ Log: check that branch actually exists diff --git a/pypy/tool/release/force-builds.py b/pypy/tool/release/force-builds.py --- a/pypy/tool/release/force-builds.py +++ b/pypy/tool/release/force-builds.py @@ -9,7 +9,7 @@ modified by PyPy team """ -import os, sys, urllib +import os, sys, urllib, subprocess from twisted.internet import reactor, defer from twisted.python import log @@ -83,4 +83,9 @@ (options, args) = parser.parse_args() if not options.branch: parser.error("branch option required") + try: + subprocess.check_call(['hg','id','-r', options.branch]) + except subprocess.CalledProcessError: + print 'branch', options.branch, 'could not be found in local repository' + sys.exit(-1) main(options.branch, options.server, user=options.user) From pypy.commits at gmail.com Sat Mar 5 16:07:53 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 05 Mar 2016 13:07:53 -0800 (PST) Subject: [pypy-commit] pypy default: fix formatting Message-ID: <56db4aa9.a3abc20a.a69b3.4ab0@mx.google.com> Author: mattip Branch: Changeset: r82796:f29f4d335f15 Date: 2016-03-05 23:07 +0200 http://bitbucket.org/pypy/pypy/changeset/f29f4d335f15/ Log: fix formatting diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -67,7 +67,7 @@ .. _`dynamic languages`: http://pypyjs.org Other Highlights (since 4.0.1 released in November 2015) -======================================================= +========================================================= * New features: From pypy.commits at gmail.com Sat Mar 5 16:18:43 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 05 Mar 2016 13:18:43 -0800 (PST) Subject: [pypy-commit] pypy default: typo Message-ID: <56db4d33.aa17c20a.29a87.5766@mx.google.com> Author: mattip Branch: Changeset: r82797:e4e1ee3ba512 Date: 2016-03-05 23:18 +0200 http://bitbucket.org/pypy/pypy/changeset/e4e1ee3ba512/ Log: typo diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -14,7 +14,7 @@ result, lxml with its cython compiled component `passes all tests`_ on PyPy Users who have gotten used to vmprof_ on Linux, and those on other platforms -who have not yet tried it's awesomeness, will be happy to hear that vmprof +who have not yet tried its awesomeness, will be happy to hear that vmprof now just works on MacOS and Windows too, in both PyPy (built-in support) and CPython (as an installed module). 
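The force-builds.py change above leans on the fact that ``hg id -r <rev>`` exits non-zero when the requested revision or branch is unknown, which ``subprocess.check_call()`` turns into a ``CalledProcessError``. A standalone sketch of the same guard (not part of the commit; the ``branch_exists()`` helper and the devnull redirection are added here purely for illustration)::

    import os
    import subprocess
    import sys

    def branch_exists(branch):
        """Return True if the local Mercurial repository knows `branch`."""
        try:
            with open(os.devnull, 'w') as devnull:
                # 'hg id -r <rev>' exits non-zero for an unknown revision;
                # check_call() raises CalledProcessError in that case.
                subprocess.check_call(['hg', 'id', '-r', branch],
                                      stdout=devnull, stderr=devnull)
        except subprocess.CalledProcessError:
            return False
        return True

    if __name__ == '__main__':
        branch = sys.argv[1] if len(sys.argv) > 1 else 'default'
        if not branch_exists(branch):
            print('branch %s could not be found in the local repository' % branch)
            sys.exit(-1)
        print('branch %s found' % branch)

Note that a missing ``hg`` executable surfaces as an ``OSError`` rather than a ``CalledProcessError``, so both this sketch and the committed script would let that case propagate.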
From pypy.commits at gmail.com Sat Mar 5 17:05:04 2016 From: pypy.commits at gmail.com (Sergey Matyunin) Date: Sat, 05 Mar 2016 14:05:04 -0800 (PST) Subject: [pypy-commit] pypy numpy_partition: Added tests for ndarray.tolist for arrays containing objects Message-ID: <56db5810.85371c0a.c5e82.ffffdf10@mx.google.com> Author: Sergey Matyunin Branch: numpy_partition Changeset: r82800:f70b91a44559 Date: 2016-03-02 22:57 +0100 http://bitbucket.org/pypy/pypy/changeset/f70b91a44559/ Log: Added tests for ndarray.tolist for arrays containing objects diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -1936,6 +1936,22 @@ a = array([[1, 2], [3, 4]]) assert (a + a).tolist() == [[2, 4], [6, 8]] + def test_tolist_object(self): + from numpy import array + a = array([0], dtype=object) + assert a.tolist() == [0] + + def test_tolist_object_slice(self): + from numpy import array + list_expected = [slice(0, 1), 0] + a = array(list_expected, dtype=object) + assert a.tolist() == list_expected + + def test_tolist_object_slice_2d(self): + from numpy import array + a = array([(slice(0, 1), 1), (0, 1)], dtype=object) + assert a.tolist() == [[slice(0, 1, None), 1], [0, 1]] + def test_tolist_slice(self): from numpy import array a = array([[17.1, 27.2], [40.3, 50.3]]) From pypy.commits at gmail.com Sat Mar 5 17:05:01 2016 From: pypy.commits at gmail.com (Sergey Matyunin) Date: Sat, 05 Mar 2016 14:05:01 -0800 (PST) Subject: [pypy-commit] pypy numpy_partition: Added function for parition call from micronumpy Message-ID: <56db580d.45d61c0a.778d.ffffdc30@mx.google.com> Author: Sergey Matyunin Branch: numpy_partition Changeset: r82798:b0b79d1b0927 Date: 2016-02-29 22:06 +0100 http://bitbucket.org/pypy/pypy/changeset/b0b79d1b0927/ Log: Added function for parition call from micronumpy diff --git a/pypy/module/micronumpy/appbridge.py b/pypy/module/micronumpy/appbridge.py --- a/pypy/module/micronumpy/appbridge.py +++ b/pypy/module/micronumpy/appbridge.py @@ -9,6 +9,7 @@ w_array_repr = None w_array_str = None w__usefields = None + w_partition = None def __init__(self, space): pass diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -911,6 +911,10 @@ return return self.implementation.sort(space, w_axis, w_order) + def descr_partition(self, space, __args__): + return get_appbridge_cache(space).call_method( + space, 'numpy.core._partition_use', 'partition', __args__.prepend(self)) + def descr_squeeze(self, space, w_axis=None): cur_shape = self.get_shape() if not space.is_none(w_axis): @@ -1635,6 +1639,7 @@ argsort = interp2app(W_NDimArray.descr_argsort), sort = interp2app(W_NDimArray.descr_sort), + partition = interp2app(W_NDimArray.descr_partition), astype = interp2app(W_NDimArray.descr_astype), base = GetSetProperty(W_NDimArray.descr_get_base), byteswap = interp2app(W_NDimArray.descr_byteswap), From pypy.commits at gmail.com Sat Mar 5 17:05:06 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 05 Mar 2016 14:05:06 -0800 (PST) Subject: [pypy-commit] pypy numpy_partition: fix for boxed value Message-ID: <56db5812.465ec20a.b4ac0.64e8@mx.google.com> Author: mattip Branch: numpy_partition Changeset: r82801:fc583e7eb11a Date: 2016-03-06 00:03 +0200 http://bitbucket.org/pypy/pypy/changeset/fc583e7eb11a/ Log: fix for boxed value diff --git 
a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -535,7 +535,8 @@ l_w = [] for i in range(self.get_shape()[0]): item_w = self.descr_getitem(space, space.wrap(i)) - if isinstance(item_w, W_NDimArray): + if (isinstance(item_w, W_NDimArray) or + isinstance(item_w, boxes.W_GenericBox)): l_w.append(space.call_method(item_w, "tolist")) else: l_w.append(item_w) From pypy.commits at gmail.com Sat Mar 5 17:05:02 2016 From: pypy.commits at gmail.com (Sergey Matyunin) Date: Sat, 05 Mar 2016 14:05:02 -0800 (PST) Subject: [pypy-commit] pypy numpy_partition: Fixed tolist for ndarrays containing objects Message-ID: <56db580e.c52f1c0a.aa093.ffffacc1@mx.google.com> Author: Sergey Matyunin Branch: numpy_partition Changeset: r82799:d4fa44a7c61f Date: 2016-02-29 22:11 +0100 http://bitbucket.org/pypy/pypy/changeset/d4fa44a7c61f/ Log: Fixed tolist for ndarrays containing objects diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -534,8 +534,11 @@ return self.get_scalar_value().item(space) l_w = [] for i in range(self.get_shape()[0]): - l_w.append(space.call_method(self.descr_getitem(space, - space.wrap(i)), "tolist")) + item_w = self.descr_getitem(space, space.wrap(i)) + if isinstance(item_w, W_NDimArray): + l_w.append(space.call_method(item_w, "tolist")) + else: + l_w.append(item_w) return space.newlist(l_w) def descr_ravel(self, space, w_order=None): From pypy.commits at gmail.com Sat Mar 5 17:41:42 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sat, 05 Mar 2016 14:41:42 -0800 (PST) Subject: [pypy-commit] pypy default: gah, fix typo Message-ID: <56db60a6.8916c20a.e6694.6a0e@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82802:3273c7f02b47 Date: 2016-03-05 19:46 +0100 http://bitbucket.org/pypy/pypy/changeset/3273c7f02b47/ Log: gah, fix typo diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -572,7 +572,7 @@ from pypy.objspace.std.util import IDTAG_SHIFT if self.w_instance is not None: id = space.bigint_w(space.id(self.w_instance)) - id.lshift(LONG_BIT) + id = id.lshift(LONG_BIT) else: id = rbigint.fromint(0) id = id.or_(space.bigint_w(space.id(self.w_function))) From pypy.commits at gmail.com Sat Mar 5 17:42:46 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sat, 05 Mar 2016 14:42:46 -0800 (PST) Subject: [pypy-commit] pypy default: fix identity tests Message-ID: <56db60e6.918e1c0a.7d129.ffffe402@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82803:c7cc35224c29 Date: 2016-03-05 23:42 +0100 http://bitbucket.org/pypy/pypy/changeset/c7cc35224c29/ Log: fix identity tests diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -172,15 +172,15 @@ def test_id_on_primitives(self): if self.cpython_apptest: skip("cpython behaves differently") - assert id(1) == (1 << 3) + 1 - assert id(1l) == (1 << 3) + 3 + assert id(1) == (1 << 4) + 1 + assert id(1l) == (1 << 4) + 3 class myint(int): pass assert id(myint(1)) != id(1) assert id(1.0) & 7 == 5 assert id(-0.0) != id(0.0) - assert hex(id(2.0)) == '0x20000000000000005L' + assert hex(id(2.0)) == '0x40000000000000005L' assert id(0.0) == 5 def test_id_on_strs(self): From pypy.commits at gmail.com Sat Mar 5 18:21:28 
2016 From: pypy.commits at gmail.com (cfbolz) Date: Sat, 05 Mar 2016 15:21:28 -0800 (PST) Subject: [pypy-commit] pypy default: trying to fix issue #2083 (see the test). This is a somewhat experimental Message-ID: <56db69f8.2a6ec20a.dabb2.7310@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82804:395abeb42996 Date: 2016-03-06 00:20 +0100 http://bitbucket.org/pypy/pypy/changeset/395abeb42996/ Log: trying to fix issue #2083 (see the test). This is a somewhat experimental check-in: I couldn't find a failing test caused by the removal of this condition. If there is one, it's probably ok to simply revert this commit. diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -432,8 +432,7 @@ # this is not defined as a method on Function because it's generally # useful logic: w_function can be any callable. It is used by Method too. asking_for_bound = (space.is_none(w_cls) or - not space.is_w(w_obj, space.w_None) or - space.is_w(w_cls, space.type(space.w_None))) + not space.is_w(w_obj, space.w_None)) if asking_for_bound: return space.wrap(Method(space, w_function, w_obj, w_cls)) else: diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -257,6 +257,9 @@ meth = func.__get__(obj, object) assert meth() == obj + def test_none_get_interaction(self): + assert type(None).__repr__(None) == 'None' + def test_no_get_builtin(self): assert not hasattr(dir, '__get__') class A(object): From pypy.commits at gmail.com Sat Mar 5 18:53:29 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 05 Mar 2016 15:53:29 -0800 (PST) Subject: [pypy-commit] pypy default: Back out 395abeb42996, add test that shows why Message-ID: <56db7179.a151c20a.73e93.7bff@mx.google.com> Author: Armin Rigo Branch: Changeset: r82805:08867b5b10b9 Date: 2016-03-06 00:52 +0100 http://bitbucket.org/pypy/pypy/changeset/08867b5b10b9/ Log: Back out 395abeb42996, add test that shows why diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -432,7 +432,8 @@ # this is not defined as a method on Function because it's generally # useful logic: w_function can be any callable. It is used by Method too. 
asking_for_bound = (space.is_none(w_cls) or - not space.is_w(w_obj, space.w_None)) + not space.is_w(w_obj, space.w_None) or + space.is_w(w_cls, space.type(space.w_None))) if asking_for_bound: return space.wrap(Method(space, w_function, w_obj, w_cls)) else: diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -258,8 +258,13 @@ assert meth() == obj def test_none_get_interaction(self): + skip("XXX issue #2083") assert type(None).__repr__(None) == 'None' + def test_none_get_interaction_2(self): + f = None.__repr__ + assert f() == 'None' + def test_no_get_builtin(self): assert not hasattr(dir, '__get__') class A(object): From pypy.commits at gmail.com Sun Mar 6 04:25:10 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 06 Mar 2016 01:25:10 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: fixed some tests Message-ID: <56dbf776.88c8c20a.8eb98.ffffdfbc@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82806:f563f1ea7c20 Date: 2016-03-06 10:21 +0100 http://bitbucket.org/pypy/pypy/changeset/f563f1ea7c20/ Log: fixed some tests diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -315,11 +315,9 @@ 'strdescr': arraydescr}) # check the returned pointers gc_ll_descr = self.cpu.gc_ll_descr - scale = lambda x: x if x in self.cpu.load_supported_factors else 1 - byte = lambda f,v: v if scale(f) != 1 else v*f - assert gc_ll_descr.calls == [(scale(8), 15, byte(8,10)), - (scale(5), 15, byte(5,3)), - ('str', byte(5,3))] + assert gc_ll_descr.calls == [(8, 15, 10), + (5, 15, 3), + ('str', 3)] # one fit, one was too large, one was not fitting def test_malloc_slowpath(self): diff --git a/rpython/jit/backend/zarch/test/test_zrpy_gc_hypo.py b/rpython/jit/backend/zarch/test/test_zrpy_gc_hypo.py deleted file mode 100644 --- a/rpython/jit/backend/zarch/test/test_zrpy_gc_hypo.py +++ /dev/null @@ -1,10 +0,0 @@ -from rpython.jit.backend.llsupport.tl.test.zrpy_gc_hypo_test import GCHypothesis - -import py - -py.test.skip("not yet working") - -class TestGCHypothesis(GCHypothesis): - # runs ../../llsupport/tl/test/zrpy_gc_hypo_test.py - gcrootfinder = "shadowstack" - gc = "incminimark" diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -28,6 +28,7 @@ def setup(): + from rpython.jit.backend import detect_cpu if detect_cpu.autodetect().startswith(detect_cpu.MODEL_S390_64): raise VMProfPlatformUnsupported("rvmprof not supported on" " s390x CPUs for now") From pypy.commits at gmail.com Sun Mar 6 04:25:12 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 06 Mar 2016 01:25:12 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: catchup with default Message-ID: <56dbf778.d7c21c0a.2e77e.5471@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82807:9ff14f05a92d Date: 2016-03-06 10:24 +0100 http://bitbucket.org/pypy/pypy/changeset/9ff14f05a92d/ Log: catchup with default diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -11,29 +11,29 @@ Amaury Forgeot d'Arc Antonio Cuni Samuele Pedroni + Matti Picus Alex Gaynor Brian Kearns - Matti Picus Philip Jenvey 
Michael Hudson + Ronan Lamy David Schneider + Manuel Jacob Holger Krekel Christian Tismer Hakan Ardo - Manuel Jacob - Ronan Lamy Benjamin Peterson + Richard Plangger Anders Chrigstrom Eric van Riet Paap Wim Lavrijsen - Richard Plangger Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen + Remi Meier Lukas Diekmann Sven Hager Anders Lehmann - Remi Meier Aurelien Campeas Niklaus Haldimann Camillo Bruni @@ -42,8 +42,8 @@ Romain Guillebert Leonardo Santagada Seo Sanghyeon + Ronny Pfannschmidt Justin Peel - Ronny Pfannschmidt David Edelsohn Anders Hammarquist Jakub Gustak @@ -65,6 +65,7 @@ Tyler Wade Michael Foord Stephan Diehl + Vincent Legoll Stefan Schwarzer Valentino Volonghi Tomek Meka @@ -75,9 +76,9 @@ Jean-Paul Calderone Timo Paulssen Squeaky + Marius Gedminas Alexandre Fayolle Simon Burton - Marius Gedminas Martin Matusiak Konstantin Lopuhin Wenzhu Man @@ -86,16 +87,20 @@ Ivan Sichmann Freitas Greg Price Dario Bertini + Stefano Rivera Mark Pearse Simon Cross Andreas Stührk - Stefano Rivera + Edd Barrett Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Jeremy Thurgood Paweł Piotr Przeradowski + Spenser Bauman Paul deGrandis Ilya Osadchiy + marky1991 Tobias Oberstein Adrian Kuhn Boris Feigin @@ -104,14 +109,12 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Edd Barrett + Tobias Pape Wanja Saatkamp Gerald Klix Mike Blume - Tobias Pape Oscar Nierstrasz Stefan H. Muller - Jeremy Thurgood Rami Chowdhury Eugene Oden Henry Mason @@ -123,6 +126,8 @@ Lukas Renggli Guenter Jantzen Ned Batchelder + Tim Felgentreff + Anton Gulenko Amit Regmi Ben Young Nicolas Chauvat @@ -132,12 +137,12 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira - Tim Felgentreff + Yichao Yu Rocco Moretti Gintautas Miliauskas Michael Twomey Lucian Branescu Mihaila - Yichao Yu + Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -161,33 +166,33 @@ Stanislaw Halik Mikael Schönenberg Berkin Ilbeyi - Elmo M?ntynen + Elmo Mäntynen + Faye Zhao Jonathan David Riehl Anders Qvist Corbin Simpson Chirag Jadwani Beatrice During Alex Perry - Vincent Legoll + Vaibhav Sood Alan McIntyre - Spenser Bauman + William Leslie Alexander Sedov Attila Gobi + Jasper.Schulz Christopher Pope - Devin Jeanpierre - Vaibhav Sood Christian Tismer Marc Abramowitz Dan Stromberg Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan + Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang - Anton Gulenko Gabriel Lukas Vacek Andrew Dalke @@ -195,6 +200,7 @@ Jakub Stasiak Nathan Taylor Vladimir Kryachko + Omer Katz Jacek Generowicz Alejandro J. Cura Jacob Oscarson @@ -209,6 +215,7 @@ Lars Wassermann Philipp Rustemeuer Henrik Vendelbo + Richard Lancaster Dan Buch Miguel de Val Borro Artur Lisiecki @@ -220,18 +227,18 @@ Tomo Cocoa Kim Jin Su Toni Mattis + Amber Brown Lucas Stadler Julian Berman Markus Holtermann roberto at goyle Yury V. 
Zaytsev Anna Katrina Dominguez - William Leslie Bobby Impollonia - Faye Zhao timo at eistee.fritz.box Andrew Thompson Yusei Tahara + Aaron Tubbs Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado @@ -243,6 +250,7 @@ Christopher Armstrong Michael Hudson-Doyle Anders Sigfridsson + Nikolay Zinov Yasir Suhail Jason Michalski rafalgalczynski at gmail.com @@ -252,6 +260,7 @@ Gustavo Niemeyer Stephan Busemann Rafał Gałczyński + Matt Bogosian Christian Muirhead Berker Peksag James Lan @@ -286,9 +295,9 @@ Stefan Marr jiaaro Mads Kiilerich - Richard Lancaster opassembler.py Antony Lee + Jason Madden Yaroslav Fedevych Jim Hunziker Markus Unterwaditzer @@ -297,6 +306,7 @@ squeaky Zearin soareschen + Jonas Pfannschmidt Kurt Griffiths Mike Bayer Matthew Miller @@ -311,4 +321,3 @@ Julien Phalip Roman Podoliaka Dan Loewenherz - diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-5.0.0.rst release-4.0.1.rst release-4.0.0.rst release-2.6.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. toctree:: whatsnew-head.rst + whatsnew-5.0.0.rst whatsnew-4.0.1.rst whatsnew-4.0.0.rst whatsnew-2.6.1.rst diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-5.0.0.rst @@ -0,0 +1,225 @@ +========== +PyPy 5.0.0 +========== + +We have released PyPy 5.0.0, about three months after PyPy 4.0.0. +We encourage all users of PyPy to update to this version. Apart from the usual +bug fixes, there is an ongoing effort to improve the warmup time and memory +usage of JIT-related metadata. The exact effects depend vastly on the program +you're running and can range from insignificant to warmup being up to 30% +faster and memory dropping by about 30%. + +We also merged a major upgrade to our C-API layer (cpyext), simplifying the +interaction between c-level objects and PyPy interpreter level objects. As a +result, lxml with its cython compiled component `passes all tests`_ on PyPy + +Users who have gotten used to vmprof_ on Linux, and those on other platforms +who have not yet tried its awesomeness, will be happy to hear that vmprof +now just works on MacOS and Windows too, in both PyPy (built-in support) and +CPython (as an installed module). + +You can download the PyPy 5.0.0 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + +CFFI +==== + +While not applicable only to PyPy, `cffi`_ is arguably our most significant +contribution to the python ecosystem. PyPy 5.0.0 ships with +`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a c program. + +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`cffi`: https://cffi.readthedocs.org +.. _`cffi-1.5.2`: http://cffi.readthedocs.org/en/latest/whatsnew.html#v1-5-2 +.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. 
_`help`: http://doc.pypy.org/en/latest/project-ideas.html +.. _`numpy`: https://bitbucket.org/pypy/numpy +.. _`passes all tests`: https://bitbucket.org/pypy/compatibility/wiki/lxml +.. _vmprof: http://vmprof.readthedocs.org + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. + +This release supports **x86** machines on most common operating systems +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, freebsd), +newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the +big- and little-endian variants of **ppc64** running Linux. + +.. _`pypy and cpython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Other Highlights (since 4.0.1 released in November 2015) +========================================================= + +* New features: + + * Support embedding PyPy in a C-program via cffi and static callbacks in cffi. + This deprecates the old method of embedding PyPy + + * Refactor vmprof to work cross-operating-system, deprecate using buggy + libunwind on Linux platforms. Vmprof even works on Windows now. + + * Support more of the C-API type slots, like tp_getattro, and fix C-API + macros, functions, and structs such as _PyLong_FromByteArray(), + PyString_GET_SIZE, f_locals in PyFrameObject, Py_NAN, co_filename in + PyCodeObject + + * Use a more stable approach for allocating PyObjects in cpyext. (see + `blog post`_). Once the PyObject corresponding to a PyPy object is created, + it stays around at the same location until the death of the PyPy object. + Done with a little bit of custom GC support. It allows us to kill the + notion of "borrowing" inside cpyext, reduces 4 dictionaries down to 1, and + significantly simplifies the whole approach (which is why it is a new + feature while technically a refactoring) and allows PyPy to support the + populart lxml module (as of the *next* release) with no PyPy specific + patches needed + + * Make the default filesystem encoding ASCII, like CPython + + * Use `hypothesis`_ in test creation, which is great for randomizing tests + +* Bug Fixes + + * Backport always using os.urandom for uuid4 from cpython + + * More completely support datetime, optimize timedelta creation + + * Fix for issue 2185 which caused an inconsistent list of operations to be + generated by the unroller, appeared in a complicated DJango app + + * Fix an elusive issue with stacklets on shadowstack which showed up when + forgetting stacklets without resuming them + + * Fix entrypoint() which now acquires the GIL + + * Fix direct_ffi_call() so failure does not bail out before setting CALL_MAY_FORCE + + * Fix (de)pickling long values by simplifying the implementation + + * Fix RPython rthread so that objects stored as threadlocal do not force minor + GC collection and are kept alive automatically. 
This improves perfomance of + short-running Python callbacks and prevents resetting such object between + calls + + * Support floats as parameters to itertools.isslice() + + * Check for the existence of CODESET, ignoring it should have prevented PyPy + from working on FreeBSD + + * Fix for corner case (likely shown by Krakatau) for consecutive guards with + interdependencies + + * Fix applevel bare class method comparisons which should fix pretty printing + in IPython + + * Issues reported with our previous release were resolved_ after reports from users on + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy + +* Numpy: + + * Updates to numpy 1.10.2 (incompatibilities and not-implemented features + still exist) + + * Support dtype=(('O', spec)) union while disallowing record arrays with + mixed object, non-object values + + * Remove all traces of micronumpy from cpyext if --withoutmod-micronumpy option used + + * Support indexing filtering with a boolean ndarray + +* Performance improvements and refactorings: + + * Refactor and improve exception analysis in the annotator + + * Improve the performace of struct.unpack; unpacking of floats and doubles + is now about 15 times faster and 64 bit integers faster by a factor of 2 + + * Remove unnecessary special handling of space.wrap(). + + * Improve the memory signature of numbering instances in the JIT. This should + massively decrease the amount of memory consumed by the JIT, which is + significant for most programs. Also compress the numberings using variable- + size encoding + + * Support list-resizing setslice operations in RPython + + * Tweak the trace-too-long heuristic for multiple jit drivers + + * Refactor bookkeeping (such a cool word - three double letters) in the + annotater + + * Refactor wrappers for OS functions from rtyper to rlib and simplify them + + * Simplify backend loading instructions to only use four variants + + * Optimize string concatination + + * Simplify GIL handling in non-jitted code + + * Use INT_LSHIFT instead of INT_MUL when possible + + * Improve struct.unpack by casting directly from the underlying buffer. + Unpacking floats and doubles is about 15 times faster, and integer types + about 50% faster (on 64 bit integers). This was then subsequently + improved further in optimizeopt.py. + + * Refactor naming in optimizeopt + + * Change GraphAnalyzer to use a more precise way to recognize external + functions and fix null pointer handling, generally clean up external + function handling + + * Optimize global lookups + + * Optimize two-tuple lookups in mapdict + + * Remove pure variants of ``getfield_gc_*`` operations from the JIT by + determining purity while tracing + + * Refactor databasing + + * Simplify bootstrapping in cpyext + + * Refactor rtyper debug code into python.rtyper.debug + + * Seperate structmember.h from Python.h Also enhance creating api functions + to specify which header file they appear in (previously only pypy_decl.h) + + * Reduce all guards from int_floordiv_ovf if one of the arguments is constant + + * Fix tokenizer to enforce universal newlines, needed for Python 3 support + + * Identify permutations of attributes at instance creation, reducing the + number of bridges created + + * Greatly improve re.sub() performance + +.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html +.. _`hypothesis`: http://hypothesis.readthedocs.org +.. 
_`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html + +Please update, and continue to help us make PyPy better. + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-5.0.0.rst b/pypy/doc/whatsnew-5.0.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-5.0.0.rst @@ -0,0 +1,192 @@ +========================= +What's new in PyPy 4.1.+ +========================= + +.. this is a revision shortly after release-4.0.1 +.. startrev: 4b5c840d0da2 + +Fixed ``_PyLong_FromByteArray()``, which was buggy. + +Fixed a crash with stacklets (or greenlets) on non-Linux machines +which showed up if you forget stacklets without resuming them. + +.. branch: numpy-1.10 + +Fix tests to run cleanly with -A and start to fix micronumpy for upstream numpy +which is now 1.10.2 + +.. branch: osx-flat-namespace + +Fix the cpyext tests on OSX by linking with -flat_namespace + +.. branch: anntype + +Refactor and improve exception analysis in the annotator. + +.. branch: posita/2193-datetime-timedelta-integrals + +Fix issue #2193. ``isinstance(..., int)`` => ``isinstance(..., numbers.Integral)`` +to allow for alternate ``int``-like implementations (e.g., ``future.types.newint``) + +.. branch: faster-rstruct + +Improve the performace of struct.unpack, which now directly reads inside the +string buffer and directly casts the bytes to the appropriate type, when +allowed. Unpacking of floats and doubles is about 15 times faster now, while +for integer types it's up to ~50% faster for 64bit integers. + +.. branch: wrap-specialisation + +Remove unnecessary special handling of space.wrap(). + +.. branch: compress-numbering + +Improve the memory signature of numbering instances in the JIT. This should massively +decrease the amount of memory consumed by the JIT, which is significant for most programs. + +.. branch: fix-trace-too-long-heuristic + +Improve the heuristic when disable trace-too-long + +.. branch: fix-setslice-can-resize + +Make rlist's ll_listsetslice() able to resize the target list to help +simplify objspace/std/listobject.py. Was issue #2196. + +.. branch: anntype2 + +A somewhat random bunch of changes and fixes following up on branch 'anntype'. Highlights: + +- Implement @doubledispatch decorator and use it for intersection() and difference(). + +- Turn isinstance into a SpaceOperation + +- Create a few direct tests of the fundamental annotation invariant in test_model.py + +- Remove bookkeeper attribute from DictDef and ListDef. + +.. branch: cffi-static-callback + +.. branch: vecopt-absvalue + +- Enhancement. Removed vector fields from AbstractValue. + +.. branch: memop-simplify2 + +Simplification. Backends implement too many loading instructions, only having a slightly different interface. +Four new operations (gc_load/gc_load_indexed, gc_store/gc_store_indexed) replace all the +commonly known loading operations + +.. branch: more-rposix + +Move wrappers for OS functions from `rpython/rtyper` to `rpython/rlib` and +turn them into regular RPython functions. Most RPython-compatible `os.*` +functions are now directly accessible as `rpython.rposix.*`. + +.. branch: always-enable-gil + +Simplify a bit the GIL handling in non-jitted code. Fixes issue #2205. + +.. branch: flowspace-cleanups + +Trivial cleanups in flowspace.operation : fix comment & duplicated method + +.. branch: test-AF_NETLINK + +Add a test for pre-existing AF_NETLINK support. Was part of issue #1942. + +.. branch: small-cleanups-misc + +Trivial misc cleanups: typo, whitespace, obsolete comments + +.. 
branch: cpyext-slotdefs +.. branch: fix-missing-canraise +.. branch: whatsnew + +.. branch: fix-2211 + +Fix the cryptic exception message when attempting to use extended slicing +in rpython. Was issue #2211. + +.. branch: ec-keepalive + +Optimize the case where, in a new C-created thread, we keep invoking +short-running Python callbacks. (CFFI on CPython has a hack to achieve +the same result.) This can also be seen as a bug fix: previously, +thread-local objects would be reset between two such calls. + +.. branch: globals-quasiimmut + +Optimize global lookups. + +.. branch: cffi-static-callback-embedding + +Updated to CFFI 1.5, which supports a new way to do embedding. +Deprecates http://pypy.readthedocs.org/en/latest/embedding.html. + +.. branch: fix-cpython-ssl-tests-2.7 + +Fix SSL tests by importing cpython's patch + +.. branch: remove-getfield-pure + +Remove pure variants of ``getfield_gc_*`` operations from the JIT. Relevant +optimizations instead consult the field descriptor to determine the purity of +the operation. Additionally, pure ``getfield`` operations are now handled +entirely by `rpython/jit/metainterp/optimizeopt/heap.py` rather than +`rpython/jit/metainterp/optimizeopt/pure.py`, which can result in better codegen +for traces containing a large number of pure getfield operations. + +.. branch: exctrans + +Try to ensure that no new functions get annotated during the 'source_c' phase. +Refactor sandboxing to operate at a higher level. + +.. branch: cpyext-bootstrap + +.. branch: vmprof-newstack + +Refactor vmprof to work cross-operating-system. + +.. branch: seperate-strucmember_h + +Seperate structmember.h from Python.h Also enhance creating api functions +to specify which header file they appear in (previously only pypy_decl.h) + +.. branch: llimpl + +Refactor register_external(), remove running_on_llinterp mechanism and +apply sandbox transform on externals at the end of annotation. + +.. branch: cffi-embedding-win32 + +.. branch: windows-vmprof-support + +vmprof should work on Windows. + + +.. branch: reorder-map-attributes + +When creating instances and adding attributes in several different orders +depending on some condition, the JIT would create too much code. This is now +fixed. + +.. branch: cpyext-gc-support-2 + +Improve CPython C API support, which means lxml now runs unmodified +(after removing pypy hacks, pending pull request) + +.. branch: look-inside-tuple-hash + +Look inside tuple hash, improving mdp benchmark + +.. branch: vlen-resume + +Compress resume data, saving 10-20% of memory consumed by the JIT + +.. branch: issue-2248 + +.. branch: ndarray-setitem-filtered + +Fix boolean-array indexing in micronumpy + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -184,10 +184,6 @@ Compress resume data, saving 10-20% of memory consumed by the JIT -.. branch: memop-simplify3 - -Further simplifying the backend operations malloc_cond_varsize and zero_array. - .. branch: s390x-backend The jit compiler backend implementation for the s390x architecutre. 
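Among the branches documented in the new whatsnew-5.0.0.rst above, reorder-map-attributes is perhaps the easiest to illustrate with code. The class below is a hypothetical example (it does not come from the repository): the same two attributes are assigned in two different orders depending on a condition, which is exactly the pattern the whatsnew entry describes as making the JIT create too much code before the fix::

    class Point(object):
        def __init__(self, polar, a, b):
            # Same attributes, two different creation orders, chosen by a
            # condition -- the pattern targeted by reorder-map-attributes.
            if polar:
                self.r = a
                self.phi = b
            else:
                self.phi = b
                self.r = a

    def total(n):
        # Alternating the order keeps both attribute layouts alive in one loop.
        points = [Point(i % 2 == 0, float(i), float(-i)) for i in range(n)]
        return sum(p.r + p.phi for p in points)

    if __name__ == '__main__':
        print(total(100000))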
diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -13,6 +13,9 @@ from pypy.interpreter.argument import Arguments from rpython.rlib import jit +from rpython.rlib.rarithmetic import LONG_BIT +from rpython.rlib.rbigint import rbigint + funccallunrolling = unrolling_iterable(range(4)) @@ -557,6 +560,26 @@ return space.w_False return space.eq(self.w_function, w_other.w_function) + def is_w(self, space, other): + if not isinstance(other, Method): + return False + return (self.w_instance is other.w_instance and + self.w_function is other.w_function and + self.w_class is other.w_class) + + def immutable_unique_id(self, space): + from pypy.objspace.std.util import IDTAG_METHOD as tag + from pypy.objspace.std.util import IDTAG_SHIFT + if self.w_instance is not None: + id = space.bigint_w(space.id(self.w_instance)) + id = id.lshift(LONG_BIT) + else: + id = rbigint.fromint(0) + id = id.or_(space.bigint_w(space.id(self.w_function))) + id = id.lshift(LONG_BIT).or_(space.bigint_w(space.id(self.w_class))) + id = id.lshift(IDTAG_SHIFT).int_or_(tag) + return space.newlong_from_rbigint(id) + def descr_method_hash(self): space = self.space w_result = space.hash(self.w_function) diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -133,7 +133,7 @@ self.check(['-S', '-tO', '--info'], {}, output_contains='translation') self.check(['-S', '-tO', '--version'], {}, output_contains='Python') self.check(['-S', '-tOV'], {}, output_contains='Python') - self.check(['--jit', 'foobar', '-S'], {}, sys_argv=[''], + self.check(['--jit', 'off', '-S'], {}, sys_argv=[''], run_stdin=True, no_site=1) self.check(['-c', 'pass'], {}, sys_argv=['-c'], run_command='pass') self.check(['-cpass'], {}, sys_argv=['-c'], run_command='pass') diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -1,5 +1,4 @@ - -import unittest +import pytest from pypy.interpreter import eval from pypy.interpreter.function import Function, Method, descr_function_get from pypy.interpreter.pycode import PyCode @@ -187,6 +186,7 @@ raises( TypeError, func, 42, {'arg1': 23}) + @pytest.mark.skipif("config.option.runappdirect") def test_kwargs_nondict_mapping(self): class Mapping: def keys(self): @@ -257,6 +257,14 @@ meth = func.__get__(obj, object) assert meth() == obj + def test_none_get_interaction(self): + skip("XXX issue #2083") + assert type(None).__repr__(None) == 'None' + + def test_none_get_interaction_2(self): + f = None.__repr__ + assert f() == 'None' + def test_no_get_builtin(self): assert not hasattr(dir, '__get__') class A(object): @@ -284,6 +292,7 @@ raises(TypeError, len, s, some_unknown_keyword=s) raises(TypeError, len, s, s, some_unknown_keyword=s) + @pytest.mark.skipif("config.option.runappdirect") def test_call_error_message(self): try: len() @@ -325,6 +334,7 @@ f = lambda: 42 assert f.func_doc is None + @pytest.mark.skipif("config.option.runappdirect") def test_setstate_called_with_wrong_args(self): f = lambda: 42 # not sure what it should raise, since CPython doesn't have setstate @@ -550,6 +560,37 @@ assert A().m == X() assert X() == A().m + @pytest.mark.skipif("config.option.runappdirect") + def test_method_identity(self): + class A(object): + def m(self): + pass + def n(self): + pass + + 
class B(A): + pass + + class X(object): + def __eq__(self, other): + return True + + a = A() + a2 = A() + assert a.m is a.m + assert id(a.m) == id(a.m) + assert a.m is not a.n + assert id(a.m) != id(a.n) + assert a.m is not a2.m + assert id(a.m) != id(a2.m) + + assert A.m is A.m + assert id(A.m) == id(A.m) + assert A.m is not A.n + assert id(A.m) != id(A.n) + assert A.m is not B.m + assert id(A.m) != id(B.m) + class TestMethod: def setup_method(self, method): diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py --- a/pypy/module/__builtin__/descriptor.py +++ b/pypy/module/__builtin__/descriptor.py @@ -79,6 +79,7 @@ W_Super.typedef = TypeDef( 'super', __new__ = interp2app(descr_new_super), + __thisclass__ = interp_attrproperty_w("w_starttype", W_Super), __getattribute__ = interp2app(W_Super.getattribute), __get__ = interp2app(W_Super.get), __doc__ = """super(type) -> unbound super object diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -452,7 +452,6 @@ assert a + 1 == 2 assert a + 1.1 == 2 - def test_binaryop_calls_coerce_always(self): l = [] class A: @@ -1076,6 +1075,16 @@ assert (D() > A()) == 'D:A.gt' assert (D() >= A()) == 'D:A.ge' + def test_override___int__(self): + class F(float): + def __int__(self): + return 666 + f = F(-12.3) + assert int(f) == 666 + # on cpython, this calls float_trunc() in floatobject.c + # which ends up calling PyFloat_AS_DOUBLE((PyFloatObject*) f) + assert float.__int__(f) == -12 + class AppTestOldStyleClassBytesDict(object): def setup_class(cls): diff --git a/pypy/module/__builtin__/test/test_descriptor.py b/pypy/module/__builtin__/test/test_descriptor.py --- a/pypy/module/__builtin__/test/test_descriptor.py +++ b/pypy/module/__builtin__/test/test_descriptor.py @@ -214,7 +214,7 @@ c = C() assert C.goo(1) == (C, 1) assert c.goo(1) == (C, 1) - + assert c.foo(1) == (c, 1) class D(C): pass @@ -238,6 +238,17 @@ meth = classmethod(1).__get__(1) raises(TypeError, meth) + def test_super_thisclass(self): + class A(object): + pass + + assert super(A, A()).__thisclass__ is A + + class B(A): + pass + + assert super(B, B()).__thisclass__ is B + assert super(A, B()).__thisclass__ is A def test_property_docstring(self): assert property.__doc__.startswith('property') diff --git a/pypy/module/_collections/app_defaultdict.py b/pypy/module/_collections/app_defaultdict.py --- a/pypy/module/_collections/app_defaultdict.py +++ b/pypy/module/_collections/app_defaultdict.py @@ -12,6 +12,7 @@ class defaultdict(dict): __slots__ = ['default_factory'] + __module__ = 'collections' def __init__(self, *args, **kwds): if len(args) > 0: diff --git a/pypy/module/_collections/test/test_defaultdict.py b/pypy/module/_collections/test/test_defaultdict.py --- a/pypy/module/_collections/test/test_defaultdict.py +++ b/pypy/module/_collections/test/test_defaultdict.py @@ -14,6 +14,12 @@ d[5].append(44) assert l == [42, 43] and l2 == [44] + def test_module(self): + from _collections import defaultdict + assert repr(defaultdict) in ( + "", # on PyPy + "") # on CPython + def test_keyerror_without_factory(self): from _collections import defaultdict for d1 in [defaultdict(), defaultdict(None)]: diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -7,6 +7,7 @@ from pypy.interpreter.error import 
OperationError from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit +from rpython.rlib.rstring import StringBuilder, UnicodeBuilder # ____________________________________________________________ # @@ -36,6 +37,8 @@ if 0 <= start <= end: if isinstance(ctx, rsre_core.BufMatchContext): return space.wrap(ctx._buffer.getslice(start, end, 1, end-start)) + if isinstance(ctx, rsre_core.StrMatchContext): + return space.wrap(ctx._string[start:end]) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr[start:end]) else: @@ -98,8 +101,8 @@ space.wrap("cannot copy this pattern object")) def make_ctx(self, w_string, pos=0, endpos=sys.maxint): - """Make a BufMatchContext or a UnicodeMatchContext for searching - in the given w_string object.""" + """Make a StrMatchContext, BufMatchContext or a UnicodeMatchContext for + searching in the given w_string object.""" space = self.space if pos < 0: pos = 0 @@ -113,6 +116,14 @@ endpos = len(unicodestr) return rsre_core.UnicodeMatchContext(self.code, unicodestr, pos, endpos, self.flags) + elif space.isinstance_w(w_string, space.w_str): + str = space.str_w(w_string) + if pos > len(str): + pos = len(str) + if endpos > len(str): + endpos = len(str) + return rsre_core.StrMatchContext(self.code, str, + pos, endpos, self.flags) else: buf = space.readbuf_w(w_string) size = buf.getlength() @@ -216,6 +227,11 @@ def subx(self, w_ptemplate, w_string, count): space = self.space + # use a (much faster) string/unicode builder if w_ptemplate and + # w_string are both string or both unicode objects, and if w_ptemplate + # is a literal + use_builder = False + filter_as_unicode = filter_as_string = None if space.is_true(space.callable(w_ptemplate)): w_filter = w_ptemplate filter_is_callable = True @@ -223,6 +239,8 @@ if space.isinstance_w(w_ptemplate, space.w_unicode): filter_as_unicode = space.unicode_w(w_ptemplate) literal = u'\\' not in filter_as_unicode + use_builder = ( + space.isinstance_w(w_string, space.w_unicode) and literal) else: try: filter_as_string = space.str_w(w_ptemplate) @@ -232,6 +250,8 @@ literal = False else: literal = '\\' not in filter_as_string + use_builder = ( + space.isinstance_w(w_string, space.w_str) and literal) if literal: w_filter = w_ptemplate filter_is_callable = False @@ -242,19 +262,44 @@ space.wrap(self), w_ptemplate) filter_is_callable = space.is_true(space.callable(w_filter)) # + # XXX this is a bit of a mess, but it improves performance a lot ctx = self.make_ctx(w_string) - sublist_w = [] + sublist_w = strbuilder = unicodebuilder = None + if use_builder: + if filter_as_unicode is not None: + unicodebuilder = UnicodeBuilder(ctx.end) + else: + assert filter_as_string is not None + strbuilder = StringBuilder(ctx.end) + else: + sublist_w = [] n = last_pos = 0 while not count or n < count: + sub_jitdriver.jit_merge_point( + self=self, + use_builder=use_builder, + filter_is_callable=filter_is_callable, + filter_type=type(w_filter), + ctx=ctx, + w_filter=w_filter, + strbuilder=strbuilder, + unicodebuilder=unicodebuilder, + filter_as_string=filter_as_string, + filter_as_unicode=filter_as_unicode, + count=count, + w_string=w_string, + n=n, last_pos=last_pos, sublist_w=sublist_w + ) + space = self.space if not searchcontext(space, ctx): break if last_pos < ctx.match_start: - sublist_w.append(slice_w(space, ctx, last_pos, - ctx.match_start, space.w_None)) + _sub_append_slice( + ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, last_pos, ctx.match_start) start = ctx.match_end if start 
== ctx.match_start: start += 1 - nextctx = ctx.fresh_copy(start) if not (last_pos == ctx.match_start == ctx.match_end and n > 0): # the above ignores empty matches on latest position @@ -262,28 +307,71 @@ w_match = self.getmatch(ctx, True) w_piece = space.call_function(w_filter, w_match) if not space.is_w(w_piece, space.w_None): + assert strbuilder is None and unicodebuilder is None + assert not use_builder sublist_w.append(w_piece) else: - sublist_w.append(w_filter) + if use_builder: + if strbuilder is not None: + assert filter_as_string is not None + strbuilder.append(filter_as_string) + else: + assert unicodebuilder is not None + assert filter_as_unicode is not None + unicodebuilder.append(filter_as_unicode) + else: + sublist_w.append(w_filter) last_pos = ctx.match_end n += 1 elif last_pos >= ctx.end: break # empty match at the end: finished - ctx = nextctx + ctx.reset(start) if last_pos < ctx.end: - sublist_w.append(slice_w(space, ctx, last_pos, ctx.end, - space.w_None)) + _sub_append_slice(ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, last_pos, ctx.end) + if use_builder: + if strbuilder is not None: + return space.wrap(strbuilder.build()), n + else: + assert unicodebuilder is not None + return space.wrap(unicodebuilder.build()), n + else: + if space.isinstance_w(w_string, space.w_unicode): + w_emptystr = space.wrap(u'') + else: + w_emptystr = space.wrap('') + w_item = space.call_method(w_emptystr, 'join', + space.newlist(sublist_w)) + return w_item, n - if space.isinstance_w(w_string, space.w_unicode): - w_emptystr = space.wrap(u'') - else: - w_emptystr = space.wrap('') - w_item = space.call_method(w_emptystr, 'join', - space.newlist(sublist_w)) - return w_item, n +sub_jitdriver = jit.JitDriver( + reds="""count n last_pos + ctx w_filter + strbuilder unicodebuilder + filter_as_string + filter_as_unicode + w_string sublist_w + self""".split(), + greens=["filter_is_callable", "use_builder", "filter_type", "ctx.pattern"]) +def _sub_append_slice(ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, start, end): + if use_builder: + if isinstance(ctx, rsre_core.BufMatchContext): + assert strbuilder is not None + return strbuilder.append(ctx._buffer.getslice(start, end, 1, end-start)) + if isinstance(ctx, rsre_core.StrMatchContext): + assert strbuilder is not None + return strbuilder.append_slice(ctx._string, start, end) + elif isinstance(ctx, rsre_core.UnicodeMatchContext): + assert unicodebuilder is not None + return unicodebuilder.append_slice(ctx._unicodestr, start, end) + assert 0, "unreachable" + else: + sublist_w.append(slice_w(space, ctx, start, end, space.w_None)) + @unwrap_spec(flags=int, groups=int, w_groupindex=WrappedDefault(None), w_indexgroup=WrappedDefault(None)) def SRE_Pattern__new__(space, w_subtype, w_pattern, flags, w_code, @@ -482,6 +570,8 @@ ctx = self.ctx if isinstance(ctx, rsre_core.BufMatchContext): return space.wrap(ctx._buffer.as_str()) + elif isinstance(ctx, rsre_core.StrMatchContext): + return space.wrap(ctx._string) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr) else: diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,8 +29,8 @@ #define PY_VERSION "2.7.10" /* PyPy version as a string */ -#define PYPY_VERSION "4.1.0-alpha0" -#define PYPY_VERSION_NUM 0x04010000 +#define PYPY_VERSION "5.1.0-alpha0" +#define PYPY_VERSION_NUM 0x05010000 /* Defined to mean 
a PyPy where cpyext holds more regular references to PyObjects, e.g. staying alive as long as the internal PyPy object diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -298,7 +298,14 @@ except IndexError: # not a single result chunks = self._prepare_slice_args(space, w_index) - return new_view(space, orig_arr, chunks) + copy = False + if isinstance(chunks[0], BooleanChunk): + # numpy compatibility + copy = True + w_ret = new_view(space, orig_arr, chunks) + if copy: + w_ret = w_ret.descr_copy(space, space.wrap(w_ret.get_order())) + return w_ret def descr_setitem(self, space, orig_arr, w_index, w_value): try: diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -22,7 +22,8 @@ from pypy.module.micronumpy.flagsobj import W_FlagsObject from pypy.module.micronumpy.strides import ( get_shape_from_iterable, shape_agreement, shape_agreement_multiple, - is_c_contiguous, is_f_contiguous, calc_strides, new_view) + is_c_contiguous, is_f_contiguous, calc_strides, new_view, BooleanChunk, + SliceChunk) from pypy.module.micronumpy.casting import can_cast_array from pypy.module.micronumpy.descriptor import get_dtype_cache @@ -204,7 +205,13 @@ if iter_shape is None: # w_index is a list of slices, return a view chunks = self.implementation._prepare_slice_args(space, w_index) - return new_view(space, self, chunks) + copy = False + if isinstance(chunks[0], BooleanChunk): + copy = True + w_ret = new_view(space, self, chunks) + if copy: + w_ret = w_ret.descr_copy(space, space.wrap(w_ret.get_order())) + return w_ret shape = res_shape + self.get_shape()[len(indexes):] w_res = W_NDimArray.from_shape(space, shape, self.get_dtype(), self.get_order(), w_instance=self) @@ -220,8 +227,24 @@ if iter_shape is None: # w_index is a list of slices chunks = self.implementation._prepare_slice_args(space, w_index) - view = new_view(space, self, chunks) - view.implementation.setslice(space, val_arr) + dim = -1 + view = self + for i, c in enumerate(chunks): + if isinstance(c, BooleanChunk): + dim = i + idx = c.w_idx + chunks.pop(i) + chunks.insert(0, SliceChunk(space.newslice(space.wrap(0), + space.w_None, space.w_None))) + break + if dim > 0: + view = self.implementation.swapaxes(space, self, 0, dim) + if dim >= 0: + view = new_view(space, self, chunks) + view.setitem_filter(space, idx, val_arr) + else: + view = new_view(space, self, chunks) + view.implementation.setslice(space, val_arr) return if support.product(iter_shape) == 0: return diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -97,22 +97,19 @@ # filter by axis dim filtr = chunks[dim] assert isinstance(filtr, BooleanChunk) + # XXX this creates a new array, and fails in setitem w_arr = w_arr.getitem_filter(space, filtr.w_idx, axis=dim) arr = w_arr.implementation chunks[dim] = SliceChunk(space.newslice(space.wrap(0), - space.wrap(-1), space.w_None)) + space.w_None, space.w_None)) r = calculate_slice_strides(space, arr.shape, arr.start, arr.get_strides(), arr.get_backstrides(), chunks) else: r = calculate_slice_strides(space, arr.shape, arr.start, arr.get_strides(), arr.get_backstrides(), chunks) shape, start, strides, backstrides = r - w_ret = W_NDimArray.new_slice(space, start, strides[:], backstrides[:], + return 
W_NDimArray.new_slice(space, start, strides[:], backstrides[:], shape[:], arr, w_arr) - if dim == 0: - # Do not return a view - return w_ret.descr_copy(space, space.wrap(w_ret.get_order())) - return w_ret @jit.unroll_safe def _extend_shape(old_shape, chunks): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2582,8 +2582,10 @@ assert b.base is None b = a[:, np.array([True, False, True])] assert b.base is not None + a[np.array([True, False]), 0] = 100 b = a[np.array([True, False]), 0] - assert (b ==[0]).all() + assert b.shape == (1,) + assert (b ==[100]).all() def test_scalar_indexing(self): import numpy as np diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -68,9 +68,12 @@ pipe.returncode,)) if stderr.startswith('SKIP:'): py.test.skip(stderr) - if stderr.startswith('debug_alloc.h:'): # lldebug builds - stderr = '' + #if stderr.startswith('debug_alloc.h:'): # lldebug builds + # stderr = '' #assert not stderr + if stderr: + print '*** stderr of the subprocess: ***' + print stderr # if discard_stdout_before_last_line: stdout = stdout.splitlines(True)[-1] diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (4, 1, 0, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (5, 1, 0, "alpha", 0) #XXX # sync patchlevel.h import pypy diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py @@ -33,8 +33,12 @@ pythonpath.insert(0, cffi_base) return os.pathsep.join(pythonpath) -def setup_module(mod): - mod.org_env = os.environ.copy() +def copy_away_env(): + global org_env + try: + org_env + except NameError: + org_env = os.environ.copy() class EmbeddingTests: @@ -122,6 +126,7 @@ os.chdir(curdir) def patch_environment(self): + copy_away_env() path = self.get_path() # for libpypy-c.dll or Python27.dll path = os.path.split(sys.executable)[0] + os.path.pathsep + path diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -266,11 +266,12 @@ return None from rpython.rlib.longlong2float import float2longlong from pypy.objspace.std.util import IDTAG_COMPLEX as tag + from pypy.objspace.std.util import IDTAG_SHIFT real = space.float_w(space.getattr(self, space.wrap("real"))) imag = space.float_w(space.getattr(self, space.wrap("imag"))) real_b = rbigint.fromrarith_int(float2longlong(real)) imag_b = rbigint.fromrarith_int(r_ulonglong(float2longlong(imag))) - val = real_b.lshift(64).or_(imag_b).lshift(3).int_or_(tag) + val = real_b.lshift(64).or_(imag_b).lshift(IDTAG_SHIFT).int_or_(tag) return space.newlong_from_rbigint(val) def int(self, space): diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -160,15 +160,11 @@ return 
self.floatval def int(self, space): + # this is a speed-up only, for space.int(w_float). if (type(self) is not W_FloatObject and space.is_overloaded(self, space.w_float, '__int__')): return W_Root.int(self, space) - try: - value = ovfcheck_float_to_int(self.floatval) - except OverflowError: - return space.long(self) - else: - return space.newint(value) + return self.descr_trunc(space) def is_w(self, space, w_other): from rpython.rlib.longlong2float import float2longlong @@ -185,9 +181,10 @@ return None from rpython.rlib.longlong2float import float2longlong from pypy.objspace.std.util import IDTAG_FLOAT as tag + from pypy.objspace.std.util import IDTAG_SHIFT val = float2longlong(space.float_w(self)) b = rbigint.fromrarith_int(val) - b = b.lshift(3).int_or_(tag) + b = b.lshift(IDTAG_SHIFT).int_or_(tag) return space.newlong_from_rbigint(b) def __repr__(self): @@ -424,9 +421,8 @@ "cannot convert float NaN to integer") def descr_trunc(self, space): - whole = math.modf(self.floatval)[1] try: - value = ovfcheck_float_to_int(whole) + value = ovfcheck_float_to_int(self.floatval) except OverflowError: return self.descr_long(space) else: @@ -661,7 +657,7 @@ __format__ = interp2app(W_FloatObject.descr_format), __coerce__ = interp2app(W_FloatObject.descr_coerce), __nonzero__ = interp2app(W_FloatObject.descr_nonzero), - __int__ = interp2app(W_FloatObject.int), + __int__ = interp2app(W_FloatObject.descr_trunc), __float__ = interp2app(W_FloatObject.descr_float), __long__ = interp2app(W_FloatObject.descr_long), __trunc__ = interp2app(W_FloatObject.descr_trunc), diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -24,7 +24,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.objspace.std import newformat from pypy.objspace.std.util import ( - BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_INT, wrap_parsestringerror) + BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_INT, IDTAG_SHIFT, wrap_parsestringerror) SENTINEL = object() @@ -46,7 +46,7 @@ if self.user_overridden_class: return None b = space.bigint_w(self) - b = b.lshift(3).int_or_(IDTAG_INT) + b = b.lshift(IDTAG_SHIFT).int_or_(IDTAG_INT) return space.newlong_from_rbigint(b) def int(self, space): diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -16,7 +16,7 @@ from pypy.objspace.std import newformat from pypy.objspace.std.intobject import W_AbstractIntObject from pypy.objspace.std.util import ( - BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_LONG, wrap_parsestringerror) + BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_LONG, IDTAG_SHIFT, wrap_parsestringerror) def delegate_other(func): @@ -45,7 +45,7 @@ if self.user_overridden_class: return None b = space.bigint_w(self) - b = b.lshift(3).int_or_(IDTAG_LONG) + b = b.lshift(IDTAG_SHIFT).int_or_(IDTAG_LONG) return space.newlong_from_rbigint(b) def unwrap(self, space): diff --git a/pypy/objspace/std/test/test_kwargsdict.py b/pypy/objspace/std/test/test_kwargsdict.py --- a/pypy/objspace/std/test/test_kwargsdict.py +++ b/pypy/objspace/std/test/test_kwargsdict.py @@ -118,10 +118,16 @@ def test_delitem(self): pass # delitem devolves for now + def test_setdefault_fast(self): + pass # not based on hashing at all + class TestDevolvedKwargsDictImplementation(BaseTestDevolvedDictImplementation): get_impl = get_impl StrategyClass = KwargsDictStrategy + def test_setdefault_fast(self): + pass # not based on hashing at 
all + class AppTestKwargsDictStrategy(object): def setup_class(cls): diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -172,15 +172,15 @@ def test_id_on_primitives(self): if self.cpython_apptest: skip("cpython behaves differently") - assert id(1) == (1 << 3) + 1 - assert id(1l) == (1 << 3) + 3 + assert id(1) == (1 << 4) + 1 + assert id(1l) == (1 << 4) + 3 class myint(int): pass assert id(myint(1)) != id(1) assert id(1.0) & 7 == 5 assert id(-0.0) != id(0.0) - assert hex(id(2.0)) == '0x20000000000000005L' + assert hex(id(2.0)) == '0x40000000000000005L' assert id(0.0) == 5 def test_id_on_strs(self): diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -2,11 +2,13 @@ from pypy.interpreter.error import OperationError, oefmt +IDTAG_SHIFT = 4 IDTAG_INT = 1 IDTAG_LONG = 3 IDTAG_FLOAT = 5 IDTAG_COMPLEX = 7 +IDTAG_METHOD = 9 CMP_OPS = dict(lt='<', le='<=', eq='==', ne='!=', gt='>', ge='>=') BINARY_BITWISE_OPS = {'and': '&', 'lshift': '<<', 'or': '|', 'rshift': '>>', diff --git a/pypy/tool/release/force-builds.py b/pypy/tool/release/force-builds.py --- a/pypy/tool/release/force-builds.py +++ b/pypy/tool/release/force-builds.py @@ -9,7 +9,7 @@ modified by PyPy team """ -import os, sys, urllib +import os, sys, urllib, subprocess from twisted.internet import reactor, defer from twisted.python import log @@ -83,4 +83,9 @@ (options, args) = parser.parse_args() if not options.branch: parser.error("branch option required") + try: + subprocess.check_call(['hg','id','-r', options.branch]) + except subprocess.CalledProcessError: + print 'branch', options.branch, 'could not be found in local repository' + sys.exit(-1) main(options.branch, options.server, user=options.user) diff --git a/rpython/doc/translation.rst b/rpython/doc/translation.rst --- a/rpython/doc/translation.rst +++ b/rpython/doc/translation.rst @@ -33,7 +33,7 @@ The RPython translation toolchain never sees Python source code or syntax trees, but rather starts with the *code objects* that define the behaviour of the function objects one gives it as input. The -`bytecode evaluator`_ and the :ref:`flow graph builder` work through these +:ref:`flow graph builder` works through these code objects using `abstract interpretation`_ to produce a control flow graph (one per function): yet another representation of the source program, but one which is suitable for applying type inference @@ -85,7 +85,6 @@ .. _PDF color version: _static/translation.pdf -.. _bytecode evaluator: interpreter.html .. 
_abstract interpretation: http://en.wikipedia.org/wiki/Abstract_interpretation diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -576,7 +576,9 @@ if cpu.supports_floats: def func(f0, f1, f2, f3, f4, f5, f6, i0, f7, i1, f8, f9): + seen.append((f0, f1, f2, f3, f4, f5, f6, i0, f7, i1, f8, f9)) return f0 + f1 + f2 + f3 + f4 + f5 + f6 + float(i0 + i1) + f7 + f8 + f9 + seen = [] F = lltype.Float I = lltype.Signed FUNC = self.FuncType([F] * 7 + [I] + [F] + [I] + [F]* 2, F) @@ -585,13 +587,15 @@ calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo.MOST_GENERAL) funcbox = self.get_funcbox(cpu, func_ptr) - args = ([boxfloat(.1) for i in range(7)] + - [InputArgInt(1), boxfloat(.2), InputArgInt(2), boxfloat(.3), - boxfloat(.4)]) + args = ([boxfloat(.0), boxfloat(.1), boxfloat(.2), boxfloat(.3), + boxfloat(.4), boxfloat(.5), boxfloat(.6), + InputArgInt(1), boxfloat(.7), InputArgInt(2), boxfloat(.8), + boxfloat(.9)]) res = self.execute_operation(rop.CALL_F, [funcbox] + args, 'float', descr=calldescr) - assert abs(longlong.getrealfloat(res) - 4.6) < 0.0001 + assert seen == [(.0, .1, .2, .3, .4, .5, .6, 1, .7, 2, .8, .9)] + assert abs(longlong.getrealfloat(res) - 7.5) < 0.0001 def test_call_many_arguments(self): # Test calling a function with a large number of arguments (more than diff --git a/rpython/rtyper/test/test_rdict.py b/rpython/rtyper/test/test_rdict.py --- a/rpython/rtyper/test/test_rdict.py +++ b/rpython/rtyper/test/test_rdict.py @@ -59,19 +59,6 @@ yield -def not_really_random(): - """A random-ish generator, which also generates nice patterns from time to time. - Could be useful to detect problems associated with specific usage patterns.""" - import random - x = random.random() - print 'random seed: %r' % (x,) - for i in range(12000): - r = 3.4 + i/20000.0 - x = r*x - x*x - assert 0 <= x < 4 - yield x - - class BaseTestRDict(BaseRtypingTest): def test_dict_creation(self): def createdict(i): @@ -1048,7 +1035,7 @@ s_BA_dic = s.items[1] r_AB_dic = rtyper.getrepr(s_AB_dic) - r_BA_dic = rtyper.getrepr(s_AB_dic) + r_BA_dic = rtyper.getrepr(s_BA_dic) assert r_AB_dic.lowleveltype == r_BA_dic.lowleveltype @@ -1166,50 +1153,51 @@ st_keys = sampled_from(keytypes_s) st_values = sampled_from(keytypes_s + [SomeString(can_be_None=True)]) -class Space(object): +class MappingSpace(object): def __init__(self, s_key, s_value): self.s_key = s_key self.s_value = s_value rtyper = PseudoRTyper() r_key = s_key.rtyper_makerepr(rtyper) r_value = s_value.rtyper_makerepr(rtyper) - dictrepr = rdict.DictRepr(rtyper, r_key, r_value, + dictrepr = self.MappingRepr(rtyper, r_key, r_value, DictKey(None, s_key), DictValue(None, s_value)) dictrepr.setup() - self.l_dict = rdict.ll_newdict(dictrepr.DICT) - self.reference = {} + self.l_dict = self.newdict(dictrepr) + self.reference = self.new_reference() self.ll_key = r_key.convert_const self.ll_value = r_value.convert_const def setitem(self, key, value): ll_key = self.ll_key(key) ll_value = self.ll_value(value) - rdict.ll_dict_setitem(self.l_dict, ll_key, ll_value) + self.ll_setitem(self.l_dict, ll_key, ll_value) self.reference[key] = value - assert rdict.ll_contains(self.l_dict, ll_key) + assert self.ll_contains(self.l_dict, ll_key) def delitem(self, key): ll_key = self.ll_key(key) - rdict.ll_dict_delitem(self.l_dict, ll_key) + self.ll_delitem(self.l_dict, ll_key) del self.reference[key] - assert not 
rdict.ll_contains(self.l_dict, ll_key) + assert not self.ll_contains(self.l_dict, ll_key) def copydict(self): - self.l_dict = rdict.ll_copy(self.l_dict) + self.l_dict = self.ll_copy(self.l_dict) + assert self.ll_len(self.l_dict) == len(self.reference) def cleardict(self): - rdict.ll_clear(self.l_dict) + self.ll_clear(self.l_dict) self.reference.clear() - assert rdict.ll_dict_len(self.l_dict) == 0 + assert self.ll_len(self.l_dict) == 0 def fullcheck(self): - assert rdict.ll_dict_len(self.l_dict) == len(self.reference) + assert self.ll_len(self.l_dict) == len(self.reference) for key, value in self.reference.iteritems(): - assert (rdict.ll_dict_getitem(self.l_dict, self.ll_key(key)) == + assert (self.ll_getitem(self.l_dict, self.ll_key(key)) == self.ll_value(value)) -class StressTest(GenericStateMachine): +class MappingSM(GenericStateMachine): def __init__(self): self.space = None @@ -1239,7 +1227,7 @@ def execute_step(self, action): if action.method == 'setup': - self.space = Space(*action.args) + self.space = self.Space(*action.args) self.st_keys = ann2strategy(self.space.s_key) self.st_values = ann2strategy(self.space.s_value) return @@ -1250,5 +1238,24 @@ if self.space: self.space.fullcheck() + +class DictSpace(MappingSpace): + MappingRepr = rdict.DictRepr + new_reference = dict + ll_getitem = staticmethod(rdict.ll_dict_getitem) + ll_setitem = staticmethod(rdict.ll_dict_setitem) + ll_delitem = staticmethod(rdict.ll_dict_delitem) + ll_len = staticmethod(rdict.ll_dict_len) + ll_contains = staticmethod(rdict.ll_contains) + ll_copy = staticmethod(rdict.ll_copy) + ll_clear = staticmethod(rdict.ll_clear) + + def newdict(self, repr): + return rdict.ll_newdict(repr.DICT) + +class DictSM(MappingSM): + Space = DictSpace + def test_hypothesis(): - run_state_machine_as_test(StressTest, settings(max_examples=500, stateful_step_count=100)) + run_state_machine_as_test( + DictSM, settings(max_examples=500, stateful_step_count=100)) diff --git a/rpython/rtyper/test/test_rordereddict.py b/rpython/rtyper/test/test_rordereddict.py --- a/rpython/rtyper/test/test_rordereddict.py +++ b/rpython/rtyper/test/test_rordereddict.py @@ -1,14 +1,18 @@ - import py from collections import OrderedDict +from hypothesis import settings +from hypothesis.stateful import run_state_machine_as_test + from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem import rordereddict, rstr from rpython.rlib.rarithmetic import intmask from rpython.rtyper.annlowlevel import llstr, hlstr -from rpython.rtyper.test.test_rdict import BaseTestRDict +from rpython.rtyper.test.test_rdict import ( + BaseTestRDict, MappingSpace, MappingSM) from rpython.rlib import objectmodel +rodct = rordereddict def get_indexes(ll_d): return ll_d.indexes._obj.container._as_ptr() @@ -330,124 +334,48 @@ assert res == 6 -class TestStress: +class ODictSpace(MappingSpace): + MappingRepr = rodct.OrderedDictRepr + new_reference = OrderedDict + ll_getitem = staticmethod(rodct.ll_dict_getitem) + ll_setitem = staticmethod(rodct.ll_dict_setitem) + ll_delitem = staticmethod(rodct.ll_dict_delitem) + ll_len = staticmethod(rodct.ll_dict_len) + ll_contains = staticmethod(rodct.ll_dict_contains) + ll_copy = staticmethod(rodct.ll_dict_copy) + ll_clear = staticmethod(rodct.ll_dict_clear) - def test_stress(self): - from rpython.annotator.dictdef import DictKey, DictValue - from rpython.annotator import model as annmodel - from rpython.rtyper import rint - from rpython.rtyper.test.test_rdict import not_really_random - rodct = rordereddict - dictrepr = 
rodct.OrderedDictRepr( - None, rint.signed_repr, rint.signed_repr, - DictKey(None, annmodel.SomeInteger()), - DictValue(None, annmodel.SomeInteger())) - dictrepr.setup() - l_dict = rodct.ll_newdict(dictrepr.DICT) - referencetable = [None] * 400 - referencelength = 0 - value = 0 + def newdict(self, repr): + return rodct.ll_newdict(repr.DICT) - def complete_check(): - for n, refvalue in zip(range(len(referencetable)), referencetable): - try: - gotvalue = rodct.ll_dict_getitem(l_dict, n) - except KeyError: - assert refvalue is None - else: - assert gotvalue == refvalue + def get_keys(self): + DICT = lltype.typeOf(self.l_dict).TO + ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT)) + ll_iter = rordereddict.ll_dictiter(ITER, self.l_dict) + ll_dictnext = rordereddict._ll_dictnext + keys_ll = [] + while True: + try: + num = ll_dictnext(ll_iter) + keys_ll.append(self.l_dict.entries[num].key) + except StopIteration: + break + return keys_ll - for x in not_really_random(): - n = int(x*100.0) # 0 <= x < 400 - op = repr(x)[-1] - if op <= '2' and referencetable[n] is not None: - rodct.ll_dict_delitem(l_dict, n) - referencetable[n] = None - referencelength -= 1 - elif op <= '6': - rodct.ll_dict_setitem(l_dict, n, value) - if referencetable[n] is None: - referencelength += 1 - referencetable[n] = value - value += 1 - else: - try: - gotvalue = rodct.ll_dict_getitem(l_dict, n) - except KeyError: - assert referencetable[n] is None - else: - assert gotvalue == referencetable[n] - if 1.38 <= x <= 1.39: - complete_check() - print 'current dict length:', referencelength - assert l_dict.num_live_items == referencelength - complete_check() + def fullcheck(self): + # overridden to also check key order + assert self.ll_len(self.l_dict) == len(self.reference) + keys_ll = self.get_keys() + assert len(keys_ll) == len(self.reference) + for key, ll_key in zip(self.reference, keys_ll): + assert self.ll_key(key) == ll_key + assert (self.ll_getitem(self.l_dict, self.ll_key(key)) == + self.ll_value(self.reference[key])) - def test_stress_2(self): - yield self.stress_combination, True, False - yield self.stress_combination, False, True - yield self.stress_combination, False, False - yield self.stress_combination, True, True - def stress_combination(self, key_can_be_none, value_can_be_none): - from rpython.rtyper.lltypesystem.rstr import string_repr - from rpython.annotator.dictdef import DictKey, DictValue - from rpython.annotator import model as annmodel - from rpython.rtyper.test.test_rdict import not_really_random - rodct = rordereddict +class ODictSM(MappingSM): + Space = ODictSpace - print - print "Testing combination with can_be_None: keys %s, values %s" % ( - key_can_be_none, value_can_be_none) - - class PseudoRTyper: - cache_dummy_values = {} - dictrepr = rodct.OrderedDictRepr( - PseudoRTyper(), string_repr, string_repr, - DictKey(None, annmodel.SomeString(key_can_be_none)), - DictValue(None, annmodel.SomeString(value_can_be_none))) - dictrepr.setup() - print dictrepr.lowleveltype - #for key, value in dictrepr.DICTENTRY._adtmeths.items(): - # print ' %s = %s' % (key, value) - l_dict = rodct.ll_newdict(dictrepr.DICT) - referencetable = [None] * 400 - referencelength = 0 - values = not_really_random() - keytable = [string_repr.convert_const("foo%d" % n) - for n in range(len(referencetable))] - - def complete_check(): - for n, refvalue in zip(range(len(referencetable)), referencetable): - try: - gotvalue = rodct.ll_dict_getitem(l_dict, keytable[n]) - except KeyError: - assert refvalue is None - else: - assert 
gotvalue == refvalue - - for x in not_really_random(): - n = int(x*100.0) # 0 <= x < 400 - op = repr(x)[-1] - if op <= '2' and referencetable[n] is not None: - rodct.ll_dict_delitem(l_dict, keytable[n]) - referencetable[n] = None - referencelength -= 1 - elif op <= '6': - ll_value = string_repr.convert_const(str(values.next())) - rodct.ll_dict_setitem(l_dict, keytable[n], ll_value) - if referencetable[n] is None: - referencelength += 1 - referencetable[n] = ll_value - else: - try: - gotvalue = rodct.ll_dict_getitem(l_dict, keytable[n]) - except KeyError: - assert referencetable[n] is None - else: - assert gotvalue == referencetable[n] - if 1.38 <= x <= 1.39: - complete_check() - print 'current dict length:', referencelength - assert l_dict.num_live_items == referencelength - complete_check() +def test_hypothesis(): + run_state_machine_as_test( + ODictSM, settings(max_examples=500, stateful_step_count=100)) From pypy.commits at gmail.com Sun Mar 6 05:58:15 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 06 Mar 2016 02:58:15 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: reverted x86 assembler (malloc_cond_varsize), related to the issue with bytesize and length Message-ID: <56dc0d47.500f1c0a.a3c89.7df9@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82808:572470f1b45a Date: 2016-03-06 11:57 +0100 http://bitbucket.org/pypy/pypy/changeset/572470f1b45a/ Log: reverted x86 assembler (malloc_cond_varsize), related to the issue with bytesize and length diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1546,6 +1546,32 @@ genop_gc_load_indexed_r = _genop_gc_load_indexed genop_gc_load_indexed_f = _genop_gc_load_indexed + def _imul_const_scaled(self, mc, targetreg, sourcereg, itemsize): + """Produce one operation to do roughly + targetreg = sourcereg * itemsize + except that the targetreg may still need shifting by 0,1,2,3. + """ + if (itemsize & 7) == 0: + shift = 3 + elif (itemsize & 3) == 0: + shift = 2 + elif (itemsize & 1) == 0: + shift = 1 + else: + shift = 0 + itemsize >>= shift + # + if valid_addressing_size(itemsize - 1): + mc.LEA_ra(targetreg, (sourcereg, sourcereg, + get_scale(itemsize - 1), 0)) + elif valid_addressing_size(itemsize): + mc.LEA_ra(targetreg, (rx86.NO_BASE_REGISTER, sourcereg, + get_scale(itemsize), 0)) + else: + mc.IMUL_rri(targetreg, sourcereg, itemsize) + # + return shift + def genop_discard_increment_debug_counter(self, op, arglocs): # The argument should be an immediate address. This should # generate code equivalent to a GETFIELD_RAW, an ADD(1), and a @@ -2374,8 +2400,12 @@ jmp_adr0 = self.mc.get_relative_pos() self.mc.MOV(eax, heap(nursery_free_adr)) - assert valid_addressing_size(itemsize) - shift = get_scale(itemsize) + if valid_addressing_size(itemsize): + shift = get_scale(itemsize) + else: + shift = self._imul_const_scaled(self.mc, edi.value, + varsizeloc.value, itemsize) + varsizeloc = edi # now varsizeloc is a register != eax. 
The size of # the variable part of the array is (varsizeloc << shift) From pypy.commits at gmail.com Sun Mar 6 06:02:07 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 06 Mar 2016 03:02:07 -0800 (PST) Subject: [pypy-commit] pypy memop-simplify3: close branch Message-ID: <56dc0e2f.aa17c20a.29a87.ffffff20@mx.google.com> Author: Richard Plangger Branch: memop-simplify3 Changeset: r82809:1cd36e0809b5 Date: 2016-03-06 11:58 +0100 http://bitbucket.org/pypy/pypy/changeset/1cd36e0809b5/ Log: close branch From pypy.commits at gmail.com Sun Mar 6 06:02:08 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 06 Mar 2016 03:02:08 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: added whats new entry for memop-simplify3 Message-ID: <56dc0e30.e5ecc20a.4bb5c.0ac0@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82810:71943846762a Date: 2016-03-06 12:01 +0100 http://bitbucket.org/pypy/pypy/changeset/71943846762a/ Log: added whats new entry for memop-simplify3 diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -186,4 +186,10 @@ .. branch: s390x-backend +.. branch: memop-simplify3 + +Simplification of zero_array. Start and end index are scaled using res ops (or cpu scaling) rather than doing it manually. + +.. branch: s390x-backend + The jit compiler backend implementation for the s390x architecutre. From pypy.commits at gmail.com Sun Mar 6 06:11:40 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 06 Mar 2016 03:11:40 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: fixed messed up whatsnew entry by merge... Message-ID: <56dc106c.d3921c0a.eb707.7ab8@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82811:5e9210fd34f7 Date: 2016-03-06 12:10 +0100 http://bitbucket.org/pypy/pypy/changeset/5e9210fd34f7/ Log: fixed messed up whatsnew entry by merge... diff --git a/pypy/doc/whatsnew-5.0.0.rst b/pypy/doc/whatsnew-5.0.0.rst --- a/pypy/doc/whatsnew-5.0.0.rst +++ b/pypy/doc/whatsnew-5.0.0.rst @@ -1,5 +1,5 @@ ========================= -What's new in PyPy 4.1.+ +What's new in PyPy 5.0.+ ========================= .. this is a revision shortly after release-4.0.1 @@ -189,4 +189,3 @@ .. branch: ndarray-setitem-filtered Fix boolean-array indexing in micronumpy - diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,190 +1,9 @@ ========================= -What's new in PyPy 4.1.+ +What's new in PyPy 5.0.+ ========================= -.. this is a revision shortly after release-4.0.1 -.. startrev: 4b5c840d0da2 - -Fixed ``_PyLong_FromByteArray()``, which was buggy. - -Fixed a crash with stacklets (or greenlets) on non-Linux machines -which showed up if you forget stacklets without resuming them. - -.. branch: numpy-1.10 - -Fix tests to run cleanly with -A and start to fix micronumpy for upstream numpy -which is now 1.10.2 - -.. branch: osx-flat-namespace - -Fix the cpyext tests on OSX by linking with -flat_namespace - -.. branch: anntype - -Refactor and improve exception analysis in the annotator. - -.. branch: posita/2193-datetime-timedelta-integrals - -Fix issue #2193. ``isinstance(..., int)`` => ``isinstance(..., numbers.Integral)`` -to allow for alternate ``int``-like implementations (e.g., ``future.types.newint``) - -.. 
branch: faster-rstruct - -Improve the performace of struct.unpack, which now directly reads inside the -string buffer and directly casts the bytes to the appropriate type, when -allowed. Unpacking of floats and doubles is about 15 times faster now, while -for integer types it's up to ~50% faster for 64bit integers. - -.. branch: wrap-specialisation - -Remove unnecessary special handling of space.wrap(). - -.. branch: compress-numbering - -Improve the memory signature of numbering instances in the JIT. This should massively -decrease the amount of memory consumed by the JIT, which is significant for most programs. - -.. branch: fix-trace-too-long-heuristic - -Improve the heuristic when disable trace-too-long - -.. branch: fix-setslice-can-resize - -Make rlist's ll_listsetslice() able to resize the target list to help -simplify objspace/std/listobject.py. Was issue #2196. - -.. branch: anntype2 - -A somewhat random bunch of changes and fixes following up on branch 'anntype'. Highlights: - -- Implement @doubledispatch decorator and use it for intersection() and difference(). - -- Turn isinstance into a SpaceOperation - -- Create a few direct tests of the fundamental annotation invariant in test_model.py - -- Remove bookkeeper attribute from DictDef and ListDef. - -.. branch: cffi-static-callback - -.. branch: vecopt-absvalue - -- Enhancement. Removed vector fields from AbstractValue. - -.. branch: memop-simplify2 - -Simplification. Backends implement too many loading instructions, only having a slightly different interface. -Four new operations (gc_load/gc_load_indexed, gc_store/gc_store_indexed) replace all the -commonly known loading operations - -.. branch: more-rposix - -Move wrappers for OS functions from `rpython/rtyper` to `rpython/rlib` and -turn them into regular RPython functions. Most RPython-compatible `os.*` -functions are now directly accessible as `rpython.rposix.*`. - -.. branch: always-enable-gil - -Simplify a bit the GIL handling in non-jitted code. Fixes issue #2205. - -.. branch: flowspace-cleanups - -Trivial cleanups in flowspace.operation : fix comment & duplicated method - -.. branch: test-AF_NETLINK - -Add a test for pre-existing AF_NETLINK support. Was part of issue #1942. - -.. branch: small-cleanups-misc - -Trivial misc cleanups: typo, whitespace, obsolete comments - -.. branch: cpyext-slotdefs -.. branch: fix-missing-canraise -.. branch: whatsnew - -.. branch: fix-2211 - -Fix the cryptic exception message when attempting to use extended slicing -in rpython. Was issue #2211. - -.. branch: ec-keepalive - -Optimize the case where, in a new C-created thread, we keep invoking -short-running Python callbacks. (CFFI on CPython has a hack to achieve -the same result.) This can also be seen as a bug fix: previously, -thread-local objects would be reset between two such calls. - -.. branch: globals-quasiimmut - -Optimize global lookups. - -.. branch: cffi-static-callback-embedding - -Updated to CFFI 1.5, which supports a new way to do embedding. -Deprecates http://pypy.readthedocs.org/en/latest/embedding.html. - -.. branch: fix-cpython-ssl-tests-2.7 - -Fix SSL tests by importing cpython's patch - -.. branch: remove-getfield-pure - -Remove pure variants of ``getfield_gc_*`` operations from the JIT. Relevant -optimizations instead consult the field descriptor to determine the purity of -the operation. 
Additionally, pure ``getfield`` operations are now handled -entirely by `rpython/jit/metainterp/optimizeopt/heap.py` rather than -`rpython/jit/metainterp/optimizeopt/pure.py`, which can result in better codegen -for traces containing a large number of pure getfield operations. - -.. branch: exctrans - -Try to ensure that no new functions get annotated during the 'source_c' phase. -Refactor sandboxing to operate at a higher level. - -.. branch: cpyext-bootstrap - -.. branch: vmprof-newstack - -Refactor vmprof to work cross-operating-system. - -.. branch: seperate-strucmember_h - -Seperate structmember.h from Python.h Also enhance creating api functions -to specify which header file they appear in (previously only pypy_decl.h) - -.. branch: llimpl - -Refactor register_external(), remove running_on_llinterp mechanism and -apply sandbox transform on externals at the end of annotation. - -.. branch: cffi-embedding-win32 - -.. branch: windows-vmprof-support - -vmprof should work on Windows. - - -.. branch: reorder-map-attributes - -When creating instances and adding attributes in several different orders -depending on some condition, the JIT would create too much code. This is now -fixed. - -.. branch: cpyext-gc-support-2 - -Improve CPython C API support, which means lxml now runs unmodified -(after removing pypy hacks, pending pull request) - -.. branch: look-inside-tuple-hash - -Look inside tuple hash, improving mdp benchmark - -.. branch: vlen-resume - -Compress resume data, saving 10-20% of memory consumed by the JIT - -.. branch: s390x-backend +.. this is a revision shortly after release-5.0.0 +.. startrev: 6d13e55b962a .. branch: memop-simplify3 From pypy.commits at gmail.com Sun Mar 6 09:54:42 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 06 Mar 2016 06:54:42 -0800 (PST) Subject: [pypy-commit] pypy default: merge numpy_partition which provides an app-level cffi implementation of partition() Message-ID: <56dc44b2.500f1c0a.a3c89.ffffc714@mx.google.com> Author: mattip Branch: Changeset: r82812:d56d7d90a811 Date: 2016-03-06 16:33 +0200 http://bitbucket.org/pypy/pypy/changeset/d56d7d90a811/ Log: merge numpy_partition which provides an app-level cffi implementation of partition() diff --git a/pypy/module/micronumpy/appbridge.py b/pypy/module/micronumpy/appbridge.py --- a/pypy/module/micronumpy/appbridge.py +++ b/pypy/module/micronumpy/appbridge.py @@ -9,6 +9,7 @@ w_array_repr = None w_array_str = None w__usefields = None + w_partition = None def __init__(self, space): pass diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -557,8 +557,12 @@ return self.get_scalar_value().item(space) l_w = [] for i in range(self.get_shape()[0]): - l_w.append(space.call_method(self.descr_getitem(space, - space.wrap(i)), "tolist")) + item_w = self.descr_getitem(space, space.wrap(i)) + if (isinstance(item_w, W_NDimArray) or + isinstance(item_w, boxes.W_GenericBox)): + l_w.append(space.call_method(item_w, "tolist")) + else: + l_w.append(item_w) return space.newlist(l_w) def descr_ravel(self, space, w_order=None): @@ -934,6 +938,10 @@ return return self.implementation.sort(space, w_axis, w_order) + def descr_partition(self, space, __args__): + return get_appbridge_cache(space).call_method( + space, 'numpy.core._partition_use', 'partition', __args__.prepend(self)) + def descr_squeeze(self, space, w_axis=None): cur_shape = self.get_shape() if not space.is_none(w_axis): @@ -1658,6 +1666,7 @@ 
argsort = interp2app(W_NDimArray.descr_argsort), sort = interp2app(W_NDimArray.descr_sort), + partition = interp2app(W_NDimArray.descr_partition), astype = interp2app(W_NDimArray.descr_astype), base = GetSetProperty(W_NDimArray.descr_get_base), byteswap = interp2app(W_NDimArray.descr_byteswap), diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -1936,6 +1936,22 @@ a = array([[1, 2], [3, 4]]) assert (a + a).tolist() == [[2, 4], [6, 8]] + def test_tolist_object(self): + from numpy import array + a = array([0], dtype=object) + assert a.tolist() == [0] + + def test_tolist_object_slice(self): + from numpy import array + list_expected = [slice(0, 1), 0] + a = array(list_expected, dtype=object) + assert a.tolist() == list_expected + + def test_tolist_object_slice_2d(self): + from numpy import array + a = array([(slice(0, 1), 1), (0, 1)], dtype=object) + assert a.tolist() == [[slice(0, 1, None), 1], [0, 1]] + def test_tolist_slice(self): from numpy import array a = array([[17.1, 27.2], [40.3, 50.3]]) From pypy.commits at gmail.com Sun Mar 6 09:54:44 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 06 Mar 2016 06:54:44 -0800 (PST) Subject: [pypy-commit] pypy default: fix release notice (idnar), document ndarray partition Message-ID: <56dc44b4.88c8c20a.8eb98.4491@mx.google.com> Author: mattip Branch: Changeset: r82813:b520e33904bc Date: 2016-03-06 16:53 +0200 http://bitbucket.org/pypy/pypy/changeset/b520e33904bc/ Log: fix release notice (idnar), document ndarray partition diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -98,7 +98,8 @@ * Bug Fixes - * Backport always using os.urandom for uuid4 from cpython + * Backport always using os.urandom for uuid4 from cpython and fix the JIT as well + (issue #2202) * More completely support datetime, optimize timedelta creation @@ -106,7 +107,7 @@ generated by the unroller, appeared in a complicated DJango app * Fix an elusive issue with stacklets on shadowstack which showed up when - forgetting stacklets without resuming them + forgetting stacklets without resuming them * Fix entrypoint() which now acquires the GIL @@ -146,13 +147,13 @@ * Support indexing filtering with a boolean ndarray + * Support partition() as an app-level function, together with a cffi wrapper + in pypy/numpy, this now provides partial support for partition() + * Performance improvements and refactorings: * Refactor and improve exception analysis in the annotator - * Improve the performace of struct.unpack; unpacking of floats and doubles - is now about 15 times faster and 64 bit integers faster by a factor of 2 - * Remove unnecessary special handling of space.wrap(). * Improve the memory signature of numbering instances in the JIT. This should From pypy.commits at gmail.com Sun Mar 6 10:20:01 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 06 Mar 2016 07:20:01 -0800 (PST) Subject: [pypy-commit] pypy default: document merged branch Message-ID: <56dc4aa1.654fc20a.1121.502f@mx.google.com> Author: mattip Branch: Changeset: r82814:7bb6381d084c Date: 2016-03-06 17:18 +0200 http://bitbucket.org/pypy/pypy/changeset/7bb6381d084c/ Log: document merged branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,4 +5,6 @@ .. 
this is a revision shortly after release-5.0.0 .. startrev: 6d13e55b962a - +.. branch: numpy_partition +Support ndarray.partition() as an app-level function numpy.core._partition_use, +provided as a cffi wrapper to upstream's implementation in the pypy/numpy repo From pypy.commits at gmail.com Sun Mar 6 11:02:55 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 06 Mar 2016 08:02:55 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: fixed resop comment + param count Message-ID: <56dc54af.a2afc20a.c00e8.5b3d@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82815:372d00e95882 Date: 2016-03-06 17:02 +0100 http://bitbucket.org/pypy/pypy/changeset/372d00e95882/ Log: fixed resop comment + param count diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -540,8 +540,9 @@ scale, offset, v_length_scaled = \ self._emit_mul_if_factor_offset_not_supported(v_length, scale, 0) v_scale = ConstInt(scale) - # there is probably no point in doing _emit_mul_if.. for - # c_zero! + # there is probably no point in doing _emit_mul_if.. for c_zero! + # NOTE that the scale might be != 1 for e.g. v_length_scaled if it is a constant + # it is later applied in emit_pending_zeros args = [v_arr, self.c_zero, v_length_scaled, ConstInt(scale), v_scale] o = ResOperation(rop.ZERO_ARRAY, args, descr=arraydescr) self.emit_op(o) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1224,8 +1224,9 @@ 'SETINTERIORFIELD_GC/3d/n', 'SETINTERIORFIELD_RAW/3d/n', # right now, only used by tests 'SETFIELD_GC/2d/n', - 'ZERO_ARRAY/4d/n', # only emitted by the rewrite, clears (part of) an array - # [arraygcptr, firstindex, length], descr=ArrayDescr + 'ZERO_ARRAY/5d/n', # only emitted by the rewrite, clears (part of) an array + # [arraygcptr, firstindex, length, scale_firstindex, + # scale_length], descr=ArrayDescr 'SETFIELD_RAW/2d/n', 'STRSETITEM/3/n', 'UNICODESETITEM/3/n', From pypy.commits at gmail.com Sun Mar 6 12:18:25 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 06 Mar 2016 09:18:25 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: (untested) fix zero_array on arm Message-ID: <56dc6661.8d571c0a.84a4f.ffffe9e7@mx.google.com> Author: Armin Rigo Branch: s390x-backend Changeset: r82816:a30c8d2ad91b Date: 2016-03-06 18:16 +0100 http://bitbucket.org/pypy/pypy/changeset/a30c8d2ad91b/ Log: (untested) fix zero_array on arm diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -1143,35 +1143,42 @@ def emit_op_zero_array(self, op, arglocs, regalloc, fcond): from rpython.jit.backend.llsupport.descr import unpack_arraydescr assert len(arglocs) == 0 - length_box = op.getarg(2) - if isinstance(length_box, ConstInt) and length_box.getint() == 0: + size_box = op.getarg(2) + if isinstance(size_box, ConstInt) and size_box.getint() == 0: return fcond # nothing to do itemsize, baseofs, _ = unpack_arraydescr(op.getdescr()) args = op.getarglist() + # + # ZERO_ARRAY(base_loc, start, size, 1, 1) + # 'start' and 'size' are both expressed in bytes, + # and the two scaling arguments should always be ConstInt(1) on ARM. 
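To make the five-operand ZERO_ARRAY concrete, here is a small illustrative sketch (plain Python with invented values, glossing over the constant-folding case mentioned in the rewrite's NOTE): a backend whose addressing modes can apply the item size keeps the index and length in items and receives the scale in the last two operands, while a backend that cannot, as the ARM and PPC code asserts, receives byte values and unit scales.

    def zero_array_operands(itemsize, length_items, backend_can_scale):
        # operands [firstindex, length, scale_firstindex, scale_length]
        # for a ZERO_ARRAY that clears the whole array starting at item 0
        if backend_can_scale:
            return [0, length_items, itemsize, itemsize]
        return [0, length_items * itemsize, 1, 1]

    assert zero_array_operands(4, 10, True) == [0, 10, 4, 4]
    assert zero_array_operands(4, 10, False) == [0, 40, 1, 1]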
+ assert args[3].getint() == 1 + assert args[4].getint() == 1 + # base_loc = regalloc.rm.make_sure_var_in_reg(args[0], args) - sibox = args[1] - if isinstance(sibox, ConstInt): - startindex_loc = None - startindex = sibox.getint() - assert startindex >= 0 + startbyte_box = args[1] + if isinstance(startbyte_box, ConstInt): + startbyte_loc = None + startbyte = startbyte_box.getint() + assert startbyte >= 0 else: - startindex_loc = regalloc.rm.make_sure_var_in_reg(sibox, args) - startindex = -1 + startbyte_loc = regalloc.rm.make_sure_var_in_reg(startbyte_box, + args) + startbyte = -1 - # base_loc and startindex_loc are in two regs here (or they are - # immediates). Compute the dstaddr_loc, which is the raw + # base_loc and startbyte_loc are in two regs here (or startbyte_loc + # is an immediate). Compute the dstaddr_loc, which is the raw # address that we will pass as first argument to memset(). # It can be in the same register as either one, but not in # args[2], because we're still needing the latter. dstaddr_box = TempVar() dstaddr_loc = regalloc.rm.force_allocate_reg(dstaddr_box, [args[2]]) - if startindex >= 0: # a constant - ofs = baseofs + startindex * itemsize + if startbyte >= 0: # a constant + ofs = baseofs + startbyte reg = base_loc.value else: - self.mc.gen_load_int(r.ip.value, itemsize) - self.mc.MLA(dstaddr_loc.value, r.ip.value, - startindex_loc.value, base_loc.value) + self.mc.ADD_rr(dstaddr_loc.value, + base_loc.value, startbyte_loc.value) ofs = baseofs reg = dstaddr_loc.value if check_imm_arg(ofs): @@ -1180,20 +1187,27 @@ self.mc.gen_load_int(r.ip.value, ofs) self.mc.ADD_rr(dstaddr_loc.value, reg, r.ip.value) - if (isinstance(length_box, ConstInt) and - length_box.getint() <= 14 and # same limit as GCC - itemsize in (4, 2, 1)): + # We use STRB, STRH or STR based on whether we know the array + # item size is a multiple of 1, 2 or 4. + if itemsize & 1: itemsize = 1 + elif itemsize & 2: itemsize = 2 + else: itemsize = 4 + limit = itemsize + next_group = -1 + if itemsize < 4 and startbyte >= 0: + # we optimize STRB/STRH into STR, but this needs care: + # it only works if startindex_loc is a constant, otherwise + # we'd be doing unaligned accesses. + next_group = (-startbyte) & 3 + limit = 4 + + if (isinstance(size_box, ConstInt) and + size_box.getint() <= 14 * limit): # same limit as GCC # Inline a series of STR operations, starting at 'dstaddr_loc'. - next_group = -1 - if itemsize < 4 and startindex >= 0: - # we optimize STRB/STRH into STR, but this needs care: - # it only works if startindex_loc is a constant, otherwise - # we'd be doing unaligned accesses. 
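A rough model of the store-width selection above (ordinary Python, purely illustrative): keep the widest of 1, 2 or 4 bytes that is guaranteed to divide the item size, so the loop never produces an unaligned store.

    def choose_store_width(itemsize):
        # mirrors the STRB/STRH/STR choice made by the ARM code
        if itemsize & 1:
            return 1      # odd item size: byte stores (STRB)
        elif itemsize & 2:
            return 2      # multiple of 2 but not 4: halfword stores (STRH)
        else:
            return 4      # multiple of 4: word stores (STR)

    assert choose_store_width(6) == 2
    assert choose_store_width(8) == 4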
- next_group = (-startindex * itemsize) & 3 # self.mc.gen_load_int(r.ip.value, 0) i = 0 - total_size = length_box.getint() * itemsize + total_size = size_box.getint() while i < total_size: sz = itemsize if i == next_group: @@ -1209,29 +1223,18 @@ i += sz else: - if isinstance(length_box, ConstInt): - length_loc = imm(length_box.getint() * itemsize) + if isinstance(size_box, ConstInt): + size_loc = imm(size_box.getint()) else: - # load length_loc in a register different than dstaddr_loc - length_loc = regalloc.rm.make_sure_var_in_reg(length_box, - [dstaddr_box]) - if itemsize > 1: - # we need a register that is different from dstaddr_loc, - # but which can be identical to length_loc (as usual, - # only if the length_box is not used by future operations) - bytes_box = TempVar() - bytes_loc = regalloc.rm.force_allocate_reg(bytes_box, - [dstaddr_box]) - self.mc.gen_load_int(r.ip.value, itemsize) - self.mc.MUL(bytes_loc.value, r.ip.value, length_loc.value) - length_box = bytes_box - length_loc = bytes_loc + # load size_loc in a register different than dstaddr_loc + size_loc = regalloc.rm.make_sure_var_in_reg(size_box, + [dstaddr_box]) # # call memset() regalloc.before_call() self.simple_call_no_collect(imm(self.memset_addr), - [dstaddr_loc, imm(0), length_loc]) - regalloc.rm.possibly_free_var(length_box) + [dstaddr_loc, imm(0), size_loc]) + regalloc.rm.possibly_free_var(size_box) regalloc.rm.possibly_free_var(dstaddr_box) return fcond From pypy.commits at gmail.com Sun Mar 6 12:18:26 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 06 Mar 2016 09:18:26 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: merge heads Message-ID: <56dc6662.a151c20a.73e93.73db@mx.google.com> Author: Armin Rigo Branch: s390x-backend Changeset: r82817:a554729812fd Date: 2016-03-06 18:17 +0100 http://bitbucket.org/pypy/pypy/changeset/a554729812fd/ Log: merge heads diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -540,8 +540,9 @@ scale, offset, v_length_scaled = \ self._emit_mul_if_factor_offset_not_supported(v_length, scale, 0) v_scale = ConstInt(scale) - # there is probably no point in doing _emit_mul_if.. for - # c_zero! + # there is probably no point in doing _emit_mul_if.. for c_zero! + # NOTE that the scale might be != 1 for e.g. 
v_length_scaled if it is a constant + # it is later applied in emit_pending_zeros args = [v_arr, self.c_zero, v_length_scaled, ConstInt(scale), v_scale] o = ResOperation(rop.ZERO_ARRAY, args, descr=arraydescr) self.emit_op(o) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1224,8 +1224,9 @@ 'SETINTERIORFIELD_GC/3d/n', 'SETINTERIORFIELD_RAW/3d/n', # right now, only used by tests 'SETFIELD_GC/2d/n', - 'ZERO_ARRAY/4d/n', # only emitted by the rewrite, clears (part of) an array - # [arraygcptr, firstindex, length], descr=ArrayDescr + 'ZERO_ARRAY/5d/n', # only emitted by the rewrite, clears (part of) an array + # [arraygcptr, firstindex, length, scale_firstindex, + # scale_length], descr=ArrayDescr 'SETFIELD_RAW/2d/n', 'STRSETITEM/3/n', 'UNICODESETITEM/3/n', From pypy.commits at gmail.com Sun Mar 6 12:31:33 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 06 Mar 2016 09:31:33 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: (untested) fix zero array for ppc Message-ID: <56dc6975.42711c0a.43478.fffff4fa@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82818:8ab56b6e79d4 Date: 2016-03-06 18:28 +0100 http://bitbucket.org/pypy/pypy/changeset/8ab56b6e79d4/ Log: (untested) fix zero array for ppc diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -910,79 +910,78 @@ else: self.mc.std(a, b, c) def emit_zero_array(self, op, arglocs, regalloc): - base_loc, startindex_loc, length_loc, ofs_loc, itemsize_loc = arglocs + base_loc, startindex_loc, length_loc, ofs_loc = arglocs - # assume that an array where an item size is N: - # * if N is even, then all items are aligned to a multiple of 2 - # * if N % 4 == 0, then all items are aligned to a multiple of 4 - # * if N % 8 == 0, then all items are aligned to a multiple of 8 - itemsize = itemsize_loc.getint() - if itemsize & 1: stepsize = 1 - elif itemsize & 2: stepsize = 2 - elif (itemsize & 4) or IS_PPC_32: stepsize = 4 - else: stepsize = WORD + stepsize = 8 + if IS_PPC_32: + stepsize = 4 - repeat_factor = itemsize // stepsize - if repeat_factor != 1: - # This is only for itemsize not in (1, 2, 4, WORD). 
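For reference, the removed logic here boiled down to the following back-of-the-envelope calculation (illustrative Python, not the real helper): pick the widest naturally aligned store width for the item size, then repeat that store itemsize // stepsize times per item when the item size is not simply 1, 2, 4 or WORD.

    def old_step_and_repeat(itemsize, word=8):
        # removed scheme: stepsize divides itemsize and repeat_factor makes
        # up the difference, e.g. 12-byte records need three 4-byte stores
        if itemsize & 1:
            stepsize = 1
        elif itemsize & 2:
            stepsize = 2
        elif itemsize & 4:
            stepsize = 4
        else:
            stepsize = word
        return stepsize, itemsize // stepsize

    assert old_step_and_repeat(12) == (4, 3)
    assert old_step_and_repeat(8) == (8, 1)

The replacement no longer needs any of this: the rewrite already hands the backend a byte count, so a fixed word-sized step (8, or 4 on PPC32) plus a byte-sized tail loop is enough.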
- # Include the repeat_factor inside length_loc if it is a constant - if length_loc.is_imm(): - length_loc = imm(length_loc.value * repeat_factor) - repeat_factor = 1 # included + if length_loc.is_imm(): + if length_loc.value <= 0: + return # nothing to do - unroll = -1 + self.mc.addi(r.SCRATCH2.value, startindex_loc.value, ofs_loc.getint()) + ofs_loc = r.SCRATCH2 + # ofs_loc is now the startindex in bytes + the array offset + if length_loc.is_imm(): - if length_loc.value <= 8: - unroll = length_loc.value - if unroll <= 0: - return # nothing to do - - ofs_loc = self._apply_scale(ofs_loc, startindex_loc, itemsize_loc) - ofs_loc = self._copy_in_scratch2(ofs_loc) - - if unroll > 0: - assert repeat_factor == 1 - self.mc.li(r.SCRATCH.value, 0) - self.eza_stXux(r.SCRATCH.value, ofs_loc.value, base_loc.value, - itemsize) - for i in range(1, unroll): - self.eza_stX(r.SCRATCH.value, ofs_loc.value, i * stepsize, - itemsize) - + self.mc.load_imm(r.SCRATCH, length_loc.value) + length_loc = r.SCRATCH + jz_location = -1 else: - if length_loc.is_imm(): - self.mc.load_imm(r.SCRATCH, length_loc.value) - length_loc = r.SCRATCH - jz_location = -1 - assert repeat_factor == 1 - else: - self.mc.cmp_op(0, length_loc.value, 0, imm=True) - jz_location = self.mc.currpos() - self.mc.trap() - length_loc = self._multiply_by_constant(length_loc, - repeat_factor, - r.SCRATCH) - self.mc.mtctr(length_loc.value) - self.mc.li(r.SCRATCH.value, 0) - - self.eza_stXux(r.SCRATCH.value, ofs_loc.value, base_loc.value, - itemsize) - bdz_location = self.mc.currpos() + # jump to end if length is less than stepsize + self.mc.cmp_op(0, length_loc.value, stepsize, imm=True) + jz_location = self.mc.currpos() self.mc.trap() - loop_location = self.mc.currpos() - self.eza_stXu(r.SCRATCH.value, ofs_loc.value, stepsize, - itemsize) - self.mc.bdnz(loop_location - self.mc.currpos()) + self.mc.li(r.SCRATCH.value, 0) - pmc = OverwritingBuilder(self.mc, bdz_location, 1) - pmc.bdz(self.mc.currpos() - bdz_location) + # NOTE the following assumes that bytes have been passed to both startindex + # and length. Thus we zero 4/8 bytes in a loop in 1) and every remaining + # byte is zeroed in another loop in 2) + + # first store of case 1) + self.eza_stXux(r.SCRATCH.value, ofs_loc.value, base_loc.value, stepsize) + self.mc.subi(length_loc.value, length_loc.value, stepsize) + self.mc.cmp_op(0, length_loc.value, stepsize, imm=True) + lt_location = self.mc.currpos() + self.mc.trap() # jump over the loop if we are already done with 1) + + # 1) The next loop copies WORDS into the memory chunk starting at startindex + # ending at startindex + length. These are bytes + loop_location = self.mc.currpos() + self.eza_stXu(r.SCRATCH.value, ofs_loc.value, stepsize, stepsize) + self.mc.subi(length_loc.value, length_loc.value, stepsize) + self.mc.cmp_op(0, length_loc.value, stepsize, imm=True) + self.mc.bge(loop_location - self.mc.currpos()) + + pmc = OverwritingBuilder(self.mc, lt_location, 1) + pmc.blt(self.mc.currpos() - lt_location) + pmc.overwrite() + + if jz_location != -1: + pmc = OverwritingBuilder(self.mc, jz_location, 1) + pmc.ble(self.mc.currpos() - jz_location) # !GT pmc.overwrite() - if jz_location != -1: - pmc = OverwritingBuilder(self.mc, jz_location, 1) - pmc.ble(self.mc.currpos() - jz_location) # !GT - pmc.overwrite() + # 2) There might be some bytes left to be written. + # following scenario: length_loc == 3 bytes, stepsize == 4! + # need to write the last bytes. 
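The intent of the two phases sketched in the comments can be summed up by this rough pure-Python model (invented helper, ignoring the exact register and branch layout of the emitted code): clear word-sized chunks while at least a word remains, then finish the leftover bytes one at a time.

    def zero_bytes(mem, start, length, word=8):
        # phase 1): word-sized stores
        i = start
        end = start + length
        while i + word <= end:
            for k in range(word):
                mem[i + k] = 0
            i += word
        # phase 2): byte stores for the remaining length % word bytes
        while i < end:
            mem[i] = 0
            i += 1

    buf = bytearray(b'\xff' * 16)
    zero_bytes(buf, 3, 11)
    assert buf == bytearray(b'\xff' * 3 + b'\x00' * 11 + b'\xff' * 2)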
+ self.mc.cmp_op(0, length_loc.value, 0, imm=True) + jle_location = self.mc.curpos() + self.mc.trap() + + self.mc.mtctr(length_loc.value) + + loop_position = self.mc.curpos() + self.eza_stXu(r.SCRATCH.value, ofs_loc.value, 1, 1) + self.mc.bdnz(self.mc.currpos() - loop_location) + + pmc = OverwritingBuilder(self.mc, jle_location, 1) + pmc.ble(self.mc.currpos() - jle_location) # !GT + pmc.overwrite() + + class StrOpAssembler(object): diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -955,12 +955,19 @@ return arglocs def prepare_zero_array(self, op): - itemsize, ofs, _ = unpack_arraydescr(op.getdescr()) + _, ofs, _ = unpack_arraydescr(op.getdescr()) base_loc = self.ensure_reg(op.getarg(0)) startindex_loc = self.ensure_reg_or_16bit_imm(op.getarg(1)) length_loc = self.ensure_reg_or_16bit_imm(op.getarg(2)) + # startindex and length are bytes, not array items anymore. + # rewrite already applied the scale! + startindex_scale_box = op.getarg(3) + assert startindex_scale_box.getint() == 1 + length_scale_box = op.getarg(4) + assert length_scale_box.getint() == 1 + # ofs_loc = self.ensure_reg_or_16bit_imm(ConstInt(ofs)) - return [base_loc, startindex_loc, length_loc, ofs_loc, imm(itemsize)] + return [base_loc, startindex_loc, length_loc, ofs_loc] def prepare_cond_call(self, op): self.load_condition_into_cc(op.getarg(0)) From pypy.commits at gmail.com Sun Mar 6 12:36:50 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 06 Mar 2016 09:36:50 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: kill unused methods (_apply_scale, _multiply_by_constant) Message-ID: <56dc6ab2.8e301c0a.21c39.fffff8e1@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82819:539acda35771 Date: 2016-03-06 18:35 +0100 http://bitbucket.org/pypy/pypy/changeset/539acda35771/ Log: kill unused methods (_apply_scale, _multiply_by_constant) diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -828,63 +828,6 @@ SIZE2SCALE = dict([(1<<_i, _i) for _i in range(32)]) - def _multiply_by_constant(self, loc, multiply_by, scratch_loc): - # XXX should die together with _apply_scale() but can't because - # of emit_zero_array() and malloc_cond_varsize() at the moment - assert loc.is_reg() - if multiply_by == 1: - return loc - try: - scale = self.SIZE2SCALE[multiply_by] - except KeyError: - if _check_imm_arg(multiply_by): - self.mc.mulli(scratch_loc.value, loc.value, multiply_by) - else: - self.mc.load_imm(scratch_loc, multiply_by) - if IS_PPC_32: - self.mc.mullw(scratch_loc.value, loc.value, - scratch_loc.value) - else: - self.mc.mulld(scratch_loc.value, loc.value, - scratch_loc.value) - else: - self.mc.sldi(scratch_loc.value, loc.value, scale) - return scratch_loc - - def _apply_scale(self, ofs, index_loc, itemsize): - # XXX should die now that getarrayitem and getinteriorfield are gone - # but can't because of emit_zero_array() at the moment - - # For arrayitem and interiorfield reads and writes: this returns an - # offset suitable for use in ld/ldx or similar instructions. - # The result will be either the register r2 or a 16-bit immediate. - # The arguments stand for "ofs + index_loc * itemsize", - # with the following constrains: - assert ofs.is_imm() # must be an immediate... 
- assert _check_imm_arg(ofs.getint()) # ...that fits 16 bits - assert index_loc is not r.SCRATCH2 # can be a reg or imm (any size) - assert itemsize.is_imm() # must be an immediate (any size) - - multiply_by = itemsize.value - offset = ofs.getint() - if index_loc.is_imm(): - offset += index_loc.getint() * multiply_by - if _check_imm_arg(offset): - return imm(offset) - else: - self.mc.load_imm(r.SCRATCH2, offset) - return r.SCRATCH2 - else: - index_loc = self._multiply_by_constant(index_loc, multiply_by, - r.SCRATCH2) - # here, the new index_loc contains 'index_loc * itemsize'. - # If offset != 0 then we have to add it here. Note that - # mc.addi() would not be valid with operand r0. - if offset != 0: - self.mc.addi(r.SCRATCH2.value, index_loc.value, offset) - index_loc = r.SCRATCH2 - return index_loc - def _copy_in_scratch2(self, loc): if loc.is_imm(): self.mc.li(r.SCRATCH2.value, loc.value) From pypy.commits at gmail.com Sun Mar 6 12:38:03 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 06 Mar 2016 09:38:03 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: typo it is curRpos Message-ID: <56dc6afb.e83cc20a.8b5fd.699b@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82820:974fa6a90ecd Date: 2016-03-06 18:37 +0100 http://bitbucket.org/pypy/pypy/changeset/974fa6a90ecd/ Log: typo it is curRpos diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -911,12 +911,12 @@ # following scenario: length_loc == 3 bytes, stepsize == 4! # need to write the last bytes. self.mc.cmp_op(0, length_loc.value, 0, imm=True) - jle_location = self.mc.curpos() + jle_location = self.mc.currpos() self.mc.trap() self.mc.mtctr(length_loc.value) - loop_position = self.mc.curpos() + loop_position = self.mc.currpos() self.eza_stXu(r.SCRATCH.value, ofs_loc.value, 1, 1) self.mc.bdnz(self.mc.currpos() - loop_location) From pypy.commits at gmail.com Sun Mar 6 12:39:33 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 06 Mar 2016 09:39:33 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: _multiply_by_constant is still used by malloc_cond_varsize Message-ID: <56dc6b55.654fc20a.1121.7826@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82821:64b4e6d07aed Date: 2016-03-06 18:38 +0100 http://bitbucket.org/pypy/pypy/changeset/64b4e6d07aed/ Log: _multiply_by_constant is still used by malloc_cond_varsize diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -828,6 +828,29 @@ SIZE2SCALE = dict([(1<<_i, _i) for _i in range(32)]) + def _multiply_by_constant(self, loc, multiply_by, scratch_loc): + # XXX should die together with _apply_scale() but can't because + # of emit_zero_array() and malloc_cond_varsize() at the moment + assert loc.is_reg() + if multiply_by == 1: + return loc + try: + scale = self.SIZE2SCALE[multiply_by] + except KeyError: + if _check_imm_arg(multiply_by): + self.mc.mulli(scratch_loc.value, loc.value, multiply_by) + else: + self.mc.load_imm(scratch_loc, multiply_by) + if IS_PPC_32: + self.mc.mullw(scratch_loc.value, loc.value, + scratch_loc.value) + else: + self.mc.mulld(scratch_loc.value, loc.value, + scratch_loc.value) + else: + self.mc.sldi(scratch_loc.value, loc.value, scale) + return scratch_loc + def _copy_in_scratch2(self, loc): if loc.is_imm(): 
self.mc.li(r.SCRATCH2.value, loc.value) From pypy.commits at gmail.com Sun Mar 6 13:16:16 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 06 Mar 2016 10:16:16 -0800 (PST) Subject: [pypy-commit] pypy rawrefcount-review: Try to simplify tests Message-ID: <56dc73f0.426dc20a.5e818.ffff8360@mx.google.com> Author: Ronan Lamy Branch: rawrefcount-review Changeset: r82822:805b6b103730 Date: 2016-03-06 16:09 +0000 http://bitbucket.org/pypy/pypy/changeset/805b6b103730/ Log: Try to simplify tests diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py --- a/rpython/memory/gc/test/test_rawrefcount.py +++ b/rpython/memory/gc/test/test_rawrefcount.py @@ -28,42 +28,58 @@ count2 = len(self.trigger) assert count2 - count1 == expected_trigger + def create_gcobj(self, intval, old=False, immortal=False): + if immortal: + p1 = lltype.malloc(S, immortal=True) + p1.x = intval + self.consider_constant(p1) + return p1 + p1 = self.malloc(S) + p1.x = intval + if old: + self.stackroots.append(p1) + self._collect(major=False) + p1 = self.stackroots.pop() + return p1 + + def create_rawobj(self, immortal=False): + r1 = lltype.malloc(PYOBJ_HDR, flavor='raw', immortal=immortal) + r1.ob_refcnt = 0 + r1.ob_pypy_link = 0 + return r1 + + def create_link(self, rawobj, gcobj, is_light=False, is_pyobj=False): + if is_light: + rawobj.ob_refcnt += REFCNT_FROM_PYPY_LIGHT + else: + rawobj.ob_refcnt += REFCNT_FROM_PYPY + rawaddr = llmemory.cast_ptr_to_adr(rawobj) + gcref = lltype.cast_opaque_ptr(llmemory.GCREF, gcobj) + if is_pyobj: + assert not is_light + self.gc.rawrefcount_create_link_pyobj(gcref, rawaddr) + else: + self.gc.rawrefcount_create_link_pypy(gcref, rawaddr) + def _rawrefcount_pair(self, intval, is_light=False, is_pyobj=False, create_old=False, create_immortal=False): + self.trigger = [] + self.gc.rawrefcount_init(lambda: self.trigger.append(1)) + # + p1 = self.create_gcobj(intval, old=create_old, immortal=create_immortal) + r1 = self.create_rawobj(immortal=create_immortal) + self.create_link(r1, p1, is_light=is_light, is_pyobj=is_pyobj) if is_light: rc = REFCNT_FROM_PYPY_LIGHT else: rc = REFCNT_FROM_PYPY - self.trigger = [] - self.gc.rawrefcount_init(lambda: self.trigger.append(1)) - # - if create_immortal: - p1 = lltype.malloc(S, immortal=True) - else: - p1 = self.malloc(S) - p1.x = intval - if create_immortal: - self.consider_constant(p1) - elif create_old: - self.stackroots.append(p1) - self._collect(major=False) - p1 = self.stackroots.pop() - p1ref = lltype.cast_opaque_ptr(llmemory.GCREF, p1) - r1 = lltype.malloc(PYOBJ_HDR, flavor='raw', immortal=create_immortal) - r1.ob_refcnt = rc - r1.ob_pypy_link = 0 - r1addr = llmemory.cast_ptr_to_adr(r1) - if is_pyobj: - assert not is_light - self.gc.rawrefcount_create_link_pyobj(p1ref, r1addr) - else: - self.gc.rawrefcount_create_link_pypy(p1ref, r1addr) assert r1.ob_refcnt == rc assert r1.ob_pypy_link != 0 def check_alive(extra_refcount): assert r1.ob_refcnt == rc + extra_refcount assert r1.ob_pypy_link != 0 + r1addr = llmemory.cast_ptr_to_adr(r1) p1ref = self.gc.rawrefcount_to_obj(r1addr) p1 = lltype.cast_opaque_ptr(lltype.Ptr(S), p1ref) assert p1.x == intval @@ -72,25 +88,25 @@ else: assert self.gc.rawrefcount_from_obj(p1ref) == llmemory.NULL return p1 - return p1, p1ref, r1, r1addr, check_alive + return p1, r1, check_alive @py.test.mark.parametrize('old', [True, False]) def test_rawrefcount_objects_basic(self, old): - p1, p1ref, r1, r1addr, check_alive = ( + p1, r1, check_alive = ( self._rawrefcount_pair(42, 
is_light=True, create_old=old)) - p2 = self.malloc(S) - p2.x = 84 + p1ref = lltype.cast_opaque_ptr(llmemory.GCREF, p1) + r1addr = llmemory.cast_ptr_to_adr(r1) + assert r1.ob_pypy_link != 0 + assert self.gc.rawrefcount_from_obj(p1ref) == r1addr + assert self.gc.rawrefcount_to_obj(r1addr) == p1ref + p2 = self.create_gcobj(84) + r2 = self.create_rawobj() + r2.ob_refcnt += 1 p2ref = lltype.cast_opaque_ptr(llmemory.GCREF, p2) - r2 = lltype.malloc(PYOBJ_HDR, flavor='raw') - r2.ob_refcnt = 1 - r2.ob_pypy_link = 0 r2addr = llmemory.cast_ptr_to_adr(r2) # p2 and r2 are not linked - assert r1.ob_pypy_link != 0 assert r2.ob_pypy_link == 0 - assert self.gc.rawrefcount_from_obj(p1ref) == r1addr assert self.gc.rawrefcount_from_obj(p2ref) == llmemory.NULL - assert self.gc.rawrefcount_to_obj(r1addr) == p1ref assert self.gc.rawrefcount_to_obj(r2addr) == lltype.nullptr( llmemory.GCREF.TO) lltype.free(r1, flavor='raw') @@ -98,7 +114,7 @@ @py.test.mark.parametrize('old', [True, False]) def test_rawrefcount_objects_collection_survives_from_raw(self, old): - p1, p1ref, r1, r1addr, check_alive = ( + p1, r1, check_alive = ( self._rawrefcount_pair(42, is_light=True, create_old=old)) check_alive(0) r1.ob_refcnt += 1 @@ -118,7 +134,7 @@ @py.test.mark.parametrize('old', [True, False]) def test_rawrefcount_dies_quickly(self, old): - p1, p1ref, r1, r1addr, check_alive = ( + p1, r1, check_alive = ( self._rawrefcount_pair(42, is_light=True, create_old=old)) check_alive(0) self._collect(major=False) @@ -131,7 +147,7 @@ @py.test.mark.parametrize('old', [True, False]) def test_rawrefcount_objects_collection_survives_from_obj(self, old): - p1, p1ref, r1, r1addr, check_alive = ( + p1, r1, check_alive = ( self._rawrefcount_pair(42, is_light=True, create_old=old)) check_alive(0) self.stackroots.append(p1) @@ -150,7 +166,7 @@ @py.test.mark.parametrize('old', [True, False]) def test_pypy_nonlight_survives_from_raw(self, old): - p1, p1ref, r1, r1addr, check_alive = ( + p1, r1, check_alive = ( self._rawrefcount_pair(42, is_light=False, create_old=old)) check_alive(0) r1.ob_refcnt += 1 @@ -165,6 +181,7 @@ py.test.raises(RuntimeError, "p1.x") # dead assert r1.ob_refcnt == 0 assert r1.ob_pypy_link == 0 + r1addr = llmemory.cast_ptr_to_adr(r1) assert self.gc.rawrefcount_next_dead() == r1addr assert self.gc.rawrefcount_next_dead() == llmemory.NULL assert self.gc.rawrefcount_next_dead() == llmemory.NULL @@ -173,7 +190,7 @@ @py.test.mark.parametrize('old', [True, False]) def test_pypy_nonlight_survives_from_obj(self, old): - p1, p1ref, r1, r1addr, check_alive = ( + p1, r1, check_alive = ( self._rawrefcount_pair(42, is_light=False, create_old=old)) check_alive(0) self.stackroots.append(p1) @@ -189,13 +206,14 @@ py.test.raises(RuntimeError, "p1.x") # dead assert r1.ob_refcnt == 0 assert r1.ob_pypy_link == 0 + r1addr = llmemory.cast_ptr_to_adr(r1) assert self.gc.rawrefcount_next_dead() == r1addr self.gc.check_no_more_rawrefcount_state() lltype.free(r1, flavor='raw') @py.test.mark.parametrize('old', [True, False]) def test_pypy_nonlight_dies_quickly(self, old): - p1, p1ref, r1, r1addr, check_alive = ( + p1, r1, check_alive = ( self._rawrefcount_pair(42, is_light=False, create_old=old)) check_alive(0) if old: @@ -207,12 +225,13 @@ py.test.raises(RuntimeError, "p1.x") # dead assert r1.ob_refcnt == 0 assert r1.ob_pypy_link == 0 + r1addr = llmemory.cast_ptr_to_adr(r1) assert self.gc.rawrefcount_next_dead() == r1addr self.gc.check_no_more_rawrefcount_state() lltype.free(r1, flavor='raw') def test_pyobject_pypy_link_dies_on_minor_collection(self): - 
p1, p1ref, r1, r1addr, check_alive = ( + p1, r1, check_alive = ( self._rawrefcount_pair(42, is_pyobj=True)) check_alive(0) r1.ob_refcnt += 1 # the pyobject is kept alive @@ -224,7 +243,7 @@ @py.test.mark.parametrize('old', [True, False]) def test_pyobject_dies(self, old): - p1, p1ref, r1, r1addr, check_alive = ( + p1, r1, check_alive = ( self._rawrefcount_pair(42, is_pyobj=True, create_old=old)) check_alive(0) if old: @@ -235,13 +254,14 @@ self._collect(major=False, expected_trigger=1) assert r1.ob_refcnt == 0 # refcnt dropped to 0 assert r1.ob_pypy_link == 0 # detached + r1addr = llmemory.cast_ptr_to_adr(r1) assert self.gc.rawrefcount_next_dead() == r1addr self.gc.check_no_more_rawrefcount_state() lltype.free(r1, flavor='raw') @py.test.mark.parametrize('old', [True, False]) def test_pyobject_survives_from_obj(self, old): - p1, p1ref, r1, r1addr, check_alive = ( + p1, r1, check_alive = ( self._rawrefcount_pair(42, is_pyobj=True, create_old=old)) check_alive(0) self.stackroots.append(p1) @@ -258,12 +278,13 @@ py.test.raises(RuntimeError, "p1.x") # dead assert r1.ob_refcnt == 0 assert r1.ob_pypy_link == 0 + r1addr = llmemory.cast_ptr_to_adr(r1) assert self.gc.rawrefcount_next_dead() == r1addr self.gc.check_no_more_rawrefcount_state() lltype.free(r1, flavor='raw') def test_pyobject_attached_to_prebuilt_obj(self): - p1, p1ref, r1, r1addr, check_alive = ( + p1, r1, check_alive = ( self._rawrefcount_pair(42, create_immortal=True)) check_alive(0) self._collect(major=True) From pypy.commits at gmail.com Sun Mar 6 13:16:18 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 06 Mar 2016 10:16:18 -0800 (PST) Subject: [pypy-commit] pypy rawrefcount-review: Move test class state into separate class GCSpace Message-ID: <56dc73f2.654fc20a.1121.ffff8486@mx.google.com> Author: Ronan Lamy Branch: rawrefcount-review Changeset: r82823:d4a535a667c8 Date: 2016-03-06 17:58 +0000 http://bitbucket.org/pypy/pypy/changeset/d4a535a667c8/ Log: Move test class state into separate class GCSpace diff --git a/rpython/memory/gc/test/test_direct.py b/rpython/memory/gc/test/test_direct.py --- a/rpython/memory/gc/test/test_direct.py +++ b/rpython/memory/gc/test/test_direct.py @@ -13,7 +13,7 @@ from rpython.memory.gc import minimark, incminimark from rpython.memory.gctypelayout import zero_gc_pointers_inside, zero_gc_pointers from rpython.rlib.debug import debug_print -import pdb + WORD = LONG_BIT // 8 ADDR_ARRAY = lltype.Array(llmemory.Address) @@ -29,15 +29,15 @@ class DirectRootWalker(object): - def __init__(self, tester): - self.tester = tester + def __init__(self, space): + self.space = space def walk_roots(self, collect_stack_root, collect_static_in_prebuilt_nongc, collect_static_in_prebuilt_gc, is_minor=False): - gc = self.tester.gc - layoutbuilder = self.tester.layoutbuilder + gc = self.space.gc + layoutbuilder = self.space.layoutbuilder if collect_static_in_prebuilt_gc: for addrofaddr in layoutbuilder.addresses_of_static_ptrs: if addrofaddr.address[0]: @@ -47,7 +47,7 @@ if addrofaddr.address[0]: collect_static_in_prebuilt_nongc(gc, addrofaddr) if collect_stack_root: - stackroots = self.tester.stackroots + stackroots = self.space.stackroots a = lltype.malloc(ADDR_ARRAY, len(stackroots), flavor='raw') for i in range(len(a)): a[i] = llmemory.cast_ptr_to_adr(stackroots[i]) @@ -67,22 +67,18 @@ pass -class BaseDirectGCTest(object): - GC_PARAMS = {} - - def setup_method(self, meth): +class GCSpace(object): + def __init__(self, GCClass, GC_PARAMS): from rpython.config.translationoption import 
get_combined_translation_config config = get_combined_translation_config(translating=True).translation self.stackroots = [] - GC_PARAMS = self.GC_PARAMS.copy() - if hasattr(meth, 'GC_PARAMS'): - GC_PARAMS.update(meth.GC_PARAMS) + GC_PARAMS = GC_PARAMS.copy() GC_PARAMS['translated_to_c'] = False - self.gc = self.GCClass(config, **GC_PARAMS) + self.gc = GCClass(config, **GC_PARAMS) self.gc.DEBUG = True self.rootwalker = DirectRootWalker(self) self.gc.set_root_walker(self.rootwalker) - self.layoutbuilder = TypeLayoutBuilder(self.GCClass) + self.layoutbuilder = TypeLayoutBuilder(GCClass) self.get_type_id = self.layoutbuilder.get_type_id self.layoutbuilder.initialize_gc_query_function(self.gc) self.gc.setup() @@ -115,9 +111,34 @@ zero_gc_pointers_inside(obj_ptr, TYPE) return obj_ptr +class BaseDirectGCTest(object): + GC_PARAMS = {} + + def setup_method(self, meth): + GC_PARAMS = self.GC_PARAMS.copy() + if hasattr(meth, 'GC_PARAMS'): + GC_PARAMS.update(meth.GC_PARAMS) + self.space = GCSpace(self.GCClass, GC_PARAMS) + self.stackroots = self.space.stackroots + self.gc = self.space.gc + self.get_type_id = self.space.get_type_id + + def consider_constant(self, p): + self.space.consider_constant(p) + + def write(self, p, fieldname, newvalue): + self.space.write(p, fieldname, newvalue) + + def writearray(self, p, index, newvalue): + self.space.writearray(p, index, newvalue) + + def malloc(self, TYPE, n=None): + return self.space.malloc(TYPE, n) + + class DirectGCTest(BaseDirectGCTest): - + def test_simple(self): p = self.malloc(S) p.x = 5 @@ -679,7 +700,7 @@ #ensure all the ptr fields are zeroed assert p.prev == lltype.nullptr(S) assert p.next == lltype.nullptr(S) - + def test_malloc_varsize_no_cleanup(self): x = lltype.Signed VAR1 = lltype.GcArray(x) @@ -744,4 +765,4 @@ assert elem.prev == lltype.nullptr(S) assert elem.next == lltype.nullptr(S) - + From pypy.commits at gmail.com Sun Mar 6 13:18:37 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 06 Mar 2016 10:18:37 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: (untested) fixed two issues in the ppc zero_array Message-ID: <56dc747d.654fc20a.1121.ffff8544@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82824:4cca8fad1c1f Date: 2016-03-06 19:17 +0100 http://bitbucket.org/pypy/pypy/changeset/4cca8fad1c1f/ Log: (untested) fixed two issues in the ppc zero_array diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -879,13 +879,19 @@ base_loc, startindex_loc, length_loc, ofs_loc = arglocs stepsize = 8 + shift_by = 3 if IS_PPC_32: stepsize = 4 + shift_by = 2 if length_loc.is_imm(): if length_loc.value <= 0: return # nothing to do + if startindex_loc.is_imm(): + self.mc.load_imm(r.SCRATCH, startindex_loc.value) + startindex_loc = r.SCRATCH + self.mc.addi(r.SCRATCH2.value, startindex_loc.value, ofs_loc.getint()) ofs_loc = r.SCRATCH2 # ofs_loc is now the startindex in bytes + the array offset @@ -900,6 +906,9 @@ jz_location = self.mc.currpos() self.mc.trap() + self.mc.sradi(r.SCRATCH.value, r.length_loc.value, shift_by) + self.mc.mtctr(r.SCRATCH.value) # store the length in count register + self.mc.li(r.SCRATCH.value, 0) # NOTE the following assumes that bytes have been passed to both startindex @@ -908,21 +917,17 @@ # first store of case 1) self.eza_stXux(r.SCRATCH.value, ofs_loc.value, base_loc.value, stepsize) - self.mc.subi(length_loc.value, length_loc.value, stepsize) - 
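# The rewritten emit path keeps the word count in the CTR register (mtctr and
# bdnz) instead of subtracting and re-comparing length on every iteration:
# length >> shift_by whole words are cleared first, then the remaining
# length & (stepsize - 1) bytes.  A plain Python model of the intended effect
# (illustrative sketch only; names follow the locations in the assembler code,
# and "mem" is just a bytearray here):
def zero_array_model(mem, base, ofs, startindex, length, stepsize=8):
    """Clear `length` bytes of `mem` starting at base + ofs + startindex."""
    addr = base + ofs + startindex
    # phase 1: whole words, length >> shift_by of them
    # (shift_by is 3 for 8-byte words, 2 for 4-byte words on 32-bit PPC)
    for _ in range(length // stepsize):
        for b in range(stepsize):
            mem[addr + b] = 0
        addr += stepsize
    # phase 2: the leftover tail, length & (stepsize - 1) single bytes
    for _ in range(length & (stepsize - 1)):
        mem[addr] = 0
        addr += 1

# e.g. zero_array_model(bytearray(b'\xff' * 64), 0, 16, 8, 11) clears 11 bytes
# starting at byte 24: one full 8-byte word followed by a 3-byte tail.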
self.mc.cmp_op(0, length_loc.value, stepsize, imm=True) - lt_location = self.mc.currpos() + bdz_location = self.mc.currpos() self.mc.trap() # jump over the loop if we are already done with 1) # 1) The next loop copies WORDS into the memory chunk starting at startindex # ending at startindex + length. These are bytes loop_location = self.mc.currpos() self.eza_stXu(r.SCRATCH.value, ofs_loc.value, stepsize, stepsize) - self.mc.subi(length_loc.value, length_loc.value, stepsize) - self.mc.cmp_op(0, length_loc.value, stepsize, imm=True) - self.mc.bge(loop_location - self.mc.currpos()) + self.mc.bdnz(loop_location - self.mc.currpos()) - pmc = OverwritingBuilder(self.mc, lt_location, 1) - pmc.blt(self.mc.currpos() - lt_location) + pmc = OverwritingBuilder(self.mc, bdz_location, 1) + pmc.bdz(self.mc.currpos() - bdz_location) pmc.overwrite() if jz_location != -1: @@ -933,11 +938,19 @@ # 2) There might be some bytes left to be written. # following scenario: length_loc == 3 bytes, stepsize == 4! # need to write the last bytes. - self.mc.cmp_op(0, length_loc.value, 0, imm=True) + + # move the last bytes to the count register + if length_loc.is_imm(): + self.mc.load_imm(r.SCRATCH, length_loc.value & (stepsize-1)) + else: + self.mc.andi(r.SCRATCH.value, length_loc, stepsize-1) + + self.mc.cmp_op(0, SCRATCH.value, 0, imm=True) jle_location = self.mc.currpos() self.mc.trap() - self.mc.mtctr(length_loc.value) + self.mc.mtctr(r.SCRATCH.value) + self.mc.li(r.SCRATCH.value, 0) loop_position = self.mc.currpos() self.eza_stXu(r.SCRATCH.value, ofs_loc.value, 1, 1) @@ -948,7 +961,6 @@ pmc.overwrite() - class StrOpAssembler(object): _mixin_ = True From pypy.commits at gmail.com Sun Mar 6 14:34:28 2016 From: pypy.commits at gmail.com (mjacob) Date: Sun, 06 Mar 2016 11:34:28 -0800 (PST) Subject: [pypy-commit] pypy py3k: hg merge b68cfadb2cb8 Message-ID: <56dc8644.84c9c20a.e9238.ffff9ea5@mx.google.com> Author: Manuel Jacob Branch: py3k Changeset: r82825:c8ebbe78a212 Date: 2016-03-06 20:30 +0100 http://bitbucket.org/pypy/pypy/changeset/c8ebbe78a212/ Log: hg merge b68cfadb2cb8 This is the last changeset which was merged into release-5.x. diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -11,29 +11,29 @@ Amaury Forgeot d'Arc Antonio Cuni Samuele Pedroni + Matti Picus Alex Gaynor Brian Kearns - Matti Picus Philip Jenvey Michael Hudson + Ronan Lamy David Schneider + Manuel Jacob Holger Krekel Christian Tismer Hakan Ardo - Manuel Jacob - Ronan Lamy Benjamin Peterson + Richard Plangger Anders Chrigstrom Eric van Riet Paap Wim Lavrijsen - Richard Plangger Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen + Remi Meier Lukas Diekmann Sven Hager Anders Lehmann - Remi Meier Aurelien Campeas Niklaus Haldimann Camillo Bruni @@ -42,8 +42,8 @@ Romain Guillebert Leonardo Santagada Seo Sanghyeon + Ronny Pfannschmidt Justin Peel - Ronny Pfannschmidt David Edelsohn Anders Hammarquist Jakub Gustak @@ -65,6 +65,7 @@ Tyler Wade Michael Foord Stephan Diehl + Vincent Legoll Stefan Schwarzer Valentino Volonghi Tomek Meka @@ -75,9 +76,9 @@ Jean-Paul Calderone Timo Paulssen Squeaky + Marius Gedminas Alexandre Fayolle Simon Burton - Marius Gedminas Martin Matusiak Konstantin Lopuhin Wenzhu Man @@ -86,16 +87,20 @@ Ivan Sichmann Freitas Greg Price Dario Bertini + Stefano Rivera Mark Pearse Simon Cross Andreas Stührk - Stefano Rivera + Edd Barrett Jean-Philippe St. 
Pierre Guido van Rossum Pavel Vinogradov + Jeremy Thurgood Paweł Piotr Przeradowski + Spenser Bauman Paul deGrandis Ilya Osadchiy + marky1991 Tobias Oberstein Adrian Kuhn Boris Feigin @@ -104,14 +109,12 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Edd Barrett + Tobias Pape Wanja Saatkamp Gerald Klix Mike Blume - Tobias Pape Oscar Nierstrasz Stefan H. Muller - Jeremy Thurgood Rami Chowdhury Eugene Oden Henry Mason @@ -123,6 +126,8 @@ Lukas Renggli Guenter Jantzen Ned Batchelder + Tim Felgentreff + Anton Gulenko Amit Regmi Ben Young Nicolas Chauvat @@ -132,12 +137,12 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira - Tim Felgentreff + Yichao Yu Rocco Moretti Gintautas Miliauskas Michael Twomey Lucian Branescu Mihaila - Yichao Yu + Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -161,33 +166,33 @@ Stanislaw Halik Mikael Schönenberg Berkin Ilbeyi - Elmo M?ntynen + Elmo Mäntynen + Faye Zhao Jonathan David Riehl Anders Qvist Corbin Simpson Chirag Jadwani Beatrice During Alex Perry - Vincent Legoll + Vaibhav Sood Alan McIntyre - Spenser Bauman + William Leslie Alexander Sedov Attila Gobi + Jasper.Schulz Christopher Pope - Devin Jeanpierre - Vaibhav Sood Christian Tismer Marc Abramowitz Dan Stromberg Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan + Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang - Anton Gulenko Gabriel Lukas Vacek Andrew Dalke @@ -195,6 +200,7 @@ Jakub Stasiak Nathan Taylor Vladimir Kryachko + Omer Katz Jacek Generowicz Alejandro J. Cura Jacob Oscarson @@ -209,6 +215,7 @@ Lars Wassermann Philipp Rustemeuer Henrik Vendelbo + Richard Lancaster Dan Buch Miguel de Val Borro Artur Lisiecki @@ -220,18 +227,18 @@ Tomo Cocoa Kim Jin Su Toni Mattis + Amber Brown Lucas Stadler Julian Berman Markus Holtermann roberto at goyle Yury V. Zaytsev Anna Katrina Dominguez - William Leslie Bobby Impollonia - Faye Zhao timo at eistee.fritz.box Andrew Thompson Yusei Tahara + Aaron Tubbs Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado @@ -243,6 +250,7 @@ Christopher Armstrong Michael Hudson-Doyle Anders Sigfridsson + Nikolay Zinov Yasir Suhail Jason Michalski rafalgalczynski at gmail.com @@ -252,6 +260,7 @@ Gustavo Niemeyer Stephan Busemann Rafał Gałczyński + Matt Bogosian Christian Muirhead Berker Peksag James Lan @@ -286,9 +295,9 @@ Stefan Marr jiaaro Mads Kiilerich - Richard Lancaster opassembler.py Antony Lee + Jason Madden Yaroslav Fedevych Jim Hunziker Markus Unterwaditzer @@ -297,6 +306,7 @@ squeaky Zearin soareschen + Jonas Pfannschmidt Kurt Griffiths Mike Bayer Matthew Miller @@ -311,4 +321,3 @@ Julien Phalip Roman Podoliaka Dan Loewenherz - diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-5.0.0.rst release-4.0.1.rst release-4.0.0.rst release-2.6.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. toctree:: whatsnew-head.rst + whatsnew-5.0.0.rst whatsnew-4.0.1.rst whatsnew-4.0.0.rst whatsnew-2.6.1.rst diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-5.0.0.rst @@ -0,0 +1,100 @@ +========== +PyPy 5.0.0 +========== + +We have released PyPy 5.0.0, about three months after PyPy 4.0.0. +We encourage all users of PyPy to update to this version. 
There are +bug fixes and a major upgrade to our c-api layer (cpyext) + +You can download the PyPy 5.0.0 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + +CFFI +==== + +While not applicable only to PyPy, `cffi`_ is arguably our most significant +contribution to the python ecosystem. PyPy 5.0.0 ships with +`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a c program. + +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`cffi`: https://cffi.readthedocs.org +.. _`cffi-1.5.2`: http://cffi.readthedocs.org/en/latest/whatsnew.html#v1-5-2 +.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html +.. _`numpy`: https://bitbucket.org/pypy/numpy + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. + +This release supports **x86** machines on most common operating systems +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, freebsd), +newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the +big- and little-endian variants of **ppc64** running Linux. + +.. _`pypy and cpython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Other Highlights (since 4.0.1 released in November 2015) +======================================================= + +* Bug Fixes + + * + + * + + * + + * Issues reported with our previous release were resolved_ after reports from users on + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy + +* New features: + + * + + * + + * + +* Numpy: + + * + + * + + +* Performance improvements and refactorings: + + * + + * + + * + +.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html + +Please update, and continue to help us make PyPy better. + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-5.0.0.rst copy from pypy/doc/whatsnew-head.rst copy to pypy/doc/whatsnew-5.0.0.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-5.0.0.rst @@ -1,6 +1,6 @@ -========================= -What's new in PyPy 4.1.+ -========================= +======================== +What's new in PyPy 5.0.0 +======================== .. this is a revision shortly after release-4.0.1 .. startrev: 4b5c840d0da2 @@ -183,4 +183,11 @@ .. branch: vlen-resume -Compress resume data, saving 10-20% of memory consumed by the JIT \ No newline at end of file +Compress resume data, saving 10-20% of memory consumed by the JIT + +.. branch: issue-2248 + +.. branch: ndarray-setitem-filtered + +Fix boolean-array indexing in micronumpy + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,186 +1,8 @@ ========================= -What's new in PyPy 4.1.+ +What's new in PyPy 5.0.+ ========================= -.. 
this is a revision shortly after release-4.0.1 -.. startrev: 4b5c840d0da2 +.. this is a revision shortly after release-5.0.0 +.. startrev: 6d13e55b962a -Fixed ``_PyLong_FromByteArray()``, which was buggy. -Fixed a crash with stacklets (or greenlets) on non-Linux machines -which showed up if you forget stacklets without resuming them. - -.. branch: numpy-1.10 - -Fix tests to run cleanly with -A and start to fix micronumpy for upstream numpy -which is now 1.10.2 - -.. branch: osx-flat-namespace - -Fix the cpyext tests on OSX by linking with -flat_namespace - -.. branch: anntype - -Refactor and improve exception analysis in the annotator. - -.. branch: posita/2193-datetime-timedelta-integrals - -Fix issue #2193. ``isinstance(..., int)`` => ``isinstance(..., numbers.Integral)`` -to allow for alternate ``int``-like implementations (e.g., ``future.types.newint``) - -.. branch: faster-rstruct - -Improve the performace of struct.unpack, which now directly reads inside the -string buffer and directly casts the bytes to the appropriate type, when -allowed. Unpacking of floats and doubles is about 15 times faster now, while -for integer types it's up to ~50% faster for 64bit integers. - -.. branch: wrap-specialisation - -Remove unnecessary special handling of space.wrap(). - -.. branch: compress-numbering - -Improve the memory signature of numbering instances in the JIT. This should massively -decrease the amount of memory consumed by the JIT, which is significant for most programs. - -.. branch: fix-trace-too-long-heuristic - -Improve the heuristic when disable trace-too-long - -.. branch: fix-setslice-can-resize - -Make rlist's ll_listsetslice() able to resize the target list to help -simplify objspace/std/listobject.py. Was issue #2196. - -.. branch: anntype2 - -A somewhat random bunch of changes and fixes following up on branch 'anntype'. Highlights: - -- Implement @doubledispatch decorator and use it for intersection() and difference(). - -- Turn isinstance into a SpaceOperation - -- Create a few direct tests of the fundamental annotation invariant in test_model.py - -- Remove bookkeeper attribute from DictDef and ListDef. - -.. branch: cffi-static-callback - -.. branch: vecopt-absvalue - -- Enhancement. Removed vector fields from AbstractValue. - -.. branch: memop-simplify2 - -Simplification. Backends implement too many loading instructions, only having a slightly different interface. -Four new operations (gc_load/gc_load_indexed, gc_store/gc_store_indexed) replace all the -commonly known loading operations - -.. branch: more-rposix - -Move wrappers for OS functions from `rpython/rtyper` to `rpython/rlib` and -turn them into regular RPython functions. Most RPython-compatible `os.*` -functions are now directly accessible as `rpython.rposix.*`. - -.. branch: always-enable-gil - -Simplify a bit the GIL handling in non-jitted code. Fixes issue #2205. - -.. branch: flowspace-cleanups - -Trivial cleanups in flowspace.operation : fix comment & duplicated method - -.. branch: test-AF_NETLINK - -Add a test for pre-existing AF_NETLINK support. Was part of issue #1942. - -.. branch: small-cleanups-misc - -Trivial misc cleanups: typo, whitespace, obsolete comments - -.. branch: cpyext-slotdefs -.. branch: fix-missing-canraise -.. branch: whatsnew - -.. branch: fix-2211 - -Fix the cryptic exception message when attempting to use extended slicing -in rpython. Was issue #2211. - -.. branch: ec-keepalive - -Optimize the case where, in a new C-created thread, we keep invoking -short-running Python callbacks. 
(CFFI on CPython has a hack to achieve -the same result.) This can also be seen as a bug fix: previously, -thread-local objects would be reset between two such calls. - -.. branch: globals-quasiimmut - -Optimize global lookups. - -.. branch: cffi-static-callback-embedding - -Updated to CFFI 1.5, which supports a new way to do embedding. -Deprecates http://pypy.readthedocs.org/en/latest/embedding.html. - -.. branch: fix-cpython-ssl-tests-2.7 - -Fix SSL tests by importing cpython's patch - - -.. branch: remove-getfield-pure - -Remove pure variants of ``getfield_gc_*`` operations from the JIT. Relevant -optimizations instead consult the field descriptor to determine the purity of -the operation. Additionally, pure ``getfield`` operations are now handled -entirely by `rpython/jit/metainterp/optimizeopt/heap.py` rather than -`rpython/jit/metainterp/optimizeopt/pure.py`, which can result in better codegen -for traces containing a large number of pure getfield operations. - -.. branch: exctrans - -Try to ensure that no new functions get annotated during the 'source_c' phase. -Refactor sandboxing to operate at a higher level. - -.. branch: cpyext-bootstrap - -.. branch: vmprof-newstack - -Refactor vmprof to work cross-operating-system. - -.. branch: seperate-strucmember_h - -Seperate structmember.h from Python.h Also enhance creating api functions -to specify which header file they appear in (previously only pypy_decl.h) - -.. branch: llimpl - -Refactor register_external(), remove running_on_llinterp mechanism and -apply sandbox transform on externals at the end of annotation. - -.. branch: cffi-embedding-win32 - -.. branch: windows-vmprof-support - -vmprof should work on Windows. - - -.. branch: reorder-map-attributes - -When creating instances and adding attributes in several different orders -depending on some condition, the JIT would create too much code. This is now -fixed. - -.. branch: cpyext-gc-support-2 - -Improve CPython C API support, which means lxml now runs unmodified -(after removing pypy hacks, pending pull request) - -.. branch: look-inside-tuple-hash - -Look inside tuple hash, improving mdp benchmark - -.. branch: vlen-resume - -Compress resume data, saving 10-20% of memory consumed by the JIT \ No newline at end of file diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -143,7 +143,7 @@ self.check(['-S', '-O', '--info'], {}, output_contains='translation') self.check(['-S', '-O', '--version'], {}, output_contains='Python') self.check(['-S', '-OV'], {}, output_contains='Python') - self.check(['--jit', 'foobar', '-S'], {}, sys_argv=[''], + self.check(['--jit', 'off', '-S'], {}, sys_argv=[''], run_stdin=True, no_site=1) self.check(['-c', 'pass'], {}, sys_argv=['-c'], run_command='pass') self.check(['-cpass'], {}, sys_argv=['-c'], run_command='pass') diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,8 +29,8 @@ #define PY_VERSION "3.2.5" /* PyPy version as a string */ -#define PYPY_VERSION "4.1.0-alpha0" -#define PYPY_VERSION_NUM 0x04010000 +#define PYPY_VERSION "5.1.0-alpha0" +#define PYPY_VERSION_NUM 0x05010000 /* Defined to mean a PyPy where cpyext holds more regular references to PyObjects, e.g. 
staying alive as long as the internal PyPy object diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -298,7 +298,14 @@ except IndexError: # not a single result chunks = self._prepare_slice_args(space, w_index) - return new_view(space, orig_arr, chunks) + copy = False + if isinstance(chunks[0], BooleanChunk): + # numpy compatibility + copy = True + w_ret = new_view(space, orig_arr, chunks) + if copy: + w_ret = w_ret.descr_copy(space, space.wrap(w_ret.get_order())) + return w_ret def descr_setitem(self, space, orig_arr, w_index, w_value): try: diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -22,7 +22,8 @@ from pypy.module.micronumpy.flagsobj import W_FlagsObject from pypy.module.micronumpy.strides import ( get_shape_from_iterable, shape_agreement, shape_agreement_multiple, - is_c_contiguous, is_f_contiguous, calc_strides, new_view) + is_c_contiguous, is_f_contiguous, calc_strides, new_view, BooleanChunk, + SliceChunk) from pypy.module.micronumpy.casting import can_cast_array from pypy.module.micronumpy.descriptor import get_dtype_cache @@ -204,7 +205,13 @@ if iter_shape is None: # w_index is a list of slices, return a view chunks = self.implementation._prepare_slice_args(space, w_index) - return new_view(space, self, chunks) + copy = False + if isinstance(chunks[0], BooleanChunk): + copy = True + w_ret = new_view(space, self, chunks) + if copy: + w_ret = w_ret.descr_copy(space, space.wrap(w_ret.get_order())) + return w_ret shape = res_shape + self.get_shape()[len(indexes):] w_res = W_NDimArray.from_shape(space, shape, self.get_dtype(), self.get_order(), w_instance=self) @@ -220,8 +227,24 @@ if iter_shape is None: # w_index is a list of slices chunks = self.implementation._prepare_slice_args(space, w_index) - view = new_view(space, self, chunks) - view.implementation.setslice(space, val_arr) + dim = -1 + view = self + for i, c in enumerate(chunks): + if isinstance(c, BooleanChunk): + dim = i + idx = c.w_idx + chunks.pop(i) + chunks.insert(0, SliceChunk(space.newslice(space.wrap(0), + space.w_None, space.w_None))) + break + if dim > 0: + view = self.implementation.swapaxes(space, self, 0, dim) + if dim >= 0: + view = new_view(space, self, chunks) + view.setitem_filter(space, idx, val_arr) + else: + view = new_view(space, self, chunks) + view.implementation.setslice(space, val_arr) return if support.product(iter_shape) == 0: return diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -97,22 +97,19 @@ # filter by axis dim filtr = chunks[dim] assert isinstance(filtr, BooleanChunk) + # XXX this creates a new array, and fails in setitem w_arr = w_arr.getitem_filter(space, filtr.w_idx, axis=dim) arr = w_arr.implementation chunks[dim] = SliceChunk(space.newslice(space.wrap(0), - space.wrap(-1), space.w_None)) + space.w_None, space.w_None)) r = calculate_slice_strides(space, arr.shape, arr.start, arr.get_strides(), arr.get_backstrides(), chunks) else: r = calculate_slice_strides(space, arr.shape, arr.start, arr.get_strides(), arr.get_backstrides(), chunks) shape, start, strides, backstrides = r - w_ret = W_NDimArray.new_slice(space, start, strides[:], backstrides[:], + return W_NDimArray.new_slice(space, start, strides[:], backstrides[:], shape[:], arr, 
w_arr) - if dim == 0: - # Do not return a view - return w_ret.descr_copy(space, space.wrap(w_ret.get_order())) - return w_ret @jit.unroll_safe def _extend_shape(old_shape, chunks): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2541,8 +2541,10 @@ assert b.base is None b = a[:, np.array([True, False, True])] assert b.base is not None + a[np.array([True, False]), 0] = 100 b = a[np.array([True, False]), 0] - assert (b ==[0]).all() + assert b.shape == (1,) + assert (b ==[100]).all() def test_scalar_indexing(self): import numpy as np diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -68,9 +68,12 @@ pipe.returncode,)) if stderr.startswith('SKIP:'): py.test.skip(stderr) - if stderr.startswith('debug_alloc.h:'): # lldebug builds - stderr = '' + #if stderr.startswith('debug_alloc.h:'): # lldebug builds + # stderr = '' #assert not stderr + if stderr: + print '*** stderr of the subprocess: ***' + print stderr # if discard_stdout_before_last_line: stdout = stdout.splitlines(True)[-1] diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (4, 1, 0, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (5, 1, 0, "alpha", 0) #XXX # sync patchlevel.h import pypy diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py @@ -12,7 +12,9 @@ def create_venv(name): tmpdir = udir.join(name) try: - subprocess.check_call(['virtualenv', '--distribute', + subprocess.check_call(['virtualenv', + #'--never-download', <= could be added, but causes failures + # in random cases on random machines '-p', os.path.abspath(sys.executable), str(tmpdir)]) except OSError as e: diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c @@ -1,3 +1,4 @@ +/* Generated by pypy/tool/import_cffi.py */ #include extern int add1(int, int); diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c @@ -1,3 +1,4 @@ +/* Generated by pypy/tool/import_cffi.py */ #include extern int add1(int, int); diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c @@ -1,3 +1,4 @@ +/* Generated by pypy/tool/import_cffi.py */ #include #ifdef _MSC_VER diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c 
b/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c @@ -1,10 +1,12 @@ +/* Generated by pypy/tool/import_cffi.py */ #include #include #include #ifdef PTEST_USE_THREAD # include -# include -static sem_t done; +static pthread_mutex_t mutex1 = PTHREAD_MUTEX_INITIALIZER; +static pthread_cond_t cond1 = PTHREAD_COND_INITIALIZER; +static int remaining; #endif @@ -54,8 +56,11 @@ printf("time per call: %.3g\n", t); #ifdef PTEST_USE_THREAD - int status = sem_post(&done); - assert(status == 0); + pthread_mutex_lock(&mutex1); + remaining -= 1; + if (!remaining) + pthread_cond_signal(&cond1); + pthread_mutex_unlock(&mutex1); #endif return arg; @@ -68,19 +73,19 @@ start_routine(0); #else pthread_t th; - int i, status = sem_init(&done, 0, 0); - assert(status == 0); + int i, status; add1(0, 0); /* this is the main thread */ + remaining = PTEST_USE_THREAD; for (i = 0; i < PTEST_USE_THREAD; i++) { status = pthread_create(&th, NULL, start_routine, NULL); assert(status == 0); } - for (i = 0; i < PTEST_USE_THREAD; i++) { - status = sem_wait(&done); - assert(status == 0); - } + pthread_mutex_lock(&mutex1); + while (remaining) + pthread_cond_wait(&cond1, &mutex1); + pthread_mutex_unlock(&mutex1); #endif return 0; } diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py @@ -33,8 +33,12 @@ pythonpath.insert(0, cffi_base) return os.pathsep.join(pythonpath) -def setup_module(mod): - mod.org_env = os.environ.copy() +def copy_away_env(): + global org_env + try: + org_env + except NameError: + org_env = os.environ.copy() class EmbeddingTests: @@ -122,6 +126,7 @@ os.chdir(curdir) def patch_environment(self): + copy_away_env() path = self.get_path() # for libpypy-c.dll or Python27.dll path = os.path.split(sys.executable)[0] + os.path.pathsep + path diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h @@ -1,10 +1,45 @@ +/* Generated by pypy/tool/import_cffi.py */ /************************************************************/ #ifndef _MSC_VER /************************************************************/ #include -#include + +/* don't include , it is not available on OS/X */ + +typedef struct { + pthread_mutex_t mutex1; + pthread_cond_t cond1; + unsigned int value; +} sem_t; + +static int sem_init(sem_t *sem, int pshared, unsigned int value) +{ + assert(pshared == 0); + sem->value = value; + return (pthread_mutex_init(&sem->mutex1, NULL) || + pthread_cond_init(&sem->cond1, NULL)); +} + +static int sem_post(sem_t *sem) +{ + pthread_mutex_lock(&sem->mutex1); + sem->value += 1; + pthread_cond_signal(&sem->cond1); + pthread_mutex_unlock(&sem->mutex1); + return 0; +} + +static int sem_wait(sem_t *sem) +{ + pthread_mutex_lock(&sem->mutex1); + while (sem->value == 0) + pthread_cond_wait(&sem->cond1, &sem->mutex1); + sem->value -= 1; + pthread_mutex_unlock(&sem->mutex1); + return 0; +} /************************************************************/ @@ -22,7 +57,7 @@ typedef HANDLE sem_t; typedef HANDLE pthread_t; -int sem_init(sem_t *sem, int pshared, unsigned int value) 
+static int sem_init(sem_t *sem, int pshared, unsigned int value) { assert(pshared == 0); assert(value == 0); @@ -30,26 +65,26 @@ return *sem ? 0 : -1; } -int sem_post(sem_t *sem) +static int sem_post(sem_t *sem) { return ReleaseSemaphore(*sem, 1, NULL) ? 0 : -1; } -int sem_wait(sem_t *sem) +static int sem_wait(sem_t *sem) { WaitForSingleObject(*sem, INFINITE); return 0; } -DWORD WINAPI myThreadProc(LPVOID lpParameter) +static DWORD WINAPI myThreadProc(LPVOID lpParameter) { void *(* start_routine)(void *) = (void *(*)(void *))lpParameter; start_routine(NULL); return 0; } -int pthread_create(pthread_t *thread, void *attr, - void *start_routine(void *), void *arg) +static int pthread_create(pthread_t *thread, void *attr, + void *start_routine(void *), void *arg) { assert(arg == NULL); *thread = CreateThread(NULL, 0, myThreadProc, start_routine, 0, NULL); diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c @@ -1,3 +1,4 @@ +/* Generated by pypy/tool/import_cffi.py */ #include #include #include "thread-test.h" diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c @@ -1,3 +1,4 @@ +/* Generated by pypy/tool/import_cffi.py */ #include #include #include "thread-test.h" diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c @@ -1,3 +1,4 @@ +/* Generated by pypy/tool/import_cffi.py */ #include #include #include "thread-test.h" diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c @@ -1,3 +1,4 @@ +/* Generated by pypy/tool/import_cffi.py */ #include #include #include "thread-test.h" diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -64,6 +64,9 @@ def setitem_str(self, w_dict, key, w_value): cell = self.getdictvalue_no_unwrapping(w_dict, key) + return self._setitem_str_cell_known(cell, w_dict, key, w_value) + + def _setitem_str_cell_known(self, cell, w_dict, key, w_value): w_value = write_cell(self.space, cell, w_value) if w_value is None: return @@ -74,10 +77,11 @@ space = self.space if space.is_w(space.type(w_key), space.w_unicode): key = space.str_w(w_key) - w_result = self.getitem_str(w_dict, key) + cell = self.getdictvalue_no_unwrapping(w_dict, key) + w_result = unwrap_cell(self.space, cell) if w_result is not None: return w_result - self.setitem_str(w_dict, key, w_default) + self._setitem_str_cell_known(cell, w_dict, key, w_default) return w_default else: self.switch_to_object_strategy(w_dict) diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -170,15 +170,11 @@ return self.floatval def int(self, space): + # this is a 
speed-up only, for space.int(w_float). if (type(self) is not W_FloatObject and space.is_overloaded(self, space.w_float, '__int__')): return W_Root.int(self, space) - try: - value = ovfcheck_float_to_int(self.floatval) - except OverflowError: - return newlong_from_float(space, self.floatval) - else: - return space.newint(value) + return self.descr_trunc(space) def is_w(self, space, w_other): from rpython.rlib.longlong2float import float2longlong @@ -417,11 +413,10 @@ return W_FloatObject(a) def descr_trunc(self, space): - whole = math.modf(self.floatval)[1] try: - value = ovfcheck_float_to_int(whole) + value = ovfcheck_float_to_int(self.floatval) except OverflowError: - return newlong_from_float(space, whole) + return newlong_from_float(space, self.floatval) else: return space.newint(value) @@ -656,7 +651,7 @@ __hash__ = interp2app(W_FloatObject.descr_hash), __format__ = interp2app(W_FloatObject.descr_format), __bool__ = interp2app(W_FloatObject.descr_bool), - __int__ = interp2app(W_FloatObject.int), + __int__ = interp2app(W_FloatObject.descr_trunc), __float__ = interp2app(W_FloatObject.descr_float), __trunc__ = interp2app(W_FloatObject.descr_trunc), __neg__ = interp2app(W_FloatObject.descr_neg), diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -1056,7 +1056,7 @@ if self is w_other.strategy: strategy = self if w_set.length() > w_other.length(): - # swap operants + # swap operands storage = self._intersect_unwrapped(w_other, w_set) else: storage = self._intersect_unwrapped(w_set, w_other) @@ -1066,7 +1066,7 @@ else: strategy = self.space.fromcache(ObjectSetStrategy) if w_set.length() > w_other.length(): - # swap operants + # swap operands storage = w_other.strategy._intersect_wrapped(w_other, w_set) else: storage = self._intersect_wrapped(w_set, w_other) diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ b/pypy/objspace/std/test/test_celldict.py @@ -114,22 +114,11 @@ class TestModuleDictImplementation(BaseTestRDictImplementation): StrategyClass = ModuleDictStrategy - -class TestModuleDictImplementationWithBuiltinNames(BaseTestRDictImplementation): - StrategyClass = ModuleDictStrategy - - string = "int" - string2 = "isinstance" - + setdefault_hash_count = 2 class TestDevolvedModuleDictImplementation(BaseTestDevolvedDictImplementation): StrategyClass = ModuleDictStrategy - -class TestDevolvedModuleDictImplementationWithBuiltinNames(BaseTestDevolvedDictImplementation): - StrategyClass = ModuleDictStrategy - - string = "int" - string2 = "isinstance" + setdefault_hash_count = 2 class AppTestCellDict(object): diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1305,6 +1305,9 @@ impl.setitem(x, x) assert type(impl.get_strategy()) is ObjectDictStrategy + + setdefault_hash_count = 1 + def test_setdefault_fast(self): on_pypy = "__pypy__" in sys.builtin_module_names impl = self.impl @@ -1312,11 +1315,11 @@ x = impl.setdefault(key, 1) assert x == 1 if on_pypy and self.FakeString is FakeString: - assert key.hash_count == 1 + assert key.hash_count == self.setdefault_hash_count x = impl.setdefault(key, 2) assert x == 1 if on_pypy and self.FakeString is FakeString: - assert key.hash_count == 2 + assert key.hash_count == self.setdefault_hash_count + 
1 def test_fallback_evil_key(self): class F(object): diff --git a/pypy/objspace/std/test/test_kwargsdict.py b/pypy/objspace/std/test/test_kwargsdict.py --- a/pypy/objspace/std/test/test_kwargsdict.py +++ b/pypy/objspace/std/test/test_kwargsdict.py @@ -119,10 +119,16 @@ def test_delitem(self): pass # delitem devolves for now + def test_setdefault_fast(self): + pass # not based on hashing at all + class TestDevolvedKwargsDictImplementation(BaseTestDevolvedDictImplementation): get_impl = get_impl StrategyClass = KwargsDictStrategy + def test_setdefault_fast(self): + pass # not based on hashing at all + class AppTestKwargsDictStrategy(object): def setup_class(cls): diff --git a/pypy/tool/import_cffi.py b/pypy/tool/import_cffi.py --- a/pypy/tool/import_cffi.py +++ b/pypy/tool/import_cffi.py @@ -7,11 +7,18 @@ import sys, py -def mangle(lines): - yield "# Generated by pypy/tool/import_cffi.py\n" - for line in lines: - line = line.replace('from testing', 'from pypy.module.test_lib_pypy.cffi_tests') - yield line +def mangle(lines, ext): + if ext == '.py': + yield "# Generated by pypy/tool/import_cffi.py\n" + for line in lines: + line = line.replace('from testing', 'from pypy.module.test_lib_pypy.cffi_tests') + yield line + elif ext in ('.c', '.h'): + yield "/* Generated by pypy/tool/import_cffi.py */\n" + for line in lines: + yield line + else: + raise AssertionError(ext) def main(cffi_dir): cffi_dir = py.path.local(cffi_dir) @@ -23,10 +30,12 @@ for p in (list(cffi_dir.join('cffi').visit(fil='*.py')) + list(cffi_dir.join('cffi').visit(fil='*.h'))): cffi_dest.join('..', p.relto(cffi_dir)).write(p.read()) - for p in cffi_dir.join('testing').visit(fil='*.py'): + for p in (list(cffi_dir.join('testing').visit(fil='*.py')) + + list(cffi_dir.join('testing').visit(fil='*.h')) + + list(cffi_dir.join('testing').visit(fil='*.c'))): path = test_dest.join(p.relto(cffi_dir.join('testing'))) path.join('..').ensure(dir=1) - path.write(''.join(mangle(p.readlines()))) + path.write(''.join(mangle(p.readlines(), p.ext))) if __name__ == '__main__': if len(sys.argv) != 2: diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -548,7 +548,9 @@ if cpu.supports_floats: def func(f0, f1, f2, f3, f4, f5, f6, i0, f7, i1, f8, f9): + seen.append((f0, f1, f2, f3, f4, f5, f6, i0, f7, i1, f8, f9)) return f0 + f1 + f2 + f3 + f4 + f5 + f6 + float(i0 + i1) + f7 + f8 + f9 + seen = [] F = lltype.Float I = lltype.Signed FUNC = self.FuncType([F] * 7 + [I] + [F] + [I] + [F]* 2, F) @@ -557,13 +559,15 @@ calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo.MOST_GENERAL) funcbox = self.get_funcbox(cpu, func_ptr) - args = ([boxfloat(.1) for i in range(7)] + - [InputArgInt(1), boxfloat(.2), InputArgInt(2), boxfloat(.3), - boxfloat(.4)]) + args = ([boxfloat(.0), boxfloat(.1), boxfloat(.2), boxfloat(.3), + boxfloat(.4), boxfloat(.5), boxfloat(.6), + InputArgInt(1), boxfloat(.7), InputArgInt(2), boxfloat(.8), + boxfloat(.9)]) res = self.execute_operation(rop.CALL_F, [funcbox] + args, 'float', descr=calldescr) - assert abs(longlong.getrealfloat(res) - 4.6) < 0.0001 + assert seen == [(.0, .1, .2, .3, .4, .5, .6, 1, .7, 2, .8, .9)] + assert abs(longlong.getrealfloat(res) - 7.5) < 0.0001 def test_call_many_arguments(self): # Test calling a function with a large number of arguments (more than diff --git a/rpython/memory/gctransform/boehm.py b/rpython/memory/gctransform/boehm.py --- 
a/rpython/memory/gctransform/boehm.py +++ b/rpython/memory/gctransform/boehm.py @@ -156,9 +156,9 @@ resulttype = lltype.Signed) hop.genop('int_invert', [v_int], resultvar=hop.spaceop.result) - def gcheader_initdata(self, defnode): + def gcheader_initdata(self, obj): hdr = lltype.malloc(self.HDR, immortal=True) - hdr.hash = lltype.identityhash_nocache(defnode.obj._as_ptr()) + hdr.hash = lltype.identityhash_nocache(obj._as_ptr()) return hdr._obj diff --git a/rpython/memory/gctransform/framework.py b/rpython/memory/gctransform/framework.py --- a/rpython/memory/gctransform/framework.py +++ b/rpython/memory/gctransform/framework.py @@ -1479,8 +1479,8 @@ resulttype=llmemory.Address) llops.genop('raw_memclear', [v_adr, v_totalsize]) - def gcheader_initdata(self, defnode): - o = lltype.top_container(defnode.obj) + def gcheader_initdata(self, obj): + o = lltype.top_container(obj) needs_hash = self.get_prebuilt_hash(o) is not None hdr = self.gc_header_for(o, needs_hash) return hdr._obj diff --git a/rpython/memory/gctransform/refcounting.py b/rpython/memory/gctransform/refcounting.py --- a/rpython/memory/gctransform/refcounting.py +++ b/rpython/memory/gctransform/refcounting.py @@ -286,6 +286,6 @@ hop.genop("direct_call", [self.identityhash_ptr, v_adr], resultvar=hop.spaceop.result) - def gcheader_initdata(self, defnode): - top = lltype.top_container(defnode.obj) + def gcheader_initdata(self, obj): + top = lltype.top_container(obj) return self.gcheaderbuilder.header_of_object(top)._obj diff --git a/rpython/rtyper/lltypesystem/rstr.py b/rpython/rtyper/lltypesystem/rstr.py --- a/rpython/rtyper/lltypesystem/rstr.py +++ b/rpython/rtyper/lltypesystem/rstr.py @@ -717,10 +717,7 @@ return cls.ll_count_char(s1, s2.chars[0], start, end) res = cls.ll_search(s1, s2, start, end, FAST_COUNT) - # For a few cases ll_search can return -1 to indicate an "impossible" - # condition for a string match, count just returns 0 in these cases. - if res < 0: - res = 0 + assert res >= 0 return res @staticmethod @@ -741,6 +738,8 @@ w = n - m if w < 0: + if mode == FAST_COUNT: + return 0 return -1 mlast = m - 1 diff --git a/rpython/rtyper/test/test_rdict.py b/rpython/rtyper/test/test_rdict.py --- a/rpython/rtyper/test/test_rdict.py +++ b/rpython/rtyper/test/test_rdict.py @@ -1,25 +1,62 @@ +import sys +from contextlib import contextmanager +import signal + from rpython.translator.translator import TranslationContext +from rpython.annotator.model import ( + SomeInteger, SomeString, SomeChar, SomeUnicodeString, SomeUnicodeCodePoint) +from rpython.annotator.dictdef import DictKey, DictValue from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.rtyper import rint -from rpython.rtyper.lltypesystem import rdict, rstr +from rpython.rtyper.lltypesystem import rdict from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rlib.objectmodel import r_dict from rpython.rlib.rarithmetic import r_int, r_uint, r_longlong, r_ulonglong import py -py.log.setconsumer("rtyper", py.log.STDOUT) +from hypothesis import settings +from hypothesis.strategies import ( + builds, sampled_from, binary, just, integers, text, characters, tuples) +from hypothesis.stateful import GenericStateMachine, run_state_machine_as_test -def not_really_random(): - """A random-ish generator, which also generates nice patterns from time to time. 
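# The hunks in this changeset replace the hand-rolled pseudo-random stress
# loops with hypothesis stateful testing: every operation performed on the
# low-level dict is mirrored in a plain reference dict and the two are
# compared as the test runs.  A minimal, self-contained illustration of that
# idea against an ordinary dict, using hypothesis' rule-based API (the diff
# itself drives rdict through the lower-level GenericStateMachine protocol,
# but the principle is the same):
from hypothesis import strategies as st
from hypothesis.stateful import RuleBasedStateMachine, rule

class DictAgainstModel(RuleBasedStateMachine):
    def __init__(self):
        super(DictAgainstModel, self).__init__()
        self.under_test = {}    # stand-in for the implementation under test
        self.reference = {}     # known-good model

    @rule(key=st.text(), value=st.integers())
    def setitem(self, key, value):
        self.under_test[key] = value
        self.reference[key] = value
        assert self.under_test == self.reference

    @rule(key=st.text())
    def delitem(self, key):
        if key in self.reference:   # only delete keys known to be present
            del self.under_test[key]
            del self.reference[key]
        assert self.under_test == self.reference

TestDictAgainstModel = DictAgainstModel.TestCase   # collected by py.test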
- Could be useful to detect problems associated with specific usage patterns.""" - import random - x = random.random() - print 'random seed: %r' % (x,) - for i in range(12000): - r = 3.4 + i/20000.0 - x = r*x - x*x - assert 0 <= x < 4 - yield x +def ann2strategy(s_value): + if isinstance(s_value, SomeChar): + return builds(chr, integers(min_value=0, max_value=255)) + elif isinstance(s_value, SomeString): + if s_value.can_be_None: + return binary() | just(None) + else: + return binary() + elif isinstance(s_value, SomeUnicodeCodePoint): + return characters() + elif isinstance(s_value, SomeUnicodeString): + if s_value.can_be_None: + return text() | just(None) + else: + return text() + elif isinstance(s_value, SomeInteger): + return integers(min_value=~sys.maxint, max_value=sys.maxint) + else: + raise TypeError("Cannot convert annotation %s to a strategy" % s_value) + + +if hasattr(signal, 'alarm'): + @contextmanager + def signal_timeout(n): + """A flaky context manager that throws an exception if the body of the + `with` block runs for longer than `n` seconds. + """ + def handler(signum, frame): + raise RuntimeError('timeout') + signal.signal(signal.SIGALRM, handler) + signal.alarm(n) + try: + yield + finally: + signal.alarm(0) +else: + @contextmanager + def signal_timeout(n): + yield class BaseTestRDict(BaseRtypingTest): @@ -199,9 +236,8 @@ def test_dict_copy(self): def func(): - # XXX this does not work if we use chars, only! dic = self.newdict() - dic['ab'] = 1 + dic['a'] = 1 dic['b'] = 2 d2 = dic.copy() ok = 1 @@ -999,33 +1035,11 @@ s_BA_dic = s.items[1] r_AB_dic = rtyper.getrepr(s_AB_dic) - r_BA_dic = rtyper.getrepr(s_AB_dic) + r_BA_dic = rtyper.getrepr(s_BA_dic) assert r_AB_dic.lowleveltype == r_BA_dic.lowleveltype - def test_dict_resize(self): - py.test.skip("test written for non-ordered dicts, update or kill") - # XXX we no longer automatically resize on 'del'. 
We need to - # hack a bit in this test to trigger a resize by continuing to - # fill the dict's table while keeping the actual size very low - # in order to force a resize to shrink the table back - def func(want_empty): - d = self.newdict() - for i in range(rdict.DICT_INITSIZE << 1): - d[chr(ord('a') + i)] = i - if want_empty: - for i in range(rdict.DICT_INITSIZE << 1): - del d[chr(ord('a') + i)] - for i in range(rdict.DICT_INITSIZE << 3): - d[chr(ord('A') - i)] = i - del d[chr(ord('A') - i)] - return d - res = self.interpret(func, [0]) - assert len(res.entries) > rdict.DICT_INITSIZE - res = self.interpret(func, [1]) - assert len(res.entries) == rdict.DICT_INITSIZE - def test_opt_dummykeymarker(self): def f(): d = {"hello": None} @@ -1117,183 +1131,131 @@ DICT = lltype.typeOf(llres.item1) assert sorted(DICT.TO.entries.TO.OF._flds) == ['f_hash', 'key', 'value'] - def test_deleted_entry_reusage_with_colliding_hashes(self): - py.test.skip("test written for non-ordered dicts, update or kill") - def lowlevelhash(value): - p = rstr.mallocstr(len(value)) - for i in range(len(value)): - p.chars[i] = value[i] - return rstr.LLHelpers.ll_strhash(p) - def func(c1, c2): - c1 = chr(c1) - c2 = chr(c2) - d = self.newdict() - d[c1] = 1 - d[c2] = 2 - del d[c1] - return d[c2] +class Action(object): + def __init__(self, method, args): + self.method = method + self.args = args - char_by_hash = {} - base = rdict.DICT_INITSIZE - for y in range(0, 256): - y = chr(y) - y_hash = lowlevelhash(y) % base - char_by_hash.setdefault(y_hash, []).append(y) + def execute(self, space): + getattr(space, self.method)(*self.args) - x, y = char_by_hash[0][:2] # find a collision + def __repr__(self): + return "space.%s(%s)" % (self.method, ', '.join(map(repr, self.args))) - res = self.interpret(func, [ord(x), ord(y)]) - assert res == 2 +class PseudoRTyper: + cache_dummy_values = {} - def func2(c1, c2): - c1 = chr(c1) - c2 = chr(c2) - d = self.newdict() - d[c1] = 1 - d[c2] = 2 - del d[c1] - d[c1] = 3 - return d +# XXX: None keys crash the test, but translation sort-of allows it +keytypes_s = [ + SomeString(), SomeInteger(), SomeChar(), + SomeUnicodeString(), SomeUnicodeCodePoint()] +st_keys = sampled_from(keytypes_s) +st_values = sampled_from(keytypes_s + [SomeString(can_be_None=True)]) - res = self.interpret(func2, [ord(x), ord(y)]) - for i in range(len(res.entries)): - assert not (res.entries.everused(i) and not res.entries.valid(i)) +class MappingSpace(object): + def __init__(self, s_key, s_value): + self.s_key = s_key + self.s_value = s_value + rtyper = PseudoRTyper() + r_key = s_key.rtyper_makerepr(rtyper) + r_value = s_value.rtyper_makerepr(rtyper) + dictrepr = self.MappingRepr(rtyper, r_key, r_value, + DictKey(None, s_key), + DictValue(None, s_value)) + dictrepr.setup() + self.l_dict = self.newdict(dictrepr) + self.reference = self.new_reference() + self.ll_key = r_key.convert_const + self.ll_value = r_value.convert_const - def func3(c0, c1, c2, c3, c4, c5, c6, c7): - d = self.newdict() - c0 = chr(c0) ; d[c0] = 1; del d[c0] - c1 = chr(c1) ; d[c1] = 1; del d[c1] - c2 = chr(c2) ; d[c2] = 1; del d[c2] - c3 = chr(c3) ; d[c3] = 1; del d[c3] - c4 = chr(c4) ; d[c4] = 1; del d[c4] - c5 = chr(c5) ; d[c5] = 1; del d[c5] - c6 = chr(c6) ; d[c6] = 1; del d[c6] - c7 = chr(c7) ; d[c7] = 1; del d[c7] - return d + def setitem(self, key, value): + ll_key = self.ll_key(key) + ll_value = self.ll_value(value) + self.ll_setitem(self.l_dict, ll_key, ll_value) + self.reference[key] = value + assert self.ll_contains(self.l_dict, ll_key) - if 
rdict.DICT_INITSIZE != 8: - py.test.skip("make dict tests more indepdent from initsize") - res = self.interpret(func3, [ord(char_by_hash[i][0]) - for i in range(rdict.DICT_INITSIZE)]) - count_frees = 0 - for i in range(len(res.entries)): - if not res.entries.everused(i): - count_frees += 1 - assert count_frees >= 3 + def delitem(self, key): + ll_key = self.ll_key(key) + self.ll_delitem(self.l_dict, ll_key) + del self.reference[key] + assert not self.ll_contains(self.l_dict, ll_key) -class TestStress: + def copydict(self): + self.l_dict = self.ll_copy(self.l_dict) + assert self.ll_len(self.l_dict) == len(self.reference) - def test_stress(self): - from rpython.annotator.dictdef import DictKey, DictValue - from rpython.annotator import model as annmodel - dictrepr = rdict.DictRepr(None, rint.signed_repr, rint.signed_repr, - DictKey(None, annmodel.SomeInteger()), - DictValue(None, annmodel.SomeInteger())) - dictrepr.setup() - l_dict = rdict.ll_newdict(dictrepr.DICT) - referencetable = [None] * 400 - referencelength = 0 - value = 0 + def cleardict(self): + self.ll_clear(self.l_dict) + self.reference.clear() + assert self.ll_len(self.l_dict) == 0 - def complete_check(): - for n, refvalue in zip(range(len(referencetable)), referencetable): - try: - gotvalue = rdict.ll_dict_getitem(l_dict, n) - except KeyError: - assert refvalue is None - else: - assert gotvalue == refvalue + def fullcheck(self): + assert self.ll_len(self.l_dict) == len(self.reference) + for key, value in self.reference.iteritems(): + assert (self.ll_getitem(self.l_dict, self.ll_key(key)) == + self.ll_value(value)) - for x in not_really_random(): - n = int(x*100.0) # 0 <= x < 400 - op = repr(x)[-1] - if op <= '2' and referencetable[n] is not None: - rdict.ll_dict_delitem(l_dict, n) - referencetable[n] = None - referencelength -= 1 - elif op <= '6': - rdict.ll_dict_setitem(l_dict, n, value) - if referencetable[n] is None: - referencelength += 1 - referencetable[n] = value - value += 1 - else: - try: - gotvalue = rdict.ll_dict_getitem(l_dict, n) - except KeyError: - assert referencetable[n] is None - else: - assert gotvalue == referencetable[n] - if 1.38 <= x <= 1.39: - complete_check() - print 'current dict length:', referencelength - assert l_dict.num_items == referencelength - complete_check() +class MappingSM(GenericStateMachine): + def __init__(self): + self.space = None - def test_stress_2(self): - yield self.stress_combination, True, False - yield self.stress_combination, False, True - yield self.stress_combination, False, False - yield self.stress_combination, True, True + def st_setitem(self): + return builds(Action, + just('setitem'), tuples(self.st_keys, self.st_values)) - def stress_combination(self, key_can_be_none, value_can_be_none): - from rpython.rtyper.lltypesystem.rstr import string_repr - from rpython.annotator.dictdef import DictKey, DictValue - from rpython.annotator import model as annmodel + def st_updateitem(self): + return builds(Action, + just('setitem'), + tuples(sampled_from(self.space.reference), self.st_values)) - print - print "Testing combination with can_be_None: keys %s, values %s" % ( - key_can_be_none, value_can_be_none) + def st_delitem(self): + return builds(Action, + just('delitem'), tuples(sampled_from(self.space.reference))) - class PseudoRTyper: - cache_dummy_values = {} - dictrepr = rdict.DictRepr(PseudoRTyper(), string_repr, string_repr, - DictKey(None, annmodel.SomeString(key_can_be_none)), - DictValue(None, annmodel.SomeString(value_can_be_none))) - dictrepr.setup() - print 
dictrepr.lowleveltype - for key, value in dictrepr.DICTENTRY._adtmeths.items(): - print ' %s = %s' % (key, value) - l_dict = rdict.ll_newdict(dictrepr.DICT) - referencetable = [None] * 400 - referencelength = 0 - values = not_really_random() - keytable = [string_repr.convert_const("foo%d" % n) - for n in range(len(referencetable))] + def steps(self): + if not self.space: + return builds(Action, just('setup'), tuples(st_keys, st_values)) + global_actions = [Action('copydict', ()), Action('cleardict', ())] + if self.space.reference: + return ( + self.st_setitem() | sampled_from(global_actions) | + self.st_updateitem() | self.st_delitem()) + else: + return (self.st_setitem() | sampled_from(global_actions)) - def complete_check(): - for n, refvalue in zip(range(len(referencetable)), referencetable): - try: - gotvalue = rdict.ll_dict_getitem(l_dict, keytable[n]) - except KeyError: - assert refvalue is None - else: - assert gotvalue == refvalue + def execute_step(self, action): + if action.method == 'setup': + self.space = self.Space(*action.args) + self.st_keys = ann2strategy(self.space.s_key) + self.st_values = ann2strategy(self.space.s_value) + return + with signal_timeout(1): # catches infinite loops + action.execute(self.space) - for x in not_really_random(): - n = int(x*100.0) # 0 <= x < 400 - op = repr(x)[-1] - if op <= '2' and referencetable[n] is not None: - rdict.ll_dict_delitem(l_dict, keytable[n]) - referencetable[n] = None - referencelength -= 1 - elif op <= '6': - ll_value = string_repr.convert_const(str(values.next())) - rdict.ll_dict_setitem(l_dict, keytable[n], ll_value) - if referencetable[n] is None: - referencelength += 1 - referencetable[n] = ll_value - else: - try: - gotvalue = rdict.ll_dict_getitem(l_dict, keytable[n]) - except KeyError: - assert referencetable[n] is None - else: - assert gotvalue == referencetable[n] - if 1.38 <= x <= 1.39: - complete_check() - print 'current dict length:', referencelength - assert l_dict.num_items == referencelength - complete_check() + def teardown(self): + if self.space: + self.space.fullcheck() + +class DictSpace(MappingSpace): + MappingRepr = rdict.DictRepr + new_reference = dict + ll_getitem = staticmethod(rdict.ll_dict_getitem) + ll_setitem = staticmethod(rdict.ll_dict_setitem) + ll_delitem = staticmethod(rdict.ll_dict_delitem) + ll_len = staticmethod(rdict.ll_dict_len) + ll_contains = staticmethod(rdict.ll_contains) + ll_copy = staticmethod(rdict.ll_copy) + ll_clear = staticmethod(rdict.ll_clear) + + def newdict(self, repr): + return rdict.ll_newdict(repr.DICT) + +class DictSM(MappingSM): + Space = DictSpace + +def test_hypothesis(): + run_state_machine_as_test( + DictSM, settings(max_examples=500, stateful_step_count=100)) diff --git a/rpython/rtyper/test/test_rordereddict.py b/rpython/rtyper/test/test_rordereddict.py --- a/rpython/rtyper/test/test_rordereddict.py +++ b/rpython/rtyper/test/test_rordereddict.py @@ -1,14 +1,18 @@ - import py from collections import OrderedDict +from hypothesis import settings +from hypothesis.stateful import run_state_machine_as_test + from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.lltypesystem import rordereddict, rstr from rpython.rlib.rarithmetic import intmask from rpython.rtyper.annlowlevel import llstr, hlstr -from rpython.rtyper.test.test_rdict import BaseTestRDict +from rpython.rtyper.test.test_rdict import ( + BaseTestRDict, MappingSpace, MappingSM) from rpython.rlib import objectmodel +rodct = rordereddict def get_indexes(ll_d): return 
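MappingSM above is the generic driver: steps() proposes a setup action first, then setitem/updateitem/delitem plus the copydict/cleardict global actions, and execute_step() replays each one against both the low-level dict and the plain reference mapping, with fullcheck() running in teardown(). A minimal sketch of the same pattern against an ordinary dict, using the same GenericStateMachine/run_state_machine_as_test API this test relies on; every name below is illustrative only:

from hypothesis import settings, strategies as st
from hypothesis.stateful import GenericStateMachine, run_state_machine_as_test

class SimpleDictSM(GenericStateMachine):
    def __init__(self):
        self.under_test = {}    # stands in for the low-level dict
        self.reference = {}     # plain dict used as the reference model

    def steps(self):
        sets = st.tuples(st.just('setitem'), st.integers(), st.integers())
        if not self.reference:
            return sets
        dels = st.tuples(st.just('delitem'),
                         st.sampled_from(sorted(self.reference)), st.none())
        return sets | dels

    def execute_step(self, step):
        op, key, value = step
        if op == 'setitem':
            self.under_test[key] = value
            self.reference[key] = value
        else:
            del self.under_test[key]
            del self.reference[key]

    def teardown(self):
        assert self.under_test == self.reference

def test_simple_dict_sm():
    run_state_machine_as_test(
        SimpleDictSM, settings(max_examples=50, stateful_step_count=50))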
ll_d.indexes._obj.container._as_ptr() @@ -330,124 +334,48 @@ assert res == 6 -class TestStress: +class ODictSpace(MappingSpace): + MappingRepr = rodct.OrderedDictRepr + new_reference = OrderedDict + ll_getitem = staticmethod(rodct.ll_dict_getitem) + ll_setitem = staticmethod(rodct.ll_dict_setitem) + ll_delitem = staticmethod(rodct.ll_dict_delitem) + ll_len = staticmethod(rodct.ll_dict_len) + ll_contains = staticmethod(rodct.ll_dict_contains) + ll_copy = staticmethod(rodct.ll_dict_copy) + ll_clear = staticmethod(rodct.ll_dict_clear) - def test_stress(self): - from rpython.annotator.dictdef import DictKey, DictValue - from rpython.annotator import model as annmodel - from rpython.rtyper import rint - from rpython.rtyper.test.test_rdict import not_really_random - rodct = rordereddict - dictrepr = rodct.OrderedDictRepr( - None, rint.signed_repr, rint.signed_repr, - DictKey(None, annmodel.SomeInteger()), - DictValue(None, annmodel.SomeInteger())) - dictrepr.setup() - l_dict = rodct.ll_newdict(dictrepr.DICT) - referencetable = [None] * 400 - referencelength = 0 - value = 0 + def newdict(self, repr): + return rodct.ll_newdict(repr.DICT) - def complete_check(): - for n, refvalue in zip(range(len(referencetable)), referencetable): - try: - gotvalue = rodct.ll_dict_getitem(l_dict, n) - except KeyError: - assert refvalue is None - else: - assert gotvalue == refvalue + def get_keys(self): + DICT = lltype.typeOf(self.l_dict).TO + ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT)) + ll_iter = rordereddict.ll_dictiter(ITER, self.l_dict) + ll_dictnext = rordereddict._ll_dictnext + keys_ll = [] + while True: + try: + num = ll_dictnext(ll_iter) + keys_ll.append(self.l_dict.entries[num].key) + except StopIteration: + break + return keys_ll - for x in not_really_random(): - n = int(x*100.0) # 0 <= x < 400 - op = repr(x)[-1] - if op <= '2' and referencetable[n] is not None: - rodct.ll_dict_delitem(l_dict, n) - referencetable[n] = None - referencelength -= 1 - elif op <= '6': - rodct.ll_dict_setitem(l_dict, n, value) - if referencetable[n] is None: - referencelength += 1 - referencetable[n] = value - value += 1 - else: - try: - gotvalue = rodct.ll_dict_getitem(l_dict, n) - except KeyError: - assert referencetable[n] is None - else: - assert gotvalue == referencetable[n] - if 1.38 <= x <= 1.39: - complete_check() - print 'current dict length:', referencelength - assert l_dict.num_live_items == referencelength - complete_check() + def fullcheck(self): + # overridden to also check key order + assert self.ll_len(self.l_dict) == len(self.reference) + keys_ll = self.get_keys() + assert len(keys_ll) == len(self.reference) + for key, ll_key in zip(self.reference, keys_ll): + assert self.ll_key(key) == ll_key + assert (self.ll_getitem(self.l_dict, self.ll_key(key)) == + self.ll_value(self.reference[key])) - def test_stress_2(self): - yield self.stress_combination, True, False - yield self.stress_combination, False, True - yield self.stress_combination, False, False - yield self.stress_combination, True, True - def stress_combination(self, key_can_be_none, value_can_be_none): - from rpython.rtyper.lltypesystem.rstr import string_repr - from rpython.annotator.dictdef import DictKey, DictValue - from rpython.annotator import model as annmodel - from rpython.rtyper.test.test_rdict import not_really_random - rodct = rordereddict +class ODictSM(MappingSM): + Space = ODictSpace - print - print "Testing combination with can_be_None: keys %s, values %s" % ( - key_can_be_none, value_can_be_none) - - class PseudoRTyper: - 
cache_dummy_values = {} - dictrepr = rodct.OrderedDictRepr( - PseudoRTyper(), string_repr, string_repr, - DictKey(None, annmodel.SomeString(key_can_be_none)), - DictValue(None, annmodel.SomeString(value_can_be_none))) - dictrepr.setup() - print dictrepr.lowleveltype - #for key, value in dictrepr.DICTENTRY._adtmeths.items(): - # print ' %s = %s' % (key, value) - l_dict = rodct.ll_newdict(dictrepr.DICT) - referencetable = [None] * 400 - referencelength = 0 - values = not_really_random() - keytable = [string_repr.convert_const("foo%d" % n) - for n in range(len(referencetable))] - - def complete_check(): - for n, refvalue in zip(range(len(referencetable)), referencetable): - try: - gotvalue = rodct.ll_dict_getitem(l_dict, keytable[n]) - except KeyError: - assert refvalue is None - else: - assert gotvalue == refvalue - - for x in not_really_random(): - n = int(x*100.0) # 0 <= x < 400 - op = repr(x)[-1] - if op <= '2' and referencetable[n] is not None: - rodct.ll_dict_delitem(l_dict, keytable[n]) - referencetable[n] = None - referencelength -= 1 - elif op <= '6': - ll_value = string_repr.convert_const(str(values.next())) - rodct.ll_dict_setitem(l_dict, keytable[n], ll_value) - if referencetable[n] is None: - referencelength += 1 - referencetable[n] = ll_value - else: - try: - gotvalue = rodct.ll_dict_getitem(l_dict, keytable[n]) - except KeyError: - assert referencetable[n] is None - else: - assert gotvalue == referencetable[n] - if 1.38 <= x <= 1.39: - complete_check() - print 'current dict length:', referencelength - assert l_dict.num_live_items == referencelength - complete_check() +def test_hypothesis(): + run_state_machine_as_test( + ODictSM, settings(max_examples=500, stateful_step_count=100)) diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -546,7 +546,7 @@ if needs_gcheader(T): gct = self.db.gctransformer if gct is not None: - self.gc_init = gct.gcheader_initdata(self) + self.gc_init = gct.gcheader_initdata(self.obj) db.getcontainernode(self.gc_init) else: self.gc_init = None @@ -677,7 +677,7 @@ if needs_gcheader(T): gct = self.db.gctransformer if gct is not None: - self.gc_init = gct.gcheader_initdata(self) + self.gc_init = gct.gcheader_initdata(self.obj) db.getcontainernode(self.gc_init) else: self.gc_init = None From pypy.commits at gmail.com Sun Mar 6 14:48:13 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 06 Mar 2016 11:48:13 -0800 (PST) Subject: [pypy-commit] pypy default: fix up release documentation for merge Message-ID: <56dc897d.c13fc20a.4392a.ffff9a1d@mx.google.com> Author: mattip Branch: Changeset: r82826:1ca5e965b73d Date: 2016-03-06 21:43 +0200 http://bitbucket.org/pypy/pypy/changeset/1ca5e965b73d/ Log: fix up release documentation for merge diff --git a/pypy/doc/whatsnew-5.0.0.rst b/pypy/doc/whatsnew-5.0.0.rst --- a/pypy/doc/whatsnew-5.0.0.rst +++ b/pypy/doc/whatsnew-5.0.0.rst @@ -191,3 +191,7 @@ Fix boolean-array indexing in micronumpy +.. branch: numpy_partition +Support ndarray.partition() as an app-level function numpy.core._partition_use, +provided as a cffi wrapper to upstream's implementation in the pypy/numpy repo + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,8 +3,5 @@ ========================= .. this is a revision shortly after release-5.0.0 -.. startrev: 6d13e55b962a +.. startrev: 7bb6381d084c -.. 
branch: numpy_partition -Support ndarray.partition() as an app-level function numpy.core._partition_use, -provided as a cffi wrapper to upstream's implementation in the pypy/numpy repo From pypy.commits at gmail.com Sun Mar 6 14:48:14 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 06 Mar 2016 11:48:14 -0800 (PST) Subject: [pypy-commit] pypy release-5.x: merge default into release Message-ID: <56dc897e.418f1c0a.8d718.1cf1@mx.google.com> Author: mattip Branch: release-5.x Changeset: r82827:a4efe1285120 Date: 2016-03-06 21:43 +0200 http://bitbucket.org/pypy/pypy/changeset/a4efe1285120/ Log: merge default into release diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -3,8 +3,20 @@ ========== We have released PyPy 5.0.0, about three months after PyPy 4.0.0. -We encourage all users of PyPy to update to this version. There are -bug fixes and a major upgrade to our c-api layer (cpyext) +We encourage all users of PyPy to update to this version. Apart from the usual +bug fixes, there is an ongoing effort to improve the warmup time and memory +usage of JIT-related metadata. The exact effects depend vastly on the program +you're running and can range from insignificant to warmup being up to 30% +faster and memory dropping by about 30%. + +We also merged a major upgrade to our C-API layer (cpyext), simplifying the +interaction between c-level objects and PyPy interpreter level objects. As a +result, lxml with its cython compiled component `passes all tests`_ on PyPy + +Users who have gotten used to vmprof_ on Linux, and those on other platforms +who have not yet tried its awesomeness, will be happy to hear that vmprof +now just works on MacOS and Windows too, in both PyPy (built-in support) and +CPython (as an installed module). You can download the PyPy 5.0.0 release here: @@ -33,6 +45,8 @@ .. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly .. _`help`: http://doc.pypy.org/en/latest/project-ideas.html .. _`numpy`: https://bitbucket.org/pypy/numpy +.. _`passes all tests`: https://bitbucket.org/pypy/compatibility/wiki/lxml +.. _vmprof: http://vmprof.readthedocs.org What is PyPy? ============= @@ -53,44 +67,156 @@ .. _`dynamic languages`: http://pypyjs.org Other Highlights (since 4.0.1 released in November 2015) -======================================================= +========================================================= + +* New features: + + * Support embedding PyPy in a C-program via cffi and static callbacks in cffi. + This deprecates the old method of embedding PyPy + + * Refactor vmprof to work cross-operating-system, deprecate using buggy + libunwind on Linux platforms. Vmprof even works on Windows now. + + * Support more of the C-API type slots, like tp_getattro, and fix C-API + macros, functions, and structs such as _PyLong_FromByteArray(), + PyString_GET_SIZE, f_locals in PyFrameObject, Py_NAN, co_filename in + PyCodeObject + + * Use a more stable approach for allocating PyObjects in cpyext. (see + `blog post`_). Once the PyObject corresponding to a PyPy object is created, + it stays around at the same location until the death of the PyPy object. + Done with a little bit of custom GC support. 
It allows us to kill the + notion of "borrowing" inside cpyext, reduces 4 dictionaries down to 1, and + significantly simplifies the whole approach (which is why it is a new + feature while technically a refactoring) and allows PyPy to support the + populart lxml module (as of the *next* release) with no PyPy specific + patches needed + + * Make the default filesystem encoding ASCII, like CPython + + * Use `hypothesis`_ in test creation, which is great for randomizing tests * Bug Fixes - * + * Backport always using os.urandom for uuid4 from cpython and fix the JIT as well + (issue #2202) - * + * More completely support datetime, optimize timedelta creation - * + * Fix for issue 2185 which caused an inconsistent list of operations to be + generated by the unroller, appeared in a complicated DJango app + + * Fix an elusive issue with stacklets on shadowstack which showed up when + forgetting stacklets without resuming them + + * Fix entrypoint() which now acquires the GIL + + * Fix direct_ffi_call() so failure does not bail out before setting CALL_MAY_FORCE + + * Fix (de)pickling long values by simplifying the implementation + + * Fix RPython rthread so that objects stored as threadlocal do not force minor + GC collection and are kept alive automatically. This improves perfomance of + short-running Python callbacks and prevents resetting such object between + calls + + * Support floats as parameters to itertools.isslice() + + * Check for the existence of CODESET, ignoring it should have prevented PyPy + from working on FreeBSD + + * Fix for corner case (likely shown by Krakatau) for consecutive guards with + interdependencies + + * Fix applevel bare class method comparisons which should fix pretty printing + in IPython * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy -* New features: - - * - - * - - * - * Numpy: - * + * Updates to numpy 1.10.2 (incompatibilities and not-implemented features + still exist) - * + * Support dtype=(('O', spec)) union while disallowing record arrays with + mixed object, non-object values + * Remove all traces of micronumpy from cpyext if --withoutmod-micronumpy option used + + * Support indexing filtering with a boolean ndarray + + * Support partition() as an app-level function, together with a cffi wrapper + in pypy/numpy, this now provides partial support for partition() * Performance improvements and refactorings: - * + * Refactor and improve exception analysis in the annotator - * + * Remove unnecessary special handling of space.wrap(). - * + * Improve the memory signature of numbering instances in the JIT. This should + massively decrease the amount of memory consumed by the JIT, which is + significant for most programs. Also compress the numberings using variable- + size encoding + + * Support list-resizing setslice operations in RPython + + * Tweak the trace-too-long heuristic for multiple jit drivers + + * Refactor bookkeeping (such a cool word - three double letters) in the + annotater + + * Refactor wrappers for OS functions from rtyper to rlib and simplify them + + * Simplify backend loading instructions to only use four variants + + * Optimize string concatination + + * Simplify GIL handling in non-jitted code + + * Use INT_LSHIFT instead of INT_MUL when possible + + * Improve struct.unpack by casting directly from the underlying buffer. 
+ Unpacking floats and doubles is about 15 times faster, and integer types + about 50% faster (on 64 bit integers). This was then subsequently + improved further in optimizeopt.py. + + * Refactor naming in optimizeopt + + * Change GraphAnalyzer to use a more precise way to recognize external + functions and fix null pointer handling, generally clean up external + function handling + + * Optimize global lookups + + * Optimize two-tuple lookups in mapdict + + * Remove pure variants of ``getfield_gc_*`` operations from the JIT by + determining purity while tracing + + * Refactor databasing + + * Simplify bootstrapping in cpyext + + * Refactor rtyper debug code into python.rtyper.debug + + * Seperate structmember.h from Python.h Also enhance creating api functions + to specify which header file they appear in (previously only pypy_decl.h) + + * Reduce all guards from int_floordiv_ovf if one of the arguments is constant + + * Fix tokenizer to enforce universal newlines, needed for Python 3 support + + * Identify permutations of attributes at instance creation, reducing the + number of bridges created + + * Greatly improve re.sub() performance .. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html +.. _`hypothesis`: http://hypothesis.readthedocs.org +.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html Please update, and continue to help us make PyPy better. diff --git a/pypy/doc/whatsnew-5.0.0.rst b/pypy/doc/whatsnew-5.0.0.rst --- a/pypy/doc/whatsnew-5.0.0.rst +++ b/pypy/doc/whatsnew-5.0.0.rst @@ -191,3 +191,7 @@ Fix boolean-array indexing in micronumpy +.. branch: numpy_partition +Support ndarray.partition() as an app-level function numpy.core._partition_use, +provided as a cffi wrapper to upstream's implementation in the pypy/numpy repo + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,6 +3,5 @@ ========================= .. this is a revision shortly after release-5.0.0 -.. startrev: 6d13e55b962a +.. 
startrev: 7bb6381d084c - diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -13,6 +13,9 @@ from pypy.interpreter.argument import Arguments from rpython.rlib import jit +from rpython.rlib.rarithmetic import LONG_BIT +from rpython.rlib.rbigint import rbigint + funccallunrolling = unrolling_iterable(range(4)) @@ -557,6 +560,26 @@ return space.w_False return space.eq(self.w_function, w_other.w_function) + def is_w(self, space, other): + if not isinstance(other, Method): + return False + return (self.w_instance is other.w_instance and + self.w_function is other.w_function and + self.w_class is other.w_class) + + def immutable_unique_id(self, space): + from pypy.objspace.std.util import IDTAG_METHOD as tag + from pypy.objspace.std.util import IDTAG_SHIFT + if self.w_instance is not None: + id = space.bigint_w(space.id(self.w_instance)) + id = id.lshift(LONG_BIT) + else: + id = rbigint.fromint(0) + id = id.or_(space.bigint_w(space.id(self.w_function))) + id = id.lshift(LONG_BIT).or_(space.bigint_w(space.id(self.w_class))) + id = id.lshift(IDTAG_SHIFT).int_or_(tag) + return space.newlong_from_rbigint(id) + def descr_method_hash(self): space = self.space w_result = space.hash(self.w_function) diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -1,5 +1,4 @@ - -import unittest +import pytest from pypy.interpreter import eval from pypy.interpreter.function import Function, Method, descr_function_get from pypy.interpreter.pycode import PyCode @@ -187,6 +186,7 @@ raises( TypeError, func, 42, {'arg1': 23}) + @pytest.mark.skipif("config.option.runappdirect") def test_kwargs_nondict_mapping(self): class Mapping: def keys(self): @@ -257,6 +257,14 @@ meth = func.__get__(obj, object) assert meth() == obj + def test_none_get_interaction(self): + skip("XXX issue #2083") + assert type(None).__repr__(None) == 'None' + + def test_none_get_interaction_2(self): + f = None.__repr__ + assert f() == 'None' + def test_no_get_builtin(self): assert not hasattr(dir, '__get__') class A(object): @@ -284,6 +292,7 @@ raises(TypeError, len, s, some_unknown_keyword=s) raises(TypeError, len, s, s, some_unknown_keyword=s) + @pytest.mark.skipif("config.option.runappdirect") def test_call_error_message(self): try: len() @@ -325,6 +334,7 @@ f = lambda: 42 assert f.func_doc is None + @pytest.mark.skipif("config.option.runappdirect") def test_setstate_called_with_wrong_args(self): f = lambda: 42 # not sure what it should raise, since CPython doesn't have setstate @@ -550,6 +560,37 @@ assert A().m == X() assert X() == A().m + @pytest.mark.skipif("config.option.runappdirect") + def test_method_identity(self): + class A(object): + def m(self): + pass + def n(self): + pass + + class B(A): + pass + + class X(object): + def __eq__(self, other): + return True + + a = A() + a2 = A() + assert a.m is a.m + assert id(a.m) == id(a.m) + assert a.m is not a.n + assert id(a.m) != id(a.n) + assert a.m is not a2.m + assert id(a.m) != id(a2.m) + + assert A.m is A.m + assert id(A.m) == id(A.m) + assert A.m is not A.n + assert id(A.m) != id(A.n) + assert A.m is not B.m + assert id(A.m) != id(B.m) + class TestMethod: def setup_method(self, method): diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py --- a/pypy/module/__builtin__/descriptor.py +++ b/pypy/module/__builtin__/descriptor.py @@ 
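The new Method.is_w and Method.immutable_unique_id above make equal bound methods behave as one object under is and id(): the id packs the ids of the instance, function and class plus the IDTAG_METHOD tag into a single arbitrary-precision integer. A rough pure-Python rendering of that packing, with plain ints standing in for rbigint and LONG_BIT taken as 64 for a 64-bit build:

LONG_BIT = 64        # rpython.rlib.rarithmetic.LONG_BIT on a 64-bit build
IDTAG_SHIFT = 4      # from pypy.objspace.std.util (see the util.py hunk below)
IDTAG_METHOD = 9

def method_unique_id(instance_id, function_id, class_id):
    # unbound methods pass 0 for the instance part (the w_instance is None branch)
    uid = (instance_id << LONG_BIT) | function_id
    uid = (uid << LONG_BIT) | class_id
    return (uid << IDTAG_SHIFT) | IDTAG_METHOD

# two Method objects wrapping the same instance, function and class therefore
# compute the same id(), which is what test_method_identity above relies on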
-79,6 +79,7 @@ W_Super.typedef = TypeDef( 'super', __new__ = interp2app(descr_new_super), + __thisclass__ = interp_attrproperty_w("w_starttype", W_Super), __getattribute__ = interp2app(W_Super.getattribute), __get__ = interp2app(W_Super.get), __doc__ = """super(type) -> unbound super object diff --git a/pypy/module/__builtin__/test/test_descriptor.py b/pypy/module/__builtin__/test/test_descriptor.py --- a/pypy/module/__builtin__/test/test_descriptor.py +++ b/pypy/module/__builtin__/test/test_descriptor.py @@ -214,7 +214,7 @@ c = C() assert C.goo(1) == (C, 1) assert c.goo(1) == (C, 1) - + assert c.foo(1) == (c, 1) class D(C): pass @@ -238,6 +238,17 @@ meth = classmethod(1).__get__(1) raises(TypeError, meth) + def test_super_thisclass(self): + class A(object): + pass + + assert super(A, A()).__thisclass__ is A + + class B(A): + pass + + assert super(B, B()).__thisclass__ is B + assert super(A, B()).__thisclass__ is A def test_property_docstring(self): assert property.__doc__.startswith('property') diff --git a/pypy/module/_collections/app_defaultdict.py b/pypy/module/_collections/app_defaultdict.py --- a/pypy/module/_collections/app_defaultdict.py +++ b/pypy/module/_collections/app_defaultdict.py @@ -12,6 +12,7 @@ class defaultdict(dict): __slots__ = ['default_factory'] + __module__ = 'collections' def __init__(self, *args, **kwds): if len(args) > 0: diff --git a/pypy/module/_collections/test/test_defaultdict.py b/pypy/module/_collections/test/test_defaultdict.py --- a/pypy/module/_collections/test/test_defaultdict.py +++ b/pypy/module/_collections/test/test_defaultdict.py @@ -14,6 +14,12 @@ d[5].append(44) assert l == [42, 43] and l2 == [44] + def test_module(self): + from _collections import defaultdict + assert repr(defaultdict) in ( + "", # on PyPy + "") # on CPython + def test_keyerror_without_factory(self): from _collections import defaultdict for d1 in [defaultdict(), defaultdict(None)]: diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -7,6 +7,7 @@ from pypy.interpreter.error import OperationError from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit +from rpython.rlib.rstring import StringBuilder, UnicodeBuilder # ____________________________________________________________ # @@ -36,6 +37,8 @@ if 0 <= start <= end: if isinstance(ctx, rsre_core.BufMatchContext): return space.wrap(ctx._buffer.getslice(start, end, 1, end-start)) + if isinstance(ctx, rsre_core.StrMatchContext): + return space.wrap(ctx._string[start:end]) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr[start:end]) else: @@ -98,8 +101,8 @@ space.wrap("cannot copy this pattern object")) def make_ctx(self, w_string, pos=0, endpos=sys.maxint): - """Make a BufMatchContext or a UnicodeMatchContext for searching - in the given w_string object.""" + """Make a StrMatchContext, BufMatchContext or a UnicodeMatchContext for + searching in the given w_string object.""" space = self.space if pos < 0: pos = 0 @@ -113,6 +116,14 @@ endpos = len(unicodestr) return rsre_core.UnicodeMatchContext(self.code, unicodestr, pos, endpos, self.flags) + elif space.isinstance_w(w_string, space.w_str): + str = space.str_w(w_string) + if pos > len(str): + pos = len(str) + if endpos > len(str): + endpos = len(str) + return rsre_core.StrMatchContext(self.code, str, + pos, endpos, self.flags) else: buf = space.readbuf_w(w_string) size = buf.getlength() @@ -216,6 +227,11 @@ def subx(self, 
w_ptemplate, w_string, count): space = self.space + # use a (much faster) string/unicode builder if w_ptemplate and + # w_string are both string or both unicode objects, and if w_ptemplate + # is a literal + use_builder = False + filter_as_unicode = filter_as_string = None if space.is_true(space.callable(w_ptemplate)): w_filter = w_ptemplate filter_is_callable = True @@ -223,6 +239,8 @@ if space.isinstance_w(w_ptemplate, space.w_unicode): filter_as_unicode = space.unicode_w(w_ptemplate) literal = u'\\' not in filter_as_unicode + use_builder = ( + space.isinstance_w(w_string, space.w_unicode) and literal) else: try: filter_as_string = space.str_w(w_ptemplate) @@ -232,6 +250,8 @@ literal = False else: literal = '\\' not in filter_as_string + use_builder = ( + space.isinstance_w(w_string, space.w_str) and literal) if literal: w_filter = w_ptemplate filter_is_callable = False @@ -242,19 +262,44 @@ space.wrap(self), w_ptemplate) filter_is_callable = space.is_true(space.callable(w_filter)) # + # XXX this is a bit of a mess, but it improves performance a lot ctx = self.make_ctx(w_string) - sublist_w = [] + sublist_w = strbuilder = unicodebuilder = None + if use_builder: + if filter_as_unicode is not None: + unicodebuilder = UnicodeBuilder(ctx.end) + else: + assert filter_as_string is not None + strbuilder = StringBuilder(ctx.end) + else: + sublist_w = [] n = last_pos = 0 while not count or n < count: + sub_jitdriver.jit_merge_point( + self=self, + use_builder=use_builder, + filter_is_callable=filter_is_callable, + filter_type=type(w_filter), + ctx=ctx, + w_filter=w_filter, + strbuilder=strbuilder, + unicodebuilder=unicodebuilder, + filter_as_string=filter_as_string, + filter_as_unicode=filter_as_unicode, + count=count, + w_string=w_string, + n=n, last_pos=last_pos, sublist_w=sublist_w + ) + space = self.space if not searchcontext(space, ctx): break if last_pos < ctx.match_start: - sublist_w.append(slice_w(space, ctx, last_pos, - ctx.match_start, space.w_None)) + _sub_append_slice( + ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, last_pos, ctx.match_start) start = ctx.match_end if start == ctx.match_start: start += 1 - nextctx = ctx.fresh_copy(start) if not (last_pos == ctx.match_start == ctx.match_end and n > 0): # the above ignores empty matches on latest position @@ -262,28 +307,71 @@ w_match = self.getmatch(ctx, True) w_piece = space.call_function(w_filter, w_match) if not space.is_w(w_piece, space.w_None): + assert strbuilder is None and unicodebuilder is None + assert not use_builder sublist_w.append(w_piece) else: - sublist_w.append(w_filter) + if use_builder: + if strbuilder is not None: + assert filter_as_string is not None + strbuilder.append(filter_as_string) + else: + assert unicodebuilder is not None + assert filter_as_unicode is not None + unicodebuilder.append(filter_as_unicode) + else: + sublist_w.append(w_filter) last_pos = ctx.match_end n += 1 elif last_pos >= ctx.end: break # empty match at the end: finished - ctx = nextctx + ctx.reset(start) if last_pos < ctx.end: - sublist_w.append(slice_w(space, ctx, last_pos, ctx.end, - space.w_None)) + _sub_append_slice(ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, last_pos, ctx.end) + if use_builder: + if strbuilder is not None: + return space.wrap(strbuilder.build()), n + else: + assert unicodebuilder is not None + return space.wrap(unicodebuilder.build()), n + else: + if space.isinstance_w(w_string, space.w_unicode): + w_emptystr = space.wrap(u'') + else: + w_emptystr = space.wrap('') + w_item = 
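The use_builder fast path being threaded through subx() here avoids accumulating wrapped pieces in sublist_w and joining them afterwards: when the replacement template is a plain literal (no backslash) of the same string type as the input, every unmatched slice and every literal copy is appended directly into one StringBuilder/UnicodeBuilder. The control flow, reduced to plain Python on top of the stdlib re module purely as an illustration:

import re

def sub_literal(pattern, literal, string):
    # in RPython `pieces` becomes a StringBuilder/UnicodeBuilder, which is
    # what makes this path much cheaper than wrapping and joining
    pieces = []
    last = 0
    for match in re.finditer(pattern, string):
        pieces.append(string[last:match.start()])   # unmatched slice
        pieces.append(literal)                      # literal replacement
        last = match.end()
    pieces.append(string[last:])
    return ''.join(pieces)

assert sub_literal('a', '-', 'banana') == re.sub('a', '-', 'banana')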
space.call_method(w_emptystr, 'join', + space.newlist(sublist_w)) + return w_item, n - if space.isinstance_w(w_string, space.w_unicode): - w_emptystr = space.wrap(u'') - else: - w_emptystr = space.wrap('') - w_item = space.call_method(w_emptystr, 'join', - space.newlist(sublist_w)) - return w_item, n +sub_jitdriver = jit.JitDriver( + reds="""count n last_pos + ctx w_filter + strbuilder unicodebuilder + filter_as_string + filter_as_unicode + w_string sublist_w + self""".split(), + greens=["filter_is_callable", "use_builder", "filter_type", "ctx.pattern"]) +def _sub_append_slice(ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, start, end): + if use_builder: + if isinstance(ctx, rsre_core.BufMatchContext): + assert strbuilder is not None + return strbuilder.append(ctx._buffer.getslice(start, end, 1, end-start)) + if isinstance(ctx, rsre_core.StrMatchContext): + assert strbuilder is not None + return strbuilder.append_slice(ctx._string, start, end) + elif isinstance(ctx, rsre_core.UnicodeMatchContext): + assert unicodebuilder is not None + return unicodebuilder.append_slice(ctx._unicodestr, start, end) + assert 0, "unreachable" + else: + sublist_w.append(slice_w(space, ctx, start, end, space.w_None)) + @unwrap_spec(flags=int, groups=int, w_groupindex=WrappedDefault(None), w_indexgroup=WrappedDefault(None)) def SRE_Pattern__new__(space, w_subtype, w_pattern, flags, w_code, @@ -482,6 +570,8 @@ ctx = self.ctx if isinstance(ctx, rsre_core.BufMatchContext): return space.wrap(ctx._buffer.as_str()) + elif isinstance(ctx, rsre_core.StrMatchContext): + return space.wrap(ctx._string) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr) else: diff --git a/pypy/module/micronumpy/appbridge.py b/pypy/module/micronumpy/appbridge.py --- a/pypy/module/micronumpy/appbridge.py +++ b/pypy/module/micronumpy/appbridge.py @@ -9,6 +9,7 @@ w_array_repr = None w_array_str = None w__usefields = None + w_partition = None def __init__(self, space): pass diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -557,8 +557,12 @@ return self.get_scalar_value().item(space) l_w = [] for i in range(self.get_shape()[0]): - l_w.append(space.call_method(self.descr_getitem(space, - space.wrap(i)), "tolist")) + item_w = self.descr_getitem(space, space.wrap(i)) + if (isinstance(item_w, W_NDimArray) or + isinstance(item_w, boxes.W_GenericBox)): + l_w.append(space.call_method(item_w, "tolist")) + else: + l_w.append(item_w) return space.newlist(l_w) def descr_ravel(self, space, w_order=None): @@ -934,6 +938,10 @@ return return self.implementation.sort(space, w_axis, w_order) + def descr_partition(self, space, __args__): + return get_appbridge_cache(space).call_method( + space, 'numpy.core._partition_use', 'partition', __args__.prepend(self)) + def descr_squeeze(self, space, w_axis=None): cur_shape = self.get_shape() if not space.is_none(w_axis): @@ -1658,6 +1666,7 @@ argsort = interp2app(W_NDimArray.descr_argsort), sort = interp2app(W_NDimArray.descr_sort), + partition = interp2app(W_NDimArray.descr_partition), astype = interp2app(W_NDimArray.descr_astype), base = GetSetProperty(W_NDimArray.descr_get_base), byteswap = interp2app(W_NDimArray.descr_byteswap), diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -1936,6 +1936,22 @@ 
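At app level the tolist() change above means that elements of an object-dtype array which are neither sub-arrays nor numpy scalar boxes are returned as-is instead of being recursed into, which is what the new tests in this hunk check; the companion descr_partition() hook simply forwards to numpy.core._partition_use through the appbridge cache. The tolist() behaviour can be reproduced with upstream numpy:

import numpy as np

a = np.array([slice(0, 1), 0], dtype=object)
assert a.tolist() == [slice(0, 1), 0]

b = np.array([(slice(0, 1), 1), (0, 1)], dtype=object)
assert b.tolist() == [[slice(0, 1, None), 1], [0, 1]]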
a = array([[1, 2], [3, 4]]) assert (a + a).tolist() == [[2, 4], [6, 8]] + def test_tolist_object(self): + from numpy import array + a = array([0], dtype=object) + assert a.tolist() == [0] + + def test_tolist_object_slice(self): + from numpy import array + list_expected = [slice(0, 1), 0] + a = array(list_expected, dtype=object) + assert a.tolist() == list_expected + + def test_tolist_object_slice_2d(self): + from numpy import array + a = array([(slice(0, 1), 1), (0, 1)], dtype=object) + assert a.tolist() == [[slice(0, 1, None), 1], [0, 1]] + def test_tolist_slice(self): from numpy import array a = array([[17.1, 27.2], [40.3, 50.3]]) diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -266,11 +266,12 @@ return None from rpython.rlib.longlong2float import float2longlong from pypy.objspace.std.util import IDTAG_COMPLEX as tag + from pypy.objspace.std.util import IDTAG_SHIFT real = space.float_w(space.getattr(self, space.wrap("real"))) imag = space.float_w(space.getattr(self, space.wrap("imag"))) real_b = rbigint.fromrarith_int(float2longlong(real)) imag_b = rbigint.fromrarith_int(r_ulonglong(float2longlong(imag))) - val = real_b.lshift(64).or_(imag_b).lshift(3).int_or_(tag) + val = real_b.lshift(64).or_(imag_b).lshift(IDTAG_SHIFT).int_or_(tag) return space.newlong_from_rbigint(val) def int(self, space): diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -181,9 +181,10 @@ return None from rpython.rlib.longlong2float import float2longlong from pypy.objspace.std.util import IDTAG_FLOAT as tag + from pypy.objspace.std.util import IDTAG_SHIFT val = float2longlong(space.float_w(self)) b = rbigint.fromrarith_int(val) - b = b.lshift(3).int_or_(tag) + b = b.lshift(IDTAG_SHIFT).int_or_(tag) return space.newlong_from_rbigint(b) def __repr__(self): diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -24,7 +24,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.objspace.std import newformat from pypy.objspace.std.util import ( - BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_INT, wrap_parsestringerror) + BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_INT, IDTAG_SHIFT, wrap_parsestringerror) SENTINEL = object() @@ -46,7 +46,7 @@ if self.user_overridden_class: return None b = space.bigint_w(self) - b = b.lshift(3).int_or_(IDTAG_INT) + b = b.lshift(IDTAG_SHIFT).int_or_(IDTAG_INT) return space.newlong_from_rbigint(b) def int(self, space): diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -16,7 +16,7 @@ from pypy.objspace.std import newformat from pypy.objspace.std.intobject import W_AbstractIntObject from pypy.objspace.std.util import ( - BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_LONG, wrap_parsestringerror) + BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_LONG, IDTAG_SHIFT, wrap_parsestringerror) def delegate_other(func): @@ -45,7 +45,7 @@ if self.user_overridden_class: return None b = space.bigint_w(self) - b = b.lshift(3).int_or_(IDTAG_LONG) + b = b.lshift(IDTAG_SHIFT).int_or_(IDTAG_LONG) return space.newlong_from_rbigint(b) def unwrap(self, space): diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py 
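The objspace changes above replace the hard-coded lshift(3) with the new IDTAG_SHIFT (4, added to util.py in a later hunk of this changeset) so that the extra method tag fits in the low bits; the id() of an unboxed primitive is its payload shifted left by IDTAG_SHIFT with the type tag or'ed in, floats using their IEEE-754 bit pattern (via float2longlong) as payload. A small worked example matching the updated expectations in test_id_on_primitives below:

IDTAG_SHIFT = 4
IDTAG_INT, IDTAG_LONG, IDTAG_FLOAT = 1, 3, 5

def tagged_id(payload, tag):
    return (payload << IDTAG_SHIFT) | tag

assert tagged_id(1, IDTAG_INT) == (1 << 4) + 1        # id(1) on PyPy
assert tagged_id(1, IDTAG_LONG) == (1 << 4) + 3       # id(1l) on PyPy
# 0x4000000000000000 is the IEEE-754 bit pattern of 2.0
assert tagged_id(0x4000000000000000, IDTAG_FLOAT) == 0x40000000000000005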
+++ b/pypy/objspace/std/test/test_obj.py @@ -172,15 +172,15 @@ def test_id_on_primitives(self): if self.cpython_apptest: skip("cpython behaves differently") - assert id(1) == (1 << 3) + 1 - assert id(1l) == (1 << 3) + 3 + assert id(1) == (1 << 4) + 1 + assert id(1l) == (1 << 4) + 3 class myint(int): pass assert id(myint(1)) != id(1) assert id(1.0) & 7 == 5 assert id(-0.0) != id(0.0) - assert hex(id(2.0)) == '0x20000000000000005L' + assert hex(id(2.0)) == '0x40000000000000005L' assert id(0.0) == 5 def test_id_on_strs(self): diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -2,11 +2,13 @@ from pypy.interpreter.error import OperationError, oefmt +IDTAG_SHIFT = 4 IDTAG_INT = 1 IDTAG_LONG = 3 IDTAG_FLOAT = 5 IDTAG_COMPLEX = 7 +IDTAG_METHOD = 9 CMP_OPS = dict(lt='<', le='<=', eq='==', ne='!=', gt='>', ge='>=') BINARY_BITWISE_OPS = {'and': '&', 'lshift': '<<', 'or': '|', 'rshift': '>>', diff --git a/pypy/tool/release/force-builds.py b/pypy/tool/release/force-builds.py --- a/pypy/tool/release/force-builds.py +++ b/pypy/tool/release/force-builds.py @@ -9,7 +9,7 @@ modified by PyPy team """ -import os, sys, urllib +import os, sys, urllib, subprocess from twisted.internet import reactor, defer from twisted.python import log @@ -83,4 +83,9 @@ (options, args) = parser.parse_args() if not options.branch: parser.error("branch option required") + try: + subprocess.check_call(['hg','id','-r', options.branch]) + except subprocess.CalledProcessError: + print 'branch', options.branch, 'could not be found in local repository' + sys.exit(-1) main(options.branch, options.server, user=options.user) diff --git a/rpython/doc/translation.rst b/rpython/doc/translation.rst --- a/rpython/doc/translation.rst +++ b/rpython/doc/translation.rst @@ -33,7 +33,7 @@ The RPython translation toolchain never sees Python source code or syntax trees, but rather starts with the *code objects* that define the behaviour of the function objects one gives it as input. The -`bytecode evaluator`_ and the :ref:`flow graph builder` work through these +:ref:`flow graph builder` works through these code objects using `abstract interpretation`_ to produce a control flow graph (one per function): yet another representation of the source program, but one which is suitable for applying type inference @@ -85,7 +85,6 @@ .. _PDF color version: _static/translation.pdf -.. _bytecode evaluator: interpreter.html .. _abstract interpretation: http://en.wikipedia.org/wiki/Abstract_interpretation From pypy.commits at gmail.com Sun Mar 6 14:48:16 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 06 Mar 2016 11:48:16 -0800 (PST) Subject: [pypy-commit] pypy default: drop the rev number from the published name, since we decided bug fixes will be 5.1, 5.2, ... Message-ID: <56dc8980.a118c20a.2e706.ffffa09c@mx.google.com> Author: mattip Branch: Changeset: r82828:089e08aceb1c Date: 2016-03-06 21:47 +0200 http://bitbucket.org/pypy/pypy/changeset/089e08aceb1c/ Log: drop the rev number from the published name, since we decided bug fixes will be 5.1, 5.2, ... diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -1,8 +1,8 @@ -========== -PyPy 5.0.0 -========== +======== +PyPy 5.0 +======== -We have released PyPy 5.0.0, about three months after PyPy 4.0.0. +We have released PyPy 5.0, about three months after PyPy 4.0.1. We encourage all users of PyPy to update to this version. 
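The method-identity behaviour exercised by these tests is specific to PyPy with the Method.is_w/immutable_unique_id change: bound methods built from the same instance, function and class now compare identical under is and share an id(), while CPython still creates a fresh bound method on every attribute access (which is why test_method_identity is skipped under runappdirect). In short:

class A(object):
    def m(self):
        pass
    def n(self):
        pass

a, a2 = A(), A()
# on PyPy with this change (not on CPython):
#   a.m is a.m        and  id(a.m) == id(a.m)
#   a.m is not a.n    and  a.m is not a2.m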
Apart from the usual bug fixes, there is an ongoing effort to improve the warmup time and memory usage of JIT-related metadata. The exact effects depend vastly on the program @@ -18,7 +18,7 @@ now just works on MacOS and Windows too, in both PyPy (built-in support) and CPython (as an installed module). -You can download the PyPy 5.0.0 release here: +You can download the PyPy 5.0 release here: http://pypy.org/download.html @@ -35,7 +35,7 @@ ==== While not applicable only to PyPy, `cffi`_ is arguably our most significant -contribution to the python ecosystem. PyPy 5.0.0 ships with +contribution to the python ecosystem. PyPy 5.0 ships with `cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a c program. .. _`PyPy`: http://doc.pypy.org diff --git a/pypy/doc/whatsnew-5.0.0.rst b/pypy/doc/whatsnew-5.0.0.rst --- a/pypy/doc/whatsnew-5.0.0.rst +++ b/pypy/doc/whatsnew-5.0.0.rst @@ -1,6 +1,6 @@ -======================== -What's new in PyPy 5.0.0 -======================== +====================== +What's new in PyPy 5.0 +====================== .. this is a revision shortly after release-4.0.1 .. startrev: 4b5c840d0da2 diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -2,6 +2,6 @@ What's new in PyPy 5.0.+ ========================= -.. this is a revision shortly after release-5.0.0 +.. this is a revision shortly after release-5.0 .. startrev: 7bb6381d084c From pypy.commits at gmail.com Sun Mar 6 14:50:46 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 06 Mar 2016 11:50:46 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: (untested) length is imm, we still need to check if enough space can be written. eliminated first stdux, doing addr calc. before entering the loops Message-ID: <56dc8a16.4577c20a.149e9.ffffa064@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82829:1afedf2dd2d2 Date: 2016-03-06 20:49 +0100 http://bitbucket.org/pypy/pypy/changeset/1afedf2dd2d2/ Log: (untested) length is imm, we still need to check if enough space can be written. eliminated first stdux, doing addr calc. 
before entering the loops diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -869,11 +869,6 @@ elif itemsize & 2: self.mc.sthu(a, b, c) elif (itemsize & 4) or IS_PPC_32: self.mc.stwu(a, b, c) else: self.mc.stdu(a, b, c) - def eza_stX(self, a, b, c, itemsize): - if itemsize & 1: self.mc.stb(a, b, c) - elif itemsize & 2: self.mc.sth(a, b, c) - elif (itemsize & 4) or IS_PPC_32: self.mc.stw(a, b, c) - else: self.mc.std(a, b, c) def emit_zero_array(self, op, arglocs, regalloc): base_loc, startindex_loc, length_loc, ofs_loc = arglocs @@ -894,17 +889,17 @@ self.mc.addi(r.SCRATCH2.value, startindex_loc.value, ofs_loc.getint()) ofs_loc = r.SCRATCH2 - # ofs_loc is now the startindex in bytes + the array offset + self.mc.add(ofs_loc.value, ofs_loc.value, base_loc.value) + # ofs_loc is now the real address pointing to the first + # byte to be zeroed if length_loc.is_imm(): self.mc.load_imm(r.SCRATCH, length_loc.value) length_loc = r.SCRATCH - jz_location = -1 - else: - # jump to end if length is less than stepsize - self.mc.cmp_op(0, length_loc.value, stepsize, imm=True) - jz_location = self.mc.currpos() - self.mc.trap() + + self.mc.cmp_op(0, length_loc.value, stepsize, imm=True) + jlt_location = self.mc.currpos() + self.mc.trap() self.mc.sradi(r.SCRATCH.value, r.length_loc.value, shift_by) self.mc.mtctr(r.SCRATCH.value) # store the length in count register @@ -916,25 +911,16 @@ # byte is zeroed in another loop in 2) # first store of case 1) - self.eza_stXux(r.SCRATCH.value, ofs_loc.value, base_loc.value, stepsize) - bdz_location = self.mc.currpos() - self.mc.trap() # jump over the loop if we are already done with 1) - # 1) The next loop copies WORDS into the memory chunk starting at startindex # ending at startindex + length. These are bytes loop_location = self.mc.currpos() self.eza_stXu(r.SCRATCH.value, ofs_loc.value, stepsize, stepsize) self.mc.bdnz(loop_location - self.mc.currpos()) - pmc = OverwritingBuilder(self.mc, bdz_location, 1) - pmc.bdz(self.mc.currpos() - bdz_location) + pmc = OverwritingBuilder(self.mc, jlt_location, 1) + pmc.blt(self.mc.currpos() - jlt_location) # jump if length < WORD pmc.overwrite() - if jz_location != -1: - pmc = OverwritingBuilder(self.mc, jz_location, 1) - pmc.ble(self.mc.currpos() - jz_location) # !GT - pmc.overwrite() - # 2) There might be some bytes left to be written. # following scenario: length_loc == 3 bytes, stepsize == 4! # need to write the last bytes. 
@@ -943,9 +929,9 @@ if length_loc.is_imm(): self.mc.load_imm(r.SCRATCH, length_loc.value & (stepsize-1)) else: - self.mc.andi(r.SCRATCH.value, length_loc, stepsize-1) + self.mc.andi(r.SCRATCH.value, length_loc.value, stepsize-1) - self.mc.cmp_op(0, SCRATCH.value, 0, imm=True) + self.mc.cmp_op(0, r.SCRATCH.value, 0, imm=True) jle_location = self.mc.currpos() self.mc.trap() From pypy.commits at gmail.com Sun Mar 6 15:35:38 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 06 Mar 2016 12:35:38 -0800 (PST) Subject: [pypy-commit] pypy default: Merged in loganchien/pypy (pull request #410) Message-ID: <56dc949a.02f0c20a.19980.205d@mx.google.com> Author: Armin Rigo Branch: Changeset: r82831:a7c2a20fd22d Date: 2016-03-06 21:35 +0100 http://bitbucket.org/pypy/pypy/changeset/a7c2a20fd22d/ Log: Merged in loganchien/pypy (pull request #410) Remove dead code related to argtypes and standalone diff --git a/rpython/translator/interactive.py b/rpython/translator/interactive.py --- a/rpython/translator/interactive.py +++ b/rpython/translator/interactive.py @@ -32,12 +32,6 @@ self.context.viewcg() def ensure_setup(self, argtypes=None, policy=None): - standalone = argtypes is None - if standalone: - assert argtypes is None - else: - if argtypes is None: - argtypes = [] self.driver.setup(self.entry_point, argtypes, policy, empty_translator=self.context) self.ann_argtypes = argtypes From pypy.commits at gmail.com Sun Mar 6 15:35:43 2016 From: pypy.commits at gmail.com (loganchien) Date: Sun, 06 Mar 2016 12:35:43 -0800 (PST) Subject: [pypy-commit] pypy default: remove dead code for argtypes and standalone Message-ID: <56dc949f.d7c21c0a.2e77e.2084@mx.google.com> Author: Logan Chien Branch: Changeset: r82830:524f77883111 Date: 2016-03-06 23:08 +0800 http://bitbucket.org/pypy/pypy/changeset/524f77883111/ Log: remove dead code for argtypes and standalone diff --git a/rpython/translator/interactive.py b/rpython/translator/interactive.py --- a/rpython/translator/interactive.py +++ b/rpython/translator/interactive.py @@ -32,12 +32,6 @@ self.context.viewcg() def ensure_setup(self, argtypes=None, policy=None): - standalone = argtypes is None - if standalone: - assert argtypes is None - else: - if argtypes is None: - argtypes = [] self.driver.setup(self.entry_point, argtypes, policy, empty_translator=self.context) self.ann_argtypes = argtypes From pypy.commits at gmail.com Sun Mar 6 15:35:51 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 06 Mar 2016 12:35:51 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: catchup with default Message-ID: <56dc94a7.046f1c0a.c8d06.2692@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82832:b31ac68c3821 Date: 2016-03-06 21:35 +0100 http://bitbucket.org/pypy/pypy/changeset/b31ac68c3821/ Log: catchup with default diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -1,8 +1,8 @@ -========== -PyPy 5.0.0 -========== +======== +PyPy 5.0 +======== -We have released PyPy 5.0.0, about three months after PyPy 4.0.0. +We have released PyPy 5.0, about three months after PyPy 4.0.1. We encourage all users of PyPy to update to this version. Apart from the usual bug fixes, there is an ongoing effort to improve the warmup time and memory usage of JIT-related metadata. The exact effects depend vastly on the program @@ -18,7 +18,7 @@ now just works on MacOS and Windows too, in both PyPy (built-in support) and CPython (as an installed module). 
-You can download the PyPy 5.0.0 release here: +You can download the PyPy 5.0 release here: http://pypy.org/download.html @@ -35,7 +35,7 @@ ==== While not applicable only to PyPy, `cffi`_ is arguably our most significant -contribution to the python ecosystem. PyPy 5.0.0 ships with +contribution to the python ecosystem. PyPy 5.0 ships with `cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a c program. .. _`PyPy`: http://doc.pypy.org @@ -98,7 +98,8 @@ * Bug Fixes - * Backport always using os.urandom for uuid4 from cpython + * Backport always using os.urandom for uuid4 from cpython and fix the JIT as well + (issue #2202) * More completely support datetime, optimize timedelta creation @@ -106,7 +107,7 @@ generated by the unroller, appeared in a complicated DJango app * Fix an elusive issue with stacklets on shadowstack which showed up when - forgetting stacklets without resuming them + forgetting stacklets without resuming them * Fix entrypoint() which now acquires the GIL @@ -146,13 +147,13 @@ * Support indexing filtering with a boolean ndarray + * Support partition() as an app-level function, together with a cffi wrapper + in pypy/numpy, this now provides partial support for partition() + * Performance improvements and refactorings: * Refactor and improve exception analysis in the annotator - * Improve the performace of struct.unpack; unpacking of floats and doubles - is now about 15 times faster and 64 bit integers faster by a factor of 2 - * Remove unnecessary special handling of space.wrap(). * Improve the memory signature of numbering instances in the JIT. This should diff --git a/pypy/doc/whatsnew-5.0.0.rst b/pypy/doc/whatsnew-5.0.0.rst --- a/pypy/doc/whatsnew-5.0.0.rst +++ b/pypy/doc/whatsnew-5.0.0.rst @@ -1,6 +1,6 @@ -========================= -What's new in PyPy 5.0.+ -========================= +====================== +What's new in PyPy 5.0 +====================== .. this is a revision shortly after release-4.0.1 .. startrev: 4b5c840d0da2 @@ -128,6 +128,7 @@ Fix SSL tests by importing cpython's patch + .. branch: remove-getfield-pure Remove pure variants of ``getfield_gc_*`` operations from the JIT. Relevant @@ -189,3 +190,8 @@ .. branch: ndarray-setitem-filtered Fix boolean-array indexing in micronumpy + +.. branch: numpy_partition +Support ndarray.partition() as an app-level function numpy.core._partition_use, +provided as a cffi wrapper to upstream's implementation in the pypy/numpy repo + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -2,8 +2,8 @@ What's new in PyPy 5.0.+ ========================= -.. this is a revision shortly after release-5.0.0 -.. startrev: 6d13e55b962a +.. this is a revision shortly after release-5.0 +.. startrev: 7bb6381d084c .. 
branch: memop-simplify3 diff --git a/pypy/module/micronumpy/appbridge.py b/pypy/module/micronumpy/appbridge.py --- a/pypy/module/micronumpy/appbridge.py +++ b/pypy/module/micronumpy/appbridge.py @@ -9,6 +9,7 @@ w_array_repr = None w_array_str = None w__usefields = None + w_partition = None def __init__(self, space): pass diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -557,8 +557,12 @@ return self.get_scalar_value().item(space) l_w = [] for i in range(self.get_shape()[0]): - l_w.append(space.call_method(self.descr_getitem(space, - space.wrap(i)), "tolist")) + item_w = self.descr_getitem(space, space.wrap(i)) + if (isinstance(item_w, W_NDimArray) or + isinstance(item_w, boxes.W_GenericBox)): + l_w.append(space.call_method(item_w, "tolist")) + else: + l_w.append(item_w) return space.newlist(l_w) def descr_ravel(self, space, w_order=None): @@ -934,6 +938,10 @@ return return self.implementation.sort(space, w_axis, w_order) + def descr_partition(self, space, __args__): + return get_appbridge_cache(space).call_method( + space, 'numpy.core._partition_use', 'partition', __args__.prepend(self)) + def descr_squeeze(self, space, w_axis=None): cur_shape = self.get_shape() if not space.is_none(w_axis): @@ -1658,6 +1666,7 @@ argsort = interp2app(W_NDimArray.descr_argsort), sort = interp2app(W_NDimArray.descr_sort), + partition = interp2app(W_NDimArray.descr_partition), astype = interp2app(W_NDimArray.descr_astype), base = GetSetProperty(W_NDimArray.descr_get_base), byteswap = interp2app(W_NDimArray.descr_byteswap), diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -1958,6 +1958,22 @@ a = array([[1, 2], [3, 4]]) assert (a + a).tolist() == [[2, 4], [6, 8]] + def test_tolist_object(self): + from numpy import array + a = array([0], dtype=object) + assert a.tolist() == [0] + + def test_tolist_object_slice(self): + from numpy import array + list_expected = [slice(0, 1), 0] + a = array(list_expected, dtype=object) + assert a.tolist() == list_expected + + def test_tolist_object_slice_2d(self): + from numpy import array + a = array([(slice(0, 1), 1), (0, 1)], dtype=object) + assert a.tolist() == [[slice(0, 1, None), 1], [0, 1]] + def test_tolist_slice(self): from numpy import array a = array([[17.1, 27.2], [40.3, 50.3]]) From pypy.commits at gmail.com Sun Mar 6 16:32:48 2016 From: pypy.commits at gmail.com (mjacob) Date: Sun, 06 Mar 2016 13:32:48 -0800 (PST) Subject: [pypy-commit] pypy py3k: hg merge 1ca5e965b73d Message-ID: <56dca200.654fc20a.1121.ffffbe85@mx.google.com> Author: Manuel Jacob Branch: py3k Changeset: r82833:c6435e382303 Date: 2016-03-06 22:11 +0100 http://bitbucket.org/pypy/pypy/changeset/c6435e382303/ Log: hg merge 1ca5e965b73d This is the last changeset which was merged into release-5.x. diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -3,8 +3,20 @@ ========== We have released PyPy 5.0.0, about three months after PyPy 4.0.0. -We encourage all users of PyPy to update to this version. There are -bug fixes and a major upgrade to our c-api layer (cpyext) +We encourage all users of PyPy to update to this version. Apart from the usual +bug fixes, there is an ongoing effort to improve the warmup time and memory +usage of JIT-related metadata. 
The exact effects depend vastly on the program +you're running and can range from insignificant to warmup being up to 30% +faster and memory dropping by about 30%. + +We also merged a major upgrade to our C-API layer (cpyext), simplifying the +interaction between c-level objects and PyPy interpreter level objects. As a +result, lxml with its cython compiled component `passes all tests`_ on PyPy + +Users who have gotten used to vmprof_ on Linux, and those on other platforms +who have not yet tried its awesomeness, will be happy to hear that vmprof +now just works on MacOS and Windows too, in both PyPy (built-in support) and +CPython (as an installed module). You can download the PyPy 5.0.0 release here: @@ -33,6 +45,8 @@ .. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly .. _`help`: http://doc.pypy.org/en/latest/project-ideas.html .. _`numpy`: https://bitbucket.org/pypy/numpy +.. _`passes all tests`: https://bitbucket.org/pypy/compatibility/wiki/lxml +.. _vmprof: http://vmprof.readthedocs.org What is PyPy? ============= @@ -53,44 +67,156 @@ .. _`dynamic languages`: http://pypyjs.org Other Highlights (since 4.0.1 released in November 2015) -======================================================= +========================================================= + +* New features: + + * Support embedding PyPy in a C-program via cffi and static callbacks in cffi. + This deprecates the old method of embedding PyPy + + * Refactor vmprof to work cross-operating-system, deprecate using buggy + libunwind on Linux platforms. Vmprof even works on Windows now. + + * Support more of the C-API type slots, like tp_getattro, and fix C-API + macros, functions, and structs such as _PyLong_FromByteArray(), + PyString_GET_SIZE, f_locals in PyFrameObject, Py_NAN, co_filename in + PyCodeObject + + * Use a more stable approach for allocating PyObjects in cpyext. (see + `blog post`_). Once the PyObject corresponding to a PyPy object is created, + it stays around at the same location until the death of the PyPy object. + Done with a little bit of custom GC support. It allows us to kill the + notion of "borrowing" inside cpyext, reduces 4 dictionaries down to 1, and + significantly simplifies the whole approach (which is why it is a new + feature while technically a refactoring) and allows PyPy to support the + populart lxml module (as of the *next* release) with no PyPy specific + patches needed + + * Make the default filesystem encoding ASCII, like CPython + + * Use `hypothesis`_ in test creation, which is great for randomizing tests * Bug Fixes - * + * Backport always using os.urandom for uuid4 from cpython and fix the JIT as well + (issue #2202) - * + * More completely support datetime, optimize timedelta creation - * + * Fix for issue 2185 which caused an inconsistent list of operations to be + generated by the unroller, appeared in a complicated DJango app + + * Fix an elusive issue with stacklets on shadowstack which showed up when + forgetting stacklets without resuming them + + * Fix entrypoint() which now acquires the GIL + + * Fix direct_ffi_call() so failure does not bail out before setting CALL_MAY_FORCE + + * Fix (de)pickling long values by simplifying the implementation + + * Fix RPython rthread so that objects stored as threadlocal do not force minor + GC collection and are kept alive automatically. 
This improves perfomance of + short-running Python callbacks and prevents resetting such object between + calls + + * Support floats as parameters to itertools.isslice() + + * Check for the existence of CODESET, ignoring it should have prevented PyPy + from working on FreeBSD + + * Fix for corner case (likely shown by Krakatau) for consecutive guards with + interdependencies + + * Fix applevel bare class method comparisons which should fix pretty printing + in IPython * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy -* New features: - - * - - * - - * - * Numpy: - * + * Updates to numpy 1.10.2 (incompatibilities and not-implemented features + still exist) - * + * Support dtype=(('O', spec)) union while disallowing record arrays with + mixed object, non-object values + * Remove all traces of micronumpy from cpyext if --withoutmod-micronumpy option used + + * Support indexing filtering with a boolean ndarray + + * Support partition() as an app-level function, together with a cffi wrapper + in pypy/numpy, this now provides partial support for partition() * Performance improvements and refactorings: - * + * Refactor and improve exception analysis in the annotator - * + * Remove unnecessary special handling of space.wrap(). - * + * Improve the memory signature of numbering instances in the JIT. This should + massively decrease the amount of memory consumed by the JIT, which is + significant for most programs. Also compress the numberings using variable- + size encoding + + * Support list-resizing setslice operations in RPython + + * Tweak the trace-too-long heuristic for multiple jit drivers + + * Refactor bookkeeping (such a cool word - three double letters) in the + annotater + + * Refactor wrappers for OS functions from rtyper to rlib and simplify them + + * Simplify backend loading instructions to only use four variants + + * Optimize string concatination + + * Simplify GIL handling in non-jitted code + + * Use INT_LSHIFT instead of INT_MUL when possible + + * Improve struct.unpack by casting directly from the underlying buffer. + Unpacking floats and doubles is about 15 times faster, and integer types + about 50% faster (on 64 bit integers). This was then subsequently + improved further in optimizeopt.py. + + * Refactor naming in optimizeopt + + * Change GraphAnalyzer to use a more precise way to recognize external + functions and fix null pointer handling, generally clean up external + function handling + + * Optimize global lookups + + * Optimize two-tuple lookups in mapdict + + * Remove pure variants of ``getfield_gc_*`` operations from the JIT by + determining purity while tracing + + * Refactor databasing + + * Simplify bootstrapping in cpyext + + * Refactor rtyper debug code into python.rtyper.debug + + * Seperate structmember.h from Python.h Also enhance creating api functions + to specify which header file they appear in (previously only pypy_decl.h) + + * Reduce all guards from int_floordiv_ovf if one of the arguments is constant + + * Fix tokenizer to enforce universal newlines, needed for Python 3 support + + * Identify permutations of attributes at instance creation, reducing the + number of bridges created + + * Greatly improve re.sub() performance .. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html +.. _`hypothesis`: http://hypothesis.readthedocs.org +.. 
_`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html Please update, and continue to help us make PyPy better. diff --git a/pypy/doc/whatsnew-5.0.0.rst b/pypy/doc/whatsnew-5.0.0.rst --- a/pypy/doc/whatsnew-5.0.0.rst +++ b/pypy/doc/whatsnew-5.0.0.rst @@ -191,3 +191,7 @@ Fix boolean-array indexing in micronumpy +.. branch: numpy_partition +Support ndarray.partition() as an app-level function numpy.core._partition_use, +provided as a cffi wrapper to upstream's implementation in the pypy/numpy repo + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,6 +3,5 @@ ========================= .. this is a revision shortly after release-5.0.0 -.. startrev: 6d13e55b962a +.. startrev: 7bb6381d084c - diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -13,6 +13,9 @@ from pypy.interpreter.argument import Arguments from rpython.rlib import jit +from rpython.rlib.rarithmetic import LONG_BIT +from rpython.rlib.rbigint import rbigint + funccallunrolling = unrolling_iterable(range(4)) @@ -532,6 +535,26 @@ return space.w_False return space.eq(self.w_function, w_other.w_function) + def is_w(self, space, other): + if not isinstance(other, Method): + return False + return (self.w_instance is other.w_instance and + self.w_function is other.w_function and + self.w_class is other.w_class) + + def immutable_unique_id(self, space): + from pypy.objspace.std.util import IDTAG_METHOD as tag + from pypy.objspace.std.util import IDTAG_SHIFT + if self.w_instance is not None: + id = space.bigint_w(space.id(self.w_instance)) + id = id.lshift(LONG_BIT) + else: + id = rbigint.fromint(0) + id = id.or_(space.bigint_w(space.id(self.w_function))) + id = id.lshift(LONG_BIT).or_(space.bigint_w(space.id(self.w_class))) + id = id.lshift(IDTAG_SHIFT).int_or_(tag) + return space.newlong_from_rbigint(id) + def descr_method_hash(self): space = self.space w_result = space.hash(self.w_function) diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -1,5 +1,5 @@ # encoding: utf-8 -import unittest +import pytest from pypy.interpreter import eval from pypy.interpreter.function import Function, Method, descr_function_get from pypy.interpreter.pycode import PyCode @@ -220,6 +220,7 @@ raises( TypeError, func, 42, {'arg1': 23}) + @pytest.mark.skipif("config.option.runappdirect") def test_kwargs_nondict_mapping(self): class Mapping: def keys(self): @@ -290,6 +291,14 @@ meth = func.__get__(obj, object) assert meth() == obj + def test_none_get_interaction(self): + skip("XXX issue #2083") + assert type(None).__repr__(None) == 'None' + + def test_none_get_interaction_2(self): + f = None.__repr__ + assert f() == 'None' + def test_no_get_builtin(self): assert not hasattr(dir, '__get__') class A(object): @@ -317,6 +326,7 @@ raises(TypeError, len, s, some_unknown_keyword=s) raises(TypeError, len, s, s, some_unknown_keyword=s) + @pytest.mark.skipif("config.option.runappdirect") def test_call_error_message(self): try: len() @@ -362,6 +372,7 @@ f = lambda: 42 assert f.__doc__ is None + @pytest.mark.skipif("config.option.runappdirect") def test_setstate_called_with_wrong_args(self): f = lambda: 42 # not sure what it should raise, since CPython doesn't have setstate @@ -524,6 +535,37 @@ assert A().m == X() assert X() == A().m + 
@pytest.mark.skipif("config.option.runappdirect") + def test_method_identity(self): + class A(object): + def m(self): + pass + def n(self): + pass + + class B(A): + pass + + class X(object): + def __eq__(self, other): + return True + + a = A() + a2 = A() + assert a.m is a.m + assert id(a.m) == id(a.m) + assert a.m is not a.n + assert id(a.m) != id(a.n) + assert a.m is not a2.m + assert id(a.m) != id(a2.m) + + assert A.m is A.m + assert id(A.m) == id(A.m) + assert A.m is not A.n + assert id(A.m) != id(A.n) + assert A.m is not B.m + assert id(A.m) != id(B.m) + class TestMethod: def setup_method(self, method): diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py --- a/pypy/module/__builtin__/descriptor.py +++ b/pypy/module/__builtin__/descriptor.py @@ -108,6 +108,7 @@ W_Super.typedef = TypeDef( 'super', __new__ = interp2app(descr_new_super), + __thisclass__ = interp_attrproperty_w("w_starttype", W_Super), __getattribute__ = interp2app(W_Super.getattribute), __get__ = interp2app(W_Super.get), __doc__ = """super(type) -> unbound super object diff --git a/pypy/module/__builtin__/test/test_descriptor.py b/pypy/module/__builtin__/test/test_descriptor.py --- a/pypy/module/__builtin__/test/test_descriptor.py +++ b/pypy/module/__builtin__/test/test_descriptor.py @@ -242,7 +242,7 @@ c = C() assert C.goo(1) == (C, 1) assert c.goo(1) == (C, 1) - + assert c.foo(1) == (c, 1) class D(C): pass @@ -266,6 +266,17 @@ meth = classmethod(1).__get__(1) raises(TypeError, meth) + def test_super_thisclass(self): + class A(object): + pass + + assert super(A, A()).__thisclass__ is A + + class B(A): + pass + + assert super(B, B()).__thisclass__ is B + assert super(A, B()).__thisclass__ is A def test_property_docstring(self): assert property.__doc__.startswith('property') diff --git a/pypy/module/_collections/app_defaultdict.py b/pypy/module/_collections/app_defaultdict.py --- a/pypy/module/_collections/app_defaultdict.py +++ b/pypy/module/_collections/app_defaultdict.py @@ -12,6 +12,7 @@ class defaultdict(dict): __slots__ = ['default_factory'] + __module__ = 'collections' def __init__(self, *args, **kwds): if len(args) > 0: diff --git a/pypy/module/_collections/test/test_defaultdict.py b/pypy/module/_collections/test/test_defaultdict.py --- a/pypy/module/_collections/test/test_defaultdict.py +++ b/pypy/module/_collections/test/test_defaultdict.py @@ -16,6 +16,12 @@ d[5].append(44) assert l == [42, 43] and l2 == [44] + def test_module(self): + from _collections import defaultdict + assert repr(defaultdict) in ( + "", # on PyPy + "") # on CPython + def test_keyerror_without_factory(self): from _collections import defaultdict for d1 in [defaultdict(), defaultdict(None)]: diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -7,6 +7,7 @@ from pypy.interpreter.error import OperationError from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit +from rpython.rlib.rstring import StringBuilder, UnicodeBuilder # ____________________________________________________________ # @@ -37,6 +38,8 @@ if isinstance(ctx, rsre_core.BufMatchContext): return space.wrapbytes(ctx._buffer.getslice(start, end, 1, end-start)) + if isinstance(ctx, rsre_core.StrMatchContext): + return space.wrapbytes(ctx._string[start:end]) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr[start:end]) else: @@ -98,8 +101,8 @@ space.wrap("cannot copy this pattern object")) def 
make_ctx(self, w_string, pos=0, endpos=sys.maxint): - """Make a BufMatchContext or a UnicodeMatchContext for searching - in the given w_string object.""" + """Make a StrMatchContext, BufMatchContext or a UnicodeMatchContext for + searching in the given w_string object.""" space = self.space if pos < 0: pos = 0 @@ -117,6 +120,14 @@ endpos = len(unicodestr) return rsre_core.UnicodeMatchContext(self.code, unicodestr, pos, endpos, self.flags) + elif space.isinstance_w(w_string, space.w_str): + str = space.str_w(w_string) + if pos > len(str): + pos = len(str) + if endpos > len(str): + endpos = len(str) + return rsre_core.StrMatchContext(self.code, str, + pos, endpos, self.flags) else: buf = space.readbuf_w(w_string) if (not space.is_none(self.w_pattern) and @@ -224,6 +235,11 @@ def subx(self, w_ptemplate, w_string, count): space = self.space + # use a (much faster) string/unicode builder if w_ptemplate and + # w_string are both string or both unicode objects, and if w_ptemplate + # is a literal + use_builder = False + filter_as_unicode = filter_as_string = None if space.is_true(space.callable(w_ptemplate)): w_filter = w_ptemplate filter_is_callable = True @@ -231,6 +247,8 @@ if space.isinstance_w(w_ptemplate, space.w_unicode): filter_as_unicode = space.unicode_w(w_ptemplate) literal = u'\\' not in filter_as_unicode + use_builder = ( + space.isinstance_w(w_string, space.w_unicode) and literal) else: try: filter_as_string = space.bytes_w(w_ptemplate) @@ -240,6 +258,8 @@ literal = False else: literal = '\\' not in filter_as_string + use_builder = ( + space.isinstance_w(w_string, space.w_str) and literal) if literal: w_filter = w_ptemplate filter_is_callable = False @@ -250,19 +270,44 @@ space.wrap(self), w_ptemplate) filter_is_callable = space.is_true(space.callable(w_filter)) # + # XXX this is a bit of a mess, but it improves performance a lot ctx = self.make_ctx(w_string) - sublist_w = [] + sublist_w = strbuilder = unicodebuilder = None + if use_builder: + if filter_as_unicode is not None: + unicodebuilder = UnicodeBuilder(ctx.end) + else: + assert filter_as_string is not None + strbuilder = StringBuilder(ctx.end) + else: + sublist_w = [] n = last_pos = 0 while not count or n < count: + sub_jitdriver.jit_merge_point( + self=self, + use_builder=use_builder, + filter_is_callable=filter_is_callable, + filter_type=type(w_filter), + ctx=ctx, + w_filter=w_filter, + strbuilder=strbuilder, + unicodebuilder=unicodebuilder, + filter_as_string=filter_as_string, + filter_as_unicode=filter_as_unicode, + count=count, + w_string=w_string, + n=n, last_pos=last_pos, sublist_w=sublist_w + ) + space = self.space if not searchcontext(space, ctx): break if last_pos < ctx.match_start: - sublist_w.append(slice_w(space, ctx, last_pos, - ctx.match_start, space.w_None)) + _sub_append_slice( + ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, last_pos, ctx.match_start) start = ctx.match_end if start == ctx.match_start: start += 1 - nextctx = ctx.fresh_copy(start) if not (last_pos == ctx.match_start == ctx.match_end and n > 0): # the above ignores empty matches on latest position @@ -270,28 +315,71 @@ w_match = self.getmatch(ctx, True) w_piece = space.call_function(w_filter, w_match) if not space.is_w(w_piece, space.w_None): + assert strbuilder is None and unicodebuilder is None + assert not use_builder sublist_w.append(w_piece) else: - sublist_w.append(w_filter) + if use_builder: + if strbuilder is not None: + assert filter_as_string is not None + strbuilder.append(filter_as_string) + else: + assert 
unicodebuilder is not None + assert filter_as_unicode is not None + unicodebuilder.append(filter_as_unicode) + else: + sublist_w.append(w_filter) last_pos = ctx.match_end n += 1 elif last_pos >= ctx.end: break # empty match at the end: finished - ctx = nextctx + ctx.reset(start) if last_pos < ctx.end: - sublist_w.append(slice_w(space, ctx, last_pos, ctx.end, - space.w_None)) + _sub_append_slice(ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, last_pos, ctx.end) + if use_builder: + if strbuilder is not None: + return space.wrapbytes(strbuilder.build()), n + else: + assert unicodebuilder is not None + return space.wrap(unicodebuilder.build()), n + else: + if space.isinstance_w(w_string, space.w_unicode): + w_emptystr = space.wrap(u'') + else: + w_emptystr = space.wrapbytes('') + w_item = space.call_method(w_emptystr, 'join', + space.newlist(sublist_w)) + return w_item, n - if space.isinstance_w(w_string, space.w_unicode): - w_emptystr = space.wrap(u'') - else: - w_emptystr = space.wrapbytes('') - w_item = space.call_method(w_emptystr, 'join', - space.newlist(sublist_w)) - return w_item, n +sub_jitdriver = jit.JitDriver( + reds="""count n last_pos + ctx w_filter + strbuilder unicodebuilder + filter_as_string + filter_as_unicode + w_string sublist_w + self""".split(), + greens=["filter_is_callable", "use_builder", "filter_type", "ctx.pattern"]) +def _sub_append_slice(ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, start, end): + if use_builder: + if isinstance(ctx, rsre_core.BufMatchContext): + assert strbuilder is not None + return strbuilder.append(ctx._buffer.getslice(start, end, 1, end-start)) + if isinstance(ctx, rsre_core.StrMatchContext): + assert strbuilder is not None + return strbuilder.append_slice(ctx._string, start, end) + elif isinstance(ctx, rsre_core.UnicodeMatchContext): + assert unicodebuilder is not None + return unicodebuilder.append_slice(ctx._unicodestr, start, end) + assert 0, "unreachable" + else: + sublist_w.append(slice_w(space, ctx, start, end, space.w_None)) + @unwrap_spec(flags=int, groups=int, w_groupindex=WrappedDefault(None), w_indexgroup=WrappedDefault(None)) def SRE_Pattern__new__(space, w_subtype, w_pattern, flags, w_code, @@ -494,6 +582,8 @@ ctx = self.ctx if isinstance(ctx, rsre_core.BufMatchContext): return space.wrapbytes(ctx._buffer.as_str()) + elif isinstance(ctx, rsre_core.StrMatchContext): + return space.wrapbytes(ctx._string) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr) else: diff --git a/pypy/module/micronumpy/appbridge.py b/pypy/module/micronumpy/appbridge.py --- a/pypy/module/micronumpy/appbridge.py +++ b/pypy/module/micronumpy/appbridge.py @@ -9,6 +9,7 @@ w_array_repr = None w_array_str = None w__usefields = None + w_partition = None def __init__(self, space): pass diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -557,8 +557,12 @@ return self.get_scalar_value().item(space) l_w = [] for i in range(self.get_shape()[0]): - l_w.append(space.call_method(self.descr_getitem(space, - space.wrap(i)), "tolist")) + item_w = self.descr_getitem(space, space.wrap(i)) + if (isinstance(item_w, W_NDimArray) or + isinstance(item_w, boxes.W_GenericBox)): + l_w.append(space.call_method(item_w, "tolist")) + else: + l_w.append(item_w) return space.newlist(l_w) def descr_ravel(self, space, w_order=None): @@ -926,6 +930,10 @@ return return self.implementation.sort(space, w_axis, 
w_order) + def descr_partition(self, space, __args__): + return get_appbridge_cache(space).call_method( + space, 'numpy.core._partition_use', 'partition', __args__.prepend(self)) + def descr_squeeze(self, space, w_axis=None): cur_shape = self.get_shape() if not space.is_none(w_axis): @@ -1639,6 +1647,7 @@ argsort = interp2app(W_NDimArray.descr_argsort), sort = interp2app(W_NDimArray.descr_sort), + partition = interp2app(W_NDimArray.descr_partition), astype = interp2app(W_NDimArray.descr_astype), base = GetSetProperty(W_NDimArray.descr_get_base), byteswap = interp2app(W_NDimArray.descr_byteswap), diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -1936,6 +1936,22 @@ a = array([[1, 2], [3, 4]]) assert (a + a).tolist() == [[2, 4], [6, 8]] + def test_tolist_object(self): + from numpy import array + a = array([0], dtype=object) + assert a.tolist() == [0] + + def test_tolist_object_slice(self): + from numpy import array + list_expected = [slice(0, 1), 0] + a = array(list_expected, dtype=object) + assert a.tolist() == list_expected + + def test_tolist_object_slice_2d(self): + from numpy import array + a = array([(slice(0, 1), 1), (0, 1)], dtype=object) + assert a.tolist() == [[slice(0, 1, None), 1], [0, 1]] + def test_tolist_slice(self): from numpy import array a = array([[17.1, 27.2], [40.3, 50.3]]) diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -250,11 +250,12 @@ return None from rpython.rlib.longlong2float import float2longlong from pypy.objspace.std.util import IDTAG_COMPLEX as tag + from pypy.objspace.std.util import IDTAG_SHIFT real = space.float_w(space.getattr(self, space.wrap("real"))) imag = space.float_w(space.getattr(self, space.wrap("imag"))) real_b = rbigint.fromrarith_int(float2longlong(real)) imag_b = rbigint.fromrarith_int(r_ulonglong(float2longlong(imag))) - val = real_b.lshift(64).or_(imag_b).lshift(3).int_or_(tag) + val = real_b.lshift(64).or_(imag_b).lshift(IDTAG_SHIFT).int_or_(tag) return space.newlong_from_rbigint(val) def int(self, space): diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -191,9 +191,10 @@ return None from rpython.rlib.longlong2float import float2longlong from pypy.objspace.std.util import IDTAG_FLOAT as tag + from pypy.objspace.std.util import IDTAG_SHIFT val = float2longlong(space.float_w(self)) b = rbigint.fromrarith_int(val) - b = b.lshift(3).int_or_(tag) + b = b.lshift(IDTAG_SHIFT).int_or_(tag) return space.newlong_from_rbigint(b) def __repr__(self): diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -28,7 +28,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.objspace.std import newformat from pypy.objspace.std.util import ( - BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_INT, wrap_parsestringerror) + BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_INT, IDTAG_SHIFT, wrap_parsestringerror) SENTINEL = object() @@ -54,7 +54,7 @@ if self.user_overridden_class: return None b = space.bigint_w(self) - b = b.lshift(3).int_or_(IDTAG_INT) + b = b.lshift(IDTAG_SHIFT).int_or_(IDTAG_INT) return space.newlong_from_rbigint(b) @staticmethod diff --git 
a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -13,7 +13,8 @@ from pypy.objspace.std import newformat from pypy.objspace.std.intobject import ( HASH_BITS, HASH_MODULUS, W_AbstractIntObject, W_IntObject) -from pypy.objspace.std.util import COMMUTATIVE_OPS +from pypy.objspace.std.util import ( + BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_LONG, IDTAG_SHIFT, wrap_parsestringerror) def delegate_other(func): diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -160,14 +160,14 @@ def test_id_on_primitives(self): if self.cpython_apptest: skip("cpython behaves differently") - assert id(1) == (1 << 3) + 1 + assert id(1) == (1 << 4) + 1 class myint(int): pass assert id(myint(1)) != id(1) assert id(1.0) & 7 == 5 assert id(-0.0) != id(0.0) - assert hex(id(2.0)) == '0x20000000000000005' + assert hex(id(2.0)) == '0x40000000000000005' assert id(0.0) == 5 def test_id_on_strs(self): diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -2,11 +2,13 @@ from pypy.interpreter.error import OperationError +IDTAG_SHIFT = 4 IDTAG_INT = 1 IDTAG_LONG = 3 IDTAG_FLOAT = 5 IDTAG_COMPLEX = 7 +IDTAG_METHOD = 9 CMP_OPS = dict(lt='<', le='<=', eq='==', ne='!=', gt='>', ge='>=') BINARY_BITWISE_OPS = {'and': '&', 'lshift': '<<', 'or': '|', 'rshift': '>>', diff --git a/pypy/tool/release/force-builds.py b/pypy/tool/release/force-builds.py --- a/pypy/tool/release/force-builds.py +++ b/pypy/tool/release/force-builds.py @@ -9,7 +9,7 @@ modified by PyPy team """ -import os, sys, urllib +import os, sys, urllib, subprocess from twisted.internet import reactor, defer from twisted.python import log @@ -83,4 +83,9 @@ (options, args) = parser.parse_args() if not options.branch: parser.error("branch option required") + try: + subprocess.check_call(['hg','id','-r', options.branch]) + except subprocess.CalledProcessError: + print 'branch', options.branch, 'could not be found in local repository' + sys.exit(-1) main(options.branch, options.server, user=options.user) diff --git a/rpython/doc/translation.rst b/rpython/doc/translation.rst --- a/rpython/doc/translation.rst +++ b/rpython/doc/translation.rst @@ -33,7 +33,7 @@ The RPython translation toolchain never sees Python source code or syntax trees, but rather starts with the *code objects* that define the behaviour of the function objects one gives it as input. The -`bytecode evaluator`_ and the :ref:`flow graph builder` work through these +:ref:`flow graph builder` works through these code objects using `abstract interpretation`_ to produce a control flow graph (one per function): yet another representation of the source program, but one which is suitable for applying type inference @@ -85,7 +85,6 @@ .. _PDF color version: _static/translation.pdf -.. _bytecode evaluator: interpreter.html .. 
_abstract interpretation: http://en.wikipedia.org/wiki/Abstract_interpretation From pypy.commits at gmail.com Sun Mar 6 17:48:20 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 06 Mar 2016 14:48:20 -0800 (PST) Subject: [pypy-commit] extradoc extradoc: Talk proposal to EuroPython 2016 Message-ID: <56dcb3b4.c52f1c0a.aa093.1adc@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r5613:28ca300ed0f3 Date: 2016-03-06 23:48 +0100 http://bitbucket.org/pypy/extradoc/changeset/28ca300ed0f3/ Log: Talk proposal to EuroPython 2016 diff --git a/talk/ep2016/abstract-cffi.rst b/talk/ep2016/abstract-cffi.rst new file mode 100644 --- /dev/null +++ b/talk/ep2016/abstract-cffi.rst @@ -0,0 +1,47 @@ +CFFI: calling C from Python +=========================== + +Abstract (longer version) +------------------------- + +I will introduce CFFI, a way to call C libraries from Python. + + http://cffi.readthedocs.org/ + +CFFI was designed in 2012 to get away from Python's C extension modules, +which require hand-written CPython-specific C code. CFFI is arguably +simpler to use: you call C from Python directly, instead of going +through an intermediate layer. It is not tied to CPython's internals, +and works natively on two different Python implementations: CPython and +PyPy. It could be ported to more implementations. + +It is also a big success, according to the download statistics. Some +high-visibility projects like Cryptography have switched to it. + +Part of the motivation for developing CFFI is that it is a minimal layer +that allows direct access to C from Python, with no fixed intermediate C +API. It shares ideas from Cython, ctypes, and LuaJIT's ffi, but the +non-dependence on any fixed C API is a central point. + + +It is a possible solution to a problem that hits notably PyPy --- the +CPython C API. The CPython C API was great and, we can argue, it +contributed a lot to the present-day success of Python, together with +tools built on top of it like Cython and SWIG. However, it may be time +to look beyond it. This talk will thus present CFFI as such an example. +This independence is what lets CFFI work equally well on CPython and on +PyPy (and be very fast on the latter thanks to the JIT compiler). + + +Abstract (short version) +------------------------ + +In this talk, we will see an intro to CFFI, an alternative to using the +standard C API to extend Python. CFFI works on CPython and on PyPy. It +is a possible solution to a problem that hits notably PyPy --- the +CPython C API. + +The CPython C API was great and contributed to the present-day success +of Python, together with tools built on top of it like Cython and SWIG. +I will argue that it may be time to look beyond it, and present CFFI as +such an example. From pypy.commits at gmail.com Sun Mar 6 20:58:43 2016 From: pypy.commits at gmail.com (mjacob) Date: Sun, 06 Mar 2016 17:58:43 -0800 (PST) Subject: [pypy-commit] pypy py3k: Adapt method identity changes to py3k. Message-ID: <56dce053.500f1c0a.a3c89.6fda@mx.google.com> Author: Manuel Jacob Branch: py3k Changeset: r82834:12207b28f69e Date: 2016-03-07 02:47 +0100 http://bitbucket.org/pypy/pypy/changeset/12207b28f69e/ Log: Adapt method identity changes to py3k. 
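The id-tagging scheme being adapted here packs the ids of the instance and the
function of a bound method into one big integer and marks it with IDTAG_METHOD,
so that recomputing id() for the "same" bound method gives the same value. A
rough plain-Python sketch of that packing (not the actual RPython rbigint code;
LONG_BIT is assumed to be 64 here, and instance_id/function_id stand in for the
results of space.id(); when there is no instance, 0 is used for that half):

    LONG_BIT = 64        # assumed word size for this sketch
    IDTAG_SHIFT = 4      # values taken from pypy/objspace/std/util.py above
    IDTAG_METHOD = 9

    def method_id(instance_id, function_id):
        # (instance_id << LONG_BIT) | function_id, then shift left to make
        # room for the tag bits and mark the value as a "method" id
        packed = (instance_id << LONG_BIT) | function_id
        return (packed << IDTAG_SHIFT) | IDTAG_METHOD

    # recomputing the id from the same pair gives the same value, which is
    # what makes `a.m is a.m` and `id(a.m) == id(a.m)` hold
    assert method_id(0x1000, 0x2000) == method_id(0x1000, 0x2000)

With the class id no longer mixed in (the hunk below drops it), a method
reached through a subclass yields the same packed id as through the base
class, matching the updated `A.m is B.m` assertion in the test.
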
diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -539,8 +539,7 @@ if not isinstance(other, Method): return False return (self.w_instance is other.w_instance and - self.w_function is other.w_function and - self.w_class is other.w_class) + self.w_function is other.w_function) def immutable_unique_id(self, space): from pypy.objspace.std.util import IDTAG_METHOD as tag @@ -551,7 +550,6 @@ else: id = rbigint.fromint(0) id = id.or_(space.bigint_w(space.id(self.w_function))) - id = id.lshift(LONG_BIT).or_(space.bigint_w(space.id(self.w_class))) id = id.lshift(IDTAG_SHIFT).int_or_(tag) return space.newlong_from_rbigint(id) diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -563,8 +563,8 @@ assert id(A.m) == id(A.m) assert A.m is not A.n assert id(A.m) != id(A.n) - assert A.m is not B.m - assert id(A.m) != id(B.m) + assert A.m is B.m + assert id(A.m) == id(B.m) class TestMethod: From pypy.commits at gmail.com Sun Mar 6 20:58:45 2016 From: pypy.commits at gmail.com (mjacob) Date: Sun, 06 Mar 2016 17:58:45 -0800 (PST) Subject: [pypy-commit] pypy py3.3: hg merge py3k Message-ID: <56dce055.a3f6c20a.82b86.ffffef3e@mx.google.com> Author: Manuel Jacob Branch: py3.3 Changeset: r82835:a05bd4b30cfe Date: 2016-03-07 02:56 +0100 http://bitbucket.org/pypy/pypy/changeset/a05bd4b30cfe/ Log: hg merge py3k diff too long, truncating to 2000 out of 2700 lines diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -11,29 +11,29 @@ Amaury Forgeot d'Arc Antonio Cuni Samuele Pedroni + Matti Picus Alex Gaynor Brian Kearns - Matti Picus Philip Jenvey Michael Hudson + Ronan Lamy David Schneider + Manuel Jacob Holger Krekel Christian Tismer Hakan Ardo - Manuel Jacob - Ronan Lamy Benjamin Peterson + Richard Plangger Anders Chrigstrom Eric van Riet Paap Wim Lavrijsen - Richard Plangger Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen + Remi Meier Lukas Diekmann Sven Hager Anders Lehmann - Remi Meier Aurelien Campeas Niklaus Haldimann Camillo Bruni @@ -42,8 +42,8 @@ Romain Guillebert Leonardo Santagada Seo Sanghyeon + Ronny Pfannschmidt Justin Peel - Ronny Pfannschmidt David Edelsohn Anders Hammarquist Jakub Gustak @@ -65,6 +65,7 @@ Tyler Wade Michael Foord Stephan Diehl + Vincent Legoll Stefan Schwarzer Valentino Volonghi Tomek Meka @@ -75,9 +76,9 @@ Jean-Paul Calderone Timo Paulssen Squeaky + Marius Gedminas Alexandre Fayolle Simon Burton - Marius Gedminas Martin Matusiak Konstantin Lopuhin Wenzhu Man @@ -86,16 +87,20 @@ Ivan Sichmann Freitas Greg Price Dario Bertini + Stefano Rivera Mark Pearse Simon Cross Andreas Stührk - Stefano Rivera + Edd Barrett Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Jeremy Thurgood Paweł Piotr Przeradowski + Spenser Bauman Paul deGrandis Ilya Osadchiy + marky1991 Tobias Oberstein Adrian Kuhn Boris Feigin @@ -104,14 +109,12 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Edd Barrett + Tobias Pape Wanja Saatkamp Gerald Klix Mike Blume - Tobias Pape Oscar Nierstrasz Stefan H. 
Muller - Jeremy Thurgood Rami Chowdhury Eugene Oden Henry Mason @@ -123,6 +126,8 @@ Lukas Renggli Guenter Jantzen Ned Batchelder + Tim Felgentreff + Anton Gulenko Amit Regmi Ben Young Nicolas Chauvat @@ -132,12 +137,12 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira - Tim Felgentreff + Yichao Yu Rocco Moretti Gintautas Miliauskas Michael Twomey Lucian Branescu Mihaila - Yichao Yu + Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -161,33 +166,33 @@ Stanislaw Halik Mikael Schönenberg Berkin Ilbeyi - Elmo M?ntynen + Elmo Mäntynen + Faye Zhao Jonathan David Riehl Anders Qvist Corbin Simpson Chirag Jadwani Beatrice During Alex Perry - Vincent Legoll + Vaibhav Sood Alan McIntyre - Spenser Bauman + William Leslie Alexander Sedov Attila Gobi + Jasper.Schulz Christopher Pope - Devin Jeanpierre - Vaibhav Sood Christian Tismer Marc Abramowitz Dan Stromberg Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan + Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang - Anton Gulenko Gabriel Lukas Vacek Andrew Dalke @@ -195,6 +200,7 @@ Jakub Stasiak Nathan Taylor Vladimir Kryachko + Omer Katz Jacek Generowicz Alejandro J. Cura Jacob Oscarson @@ -209,6 +215,7 @@ Lars Wassermann Philipp Rustemeuer Henrik Vendelbo + Richard Lancaster Dan Buch Miguel de Val Borro Artur Lisiecki @@ -220,18 +227,18 @@ Tomo Cocoa Kim Jin Su Toni Mattis + Amber Brown Lucas Stadler Julian Berman Markus Holtermann roberto at goyle Yury V. Zaytsev Anna Katrina Dominguez - William Leslie Bobby Impollonia - Faye Zhao timo at eistee.fritz.box Andrew Thompson Yusei Tahara + Aaron Tubbs Ben Darnell Roberto De Ioris Juan Francisco Cantero Hurtado @@ -243,6 +250,7 @@ Christopher Armstrong Michael Hudson-Doyle Anders Sigfridsson + Nikolay Zinov Yasir Suhail Jason Michalski rafalgalczynski at gmail.com @@ -252,6 +260,7 @@ Gustavo Niemeyer Stephan Busemann Rafał Gałczyński + Matt Bogosian Christian Muirhead Berker Peksag James Lan @@ -286,9 +295,9 @@ Stefan Marr jiaaro Mads Kiilerich - Richard Lancaster opassembler.py Antony Lee + Jason Madden Yaroslav Fedevych Jim Hunziker Markus Unterwaditzer @@ -297,6 +306,7 @@ squeaky Zearin soareschen + Jonas Pfannschmidt Kurt Griffiths Mike Bayer Matthew Miller @@ -311,4 +321,3 @@ Julien Phalip Roman Podoliaka Dan Loewenherz - diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-5.0.0.rst release-4.0.1.rst release-4.0.0.rst release-2.6.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. toctree:: whatsnew-head.rst + whatsnew-5.0.0.rst whatsnew-4.0.1.rst whatsnew-4.0.0.rst whatsnew-2.6.1.rst diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-5.0.0.rst @@ -0,0 +1,226 @@ +========== +PyPy 5.0.0 +========== + +We have released PyPy 5.0.0, about three months after PyPy 4.0.0. +We encourage all users of PyPy to update to this version. Apart from the usual +bug fixes, there is an ongoing effort to improve the warmup time and memory +usage of JIT-related metadata. The exact effects depend vastly on the program +you're running and can range from insignificant to warmup being up to 30% +faster and memory dropping by about 30%. 
+ +We also merged a major upgrade to our C-API layer (cpyext), simplifying the +interaction between c-level objects and PyPy interpreter level objects. As a +result, lxml with its cython compiled component `passes all tests`_ on PyPy + +Users who have gotten used to vmprof_ on Linux, and those on other platforms +who have not yet tried its awesomeness, will be happy to hear that vmprof +now just works on MacOS and Windows too, in both PyPy (built-in support) and +CPython (as an installed module). + +You can download the PyPy 5.0.0 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + +CFFI +==== + +While not applicable only to PyPy, `cffi`_ is arguably our most significant +contribution to the python ecosystem. PyPy 5.0.0 ships with +`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a c program. + +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`cffi`: https://cffi.readthedocs.org +.. _`cffi-1.5.2`: http://cffi.readthedocs.org/en/latest/whatsnew.html#v1-5-2 +.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html +.. _`numpy`: https://bitbucket.org/pypy/numpy +.. _`passes all tests`: https://bitbucket.org/pypy/compatibility/wiki/lxml +.. _vmprof: http://vmprof.readthedocs.org + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. + +This release supports **x86** machines on most common operating systems +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, freebsd), +newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the +big- and little-endian variants of **ppc64** running Linux. + +.. _`pypy and cpython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Other Highlights (since 4.0.1 released in November 2015) +========================================================= + +* New features: + + * Support embedding PyPy in a C-program via cffi and static callbacks in cffi. + This deprecates the old method of embedding PyPy + + * Refactor vmprof to work cross-operating-system, deprecate using buggy + libunwind on Linux platforms. Vmprof even works on Windows now. + + * Support more of the C-API type slots, like tp_getattro, and fix C-API + macros, functions, and structs such as _PyLong_FromByteArray(), + PyString_GET_SIZE, f_locals in PyFrameObject, Py_NAN, co_filename in + PyCodeObject + + * Use a more stable approach for allocating PyObjects in cpyext. (see + `blog post`_). Once the PyObject corresponding to a PyPy object is created, + it stays around at the same location until the death of the PyPy object. + Done with a little bit of custom GC support. 
It allows us to kill the + notion of "borrowing" inside cpyext, reduces 4 dictionaries down to 1, and + significantly simplifies the whole approach (which is why it is a new + feature while technically a refactoring) and allows PyPy to support the + populart lxml module (as of the *next* release) with no PyPy specific + patches needed + + * Make the default filesystem encoding ASCII, like CPython + + * Use `hypothesis`_ in test creation, which is great for randomizing tests + +* Bug Fixes + + * Backport always using os.urandom for uuid4 from cpython and fix the JIT as well + (issue #2202) + + * More completely support datetime, optimize timedelta creation + + * Fix for issue 2185 which caused an inconsistent list of operations to be + generated by the unroller, appeared in a complicated DJango app + + * Fix an elusive issue with stacklets on shadowstack which showed up when + forgetting stacklets without resuming them + + * Fix entrypoint() which now acquires the GIL + + * Fix direct_ffi_call() so failure does not bail out before setting CALL_MAY_FORCE + + * Fix (de)pickling long values by simplifying the implementation + + * Fix RPython rthread so that objects stored as threadlocal do not force minor + GC collection and are kept alive automatically. This improves perfomance of + short-running Python callbacks and prevents resetting such object between + calls + + * Support floats as parameters to itertools.isslice() + + * Check for the existence of CODESET, ignoring it should have prevented PyPy + from working on FreeBSD + + * Fix for corner case (likely shown by Krakatau) for consecutive guards with + interdependencies + + * Fix applevel bare class method comparisons which should fix pretty printing + in IPython + + * Issues reported with our previous release were resolved_ after reports from users on + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy + +* Numpy: + + * Updates to numpy 1.10.2 (incompatibilities and not-implemented features + still exist) + + * Support dtype=(('O', spec)) union while disallowing record arrays with + mixed object, non-object values + + * Remove all traces of micronumpy from cpyext if --withoutmod-micronumpy option used + + * Support indexing filtering with a boolean ndarray + + * Support partition() as an app-level function, together with a cffi wrapper + in pypy/numpy, this now provides partial support for partition() + +* Performance improvements and refactorings: + + * Refactor and improve exception analysis in the annotator + + * Remove unnecessary special handling of space.wrap(). + + * Improve the memory signature of numbering instances in the JIT. This should + massively decrease the amount of memory consumed by the JIT, which is + significant for most programs. Also compress the numberings using variable- + size encoding + + * Support list-resizing setslice operations in RPython + + * Tweak the trace-too-long heuristic for multiple jit drivers + + * Refactor bookkeeping (such a cool word - three double letters) in the + annotater + + * Refactor wrappers for OS functions from rtyper to rlib and simplify them + + * Simplify backend loading instructions to only use four variants + + * Optimize string concatination + + * Simplify GIL handling in non-jitted code + + * Use INT_LSHIFT instead of INT_MUL when possible + + * Improve struct.unpack by casting directly from the underlying buffer. + Unpacking floats and doubles is about 15 times faster, and integer types + about 50% faster (on 64 bit integers). 
This was then subsequently + improved further in optimizeopt.py. + + * Refactor naming in optimizeopt + + * Change GraphAnalyzer to use a more precise way to recognize external + functions and fix null pointer handling, generally clean up external + function handling + + * Optimize global lookups + + * Optimize two-tuple lookups in mapdict + + * Remove pure variants of ``getfield_gc_*`` operations from the JIT by + determining purity while tracing + + * Refactor databasing + + * Simplify bootstrapping in cpyext + + * Refactor rtyper debug code into python.rtyper.debug + + * Seperate structmember.h from Python.h Also enhance creating api functions + to specify which header file they appear in (previously only pypy_decl.h) + + * Reduce all guards from int_floordiv_ovf if one of the arguments is constant + + * Fix tokenizer to enforce universal newlines, needed for Python 3 support + + * Identify permutations of attributes at instance creation, reducing the + number of bridges created + + * Greatly improve re.sub() performance + +.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html +.. _`hypothesis`: http://hypothesis.readthedocs.org +.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html + +Please update, and continue to help us make PyPy better. + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-5.0.0.rst copy from pypy/doc/whatsnew-head.rst copy to pypy/doc/whatsnew-5.0.0.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-5.0.0.rst @@ -1,6 +1,6 @@ -========================= -What's new in PyPy 4.1.+ -========================= +======================== +What's new in PyPy 5.0.0 +======================== .. this is a revision shortly after release-4.0.1 .. startrev: 4b5c840d0da2 @@ -183,4 +183,15 @@ .. branch: vlen-resume -Compress resume data, saving 10-20% of memory consumed by the JIT \ No newline at end of file +Compress resume data, saving 10-20% of memory consumed by the JIT + +.. branch: issue-2248 + +.. branch: ndarray-setitem-filtered + +Fix boolean-array indexing in micronumpy + +.. branch: numpy_partition +Support ndarray.partition() as an app-level function numpy.core._partition_use, +provided as a cffi wrapper to upstream's implementation in the pypy/numpy repo + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,186 +1,7 @@ ========================= -What's new in PyPy 4.1.+ +What's new in PyPy 5.0.+ ========================= -.. this is a revision shortly after release-4.0.1 -.. startrev: 4b5c840d0da2 +.. this is a revision shortly after release-5.0.0 +.. startrev: 7bb6381d084c -Fixed ``_PyLong_FromByteArray()``, which was buggy. - -Fixed a crash with stacklets (or greenlets) on non-Linux machines -which showed up if you forget stacklets without resuming them. - -.. branch: numpy-1.10 - -Fix tests to run cleanly with -A and start to fix micronumpy for upstream numpy -which is now 1.10.2 - -.. branch: osx-flat-namespace - -Fix the cpyext tests on OSX by linking with -flat_namespace - -.. branch: anntype - -Refactor and improve exception analysis in the annotator. - -.. branch: posita/2193-datetime-timedelta-integrals - -Fix issue #2193. ``isinstance(..., int)`` => ``isinstance(..., numbers.Integral)`` -to allow for alternate ``int``-like implementations (e.g., ``future.types.newint``) - -.. 
branch: faster-rstruct - -Improve the performace of struct.unpack, which now directly reads inside the -string buffer and directly casts the bytes to the appropriate type, when -allowed. Unpacking of floats and doubles is about 15 times faster now, while -for integer types it's up to ~50% faster for 64bit integers. - -.. branch: wrap-specialisation - -Remove unnecessary special handling of space.wrap(). - -.. branch: compress-numbering - -Improve the memory signature of numbering instances in the JIT. This should massively -decrease the amount of memory consumed by the JIT, which is significant for most programs. - -.. branch: fix-trace-too-long-heuristic - -Improve the heuristic when disable trace-too-long - -.. branch: fix-setslice-can-resize - -Make rlist's ll_listsetslice() able to resize the target list to help -simplify objspace/std/listobject.py. Was issue #2196. - -.. branch: anntype2 - -A somewhat random bunch of changes and fixes following up on branch 'anntype'. Highlights: - -- Implement @doubledispatch decorator and use it for intersection() and difference(). - -- Turn isinstance into a SpaceOperation - -- Create a few direct tests of the fundamental annotation invariant in test_model.py - -- Remove bookkeeper attribute from DictDef and ListDef. - -.. branch: cffi-static-callback - -.. branch: vecopt-absvalue - -- Enhancement. Removed vector fields from AbstractValue. - -.. branch: memop-simplify2 - -Simplification. Backends implement too many loading instructions, only having a slightly different interface. -Four new operations (gc_load/gc_load_indexed, gc_store/gc_store_indexed) replace all the -commonly known loading operations - -.. branch: more-rposix - -Move wrappers for OS functions from `rpython/rtyper` to `rpython/rlib` and -turn them into regular RPython functions. Most RPython-compatible `os.*` -functions are now directly accessible as `rpython.rposix.*`. - -.. branch: always-enable-gil - -Simplify a bit the GIL handling in non-jitted code. Fixes issue #2205. - -.. branch: flowspace-cleanups - -Trivial cleanups in flowspace.operation : fix comment & duplicated method - -.. branch: test-AF_NETLINK - -Add a test for pre-existing AF_NETLINK support. Was part of issue #1942. - -.. branch: small-cleanups-misc - -Trivial misc cleanups: typo, whitespace, obsolete comments - -.. branch: cpyext-slotdefs -.. branch: fix-missing-canraise -.. branch: whatsnew - -.. branch: fix-2211 - -Fix the cryptic exception message when attempting to use extended slicing -in rpython. Was issue #2211. - -.. branch: ec-keepalive - -Optimize the case where, in a new C-created thread, we keep invoking -short-running Python callbacks. (CFFI on CPython has a hack to achieve -the same result.) This can also be seen as a bug fix: previously, -thread-local objects would be reset between two such calls. - -.. branch: globals-quasiimmut - -Optimize global lookups. - -.. branch: cffi-static-callback-embedding - -Updated to CFFI 1.5, which supports a new way to do embedding. -Deprecates http://pypy.readthedocs.org/en/latest/embedding.html. - -.. branch: fix-cpython-ssl-tests-2.7 - -Fix SSL tests by importing cpython's patch - - -.. branch: remove-getfield-pure - -Remove pure variants of ``getfield_gc_*`` operations from the JIT. Relevant -optimizations instead consult the field descriptor to determine the purity of -the operation. 
Additionally, pure ``getfield`` operations are now handled -entirely by `rpython/jit/metainterp/optimizeopt/heap.py` rather than -`rpython/jit/metainterp/optimizeopt/pure.py`, which can result in better codegen -for traces containing a large number of pure getfield operations. - -.. branch: exctrans - -Try to ensure that no new functions get annotated during the 'source_c' phase. -Refactor sandboxing to operate at a higher level. - -.. branch: cpyext-bootstrap - -.. branch: vmprof-newstack - -Refactor vmprof to work cross-operating-system. - -.. branch: seperate-strucmember_h - -Seperate structmember.h from Python.h Also enhance creating api functions -to specify which header file they appear in (previously only pypy_decl.h) - -.. branch: llimpl - -Refactor register_external(), remove running_on_llinterp mechanism and -apply sandbox transform on externals at the end of annotation. - -.. branch: cffi-embedding-win32 - -.. branch: windows-vmprof-support - -vmprof should work on Windows. - - -.. branch: reorder-map-attributes - -When creating instances and adding attributes in several different orders -depending on some condition, the JIT would create too much code. This is now -fixed. - -.. branch: cpyext-gc-support-2 - -Improve CPython C API support, which means lxml now runs unmodified -(after removing pypy hacks, pending pull request) - -.. branch: look-inside-tuple-hash - -Look inside tuple hash, improving mdp benchmark - -.. branch: vlen-resume - -Compress resume data, saving 10-20% of memory consumed by the JIT \ No newline at end of file diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -13,6 +13,9 @@ from pypy.interpreter.argument import Arguments from rpython.rlib import jit +from rpython.rlib.rarithmetic import LONG_BIT +from rpython.rlib.rbigint import rbigint + funccallunrolling = unrolling_iterable(range(4)) @@ -547,6 +550,24 @@ return space.w_False return space.eq(self.w_function, w_other.w_function) + def is_w(self, space, other): + if not isinstance(other, Method): + return False + return (self.w_instance is other.w_instance and + self.w_function is other.w_function) + + def immutable_unique_id(self, space): + from pypy.objspace.std.util import IDTAG_METHOD as tag + from pypy.objspace.std.util import IDTAG_SHIFT + if self.w_instance is not None: + id = space.bigint_w(space.id(self.w_instance)) + id = id.lshift(LONG_BIT) + else: + id = rbigint.fromint(0) + id = id.or_(space.bigint_w(space.id(self.w_function))) + id = id.lshift(IDTAG_SHIFT).int_or_(tag) + return space.newlong_from_rbigint(id) + def descr_method_hash(self): space = self.space w_result = space.hash(self.w_function) diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -133,7 +133,7 @@ self.check(['-S', '-O', '--info'], {}, output_contains='translation') self.check(['-S', '-O', '--version'], {}, output_contains='Python') self.check(['-S', '-OV'], {}, output_contains='Python') - self.check(['--jit', 'foobar', '-S'], {}, sys_argv=[''], + self.check(['--jit', 'off', '-S'], {}, sys_argv=[''], run_stdin=True, no_site=1) self.check(['-c', 'pass'], {}, sys_argv=['-c'], run_command='pass') self.check(['-cpass'], {}, sys_argv=['-c'], run_command='pass') diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ 
b/pypy/interpreter/test/test_function.py @@ -1,5 +1,5 @@ # encoding: utf-8 -import unittest +import pytest from pypy.interpreter import eval from pypy.interpreter.function import Function, Method, descr_function_get from pypy.interpreter.pycode import PyCode @@ -232,6 +232,7 @@ raises( TypeError, func, 42, {'arg1': 23}) + @pytest.mark.skipif("config.option.runappdirect") def test_kwargs_nondict_mapping(self): class Mapping: def keys(self): @@ -302,6 +303,14 @@ meth = func.__get__(obj, object) assert meth() == obj + def test_none_get_interaction(self): + skip("XXX issue #2083") + assert type(None).__repr__(None) == 'None' + + def test_none_get_interaction_2(self): + f = None.__repr__ + assert f() == 'None' + def test_no_get_builtin(self): assert not hasattr(dir, '__get__') class A(object): @@ -329,6 +338,7 @@ raises(TypeError, len, s, some_unknown_keyword=s) raises(TypeError, len, s, s, some_unknown_keyword=s) + @pytest.mark.skipif("config.option.runappdirect") def test_call_error_message(self): try: len() @@ -374,6 +384,7 @@ f = lambda: 42 assert f.__doc__ is None + @pytest.mark.skipif("config.option.runappdirect") def test_setstate_called_with_wrong_args(self): f = lambda: 42 # not sure what it should raise, since CPython doesn't have setstate @@ -536,6 +547,37 @@ assert A().m == X() assert X() == A().m + @pytest.mark.skipif("config.option.runappdirect") + def test_method_identity(self): + class A(object): + def m(self): + pass + def n(self): + pass + + class B(A): + pass + + class X(object): + def __eq__(self, other): + return True + + a = A() + a2 = A() + assert a.m is a.m + assert id(a.m) == id(a.m) + assert a.m is not a.n + assert id(a.m) != id(a.n) + assert a.m is not a2.m + assert id(a.m) != id(a2.m) + + assert A.m is A.m + assert id(A.m) == id(A.m) + assert A.m is not A.n + assert id(A.m) != id(A.n) + assert A.m is B.m + assert id(A.m) == id(B.m) + class TestMethod: def setup_method(self, method): diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py --- a/pypy/module/__builtin__/descriptor.py +++ b/pypy/module/__builtin__/descriptor.py @@ -112,6 +112,7 @@ W_Super.typedef = TypeDef( 'super', __new__ = interp2app(descr_new_super), + __thisclass__ = interp_attrproperty_w("w_starttype", W_Super), __getattribute__ = interp2app(W_Super.getattribute), __get__ = interp2app(W_Super.get), __doc__ = """super(type) -> unbound super object diff --git a/pypy/module/__builtin__/test/test_descriptor.py b/pypy/module/__builtin__/test/test_descriptor.py --- a/pypy/module/__builtin__/test/test_descriptor.py +++ b/pypy/module/__builtin__/test/test_descriptor.py @@ -242,7 +242,7 @@ c = C() assert C.goo(1) == (C, 1) assert c.goo(1) == (C, 1) - + assert c.foo(1) == (c, 1) class D(C): pass @@ -266,6 +266,17 @@ meth = classmethod(1).__get__(1) raises(TypeError, meth) + def test_super_thisclass(self): + class A(object): + pass + + assert super(A, A()).__thisclass__ is A + + class B(A): + pass + + assert super(B, B()).__thisclass__ is B + assert super(A, B()).__thisclass__ is A def test_property_docstring(self): assert property.__doc__.startswith('property') diff --git a/pypy/module/_collections/app_defaultdict.py b/pypy/module/_collections/app_defaultdict.py --- a/pypy/module/_collections/app_defaultdict.py +++ b/pypy/module/_collections/app_defaultdict.py @@ -12,6 +12,7 @@ class defaultdict(dict): __slots__ = ['default_factory'] + __module__ = 'collections' def __init__(self, *args, **kwds): if len(args) > 0: diff --git 
a/pypy/module/_collections/test/test_defaultdict.py b/pypy/module/_collections/test/test_defaultdict.py --- a/pypy/module/_collections/test/test_defaultdict.py +++ b/pypy/module/_collections/test/test_defaultdict.py @@ -16,6 +16,12 @@ d[5].append(44) assert l == [42, 43] and l2 == [44] + def test_module(self): + from _collections import defaultdict + assert repr(defaultdict) in ( + "", # on PyPy + "") # on CPython + def test_keyerror_without_factory(self): from _collections import defaultdict for d1 in [defaultdict(), defaultdict(None)]: diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -7,6 +7,7 @@ from pypy.interpreter.error import OperationError from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit +from rpython.rlib.rstring import StringBuilder, UnicodeBuilder # ____________________________________________________________ # @@ -37,6 +38,8 @@ if isinstance(ctx, rsre_core.BufMatchContext): return space.wrapbytes(ctx._buffer.getslice(start, end, 1, end-start)) + if isinstance(ctx, rsre_core.StrMatchContext): + return space.wrapbytes(ctx._string[start:end]) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr[start:end]) else: @@ -98,8 +101,8 @@ space.wrap("cannot copy this pattern object")) def make_ctx(self, w_string, pos=0, endpos=sys.maxint): - """Make a BufMatchContext or a UnicodeMatchContext for searching - in the given w_string object.""" + """Make a StrMatchContext, BufMatchContext or a UnicodeMatchContext for + searching in the given w_string object.""" space = self.space if pos < 0: pos = 0 @@ -117,6 +120,14 @@ endpos = len(unicodestr) return rsre_core.UnicodeMatchContext(self.code, unicodestr, pos, endpos, self.flags) + elif space.isinstance_w(w_string, space.w_str): + str = space.str_w(w_string) + if pos > len(str): + pos = len(str) + if endpos > len(str): + endpos = len(str) + return rsre_core.StrMatchContext(self.code, str, + pos, endpos, self.flags) else: buf = space.readbuf_w(w_string) if (not space.is_none(self.w_pattern) and @@ -224,6 +235,11 @@ def subx(self, w_ptemplate, w_string, count): space = self.space + # use a (much faster) string/unicode builder if w_ptemplate and + # w_string are both string or both unicode objects, and if w_ptemplate + # is a literal + use_builder = False + filter_as_unicode = filter_as_string = None if space.is_true(space.callable(w_ptemplate)): w_filter = w_ptemplate filter_is_callable = True @@ -231,6 +247,8 @@ if space.isinstance_w(w_ptemplate, space.w_unicode): filter_as_unicode = space.unicode_w(w_ptemplate) literal = u'\\' not in filter_as_unicode + use_builder = ( + space.isinstance_w(w_string, space.w_unicode) and literal) else: try: filter_as_string = space.bytes_w(w_ptemplate) @@ -240,6 +258,8 @@ literal = False else: literal = '\\' not in filter_as_string + use_builder = ( + space.isinstance_w(w_string, space.w_str) and literal) if literal: w_filter = w_ptemplate filter_is_callable = False @@ -250,19 +270,44 @@ space.wrap(self), w_ptemplate) filter_is_callable = space.is_true(space.callable(w_filter)) # + # XXX this is a bit of a mess, but it improves performance a lot ctx = self.make_ctx(w_string) - sublist_w = [] + sublist_w = strbuilder = unicodebuilder = None + if use_builder: + if filter_as_unicode is not None: + unicodebuilder = UnicodeBuilder(ctx.end) + else: + assert filter_as_string is not None + strbuilder = StringBuilder(ctx.end) + else: + sublist_w = [] n = 
last_pos = 0 while not count or n < count: + sub_jitdriver.jit_merge_point( + self=self, + use_builder=use_builder, + filter_is_callable=filter_is_callable, + filter_type=type(w_filter), + ctx=ctx, + w_filter=w_filter, + strbuilder=strbuilder, + unicodebuilder=unicodebuilder, + filter_as_string=filter_as_string, + filter_as_unicode=filter_as_unicode, + count=count, + w_string=w_string, + n=n, last_pos=last_pos, sublist_w=sublist_w + ) + space = self.space if not searchcontext(space, ctx): break if last_pos < ctx.match_start: - sublist_w.append(slice_w(space, ctx, last_pos, - ctx.match_start, space.w_None)) + _sub_append_slice( + ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, last_pos, ctx.match_start) start = ctx.match_end if start == ctx.match_start: start += 1 - nextctx = ctx.fresh_copy(start) if not (last_pos == ctx.match_start == ctx.match_end and n > 0): # the above ignores empty matches on latest position @@ -270,28 +315,71 @@ w_match = self.getmatch(ctx, True) w_piece = space.call_function(w_filter, w_match) if not space.is_w(w_piece, space.w_None): + assert strbuilder is None and unicodebuilder is None + assert not use_builder sublist_w.append(w_piece) else: - sublist_w.append(w_filter) + if use_builder: + if strbuilder is not None: + assert filter_as_string is not None + strbuilder.append(filter_as_string) + else: + assert unicodebuilder is not None + assert filter_as_unicode is not None + unicodebuilder.append(filter_as_unicode) + else: + sublist_w.append(w_filter) last_pos = ctx.match_end n += 1 elif last_pos >= ctx.end: break # empty match at the end: finished - ctx = nextctx + ctx.reset(start) if last_pos < ctx.end: - sublist_w.append(slice_w(space, ctx, last_pos, ctx.end, - space.w_None)) + _sub_append_slice(ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, last_pos, ctx.end) + if use_builder: + if strbuilder is not None: + return space.wrapbytes(strbuilder.build()), n + else: + assert unicodebuilder is not None + return space.wrap(unicodebuilder.build()), n + else: + if space.isinstance_w(w_string, space.w_unicode): + w_emptystr = space.wrap(u'') + else: + w_emptystr = space.wrapbytes('') + w_item = space.call_method(w_emptystr, 'join', + space.newlist(sublist_w)) + return w_item, n - if space.isinstance_w(w_string, space.w_unicode): - w_emptystr = space.wrap(u'') - else: - w_emptystr = space.wrapbytes('') - w_item = space.call_method(w_emptystr, 'join', - space.newlist(sublist_w)) - return w_item, n +sub_jitdriver = jit.JitDriver( + reds="""count n last_pos + ctx w_filter + strbuilder unicodebuilder + filter_as_string + filter_as_unicode + w_string sublist_w + self""".split(), + greens=["filter_is_callable", "use_builder", "filter_type", "ctx.pattern"]) +def _sub_append_slice(ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, start, end): + if use_builder: + if isinstance(ctx, rsre_core.BufMatchContext): + assert strbuilder is not None + return strbuilder.append(ctx._buffer.getslice(start, end, 1, end-start)) + if isinstance(ctx, rsre_core.StrMatchContext): + assert strbuilder is not None + return strbuilder.append_slice(ctx._string, start, end) + elif isinstance(ctx, rsre_core.UnicodeMatchContext): + assert unicodebuilder is not None + return unicodebuilder.append_slice(ctx._unicodestr, start, end) + assert 0, "unreachable" + else: + sublist_w.append(slice_w(space, ctx, start, end, space.w_None)) + @unwrap_spec(flags=int, groups=int, w_groupindex=WrappedDefault(None), w_indexgroup=WrappedDefault(None)) def 
SRE_Pattern__new__(space, w_subtype, w_pattern, flags, w_code, @@ -494,6 +582,8 @@ ctx = self.ctx if isinstance(ctx, rsre_core.BufMatchContext): return space.wrapbytes(ctx._buffer.as_str()) + elif isinstance(ctx, rsre_core.StrMatchContext): + return space.wrapbytes(ctx._string) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr) else: diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,8 +29,8 @@ #define PY_VERSION "3.3.5" /* PyPy version as a string */ -#define PYPY_VERSION "4.1.0-alpha0" -#define PYPY_VERSION_NUM 0x04010000 +#define PYPY_VERSION "5.1.0-alpha0" +#define PYPY_VERSION_NUM 0x05010000 /* Defined to mean a PyPy where cpyext holds more regular references to PyObjects, e.g. staying alive as long as the internal PyPy object diff --git a/pypy/module/micronumpy/appbridge.py b/pypy/module/micronumpy/appbridge.py --- a/pypy/module/micronumpy/appbridge.py +++ b/pypy/module/micronumpy/appbridge.py @@ -9,6 +9,7 @@ w_array_repr = None w_array_str = None w__usefields = None + w_partition = None def __init__(self, space): pass diff --git a/pypy/module/micronumpy/concrete.py b/pypy/module/micronumpy/concrete.py --- a/pypy/module/micronumpy/concrete.py +++ b/pypy/module/micronumpy/concrete.py @@ -298,7 +298,14 @@ except IndexError: # not a single result chunks = self._prepare_slice_args(space, w_index) - return new_view(space, orig_arr, chunks) + copy = False + if isinstance(chunks[0], BooleanChunk): + # numpy compatibility + copy = True + w_ret = new_view(space, orig_arr, chunks) + if copy: + w_ret = w_ret.descr_copy(space, space.wrap(w_ret.get_order())) + return w_ret def descr_setitem(self, space, orig_arr, w_index, w_value): try: diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -22,7 +22,8 @@ from pypy.module.micronumpy.flagsobj import W_FlagsObject from pypy.module.micronumpy.strides import ( get_shape_from_iterable, shape_agreement, shape_agreement_multiple, - is_c_contiguous, is_f_contiguous, calc_strides, new_view) + is_c_contiguous, is_f_contiguous, calc_strides, new_view, BooleanChunk, + SliceChunk) from pypy.module.micronumpy.casting import can_cast_array from pypy.module.micronumpy.descriptor import get_dtype_cache @@ -204,7 +205,13 @@ if iter_shape is None: # w_index is a list of slices, return a view chunks = self.implementation._prepare_slice_args(space, w_index) - return new_view(space, self, chunks) + copy = False + if isinstance(chunks[0], BooleanChunk): + copy = True + w_ret = new_view(space, self, chunks) + if copy: + w_ret = w_ret.descr_copy(space, space.wrap(w_ret.get_order())) + return w_ret shape = res_shape + self.get_shape()[len(indexes):] w_res = W_NDimArray.from_shape(space, shape, self.get_dtype(), self.get_order(), w_instance=self) @@ -220,8 +227,24 @@ if iter_shape is None: # w_index is a list of slices chunks = self.implementation._prepare_slice_args(space, w_index) - view = new_view(space, self, chunks) - view.implementation.setslice(space, val_arr) + dim = -1 + view = self + for i, c in enumerate(chunks): + if isinstance(c, BooleanChunk): + dim = i + idx = c.w_idx + chunks.pop(i) + chunks.insert(0, SliceChunk(space.newslice(space.wrap(0), + space.w_None, space.w_None))) + break + if dim > 0: + view = self.implementation.swapaxes(space, self, 0, dim) + if dim >= 0: + 
view = new_view(space, self, chunks) + view.setitem_filter(space, idx, val_arr) + else: + view = new_view(space, self, chunks) + view.implementation.setslice(space, val_arr) return if support.product(iter_shape) == 0: return @@ -534,8 +557,12 @@ return self.get_scalar_value().item(space) l_w = [] for i in range(self.get_shape()[0]): - l_w.append(space.call_method(self.descr_getitem(space, - space.wrap(i)), "tolist")) + item_w = self.descr_getitem(space, space.wrap(i)) + if (isinstance(item_w, W_NDimArray) or + isinstance(item_w, boxes.W_GenericBox)): + l_w.append(space.call_method(item_w, "tolist")) + else: + l_w.append(item_w) return space.newlist(l_w) def descr_ravel(self, space, w_order=None): @@ -903,6 +930,10 @@ return return self.implementation.sort(space, w_axis, w_order) + def descr_partition(self, space, __args__): + return get_appbridge_cache(space).call_method( + space, 'numpy.core._partition_use', 'partition', __args__.prepend(self)) + def descr_squeeze(self, space, w_axis=None): cur_shape = self.get_shape() if not space.is_none(w_axis): @@ -1616,6 +1647,7 @@ argsort = interp2app(W_NDimArray.descr_argsort), sort = interp2app(W_NDimArray.descr_sort), + partition = interp2app(W_NDimArray.descr_partition), astype = interp2app(W_NDimArray.descr_astype), base = GetSetProperty(W_NDimArray.descr_get_base), byteswap = interp2app(W_NDimArray.descr_byteswap), diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py --- a/pypy/module/micronumpy/strides.py +++ b/pypy/module/micronumpy/strides.py @@ -97,22 +97,19 @@ # filter by axis dim filtr = chunks[dim] assert isinstance(filtr, BooleanChunk) + # XXX this creates a new array, and fails in setitem w_arr = w_arr.getitem_filter(space, filtr.w_idx, axis=dim) arr = w_arr.implementation chunks[dim] = SliceChunk(space.newslice(space.wrap(0), - space.wrap(-1), space.w_None)) + space.w_None, space.w_None)) r = calculate_slice_strides(space, arr.shape, arr.start, arr.get_strides(), arr.get_backstrides(), chunks) else: r = calculate_slice_strides(space, arr.shape, arr.start, arr.get_strides(), arr.get_backstrides(), chunks) shape, start, strides, backstrides = r - w_ret = W_NDimArray.new_slice(space, start, strides[:], backstrides[:], + return W_NDimArray.new_slice(space, start, strides[:], backstrides[:], shape[:], arr, w_arr) - if dim == 0: - # Do not return a view - return w_ret.descr_copy(space, space.wrap(w_ret.get_order())) - return w_ret @jit.unroll_safe def _extend_shape(old_shape, chunks): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -1936,6 +1936,22 @@ a = array([[1, 2], [3, 4]]) assert (a + a).tolist() == [[2, 4], [6, 8]] + def test_tolist_object(self): + from numpy import array + a = array([0], dtype=object) + assert a.tolist() == [0] + + def test_tolist_object_slice(self): + from numpy import array + list_expected = [slice(0, 1), 0] + a = array(list_expected, dtype=object) + assert a.tolist() == list_expected + + def test_tolist_object_slice_2d(self): + from numpy import array + a = array([(slice(0, 1), 1), (0, 1)], dtype=object) + assert a.tolist() == [[slice(0, 1, None), 1], [0, 1]] + def test_tolist_slice(self): from numpy import array a = array([[17.1, 27.2], [40.3, 50.3]]) @@ -2541,8 +2557,10 @@ assert b.base is None b = a[:, np.array([True, False, True])] assert b.base is not None + a[np.array([True, False]), 0] = 100 b = a[np.array([True, 
False]), 0] - assert (b ==[0]).all() + assert b.shape == (1,) + assert (b ==[100]).all() def test_scalar_indexing(self): import numpy as np diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py --- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py +++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py @@ -68,9 +68,12 @@ pipe.returncode,)) if stderr.startswith('SKIP:'): py.test.skip(stderr) - if stderr.startswith('debug_alloc.h:'): # lldebug builds - stderr = '' + #if stderr.startswith('debug_alloc.h:'): # lldebug builds + # stderr = '' #assert not stderr + if stderr: + print '*** stderr of the subprocess: ***' + print stderr # if discard_stdout_before_last_line: stdout = stdout.splitlines(True)[-1] diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (4, 1, 0, "alpha", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (5, 1, 0, "alpha", 0) #XXX # sync patchlevel.h import pypy diff --git a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py --- a/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py +++ b/pypy/module/test_lib_pypy/cffi_tests/cffi0/test_zintegration.py @@ -12,7 +12,9 @@ def create_venv(name): tmpdir = udir.join(name) try: - subprocess.check_call(['virtualenv', '--distribute', + subprocess.check_call(['virtualenv', + #'--never-download', <= could be added, but causes failures + # in random cases on random machines '-p', os.path.abspath(sys.executable), str(tmpdir)]) except OSError as e: diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add1-test.c @@ -1,3 +1,4 @@ +/* Generated by pypy/tool/import_cffi.py */ #include extern int add1(int, int); diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add2-test.c @@ -1,3 +1,4 @@ +/* Generated by pypy/tool/import_cffi.py */ #include extern int add1(int, int); diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/add_recursive-test.c @@ -1,3 +1,4 @@ +/* Generated by pypy/tool/import_cffi.py */ #include #ifdef _MSC_VER diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/perf-test.c @@ -1,10 +1,12 @@ +/* Generated by pypy/tool/import_cffi.py */ #include #include #include #ifdef PTEST_USE_THREAD # include -# include -static sem_t done; +static pthread_mutex_t mutex1 = PTHREAD_MUTEX_INITIALIZER; +static pthread_cond_t cond1 = PTHREAD_COND_INITIALIZER; +static int remaining; #endif @@ -54,8 +56,11 @@ printf("time per call: %.3g\n", t); #ifdef PTEST_USE_THREAD 
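The perf-test.c hunks here drop the POSIX unnamed semaphore (which, as the thread-test.h hunk below notes, is not usable on OS/X) and instead count a `remaining` variable down under a mutex, signalling a condition variable when it reaches zero. Purely for illustration, the same wait-for-N-workers pattern sketched with Python's threading module (the worker count and body are made up):

    import threading

    NUM_WORKERS = 4
    remaining = NUM_WORKERS
    cond = threading.Condition()

    def start_routine():
        global remaining
        # ... the actual work would go here ...
        with cond:                    # pthread_mutex_lock(&mutex1)
            remaining -= 1
            if remaining == 0:
                cond.notify()         # pthread_cond_signal(&cond1)

    threads = [threading.Thread(target=start_routine)
               for _ in range(NUM_WORKERS)]
    for t in threads:
        t.start()
    with cond:
        while remaining:              # loop guards against spurious wakeups
            cond.wait()               # pthread_cond_wait(&cond1, &mutex1)
    for t in threads:
        t.join()
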
- int status = sem_post(&done); - assert(status == 0); + pthread_mutex_lock(&mutex1); + remaining -= 1; + if (!remaining) + pthread_cond_signal(&cond1); + pthread_mutex_unlock(&mutex1); #endif return arg; @@ -68,19 +73,19 @@ start_routine(0); #else pthread_t th; - int i, status = sem_init(&done, 0, 0); - assert(status == 0); + int i, status; add1(0, 0); /* this is the main thread */ + remaining = PTEST_USE_THREAD; for (i = 0; i < PTEST_USE_THREAD; i++) { status = pthread_create(&th, NULL, start_routine, NULL); assert(status == 0); } - for (i = 0; i < PTEST_USE_THREAD; i++) { - status = sem_wait(&done); - assert(status == 0); - } + pthread_mutex_lock(&mutex1); + while (remaining) + pthread_cond_wait(&cond1, &mutex1); + pthread_mutex_unlock(&mutex1); #endif return 0; } diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/test_basic.py @@ -33,8 +33,12 @@ pythonpath.insert(0, cffi_base) return os.pathsep.join(pythonpath) -def setup_module(mod): - mod.org_env = os.environ.copy() +def copy_away_env(): + global org_env + try: + org_env + except NameError: + org_env = os.environ.copy() class EmbeddingTests: @@ -122,6 +126,7 @@ os.chdir(curdir) def patch_environment(self): + copy_away_env() path = self.get_path() # for libpypy-c.dll or Python27.dll path = os.path.split(sys.executable)[0] + os.path.pathsep + path diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread-test.h @@ -1,10 +1,45 @@ +/* Generated by pypy/tool/import_cffi.py */ /************************************************************/ #ifndef _MSC_VER /************************************************************/ #include -#include + +/* don't include , it is not available on OS/X */ + +typedef struct { + pthread_mutex_t mutex1; + pthread_cond_t cond1; + unsigned int value; +} sem_t; + +static int sem_init(sem_t *sem, int pshared, unsigned int value) +{ + assert(pshared == 0); + sem->value = value; + return (pthread_mutex_init(&sem->mutex1, NULL) || + pthread_cond_init(&sem->cond1, NULL)); +} + +static int sem_post(sem_t *sem) +{ + pthread_mutex_lock(&sem->mutex1); + sem->value += 1; + pthread_cond_signal(&sem->cond1); + pthread_mutex_unlock(&sem->mutex1); + return 0; +} + +static int sem_wait(sem_t *sem) +{ + pthread_mutex_lock(&sem->mutex1); + while (sem->value == 0) + pthread_cond_wait(&sem->cond1, &sem->mutex1); + sem->value -= 1; + pthread_mutex_unlock(&sem->mutex1); + return 0; +} /************************************************************/ @@ -22,7 +57,7 @@ typedef HANDLE sem_t; typedef HANDLE pthread_t; -int sem_init(sem_t *sem, int pshared, unsigned int value) +static int sem_init(sem_t *sem, int pshared, unsigned int value) { assert(pshared == 0); assert(value == 0); @@ -30,26 +65,26 @@ return *sem ? 0 : -1; } -int sem_post(sem_t *sem) +static int sem_post(sem_t *sem) { return ReleaseSemaphore(*sem, 1, NULL) ? 
0 : -1; } -int sem_wait(sem_t *sem) +static int sem_wait(sem_t *sem) { WaitForSingleObject(*sem, INFINITE); return 0; } -DWORD WINAPI myThreadProc(LPVOID lpParameter) +static DWORD WINAPI myThreadProc(LPVOID lpParameter) { void *(* start_routine)(void *) = (void *(*)(void *))lpParameter; start_routine(NULL); return 0; } -int pthread_create(pthread_t *thread, void *attr, - void *start_routine(void *), void *arg) +static int pthread_create(pthread_t *thread, void *attr, + void *start_routine(void *), void *arg) { assert(arg == NULL); *thread = CreateThread(NULL, 0, myThreadProc, start_routine, 0, NULL); diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread1-test.c @@ -1,3 +1,4 @@ +/* Generated by pypy/tool/import_cffi.py */ #include #include #include "thread-test.h" diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread2-test.c @@ -1,3 +1,4 @@ +/* Generated by pypy/tool/import_cffi.py */ #include #include #include "thread-test.h" diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/thread3-test.c @@ -1,3 +1,4 @@ +/* Generated by pypy/tool/import_cffi.py */ #include #include #include "thread-test.h" diff --git a/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c b/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c --- a/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c +++ b/pypy/module/test_lib_pypy/cffi_tests/embedding/tlocal-test.c @@ -1,3 +1,4 @@ +/* Generated by pypy/tool/import_cffi.py */ #include #include #include "thread-test.h" diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py --- a/pypy/objspace/std/celldict.py +++ b/pypy/objspace/std/celldict.py @@ -64,6 +64,9 @@ def setitem_str(self, w_dict, key, w_value): cell = self.getdictvalue_no_unwrapping(w_dict, key) + return self._setitem_str_cell_known(cell, w_dict, key, w_value) + + def _setitem_str_cell_known(self, cell, w_dict, key, w_value): w_value = write_cell(self.space, cell, w_value) if w_value is None: return @@ -74,10 +77,11 @@ space = self.space if space.is_w(space.type(w_key), space.w_unicode): key = space.str_w(w_key) - w_result = self.getitem_str(w_dict, key) + cell = self.getdictvalue_no_unwrapping(w_dict, key) + w_result = unwrap_cell(self.space, cell) if w_result is not None: return w_result - self.setitem_str(w_dict, key, w_default) + self._setitem_str_cell_known(cell, w_dict, key, w_default) return w_default else: self.switch_to_object_strategy(w_dict) diff --git a/pypy/objspace/std/complexobject.py b/pypy/objspace/std/complexobject.py --- a/pypy/objspace/std/complexobject.py +++ b/pypy/objspace/std/complexobject.py @@ -250,11 +250,12 @@ return None from rpython.rlib.longlong2float import float2longlong from pypy.objspace.std.util import IDTAG_COMPLEX as tag + from pypy.objspace.std.util import IDTAG_SHIFT real = space.float_w(space.getattr(self, space.wrap("real"))) imag = space.float_w(space.getattr(self, space.wrap("imag"))) real_b = 
rbigint.fromrarith_int(float2longlong(real)) imag_b = rbigint.fromrarith_int(r_ulonglong(float2longlong(imag))) - val = real_b.lshift(64).or_(imag_b).lshift(3).int_or_(tag) + val = real_b.lshift(64).or_(imag_b).lshift(IDTAG_SHIFT).int_or_(tag) return space.newlong_from_rbigint(val) def int(self, space): diff --git a/pypy/objspace/std/floatobject.py b/pypy/objspace/std/floatobject.py --- a/pypy/objspace/std/floatobject.py +++ b/pypy/objspace/std/floatobject.py @@ -170,15 +170,11 @@ return self.floatval def int(self, space): + # this is a speed-up only, for space.int(w_float). if (type(self) is not W_FloatObject and space.is_overloaded(self, space.w_float, '__int__')): return W_Root.int(self, space) - try: - value = ovfcheck_float_to_int(self.floatval) - except OverflowError: - return newlong_from_float(space, self.floatval) - else: - return space.newint(value) + return self.descr_trunc(space) def is_w(self, space, w_other): from rpython.rlib.longlong2float import float2longlong @@ -195,9 +191,10 @@ return None from rpython.rlib.longlong2float import float2longlong from pypy.objspace.std.util import IDTAG_FLOAT as tag + from pypy.objspace.std.util import IDTAG_SHIFT val = float2longlong(space.float_w(self)) b = rbigint.fromrarith_int(val) - b = b.lshift(3).int_or_(tag) + b = b.lshift(IDTAG_SHIFT).int_or_(tag) return space.newlong_from_rbigint(b) def __repr__(self): @@ -417,11 +414,10 @@ return W_FloatObject(a) def descr_trunc(self, space): - whole = math.modf(self.floatval)[1] try: - value = ovfcheck_float_to_int(whole) + value = ovfcheck_float_to_int(self.floatval) except OverflowError: - return newlong_from_float(space, whole) + return newlong_from_float(space, self.floatval) else: return space.newint(value) @@ -656,7 +652,7 @@ __hash__ = interp2app(W_FloatObject.descr_hash), __format__ = interp2app(W_FloatObject.descr_format), __bool__ = interp2app(W_FloatObject.descr_bool), - __int__ = interp2app(W_FloatObject.int), + __int__ = interp2app(W_FloatObject.descr_trunc), __float__ = interp2app(W_FloatObject.descr_float), __trunc__ = interp2app(W_FloatObject.descr_trunc), __neg__ = interp2app(W_FloatObject.descr_neg), diff --git a/pypy/objspace/std/intobject.py b/pypy/objspace/std/intobject.py --- a/pypy/objspace/std/intobject.py +++ b/pypy/objspace/std/intobject.py @@ -28,7 +28,7 @@ from pypy.interpreter.typedef import TypeDef from pypy.objspace.std import newformat from pypy.objspace.std.util import ( - BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_INT, wrap_parsestringerror) + BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_INT, IDTAG_SHIFT, wrap_parsestringerror) SENTINEL = object() @@ -54,7 +54,7 @@ if self.user_overridden_class: return None b = space.bigint_w(self) - b = b.lshift(3).int_or_(IDTAG_INT) + b = b.lshift(IDTAG_SHIFT).int_or_(IDTAG_INT) return space.newlong_from_rbigint(b) @staticmethod diff --git a/pypy/objspace/std/longobject.py b/pypy/objspace/std/longobject.py --- a/pypy/objspace/std/longobject.py +++ b/pypy/objspace/std/longobject.py @@ -13,7 +13,8 @@ from pypy.objspace.std import newformat from pypy.objspace.std.intobject import ( HASH_BITS, HASH_MODULUS, W_AbstractIntObject, W_IntObject) -from pypy.objspace.std.util import COMMUTATIVE_OPS +from pypy.objspace.std.util import ( + BINARY_OPS, CMP_OPS, COMMUTATIVE_OPS, IDTAG_LONG, IDTAG_SHIFT, wrap_parsestringerror) def delegate_other(func): diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py --- a/pypy/objspace/std/setobject.py +++ b/pypy/objspace/std/setobject.py @@ -1056,7 +1056,7 @@ if self is 
w_other.strategy: strategy = self if w_set.length() > w_other.length(): - # swap operants + # swap operands storage = self._intersect_unwrapped(w_other, w_set) else: storage = self._intersect_unwrapped(w_set, w_other) @@ -1066,7 +1066,7 @@ else: strategy = self.space.fromcache(ObjectSetStrategy) if w_set.length() > w_other.length(): - # swap operants + # swap operands storage = w_other.strategy._intersect_wrapped(w_other, w_set) else: storage = self._intersect_wrapped(w_set, w_other) diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py --- a/pypy/objspace/std/test/test_celldict.py +++ b/pypy/objspace/std/test/test_celldict.py @@ -114,22 +114,11 @@ class TestModuleDictImplementation(BaseTestRDictImplementation): StrategyClass = ModuleDictStrategy - -class TestModuleDictImplementationWithBuiltinNames(BaseTestRDictImplementation): - StrategyClass = ModuleDictStrategy - - string = "int" - string2 = "isinstance" - + setdefault_hash_count = 2 class TestDevolvedModuleDictImplementation(BaseTestDevolvedDictImplementation): StrategyClass = ModuleDictStrategy - -class TestDevolvedModuleDictImplementationWithBuiltinNames(BaseTestDevolvedDictImplementation): - StrategyClass = ModuleDictStrategy - - string = "int" - string2 = "isinstance" + setdefault_hash_count = 2 class AppTestCellDict(object): diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py --- a/pypy/objspace/std/test/test_dictmultiobject.py +++ b/pypy/objspace/std/test/test_dictmultiobject.py @@ -1324,6 +1324,9 @@ impl.setitem(x, x) assert type(impl.get_strategy()) is ObjectDictStrategy + + setdefault_hash_count = 1 + def test_setdefault_fast(self): on_pypy = "__pypy__" in sys.builtin_module_names impl = self.impl @@ -1331,11 +1334,11 @@ x = impl.setdefault(key, 1) assert x == 1 if on_pypy and self.FakeString is FakeString: - assert key.hash_count == 1 + assert key.hash_count == self.setdefault_hash_count x = impl.setdefault(key, 2) assert x == 1 if on_pypy and self.FakeString is FakeString: - assert key.hash_count == 2 + assert key.hash_count == self.setdefault_hash_count + 1 def test_fallback_evil_key(self): class F(object): diff --git a/pypy/objspace/std/test/test_kwargsdict.py b/pypy/objspace/std/test/test_kwargsdict.py --- a/pypy/objspace/std/test/test_kwargsdict.py +++ b/pypy/objspace/std/test/test_kwargsdict.py @@ -119,10 +119,16 @@ def test_delitem(self): pass # delitem devolves for now + def test_setdefault_fast(self): + pass # not based on hashing at all + class TestDevolvedKwargsDictImplementation(BaseTestDevolvedDictImplementation): get_impl = get_impl StrategyClass = KwargsDictStrategy + def test_setdefault_fast(self): + pass # not based on hashing at all + class AppTestKwargsDictStrategy(object): def setup_class(cls): diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -170,14 +170,14 @@ def test_id_on_primitives(self): if self.cpython_apptest: skip("cpython behaves differently") - assert id(1) == (1 << 3) + 1 + assert id(1) == (1 << 4) + 1 class myint(int): pass assert id(myint(1)) != id(1) assert id(1.0) & 7 == 5 assert id(-0.0) != id(0.0) - assert hex(id(2.0)) == '0x20000000000000005' + assert hex(id(2.0)) == '0x40000000000000005' assert id(0.0) == 5 def test_id_on_strs(self): diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -3,11 
+3,13 @@ from pypy.interpreter.error import OperationError from pypy.interpreter import gateway +IDTAG_SHIFT = 4 IDTAG_INT = 1 IDTAG_LONG = 3 IDTAG_FLOAT = 5 IDTAG_COMPLEX = 7 +IDTAG_METHOD = 9 CMP_OPS = dict(lt='<', le='<=', eq='==', ne='!=', gt='>', ge='>=') BINARY_BITWISE_OPS = {'and': '&', 'lshift': '<<', 'or': '|', 'rshift': '>>', diff --git a/pypy/tool/import_cffi.py b/pypy/tool/import_cffi.py --- a/pypy/tool/import_cffi.py +++ b/pypy/tool/import_cffi.py @@ -7,11 +7,18 @@ import sys, py -def mangle(lines): - yield "# Generated by pypy/tool/import_cffi.py\n" - for line in lines: - line = line.replace('from testing', 'from pypy.module.test_lib_pypy.cffi_tests') - yield line +def mangle(lines, ext): + if ext == '.py': + yield "# Generated by pypy/tool/import_cffi.py\n" + for line in lines: + line = line.replace('from testing', 'from pypy.module.test_lib_pypy.cffi_tests') + yield line + elif ext in ('.c', '.h'): + yield "/* Generated by pypy/tool/import_cffi.py */\n" + for line in lines: + yield line + else: + raise AssertionError(ext) def main(cffi_dir): cffi_dir = py.path.local(cffi_dir) @@ -23,10 +30,12 @@ for p in (list(cffi_dir.join('cffi').visit(fil='*.py')) + list(cffi_dir.join('cffi').visit(fil='*.h'))): cffi_dest.join('..', p.relto(cffi_dir)).write(p.read()) - for p in cffi_dir.join('testing').visit(fil='*.py'): + for p in (list(cffi_dir.join('testing').visit(fil='*.py')) + + list(cffi_dir.join('testing').visit(fil='*.h')) + + list(cffi_dir.join('testing').visit(fil='*.c'))): path = test_dest.join(p.relto(cffi_dir.join('testing'))) path.join('..').ensure(dir=1) - path.write(''.join(mangle(p.readlines()))) + path.write(''.join(mangle(p.readlines(), p.ext))) if __name__ == '__main__': if len(sys.argv) != 2: diff --git a/pypy/tool/release/force-builds.py b/pypy/tool/release/force-builds.py --- a/pypy/tool/release/force-builds.py +++ b/pypy/tool/release/force-builds.py @@ -9,7 +9,7 @@ modified by PyPy team """ -import os, sys, urllib +import os, sys, urllib, subprocess from twisted.internet import reactor, defer from twisted.python import log @@ -83,4 +83,9 @@ (options, args) = parser.parse_args() if not options.branch: parser.error("branch option required") + try: + subprocess.check_call(['hg','id','-r', options.branch]) + except subprocess.CalledProcessError: + print 'branch', options.branch, 'could not be found in local repository' + sys.exit(-1) main(options.branch, options.server, user=options.user) diff --git a/rpython/doc/translation.rst b/rpython/doc/translation.rst --- a/rpython/doc/translation.rst +++ b/rpython/doc/translation.rst @@ -33,7 +33,7 @@ The RPython translation toolchain never sees Python source code or syntax trees, but rather starts with the *code objects* that define the behaviour of the function objects one gives it as input. The -`bytecode evaluator`_ and the :ref:`flow graph builder` work through these +:ref:`flow graph builder` works through these code objects using `abstract interpretation`_ to produce a control flow graph (one per function): yet another representation of the source program, but one which is suitable for applying type inference @@ -85,7 +85,6 @@ .. _PDF color version: _static/translation.pdf -.. _bytecode evaluator: interpreter.html .. 
_abstract interpretation: http://en.wikipedia.org/wiki/Abstract_interpretation diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -548,7 +548,9 @@ if cpu.supports_floats: def func(f0, f1, f2, f3, f4, f5, f6, i0, f7, i1, f8, f9): + seen.append((f0, f1, f2, f3, f4, f5, f6, i0, f7, i1, f8, f9)) return f0 + f1 + f2 + f3 + f4 + f5 + f6 + float(i0 + i1) + f7 + f8 + f9 + seen = [] F = lltype.Float I = lltype.Signed FUNC = self.FuncType([F] * 7 + [I] + [F] + [I] + [F]* 2, F) @@ -557,13 +559,15 @@ calldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, EffectInfo.MOST_GENERAL) funcbox = self.get_funcbox(cpu, func_ptr) - args = ([boxfloat(.1) for i in range(7)] + - [InputArgInt(1), boxfloat(.2), InputArgInt(2), boxfloat(.3), - boxfloat(.4)]) + args = ([boxfloat(.0), boxfloat(.1), boxfloat(.2), boxfloat(.3), + boxfloat(.4), boxfloat(.5), boxfloat(.6), + InputArgInt(1), boxfloat(.7), InputArgInt(2), boxfloat(.8), + boxfloat(.9)]) res = self.execute_operation(rop.CALL_F, [funcbox] + args, From pypy.commits at gmail.com Mon Mar 7 02:21:01 2016 From: pypy.commits at gmail.com (plan_rich) Date: Sun, 06 Mar 2016 23:21:01 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: r.length_loc does not exist! removed "r." Message-ID: <56dd2bdd.c74fc20a.1869a.3a48@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82836:04dc68d8235f Date: 2016-03-07 08:20 +0100 http://bitbucket.org/pypy/pypy/changeset/04dc68d8235f/ Log: r.length_loc does not exist! removed "r." diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -901,7 +901,7 @@ jlt_location = self.mc.currpos() self.mc.trap() - self.mc.sradi(r.SCRATCH.value, r.length_loc.value, shift_by) + self.mc.sradi(r.SCRATCH.value, length_loc.value, shift_by) self.mc.mtctr(r.SCRATCH.value) # store the length in count register self.mc.li(r.SCRATCH.value, 0) From pypy.commits at gmail.com Mon Mar 7 03:35:41 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 07 Mar 2016 00:35:41 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: start passing those tests, slowly Message-ID: <56dd3d5d.13821c0a.d964b.ffffcf95@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82837:7d27725a0a80 Date: 2016-03-07 10:34 +0200 http://bitbucket.org/pypy/pypy/changeset/7d27725a0a80/ Log: start passing those tests, slowly diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -29,7 +29,7 @@ memo = None def forget_optimization_info(self): - return + return # no longer necessary? 
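The jit-leaner-frontend changes in this and the following commits move the metainterp away from keeping a list of ResOperation objects and towards recording into a flat, integer-encoded Trace (see rpython/jit/metainterp/opencoder.py below). Very roughly, and ignoring the real details (tagging of constants, snapshots, descrs, the oparity tables), the data structure can be sketched like this:

    class MiniTrace(object):
        """Toy version of opencoder.Trace: operations live in one flat
        list of small integers instead of as ResOperation instances."""

        def __init__(self, inputargs):
            self.inputargs = inputargs
            self._ops = []        # opnum, nargs, argument positions, ...
            self._count = 0

        def record_op(self, opnum, argpositions):
            pos = len(self._ops)
            self._ops.append(opnum)
            self._ops.append(len(argpositions))
            self._ops.extend(argpositions)  # arguments referred to by position
            self._count += 1
            return pos            # later ops reference this op by its position

        def length(self):
            return self._count

        def unpack(self):
            i = 0
            while i < len(self._ops):
                opnum = self._ops[i]
                nargs = self._ops[i + 1]
                args = self._ops[i + 2:i + 2 + nargs]
                yield opnum, args
                i += 2 + nargs

The real Trace also stores guard resume data (jitcode index, pc and live boxes) inline in the same array, which is what the snapshot-iterator changes below are about.
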
for arg in self.start_label.getarglist(): arg.set_forwarded(None) for op in self.operations: @@ -41,14 +41,14 @@ This is the case of label() ops label() """ - def __init__(self, start_label, end_label, operations, + def __init__(self, start_label, end_label, trace, call_pure_results=None, enable_opts=None): self.start_label = start_label self.end_label = end_label self.enable_opts = enable_opts assert start_label.getopnum() == rop.LABEL assert end_label.getopnum() == rop.LABEL - self.operations = operations + self.trace = trace self.call_pure_results = call_pure_results def optimize(self, metainterp_sd, jitdriver_sd, optimizations, unroll): @@ -58,13 +58,13 @@ if unroll: opt = UnrollOptimizer(metainterp_sd, jitdriver_sd, optimizations) return opt.optimize_preamble(self.start_label, self.end_label, - self.operations, + self.trace, self.call_pure_results, self.box_names_memo) else: opt = Optimizer(metainterp_sd, jitdriver_sd, optimizations) return opt.propagate_all_forward(self.start_label.getarglist(), - self.operations, self.call_pure_results) + self.trace, self.call_pure_results) class SimpleCompileData(CompileData): """ This represents label() ops jump with no extra info associated with @@ -264,7 +264,8 @@ enable_opts = enable_opts.copy() del enable_opts['unroll'] - ops = history.operations[start:] + assert start == 0 + #ops = history.operations[start:] if 'unroll' not in enable_opts or not metainterp.cpu.supports_guard_gc_type: return compile_simple_loop(metainterp, greenkey, start, inputargs, ops, jumpargs, enable_opts) @@ -273,7 +274,7 @@ descr=TargetToken(jitcell_token)) end_label = ResOperation(rop.LABEL, jumpargs, descr=jitcell_token) call_pure_results = metainterp.call_pure_results - preamble_data = LoopCompileData(label, end_label, ops, + preamble_data = LoopCompileData(label, end_label, history.trace, call_pure_results=call_pure_results, enable_opts=enable_opts) try: @@ -291,7 +292,7 @@ start_descr = TargetToken(jitcell_token, original_jitcell_token=jitcell_token) jitcell_token.target_tokens = [start_descr] - loop_data = UnrolledLoopData(end_label, jump_op, ops, start_state, + loop_data = UnrolledLoopData(end_label, jump_op, history.trace, start_state, call_pure_results=call_pure_results, enable_opts=enable_opts) try: diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -654,6 +654,9 @@ self.trace = Trace(inpargs) self.inputargs = inpargs + def length(self): + return self.trace._count + def any_operation(self): return self.trace._count > 0 diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -37,10 +37,13 @@ return res def next(self): - r = self.main_iter._get(self._next()) + r = self.main_iter._untag(self._next()) assert r return r + def read_boxes(self, size): + return [self.next() for i in range(size)] + def get_size_jitcode_pc(self): if self.save_pos >= 0: self.pos = self.save_pos @@ -132,6 +135,9 @@ self._count = 0 self.inputargs = inputargs + def length(self): + return len(self._ops) + def _encode(self, box): if isinstance(box, Const): if (isinstance(box, ConstInt) and @@ -203,7 +209,7 @@ self._ops.append(jitcode.index) self._ops.append(pc) for box in active_boxes: - self._ops.append(box.position) # not tagged, as it must be boxes + self._ops.append(self._encode(box)) # not tagged, as it must be boxes return pos def 
get_patchable_position(self): diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -121,12 +121,11 @@ if check_newops: assert not self.optimizer._newoperations - def optimize_preamble(self, start_label, end_label, ops, call_pure_results, + def optimize_preamble(self, start_label, end_label, trace, call_pure_results, memo): - self._check_no_forwarding([[start_label, end_label], ops]) - info, newops = self.optimizer.propagate_all_forward( - start_label.getarglist()[:], ops, call_pure_results, True, - flush=False) + #self._check_no_forwarding([[start_label, end_label], ops]) + info, newops = self.optimizer.propagate_all_forward(trace, + call_pure_results, flush=False) exported_state = self.export_state(start_label, end_label.getarglist(), info.inputargs, memo) exported_state.quasi_immutable_deps = info.quasi_immutable_deps diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2259,7 +2259,7 @@ def blackhole_if_trace_too_long(self): warmrunnerstate = self.jitdriver_sd.warmstate - if len(self.history.operations) > warmrunnerstate.trace_limit: + if self.history.length() > warmrunnerstate.trace_limit: jd_sd, greenkey_of_huge_function = self.find_biggest_function() self.staticdata.stats.record_aborted(greenkey_of_huge_function) self.portal_trace_positions = None diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -129,6 +129,7 @@ def capture_resumedata(framestack, virtualizable_boxes, virtualref_boxes, t): n = len(framestack) - 1 + result = t.length() if virtualizable_boxes is not None: virtualizable_boxes = ([virtualizable_boxes[-1]] + virtualizable_boxes[:-1]) @@ -147,6 +148,7 @@ snapshot_storage.rd_frame_info_list = None snapshot_storage.rd_snapshot = TopSnapshot(None, virtualref_boxes, virtualizable_boxes) + return result PENDINGFIELDSTRUCT = lltype.Struct('PendingField', ('lldescr', OBJECTPTR), @@ -263,6 +265,8 @@ """ n = state.n v = state.v + import pdb + pdb.set_trace() liveboxes = state.liveboxes for i in range(length): box = iter.next() diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -24,6 +24,20 @@ def get_list_of_active_boxes(self, flag): return self.boxes +def unpack_snapshot(t, pos): + trace = t.trace + first = trace._ops[pos] # this is the size + pos += 1 + boxes = [] + while first > pos + 1: + snapshot_size = trace._ops[pos] + # 2 for jitcode and pc + pos += 1 + 2 + boxes += [t._untag(trace._ops[i + pos]) for i in range(snapshot_size)] + pos += len(boxes) + return boxes + + class TestOpencoder(object): def unpack(self, t): iter = t.get_iter() @@ -46,19 +60,6 @@ assert l[0].getarg(0) is i0 assert l[0].getarg(1) is i1 - def unpack_snapshot(self, t, pos): - trace = t.trace - first = trace._ops[pos] # this is the size - pos += 1 - boxes = [] - while first > pos + 1: - snapshot_size = trace._ops[pos] - # 2 for jitcode and pc - pos += 1 + 2 - boxes += [t._get(trace._ops[i + pos]) for i in range(snapshot_size)] - pos += len(boxes) - return boxes - def test_rd_snapshot(self): i0, i1 = InputArgInt(), InputArgInt() t = Trace([i0, i1]) @@ -71,16 +72,16 @@ 
resume.capture_resumedata(framestack, None, [], t) (i0, i1), l, iter = self.unpack(t) assert l[1].opnum == rop.GUARD_FALSE - boxes = self.unpack_snapshot(iter, l[1].rd_resume_position) + boxes = unpack_snapshot(iter, l[1].rd_resume_position) assert boxes == [i0, i1] t.record_op(rop.GUARD_FALSE, [add]) resume.capture_resumedata([frame0, frame1], None, [], t) (i0, i1), l, iter = self.unpack(t) assert l[1].opnum == rop.GUARD_FALSE - boxes = self.unpack_snapshot(iter, l[1].rd_resume_position) + boxes = unpack_snapshot(iter, l[1].rd_resume_position) assert boxes == [i0, i1] assert l[2].opnum == rop.GUARD_FALSE - boxes = self.unpack_snapshot(iter, l[2].rd_resume_position) + boxes = unpack_snapshot(iter, l[2].rd_resume_position) assert boxes == [i0, i1, i0, i0, l[0]] def test_read_snapshot_interface(self): diff --git a/rpython/jit/metainterp/test/test_resume.py b/rpython/jit/metainterp/test/test_resume.py --- a/rpython/jit/metainterp/test/test_resume.py +++ b/rpython/jit/metainterp/test/test_resume.py @@ -13,6 +13,7 @@ annlowlevel, PENDINGFIELDSP, unpack_uint, TAG_CONST_OFFSET, TopSnapshot from rpython.jit.metainterp.resumecode import unpack_numbering,\ create_numbering, NULL_NUMBER +from rpython.jit.metainterp.opencoder import Trace from rpython.jit.metainterp.optimizeopt import info from rpython.jit.metainterp.history import ConstInt, Const, AbstractDescr @@ -145,7 +146,7 @@ class FakeVirtualValue(info.AbstractVirtualPtrInfo): def visitor_dispatch_virtual_type(self, *args): return FakeVInfo() - modifier = ResumeDataVirtualAdder(None, None, None, None) + modifier = ResumeDataVirtualAdder(None, None, None, None, None) v1 = FakeVirtualValue() vinfo1 = modifier.make_virtual_info(v1, [1, 2, 4]) vinfo2 = modifier.make_virtual_info(v1, [1, 2, 4]) @@ -502,8 +503,7 @@ class FakeFrame(object): - parent_resumedata_snapshot = None - parent_resumedata_frame_info_list = None + parent_resume_position = -1 def __init__(self, code, pc, *boxes): self.jitcode = code @@ -539,49 +539,35 @@ self.name = name self.index = index -def test_FrameInfo_create(): - jitcode = FakeJitCode("jitcode", 13) - fi = FrameInfo(None, jitcode, 1) - assert fi.prev is None - jitcode_pos, pc = unpack_uint(fi.packed_jitcode_pc) - assert jitcode_pos == 13 - assert pc == 1 - - jitcode1 = FakeJitCode("JITCODE1", 42) - fi1 = FrameInfo(fi, jitcode1, 3) - assert fi1.prev is fi - jitcode_pos, pc = unpack_uint(fi1.packed_jitcode_pc) - assert jitcode_pos == 42 - assert pc == 3 - def test_capture_resumedata(): b1, b2, b3 = [InputArgInt(), InputArgRef(), InputArgInt()] c1, c2, c3 = [ConstInt(1), ConstInt(2), ConstInt(3)] fs = [FakeFrame(FakeJitCode("code0", 13), 0, b1, c1, b2)] - storage = Storage() - capture_resumedata(fs, None, [], storage) + t = Trace([b1, b2, b3]) + pos = capture_resumedata(fs, None, [], t) - assert fs[0].parent_resumedata_snapshot is None - assert fs[0].parent_resumedata_frame_info_list is None + assert fs[0].parent_resume_position == -1 + s = t.get_iter().get_snapshot_iter(pos) - assert storage.rd_frame_info_list.prev is None - assert unpack_uint(storage.rd_frame_info_list.packed_jitcode_pc)[0] == 13 - assert storage.rd_snapshot.boxes == [] # for virtualrefs - snapshot = storage.rd_snapshot.prev - assert snapshot.prev is None - assert snapshot.boxes == fs[0]._env + size, jitcode, pc = s.get_size_jitcode_pc() + assert jitcode == 13 + boxes = s.read_boxes(size) + assert boxes == fs[0]._env storage = Storage() fs = [FakeFrame(FakeJitCode("code0", 0), 0, b1, c1, b2), FakeFrame(FakeJitCode("code1", 1), 3, b3, c2, b1), 
FakeFrame(FakeJitCode("code2", 2), 9, c3, b2)] - capture_resumedata(fs, None, [], storage) + t = Trace([b1, b2, b3]) + pos = capture_resumedata(fs, None, [], t) - frame_info_list = storage.rd_frame_info_list - assert frame_info_list.prev is fs[2].parent_resumedata_frame_info_list - assert unpack_uint(frame_info_list.packed_jitcode_pc) == (2, 9) + assert fs[2].parent_resume_position != -1 + s = t.get_iter().get_snapshot_iter(pos) + size, jitcode, pc = s.get_size_jitcode_pc() + assert (jitcode, pc) == (2, 9) + xxx assert storage.rd_snapshot.boxes == [] # for virtualrefs snapshot = storage.rd_snapshot.prev assert snapshot.prev is fs[2].parent_resumedata_snapshot From pypy.commits at gmail.com Mon Mar 7 03:42:42 2016 From: pypy.commits at gmail.com (cfbolz) Date: Mon, 07 Mar 2016 00:42:42 -0800 (PST) Subject: [pypy-commit] pypy release-5.x: use upper case more Message-ID: <56dd3f02.c52f1c0a.aa093.ffffa11d@mx.google.com> Author: Carl Friedrich Bolz Branch: release-5.x Changeset: r82838:d7fe9ed4eb8f Date: 2016-03-07 09:41 +0100 http://bitbucket.org/pypy/pypy/changeset/d7fe9ed4eb8f/ Log: use upper case more diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -36,7 +36,7 @@ While not applicable only to PyPy, `cffi`_ is arguably our most significant contribution to the python ecosystem. PyPy 5.0.0 ships with -`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a c program. +`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a C program. .. _`PyPy`: http://doc.pypy.org .. _`RPython`: https://rpython.readthedocs.org @@ -52,18 +52,18 @@ ============= PyPy is a very compliant Python interpreter, almost a drop-in replacement for -CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison) +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) due to its integrated tracing JIT compiler. We also welcome developers of other `dynamic languages`_ to see what RPython can do for them. This release supports **x86** machines on most common operating systems -(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, freebsd), +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the -big- and little-endian variants of **ppc64** running Linux. +big- and little-endian variants of **PPC64** running Linux. -.. _`pypy and cpython 2.7.x`: http://speed.pypy.org +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org .. 
_`dynamic languages`: http://pypyjs.org Other Highlights (since 4.0.1 released in November 2015) @@ -103,7 +103,7 @@ * More completely support datetime, optimize timedelta creation - * Fix for issue 2185 which caused an inconsistent list of operations to be + * Fix for issue #2185 which caused an inconsistent list of operations to be generated by the unroller, appeared in a complicated DJango app * Fix an elusive issue with stacklets on shadowstack which showed up when From pypy.commits at gmail.com Mon Mar 7 03:53:38 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 07 Mar 2016 00:53:38 -0800 (PST) Subject: [pypy-commit] pypy release-5.x: expand Message-ID: <56dd4192.a3f6c20a.82b86.54b3@mx.google.com> Author: fijal Branch: release-5.x Changeset: r82839:9d1c0be464a7 Date: 2016-03-07 10:52 +0200 http://bitbucket.org/pypy/pypy/changeset/9d1c0be464a7/ Log: expand diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -12,11 +12,11 @@ We also merged a major upgrade to our C-API layer (cpyext), simplifying the interaction between c-level objects and PyPy interpreter level objects. As a result, lxml with its cython compiled component `passes all tests`_ on PyPy +and the new cpyext is a lot faster than the previous one. -Users who have gotten used to vmprof_ on Linux, and those on other platforms -who have not yet tried its awesomeness, will be happy to hear that vmprof -now just works on MacOS and Windows too, in both PyPy (built-in support) and -CPython (as an installed module). +vmprof_ has been a go-to profiler for PyPy on linux for a few releases +and we're happy to announce that thanks to commercial cooperation, vmprof +now works on Linux, OS X and Windows on both PyPy and CPython. 
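The embedding support mentioned in these release notes (and exercised by the add1/add2/perf embedding tests imported earlier in this batch) is driven from Python through cffi's embedding API. Roughly, a build script along these lines produces a shared library exporting add1() that a C program can link against; the module and target names here are made up for the example:

    import cffi

    ffibuilder = cffi.FFI()

    # C-level API that the embedded Python code will implement:
    ffibuilder.embedding_api("""
        int add1(int, int);
    """)

    ffibuilder.set_source("_add1_cffi", "")   # no extra C source needed here

    # Python code compiled into the library, run on first use:
    ffibuilder.embedding_init_code("""
        from _add1_cffi import ffi

        @ffi.def_extern()
        def add1(x, y):
            return x + y
    """)

    ffibuilder.compile(target="libadd1.*", verbose=True)

The add1-test.c file shown above then simply declares extern int add1(int, int); and calls it after linking against the resulting library.
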
You can download the PyPy 5.0.0 release here: From pypy.commits at gmail.com Mon Mar 7 04:16:24 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 07 Mar 2016 01:16:24 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: fixes Message-ID: <56dd46e8.8ee61c0a.5cd18.ffffd712@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82840:710fa15b8ea5 Date: 2016-03-07 11:08 +0200 http://bitbucket.org/pypy/pypy/changeset/710fa15b8ea5/ Log: fixes diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -26,7 +26,7 @@ self.save_pos = -1 def length(self): - return self.end - self.start + return self.end - self.start - 1 def done(self): return self.pos >= self.end diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -494,10 +494,10 @@ final_descr = history.BasicFinalDescr() class FakeFrame(object): - pc = 0 + pc = 100 class jitcode: - index = 0 + index = 200 def __init__(self, boxes): self.boxes = boxes diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -265,8 +265,6 @@ """ n = state.n v = state.v - import pdb - pdb.set_trace() liveboxes = state.liveboxes for i in range(length): box = iter.next() @@ -298,11 +296,6 @@ def number(self, optimizer, position, trace): snapshot_iter = trace.get_snapshot_iter(position) state = NumberingState(snapshot_iter.length()) - while not snapshot_iter.done(): - size, jitcode_index, pc = snapshot_iter.get_size_jitcode_pc() - state.append(rffi.cast(rffi.SHORT, jitcode_index)) - state.append(rffi.cast(rffi.SHORT, pc)) - self._number_boxes(snapshot_iter, size, optimizer, state) state.append(rffi.cast(rffi.SHORT, 0)) n = 0 # len(topsnapshot.boxes) @@ -324,6 +317,12 @@ #self._number_boxes(topsnapshot.boxes, optimizer, state) #assert state.position == special_boxes_size + while not snapshot_iter.done(): + size, jitcode_index, pc = snapshot_iter.get_size_jitcode_pc() + state.append(rffi.cast(rffi.SHORT, jitcode_index)) + state.append(rffi.cast(rffi.SHORT, pc)) + self._number_boxes(snapshot_iter, size, optimizer, state) + numb = resumecode.create_numbering(state.current) return numb, state.liveboxes, state.v diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -76,6 +76,7 @@ assert boxes == [i0, i1] t.record_op(rop.GUARD_FALSE, [add]) resume.capture_resumedata([frame0, frame1], None, [], t) + t.record_op(rop.INT_ADD, [add, add]) (i0, i1), l, iter = self.unpack(t) assert l[1].opnum == rop.GUARD_FALSE boxes = unpack_snapshot(iter, l[1].rd_resume_position) diff --git a/rpython/jit/metainterp/test/test_resume.py b/rpython/jit/metainterp/test/test_resume.py --- a/rpython/jit/metainterp/test/test_resume.py +++ b/rpython/jit/metainterp/test/test_resume.py @@ -1145,6 +1145,7 @@ index = callback_f(index, index) else: assert 0 + size, self.cur_index = resumecode.numb_next_item(self.numb, 0) assert size == 0 size, self.cur_index = resumecode.numb_next_item(self.numb, self.cur_index) From pypy.commits at gmail.com Mon Mar 7 04:16:25 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 07 Mar 
2016 01:16:25 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: pass the whole test_optimizebasic Message-ID: <56dd46e9.85b01c0a.7a902.ffffd620@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82841:0fed4262ac28 Date: 2016-03-07 11:09 +0200 http://bitbucket.org/pypy/pypy/changeset/0fed4262ac28/ Log: pass the whole test_optimizebasic diff --git a/rpython/jit/metainterp/test/test_resume.py b/rpython/jit/metainterp/test/test_resume.py --- a/rpython/jit/metainterp/test/test_resume.py +++ b/rpython/jit/metainterp/test/test_resume.py @@ -184,7 +184,7 @@ def execute_and_record(self, opnum, descr, *argboxes): resvalue = executor.execute(self.cpu, None, opnum, descr, *argboxes) - op = ResOperation(opnum, list(argboxes), descr) + op = ResOperation(opnum, list(argboxes), -1, descr) setvalue(op, resvalue) self.trace.append((opnum, list(argboxes), resvalue, descr)) return op From pypy.commits at gmail.com Mon Mar 7 04:16:27 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 07 Mar 2016 01:16:27 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: get to the point of running into problems with unrolling Message-ID: <56dd46eb.890bc30a.8bff0.5b4f@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82842:c6a2a4c8c114 Date: 2016-03-07 11:15 +0200 http://bitbucket.org/pypy/pypy/changeset/c6a2a4c8c114/ Log: get to the point of running into problems with unrolling diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -48,7 +48,7 @@ def optimize_loop(self, ops, expected, expected_preamble=None, call_pure_results=None, expected_short=None, jump_values=None): - loop = self.parse(ops, postprocess=self.postprocess) + loop = self.parse(ops) self.set_values(loop.operations, jump_values) if expected != "crash!": expected = self.parse(expected) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -516,7 +516,7 @@ def add_guard_future_condition(self, res): # invent a GUARD_FUTURE_CONDITION to not have to change all tests if res.operations[-1].getopnum() == rop.JUMP: - guard = ResOperation(rop.GUARD_FUTURE_CONDITION, [], None) + guard = ResOperation(rop.GUARD_FUTURE_CONDITION, []) guard.rd_snapshot = resume.TopSnapshot(None, [], []) res.operations.insert(-1, guard) @@ -553,13 +553,19 @@ call_pure_results[list(k)] = v return call_pure_results - def convert_loop_to_packed(self, loop): + def convert_loop_to_packed(self, loop, skip_last=False): from rpython.jit.metainterp.opencoder import Trace trace = Trace(loop.inputargs) - for op in loop.operations: + ops = loop.operations + if skip_last: + ops = ops[:-1] + for op in ops: newop = trace.record_op(op.getopnum(), op.getarglist(), op.getdescr()) if rop.is_guard(op.getopnum()): - frame = FakeFrame(op.getfailargs()) + failargs = [] + if op.getfailargs(): + failargs = op.getfailargs() + frame = FakeFrame(failargs) resume.capture_resumedata([frame], None, [], trace) op.position = newop.position return trace @@ -568,18 +574,18 @@ self.add_guard_future_condition(loop) jump_op = loop.operations[-1] assert jump_op.getopnum() == rop.JUMP - ops = loop.operations[:-1] jump_op.setdescr(JitCellToken()) start_label = 
ResOperation(rop.LABEL, loop.inputargs, - jump_op.getdescr()) + descr=jump_op.getdescr()) end_label = jump_op.copy_and_change(opnum=rop.LABEL) call_pure_results = self._convert_call_pure_results(call_pure_results) - preamble_data = compile.LoopCompileData(start_label, end_label, ops, + t = self.convert_loop_to_packed(loop, skip_last=True) + preamble_data = compile.LoopCompileData(start_label, end_label, t, call_pure_results) start_state, preamble_ops = self._do_optimize_loop(preamble_data) preamble_data.forget_optimization_info() loop_data = compile.UnrolledLoopData(start_label, jump_op, - ops, start_state, + t, start_state, call_pure_results) loop_info, ops = self._do_optimize_loop(loop_data) preamble = TreeLoop('preamble') diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -115,6 +115,7 @@ return modifier.get_virtual_state(args) def _check_no_forwarding(self, lsts, check_newops=True): + return for lst in lsts: for op in lst: assert op.get_forwarded() is None @@ -134,9 +135,9 @@ self.optimizer._clean_optimization_info(self.optimizer._newoperations) return exported_state, self.optimizer._newoperations - def optimize_peeled_loop(self, start_label, end_jump, ops, state, + def optimize_peeled_loop(self, start_label, end_jump, trace, state, call_pure_results, inline_short_preamble=True): - self._check_no_forwarding([[start_label, end_jump], ops]) + #self._check_no_forwarding([[start_label, end_jump], ops]) try: label_args = self.import_state(start_label, state) except VirtualStatesCantMatch: @@ -145,11 +146,11 @@ self.optimizer.init_inparg_dict_from(label_args) try: info, _ = self.optimizer.propagate_all_forward( - start_label.getarglist()[:], ops, call_pure_results, False, - flush=False) + trace, call_pure_results, flush=False) except SpeculativeError: raise InvalidLoop("Speculative heap access would be ill-typed") - label_op = ResOperation(rop.LABEL, label_args, start_label.getdescr()) + label_op = ResOperation(rop.LABEL, label_args, + descr=start_label.getdescr()) for a in end_jump.getarglist(): self.optimizer.force_box_for_end_of_preamble( self.optimizer.get_box_replacement(a)) From pypy.commits at gmail.com Mon Mar 7 06:01:07 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 07 Mar 2016 03:01:07 -0800 (PST) Subject: [pypy-commit] pypy s390x-enhance-speed: partly enabling the literal pool. it is now simpler and does not allocate 32bit values Message-ID: <56dd5f73.c85b1c0a.db10b.fffff14f@mx.google.com> Author: Richard Plangger Branch: s390x-enhance-speed Changeset: r82843:9c1e430cbed5 Date: 2016-03-07 12:00 +0100 http://bitbucket.org/pypy/pypy/changeset/9c1e430cbed5/ Log: partly enabling the literal pool. 
it is now simpler and does not allocate 32bit values diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -50,7 +50,7 @@ self.gcrootmap_retaddr_forced = 0 self.failure_recovery_code = [0, 0, 0, 0] self.wb_slowpath = [0,0,0,0,0] - # self.pool = None + self.pool = None def setup(self, looptoken): BaseAssembler.setup(self, looptoken) @@ -58,7 +58,7 @@ if we_are_translated(): self.debug = False self.current_clt = looptoken.compiled_loop_token - # POOL self.pool = LiteralPool() + self.pool = LiteralPool() self.mc = InstrBuilder(None) self.pending_guard_tokens = [] self.pending_guard_tokens_recovered = 0 @@ -76,7 +76,7 @@ self.current_clt = None self._regalloc = None self.mc = None - # self.pool = None + self.pool = None def target_arglocs(self, looptoken): @@ -636,7 +636,7 @@ # operations = regalloc.prepare_loop(inputargs, operations, looptoken, clt.allgcrefs) - # POOL self.pool.pre_assemble(self, operations) + self.pool.pre_assemble(self, operations) entrypos = self.mc.get_relative_pos() self._call_header_with_stack_check() looppos = self.mc.get_relative_pos() @@ -645,7 +645,7 @@ self.update_frame_depth(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) # size_excluding_failure_stuff = self.mc.get_relative_pos() - # POOL self.pool.post_assemble(self) + #self.pool.post_assemble(self) self.write_pending_failure_recoveries() full_size = self.mc.get_relative_pos() # @@ -704,13 +704,13 @@ operations, self.current_clt.allgcrefs, self.current_clt.frame_info) - # POOL self.pool.pre_assemble(self, operations, bridge=True) + self.pool.pre_assemble(self, operations, bridge=True) startpos = self.mc.get_relative_pos() - # POOL self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - startpos)) + self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - startpos)) self._check_frame_depth(self.mc, regalloc.get_gcmap()) frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) codeendpos = self.mc.get_relative_pos() - # POOL self.pool.post_assemble(self) + #self.pool.post_assemble(self) self.write_pending_failure_recoveries() fullsize = self.mc.get_relative_pos() # @@ -735,7 +735,6 @@ # 'faildescr.adr_jump_offset' is the address of an instruction that is a # conditional jump. We must patch this conditional jump to go # to 'adr_new_target'. 
- # Updates the pool address mc = InstrBuilder() mc.b_abs(adr_new_target) mc.copy_to_raw_memory(faildescr.adr_jump_offset) @@ -922,14 +921,17 @@ return assert 0, "not supported location" elif prev_loc.is_in_pool(): + if loc.is_core_reg(): + self.mc.LG(loc, prev_loc) + return # move immediate value to fp register if loc.is_fp_reg(): - self.mc.LD(loc, prev_loc) + self.mc.LDY(loc, prev_loc) return # move immediate value to memory elif loc.is_stack(): offset = loc.value - self.mc.LD(r.FP_SCRATCH, prev_loc) + self.mc.LDY(r.FP_SCRATCH, prev_loc) self.mc.STDY(r.FP_SCRATCH, l.addr(offset, r.SPP)) return assert 0, "not supported location" @@ -1019,7 +1021,7 @@ # Build a new stackframe of size STD_FRAME_SIZE_IN_BYTES fpoff = JIT_ENTER_EXTRA_STACK_SPACE self.mc.STMG(r.r6, r.r15, l.addr(-fpoff+6*WORD, r.SP)) - # POOL self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - self.mc.get_relative_pos())) + self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - self.mc.get_relative_pos())) # f8 through f15 are saved registers (= non volatile) # TODO it would be good to detect if any float is used in the loop # and to skip this push/pop whenever no float operation occurs @@ -1180,11 +1182,9 @@ # ASSEMBLER EMISSION def emit_label(self, op, arglocs, regalloc): - pass - # POOL - #offset = self.pool.pool_start - self.mc.get_relative_pos() + offset = self.pool.pool_start - self.mc.get_relative_pos() # load the pool address at each label - #self.mc.LARL(r.POOL, l.halfword(offset)) + self.mc.LARL(r.POOL, l.halfword(offset)) def emit_jump(self, op, arglocs, regalloc): # The backend's logic assumes that the target code is in a piece of @@ -1201,7 +1201,7 @@ if descr in self.target_tokens_currently_compiling: # a label has a LARL instruction that does not need # to be executed, thus remove the first opcode - self.mc.b_offset(descr._ll_loop_code) # POOL + self.mc.LARL_byte_count) + self.mc.b_offset(descr._ll_loop_code + self.mc.LARL_byte_count) else: # POOL #offset = self.pool.get_descr_offset(descr) + \ diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -35,7 +35,6 @@ GuardToken.__init__(self, cpu, gcmap, descr, failargs, faillocs, guard_opnum, frame_depth) self.fcond = fcond - # POOL self._pool_offset = -1 class AbstractZARCHBuilder(object): diff --git a/rpython/jit/backend/zarch/helper/assembler.py b/rpython/jit/backend/zarch/helper/assembler.py --- a/rpython/jit/backend/zarch/helper/assembler.py +++ b/rpython/jit/backend/zarch/helper/assembler.py @@ -12,8 +12,7 @@ l1 = arglocs[1] assert not l0.is_imm() # do the comparison - # POOL self.mc.cmp_op(l0, l1, pool=l1.is_in_pool(), imm=l1.is_imm(), signed=signed, fp=fp) - self.mc.cmp_op(l0, l1, imm=l1.is_imm(), signed=signed, fp=fp) + self.mc.cmp_op(l0, l1, pool=l1.is_in_pool(), imm=l1.is_imm(), signed=signed, fp=fp) self.flush_cc(condition, arglocs[2]) @@ -30,31 +29,21 @@ f.name = 'emit_shift_' + func return f -def gen_emit_rr(rr_func): +def gen_emit_rr_rp(rr_func, rp_func): def f(self, op, arglocs, regalloc): l0, l1 = arglocs - getattr(self.mc, rr_func)(l0, l1) + if l1.is_in_pool(): + getattr(self.mc, rp_func)(l0, l1) + else: + getattr(self.mc, rr_func)(l0, l1) return f -# POOL -#def gen_emit_rr_or_rpool(rr_func, rp_func): -# """ the parameters can either be both in registers or -# the first is in the register, second in literal pool. 
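# [Illustrative sketch related to the emit_label/emit_jump changes above: the pool
#  base register is re-established with LARL, i.e. with an offset measured from the
#  current instruction.  l.halfword() and the halfword encoding of that offset are
#  assumptions inferred from the calls shown above; LARL itself does encode its
#  immediate in halfwords.]
def pool_reload_offset(pool_start, current_pos):
    # the distance from "here" back to the start of the literal pool, in halfwords
    offset = pool_start - current_pos
    assert offset % 2 == 0        # s390x code is halfword aligned
    return offset // 2

assert pool_reload_offset(0, 64) == -32   # pool emitted ahead of the code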
-# """ -# def f(self, op, arglocs, regalloc): -# l0, l1 = arglocs -# if l1.is_imm() and not l1.is_in_pool(): -# assert 0, "logical imm must reside in pool!" -# if l1.is_in_pool(): -# getattr(self.mc, rp_func)(l0, l1) -# else: -# getattr(self.mc, rr_func)(l0, l1) -# return f - -def gen_emit_rr_rh_ri(rr_func, rh_func, ri_func): +def gen_emit_rr_rh_ri_rp(rr_func, rh_func, ri_func, rp_func): def emit(self, op, arglocs, regalloc): l0, l1 = arglocs - if l1.is_imm(): + if l1.is_in_pool(): + getattr(self.mc, rp_func)(l0, l1) + elif l1.is_imm(): if check_imm_value(l1.value): getattr(self.mc, rh_func)(l0, l1) else: @@ -63,27 +52,18 @@ getattr(self.mc, rr_func)(l0, l1) return emit -# POOL -#def gen_emit_imm_pool_rr(imm_func, pool_func, rr_func): -# def emit(self, op, arglocs, regalloc): -# l0, l1 = arglocs -# if l1.is_in_pool(): -# getattr(self.mc, pool_func)(l0, l1) -# elif l1.is_imm(): -# getattr(self.mc, imm_func)(l0, l1) -# else: -# getattr(self.mc, rr_func)(l0, l1) -# return emit - -def gen_emit_div_mod(rr_func): +def gen_emit_div_mod(rr_func, rp_func): def emit(self, op, arglocs, regalloc): lr, lq, l1 = arglocs # lr == remainer, lq == quotient # when entering the function lr contains the dividend # after this operation either lr or lq is used further assert not l1.is_imm(), "imm divider not supported" - # remainer is always a even register r0, r2, ... , r14 + # remainer is always an even register r0, r2, ... , r14 assert lr.is_even() assert lq.is_odd() self.mc.XGR(lr, lr) - getattr(self.mc,rr_func)(lr, l1) + if l1.is_in_pool(): + getattr(self.mc,rp_func)(lr, l1) + else: + getattr(self.mc,rr_func)(lr, l1) return emit diff --git a/rpython/jit/backend/zarch/helper/regalloc.py b/rpython/jit/backend/zarch/helper/regalloc.py --- a/rpython/jit/backend/zarch/helper/regalloc.py +++ b/rpython/jit/backend/zarch/helper/regalloc.py @@ -26,8 +26,7 @@ if check_imm32(a1): l1 = imm(a1.getint()) else: - # POOL l1 = self.ensure_reg_or_pool(a1) - l1 = self.ensure_reg(a1) + l1 = self.ensure_reg_or_pool(a1) l0 = self.force_result_in_reg(op, a0) return [l0, l1] @@ -39,7 +38,7 @@ if check_imm32(a1): l1 = imm(a1.getint()) else: - l1 = self.ensure_reg(a1) + l1 = self.ensure_reg_or_pool(a1) l0 = self.force_result_in_reg(op, a0) return [l0, l1] @@ -51,7 +50,7 @@ if check_imm32(a1): l1 = imm(a1.getint()) else: - l1 = self.ensure_reg(a1) + l1 = self.ensure_reg_or_pool(a1) lr,lq = self.rm.ensure_even_odd_pair(a0, op, bind_first=False) return [lr, lq, l1] @@ -61,7 +60,7 @@ a1 = op.getarg(1) l1 = self.ensure_reg(a1) if isinstance(a0, Const): - loc = self.ensure_reg(a0) + loc = self.ensure_reg_or_pool(a0) lr,lq = self.rm.ensure_even_odd_pair(a0, op, bind_first=modulus, must_exist=False, move_regs=False) @@ -78,7 +77,6 @@ a0 = op.getarg(0) a1 = op.getarg(1) # sub is not commotative, thus cannot swap operands - # POOL l1 = self.ensure_reg_or_pool(a1) l0 = self.ensure_reg(a0) l1 = self.ensure_reg(a1) res = self.force_allocate_reg(op) diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -3,7 +3,7 @@ STD_FRAME_SIZE_IN_BYTES) from rpython.jit.backend.zarch.arch import THREADLOCAL_ADDR_OFFSET from rpython.jit.backend.zarch.helper.assembler import (gen_emit_cmp_op, - gen_emit_rr, gen_emit_shift, gen_emit_rr_rh_ri, gen_emit_div_mod) + gen_emit_rr_rp, gen_emit_shift, gen_emit_rr_rh_ri_rp, gen_emit_div_mod) from rpython.jit.backend.zarch.helper.regalloc import (check_imm, check_imm_value) from 
rpython.jit.metainterp.history import (ConstInt) @@ -28,7 +28,7 @@ class IntOpAssembler(object): _mixin_ = True - emit_int_add = gen_emit_rr_rh_ri('AGR', 'AGHI', 'AGFI') + emit_int_add = gen_emit_rr_rh_ri_rp('AGR', 'AGHI', 'AGFI', 'AG') emit_int_add_ovf = emit_int_add emit_nursery_ptr_increment = emit_int_add @@ -36,25 +36,16 @@ def emit_int_sub(self, op, arglocs, regalloc): res, l0, l1 = arglocs self.mc.SGRK(res, l0, l1) - # POOL - #if l1.is_imm() and not l1.is_in_pool(): - # assert 0, "logical imm must reside in pool!" - #if l1.is_in_pool(): - # self.mc.SG(l0, l1) - #else: - # self.mc.SGR(l0, l1) emit_int_sub_ovf = emit_int_sub - emit_int_mul = gen_emit_rr_rh_ri('MSGR', 'MGHI', 'MSGFI') + emit_int_mul = gen_emit_rr_rh_ri_rp('MSGR', 'MGHI', 'MSGFI', 'MSG') def emit_int_mul_ovf(self, op, arglocs, regalloc): lr, lq, l1 = arglocs - # POOL - # if l1.is_in_pool(): - # self.mc.LG(r.SCRATCH, l1) - # l1 = r.SCRATCH - # elif - if l1.is_imm(): + if l1.is_in_pool(): + self.mc.LG(r.SCRATCH, l1) + l1 = r.SCRATCH + elif l1.is_imm(): self.mc.LGFI(r.SCRATCH, l1) l1 = r.SCRATCH else: @@ -169,11 +160,11 @@ omc.BRC(c.ANY, l.imm(label_end - jmp_neither_lqlr_overflow)) omc.overwrite() - emit_int_floordiv = gen_emit_div_mod('DSGR') - emit_uint_floordiv = gen_emit_div_mod('DLGR') + emit_int_floordiv = gen_emit_div_mod('DSGR', 'DSG') + emit_uint_floordiv = gen_emit_div_mod('DLGR', 'DLG') # NOTE division sets one register with the modulo value, thus # the regalloc ensures the right register survives. - emit_int_mod = gen_emit_div_mod('DSGR') + emit_int_mod = gen_emit_div_mod('DSGR', 'DSG') def emit_int_invert(self, op, arglocs, regalloc): l0, = arglocs @@ -213,9 +204,9 @@ self.mc.CGHI(l0, l.imm(0)) self.flush_cc(c.NE, res) - emit_int_and = gen_emit_rr("NGR") - emit_int_or = gen_emit_rr("OGR") - emit_int_xor = gen_emit_rr("XGR") + emit_int_and = gen_emit_rr_rp("NGR", "NG") + emit_int_or = gen_emit_rr_rp("OGR", "OG") + emit_int_xor = gen_emit_rr_rp("XGR", "XG") emit_int_rshift = gen_emit_shift("SRAG") emit_int_lshift = gen_emit_shift("SLLG") @@ -242,10 +233,10 @@ class FloatOpAssembler(object): _mixin_ = True - emit_float_add = gen_emit_rr('ADBR') - emit_float_sub = gen_emit_rr('SDBR') - emit_float_mul = gen_emit_rr('MDBR') - emit_float_truediv = gen_emit_rr('DDBR') + emit_float_add = gen_emit_rr_rp('ADBR', 'ADB') + emit_float_sub = gen_emit_rr_rp('SDBR', 'SDB') + emit_float_mul = gen_emit_rr_rp('MDBR', 'MDB') + emit_float_truediv = gen_emit_rr_rp('DDBR', 'DDB') # Support for NaNs: S390X sets condition code to 0x3 (unordered) # whenever any operand is nan. 
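# [Illustrative sketch of the choice gen_emit_rr_rh_ri_rp makes for emit_int_add.
#  FakeLoc and the 16/32 bit bounds stand in for the real location objects and
#  check_imm_value; only the instruction names are taken from the diff.]
class FakeLoc(object):
    def __init__(self, value=0, pool=False, imm=False):
        self.value, self._pool, self._imm = value, pool, imm
    def is_in_pool(self):
        return self._pool
    def is_imm(self):
        return self._imm

def pick_int_add_form(l1):
    if l1.is_in_pool():
        return 'AG'                 # second operand is read from the literal pool
    elif l1.is_imm():
        if -2**15 <= l1.value <= 2**15 - 1:
            return 'AGHI'           # 16 bit signed immediate
        return 'AGFI'               # 32 bit signed immediate
    return 'AGR'                    # plain register-register form

assert pick_int_add_form(FakeLoc(imm=True, value=12)) == 'AGHI'
assert pick_int_add_form(FakeLoc(imm=True, value=2**20)) == 'AGFI'
assert pick_int_add_form(FakeLoc(pool=True)) == 'AG'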
@@ -1072,7 +1063,7 @@ self._store_force_index(self._find_nearby_operation(regalloc, +1)) # 'result_loc' is either r2, f0 or None self.call_assembler(op, argloc, vloc, result_loc, r.r2) - # POOL self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - self.mc.get_relative_pos())) + self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - self.mc.get_relative_pos())) emit_call_assembler_i = _genop_call_assembler emit_call_assembler_r = _genop_call_assembler diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -19,81 +19,25 @@ self.size = 0 # the offset to index the pool self.pool_start = 0 - self.label_offset = 0 - self.label_count = 0 # for constant offsets self.offset_map = {} # for descriptors self.offset_descr = {} - self.constant_64_zeros = -1 - self.constant_64_ones = -1 - self.constant_64_sign_bit = -1 - self.constant_max_64_positive = -1 + + def reset(self): + self.pool_start = 0 + self.size = 0 + self.offset_map = {} + self.offset_descr = {} def ensure_can_hold_constants(self, asm, op): - opnum = op.getopnum() - if op.is_guard(): - # 1x gcmap pointer - # 1x target address - self.offset_descr[op.getdescr()] = self.size - self.allocate_slot(2*8) - elif op.getopnum() == rop.JUMP: - descr = op.getdescr() - if descr not in asm.target_tokens_currently_compiling: - # this is a 'long' jump instead of a relative jump - self.offset_descr[descr] = self.size - self.allocate_slot(8) - elif op.getopnum() == rop.LABEL: - descr = op.getdescr() - if descr not in asm.target_tokens_currently_compiling: - # this is a 'long' jump instead of a relative jump - self.offset_descr[descr] = self.size - self.allocate_slot(8) - elif op.getopnum() == rop.INT_INVERT: - self.constant_64_ones = 1 # we need constant ones!!! - elif op.getopnum() == rop.INT_MUL_OVF: - self.constant_64_sign_bit = 1 - self.constant_max_64_positive = 1 - elif opnum == rop.INT_RSHIFT or opnum == rop.INT_LSHIFT or \ - opnum == rop.UINT_RSHIFT: - a0 = op.getarg(0) - if a0.is_constant(): - self.reserve_literal(8, a0) + # allocates 8 bytes in memory for pointers, long integers or floats + if op.is_jit_debug(): return - elif opnum == rop.GC_STORE or opnum == rop.GC_STORE_INDEXED: - arg = op.getarg(0) - if arg.is_constant(): - self.reserve_literal(8, arg) - arg = op.getarg(1) - if arg.is_constant(): - self.reserve_literal(8, arg) - arg = op.getarg(2) - if arg.is_constant(): - self.reserve_literal(8, arg) - return - elif opnum in (rop.GC_LOAD_F, - rop.GC_LOAD_I, - rop.GC_LOAD_R,) \ - or opnum in (rop.GC_LOAD_INDEXED_F, - rop.GC_LOAD_INDEXED_R, - rop.GC_LOAD_INDEXED_I,): - arg = op.getarg(0) - if arg.is_constant(): - self.reserve_literal(8, arg) - arg = op.getarg(1) - if arg.is_constant(): - self.reserve_literal(8, arg) - return - elif op.is_call_release_gil(): - for arg in op.getarglist()[1:]: - if arg.is_constant(): - self.reserve_literal(8, arg) - return - elif opnum == rop.COND_CALL_GC_WB_ARRAY: - self.constant_64_ones = 1 # we need constant ones!!! 
+ for arg in op.getarglist(): if arg.is_constant(): - self.reserve_literal(8, arg) + self.reserve_literal(8, arg, asm) def contains_constant(self, unique_val): return unique_val in self.offset_map @@ -101,6 +45,10 @@ def get_descr_offset(self, descr): return self.offset_descr[descr] + def contains_box(self, box): + uvalue = self.unique_value(box) + return self.contains_constant(uvalue) + def get_offset(self, box): assert box.is_constant() uvalue = self.unique_value(box) @@ -108,11 +56,6 @@ assert self.offset_map[uvalue] >= 0 return self.offset_map[uvalue] - def get_direct_offset(self, unique_val): - """ Get the offset directly using a unique value, - use get_offset if you have a Const box """ - return self.offset_map[unique_val] - def unique_value(self, val): if val.type == FLOAT: if val.getfloat() == 0.0: @@ -124,21 +67,14 @@ assert val.type == REF return rffi.cast(lltype.Signed, val.getref_base()) - def reserve_literal(self, size, box): + def reserve_literal(self, size, box, asm): uvalue = self.unique_value(box) - if uvalue not in self.offset_map: - self.offset_map[uvalue] = self.size - self.allocate_slot(size) - - def reset(self): - self.pool_start = 0 - self.label_offset = 0 - self.size = 0 - self.offset_map = {} - self.constant_64_zeros = -1 - self.constant_64_ones = -1 - self.constant_64_sign_bit = -1 - self.constant_max_64_positive = -1 + if box.type == INT and -2**31 <= uvalue <= 2**31-1: + # we do not allocate non 64 bit values, these + # can be loaded as imm by LGHI/LGFI + return + # + self._ensure_value(uvalue, asm) def check_size(self, size=-1): if size == -1: @@ -149,18 +85,19 @@ llop.debug_print(lltype.Void, msg) raise PoolOverflow(msg) + def _ensure_value(self, uvalue, asm): + if uvalue not in self.offset_map: + self.offset_map[uvalue] = self.size + self.allocate_slot(8) + asm.mc.write_i64(uvalue) + return self.offset_map[uvalue] + def allocate_slot(self, size): val = self.size + size self.check_size(val) self.size = val assert val >= 0 - def ensure_value(self, val): - if val not in self.offset_map: - self.offset_map[val] = self.size - self.allocate_slot(8) - return self.offset_map[val] - def pre_assemble(self, asm, operations, bridge=False): # O(len(operations)). I do not think there is a way # around this. @@ -179,27 +116,24 @@ self.pool_start = asm.mc.get_relative_pos() for op in operations: self.ensure_can_hold_constants(asm, op) - self.ensure_value(asm.cpu.pos_exc_value()) + self._ensure_value(asm.cpu.pos_exc_value(), asm) # TODO add more values that are loaded with load_imm - if self.size == 0: - # no pool needed! 
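# [Illustrative sketch of what the new reserve_literal/_ensure_value pair does,
#  modelled with a plain dict and 8 byte slots.  The names offset_map/size come
#  from the diff; everything else is an assumption.]
class TinyPool(object):
    def __init__(self):
        self.offset_map = {}
        self.size = 0
    def reserve_literal(self, value):
        if -2**31 <= value <= 2**31 - 1:
            return None                      # loadable as LGHI/LGFI immediate
        return self.ensure_value(value)
    def ensure_value(self, value):
        if value not in self.offset_map:     # constants are deduplicated
            self.offset_map[value] = self.size
            self.size += 8                   # one 64 bit slot per constant
        return self.offset_map[value]

pool = TinyPool()
assert pool.reserve_literal(1) is None       # fits in 32 bit, no pool slot
assert pool.reserve_literal(2**55) == 0      # first 64 bit constant
assert pool.reserve_literal(-2**33) == 8     # next slot
assert pool.reserve_literal(2**55) == 0      # same offset as before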
- return - assert self.size % 2 == 0, "not aligned properly" - if self.constant_64_ones != -1: - self.constant_64_ones = self.ensure_value(-1) - if self.constant_64_zeros != -1: - self.constant_64_zeros = self.ensure_value(0x0) - if self.constant_64_sign_bit != -1: - self.constant_64_sign_bit = self.ensure_value(-2**63) # == 0x8000000000000000 - if self.constant_max_64_positive != -1: - self.constant_max_64_positive = self.ensure_value(0x7fffFFFFffffFFFF) - asm.mc.write('\x00' * self.size) - wrote = 0 - for val, offset in self.offset_map.items(): - self.overwrite_64(asm.mc, offset, val) - wrote += 8 - def overwrite_64(self, mc, index, value): + # XXX def post_assemble(self, asm): + # XXX mc = asm.mc + # XXX pending_guard_tokens = asm.pending_guard_tokens + # XXX if self.size == 0: + # XXX return + # XXX for guard_token in pending_guard_tokens: + # XXX descr = guard_token.faildescr + # XXX offset = self.offset_descr[descr] + # XXX assert isinstance(offset, int) + # XXX assert offset >= 0 + # XXX assert guard_token._pool_offset != -1 + # XXX ptr = rffi.cast(lltype.Signed, guard_token.gcmap) + # XXX self._overwrite_64(mc, offset + RECOVERY_GCMAP_POOL_OFFSET, ptr) + + def _overwrite_64(self, mc, index, value): index += self.pool_start mc.overwrite(index, chr(value >> 56 & 0xff)) @@ -210,17 +144,3 @@ mc.overwrite(index+5, chr(value >> 16 & 0xff)) mc.overwrite(index+6, chr(value >> 8 & 0xff)) mc.overwrite(index+7, chr(value & 0xff)) - - def post_assemble(self, asm): - mc = asm.mc - pending_guard_tokens = asm.pending_guard_tokens - if self.size == 0: - return - for guard_token in pending_guard_tokens: - descr = guard_token.faildescr - offset = self.offset_descr[descr] - assert isinstance(offset, int) - assert offset >= 0 - assert guard_token._pool_offset != -1 - ptr = rffi.cast(lltype.Signed, guard_token.gcmap) - self.overwrite_64(mc, offset + RECOVERY_GCMAP_POOL_OFFSET, ptr) diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -62,44 +62,24 @@ assert set(save_around_call_regs).issubset(all_regs) pool = None - def convert_to_adr(self, c): - assert isinstance(c, ConstFloat) - adr = self.assembler.datablockwrapper.malloc_aligned(8, 8) - x = c.getfloatstorage() - rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), adr)[0] = x - return adr - - def convert_to_imm(self, c): - adr = self.convert_to_adr(c) - return l.ConstFloatLoc(adr) - - # POOL - #def convert_to_imm(self, c): - # off = self.pool.get_offset(c) - # return l.pool(off, float=True) - def __init__(self, longevity, frame_manager=None, assembler=None): RegisterManager.__init__(self, longevity, frame_manager, assembler) def call_result_location(self, v): return r.FPR_RETURN - # POOL - # def place_in_pool(self, var): - # offset = self.assembler.pool.get_offset(var) - # return l.pool(offset, float=True) + def convert_to_imm(self, c): + return l.pool(self.assembler.pool.get_offset(c), float=True) - # POOL - #def ensure_reg_or_pool(self, box): - # if isinstance(box, Const): - # loc = self.get_scratch_reg() - # immvalue = self.convert_to_int(box) - # self.assembler.mc.load_imm(loc, immvalue) - # else: - # assert box in self.temp_boxes - # loc = self.make_sure_var_in_reg(box, - # forbidden_vars=self.temp_boxes) - # return loc + def ensure_reg_or_pool(self, box): + if isinstance(box, Const): + offset = self.assembler.pool.get_offset(box) + return l.pool(offset, float=True) + else: + assert box in self.temp_boxes + loc = 
self.make_sure_var_in_reg(box, + forbidden_vars=self.temp_boxes) + return loc def get_scratch_reg(self): box = TempVar() @@ -109,21 +89,14 @@ def ensure_reg(self, box): if isinstance(box, Const): - # POOL - #poolloc = self.place_in_pool(box) - #tmp = TempVar() - #reg = self.force_allocate_reg(tmp, self.temp_boxes) - #self.temp_boxes.append(tmp) - #assert poolloc.displace >= 0 - #if poolloc.displace <= 2**12-1: - # self.assembler.mc.LD(reg, poolloc) - #else: - # self.assembler.mc.LDY(reg, poolloc) - loc = self.get_scratch_reg() - immadrvalue = self.convert_to_adr(box) - mc = self.assembler.mc - mc.load_imm(r.SCRATCH, immadrvalue) - mc.LD(loc, l.addr(0, r.SCRATCH)) + offset = self.assembler.pool.get_offset(box) + poolloc = l.pool(offset, float=True) + reg = self.get_scratch_reg() + if poolloc.displace <= 2**11-1: + self.assembler.mc.LD(reg, poolloc) + else: + self.assembler.mc.LDY(reg, poolloc) + return reg else: assert box in self.temp_boxes loc = self.make_sure_var_in_reg(box, @@ -159,32 +132,25 @@ assert isinstance(c, ConstPtr) return rffi.cast(lltype.Signed, c.value) + def ensure_reg_or_pool(self, box): + if isinstance(box, Const): + if self.assembler.pool.contains_box(box): + offset = self.assembler.pool.get_offset(box) + return l.pool(offset) + else: + return self.ensure_reg(box) + else: + assert box in self.temp_boxes + loc = self.make_sure_var_in_reg(box, + forbidden_vars=self.temp_boxes) + return loc + def convert_to_imm(self, c): - val = self.convert_to_int(c) - return l.imm(val) + if self.assembler.pool.contains_box(c): + return l.pool(self.assembler.pool.get_offset(c)) + immvalue = self.convert_to_int(c) + return l.imm(immvalue) - # POOL - #def convert_to_imm(self, c): - # off = self.pool.get_offset(c) - # return l.pool(off) - - #def ensure_reg_or_pool(self, box): - # if isinstance(box, Const): - # offset = self.assembler.pool.get_offset(box) - # return l.pool(offset) - # else: - # assert box in self.temp_boxes - # loc = self.make_sure_var_in_reg(box, - # forbidden_vars=self.temp_boxes) - # return loc - - # POOL - #offset = self.assembler.pool.get_offset(box) - #poolloc = l.pool(offset) - #tmp = TempInt() - #reg = self.force_allocate_reg(tmp, forbidden_vars=self.temp_boxes) - #self.temp_boxes.append(tmp) - #self.assembler.mc.LG(reg, poolloc) def ensure_reg(self, box): if isinstance(box, Const): loc = self.get_scratch_reg() @@ -388,10 +354,10 @@ self.rm = ZARCHRegisterManager(self.longevity, frame_manager = self.fm, assembler = self.assembler) - #self.rm.pool = self.assembler.pool + self.rm.pool = self.assembler.pool self.fprm = FPRegisterManager(self.longevity, frame_manager = self.fm, assembler = self.assembler) - #self.fprm.pool = self.assembler.pool + self.fprm.pool = self.assembler.pool return operations def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): @@ -607,12 +573,11 @@ else: return self.rm.call_result_location(v) - # POOL - #def ensure_reg_or_pool(self, box): - # if box.type == FLOAT: - # return self.fprm.ensure_reg_or_pool(box) - # else: - # return self.rm.ensure_reg_or_pool(box) + def ensure_reg_or_pool(self, box): + if box.type == FLOAT: + return self.fprm.ensure_reg_or_pool(box) + else: + return self.rm.ensure_reg_or_pool(box) def ensure_reg(self, box): if box.type == FLOAT: diff --git a/rpython/jit/backend/zarch/registers.py b/rpython/jit/backend/zarch/registers.py --- a/rpython/jit/backend/zarch/registers.py +++ b/rpython/jit/backend/zarch/registers.py @@ -7,7 +7,7 @@ [r0,r1,r2,r3,r4,r5,r6,r7,r8, r9,r10,r11,r12,r13,r14,r15] = registers 
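# [Illustrative sketch of why ensure_reg above picks LD or LDY for a pool operand:
#  the short form carries a 12 bit unsigned displacement, the long-displacement
#  form a signed 20 bit one, so the 2**11-1 bound used in the diff is simply a
#  conservative cut-off.]
def pick_float_load(displacement):
    if 0 <= displacement <= 2**12 - 1:
        return 'LD'       # base register + 12 bit displacement
    return 'LDY'          # long-displacement form

assert pick_float_load(8) == 'LD'
assert pick_float_load(2**15) == 'LDY'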
-MANAGED_REGS = [r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r13] # keep this list sorted (asc)! +MANAGED_REGS = [r2,r3,r4,r5,r6,r7,r8,r9,r10,r11] # keep this list sorted (asc)! MANAGED_REG_PAIRS = [(r2,r3), (r4,r5), (r6,r7), (r8,r9), (r10,r11)] VOLATILES = [r2,r3,r4,r5,r6] SP = r15 @@ -39,6 +39,7 @@ for _r in MANAGED_FP_REGS: ALL_REG_INDEXES[_r] = len(ALL_REG_INDEXES) # NOT used, but keeps JITFRAME_FIXED_SIZE even +ALL_REG_INDEXES[f15] = len(ALL_REG_INDEXES) JITFRAME_FIXED_SIZE = len(ALL_REG_INDEXES) def odd_reg(r): diff --git a/rpython/jit/backend/zarch/test/test_pool.py b/rpython/jit/backend/zarch/test/test_pool.py --- a/rpython/jit/backend/zarch/test/test_pool.py +++ b/rpython/jit/backend/zarch/test/test_pool.py @@ -12,13 +12,18 @@ from rpython.jit.backend.detect_cpu import getcpuclass from rpython.jit.tool.oparser import parse +class FakeAsm(object): + def write_i64(self, val): + pass + class TestPoolZARCH(object): def setup_class(self): self.calldescr = None def setup_method(self, name): self.pool = LiteralPool() - self.asm = None + self.asm = FakeAsm() + self.asm.mc = FakeAsm() self.cpu = getcpuclass()(None, None) self.cpu.setup_once() @@ -34,20 +39,20 @@ return False def test_constant_in_call_malloc(self): - c = ConstPtr(rffi.cast(llmemory.GCREF, 0xdeadbeef)) + c = ConstPtr(rffi.cast(llmemory.GCREF, 0xdeadbeef1234)) self.ensure_can_hold(rop.CALL_MALLOC_GC, [c], descr=self.calldescr) assert self.const_in_pool(c) - assert self.const_in_pool(ConstPtr(rffi.cast(llmemory.GCREF, 0xdeadbeef))) + assert self.const_in_pool(ConstPtr(rffi.cast(llmemory.GCREF, 0xdeadbeef1234))) @py.test.mark.parametrize('opnum', [rop.INT_ADD, rop.INT_SUB, rop.INT_MUL]) def test_constants_arith(self, opnum): for c1 in [ConstInt(1), ConstInt(2**44), InputArgInt(1)]: - for c2 in [InputArgInt(1), ConstInt(1), ConstInt(2**55)]: + for c2 in [InputArgInt(1), ConstInt(-2**33), ConstInt(2**55)]: self.ensure_can_hold(opnum, [c1,c2]) - if c1.is_constant(): + if c1.is_constant() and not -2**31 <= c1.getint() <= 2**31-1: assert self.const_in_pool(c1) - if c2.is_constant(): + if c2.is_constant() and not -2**31 <= c1.getint() <= 2**31-1: assert self.const_in_pool(c2) def test_pool_overflow(self): diff --git a/rpython/jit/backend/zarch/test/test_runner.py b/rpython/jit/backend/zarch/test/test_runner.py --- a/rpython/jit/backend/zarch/test/test_runner.py +++ b/rpython/jit/backend/zarch/test/test_runner.py @@ -24,6 +24,6 @@ cpu.setup_once() return cpu - add_loop_instructions = "lg; lgr; agr; cgfi; jge; j;$" - bridge_loop_instructions = "lg; cgfi; jnl; lghi; " \ - "iilf;( iihf;)? iilf;( iihf;)? basr; iilf;( iihf;)? br;$" + add_loop_instructions = "lg; lgr; larl; agr; cgfi; jge; j;$" + bridge_loop_instructions = "larl; lg; cgfi; jnl; lghi; " \ + "(lgfi|iilf);( iihf;)? (lgfi|iilf);( iihf;)? basr; (lgfi|iilf);( iihf;)? br;$" From pypy.commits at gmail.com Mon Mar 7 06:19:00 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 07 Mar 2016 03:19:00 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: updated s390x docu Message-ID: <56dd63a4.88c8c20a.8eb98.ffff9298@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82844:a144c706eae1 Date: 2016-03-07 12:18 +0100 http://bitbucket.org/pypy/pypy/changeset/a144c706eae1/ Log: updated s390x docu diff --git a/rpython/doc/s390x.rst b/rpython/doc/s390x.rst --- a/rpython/doc/s390x.rst +++ b/rpython/doc/s390x.rst @@ -1,16 +1,20 @@ .. 
_s390x: -Translation on the IBM Mainframe -================================ +S390X JIT Backend +================= -Redhat Linux (rel65) --------------------- +Our JIT implements the 64 bit version of the IBM Mainframe called s390x. +Note that this architecture is big endian. -Unfortunatley there is no ffi development package (yet?), thus -one needs to install this manually. -libffi is not installed on the rl65. -This can be resolved by installing it locally (./configure && make install) and -adjusting th PKG_CONFIG_PATH to point to the install location. -In addition the LD_LIBRARY_PATH must be set to the install location the libffi.so -can be found. +The following facilities need to be installed to operate +correctly (on all of the machines used for development these were installed): +* General-Instructions-Extension +* Long-Displacement +* Binary Floating Point (IEEE) + +Translating +----------- + +Ensure that libffi is installed (version 3.0 or newer should do). +CPython should be version 2.7.+. From pypy.commits at gmail.com Mon Mar 7 09:11:19 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 07 Mar 2016 06:11:19 -0800 (PST) Subject: [pypy-commit] pypy s390x-enhance-speed: using load_imm via the pool instead of load_imm_plus for gcrootmap (root stack top addr). there is no gain in doing so on s390x. Message-ID: <56dd8c07.88c8c20a.8eb98.ffffda89@mx.google.com> Author: Richard Plangger Branch: s390x-enhance-speed Changeset: r82845:62241a24deb5 Date: 2016-03-07 15:10 +0100 http://bitbucket.org/pypy/pypy/changeset/62241a24deb5/ Log: using load_imm via the pool instead of load_imm_plus for gcrootmap (root stack top addr). there is no gain in doing so on s390x. diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -350,8 +350,8 @@ gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: - diff = mc.load_imm_plus(r.r5, gcrootmap.get_root_stack_top_addr()) - mc.load(r.r5, r.r5, diff) + diff = mc.load_imm(r.r5, gcrootmap.get_root_stack_top_addr()) + mc.load(r.r5, r.r5, 0) mc.store(r.r2, r.r5, -WORD) self._pop_core_regs_from_jitframe(mc, r.MANAGED_REGS) @@ -978,9 +978,8 @@ if gcrootmap: if gcrootmap.is_shadow_stack: if shadowstack_reg is None: - diff = mc.load_imm_plus(r.SPP, - gcrootmap.get_root_stack_top_addr()) - mc.load(r.SPP, r.SPP, diff) + diff = mc.load_imm(r.SPP, gcrootmap.get_root_stack_top_addr()) + mc.load(r.SPP, r.SPP, 0) shadowstack_reg = r.SPP mc.load(r.SPP, shadowstack_reg, -WORD) wbdescr = self.cpu.gc_ll_descr.write_barrier_descr @@ -1048,18 +1047,18 @@ def _call_header_shadowstack(self, gcrootmap): # we need to put one word into the shadowstack: the jitframe (SPP) # we saved all registers to the stack - RCS1 = r.r2 - RCS2 = r.r3 - RCS3 = r.r4 + RCS1 = r.r3 + RCS2 = r.r4 + RCS3 = r.r5 mc = self.mc - diff = mc.load_imm_plus(RCS1, gcrootmap.get_root_stack_top_addr()) - mc.load(RCS2, RCS1, diff) # ld RCS2, [rootstacktop] + mc.load_imm(RCS1, gcrootmap.get_root_stack_top_addr()) + mc.load(RCS2, RCS1, 0) # ld RCS2, [rootstacktop] # mc.LGR(RCS3, RCS2) mc.AGHI(RCS3, l.imm(WORD)) # add RCS3, RCS2, WORD mc.store(r.SPP, RCS2, 0) # std SPP, RCS2 # - mc.store(RCS3, RCS1, diff) # std RCS3, [rootstacktop] + mc.store(RCS3, RCS1, 0) # std RCS3, [rootstacktop] def _call_footer_shadowstack(self, gcrootmap): # r6 -> r15 can be used freely, they will be restored by # _call_footer after this call RCS1 = r.r9 RCS2 = r.r10 mc = self.mc - diff =
mc.load_imm_plus(RCS1, gcrootmap.get_root_stack_top_addr()) - mc.load(RCS2, RCS1, diff) # ld RCS2, [rootstacktop] + mc.load_imm(RCS1, gcrootmap.get_root_stack_top_addr()) + mc.load(RCS2, RCS1, 0) # ld RCS2, [rootstacktop] mc.AGHI(RCS2, l.imm(-WORD)) # sub RCS2, RCS2, WORD - mc.store(RCS2, RCS1, diff) # std RCS2, [rootstacktop] + mc.store(RCS2, RCS1, 0) # std RCS2, [rootstacktop] def _call_footer(self): # the return value is the jitframe diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -117,6 +117,9 @@ for op in operations: self.ensure_can_hold_constants(asm, op) self._ensure_value(asm.cpu.pos_exc_value(), asm) + gcrootmap = asm.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + self._ensure_value(gcrootmap.get_root_stack_top_addr()) # TODO add more values that are loaded with load_imm # XXX def post_assemble(self, asm): From pypy.commits at gmail.com Mon Mar 7 10:16:17 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 07 Mar 2016 07:16:17 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: start fighting with unrolling, make most of the tests pass in test_optimizeopt by simplifying what CompileData stores Message-ID: <56dd9b41.918e1c0a.7d129.6990@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82846:19da356e7036 Date: 2016-03-07 17:15 +0200 http://bitbucket.org/pypy/pypy/changeset/19da356e7036/ Log: start fighting with unrolling, make most of the tests pass in test_optimizeopt by simplifying what CompileData stores diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -29,11 +29,8 @@ memo = None def forget_optimization_info(self): - return # no longer necessary? 
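# [Illustrative sketch of the packed-trace flow the CompileData classes now rely
#  on.  Only Trace, record_op and get_iter appear in the diffs; the glue code
#  here is an assumption pieced together from the test helpers.]
def pack_loop(loop):
    from rpython.jit.metainterp.opencoder import Trace
    trace = Trace(loop.inputargs)
    for op in loop.operations:
        trace.record_op(op.getopnum(), op.getarglist(), op.getdescr())
    return trace

def optimize_simple(optimizer, loop, call_pure_results=None):
    # roughly what SimpleCompileData.optimize() does with its packed trace
    trace = pack_loop(loop)
    return optimizer.propagate_all_forward(trace.get_iter(), call_pure_results)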
- for arg in self.start_label.getarglist(): + for arg in self.trace.inputargs: arg.set_forwarded(None) - for op in self.operations: - op.set_forwarded(None) class LoopCompileData(CompileData): """ An object that accumulates all of the necessary info for @@ -41,13 +38,8 @@ This is the case of label() ops label() """ - def __init__(self, start_label, end_label, trace, - call_pure_results=None, enable_opts=None): - self.start_label = start_label - self.end_label = end_label + def __init__(self, trace, call_pure_results=None, enable_opts=None): self.enable_opts = enable_opts - assert start_label.getopnum() == rop.LABEL - assert end_label.getopnum() == rop.LABEL self.trace = trace self.call_pure_results = call_pure_results @@ -57,22 +49,19 @@ if unroll: opt = UnrollOptimizer(metainterp_sd, jitdriver_sd, optimizations) - return opt.optimize_preamble(self.start_label, self.end_label, - self.trace, + return opt.optimize_preamble(self.trace, self.call_pure_results, self.box_names_memo) else: opt = Optimizer(metainterp_sd, jitdriver_sd, optimizations) - return opt.propagate_all_forward(self.start_label.getarglist(), - self.trace, self.call_pure_results) + return opt.propagate_all_forward(self.trace, self.call_pure_results) class SimpleCompileData(CompileData): """ This represents label() ops jump with no extra info associated with the label """ - def __init__(self, start_label, trace, call_pure_results=None, + def __init__(self, trace, call_pure_results=None, enable_opts=None): - self.start_label = start_label self.trace = trace self.call_pure_results = call_pure_results self.enable_opts = enable_opts @@ -82,7 +71,8 @@ #assert not unroll opt = Optimizer(metainterp_sd, jitdriver_sd, optimizations) - return opt.propagate_all_forward(self.trace, self.call_pure_results) + return opt.propagate_all_forward(self.trace.get_iter(), + self.call_pure_results) class BridgeCompileData(CompileData): """ This represents ops() with a jump at the end that goes to some @@ -109,12 +99,11 @@ """ This represents label() ops jump with extra info that's from the run of LoopCompileData. 
Jump goes to the same label """ - def __init__(self, start_label, end_jump, operations, state, + def __init__(self, trace, celltoken, state, call_pure_results=None, enable_opts=None, inline_short_preamble=True): - self.start_label = start_label - self.end_jump = end_jump - self.operations = operations + self.trace = trace + self.celltoken = celltoken self.enable_opts = enable_opts self.state = state self.call_pure_results = call_pure_results @@ -125,9 +114,8 @@ assert unroll # we should not be here if it's disabled opt = UnrollOptimizer(metainterp_sd, jitdriver_sd, optimizations) - return opt.optimize_peeled_loop(self.start_label, self.end_jump, - self.operations, self.state, self.call_pure_results, - self.inline_short_preamble) + return opt.optimize_peeled_loop(self.trace, self.celltoken, self.state, + self.call_pure_results, self.inline_short_preamble) def show_procedures(metainterp_sd, procedure=None, error=None): # debugging diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -61,6 +61,7 @@ self.trace = trace self.inputargs = [rop.inputarg_from_tp(arg.type) for arg in self.trace.inputargs] + self.start = 0 self.pos = 0 self._count = 0 self.end = end diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py --- a/rpython/jit/metainterp/optimizeopt/info.py +++ b/rpython/jit/metainterp/optimizeopt/info.py @@ -326,17 +326,17 @@ def make_guards(self, op, short, optimizer): if self._known_class is not None: - short.append(ResOperation(rop.GUARD_NONNULL, [op], None)) + short.append(ResOperation(rop.GUARD_NONNULL, [op])) if not optimizer.cpu.remove_gctypeptr: - short.append(ResOperation(rop.GUARD_IS_OBJECT, [op], None)) + short.append(ResOperation(rop.GUARD_IS_OBJECT, [op])) short.append(ResOperation(rop.GUARD_CLASS, - [op, self._known_class], None)) + [op, self._known_class])) elif self.descr is not None: - short.append(ResOperation(rop.GUARD_NONNULL, [op], None)) + short.append(ResOperation(rop.GUARD_NONNULL, [op])) if not optimizer.cpu.remove_gctypeptr: - short.append(ResOperation(rop.GUARD_IS_OBJECT, [op], None)) + short.append(ResOperation(rop.GUARD_IS_OBJECT, [op])) short.append(ResOperation(rop.GUARD_SUBCLASS, [op, - ConstInt(self.descr.get_vtable())], None)) + ConstInt(self.descr.get_vtable())])) else: AbstractStructPtrInfo.make_guards(self, op, short, optimizer) @@ -349,8 +349,8 @@ if self.descr is not None: c_typeid = ConstInt(self.descr.get_type_id()) short.extend([ - ResOperation(rop.GUARD_NONNULL, [op], None), - ResOperation(rop.GUARD_GC_TYPE, [op, c_typeid], None) + ResOperation(rop.GUARD_NONNULL, [op]), + ResOperation(rop.GUARD_GC_TYPE, [op, c_typeid]) ]) @specialize.argtype(1) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -25,9 +25,9 @@ pass class BasicLoopInfo(LoopInfo): - def __init__(self, inputargs, quasi_immutable_deps): + def __init__(self, inputargs, quasi_immutable_deps, jump_op): self.inputargs = inputargs - self.label_op = ResOperation(rop.LABEL, inputargs, -1) + self.jump_op = jump_op self.quasi_immutable_deps = quasi_immutable_deps self.extra_same_as = [] @@ -506,7 +506,6 @@ return CONST_0 def propagate_all_forward(self, trace, call_pure_results=None, flush=True): - trace = trace.get_iter() self.trace = trace self.call_pure_results 
= call_pure_results last_op = None @@ -522,11 +521,11 @@ # accumulate counters if flush: self.flush() - if last_op: - self.first_optimization.propagate_forward(last_op) + if last_op: + self.first_optimization.propagate_forward(last_op) self.resumedata_memo.update_counters(self.metainterp_sd.profiler) - return (BasicLoopInfo(trace.inputargs, self.quasi_immutable_deps), + return (BasicLoopInfo(trace.inputargs, self.quasi_immutable_deps, last_op), self._newoperations) def _clean_optimization_info(self, lst): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -24,15 +24,13 @@ def optimize_loop(self, ops, optops, call_pure_results=None): loop = self.parse(ops) token = JitCellToken() - label_op = ResOperation(rop.LABEL, loop.inputargs, -1, - descr=TargetToken(token)) if loop.operations[-1].getopnum() == rop.JUMP: loop.operations[-1].setdescr(token) exp = parse(optops, namespace=self.namespace.copy()) expected = convert_old_style_to_targets(exp, jump=True) call_pure_results = self._convert_call_pure_results(call_pure_results) trace = self.convert_loop_to_packed(loop) - compile_data = compile.SimpleCompileData(label_op, trace, + compile_data = compile.SimpleCompileData(trace, call_pure_results) info, ops = self._do_optimize_loop(compile_data) label_op = ResOperation(rop.LABEL, info.inputargs, -1) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -574,19 +574,18 @@ self.add_guard_future_condition(loop) jump_op = loop.operations[-1] assert jump_op.getopnum() == rop.JUMP - jump_op.setdescr(JitCellToken()) - start_label = ResOperation(rop.LABEL, loop.inputargs, - descr=jump_op.getdescr()) - end_label = jump_op.copy_and_change(opnum=rop.LABEL) + celltoken = JitCellToken() + jump_op.setdescr(celltoken) + #start_label = ResOperation(rop.LABEL, loop.inputargs, + # descr=jump_op.getdescr()) + #end_label = jump_op.copy_and_change(opnum=rop.LABEL) call_pure_results = self._convert_call_pure_results(call_pure_results) - t = self.convert_loop_to_packed(loop, skip_last=True) - preamble_data = compile.LoopCompileData(start_label, end_label, t, - call_pure_results) + t = self.convert_loop_to_packed(loop) + preamble_data = compile.LoopCompileData(t, call_pure_results) start_state, preamble_ops = self._do_optimize_loop(preamble_data) preamble_data.forget_optimization_info() - loop_data = compile.UnrolledLoopData(start_label, jump_op, - t, start_state, - call_pure_results) + loop_data = compile.UnrolledLoopData(preamble_data.trace, + celltoken, start_state, call_pure_results) loop_info, ops = self._do_optimize_loop(loop_data) preamble = TreeLoop('preamble') preamble.inputargs = start_state.renamed_inputargs diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -115,19 +115,16 @@ return modifier.get_virtual_state(args) def _check_no_forwarding(self, lsts, check_newops=True): - return for lst in lsts: for op in lst: assert op.get_forwarded() is None if check_newops: assert not self.optimizer._newoperations - def optimize_preamble(self, start_label, 
end_label, trace, call_pure_results, - memo): - #self._check_no_forwarding([[start_label, end_label], ops]) - info, newops = self.optimizer.propagate_all_forward(trace, + def optimize_preamble(self, trace, call_pure_results, memo): + info, newops = self.optimizer.propagate_all_forward(trace.get_iter(), call_pure_results, flush=False) - exported_state = self.export_state(start_label, end_label.getarglist(), + exported_state = self.export_state(info.jump_op.getarglist(), info.inputargs, memo) exported_state.quasi_immutable_deps = info.quasi_immutable_deps # we need to absolutely make sure that we've cleaned up all @@ -135,11 +132,11 @@ self.optimizer._clean_optimization_info(self.optimizer._newoperations) return exported_state, self.optimizer._newoperations - def optimize_peeled_loop(self, start_label, end_jump, trace, state, + def optimize_peeled_loop(self, trace, celltoken, state, call_pure_results, inline_short_preamble=True): - #self._check_no_forwarding([[start_label, end_jump], ops]) + trace = trace.get_iter() try: - label_args = self.import_state(start_label, state) + label_args = self.import_state(trace.inputargs, state) except VirtualStatesCantMatch: raise InvalidLoop("Cannot import state, virtual states don't match") self.potential_extra_ops = {} @@ -149,14 +146,14 @@ trace, call_pure_results, flush=False) except SpeculativeError: raise InvalidLoop("Speculative heap access would be ill-typed") + end_jump = info.jump_op label_op = ResOperation(rop.LABEL, label_args, - descr=start_label.getdescr()) + descr=celltoken) for a in end_jump.getarglist(): self.optimizer.force_box_for_end_of_preamble( self.optimizer.get_box_replacement(a)) current_vs = self.get_virtual_state(end_jump.getarglist()) # pick the vs we want to jump to - celltoken = start_label.getdescr() assert isinstance(celltoken, JitCellToken) target_virtual_state = self.pick_virtual_state(current_vs, @@ -227,11 +224,10 @@ def optimize_bridge(self, start_label, operations, call_pure_results, inline_short_preamble, box_names_memo): - self._check_no_forwarding([start_label.getarglist(), - operations]) + self._check_no_forwarding([start_label.getarglist()]) info, ops = self.optimizer.propagate_all_forward( start_label.getarglist()[:], operations[:-1], - call_pure_results, True) + call_pure_results) jump_op = operations[-1] cell_token = jump_op.getdescr() assert isinstance(cell_token, JitCellToken) @@ -375,8 +371,7 @@ op = sop.copy_and_change(sop.getopnum(), arglist, descr=compile.ResumeAtPositionDescr()) assert isinstance(op, GuardResOp) - op.rd_snapshot = patchguardop.rd_snapshot - op.rd_frame_info_list = patchguardop.rd_frame_info_list + op.rd_resume_position = patchguardop.rd_resume_position else: op = sop.copy_and_change(sop.getopnum(), arglist) mapping[sop] = op @@ -412,8 +407,7 @@ continue self._expand_info(item, infos) - def export_state(self, start_label, original_label_args, renamed_inputargs, - memo): + def export_state(self, original_label_args, renamed_inputargs, memo): end_args = [self.optimizer.force_box_for_end_of_preamble(a) for a in original_label_args] self.optimizer.flush() @@ -435,18 +429,17 @@ if not isinstance(op, Const): self._expand_info(op, infos) self.optimizer._clean_optimization_info(end_args) - self.optimizer._clean_optimization_info(start_label.getarglist()) return ExportedState(label_args, end_args, virtual_state, infos, short_boxes, renamed_inputargs, short_inputargs, memo) - def import_state(self, targetop, exported_state): + def import_state(self, targetargs, exported_state): # the mapping 
between input args (from old label) and what we need # to actually emit. Update the info assert (len(exported_state.next_iteration_args) == - len(targetop.getarglist())) + len(targetargs)) for i, target in enumerate(exported_state.next_iteration_args): - source = targetop.getarg(i) + source = targetargs[i] assert source is not target source.set_forwarded(target) info = exported_state.exported_infos.get(target, None) @@ -456,7 +449,7 @@ # import the optimizer state, starting from boxes that can be produced # by short preamble label_args = exported_state.virtual_state.make_inputargs( - targetop.getarglist(), self.optimizer) + targetargs, self.optimizer) self.short_preamble_producer = ShortPreambleBuilder( label_args, exported_state.short_boxes, From pypy.commits at gmail.com Mon Mar 7 10:33:59 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 07 Mar 2016 07:33:59 -0800 (PST) Subject: [pypy-commit] pypy s390x-enhance-speed: added two more pointer to the literal pool, exchanged some registers for better pipeline flow Message-ID: <56dd9f67.c16dc20a.d91b7.fffff68f@mx.google.com> Author: Richard Plangger Branch: s390x-enhance-speed Changeset: r82847:cc0fd9f1b25f Date: 2016-03-07 15:38 +0100 http://bitbucket.org/pypy/pypy/changeset/cc0fd9f1b25f/ Log: added two more pointer to the literal pool, exchanged some registers for better pipeline flow diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -1063,8 +1063,8 @@ def _call_footer_shadowstack(self, gcrootmap): # r6 -> r15 can be used freely, they will be restored by # _call_footer after this call - RCS1 = r.r9 - RCS2 = r.r10 + RCS1 = r.r8 + RCS2 = r.r7 mc = self.mc mc.load_imm(RCS1, gcrootmap.get_root_stack_top_addr()) mc.load(RCS2, RCS1, 0) # ld RCS2, [rootstacktop] @@ -1072,13 +1072,14 @@ mc.store(RCS2, RCS1, 0) # std RCS2, [rootstacktop] def _call_footer(self): - # the return value is the jitframe - self.mc.LGR(r.r2, r.SPP) gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: self._call_footer_shadowstack(gcrootmap) + # the return value is the jitframe + self.mc.LGR(r.r2, r.SPP) + size = STD_FRAME_SIZE_IN_BYTES # f8 through f15 are saved registers (= non volatile) # TODO it would be good to detect if any float is used in the loop @@ -1248,11 +1249,11 @@ gcmap = self._finish_gcmap else: gcmap = lltype.nullptr(jitframe.GCMAP) - self.load_gcmap(self.mc, r.r2, gcmap) + self.load_gcmap(self.mc, r.r9, gcmap) - self.mc.load_imm(r.r3, fail_descr_loc.getint()) - self.mc.STG(r.r3, l.addr(ofs, r.SPP)) - self.mc.STG(r.r2, l.addr(ofs2, r.SPP)) + self.mc.load_imm(r.r10, fail_descr_loc.getint()) + self.mc.STG(r.r9, l.addr(ofs2, r.SPP)) + self.mc.STG(r.r10, l.addr(ofs, r.SPP)) # exit function self._call_footer() diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -117,9 +117,16 @@ for op in operations: self.ensure_can_hold_constants(asm, op) self._ensure_value(asm.cpu.pos_exc_value(), asm) + # the top of shadow stack gcrootmap = asm.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: - self._ensure_value(gcrootmap.get_root_stack_top_addr()) + self._ensure_value(gcrootmap.get_root_stack_top_addr(), asm) + # endaddr of insert stack check + endaddr, lengthaddr, _ = self.cpu.insert_stack_check() + self._ensure_value(endaddr, asm) + # fast gil + fastgil = 
rgil.gil_fetch_fastgil() + self._ensure_value(fastgil, asm) # TODO add more values that are loaded with load_imm # XXX def post_assemble(self, asm): From pypy.commits at gmail.com Mon Mar 7 10:34:00 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 07 Mar 2016 07:34:00 -0800 (PST) Subject: [pypy-commit] pypy s390x-enhance-speed: missing module import, wrong attr access Message-ID: <56dd9f68.552f1c0a.2831d.ffffaea7@mx.google.com> Author: Richard Plangger Branch: s390x-enhance-speed Changeset: r82848:adcad73cca0f Date: 2016-03-07 16:28 +0100 http://bitbucket.org/pypy/pypy/changeset/adcad73cca0f/ Log: missing module import, wrong attr access diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -1,5 +1,6 @@ from rpython.jit.backend.zarch import registers as r from rpython.jit.backend.zarch import locations as l +from rpython.rlib import rgil from rpython.jit.metainterp.history import (INT, REF, FLOAT, TargetToken) from rpython.rlib.objectmodel import we_are_translated @@ -122,10 +123,10 @@ if gcrootmap and gcrootmap.is_shadow_stack: self._ensure_value(gcrootmap.get_root_stack_top_addr(), asm) # endaddr of insert stack check - endaddr, lengthaddr, _ = self.cpu.insert_stack_check() + endaddr, lengthaddr, _ = asm.cpu.insert_stack_check() self._ensure_value(endaddr, asm) # fast gil - fastgil = rgil.gil_fetch_fastgil() + fastgil = rffi.cast(lltype.Signed, rgil.gil_fetch_fastgil()) self._ensure_value(fastgil, asm) # TODO add more values that are loaded with load_imm From pypy.commits at gmail.com Mon Mar 7 11:05:08 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 07 Mar 2016 08:05:08 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: almost fix test_optimizeopt, two tests left Message-ID: <56dda6b4.030f1c0a.b2bc5.ffff83f5@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82849:91bcb43bca18 Date: 2016-03-07 18:04 +0200 http://bitbucket.org/pypy/pypy/changeset/91bcb43bca18/ Log: almost fix test_optimizeopt, two tests left diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -38,10 +38,13 @@ This is the case of label() ops label() """ - def __init__(self, trace, call_pure_results=None, enable_opts=None): + def __init__(self, trace, runtime_boxes, call_pure_results=None, + enable_opts=None): self.enable_opts = enable_opts self.trace = trace self.call_pure_results = call_pure_results + assert runtime_boxes is not None + self.runtime_boxes = runtime_boxes def optimize(self, metainterp_sd, jitdriver_sd, optimizations, unroll): from rpython.jit.metainterp.optimizeopt.unroll import (UnrollOptimizer, @@ -50,6 +53,7 @@ if unroll: opt = UnrollOptimizer(metainterp_sd, jitdriver_sd, optimizations) return opt.optimize_preamble(self.trace, + self.runtime_boxes, self.call_pure_results, self.box_names_memo) else: diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py --- a/rpython/jit/metainterp/optimizeopt/info.py +++ b/rpython/jit/metainterp/optimizeopt/info.py @@ -114,7 +114,7 @@ assert self.get_last_guard(optimizer).is_guard() def make_guards(self, op, short, optimizer): - op = ResOperation(rop.GUARD_NONNULL, [op], None) + op = ResOperation(rop.GUARD_NONNULL, [op]) short.append(op) class AbstractVirtualPtrInfo(NonNullPtrInfo): @@ -592,7 +592,7 @@ def make_guards(self, op, short, optimizer): 
AbstractVirtualPtrInfo.make_guards(self, op, short, optimizer) c_type_id = ConstInt(self.descr.get_type_id()) - short.append(ResOperation(rop.GUARD_GC_TYPE, [op, c_type_id], None)) + short.append(ResOperation(rop.GUARD_GC_TYPE, [op, c_type_id])) if self.lenbound is not None: lenop = ResOperation(rop.ARRAYLEN_GC, [op], descr=self.descr) short.append(lenop) diff --git a/rpython/jit/metainterp/optimizeopt/shortpreamble.py b/rpython/jit/metainterp/optimizeopt/shortpreamble.py --- a/rpython/jit/metainterp/optimizeopt/shortpreamble.py +++ b/rpython/jit/metainterp/optimizeopt/shortpreamble.py @@ -396,7 +396,7 @@ arg.set_forwarded(None) self.short.append(preamble_op) if preamble_op.is_ovf(): - self.short.append(ResOperation(rop.GUARD_NO_OVERFLOW, [], None)) + self.short.append(ResOperation(rop.GUARD_NO_OVERFLOW, [])) info = preamble_op.get_forwarded() preamble_op.set_forwarded(None) if optimizer is not None: diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py @@ -49,7 +49,6 @@ call_pure_results=None, expected_short=None, jump_values=None): loop = self.parse(ops) - self.set_values(loop.operations, jump_values) if expected != "crash!": expected = self.parse(expected) if expected_preamble: @@ -58,7 +57,7 @@ # the short preamble doesn't have fail descrs, they are patched in when it is used expected_short = self.parse(expected_short, want_fail_descr=False) - info = self.unroll_and_optimize(loop, call_pure_results) + info = self.unroll_and_optimize(loop, call_pure_results, jump_values) preamble = info.preamble preamble.check_consistency(check_descr=False) @@ -873,7 +872,7 @@ jump(i1, p1, p3sub) """ self.optimize_loop(ops, expected, preamble, - jump_values=[None, self.nodefulladdr, self.nodefulladdr, None]) + jump_values=[None, self.nodefulladdr, self.nodefulladdr]) def test_dont_delay_setfields(self): ops = """ diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -570,18 +570,21 @@ op.position = newop.position return trace - def unroll_and_optimize(self, loop, call_pure_results=None): + def unroll_and_optimize(self, loop, call_pure_results=None, + jump_values=None): self.add_guard_future_condition(loop) jump_op = loop.operations[-1] assert jump_op.getopnum() == rop.JUMP celltoken = JitCellToken() + runtime_boxes = self.pack_into_boxes(jump_op, jump_values) jump_op.setdescr(celltoken) #start_label = ResOperation(rop.LABEL, loop.inputargs, # descr=jump_op.getdescr()) #end_label = jump_op.copy_and_change(opnum=rop.LABEL) call_pure_results = self._convert_call_pure_results(call_pure_results) t = self.convert_loop_to_packed(loop) - preamble_data = compile.LoopCompileData(t, call_pure_results) + preamble_data = compile.LoopCompileData(t, runtime_boxes, + call_pure_results) start_state, preamble_ops = self._do_optimize_loop(preamble_data) preamble_data.forget_optimization_info() loop_data = compile.UnrolledLoopData(preamble_data.trace, @@ -597,13 +600,16 @@ return Info(preamble, loop_info.target_token.short_preamble, start_state.virtual_state) - def set_values(self, ops, jump_values=None): - jump_op = ops[-1] + def pack_into_boxes(self, jump_op, jump_values): assert jump_op.getopnum() == rop.JUMP + r = [] if 
jump_values is not None: + assert len(jump_values) == len(jump_op.getarglist()) for i, v in enumerate(jump_values): if v is not None: - jump_op.getarg(i).setref_base(v) + r.append(InputArgRef(v)) + else: + r.append(None) else: for i, box in enumerate(jump_op.getarglist()): if box.type == 'r' and not box.is_constant(): @@ -611,9 +617,10 @@ # object here. If you need something different, you # need to pass a 'jump_values' argument to e.g. # optimize_loop() - box.setref_base(self.nodefulladdr) - - + r.append(InputArgRef(self.nodefulladdr)) + else: + r.append(None) + return r class FakeDescr(compile.ResumeGuardDescr): def clone_if_mutable(self): diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -121,11 +121,12 @@ if check_newops: assert not self.optimizer._newoperations - def optimize_preamble(self, trace, call_pure_results, memo): + def optimize_preamble(self, trace, runtime_boxes, call_pure_results, memo): info, newops = self.optimizer.propagate_all_forward(trace.get_iter(), call_pure_results, flush=False) exported_state = self.export_state(info.jump_op.getarglist(), - info.inputargs, memo) + info.inputargs, + runtime_boxes, memo) exported_state.quasi_immutable_deps = info.quasi_immutable_deps # we need to absolutely make sure that we've cleaned up all # the optimization info @@ -182,13 +183,14 @@ self.optimizer._newoperations) try: - new_virtual_state = self.jump_to_existing_trace(end_jump, label_op) + new_virtual_state = self.jump_to_existing_trace(end_jump, label_op, + state.runtime_boxes) except InvalidLoop: # inlining short preamble failed, jump to preamble self.jump_to_preamble(celltoken, end_jump, info) return (UnrollInfo(target_token, label_op, extra_same_as, self.optimizer.quasi_immutable_deps), - self.optimizer._newoperations) + self.optimizer._newoperations) if new_virtual_state is not None: self.jump_to_preamble(celltoken, end_jump, info) return (UnrollInfo(target_token, label_op, extra_same_as, @@ -286,7 +288,7 @@ return info, self.optimizer._newoperations[:] - def jump_to_existing_trace(self, jump_op, label_op): + def jump_to_existing_trace(self, jump_op, label_op, runtime_boxes): jitcelltoken = jump_op.getdescr() assert isinstance(jitcelltoken, JitCellToken) virtual_state = self.get_virtual_state(jump_op.getarglist()) @@ -297,7 +299,7 @@ continue try: extra_guards = target_virtual_state.generate_guards( - virtual_state, args, jump_op.getarglist(), self.optimizer) + virtual_state, args, runtime_boxes, self.optimizer) patchguardop = self.optimizer.patchguardop for guard in extra_guards.extra_guards: if isinstance(guard, GuardResOp): @@ -407,7 +409,8 @@ continue self._expand_info(item, infos) - def export_state(self, original_label_args, renamed_inputargs, memo): + def export_state(self, original_label_args, renamed_inputargs, + runtime_boxes, memo): end_args = [self.optimizer.force_box_for_end_of_preamble(a) for a in original_label_args] self.optimizer.flush() @@ -431,7 +434,7 @@ self.optimizer._clean_optimization_info(end_args) return ExportedState(label_args, end_args, virtual_state, infos, short_boxes, renamed_inputargs, - short_inputargs, memo) + short_inputargs, runtime_boxes, memo) def import_state(self, targetargs, exported_state): # the mapping between input args (from old label) and what we need @@ -492,11 +495,13 @@ * renamed_inputargs - the start label arguments in optimized version * short_inputargs - the 
renamed inputargs for short preamble * quasi_immutable_deps - for tracking quasi immutables + * runtime_boxes - runtime values for boxes, necessary when generating + guards to jump to """ def __init__(self, end_args, next_iteration_args, virtual_state, exported_infos, short_boxes, renamed_inputargs, - short_inputargs, memo): + short_inputargs, runtime_boxes, memo): self.end_args = end_args self.next_iteration_args = next_iteration_args self.virtual_state = virtual_state @@ -504,6 +509,7 @@ self.short_boxes = short_boxes self.renamed_inputargs = renamed_inputargs self.short_inputargs = short_inputargs + self.runtime_boxes = runtime_boxes self.dump(memo) def dump(self, memo): From pypy.commits at gmail.com Mon Mar 7 11:09:44 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 07 Mar 2016 08:09:44 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: whack at compile.py Message-ID: <56dda7c8.c74fc20a.1869a.0e5c@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82850:4c340087dad9 Date: 2016-03-07 18:08 +0200 http://bitbucket.org/pypy/pypy/changeset/4c340087dad9/ Log: whack at compile.py diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -259,14 +259,13 @@ assert start == 0 #ops = history.operations[start:] if 'unroll' not in enable_opts or not metainterp.cpu.supports_guard_gc_type: + xxx return compile_simple_loop(metainterp, greenkey, start, inputargs, ops, jumpargs, enable_opts) jitcell_token = make_jitcell_token(jitdriver_sd) - label = ResOperation(rop.LABEL, inputargs, - descr=TargetToken(jitcell_token)) end_label = ResOperation(rop.LABEL, jumpargs, descr=jitcell_token) call_pure_results = metainterp.call_pure_results - preamble_data = LoopCompileData(label, end_label, history.trace, + preamble_data = LoopCompileData(history.trace, inputargs, call_pure_results=call_pure_results, enable_opts=enable_opts) try: @@ -1031,19 +1030,19 @@ inline_short_preamble = True inputargs = metainterp.history.inputargs[:] trace = metainterp.history.trace - label = ResOperation(rop.LABEL, inputargs) jitdriver_sd = metainterp.jitdriver_sd enable_opts = jitdriver_sd.warmstate.enable_opts call_pure_results = metainterp.call_pure_results if metainterp.history.ends_with_jump: + xxx data = BridgeCompileData(label, operations[:], call_pure_results=call_pure_results, enable_opts=enable_opts, inline_short_preamble=inline_short_preamble) else: - data = SimpleCompileData(label, trace, + data = SimpleCompileData(trace, call_pure_results=call_pure_results, enable_opts=enable_opts) try: diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebridge.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebridge.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebridge.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebridge.py @@ -11,7 +11,7 @@ def optimize(self, ops, bridge_ops, expected, expected_loop=None, inline_short_preamble=True, jump_values=None, bridge_values=None): - loop = self.parse(ops, postprocess=self.postprocess) + loop = self.parse(ops) self.set_values(loop.operations, jump_values) if expected_loop is not None: xxx From pypy.commits at gmail.com Mon Mar 7 11:29:06 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 07 Mar 2016 08:29:06 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: fix compile.py until first test passes Message-ID: <56ddac52.703dc20a.8ac20.0c8a@mx.google.com> Author: fijal Branch: 
jit-leaner-frontend Changeset: r82851:c214873db69e Date: 2016-03-07 18:14 +0200 http://bitbucket.org/pypy/pypy/changeset/c214873db69e/ Log: fix compile.py until first test passes diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -258,12 +258,12 @@ assert start == 0 #ops = history.operations[start:] + jitcell_token = make_jitcell_token(jitdriver_sd) + history.record(rop.JUMP, jumpargs, None, descr=jitcell_token) if 'unroll' not in enable_opts or not metainterp.cpu.supports_guard_gc_type: xxx return compile_simple_loop(metainterp, greenkey, start, inputargs, ops, jumpargs, enable_opts) - jitcell_token = make_jitcell_token(jitdriver_sd) - end_label = ResOperation(rop.LABEL, jumpargs, descr=jitcell_token) call_pure_results = metainterp.call_pure_results preamble_data = LoopCompileData(history.trace, inputargs, call_pure_results=call_pure_results, @@ -277,13 +277,10 @@ metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd - end_label = ResOperation(rop.LABEL, inputargs, - descr=jitcell_token) - jump_op = ResOperation(rop.JUMP, jumpargs, descr=jitcell_token) start_descr = TargetToken(jitcell_token, original_jitcell_token=jitcell_token) jitcell_token.target_tokens = [start_descr] - loop_data = UnrolledLoopData(end_label, jump_op, history.trace, start_state, + loop_data = UnrolledLoopData(history.trace, jitcell_token, start_state, call_pure_results=call_pure_results, enable_opts=enable_opts) try: From pypy.commits at gmail.com Mon Mar 7 11:29:08 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 07 Mar 2016 08:29:08 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: in-progress Message-ID: <56ddac54.6bb8c20a.bd6f.0926@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82852:2c79e6dbfade Date: 2016-03-07 18:28 +0200 http://bitbucket.org/pypy/pypy/changeset/2c79e6dbfade/ Log: in-progress diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -82,10 +82,10 @@ """ This represents ops() with a jump at the end that goes to some loop, we need to deal with virtual state and inlining of short preamble """ - def __init__(self, start_label, operations, call_pure_results=None, + def __init__(self, trace, runtime_boxes, call_pure_results=None, enable_opts=None, inline_short_preamble=False): - self.start_label = start_label - self.operations = operations + self.trace = trace + self.runtime_boxes = runtime_boxes self.call_pure_results = call_pure_results self.enable_opts = enable_opts self.inline_short_preamble = inline_short_preamble @@ -94,7 +94,7 @@ from rpython.jit.metainterp.optimizeopt.unroll import UnrollOptimizer opt = UnrollOptimizer(metainterp_sd, jitdriver_sd, optimizations) - return opt.optimize_bridge(self.start_label, self.operations, + return opt.optimize_bridge(self.trace, self.runtime_boxes, self.call_pure_results, self.inline_short_preamble, self.box_names_memo) @@ -200,8 +200,9 @@ # ____________________________________________________________ -def compile_simple_loop(metainterp, greenkey, start, inputargs, ops, jumpargs, +def compile_simple_loop(metainterp, greenkey, start, trace, jumpargs, enable_opts): + xxxx from rpython.jit.metainterp.optimizeopt import optimize_trace jitdriver_sd = metainterp.jitdriver_sd @@ -261,8 +262,8 @@ jitcell_token = make_jitcell_token(jitdriver_sd) history.record(rop.JUMP, jumpargs, 
None, descr=jitcell_token) if 'unroll' not in enable_opts or not metainterp.cpu.supports_guard_gc_type: - xxx - return compile_simple_loop(metainterp, greenkey, start, inputargs, ops, + return compile_simple_loop(metainterp, greenkey, start, inputargs, + history.trace, jumpargs, enable_opts) call_pure_results = metainterp.call_pure_results preamble_data = LoopCompileData(history.trace, inputargs, @@ -1033,8 +1034,7 @@ call_pure_results = metainterp.call_pure_results if metainterp.history.ends_with_jump: - xxx - data = BridgeCompileData(label, operations[:], + data = BridgeCompileData(trace, inputargs, call_pure_results=call_pure_results, enable_opts=enable_opts, inline_short_preamble=inline_short_preamble) diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py --- a/rpython/jit/metainterp/logger.py +++ b/rpython/jit/metainterp/logger.py @@ -12,6 +12,15 @@ self.metainterp_sd = metainterp_sd self.guard_number = guard_number + def log_loop_from_trace(self, trace, memo): + if not have_debug_prints(): + return + ops = [] + i = trace.get_iter() + while not i.done(): + ops.append(i.next()) + self.log_loop(i.inputargs, ops, memo=memo) + def log_loop(self, inputargs, operations, number=0, type=None, ops_offset=None, name='', memo=None): if type is None: diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py --- a/rpython/jit/metainterp/optimizeopt/__init__.py +++ b/rpython/jit/metainterp/optimizeopt/__init__.py @@ -53,7 +53,7 @@ """ debug_start("jit-optimize") try: - #metainterp_sd.logger_noopt.log_loop(compile_data.trace, memo=memo) + metainterp_sd.logger_noopt.log_loop_from_trace(compile_data.trace, memo=memo) if memo is None: memo = {} compile_data.box_names_memo = memo diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -224,13 +224,12 @@ return token.virtual_state return label_vs - def optimize_bridge(self, start_label, operations, call_pure_results, + def optimize_bridge(self, trace, runtime_boxes, call_pure_results, inline_short_preamble, box_names_memo): - self._check_no_forwarding([start_label.getarglist()]) - info, ops = self.optimizer.propagate_all_forward( - start_label.getarglist()[:], operations[:-1], + self._check_no_forwarding([trace.inputargs]) + info, ops = self.optimizer.propagate_all_forward(trace.get_iter(), call_pure_results) - jump_op = operations[-1] + jump_op = info.jump_op cell_token = jump_op.getdescr() assert isinstance(cell_token, JitCellToken) if not inline_short_preamble or len(cell_token.target_tokens) == 1: @@ -241,9 +240,9 @@ for a in jump_op.getarglist(): self.optimizer.force_box_for_end_of_preamble(a) try: - vs = self.jump_to_existing_trace(jump_op, None) + vs = self.jump_to_existing_trace(jump_op, None, runtime_boxes) except InvalidLoop: - return self.jump_to_preamble(cell_token, jump_op, info) + return self.jump_to_preamble(cell_token, jump_op, info) if vs is None: return info, self.optimizer._newoperations[:] warmrunnerdescr = self.optimizer.metainterp_sd.warmrunnerdesc @@ -254,6 +253,7 @@ else: debug_print("Retrace count reached, jumping to preamble") return self.jump_to_preamble(cell_token, jump_op, info) + xxx exported_state = self.export_state(start_label, operations[-1].getarglist(), info.inputargs, box_names_memo) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- 
a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2609,14 +2609,12 @@ self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None, descr=target_jitcell_token) self.history.ends_with_jump = True - try: - target_token = compile.compile_trace(self, self.resumekey) - finally: - self.history.operations.pop() # remove the JUMP + target_token = compile.compile_trace(self, self.resumekey) if target_token is not None: # raise if it *worked* correctly assert isinstance(target_token, TargetToken) jitcell_token = target_token.targeting_jitcell_token self.raise_continue_running_normally(live_arg_boxes, jitcell_token) + xxxx # remove the jump op and continue tracing def compile_done_with_this_frame(self, exitbox): # temporarily put a JUMP to a pseudo-loop @@ -2709,7 +2707,7 @@ self.history = history.History() inputargs_and_holes = self.rebuild_state_after_failure(resumedescr, deadframe) - self.history.inputargs = [box for box in inputargs_and_holes if box] + self.history.set_inputargs([box for box in inputargs_and_holes if box]) finally: rstack._stack_criticalcode_stop() diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1436,7 +1436,16 @@ def is_pure_with_descr(opnum, descr): if rop.is_always_pure(opnum): return True - xxxx + if (opnum == rop.GETFIELD_RAW_I or + opnum == rop.GETFIELD_RAW_R or + opnum == rop.GETFIELD_RAW_F or + opnum == rop.GETFIELD_GC_I or + opnum == rop.GETFIELD_GC_R or + opnum == rop.GETFIELD_GC_F or + opnum == rop.GETARRAYITEM_RAW_I or + opnum == rop.GETARRAYITEM_RAW_F): + return descr.is_always_pure() + return False @staticmethod def is_pure_getfield(opnum, descr): From pypy.commits at gmail.com Mon Mar 7 11:49:55 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 07 Mar 2016 08:49:55 -0800 (PST) Subject: [pypy-commit] pypy default: list enum as a dependency directly Message-ID: <56ddb133.c711c30a.49b7.16fb@mx.google.com> Author: fijal Branch: Changeset: r82853:aec781f25ac8 Date: 2016-03-07 18:49 +0200 http://bitbucket.org/pypy/pypy/changeset/aec781f25ac8/ Log: list enum as a dependency directly diff --git a/requirements.txt b/requirements.txt --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,3 @@ # hypothesis is used for test generation on untranslated jit tests hypothesis - +enum # is a dependency, but old pip does not pick it up From pypy.commits at gmail.com Mon Mar 7 11:56:57 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 07 Mar 2016 08:56:57 -0800 (PST) Subject: [pypy-commit] pypy default: make sure we install the latest and greates Message-ID: <56ddb2d9.463f1c0a.2710b.ffff92a6@mx.google.com> Author: fijal Branch: Changeset: r82854:ab7250954435 Date: 2016-03-07 18:56 +0200 http://bitbucket.org/pypy/pypy/changeset/ab7250954435/ Log: make sure we install the latest and greates diff --git a/requirements.txt b/requirements.txt --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,3 @@ # hypothesis is used for test generation on untranslated jit tests hypothesis -enum # is a dependency, but old pip does not pick it up +enum>=0.4.6 # is a dependency, but old pip does not pick it up From pypy.commits at gmail.com Mon Mar 7 12:51:57 2016 From: pypy.commits at gmail.com (mjacob) Date: Mon, 07 Mar 2016 09:51:57 -0800 (PST) Subject: [pypy-commit] pypy py3k: Fix error messages when unicode and bytes are mixed in the re module usage. 
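(A minimal sketch of the behaviour this fix targets, not taken from the changeset: on the py3k branch, as in CPython 3, a regex pattern and its subject must both be text or both be bytes, and mixing them raises TypeError. The printed messages below are indicative only.)

import re

try:
    re.match(u"a", b"a")       # text pattern, bytes-like subject
except TypeError as exc:
    print(exc)                 # e.g. "can't use a string pattern on a bytes-like object"

try:
    re.match(b"a", u"a")       # bytes pattern, text subject
except TypeError as exc:
    print(exc)                 # e.g. "can't use a bytes pattern on a string-like object"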
Message-ID: <56ddbfbd.83301c0a.c31df.3bbb@mx.google.com> Author: Manuel Jacob Branch: py3k Changeset: r82855:528d213af6e5 Date: 2016-03-07 18:52 +0100 http://bitbucket.org/pypy/pypy/changeset/528d213af6e5/ Log: Fix error messages when unicode and bytes are mixed in the re module usage. 1) Tests were failing after the last merge from default because we were missing a type check. 2) Even before, two error messages were interchanged. diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -113,7 +113,7 @@ if not (space.is_none(self.w_pattern) or space.isinstance_w(self.w_pattern, space.w_unicode)): raise OperationError(space.w_TypeError, space.wrap( - "can't use a string pattern on a bytes-like object")) + "can't use a bytes pattern on a string-like object")) if pos > len(unicodestr): pos = len(unicodestr) if endpos > len(unicodestr): @@ -121,6 +121,10 @@ return rsre_core.UnicodeMatchContext(self.code, unicodestr, pos, endpos, self.flags) elif space.isinstance_w(w_string, space.w_str): + if (not space.is_none(self.w_pattern) and + space.isinstance_w(self.w_pattern, space.w_unicode)): + raise OperationError(space.w_TypeError, space.wrap( + "can't use a string pattern on a bytes-like object")) str = space.str_w(w_string) if pos > len(str): pos = len(str) @@ -133,7 +137,7 @@ if (not space.is_none(self.w_pattern) and space.isinstance_w(self.w_pattern, space.w_unicode)): raise OperationError(space.w_TypeError, space.wrap( - "can't use a bytes pattern on a string-like object")) + "can't use a string pattern on a bytes-like object")) size = buf.getlength() assert size >= 0 if pos > size: From pypy.commits at gmail.com Mon Mar 7 13:06:21 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 07 Mar 2016 10:06:21 -0800 (PST) Subject: [pypy-commit] pypy release-5.x: backout 666871b885d9, might be the cause of lib-python.2.7.test.test_xml_etree failure Message-ID: <56ddc31d.85b01c0a.7a902.ffffb1ee@mx.google.com> Author: mattip Branch: release-5.x Changeset: r82856:1f238bdfca88 Date: 2016-03-07 19:52 +0200 http://bitbucket.org/pypy/pypy/changeset/1f238bdfca88/ Log: backout 666871b885d9, might be the cause of lib- python.2.7.test.test_xml_etree failure diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -13,9 +13,6 @@ from pypy.interpreter.argument import Arguments from rpython.rlib import jit -from rpython.rlib.rarithmetic import LONG_BIT -from rpython.rlib.rbigint import rbigint - funccallunrolling = unrolling_iterable(range(4)) @@ -560,26 +557,6 @@ return space.w_False return space.eq(self.w_function, w_other.w_function) - def is_w(self, space, other): - if not isinstance(other, Method): - return False - return (self.w_instance is other.w_instance and - self.w_function is other.w_function and - self.w_class is other.w_class) - - def immutable_unique_id(self, space): - from pypy.objspace.std.util import IDTAG_METHOD as tag - from pypy.objspace.std.util import IDTAG_SHIFT - if self.w_instance is not None: - id = space.bigint_w(space.id(self.w_instance)) - id = id.lshift(LONG_BIT) - else: - id = rbigint.fromint(0) - id = id.or_(space.bigint_w(space.id(self.w_function))) - id = id.lshift(LONG_BIT).or_(space.bigint_w(space.id(self.w_class))) - id = id.lshift(IDTAG_SHIFT).int_or_(tag) - return space.newlong_from_rbigint(id) - def descr_method_hash(self): space = self.space w_result = space.hash(self.w_function) diff 
--git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -560,37 +560,6 @@ assert A().m == X() assert X() == A().m - @pytest.mark.skipif("config.option.runappdirect") - def test_method_identity(self): - class A(object): - def m(self): - pass - def n(self): - pass - - class B(A): - pass - - class X(object): - def __eq__(self, other): - return True - - a = A() - a2 = A() - assert a.m is a.m - assert id(a.m) == id(a.m) - assert a.m is not a.n - assert id(a.m) != id(a.n) - assert a.m is not a2.m - assert id(a.m) != id(a2.m) - - assert A.m is A.m - assert id(A.m) == id(A.m) - assert A.m is not A.n - assert id(A.m) != id(A.n) - assert A.m is not B.m - assert id(A.m) != id(B.m) - class TestMethod: def setup_method(self, method): diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -2,13 +2,12 @@ from pypy.interpreter.error import OperationError, oefmt -IDTAG_SHIFT = 4 +IDTAG_SHIFT = 3 IDTAG_INT = 1 IDTAG_LONG = 3 IDTAG_FLOAT = 5 IDTAG_COMPLEX = 7 -IDTAG_METHOD = 9 CMP_OPS = dict(lt='<', le='<=', eq='==', ne='!=', gt='>', ge='>=') BINARY_BITWISE_OPS = {'and': '&', 'lshift': '<<', 'or': '|', 'rshift': '>>', From pypy.commits at gmail.com Mon Mar 7 13:06:26 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 07 Mar 2016 10:06:26 -0800 (PST) Subject: [pypy-commit] pypy release-5.x: comment the reason for the change Message-ID: <56ddc322.99e61c0a.71a5.4435@mx.google.com> Author: mattip Branch: release-5.x Changeset: r82859:ad5a4e55fa8e Date: 2016-03-07 20:02 +0200 http://bitbucket.org/pypy/pypy/changeset/ad5a4e55fa8e/ Log: comment the reason for the change diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -548,6 +548,9 @@ assert relpypath, ("%r should be relative to %r" % (localpath, pypkgpath.dirname)) if len(relpypath.split(os.path.sep)) > 2: + # pypy detail to agregate the c files by directory, + # since the enormous number of files was causing + # memory issues linking on win32 return os.path.split(relpypath)[0] + '.c' return relpypath.replace('.py', '.c') return None From pypy.commits at gmail.com Mon Mar 7 13:06:23 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 07 Mar 2016 10:06:23 -0800 (PST) Subject: [pypy-commit] pypy release-5.x: backout c7cc35224c29 - tests for 666871b885d9 Message-ID: <56ddc31f.4577c20a.149e9.380a@mx.google.com> Author: mattip Branch: release-5.x Changeset: r82857:71586f7de6b0 Date: 2016-03-07 19:53 +0200 http://bitbucket.org/pypy/pypy/changeset/71586f7de6b0/ Log: backout c7cc35224c29 - tests for 666871b885d9 diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -172,15 +172,15 @@ def test_id_on_primitives(self): if self.cpython_apptest: skip("cpython behaves differently") - assert id(1) == (1 << 4) + 1 - assert id(1l) == (1 << 4) + 3 + assert id(1) == (1 << 3) + 1 + assert id(1l) == (1 << 3) + 3 class myint(int): pass assert id(myint(1)) != id(1) assert id(1.0) & 7 == 5 assert id(-0.0) != id(0.0) - assert hex(id(2.0)) == '0x40000000000000005L' + assert hex(id(2.0)) == '0x20000000000000005L' assert id(0.0) == 5 def test_id_on_strs(self): From pypy.commits at gmail.com Mon Mar 7 13:06:28 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 07 Mar 
2016 10:06:28 -0800 (PST) Subject: [pypy-commit] pypy default: create larger c files by indexing directories together (previously each file was seperate) Message-ID: <56ddc324.02f0c20a.19980.ffffae24@mx.google.com> Author: mattip Branch: Changeset: r82860:3c4aee3b5f7a Date: 2016-02-08 21:53 +0200 http://bitbucket.org/pypy/pypy/changeset/3c4aee3b5f7a/ Log: create larger c files by indexing directories together (previously each file was seperate) diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -547,6 +547,8 @@ relpypath = localpath.relto(pypkgpath.dirname) assert relpypath, ("%r should be relative to %r" % (localpath, pypkgpath.dirname)) + if len(relpypath.split(os.path.sep)) > 2: + return os.path.split(relpypath)[0] + '.c' return relpypath.replace('.py', '.c') return None if hasattr(node.obj, 'graph'): diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -123,9 +123,9 @@ # Verify that the generated C files have sane names: gen_c_files = [str(f) for f in cbuilder.extrafiles] - for expfile in ('rpython_rlib_rposix.c', - 'rpython_rtyper_lltypesystem_rstr.c', - 'rpython_translator_c_test_test_standalone.c'): + for expfile in ('rpython_rlib.c', + 'rpython_rtyper_lltypesystem.c', + 'rpython_translator_c_test.c'): assert cbuilder.targetdir.join(expfile) in gen_c_files def test_print(self): From pypy.commits at gmail.com Mon Mar 7 13:06:25 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 07 Mar 2016 10:06:25 -0800 (PST) Subject: [pypy-commit] pypy release-5.x: create larger c files by indexing directories together (previously each file was seperate) Message-ID: <56ddc321.a185c20a.89b03.3fe4@mx.google.com> Author: mattip Branch: release-5.x Changeset: r82858:0bd65aa2c604 Date: 2016-02-08 21:53 +0200 http://bitbucket.org/pypy/pypy/changeset/0bd65aa2c604/ Log: create larger c files by indexing directories together (previously each file was seperate) diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -547,6 +547,8 @@ relpypath = localpath.relto(pypkgpath.dirname) assert relpypath, ("%r should be relative to %r" % (localpath, pypkgpath.dirname)) + if len(relpypath.split(os.path.sep)) > 2: + return os.path.split(relpypath)[0] + '.c' return relpypath.replace('.py', '.c') return None if hasattr(node.obj, 'graph'): diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -123,9 +123,9 @@ # Verify that the generated C files have sane names: gen_c_files = [str(f) for f in cbuilder.extrafiles] - for expfile in ('rpython_rlib_rposix.c', - 'rpython_rtyper_lltypesystem_rstr.c', - 'rpython_translator_c_test_test_standalone.c'): + for expfile in ('rpython_rlib.c', + 'rpython_rtyper_lltypesystem.c', + 'rpython_translator_c_test.c'): assert cbuilder.targetdir.join(expfile) in gen_c_files def test_print(self): From pypy.commits at gmail.com Mon Mar 7 13:06:30 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 07 Mar 2016 10:06:30 -0800 (PST) Subject: [pypy-commit] pypy default: comment the reason for the change Message-ID: <56ddc326.d4e41c0a.bc3bf.ffffaf64@mx.google.com> Author: mattip Branch: Changeset: 
r82861:31f9f0c39273 Date: 2016-03-07 20:02 +0200 http://bitbucket.org/pypy/pypy/changeset/31f9f0c39273/ Log: comment the reason for the change diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -548,6 +548,9 @@ assert relpypath, ("%r should be relative to %r" % (localpath, pypkgpath.dirname)) if len(relpypath.split(os.path.sep)) > 2: + # pypy detail to agregate the c files by directory, + # since the enormous number of files was causing + # memory issues linking on win32 return os.path.split(relpypath)[0] + '.c' return relpypath.replace('.py', '.c') return None From pypy.commits at gmail.com Mon Mar 7 13:25:34 2016 From: pypy.commits at gmail.com (cfbolz) Date: Mon, 07 Mar 2016 10:25:34 -0800 (PST) Subject: [pypy-commit] pypy default: split performance improvements from refactorings Message-ID: <56ddc79e.e6bbc20a.114b2.4152@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82862:b5226bfbeb24 Date: 2016-03-07 19:23 +0100 http://bitbucket.org/pypy/pypy/changeset/b5226bfbeb24/ Log: split performance improvements from refactorings diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -150,49 +150,59 @@ * Support partition() as an app-level function, together with a cffi wrapper in pypy/numpy, this now provides partial support for partition() -* Performance improvements and refactorings: +* Performance improvements: - * Refactor and improve exception analysis in the annotator - - * Remove unnecessary special handling of space.wrap(). + * Optimize global lookups * Improve the memory signature of numbering instances in the JIT. This should massively decrease the amount of memory consumed by the JIT, which is significant for most programs. Also compress the numberings using variable- size encoding + * Optimize string concatination + + * Use INT_LSHIFT instead of INT_MUL when possible + + * Improve struct.unpack by casting directly from the underlying buffer. + Unpacking floats and doubles is about 15 times faster, and integer types + about 50% faster (on 64 bit integers). This was then subsequently + improved further in optimizeopt.py. + + * Optimize two-tuple lookups in mapdict + + * Reduce all guards from int_floordiv_ovf if one of the arguments is constant + + * Identify permutations of attributes at instance creation, reducing the + number of bridges created + + * Greatly improve re.sub() performance + + +* Internal refactorings: + + * Refactor and improve exception analysis in the annotator + + * Remove unnecessary special handling of space.wrap(). + * Support list-resizing setslice operations in RPython * Tweak the trace-too-long heuristic for multiple jit drivers * Refactor bookkeeping (such a cool word - three double letters) in the annotater - + * Refactor wrappers for OS functions from rtyper to rlib and simplify them * Simplify backend loading instructions to only use four variants - * Optimize string concatination - * Simplify GIL handling in non-jitted code - * Use INT_LSHIFT instead of INT_MUL when possible - - * Improve struct.unpack by casting directly from the underlying buffer. - Unpacking floats and doubles is about 15 times faster, and integer types - about 50% faster (on 64 bit integers). This was then subsequently - improved further in optimizeopt.py. 
- * Refactor naming in optimizeopt * Change GraphAnalyzer to use a more precise way to recognize external functions and fix null pointer handling, generally clean up external function handling - * Optimize global lookups - - * Optimize two-tuple lookups in mapdict - * Remove pure variants of ``getfield_gc_*`` operations from the JIT by determining purity while tracing @@ -203,17 +213,10 @@ * Refactor rtyper debug code into python.rtyper.debug * Seperate structmember.h from Python.h Also enhance creating api functions - to specify which header file they appear in (previously only pypy_decl.h) - - * Reduce all guards from int_floordiv_ovf if one of the arguments is constant + to specify which header file they appear in (previously only pypy_decl.h) * Fix tokenizer to enforce universal newlines, needed for Python 3 support - * Identify permutations of attributes at instance creation, reducing the - number of bridges created - - * Greatly improve re.sub() performance - .. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html .. _`hypothesis`: http://hypothesis.readthedocs.org .. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html From pypy.commits at gmail.com Mon Mar 7 13:25:36 2016 From: pypy.commits at gmail.com (cfbolz) Date: Mon, 07 Mar 2016 10:25:36 -0800 (PST) Subject: [pypy-commit] pypy default: whitespace cleanup, typo, extend one point Message-ID: <56ddc7a0.857ac20a.c9638.3d5a@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82863:0ea92ff6957c Date: 2016-03-07 19:24 +0100 http://bitbucket.org/pypy/pypy/changeset/0ea92ff6957c/ Log: whitespace cleanup, typo, extend one point diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -2,12 +2,12 @@ PyPy 5.0 ======== -We have released PyPy 5.0, about three months after PyPy 4.0.1. +We have released PyPy 5.0, about three months after PyPy 4.0.1. We encourage all users of PyPy to update to this version. Apart from the usual bug fixes, there is an ongoing effort to improve the warmup time and memory usage of JIT-related metadata. The exact effects depend vastly on the program you're running and can range from insignificant to warmup being up to 30% -faster and memory dropping by about 30%. +faster and memory dropping by about 30%. We also merged a major upgrade to our C-API layer (cpyext), simplifying the interaction between c-level objects and PyPy interpreter level objects. As a @@ -25,20 +25,20 @@ We would like to thank our donors for the continued support of the PyPy project. -We would also like to thank our contributors and +We would also like to thank our contributors and encourage new people to join the project. PyPy has many layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation -improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ -with making RPython's JIT even better. +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. CFFI ==== While not applicable only to PyPy, `cffi`_ is arguably our most significant -contribution to the python ecosystem. PyPy 5.0 ships with +contribution to the python ecosystem. PyPy 5.0 ships with `cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a c program. -.. _`PyPy`: http://doc.pypy.org +.. _`PyPy`: http://doc.pypy.org .. _`RPython`: https://rpython.readthedocs.org .. _`cffi`: https://cffi.readthedocs.org .. 
_`cffi-1.5.2`: http://cffi.readthedocs.org/en/latest/whatsnew.html#v1-5-2 @@ -159,7 +159,7 @@ significant for most programs. Also compress the numberings using variable- size encoding - * Optimize string concatination + * Optimize string concatenation * Use INT_LSHIFT instead of INT_MUL when possible @@ -168,7 +168,8 @@ about 50% faster (on 64 bit integers). This was then subsequently improved further in optimizeopt.py. - * Optimize two-tuple lookups in mapdict + * Optimize two-tuple lookups in mapdict, which improves warmup of instance + variable access somewhat * Reduce all guards from int_floordiv_ovf if one of the arguments is constant From pypy.commits at gmail.com Mon Mar 7 13:41:34 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 07 Mar 2016 10:41:34 -0800 (PST) Subject: [pypy-commit] pypy rawrefcount-review: Factor out all interpreter-dependent behaviour into a few GC methods Message-ID: <56ddcb5e.0e2e1c0a.5de9.ffffc18f@mx.google.com> Author: Ronan Lamy Branch: rawrefcount-review Changeset: r82864:7daebe814a60 Date: 2016-03-06 00:27 +0000 http://bitbucket.org/pypy/pypy/changeset/7daebe814a60/ Log: Factor out all interpreter-dependent behaviour into a few GC methods diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -2775,6 +2775,26 @@ int_gcobj = self._pyobj(adr_rawobj).ob_pypy_link return llmemory.cast_int_to_adr(int_gcobj) + def _rrc_has_untracked_referents(self, raw_obj): + from rpython.rlib.rawrefcount import ( + REFCNT_FROM_PYPY, REFCNT_FROM_PYPY_LIGHT) + rc = self._pyobj(raw_obj).ob_refcnt + return rc != REFCNT_FROM_PYPY and rc != REFCNT_FROM_PYPY_LIGHT + + def _rrc_unlink(self, adr_rawobj): + from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY + RC_MASK = REFCNT_FROM_PYPY - 1 + rawobj = self._pyobj(adr_rawobj) + rawobj.ob_refcnt &= RC_MASK + rawobj.ob_pypy_link = 0 + + def _rrc_is_light(self, adr_rawobj): + from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_LIGHT + return self._pyobj(adr_rawobj).ob_refcnt >= REFCNT_FROM_PYPY_LIGHT + + def _rrc_dealloc_light(self, adr_rawobj): + lltype.free(self._pyobj(adr_rawobj), flavor='raw') + def rawrefcount_init(self, dealloc_trigger_callback): # see pypy/doc/discussion/rawrefcount.rst if not self.rrc_enabled: @@ -2844,7 +2864,6 @@ return self.rrc_dealloc_pending.pop() return llmemory.NULL - def rrc_invoke_callback(self): if self.rrc_enabled and self.rrc_dealloc_pending.non_empty(): self.rrc_dealloc_trigger_callback() @@ -2857,13 +2876,7 @@ self.rrc_singleaddr) def _rrc_minor_trace(self, pyobject, singleaddr): - from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY - from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_LIGHT - # - rc = self._pyobj(pyobject).ob_refcnt - if rc == REFCNT_FROM_PYPY or rc == REFCNT_FROM_PYPY_LIGHT: - pass # the corresponding object may die - else: + if self._rrc_has_untracked_referents(pyobject): # force the corresponding object to be alive singleaddr.address[0] = self._rrc_get_gc_partner(pyobject) self._trace_drag_out(singleaddr, llmemory.NULL) @@ -2915,40 +2928,20 @@ self._rrc_free(pyobject) def _rrc_free(self, pyobject): - from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY - from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_LIGHT - # - rc = self._pyobj(pyobject).ob_refcnt - if rc >= REFCNT_FROM_PYPY_LIGHT: - rc -= REFCNT_FROM_PYPY_LIGHT - if rc == 0: - lltype.free(self._pyobj(pyobject), flavor='raw') - else: - # can only occur if LIGHT is used in create_link_pyobj() - 
self._pyobj(pyobject).ob_refcnt = rc - self._pyobj(pyobject).ob_pypy_link = 0 + if self._rrc_has_untracked_referents(pyobject): + self._rrc_unlink(pyobject) + elif self._rrc_is_light(pyobject): + self._rrc_dealloc_light(pyobject) else: - ll_assert(rc >= REFCNT_FROM_PYPY, "refcount underflow?") - ll_assert(rc < int(REFCNT_FROM_PYPY_LIGHT * 0.99), - "refcount underflow from REFCNT_FROM_PYPY_LIGHT?") - rc -= REFCNT_FROM_PYPY - self._pyobj(pyobject).ob_refcnt = rc - self._pyobj(pyobject).ob_pypy_link = 0 - if rc == 0: - self.rrc_dealloc_pending.append(pyobject) + self._rrc_unlink(pyobject) + self.rrc_dealloc_pending.append(pyobject) _rrc_free._always_inline_ = True def rrc_major_collection_trace(self): self.rrc_p_list_old.foreach(self._rrc_major_trace, None) def _rrc_major_trace(self, pyobject, ignore): - from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY - from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_LIGHT - # - rc = self._pyobj(pyobject).ob_refcnt - if rc == REFCNT_FROM_PYPY or rc == REFCNT_FROM_PYPY_LIGHT: - pass # the corresponding object may die - else: + if self._rrc_has_untracked_referents(pyobject): # force the corresponding object to be alive obj = self._rrc_get_gc_partner(pyobject) self.objects_to_trace.append(obj) From pypy.commits at gmail.com Mon Mar 7 13:41:36 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 07 Mar 2016 10:41:36 -0800 (PST) Subject: [pypy-commit] pypy rawrefcount-review: Add stateful hypothesis testing for rawrefcount Message-ID: <56ddcb60.6bb8c20a.bd6f.3e00@mx.google.com> Author: Ronan Lamy Branch: rawrefcount-review Changeset: r82865:7ddef219107f Date: 2016-03-07 18:40 +0000 http://bitbucket.org/pypy/pypy/changeset/7ddef219107f/ Log: Add stateful hypothesis testing for rawrefcount diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py --- a/rpython/memory/gc/test/test_rawrefcount.py +++ b/rpython/memory/gc/test/test_rawrefcount.py @@ -1,7 +1,7 @@ import py from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.memory.gc.incminimark import IncrementalMiniMarkGC -from rpython.memory.gc.test.test_direct import BaseDirectGCTest +from rpython.memory.gc.test.test_direct import BaseDirectGCTest, GCSpace from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY from rpython.rlib.rawrefcount import REFCNT_FROM_PYPY_LIGHT @@ -289,3 +289,178 @@ check_alive(0) self._collect(major=True) check_alive(0) + +class RefcountSpace(GCSpace): + def __init__(self): + GCSpace.__init__(self, IncrementalMiniMarkGC, {}) + self.trigger = [] + self.gc.rawrefcount_init(lambda: self.trigger.append(1)) + + def new_rawobj(self): + r1 = lltype.malloc(PYOBJ_HDR, flavor='raw') + r1.ob_refcnt = 0 + r1.ob_pypy_link = 0 + return r1 + + def new_gcobj(self, intval): + p1 = self.malloc(S) + p1.x = intval + return p1 + + def create_link(self, rawobj, gcobj, is_light=False, is_pyobj=False): + if is_light: + rawobj.ob_refcnt += REFCNT_FROM_PYPY_LIGHT + else: + rawobj.ob_refcnt += REFCNT_FROM_PYPY + rawaddr = llmemory.cast_ptr_to_adr(rawobj) + gcref = lltype.cast_opaque_ptr(llmemory.GCREF, gcobj) + if is_pyobj: + self.gc.rawrefcount_create_link_pyobj(gcref, rawaddr) + else: + self.gc.rawrefcount_create_link_pypy(gcref, rawaddr) + + def from_gc(self, gcobj): + gcref = lltype.cast_opaque_ptr(llmemory.GCREF, gcobj) + rawaddr = self.gc.rawrefcount_from_obj(gcref) + if rawaddr == llmemory.NULL: + return None + else: + return self.gc._pyobj(rawaddr) + +from rpython.rtyper.test.test_rdict import signal_timeout, Action +from 
hypothesis.strategies import ( + builds, sampled_from, binary, just, integers, text, characters, tuples, + booleans, one_of) +from hypothesis.stateful import GenericStateMachine, run_state_machine_as_test +from rpython.tool.leakfinder import start_tracking_allocations, stop_tracking_allocations + +RC_MASK = REFCNT_FROM_PYPY - 1 + +class StateMachine(GenericStateMachine): + def __init__(self): + self.space = RefcountSpace() + self.rawobjs = [] + self.rootlinks = [] + self.next_id = 0 + start_tracking_allocations() + + def free(self, rawobj): + lltype.free(rawobj, flavor='raw') + + def incref(self, rawobj): + rawobj.ob_refcnt += 1 + + def decref(self, rawobj): + assert rawobj.ob_refcnt > 0 + rawobj.ob_refcnt -= 1 + if rawobj.ob_refcnt == 0: + i = self.rawobjs.index(rawobj) + self.free(rawobj) + del self.rawobjs[i] + elif rawobj.ob_refcnt & RC_MASK == 0: + i = self.rawobjs.index(rawobj) + del self.rawobjs[i] + + def get_linkable_gcobjs(self): + res = [] + for p, has_link in zip(self.space.stackroots, self.rootlinks): + if not has_link: + res.append(p) + return res + + def get_linkable_rawobjs(self): + return [r for r in self.rawobjs + if r.ob_refcnt != 0 and r.ob_pypy_link == 0] + + def find_root_index(self, p): + return self.space.stackroots.index(p) + + def add_rawobj(self): + r = self.space.new_rawobj() + self.incref(r) + self.rawobjs.append(r) + + def add_gcobj(self): + p = self.space.new_gcobj(self.next_id) + self.space.stackroots.append(p) + self.rootlinks.append(False) + self.next_id += 1 + return p + + def create_gcpartner(self, raw, is_light=False, is_pyobj=False): + p = self.space.new_gcobj(self.next_id) + self.next_id += 1 + self.space.create_link(raw, p, is_light=is_light, is_pyobj=is_pyobj) + + def create_rawpartner(self, p, is_light=False, is_pyobj=False): + assert self.space.from_gc(p) is None + i = self.find_root_index(p) + raw = self.space.new_rawobj() + self.space.create_link(raw, p, is_light=is_light, is_pyobj=is_pyobj) + self.rootlinks[i] = True + + def minor_collection(self): + self.space.gc.minor_collection() + + def major_collection(self): + self.space.gc.collect() + + def forget_root(self, n): + del self.space.stackroots[n] + del self.rootlinks[n] + + def steps(self): + valid_st = [] + global_actions = [ + Action('add_rawobj', ()), + Action('minor_collection', ()), + Action('major_collection', ()), + ] + valid_st.append(sampled_from(global_actions)) + valid_st.append(builds(Action, just('add_gcobj'), tuples())) + if self.rawobjs: + valid_st.append(builds(Action, just('incref'), tuples( + sampled_from(self.rawobjs)))) + candidates = [r for r in self.rawobjs if r.ob_refcnt & RC_MASK > 0] + if candidates: + valid_st.append(builds(Action, just('decref'), tuples( + sampled_from(candidates)))) + candidates = self.get_linkable_rawobjs() + if candidates: + st = builds(Action, just('create_gcpartner'), tuples( + sampled_from(candidates), + booleans(), booleans())) + valid_st.append(st) + candidates = self.get_linkable_gcobjs() + if candidates: + st = builds(Action, just('create_rawpartner'), tuples( + sampled_from(candidates), + booleans(), booleans())) + valid_st.append(st) + if self.space.stackroots: + st = builds(Action, just('forget_root'), tuples( + sampled_from(range(len(self.space.stackroots))))) + valid_st.append(st) + return one_of(*valid_st) + + def execute_step(self, action): + with signal_timeout(1): # catches infinite loops + action.execute(self) + + def teardown(self): + self.space.stackroots[:] = [] + self.space.gc.collect() + for r in self.rawobjs: + 
lltype.free(r, flavor='raw') + while True: + r = self.space.gc.rawrefcount_next_dead() + if r == llmemory.NULL: + break + else: + lltype.free(self.space.gc._pyobj(r), flavor='raw') + stop_tracking_allocations(check=True) + + +def test_hypothesis(): + run_state_machine_as_test(StateMachine) +test_hypothesis.dont_track_allocations = True From pypy.commits at gmail.com Mon Mar 7 13:48:30 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 07 Mar 2016 10:48:30 -0800 (PST) Subject: [pypy-commit] pypy default: update rev numbers in script Message-ID: <56ddccfe.4d0d1c0a.2c758.ffffcec6@mx.google.com> Author: mattip Branch: Changeset: r82866:ed4de848b26a Date: 2016-03-07 20:11 +0200 http://bitbucket.org/pypy/pypy/changeset/ed4de848b26a/ Log: update rev numbers in script diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -1,7 +1,7 @@ # Edit these appropriately before running this script -maj=4 +maj=5 min=0 -rev=1 +rev=0 # This script will download latest builds from the buildmaster, rename the top # level directory, and repackage ready to be uploaded to bitbucket. It will also # download source, assuming a tag for the release already exists, and repackage them. From pypy.commits at gmail.com Mon Mar 7 13:48:31 2016 From: pypy.commits at gmail.com (cfbolz) Date: Mon, 07 Mar 2016 10:48:31 -0800 (PST) Subject: [pypy-commit] pypy default: use upper case more Message-ID: <56ddccff.e853c20a.12016.3fed@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82867:937e32c2f31c Date: 2016-03-07 09:41 +0100 http://bitbucket.org/pypy/pypy/changeset/937e32c2f31c/ Log: use upper case more diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -36,7 +36,7 @@ While not applicable only to PyPy, `cffi`_ is arguably our most significant contribution to the python ecosystem. PyPy 5.0 ships with -`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a c program. +`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a C program. .. _`PyPy`: http://doc.pypy.org .. _`RPython`: https://rpython.readthedocs.org @@ -52,18 +52,18 @@ ============= PyPy is a very compliant Python interpreter, almost a drop-in replacement for -CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison) +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) due to its integrated tracing JIT compiler. We also welcome developers of other `dynamic languages`_ to see what RPython can do for them. This release supports **x86** machines on most common operating systems -(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, freebsd), +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the -big- and little-endian variants of **ppc64** running Linux. +big- and little-endian variants of **PPC64** running Linux. -.. _`pypy and cpython 2.7.x`: http://speed.pypy.org +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org .. 
_`dynamic languages`: http://pypyjs.org Other Highlights (since 4.0.1 released in November 2015) @@ -103,7 +103,7 @@ * More completely support datetime, optimize timedelta creation - * Fix for issue 2185 which caused an inconsistent list of operations to be + * Fix for issue #2185 which caused an inconsistent list of operations to be generated by the unroller, appeared in a complicated DJango app * Fix an elusive issue with stacklets on shadowstack which showed up when From pypy.commits at gmail.com Mon Mar 7 13:48:33 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 07 Mar 2016 10:48:33 -0800 (PST) Subject: [pypy-commit] pypy default: expand Message-ID: <56ddcd01.a8c0c20a.d20f2.418b@mx.google.com> Author: fijal Branch: Changeset: r82868:cc69148b1226 Date: 2016-03-07 10:52 +0200 http://bitbucket.org/pypy/pypy/changeset/cc69148b1226/ Log: expand diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -12,11 +12,11 @@ We also merged a major upgrade to our C-API layer (cpyext), simplifying the interaction between c-level objects and PyPy interpreter level objects. As a result, lxml with its cython compiled component `passes all tests`_ on PyPy +and the new cpyext is a lot faster than the previous one. -Users who have gotten used to vmprof_ on Linux, and those on other platforms -who have not yet tried its awesomeness, will be happy to hear that vmprof -now just works on MacOS and Windows too, in both PyPy (built-in support) and -CPython (as an installed module). +vmprof_ has been a go-to profiler for PyPy on linux for a few releases +and we're happy to announce that thanks to commercial cooperation, vmprof +now works on Linux, OS X and Windows on both PyPy and CPython. 
You can download the PyPy 5.0 release here: From pypy.commits at gmail.com Mon Mar 7 13:48:43 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 07 Mar 2016 10:48:43 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: correct shift for ppc zero array, ofs_loc can be a register Message-ID: <56ddcd0b.4577c20a.149e9.4a65@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82869:014ea5f68140 Date: 2016-03-07 19:47 +0100 http://bitbucket.org/pypy/pypy/changeset/014ea5f68140/ Log: correct shift for ppc zero array, ofs_loc can be a register diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -887,8 +887,12 @@ self.mc.load_imm(r.SCRATCH, startindex_loc.value) startindex_loc = r.SCRATCH - self.mc.addi(r.SCRATCH2.value, startindex_loc.value, ofs_loc.getint()) + if ofs_loc.is_imm(): + self.mc.addi(r.SCRATCH2.value, startindex_loc.value, ofs_loc.value) + else: + self.mc.add(r.SCRATCH2.value, startindex_loc.value, ofs_loc.value) ofs_loc = r.SCRATCH2 + assert base_loc.is_core_reg() self.mc.add(ofs_loc.value, ofs_loc.value, base_loc.value) # ofs_loc is now the real address pointing to the first # byte to be zeroed @@ -901,7 +905,7 @@ jlt_location = self.mc.currpos() self.mc.trap() - self.mc.sradi(r.SCRATCH.value, length_loc.value, shift_by) + self.mc.sradi(r.SCRATCH.value, length_loc.value, shift_by, 0) self.mc.mtctr(r.SCRATCH.value) # store the length in count register self.mc.li(r.SCRATCH.value, 0) @@ -929,7 +933,7 @@ if length_loc.is_imm(): self.mc.load_imm(r.SCRATCH, length_loc.value & (stepsize-1)) else: - self.mc.andi(r.SCRATCH.value, length_loc.value, stepsize-1) + self.mc.andix(r.SCRATCH.value, length_loc.value, (stepsize-1) & 0xff) self.mc.cmp_op(0, r.SCRATCH.value, 0, imm=True) jle_location = self.mc.currpos() From pypy.commits at gmail.com Mon Mar 7 14:26:40 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 07 Mar 2016 11:26:40 -0800 (PST) Subject: [pypy-commit] pypy default: fix test after 3c4aee3b5f7a Message-ID: <56ddd5f0.8ee61c0a.5cd18.ffffd4fe@mx.google.com> Author: mattip Branch: Changeset: r82870:d3f6d015182f Date: 2016-03-07 21:25 +0200 http://bitbucket.org/pypy/pypy/changeset/d3f6d015182f/ Log: fix test after 3c4aee3b5f7a diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -240,6 +240,7 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner + Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -271,6 +272,7 @@ Aaron Tubbs Ben Darnell Roberto De Ioris + Logan Chien Juan Francisco Cantero Hurtado Ruochen Huang Jeong YunWon diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -210,6 +210,7 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner + Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -241,6 +242,7 @@ Aaron Tubbs Ben Darnell Roberto De Ioris + Logan Chien Juan Francisco Cantero Hurtado Ruochen Huang Jeong YunWon diff --git a/rpython/translator/c/test/test_genc.py b/rpython/translator/c/test/test_genc.py --- a/rpython/translator/c/test/test_genc.py +++ b/rpython/translator/c/test/test_genc.py @@ -596,7 +596,7 @@ t.context._graphof(foobar_fn).inhibit_tail_call = True t.source_c() lines = t.driver.cbuilder.c_source_filename.join('..', - 'rpython_translator_c_test_test_genc.c').readlines() + 'rpython_translator_c_test.c').readlines() for i, line in enumerate(lines): if '= pypy_g_foobar_fn' in line: 
break From pypy.commits at gmail.com Mon Mar 7 14:48:11 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 07 Mar 2016 11:48:11 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: correct positioning of the ptr to write Message-ID: <56dddafb.86b71c0a.15e17.ffffd2be@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82871:89a10fc86c4b Date: 2016-03-07 20:47 +0100 http://bitbucket.org/pypy/pypy/changeset/89a10fc86c4b/ Log: correct positioning of the ptr to write diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -884,9 +884,8 @@ return # nothing to do if startindex_loc.is_imm(): - self.mc.load_imm(r.SCRATCH, startindex_loc.value) - startindex_loc = r.SCRATCH - + self.mc.load_imm(r.SCRATCH2, startindex_loc.value) + startindex_loc = r.SCRATCH2 if ofs_loc.is_imm(): self.mc.addi(r.SCRATCH2.value, startindex_loc.value, ofs_loc.value) else: @@ -897,6 +896,7 @@ # ofs_loc is now the real address pointing to the first # byte to be zeroed + prev_length_loc = length_loc if length_loc.is_imm(): self.mc.load_imm(r.SCRATCH, length_loc.value) length_loc = r.SCRATCH @@ -905,7 +905,7 @@ jlt_location = self.mc.currpos() self.mc.trap() - self.mc.sradi(r.SCRATCH.value, length_loc.value, shift_by, 0) + self.mc.sradi(r.SCRATCH.value, length_loc.value, shift_by, 31) self.mc.mtctr(r.SCRATCH.value) # store the length in count register self.mc.li(r.SCRATCH.value, 0) @@ -914,6 +914,8 @@ # and length. Thus we zero 4/8 bytes in a loop in 1) and every remaining # byte is zeroed in another loop in 2) + self.mc.subi(ofs_loc.value, ofs_loc.value, stepsize) + # first store of case 1) # 1) The next loop copies WORDS into the memory chunk starting at startindex # ending at startindex + length. These are bytes @@ -921,6 +923,8 @@ self.eza_stXu(r.SCRATCH.value, ofs_loc.value, stepsize, stepsize) self.mc.bdnz(loop_location - self.mc.currpos()) + self.mc.addi(ofs_loc.value, ofs_loc.value, stepsize) + pmc = OverwritingBuilder(self.mc, jlt_location, 1) pmc.blt(self.mc.currpos() - jlt_location) # jump if length < WORD pmc.overwrite() @@ -930,6 +934,7 @@ # need to write the last bytes. 
# move the last bytes to the count register + length_loc = prev_length_loc if length_loc.is_imm(): self.mc.load_imm(r.SCRATCH, length_loc.value & (stepsize-1)) else: @@ -942,6 +947,8 @@ self.mc.mtctr(r.SCRATCH.value) self.mc.li(r.SCRATCH.value, 0) + self.mc.subi(ofs_loc.value, ofs_loc.value, 1) + loop_position = self.mc.currpos() self.eza_stXu(r.SCRATCH.value, ofs_loc.value, 1, 1) self.mc.bdnz(self.mc.currpos() - loop_location) From pypy.commits at gmail.com Mon Mar 7 15:11:35 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 07 Mar 2016 12:11:35 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: one more commit of progress Message-ID: <56dde077.418f1c0a.8d718.ffffe343@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82872:87305d3006b1 Date: 2016-03-07 22:10 +0200 http://bitbucket.org/pypy/pypy/changeset/87305d3006b1/ Log: one more commit of progress diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -26,7 +26,7 @@ self.save_pos = -1 def length(self): - return self.end - self.start - 1 + return self.end - self.start def done(self): return self.pos >= self.end diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2609,7 +2609,10 @@ self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None, descr=target_jitcell_token) self.history.ends_with_jump = True - target_token = compile.compile_trace(self, self.resumekey) + try: + target_token = compile.compile_trace(self, self.resumekey) + finally: + xxxx if target_token is not None: # raise if it *worked* correctly assert isinstance(target_token, TargetToken) jitcell_token = target_token.targeting_jitcell_token diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -317,13 +317,15 @@ #self._number_boxes(topsnapshot.boxes, optimizer, state) #assert state.position == special_boxes_size + total = 2 while not snapshot_iter.done(): size, jitcode_index, pc = snapshot_iter.get_size_jitcode_pc() + total += 2 + size state.append(rffi.cast(rffi.SHORT, jitcode_index)) state.append(rffi.cast(rffi.SHORT, pc)) self._number_boxes(snapshot_iter, size, optimizer, state) - numb = resumecode.create_numbering(state.current) + numb = resumecode.create_numbering(state.current, total) return numb, state.liveboxes, state.v def forget_numberings(self): diff --git a/rpython/jit/metainterp/resumecode.py b/rpython/jit/metainterp/resumecode.py --- a/rpython/jit/metainterp/resumecode.py +++ b/rpython/jit/metainterp/resumecode.py @@ -24,9 +24,12 @@ NUMBERINGP.TO.become(NUMBERING) NULL_NUMBER = lltype.nullptr(NUMBERING) -def create_numbering(lst): +def create_numbering(lst, total=-1): + if total == -1: + total = len(lst) result = [] - for item in lst: + for i in range(total): + item = lst[i] item = rffi.cast(lltype.Signed, item) item *= 2 if item < 0: From pypy.commits at gmail.com Tue Mar 8 03:26:08 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 08 Mar 2016 00:26:08 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: jump location was off, shift by parameter of sradi is weird (but it works now) Message-ID: <56de8ca0.6507c20a.944ab.72c2@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82874:78f8d539bef6 Date: 2016-03-08 09:25 +0100 
http://bitbucket.org/pypy/pypy/changeset/78f8d539bef6/ Log: jump location was off, shift by parameter of sradi is weird (but it works now) diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -905,7 +905,7 @@ jlt_location = self.mc.currpos() self.mc.trap() - self.mc.sradi(r.SCRATCH.value, length_loc.value, shift_by, 31) + self.mc.sradi(r.SCRATCH.value, length_loc.value, 0, shift_by) self.mc.mtctr(r.SCRATCH.value) # store the length in count register self.mc.li(r.SCRATCH.value, 0) @@ -949,9 +949,9 @@ self.mc.subi(ofs_loc.value, ofs_loc.value, 1) - loop_position = self.mc.currpos() + loop_location = self.mc.currpos() self.eza_stXu(r.SCRATCH.value, ofs_loc.value, 1, 1) - self.mc.bdnz(self.mc.currpos() - loop_location) + self.mc.bdnz(loop_location - self.mc.currpos()) pmc = OverwritingBuilder(self.mc, jle_location, 1) pmc.ble(self.mc.currpos() - jle_location) # !GT From pypy.commits at gmail.com Tue Mar 8 04:42:46 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 08 Mar 2016 01:42:46 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: fix the order of resumedata rebuild Message-ID: <56de9e96.d3921c0a.eb707.ffffa242@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82875:e48b95c1467b Date: 2016-03-08 11:42 +0200 http://bitbucket.org/pypy/pypy/changeset/e48b95c1467b/ Log: fix the order of resumedata rebuild diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -657,6 +657,12 @@ def length(self): return self.trace._count + def get_cut_position(self): + return len(self.trace._ops) + + def cut(self, cut_at): + self.trace.cut_at(cut_at) + def any_operation(self): return self.trace._count > 0 diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -139,6 +139,9 @@ def length(self): return len(self._ops) + def cut_at(self, end): + self._ops = self._ops[:end] + def _encode(self, box): if isinstance(box, Const): if (isinstance(box, ConstInt) and diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -253,10 +253,9 @@ else: debug_print("Retrace count reached, jumping to preamble") return self.jump_to_preamble(cell_token, jump_op, info) - xxx - exported_state = self.export_state(start_label, - operations[-1].getarglist(), - info.inputargs, box_names_memo) + exported_state = self.export_state(info.jump_op.getarglist(), + info.inputargs, runtime_boxes, + box_names_memo) exported_state.quasi_immutable_deps = self.optimizer.quasi_immutable_deps self.optimizer._clean_optimization_info(self.optimizer._newoperations) return exported_state, self.optimizer._newoperations diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1918,7 +1918,7 @@ def retrace_needed(self, trace, exported_state): self.partial_trace = trace - self.retracing_from = len(self.history.operations) - 1 + self.retracing_from = self.history.length() self.exported_state = exported_state self.heapcache.reset() @@ -2245,7 +2245,7 @@ jd_sd.warmstate.get_location_str(greenkey), 
self.staticdata.logger_ops._make_log_operations( self.box_names_memo), - self.history.operations) + self.history.trace) if self.aborted_tracing_jitdriver is not None: jd_sd = self.aborted_tracing_jitdriver greenkey = self.aborted_tracing_greenkey @@ -2454,7 +2454,7 @@ self.staticdata.log('cancelled, tracing more...') # Otherwise, no loop found so far, so continue tracing. - start = len(self.history.operations) + start = self.history.get_cut_position() self.current_merge_points.append((live_arg_boxes, start)) def _unpack_boxes(self, boxes, start, stop): @@ -2606,18 +2606,18 @@ if not target_jitcell_token: return + cut_at = self.history.get_cut_position() self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None, descr=target_jitcell_token) self.history.ends_with_jump = True try: target_token = compile.compile_trace(self, self.resumekey) finally: - xxxx + self.history.cut(cut_at) # pop the jump if target_token is not None: # raise if it *worked* correctly assert isinstance(target_token, TargetToken) jitcell_token = target_token.targeting_jitcell_token self.raise_continue_running_normally(live_arg_boxes, jitcell_token) - xxxx # remove the jump op and continue tracing def compile_done_with_this_frame(self, exitbox): # temporarily put a JUMP to a pseudo-loop diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -1094,7 +1094,6 @@ f.setup_resume_at_op(pc) resumereader.consume_boxes(f.get_current_position_info(), f.registers_i, f.registers_r, f.registers_f) - metainterp.framestack.reverse() return resumereader.liveboxes, virtualizable_boxes, virtualref_boxes @@ -1368,22 +1367,16 @@ # by the positions in the numbering. The first one we get must be # the bottom one, i.e. the last one in the chain, in order to make # the comment in BlackholeInterpreter.setposition() valid. 
- prevbh = None - firstbh = None curbh = None while not resumereader.done_reading(): - curbh = blackholeinterpbuilder.acquire_interp() - if prevbh is not None: - prevbh.nextblackholeinterp = curbh - else: - firstbh = curbh - prevbh = curbh + nextbh = blackholeinterpbuilder.acquire_interp() + nextbh.nextblackholeinterp = curbh + curbh = nextbh jitcode_pos, pc = resumereader.read_jitcode_pos_pc() jitcode = jitcodes[jitcode_pos] curbh.setposition(jitcode, pc) resumereader.consume_one_section(curbh) - curbh.nextblackholeinterp = None - return firstbh + return curbh def force_from_resumedata(metainterp_sd, storage, deadframe, vinfo, ginfo): resumereader = ResumeDataDirectReader(metainterp_sd, storage, deadframe) From pypy.commits at gmail.com Tue Mar 8 05:18:32 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 08 Mar 2016 02:18:32 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: fix Message-ID: <56dea6f8.a3f6c20a.71577.ffff99ec@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82876:7d2ffc440da7 Date: 2016-03-08 12:17 +0200 http://bitbucket.org/pypy/pypy/changeset/7d2ffc440da7/ Log: fix diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -228,7 +228,7 @@ inline_short_preamble, box_names_memo): self._check_no_forwarding([trace.inputargs]) info, ops = self.optimizer.propagate_all_forward(trace.get_iter(), - call_pure_results) + call_pure_results, False) jump_op = info.jump_op cell_token = jump_op.getdescr() assert isinstance(cell_token, JitCellToken) From pypy.commits at gmail.com Tue Mar 8 05:38:31 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 08 Mar 2016 02:38:31 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: progress Message-ID: <56deaba7.49f9c20a.8174f.ffffa21d@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82877:0be62192045f Date: 2016-03-08 12:31 +0200 http://bitbucket.org/pypy/pypy/changeset/0be62192045f/ Log: progress diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -1007,7 +1007,7 @@ metainterp_sd.stats.add_jitcell_token(jitcell_token) -def compile_trace(metainterp, resumekey): +def compile_trace(metainterp, resumekey, runtime_boxes): """Try to compile a new bridge leading from the beginning of the history to some existing place. """ @@ -1034,7 +1034,7 @@ call_pure_results = metainterp.call_pure_results if metainterp.history.ends_with_jump: - data = BridgeCompileData(trace, inputargs, + data = BridgeCompileData(trace, runtime_boxes, call_pure_results=call_pure_results, enable_opts=enable_opts, inline_short_preamble=inline_short_preamble) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2611,7 +2611,8 @@ descr=target_jitcell_token) self.history.ends_with_jump = True try: - target_token = compile.compile_trace(self, self.resumekey) + target_token = compile.compile_trace(self, self.resumekey, + live_arg_boxes[num_green_args:]) finally: self.history.cut(cut_at) # pop the jump if target_token is not None: # raise if it *worked* correctly @@ -2643,7 +2644,7 @@ # FIXME: can we call compile_trace? 
token = loop_tokens[0].finishdescr self.history.record(rop.FINISH, exits, None, descr=token) - target_token = compile.compile_trace(self, self.resumekey) + target_token = compile.compile_trace(self, self.resumekey, exits) if target_token is not token: compile.giveup() From pypy.commits at gmail.com Tue Mar 8 05:38:33 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 08 Mar 2016 02:38:33 -0800 (PST) Subject: [pypy-commit] pypy default: an attempt to fix OS X 32bit Message-ID: <56deaba9.a3f6c20a.71577.ffffa317@mx.google.com> Author: fijal Branch: Changeset: r82878:455ba7f390b8 Date: 2016-03-08 12:36 +0200 http://bitbucket.org/pypy/pypy/changeset/455ba7f390b8/ Log: an attempt to fix OS X 32bit diff --git a/rpython/rlib/rvmprof/src/vmprof_getpc.h b/rpython/rlib/rvmprof/src/vmprof_getpc.h --- a/rpython/rlib/rvmprof/src/vmprof_getpc.h +++ b/rpython/rlib/rvmprof/src/vmprof_getpc.h @@ -54,6 +54,7 @@ // It will cause problems for FreeBSD though!, because it turns off // the needed __BSD_VISIBLE. #ifdef __APPLE__ +#include #define _XOPEN_SOURCE 500 #endif @@ -144,7 +145,11 @@ #else intptr_t GetPC(ucontext_t *signal_ucontext) { #ifdef __APPLE__ +#if ((ULONG_MAX) == (UINT_MAX)) + return (signal_ucontext->uc_mcontext->__ss.__eip); +#else return (signal_ucontext->uc_mcontext->__ss.__rip); +#endif #else return signal_ucontext->PC_FROM_UCONTEXT; // defined in config.h #endif From pypy.commits at gmail.com Tue Mar 8 05:38:35 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 08 Mar 2016 02:38:35 -0800 (PST) Subject: [pypy-commit] pypy default: merge Message-ID: <56deabab.46fac20a.bff32.ffffa2f1@mx.google.com> Author: fijal Branch: Changeset: r82879:e1352ae844c0 Date: 2016-03-08 12:37 +0200 http://bitbucket.org/pypy/pypy/changeset/e1352ae844c0/ Log: merge diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -240,6 +240,7 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner + Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -271,6 +272,7 @@ Aaron Tubbs Ben Darnell Roberto De Ioris + Logan Chien Juan Francisco Cantero Hurtado Ruochen Huang Jeong YunWon diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -210,6 +210,7 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner + Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -241,6 +242,7 @@ Aaron Tubbs Ben Darnell Roberto De Ioris + Logan Chien Juan Francisco Cantero Hurtado Ruochen Huang Jeong YunWon diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -12,11 +12,11 @@ We also merged a major upgrade to our C-API layer (cpyext), simplifying the interaction between c-level objects and PyPy interpreter level objects. As a result, lxml with its cython compiled component `passes all tests`_ on PyPy +and the new cpyext is a lot faster than the previous one. -Users who have gotten used to vmprof_ on Linux, and those on other platforms -who have not yet tried its awesomeness, will be happy to hear that vmprof -now just works on MacOS and Windows too, in both PyPy (built-in support) and -CPython (as an installed module). +vmprof_ has been a go-to profiler for PyPy on linux for a few releases +and we're happy to announce that thanks to commercial cooperation, vmprof +now works on Linux, OS X and Windows on both PyPy and CPython. 
You can download the PyPy 5.0 release here: @@ -36,7 +36,7 @@ While not applicable only to PyPy, `cffi`_ is arguably our most significant contribution to the python ecosystem. PyPy 5.0 ships with -`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a c program. +`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a C program. .. _`PyPy`: http://doc.pypy.org .. _`RPython`: https://rpython.readthedocs.org @@ -52,18 +52,18 @@ ============= PyPy is a very compliant Python interpreter, almost a drop-in replacement for -CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison) +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) due to its integrated tracing JIT compiler. We also welcome developers of other `dynamic languages`_ to see what RPython can do for them. This release supports **x86** machines on most common operating systems -(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, freebsd), +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the -big- and little-endian variants of **ppc64** running Linux. +big- and little-endian variants of **PPC64** running Linux. -.. _`pypy and cpython 2.7.x`: http://speed.pypy.org +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org .. _`dynamic languages`: http://pypyjs.org Other Highlights (since 4.0.1 released in November 2015) @@ -103,7 +103,7 @@ * More completely support datetime, optimize timedelta creation - * Fix for issue 2185 which caused an inconsistent list of operations to be + * Fix for issue #2185 which caused an inconsistent list of operations to be generated by the unroller, appeared in a complicated DJango app * Fix an elusive issue with stacklets on shadowstack which showed up when diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -1,7 +1,7 @@ # Edit these appropriately before running this script -maj=4 +maj=5 min=0 -rev=1 +rev=0 # This script will download latest builds from the buildmaster, rename the top # level directory, and repackage ready to be uploaded to bitbucket. It will also # download source, assuming a tag for the release already exists, and repackage them. 
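As an aside on the repackage.sh hunk just above: the script keeps the release number only as the three shell variables maj/min/rev. The short Python sketch below shows how such a triple is typically expanded into a release tag and archive names; the naming pattern and the platform list are assumptions made for the illustration, not values taken from the script itself.

def release_names(maj, minor, rev,
                  platforms=("linux", "linux64", "osx64", "win32")):
    # build "5.0.0" from the three variables set at the top of repackage.sh
    version = "%d.%d.%d" % (maj, minor, rev)
    # a tag assumed to already exist, plus hypothetical archive names
    tag = "release-%s" % version
    archives = ["pypy-%s-%s.tar.bz2" % (version, p) for p in platforms]
    return tag, archives

print(release_names(5, 0, 0))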
diff --git a/rpython/translator/c/test/test_genc.py b/rpython/translator/c/test/test_genc.py --- a/rpython/translator/c/test/test_genc.py +++ b/rpython/translator/c/test/test_genc.py @@ -596,7 +596,7 @@ t.context._graphof(foobar_fn).inhibit_tail_call = True t.source_c() lines = t.driver.cbuilder.c_source_filename.join('..', - 'rpython_translator_c_test_test_genc.c').readlines() + 'rpython_translator_c_test.c').readlines() for i, line in enumerate(lines): if '= pypy_g_foobar_fn' in line: break From pypy.commits at gmail.com Tue Mar 8 06:42:55 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 08 Mar 2016 03:42:55 -0800 (PST) Subject: [pypy-commit] pypy default: remove bugfix that didn't make it into the release Message-ID: <56debabf.03dd1c0a.62e34.ffffcb86@mx.google.com> Author: mattip Branch: Changeset: r82880:b96f63e5e9fd Date: 2016-03-08 13:39 +0200 http://bitbucket.org/pypy/pypy/changeset/b96f63e5e9fd/ Log: remove bugfix that didn't make it into the release diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -128,9 +128,6 @@ * Fix for corner case (likely shown by Krakatau) for consecutive guards with interdependencies - * Fix applevel bare class method comparisons which should fix pretty printing - in IPython - * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy From pypy.commits at gmail.com Tue Mar 8 09:38:20 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 08 Mar 2016 06:38:20 -0800 (PST) Subject: [pypy-commit] pypy default: Merged in werat/pypy/werat/fix-typo-in-documentation-1457447122290 (pull request #412) Message-ID: <56dee3dc.06b01c0a.4fdc0.22a8@mx.google.com> Author: Maciej Fijalkowski Branch: Changeset: r82882:72e2c0d9368a Date: 2016-03-08 16:37 +0200 http://bitbucket.org/pypy/pypy/changeset/72e2c0d9368a/ Log: Merged in werat/pypy/werat/fix-typo-in-documentation-1457447122290 (pull request #412) Fix typo in documentation diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -265,7 +265,7 @@ return False def evil(y): - d = {x(): 1} + d = {X(): 1} X.__eq__ = __evil_eq__ d[y] # might trigger a call to __eq__? From pypy.commits at gmail.com Tue Mar 8 09:38:34 2016 From: pypy.commits at gmail.com (werat) Date: Tue, 08 Mar 2016 06:38:34 -0800 (PST) Subject: [pypy-commit] pypy werat/fix-typo-in-documentation-1457447122290: Fix typo in documentation Message-ID: <56dee3ea.86351c0a.2535e.0926@mx.google.com> Author: werat Branch: werat/fix-typo-in-documentation-1457447122290 Changeset: r82881:33c65ef7faf9 Date: 2016-03-08 14:29 +0000 http://bitbucket.org/pypy/pypy/changeset/33c65ef7faf9/ Log: Fix typo in documentation diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -265,7 +265,7 @@ return False def evil(y): - d = {x(): 1} + d = {X(): 1} X.__eq__ = __evil_eq__ d[y] # might trigger a call to __eq__? 
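The documentation fix above restores the intended example in cpython_differences.rst: the dictionary has to be built from an instance of the class X, not a call to a lowercase x. A self-contained sketch of that example follows; the __hash__ method and the final driver call are additions made here so the snippet runs on its own, and whether the patched __eq__ is actually invoked during d[y] is exactly the implementation-dependent behaviour the document warns about.

class X(object):
    def __hash__(self):
        # a constant hash forces collisions, so a lookup may fall back to __eq__
        return 0

def __evil_eq__(self, other):
    print("__eq__ was called")
    return False

def evil(y):
    d = {X(): 1}              # the corrected line: X(), not x()
    X.__eq__ = __evil_eq__    # patch the class after the dict was created
    try:
        d[y]                  # might trigger a call to __eq__?
    except KeyError:
        pass                  # the evil __eq__ never reports equality

evil(X())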
From pypy.commits at gmail.com Tue Mar 8 10:13:32 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 08 Mar 2016 07:13:32 -0800 (PST) Subject: [pypy-commit] pypy default: ugh, export that Message-ID: <56deec1c.49f9c20a.8174f.0bde@mx.google.com> Author: fijal Branch: Changeset: r82883:70bd51df3fe6 Date: 2016-03-08 17:11 +0200 http://bitbucket.org/pypy/pypy/changeset/70bd51df3fe6/ Log: ugh, export that diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -37,6 +37,7 @@ # include #endif +RPY_EXTERN void rpython_startup_code(void) { #ifdef RPY_WITH_GIL From pypy.commits at gmail.com Tue Mar 8 10:13:34 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 08 Mar 2016 07:13:34 -0800 (PST) Subject: [pypy-commit] pypy default: merge Message-ID: <56deec1e.654fc20a.ca3e7.0d18@mx.google.com> Author: fijal Branch: Changeset: r82884:9f7abe836d20 Date: 2016-03-08 17:12 +0200 http://bitbucket.org/pypy/pypy/changeset/9f7abe836d20/ Log: merge diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -240,6 +240,7 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner + Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -271,6 +272,7 @@ Aaron Tubbs Ben Darnell Roberto De Ioris + Logan Chien Juan Francisco Cantero Hurtado Ruochen Huang Jeong YunWon diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -210,6 +210,7 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner + Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -241,6 +242,7 @@ Aaron Tubbs Ben Darnell Roberto De Ioris + Logan Chien Juan Francisco Cantero Hurtado Ruochen Huang Jeong YunWon diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -265,7 +265,7 @@ return False def evil(y): - d = {x(): 1} + d = {X(): 1} X.__eq__ = __evil_eq__ d[y] # might trigger a call to __eq__? diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -12,11 +12,11 @@ We also merged a major upgrade to our C-API layer (cpyext), simplifying the interaction between c-level objects and PyPy interpreter level objects. As a result, lxml with its cython compiled component `passes all tests`_ on PyPy +and the new cpyext is a lot faster than the previous one. -Users who have gotten used to vmprof_ on Linux, and those on other platforms -who have not yet tried its awesomeness, will be happy to hear that vmprof -now just works on MacOS and Windows too, in both PyPy (built-in support) and -CPython (as an installed module). +vmprof_ has been a go-to profiler for PyPy on linux for a few releases +and we're happy to announce that thanks to commercial cooperation, vmprof +now works on Linux, OS X and Windows on both PyPy and CPython. You can download the PyPy 5.0 release here: @@ -36,7 +36,7 @@ While not applicable only to PyPy, `cffi`_ is arguably our most significant contribution to the python ecosystem. PyPy 5.0 ships with -`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a c program. +`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a C program. .. _`PyPy`: http://doc.pypy.org .. 
_`RPython`: https://rpython.readthedocs.org @@ -52,18 +52,18 @@ ============= PyPy is a very compliant Python interpreter, almost a drop-in replacement for -CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison) +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) due to its integrated tracing JIT compiler. We also welcome developers of other `dynamic languages`_ to see what RPython can do for them. This release supports **x86** machines on most common operating systems -(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, freebsd), +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the -big- and little-endian variants of **ppc64** running Linux. +big- and little-endian variants of **PPC64** running Linux. -.. _`pypy and cpython 2.7.x`: http://speed.pypy.org +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org .. _`dynamic languages`: http://pypyjs.org Other Highlights (since 4.0.1 released in November 2015) @@ -103,7 +103,7 @@ * More completely support datetime, optimize timedelta creation - * Fix for issue 2185 which caused an inconsistent list of operations to be + * Fix for issue #2185 which caused an inconsistent list of operations to be generated by the unroller, appeared in a complicated DJango app * Fix an elusive issue with stacklets on shadowstack which showed up when @@ -128,9 +128,6 @@ * Fix for corner case (likely shown by Krakatau) for consecutive guards with interdependencies - * Fix applevel bare class method comparisons which should fix pretty printing - in IPython - * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -1,7 +1,7 @@ # Edit these appropriately before running this script -maj=4 +maj=5 min=0 -rev=1 +rev=0 # This script will download latest builds from the buildmaster, rename the top # level directory, and repackage ready to be uploaded to bitbucket. It will also # download source, assuming a tag for the release already exists, and repackage them. diff --git a/rpython/rlib/rvmprof/src/vmprof_getpc.h b/rpython/rlib/rvmprof/src/vmprof_getpc.h --- a/rpython/rlib/rvmprof/src/vmprof_getpc.h +++ b/rpython/rlib/rvmprof/src/vmprof_getpc.h @@ -54,6 +54,7 @@ // It will cause problems for FreeBSD though!, because it turns off // the needed __BSD_VISIBLE. 
#ifdef __APPLE__ +#include #define _XOPEN_SOURCE 500 #endif @@ -144,7 +145,11 @@ #else intptr_t GetPC(ucontext_t *signal_ucontext) { #ifdef __APPLE__ +#if ((ULONG_MAX) == (UINT_MAX)) + return (signal_ucontext->uc_mcontext->__ss.__eip); +#else return (signal_ucontext->uc_mcontext->__ss.__rip); +#endif #else return signal_ucontext->PC_FROM_UCONTEXT; // defined in config.h #endif diff --git a/rpython/translator/c/test/test_genc.py b/rpython/translator/c/test/test_genc.py --- a/rpython/translator/c/test/test_genc.py +++ b/rpython/translator/c/test/test_genc.py @@ -596,7 +596,7 @@ t.context._graphof(foobar_fn).inhibit_tail_call = True t.source_c() lines = t.driver.cbuilder.c_source_filename.join('..', - 'rpython_translator_c_test_test_genc.c').readlines() + 'rpython_translator_c_test.c').readlines() for i, line in enumerate(lines): if '= pypy_g_foobar_fn' in line: break From pypy.commits at gmail.com Tue Mar 8 10:47:44 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 08 Mar 2016 07:47:44 -0800 (PST) Subject: [pypy-commit] pypy default: Test for rpython_startup_code being exported. And fix: it was using the Message-ID: <56def420.703dc20a.1a549.1f46@mx.google.com> Author: Armin Rigo Branch: Changeset: r82885:62a5b8816876 Date: 2016-03-08 16:46 +0100 http://bitbucket.org/pypy/pypy/changeset/62a5b8816876/ Log: Test for rpython_startup_code being exported. And fix: it was using the wrong macro anyway... diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -37,7 +37,7 @@ # include #endif -RPY_EXTERN +RPY_EXPORTED void rpython_startup_code(void) { #ifdef RPY_WITH_GIL diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -81,7 +81,7 @@ # # verify that the executable re-export symbols, but not too many if sys.platform.startswith('linux') and not kwds.get('shared', False): - seen_main = False + seen = set() g = os.popen("objdump -T '%s'" % builder.executable_name, 'r') for line in g: if not line.strip(): @@ -91,8 +91,8 @@ name = line.split()[-1] if name.startswith('__'): continue + seen.add(name) if name == 'main': - seen_main = True continue if name == 'pypy_debug_file': # ok to export this one continue @@ -104,7 +104,9 @@ "declaration of this C function or global variable" % (name,)) g.close() - assert seen_main, "did not see 'main' exported" + # list of symbols that we *want* to be exported: + for name in ['main', 'pypy_debug_file', 'rpython_startup_code']: + assert name in seen, "did not see '%r' exported" % name # return t, builder From pypy.commits at gmail.com Tue Mar 8 11:15:30 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 08 Mar 2016 08:15:30 -0800 (PST) Subject: [pypy-commit] pypy default: try to fix html titles (thanks Pim van der Eijk)w Message-ID: <56defaa2.29cec20a.2cd69.2543@mx.google.com> Author: mattip Branch: Changeset: r82886:181dc3529afd Date: 2016-03-08 18:12 +0200 http://bitbucket.org/pypy/pypy/changeset/181dc3529afd/ Log: try to fix html titles (thanks Pim van der Eijk)w diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -123,7 +123,7 @@ # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +html_title = 'PyPy documentation' # A shorter title for the navigation bar. 
Default is the same as html_title. #html_short_title = None diff --git a/rpython/doc/conf.py b/rpython/doc/conf.py --- a/rpython/doc/conf.py +++ b/rpython/doc/conf.py @@ -59,7 +59,7 @@ # General information about the project. project = u'RPython' -copyright = u'2015, The PyPy Project' +copyright = u'2016, The PyPy Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -121,7 +121,7 @@ # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +html_title = RPython Documentation # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None From pypy.commits at gmail.com Tue Mar 8 11:36:26 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 08 Mar 2016 08:36:26 -0800 (PST) Subject: [pypy-commit] pypy default: more precise wording Message-ID: <56deff8a.080a1c0a.56fd6.3ae6@mx.google.com> Author: mattip Branch: Changeset: r82888:774aa4687a6f Date: 2016-03-08 18:33 +0200 http://bitbucket.org/pypy/pypy/changeset/774aa4687a6f/ Log: more precise wording diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -11,8 +11,8 @@ We also merged a major upgrade to our C-API layer (cpyext), simplifying the interaction between c-level objects and PyPy interpreter level objects. As a -result, lxml with its cython compiled component `passes all tests`_ on PyPy -and the new cpyext is a lot faster than the previous one. +result, lxml (prerelease) with its cython compiled component +`passes all tests`_ on PyPy. The new cpyext is also much faster. vmprof_ has been a go-to profiler for PyPy on linux for a few releases and we're happy to announce that thanks to commercial cooperation, vmprof From pypy.commits at gmail.com Tue Mar 8 11:36:28 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 08 Mar 2016 08:36:28 -0800 (PST) Subject: [pypy-commit] pypy release-5.x: merge default into release Message-ID: <56deff8c.2968c20a.fdbf4.3627@mx.google.com> Author: mattip Branch: release-5.x Changeset: r82889:8c3942dc33cf Date: 2016-03-08 18:34 +0200 http://bitbucket.org/pypy/pypy/changeset/8c3942dc33cf/ Log: merge default into release diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -123,7 +123,7 @@ # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +html_title = 'PyPy documentation' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -265,7 +265,7 @@ return False def evil(y): - d = {x(): 1} + d = {X(): 1} X.__eq__ = __evil_eq__ d[y] # might trigger a call to __eq__? diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -11,8 +11,8 @@ We also merged a major upgrade to our C-API layer (cpyext), simplifying the interaction between c-level objects and PyPy interpreter level objects. As a -result, lxml with its cython compiled component `passes all tests`_ on PyPy -and the new cpyext is a lot faster than the previous one. +result, lxml (prerelease) with its cython compiled component +`passes all tests`_ on PyPy. 
The new cpyext is also much faster. vmprof_ has been a go-to profiler for PyPy on linux for a few releases and we're happy to announce that thanks to commercial cooperation, vmprof @@ -128,9 +128,6 @@ * Fix for corner case (likely shown by Krakatau) for consecutive guards with interdependencies - * Fix applevel bare class method comparisons which should fix pretty printing - in IPython - * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy diff --git a/rpython/doc/conf.py b/rpython/doc/conf.py --- a/rpython/doc/conf.py +++ b/rpython/doc/conf.py @@ -59,7 +59,7 @@ # General information about the project. project = u'RPython' -copyright = u'2015, The PyPy Project' +copyright = u'2016, The PyPy Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -121,7 +121,7 @@ # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +html_title = 'RPython Documentation' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None diff --git a/rpython/rlib/rvmprof/src/vmprof_getpc.h b/rpython/rlib/rvmprof/src/vmprof_getpc.h --- a/rpython/rlib/rvmprof/src/vmprof_getpc.h +++ b/rpython/rlib/rvmprof/src/vmprof_getpc.h @@ -54,6 +54,7 @@ // It will cause problems for FreeBSD though!, because it turns off // the needed __BSD_VISIBLE. #ifdef __APPLE__ +#include #define _XOPEN_SOURCE 500 #endif @@ -144,7 +145,11 @@ #else intptr_t GetPC(ucontext_t *signal_ucontext) { #ifdef __APPLE__ +#if ((ULONG_MAX) == (UINT_MAX)) + return (signal_ucontext->uc_mcontext->__ss.__eip); +#else return (signal_ucontext->uc_mcontext->__ss.__rip); +#endif #else return signal_ucontext->PC_FROM_UCONTEXT; // defined in config.h #endif diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -37,6 +37,7 @@ # include #endif +RPY_EXPORTED void rpython_startup_code(void) { #ifdef RPY_WITH_GIL diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -81,7 +81,7 @@ # # verify that the executable re-export symbols, but not too many if sys.platform.startswith('linux') and not kwds.get('shared', False): - seen_main = False + seen = set() g = os.popen("objdump -T '%s'" % builder.executable_name, 'r') for line in g: if not line.strip(): @@ -91,8 +91,8 @@ name = line.split()[-1] if name.startswith('__'): continue + seen.add(name) if name == 'main': - seen_main = True continue if name == 'pypy_debug_file': # ok to export this one continue @@ -104,7 +104,9 @@ "declaration of this C function or global variable" % (name,)) g.close() - assert seen_main, "did not see 'main' exported" + # list of symbols that we *want* to be exported: + for name in ['main', 'pypy_debug_file', 'rpython_startup_code']: + assert name in seen, "did not see '%r' exported" % name # return t, builder From pypy.commits at gmail.com Tue Mar 8 11:36:24 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 08 Mar 2016 08:36:24 -0800 (PST) Subject: [pypy-commit] pypy default: typo Message-ID: <56deff88.463f1c0a.2710b.3acf@mx.google.com> Author: mattip Branch: Changeset: r82887:169da91b21a2 Date: 
2016-03-08 18:16 +0200 http://bitbucket.org/pypy/pypy/changeset/169da91b21a2/ Log: typo diff --git a/rpython/doc/conf.py b/rpython/doc/conf.py --- a/rpython/doc/conf.py +++ b/rpython/doc/conf.py @@ -121,7 +121,7 @@ # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -html_title = RPython Documentation +html_title = 'RPython Documentation' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None From pypy.commits at gmail.com Tue Mar 8 14:29:18 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 08 Mar 2016 11:29:18 -0800 (PST) Subject: [pypy-commit] pypy default: mention jetbrains Message-ID: <56df280e.0357c20a.116f8.7362@mx.google.com> Author: fijal Branch: Changeset: r82890:9c4299dc2d60 Date: 2016-03-08 21:28 +0200 http://bitbucket.org/pypy/pypy/changeset/9c4299dc2d60/ Log: mention jetbrains diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -15,8 +15,8 @@ `passes all tests`_ on PyPy. The new cpyext is also much faster. vmprof_ has been a go-to profiler for PyPy on linux for a few releases -and we're happy to announce that thanks to commercial cooperation, vmprof -now works on Linux, OS X and Windows on both PyPy and CPython. +and we're happy to announce that thanks to the cooperation with jetbrains, +vmprof now works on Linux, OS X and Windows on both PyPy and CPython. You can download the PyPy 5.0 release here: From pypy.commits at gmail.com Tue Mar 8 14:55:35 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 08 Mar 2016 11:55:35 -0800 (PST) Subject: [pypy-commit] pypy default: disable the warning - it always shows (until we can find a better solution) Message-ID: <56df2e37.c85b1c0a.db10b.1eb3@mx.google.com> Author: fijal Branch: Changeset: r82891:e4cbd702822c Date: 2016-03-08 21:54 +0200 http://bitbucket.org/pypy/pypy/changeset/e4cbd702822c/ Log: disable the warning - it always shows (until we can find a better solution) diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -60,10 +60,10 @@ Must be smaller than 1.0 """ w_modules = space.sys.get('modules') - if space.contains_w(w_modules, space.wrap('_continuation')): - space.warn(space.wrap("Using _continuation/greenlet/stacklet together " - "with vmprof will crash"), - space.w_RuntimeWarning) + #if space.contains_w(w_modules, space.wrap('_continuation')): + # space.warn(space.wrap("Using _continuation/greenlet/stacklet together " + # "with vmprof will crash"), + # space.w_RuntimeWarning) try: rvmprof.enable(fileno, period) except rvmprof.VMProfError, e: From pypy.commits at gmail.com Tue Mar 8 14:59:53 2016 From: pypy.commits at gmail.com (krono) Date: Tue, 08 Mar 2016 11:59:53 -0800 (PST) Subject: [pypy-commit] pypy default: Fix guard sorting in trace viewer Message-ID: <56df2f39.a2afc20a.6d26a.7cfd@mx.google.com> Author: Tobias Pape Branch: Changeset: r82892:d86a5d5d083f Date: 2016-03-08 20:49 +0100 http://bitbucket.org/pypy/pypy/changeset/d86a5d5d083f/ Log: Fix guard sorting in trace viewer (previously sorted by string order, which does not work for hex) diff --git a/rpython/jit/tool/traceviewer.py b/rpython/jit/tool/traceviewer.py --- a/rpython/jit/tool/traceviewer.py +++ b/rpython/jit/tool/traceviewer.py @@ -103,9 +103,9 @@ self.last_guard = -1 else: # guards can be out of order nowadays - groups = sorted(groups) - 
self.first_guard = guard_number(groups[0]) - self.last_guard = guard_number(groups[-1]) + groups = sorted(map(guard_number, groups)) + self.first_guard = groups[0] + self.last_guard = groups[-1] content = property(get_content, set_content) @@ -156,8 +156,7 @@ dotgen.emit_edge(self.name(), self.right.name()) def split_one_loop(real_loops, guard_s, guard_content, lineno, no, allloops): - for i in range(len(allloops) - 1, -1, -1): - loop = allloops[i] + for i, loop in enumerate(allloops): if no < loop.first_guard or no > loop.last_guard: continue content = loop.content From pypy.commits at gmail.com Tue Mar 8 14:59:55 2016 From: pypy.commits at gmail.com (krono) Date: Tue, 08 Mar 2016 11:59:55 -0800 (PST) Subject: [pypy-commit] pypy default: Automated merge with ssh://bitbucket.org/pypy/pypy Message-ID: <56df2f3b.e6ebc20a.36ac0.7fe2@mx.google.com> Author: Tobias Pape Branch: Changeset: r82893:c49a7c2adcb7 Date: 2016-03-08 20:49 +0100 http://bitbucket.org/pypy/pypy/changeset/c49a7c2adcb7/ Log: Automated merge with ssh://bitbucket.org/pypy/pypy diff --git a/rpython/jit/tool/traceviewer.py b/rpython/jit/tool/traceviewer.py --- a/rpython/jit/tool/traceviewer.py +++ b/rpython/jit/tool/traceviewer.py @@ -103,9 +103,9 @@ self.last_guard = -1 else: # guards can be out of order nowadays - groups = sorted(groups) - self.first_guard = guard_number(groups[0]) - self.last_guard = guard_number(groups[-1]) + groups = sorted(map(guard_number, groups)) + self.first_guard = groups[0] + self.last_guard = groups[-1] content = property(get_content, set_content) @@ -156,8 +156,7 @@ dotgen.emit_edge(self.name(), self.right.name()) def split_one_loop(real_loops, guard_s, guard_content, lineno, no, allloops): - for i in range(len(allloops) - 1, -1, -1): - loop = allloops[i] + for i, loop in enumerate(allloops): if no < loop.first_guard or no > loop.last_guard: continue content = loop.content From pypy.commits at gmail.com Tue Mar 8 14:59:59 2016 From: pypy.commits at gmail.com (krono) Date: Tue, 08 Mar 2016 11:59:59 -0800 (PST) Subject: [pypy-commit] pypy default: Automated merge with ssh://bitbucket.org/pypy/pypy Message-ID: <56df2f3f.046f1c0a.c8d06.ffff8f63@mx.google.com> Author: Tobias Pape Branch: Changeset: r82895:7395989c4270 Date: 2016-03-08 20:58 +0100 http://bitbucket.org/pypy/pypy/changeset/7395989c4270/ Log: Automated merge with ssh://bitbucket.org/pypy/pypy diff --git a/rpython/jit/tool/traceviewer.py b/rpython/jit/tool/traceviewer.py --- a/rpython/jit/tool/traceviewer.py +++ b/rpython/jit/tool/traceviewer.py @@ -103,9 +103,9 @@ self.last_guard = -1 else: # guards can be out of order nowadays - groups = sorted(groups) - self.first_guard = guard_number(groups[0]) - self.last_guard = guard_number(groups[-1]) + groups = sorted(map(guard_number, groups)) + self.first_guard = groups[0] + self.last_guard = groups[-1] content = property(get_content, set_content) From pypy.commits at gmail.com Tue Mar 8 14:59:57 2016 From: pypy.commits at gmail.com (krono) Date: Tue, 08 Mar 2016 11:59:57 -0800 (PST) Subject: [pypy-commit] pypy default: revert tiny refactoring to retain reversedness Message-ID: <56df2f3d.465ec20a.11873.7cb0@mx.google.com> Author: Tobias Pape Branch: Changeset: r82894:13d96e945b0b Date: 2016-03-08 20:57 +0100 http://bitbucket.org/pypy/pypy/changeset/13d96e945b0b/ Log: revert tiny refactoring to retain reversedness diff --git a/rpython/jit/tool/traceviewer.py b/rpython/jit/tool/traceviewer.py --- a/rpython/jit/tool/traceviewer.py +++ b/rpython/jit/tool/traceviewer.py @@ -156,7 +156,8 @@ 
dotgen.emit_edge(self.name(), self.right.name()) def split_one_loop(real_loops, guard_s, guard_content, lineno, no, allloops): - for i, loop in enumerate(allloops): + for i in range(len(allloops) - 1, -1, -1): + loop = allloops[i] if no < loop.first_guard or no > loop.last_guard: continue content = loop.content From pypy.commits at gmail.com Tue Mar 8 18:54:38 2016 From: pypy.commits at gmail.com (krono) Date: Tue, 08 Mar 2016 15:54:38 -0800 (PST) Subject: [pypy-commit] pypy traceviewer-common-merge-point-formats: (traceviewer) try to parse most common debug_merge_point formarts out there Message-ID: <56df663e.6bb8c20a.10f54.ffffb852@mx.google.com> Author: Tobias Pape Branch: traceviewer-common-merge-point-formats Changeset: r82896:00a68dd7adc9 Date: 2016-03-09 00:53 +0100 http://bitbucket.org/pypy/pypy/changeset/00a68dd7adc9/ Log: (traceviewer) try to parse most common debug_merge_point formarts out there Includes: - PyPy (clearly) - PyPy cffi callbacks - RSqueak - js - pycket - others (hippy, topaz) via fallback diff --git a/rpython/jit/tool/test/test_traceviewer.py b/rpython/jit/tool/test/test_traceviewer.py --- a/rpython/jit/tool/test/test_traceviewer.py +++ b/rpython/jit/tool/test/test_traceviewer.py @@ -1,7 +1,8 @@ import math import py from rpython.jit.tool.traceviewer import splitloops, FinalBlock, Block,\ - split_one_loop, postprocess, main, get_gradient_color, guard_number + split_one_loop, postprocess, main, get_gradient_color, guard_number,\ + find_name_key def test_gradient_color(): @@ -103,3 +104,46 @@ fname = py.path.local(__file__).join('..', 'f.pypylog.bz2') main(str(fname), False, view=False) # assert did not explode + +class TestMergPointStringExtraciton(object): + + def test_find_name_key(self): + def find(s): + return find_name_key(FinalBlock(s, None)) + assert find(r"debug_merge_point(0, 0, ' #63 GET_ITER')") \ + == (r"f5. file 'f.py'. line 34 #63 GET_ITER", r" #63 GET_ITER") + assert find(r"debug_merge_point(0, 0, ' ')") \ + == (r"f5. file 'f.py'. line 34 ", r" ") + assert find(r"debug_merge_point(0, 0, 'cffi_callback ')") \ + == (r"f5. file 'f.py'. line 34 (cffi_callback)", r"cffi_callback ") + assert find(r"debug_merge_point(0, 0, 'cffi_callback ')") \ + == (r"? (cffi_callback)", r"cffi_callback ") + assert find(r"debug_merge_point(0, 0, 'cffi_call_python somestr')") \ + == (r"somestr (cffi_call_python)", r"cffi_call_python somestr") + assert find(r"debug_merge_point(0, 0, '(SequenceableCollection >> #replaceFrom:to:with:startingAt:) [8]: <0x14>pushTemporaryVariableBytecode(4)')") \ + == (r"SequenceableCollection>>#replaceFrom:to:with:startingAt: @ 8 ", r"(SequenceableCollection >> #replaceFrom:to:with:startingAt:) [8]: <0x14>pushTemporaryVariableBytecode(4)") + assert find(r"debug_merge_point(1, 4, '(Magnitude >> #min:max:) [0]: <0x70>pushReceiverBytecode')") \ + == (r"Magnitude>>#min:max: @ 0 ", r"(Magnitude >> #min:max:) [0]: <0x70>pushReceiverBytecode") + assert find(r"debug_merge_point(0, 0, '(#DoIt) [0]: <0x70>pushReceiverBytecode')") \ + == (r"#DoIt @ 0 ", r"(#DoIt) [0]: <0x70>pushReceiverBytecode") + + assert find(r"debug_merge_point(0, 0, '54: LOAD LIST 4')") \ + == (r"? @ 54 ", r"54: LOAD LIST 4") + assert find(r"debug_merge_point(0, 0, '44: LOAD_MEMBER_DOT function: barfoo')") \ + == (r"barfoo @ 44 ", r"44: LOAD_MEMBER_DOT function: barfoo") + assert find(r"debug_merge_point(0, 0, '87: end of opcodes')") \ + == (r"? 
@ 87 ", r"87: end of opcodes") + assert find(r"debug_merge_point(0, 0, 'Green_Ast is None')") \ + == (r"Green_Ast is None", r"Green_Ast is None") + assert find(r"debug_merge_point(0, 0, 'Label(safe_return_multi_vals:pycket.interpreter:565)')") \ + == (r"Label(safe_return_multi_vals:pycket.interpreter:565)", r"Label(safe_return_multi_vals:pycket.interpreter:565)") + assert find(r"debug_merge_point(0, 0, '(*node2 item AppRand1_289 AppRand2_116)')") \ + == (r"(*node2 item AppRand1_289 AppRand2_116)", r"(*node2 item AppRand1_289 AppRand2_116)") + assert find(r"debug_merge_point(0, 0, '(let ([if_2417 (let ([AppRand0_2026 (* Zr Zr)][AppRand1_1531 (* Zi Zi)]) (let ([AppRand0_2027 (+ AppRand0_2026 AppRand1_1531)]) (> AppRand0_2027 LIMIT-SQR)))]) (if if_2417 0 (let ([if_2416 (= i ITERATIONS)]) (if if_2416 1 (let ([Zr199 (let ([AppRand0_2041 (* Zr Zr)][AppRand1_1540 (* Zi Zi)]) (let ([AppRand0_2042 (- AppRand0_2041 AppRand1_1540)]) (+ AppRand0_2042 Cr)))][Zi206 (let ([AppRand1_1541 (* Zr Zi)]) (let ([AppRand0_2043 (* 2.0 AppRand1_1541)]) (+ AppRand0_2043 Ci)))]) (let ([Zr211 (let ([AppRand0_2038 (* Zr199 Zr199)][AppRand1_1538 (* Zi206 Zi206)]) (let ([AppRand0_2039 (- AppRand0_2038 AppRand1_1538)]) (+ AppRand0_2039 Cr)))][Zi218 (let ([AppRand1_1539 (* Zr199 Zi206)]) (let ([AppRand0_2040 (* 2.0 AppRand1_1539)]) (+ AppRand0_2040 Ci)))]) (let ([Zr223 (let ([AppRand0_2035 (* Zr211 Zr211)][AppRand1_1536 (* Zi218 Zi218)]) (let ([AppRand0_2036 (- AppRand0_2035 AppRand1_1536)]) (+ AppRand0_2036 Cr)))][Zi230 (let ([AppRand1_1537 (* Zr211 Zi218)]) (let ([AppRand0_2037 (* 2.0 AppRand1_1537)]) (+ AppRand0_2037 Ci)))]) (let ([Zr235 (let ([AppRand0_2032 (* Zr223 Zr223)][AppRand1_1534 (* Zi230 Zi230)]) (let ([AppRand0_2033 (- AppRand0_2032 AppRand1_1534)]) (+ AppRand0_2033 Cr)))][Zi242 (let ([AppRand1_1535 (* Zr223 Zi230)]) (let ([AppRand0_2034 (* 2.0 AppRand1_1535)]) (+ AppRand0_2034 Ci)))]) (let ([Zr247 (let ([AppRand0_2029 (* Zr235 Zr235)][AppRand1_1532 (* Zi242 Zi242)]) (let ([AppRand0_2030 (- AppRand0_2029 AppRand1_1532)]) (+ AppRand0_2030 Cr)))][Zi254 (let ([AppRand1_1533 (* Zr235 Zi242)]) (let ([AppRand0_2031 (* 2.0 AppRand1_1533)]) (+ AppRand0_2031 Ci)))]) (let ([AppRand0_2028 (+ i 5)]) (loop AppRand0_2028 Zr247 Zi254))))))))))) from (loop AppRand0_2028 Zr247 Zi254)')") \ + == (r"(let ([if_2417 (let ([AppRand0_2026 (* Zr Zr)][AppRand1_1531 (* ...", r"(let ([if_2417 (let ([AppRand0_2026 (* Zr Zr)][AppRand1_1531 (* Zi Zi)]) (let ([AppRand0_2027 (+ AppRand0_2026 AppRand1_1531)]) (> AppRand0_2027 LIMIT-SQR)))]) (if if_2417 0 (let ([if_2416 (= i ITERATIONS)]) (if if_2416 1 (let ([Zr199 (let ([AppRand0_2041 (* Zr Zr)][AppRand1_1540 (* Zi Zi)]) (let ([AppRand0_2042 (- AppRand0_2041 AppRand1_1540)]) (+ AppRand0_2042 Cr)))][Zi206 (let ([AppRand1_1541 (* Zr Zi)]) (let ([AppRand0_2043 (* 2.0 AppRand1_1541)]) (+ AppRand0_2043 Ci)))]) (let ([Zr211 (let ([AppRand0_2038 (* Zr199 Zr199)][AppRand1_1538 (* Zi206 Zi206)]) (let ([AppRand0_2039 (- AppRand0_2038 AppRand1_1538)]) (+ AppRand0_2039 Cr)))][Zi218 (let ([AppRand1_1539 (* Zr199 Zi206)]) (let ([AppRand0_2040 (* 2.0 AppRand1_1539)]) (+ AppRand0_2040 Ci)))]) (let ([Zr223 (let ([AppRand0_2035 (* Zr211 Zr211)][AppRand1_1536 (* Zi218 Zi218)]) (let ([AppRand0_2036 (- AppRand0_2035 AppRand1_1536)]) (+ AppRand0_2036 Cr)))][Zi230 (let ([AppRand1_1537 (* Zr211 Zi218)]) (let ([AppRand0_2037 (* 2.0 AppRand1_1537)]) (+ AppRand0_2037 Ci)))]) (let ([Zr235 (let ([AppRand0_2032 (* Zr223 Zr223)][AppRand1_1534 (* Zi230 Zi230)]) (let ([AppRand0_2033 (- AppRand0_2032 AppRand1_1534)]) (+ 
AppRand0_2033 Cr)))][Zi242 (let ([AppRand1_1535 (* Zr223 Zi230)]) (let ([AppRand0_2034 (* 2.0 AppRand1_1535)]) (+ AppRand0_2034 Ci)))]) (let ([Zr247 (let ([AppRand0_2029 (* Zr235 Zr235)][AppRand1_1532 (* Zi242 Zi242)]) (let ([AppRand0_2030 (- AppRand0_2029 AppRand1_1532)]) (+ AppRand0_2030 Cr)))][Zi254 (let ([AppRand1_1533 (* Zr235 Zi242)]) (let ([AppRand0_2031 (* 2.0 AppRand1_1533)]) (+ AppRand0_2031 Ci)))]) (let ([AppRand0_2028 (+ i 5)]) (loop AppRand0_2028 Zr247 Zi254))))))))))) from (loop AppRand0_2028 Zr247 Zi254)") + assert find(r"debug_merge_point(0, 0, 'times at LOAD_SELF')") \ + == (r"times at LOAD_SELF", r"times at LOAD_SELF") + assert find(r"debug_merge_point(1, 1, 'block in
    at LOAD_DEREF')") \ + == (r"block in
    at LOAD_DEREF", r"block in
    at LOAD_DEREF") + assert find(r"debug_merge_point(0, 0, '
    at SEND')") \ + == (r"
    at SEND", r"
    at SEND") diff --git a/rpython/jit/tool/traceviewer.py b/rpython/jit/tool/traceviewer.py --- a/rpython/jit/tool/traceviewer.py +++ b/rpython/jit/tool/traceviewer.py @@ -219,19 +219,78 @@ counter += loop.count("\n") + 2 return real_loops, allloops + +def find_name_key(l): + m = re.search("debug_merge_point\((?:\d+,\ )*'(.*)'(?:, \d+)*\)", l.content) + if m is None: + # default fallback + return '?', '?' + info = m.group(1) + + # PyPy (pypy/module/pypyjit/interp_jit.py, pypy/interpreter/generator.py) + # ' #63 GET_ITER' + # ' ' + m = re.search("^( (.*?))$", info) + if m: + return m.group(2) + " " + m.group(3), m.group(1) + + # PyPy cffi (pypy/module/_cffi_backend/ccallback.py) + # 'cffi_callback ', 'cffi_callback ' + # 'cffi_call_python somestr' + m = re.search("^((cffi_callback) )$", info) + if m: + return "%s (%s)" %(m.group(3), m.group(2)), m.group(1) + m = re.search("^((cffi_callback) <\?>)$", info) + if m: + return "? (%s)" %(m.group(2)), m.group(1) + m = re.search("^((cffi_call_python) (.*))$", info) + if m: + return "%s (%s)" %(m.group(3), m.group(2)), m.group(1) + + # RSqueak/lang-smalltalk (spyvm/interpreter.py) + # '(SequenceableCollection >> #replaceFrom:to:with:startingAt:) [8]: <0x14>pushTemporaryVariableBytecode(4)' + m = re.search("^(\(((.+?) >> )?(#.*)\) \[(\d+)\].+?>(.*?)(?:\(\d+\))?)$", info) + if m: + if m.group(3): + return "%s>>%s @ %s <%s>" % (m.group(3), m.group(4), m.group(5), m.group(6)), m.group(1) + else: + return "%s @ %s <%s>" % (m.group(4), m.group(5), m.group(6)), m.group(1) + + # lang-js (js/jscode.py) + # '54: LOAD LIST 4' + # '44: LOAD_MEMBER_DOT function: barfoo' + # '87: end of opcodes' + m = re.search("^((\d+): (.+?)(:? function: (.+?))?)$", info) + if m: + if m.group(5): + return "%s @ %s <%s>" % (m.group(5), m.group(2), m.group(3)), m.group(1) + else: + return "? @ %s <%s>" % (m.group(2), m.group(3)), m.group(1) + + # pycket (pycket/interpreter.py) [sorted down because the s-exp is very generic] + # 'Green_Ast is None' + # 'Label(safe_return_multi_vals:pycket.interpreter:565)' + # '(*node2 item AppRand1_289 AppRand2_116)' + if info[0] == '(' and info[-1] == ')': + if len(info) > 64: #s-exp can be quite long + return info[:64] +'...', info + + # info fallback (eg, rsre_jit, qoppy, but also + # pyhaskell (pyhaskell/interpreter/haskell.py) + # pyrolog (prolog/interpreter/continuation.py) + # RPySOM/RTruffleSom (src/som/interpreter/interpreter.py) + # Topaz (topaz/interpreter.py) + # hippyvm (hippy/interpreter.py) + return info, info + def postprocess_loop(loop, loops, memo, counts): + if loop in memo: return memo.add(loop) if loop is None: return - m = re.search("debug_merge_point\((?:\d+,\ )*'( (.*?))'", loop.content) - if m is None: - name = '?' - loop.key = '?' - else: - name = m.group(2) + " " + m.group(3) - loop.key = m.group(1) + name, loop.key = find_name_key(loop) opsno = loop.content.count("\n") lastline = loop.content[loop.content.rfind("\n", 0, len(loop.content) - 2):] m = re.search('descr= Author: Armin Rigo Branch: extradoc Changeset: r715:60b977006d8b Date: 2016-03-09 02:54 +0100 http://bitbucket.org/pypy/pypy.org/changeset/60b977006d8b/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -15,7 +15,7 @@ - $63060 of $105000 (60.1%) + $63079 of $105000 (60.1%)
    @@ -23,7 +23,7 @@
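To make the idea behind find_name_key() in the traceviewer changeset above easier to see in isolation: the function tries one format-specific regular expression after another against the text of a debug_merge_point and falls back to the raw string when nothing matches. The sketch below is a heavily condensed stand-in that handles only the js-style "NN: OPCODE function: name" format plus the fallback; the helper name is invented for the example and it is not the committed function.

import re

def classify_merge_point(line):
    # pull the quoted info string out of a debug_merge_point(...) line
    m = re.search(r"debug_merge_point\((?:\d+, )*'(.*)'(?:, \d+)*\)", line)
    if m is None:
        return '?', '?'
    info = m.group(1)
    # js-style bytecode position, e.g. "44: LOAD_MEMBER_DOT function: barfoo"
    m = re.search(r"^(\d+): (\S+)(?: function: (.+))?$", info)
    if m:
        name = m.group(3) or '?'
        return "%s @ %s" % (name, m.group(1)), info
    # fallback: use the raw info string as both display name and key
    return info, info

print(classify_merge_point(
    "debug_merge_point(0, 0, '44: LOAD_MEMBER_DOT function: barfoo')"))
print(classify_merge_point(
    "debug_merge_point(0, 0, 'Green_Ast is None')"))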
  • From pypy.commits at gmail.com Wed Mar 9 02:59:21 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 08 Mar 2016 23:59:21 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: progress towards a non-unrolled features Message-ID: <56dfd7d9.4c181c0a.d2b1d.4067@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82897:ee3bb1c8b518 Date: 2016-03-09 09:58 +0200 http://bitbucket.org/pypy/pypy/changeset/ee3bb1c8b518/ Log: progress towards a non-unrolled features diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1563,7 +1563,6 @@ def _done_with_this_frame(self): # rare case: we only get there if the blackhole interps all returned # normally (in general we get a ContinueRunningNormally exception). - sd = self.builder.metainterp_sd kind = self._return_type if kind == 'v': raise jitexc.DoneWithThisFrameVoid() diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -200,20 +200,15 @@ # ____________________________________________________________ -def compile_simple_loop(metainterp, greenkey, start, trace, jumpargs, - enable_opts): - xxxx +def compile_simple_loop(metainterp, greenkey, trace, enable_opts): from rpython.jit.metainterp.optimizeopt import optimize_trace jitdriver_sd = metainterp.jitdriver_sd metainterp_sd = metainterp.staticdata jitcell_token = make_jitcell_token(jitdriver_sd) - label = ResOperation(rop.LABEL, inputargs[:], descr=jitcell_token) - jump_op = ResOperation(rop.JUMP, jumpargs[:], descr=jitcell_token) call_pure_results = metainterp.call_pure_results - data = SimpleCompileData(label, ops + [jump_op], - call_pure_results=call_pure_results, - enable_opts=enable_opts) + data = SimpleCompileData(trace, call_pure_results=call_pure_results, + enable_opts=enable_opts) try: loop_info, ops = optimize_trace(metainterp_sd, jitdriver_sd, data, metainterp.box_names_memo) @@ -234,7 +229,7 @@ loop.check_consistency() jitcell_token.target_tokens = [target_token] send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, "loop", - inputargs, metainterp.box_names_memo) + loop_info.inputargs, metainterp.box_names_memo) record_loop_or_bridge(metainterp_sd, loop) return target_token @@ -262,9 +257,8 @@ jitcell_token = make_jitcell_token(jitdriver_sd) history.record(rop.JUMP, jumpargs, None, descr=jitcell_token) if 'unroll' not in enable_opts or not metainterp.cpu.supports_guard_gc_type: - return compile_simple_loop(metainterp, greenkey, start, inputargs, - history.trace, - jumpargs, enable_opts) + return compile_simple_loop(metainterp, greenkey, history.trace, + enable_opts) call_pure_results = metainterp.call_pure_results preamble_data = LoopCompileData(history.trace, inputargs, call_pure_results=call_pure_results, @@ -332,22 +326,22 @@ to the first operation. 
""" from rpython.jit.metainterp.optimizeopt import optimize_trace - from rpython.jit.metainterp.optimizeopt.optimizer import BasicLoopInfo - history = metainterp.history + trace = metainterp.history.trace.cut_trace_from(start, inputargs) metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd + history = metainterp.history loop_jitcell_token = metainterp.get_procedure_token(greenkey) assert loop_jitcell_token end_label = ResOperation(rop.LABEL, inputargs[:], descr=loop_jitcell_token) - jump_op = ResOperation(rop.JUMP, jumpargs[:], descr=loop_jitcell_token) + cut_pos = history.get_trace_position() + history.record(rop.JUMP, jumpargs[:], None, descr=loop_jitcell_token) enable_opts = jitdriver_sd.warmstate.enable_opts - ops = history.operations[start:] call_pure_results = metainterp.call_pure_results - loop_data = UnrolledLoopData(end_label, jump_op, ops, start_state, + loop_data = UnrolledLoopData(trace, loop_jitcell_token, start_state, call_pure_results=call_pure_results, enable_opts=enable_opts) try: @@ -356,6 +350,7 @@ metainterp.box_names_memo) except InvalidLoop: # Fall back on jumping directly to preamble + xxxx jump_op = ResOperation(rop.JUMP, inputargs[:], descr=loop_jitcell_token) loop_data = UnrolledLoopData(end_label, jump_op, [jump_op], start_state, call_pure_results=call_pure_results, diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -657,8 +657,8 @@ def length(self): return self.trace._count - def get_cut_position(self): - return len(self.trace._ops) + def get_trace_position(self): + return self.trace.cut_point() def cut(self, cut_at): self.trace.cut_at(cut_at) @@ -686,11 +686,13 @@ op.setref_base(value) return op + def record_nospec(self, opnum, argboxes, descr=None): + return self.trace.record_op(opnum, argboxes, descr) + def record_default_val(self, opnum, argboxes, descr=None): - op = ResOperation(opnum, argboxes, descr) - assert op.is_same_as() + assert rop.is_same_as(opnum) + op = self.trace.record_op(opnum, argboxes, descr) op.copy_value_from(argboxes[0]) - self.operations.append(op) return op diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -57,15 +57,22 @@ return size, self._next(), self._next() class TraceIterator(object): - def __init__(self, trace, end): + def __init__(self, trace, start, end, force_inputargs=None): self.trace = trace - self.inputargs = [rop.inputarg_from_tp(arg.type) for - arg in self.trace.inputargs] - self.start = 0 - self.pos = 0 + self._cache = [None] * trace._count + if force_inputargs is not None: + self.inputargs = [rop.inputarg_from_tp(arg.type) for + arg in force_inputargs] + for i, arg in enumerate(force_inputargs): + if arg.position >= 0: + self._cache[arg.position] = self.inputargs[i] + else: + self.inputargs = [rop.inputarg_from_tp(arg.type) for + arg in self.trace.inputargs] + self.start = start + self.pos = start self._count = 0 self.end = end - self._cache = [None] * trace._count def _get(self, i): if i < 0: @@ -126,7 +133,23 @@ self._count += 1 return res -class Trace(object): +class BaseTrace(object): + pass + +class CutTrace(BaseTrace): + def __init__(self, trace, start, count, inputargs): + self.trace = trace + self.start = start + self.inputargs = inputargs + self.count = count + + def get_iter(self): + iter = TraceIterator(self.trace, self.start, len(self.trace._ops), + 
self.inputargs) + iter._count = self.count + return iter + +class Trace(BaseTrace): def __init__(self, inputargs): self._ops = [] self._descrs = [None] @@ -139,8 +162,15 @@ def length(self): return len(self._ops) + def cut_point(self): + return len(self._ops), self._count + def cut_at(self, end): - self._ops = self._ops[:end] + self._ops = self._ops[:end[0]] + self._count = end[1] + + def cut_trace_from(self, (start, count), inputargs): + return CutTrace(self, start, count, inputargs) def _encode(self, box): if isinstance(box, Const): @@ -237,7 +267,7 @@ assert self._ops[resumedata_pos + 2] == pc def get_iter(self): - return TraceIterator(self, len(self._ops)) + return TraceIterator(self, 0, len(self._ops)) def _get_operations(self): """ NOT_RPYTHON diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -226,8 +226,9 @@ def optimize_bridge(self, trace, runtime_boxes, call_pure_results, inline_short_preamble, box_names_memo): + trace = trace.get_iter() self._check_no_forwarding([trace.inputargs]) - info, ops = self.optimizer.propagate_all_forward(trace.get_iter(), + info, ops = self.optimizer.propagate_all_forward(trace, call_pure_results, False) jump_op = info.jump_op cell_token = jump_op.getdescr() diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1523,10 +1523,11 @@ @specialize.arg(1) def execute_varargs(self, opnum, argboxes, descr, exc, pure): self.metainterp.clear_exception() + patch_pos = self.metainterp.history.get_trace_position() op = self.metainterp.execute_and_record_varargs(opnum, argboxes, descr=descr) if pure and not self.metainterp.last_exc_value and op: - op = self.metainterp.record_result_of_call_pure(op) + op = self.metainterp.record_result_of_call_pure(op, patch_pos) exc = exc and not isinstance(op, Const) if exc: if op is not None: @@ -1918,7 +1919,7 @@ def retrace_needed(self, trace, exported_state): self.partial_trace = trace - self.retracing_from = self.history.length() + self.retracing_from = self.potential_retrace_position self.exported_state = exported_state self.heapcache.reset() @@ -2435,7 +2436,7 @@ break else: if self.partial_trace: - if start != self.retracing_from: + if start != self.retracing_from: raise SwitchToBlackhole(Counters.ABORT_BAD_LOOP) # For now # Found! Compile it as a loop. # raises in case it works -- which is the common case @@ -2454,7 +2455,7 @@ self.staticdata.log('cancelled, tracing more...') # Otherwise, no loop found so far, so continue tracing. - start = self.history.get_cut_position() + start = self.history.get_trace_position() self.current_merge_points.append((live_arg_boxes, start)) def _unpack_boxes(self, boxes, start, stop): @@ -2606,7 +2607,8 @@ if not target_jitcell_token: return - cut_at = self.history.get_cut_position() + cut_at = self.history.get_trace_position() + self.potential_retrace_position = cut_at self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None, descr=target_jitcell_token) self.history.ends_with_jump = True @@ -2981,7 +2983,7 @@ debug_stop("jit-abort-longest-function") return max_jdsd, max_key - def record_result_of_call_pure(self, op): + def record_result_of_call_pure(self, op, patch_pos): """ Patch a CALL into a CALL_PURE. 
""" opnum = op.getopnum() @@ -2993,15 +2995,16 @@ else: # all-constants: remove the CALL operation now and propagate a # constant result - self.history.operations.pop() + self.history.cut(patch_pos) return resbox_as_const # not all constants (so far): turn CALL into CALL_PURE, which might # be either removed later by optimizeopt or turned back into CALL. arg_consts = [executor.constant_from_op(a) for a in op.getarglist()] self.call_pure_results[arg_consts] = resbox_as_const opnum = OpHelpers.call_pure_for_descr(op.getdescr()) - newop = op.copy_and_change(opnum, args=op.getarglist()) - self.history.operations[-1] = newop + self.history.cut(patch_pos) + newop = self.history.record_nospec(opnum, op.getarglist(), op.getdescr()) + newop.copy_value_from(op) return newop def direct_assembler_call(self, targetjitdriver_sd): diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1470,8 +1470,9 @@ def can_malloc(self): return self.is_call() or self.is_malloc() - def is_same_as(self): - return self.opnum in (rop.SAME_AS_I, rop.SAME_AS_F, rop.SAME_AS_R) + @staticmethod + def is_same_as(opnum): + return opnum in (rop.SAME_AS_I, rop.SAME_AS_F, rop.SAME_AS_R) def is_getfield(self): return self.opnum in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -130,4 +130,18 @@ loop2.inputargs = inpargs loop2.operations = l BaseTest.assert_equal(loop1, loop2) - print "success" \ No newline at end of file + + def test_cut_trace_from(self): + i0, i1, i2 = InputArgInt(), InputArgInt(), InputArgInt() + t = Trace([i0, i1, i2]) + add1 = t.record_op(rop.INT_ADD, [i0, i1]) + cut_point = t.cut_point() + add2 = t.record_op(rop.INT_ADD, [add1, i1]) + t.record_op(rop.GUARD_TRUE, [add2]) + resume.capture_resumedata([FakeFrame(3, JitCode(4), [add2, add1, i1])], + None, [], t) + t.record_op(rop.INT_SUB, [add2, add1]) + t2 = t.cut_trace_from(cut_point, [add1, i1]) + (i0, i1), l, iter = self.unpack(t2) + assert len(l) == 3 + assert l[0].getarglist() == [i0, i1] \ No newline at end of file From pypy.commits at gmail.com Wed Mar 9 03:04:35 2016 From: pypy.commits at gmail.com (fijal) Date: Wed, 09 Mar 2016 00:04:35 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: fix the cutting and starting point, enough to pass test_loop (without unrolling) Message-ID: <56dfd913.e5ecc20a.94f48.2a14@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82898:670c52c38925 Date: 2016-03-09 10:03 +0200 http://bitbucket.org/pypy/pypy/changeset/670c52c38925/ Log: fix the cutting and starting point, enough to pass test_loop (without unrolling) diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -243,6 +243,7 @@ metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd history = metainterp.history + trace = history.trace warmstate = jitdriver_sd.warmstate enable_opts = jitdriver_sd.warmstate.enable_opts @@ -252,15 +253,15 @@ enable_opts = enable_opts.copy() del enable_opts['unroll'] - assert start == 0 - #ops = history.operations[start:] jitcell_token = make_jitcell_token(jitdriver_sd) history.record(rop.JUMP, jumpargs, None, descr=jitcell_token) + if start != (0, 0): 
+ trace = trace.cut_trace_from(start, inputargs) if 'unroll' not in enable_opts or not metainterp.cpu.supports_guard_gc_type: - return compile_simple_loop(metainterp, greenkey, history.trace, + return compile_simple_loop(metainterp, greenkey, trace, enable_opts) call_pure_results = metainterp.call_pure_results - preamble_data = LoopCompileData(history.trace, inputargs, + preamble_data = LoopCompileData(trace, inputargs, call_pure_results=call_pure_results, enable_opts=enable_opts) try: @@ -275,7 +276,7 @@ start_descr = TargetToken(jitcell_token, original_jitcell_token=jitcell_token) jitcell_token.target_tokens = [start_descr] - loop_data = UnrolledLoopData(history.trace, jitcell_token, start_state, + loop_data = UnrolledLoopData(trace, jitcell_token, start_state, call_pure_results=call_pure_results, enable_opts=enable_opts) try: diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2317,7 +2317,7 @@ def _compile_and_run_once(self, original_boxes): self.initialize_state_from_start(original_boxes) - self.current_merge_points = [(original_boxes, 0)] + self.current_merge_points = [(original_boxes, (0, 0))] num_green_args = self.jitdriver_sd.num_green_args original_greenkey = original_boxes[:num_green_args] self.resumekey = compile.ResumeFromInterpDescr(original_greenkey) From pypy.commits at gmail.com Wed Mar 9 03:53:50 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 09 Mar 2016 00:53:50 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: merged the speed improvements from s390x-enhance-speed Message-ID: <56dfe49e.d3921c0a.eb707.53ce@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82899:c63346ce0b33 Date: 2016-03-09 09:46 +0100 http://bitbucket.org/pypy/pypy/changeset/c63346ce0b33/ Log: merged the speed improvements from s390x-enhance-speed diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -50,7 +50,7 @@ self.gcrootmap_retaddr_forced = 0 self.failure_recovery_code = [0, 0, 0, 0] self.wb_slowpath = [0,0,0,0,0] - # self.pool = None + self.pool = None def setup(self, looptoken): BaseAssembler.setup(self, looptoken) @@ -58,7 +58,7 @@ if we_are_translated(): self.debug = False self.current_clt = looptoken.compiled_loop_token - # POOL self.pool = LiteralPool() + self.pool = LiteralPool() self.mc = InstrBuilder(None) self.pending_guard_tokens = [] self.pending_guard_tokens_recovered = 0 @@ -76,7 +76,7 @@ self.current_clt = None self._regalloc = None self.mc = None - # self.pool = None + self.pool = None def target_arglocs(self, looptoken): @@ -350,8 +350,8 @@ gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: - diff = mc.load_imm_plus(r.r5, gcrootmap.get_root_stack_top_addr()) - mc.load(r.r5, r.r5, diff) + diff = mc.load_imm(r.r5, gcrootmap.get_root_stack_top_addr()) + mc.load(r.r5, r.r5, 0) mc.store(r.r2, r.r5, -WORD) self._pop_core_regs_from_jitframe(mc, r.MANAGED_REGS) @@ -636,7 +636,7 @@ # operations = regalloc.prepare_loop(inputargs, operations, looptoken, clt.allgcrefs) - # POOL self.pool.pre_assemble(self, operations) + self.pool.pre_assemble(self, operations) entrypos = self.mc.get_relative_pos() self._call_header_with_stack_check() looppos = self.mc.get_relative_pos() @@ -645,7 +645,7 @@ self.update_frame_depth(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) # 
size_excluding_failure_stuff = self.mc.get_relative_pos() - # POOL self.pool.post_assemble(self) + #self.pool.post_assemble(self) self.write_pending_failure_recoveries() full_size = self.mc.get_relative_pos() # @@ -704,13 +704,13 @@ operations, self.current_clt.allgcrefs, self.current_clt.frame_info) - # POOL self.pool.pre_assemble(self, operations, bridge=True) + self.pool.pre_assemble(self, operations, bridge=True) startpos = self.mc.get_relative_pos() - # POOL self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - startpos)) + self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - startpos)) self._check_frame_depth(self.mc, regalloc.get_gcmap()) frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) codeendpos = self.mc.get_relative_pos() - # POOL self.pool.post_assemble(self) + #self.pool.post_assemble(self) self.write_pending_failure_recoveries() fullsize = self.mc.get_relative_pos() # @@ -735,7 +735,6 @@ # 'faildescr.adr_jump_offset' is the address of an instruction that is a # conditional jump. We must patch this conditional jump to go # to 'adr_new_target'. - # Updates the pool address mc = InstrBuilder() mc.b_abs(adr_new_target) mc.copy_to_raw_memory(faildescr.adr_jump_offset) @@ -922,14 +921,17 @@ return assert 0, "not supported location" elif prev_loc.is_in_pool(): + if loc.is_core_reg(): + self.mc.LG(loc, prev_loc) + return # move immediate value to fp register if loc.is_fp_reg(): - self.mc.LD(loc, prev_loc) + self.mc.LDY(loc, prev_loc) return # move immediate value to memory elif loc.is_stack(): offset = loc.value - self.mc.LD(r.FP_SCRATCH, prev_loc) + self.mc.LDY(r.FP_SCRATCH, prev_loc) self.mc.STDY(r.FP_SCRATCH, l.addr(offset, r.SPP)) return assert 0, "not supported location" @@ -976,9 +978,8 @@ if gcrootmap: if gcrootmap.is_shadow_stack: if shadowstack_reg is None: - diff = mc.load_imm_plus(r.SPP, - gcrootmap.get_root_stack_top_addr()) - mc.load(r.SPP, r.SPP, diff) + diff = mc.load_imm(r.SPP, gcrootmap.get_root_stack_top_addr()) + mc.load(r.SPP, r.SPP, 0) shadowstack_reg = r.SPP mc.load(r.SPP, shadowstack_reg, -WORD) wbdescr = self.cpu.gc_ll_descr.write_barrier_descr @@ -1019,7 +1020,7 @@ # Build a new stackframe of size STD_FRAME_SIZE_IN_BYTES fpoff = JIT_ENTER_EXTRA_STACK_SPACE self.mc.STMG(r.r6, r.r15, l.addr(-fpoff+6*WORD, r.SP)) - # POOL self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - self.mc.get_relative_pos())) + self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - self.mc.get_relative_pos())) # f8 through f15 are saved registers (= non volatile) # TODO it would be good to detect if any float is used in the loop # and to skip this push/pop whenever no float operation occurs @@ -1046,38 +1047,39 @@ def _call_header_shadowstack(self, gcrootmap): # we need to put one word into the shadowstack: the jitframe (SPP) # we saved all registers to the stack - RCS1 = r.r2 - RCS2 = r.r3 - RCS3 = r.r4 + RCS1 = r.r3 + RCS2 = r.r4 + RCS3 = r.r5 mc = self.mc - diff = mc.load_imm_plus(RCS1, gcrootmap.get_root_stack_top_addr()) - mc.load(RCS2, RCS1, diff) # ld RCS2, [rootstacktop] + mc.load_imm(RCS1, gcrootmap.get_root_stack_top_addr()) + mc.load(RCS2, RCS1, 0) # ld RCS2, [rootstacktop] # mc.LGR(RCS3, RCS2) mc.AGHI(RCS3, l.imm(WORD)) # add RCS3, RCS2, WORD mc.store(r.SPP, RCS2, 0) # std SPP, RCS2 # - mc.store(RCS3, RCS1, diff) # std RCS3, [rootstacktop] + mc.store(RCS3, RCS1, 0) # std RCS3, [rootstacktop] def _call_footer_shadowstack(self, gcrootmap): # r6 -> r15 can be used freely, they will be restored by # _call_footer after this call - RCS1 = r.r9 - 
RCS2 = r.r10 + RCS1 = r.r8 + RCS2 = r.r7 mc = self.mc - diff = mc.load_imm_plus(RCS1, gcrootmap.get_root_stack_top_addr()) - mc.load(RCS2, RCS1, diff) # ld RCS2, [rootstacktop] + mc.load_imm(RCS1, gcrootmap.get_root_stack_top_addr()) + mc.load(RCS2, RCS1, 0) # ld RCS2, [rootstacktop] mc.AGHI(RCS2, l.imm(-WORD)) # sub RCS2, RCS2, WORD - mc.store(RCS2, RCS1, diff) # std RCS2, [rootstacktop] + mc.store(RCS2, RCS1, 0) # std RCS2, [rootstacktop] def _call_footer(self): - # the return value is the jitframe - self.mc.LGR(r.r2, r.SPP) gcrootmap = self.cpu.gc_ll_descr.gcrootmap if gcrootmap and gcrootmap.is_shadow_stack: self._call_footer_shadowstack(gcrootmap) + # the return value is the jitframe + self.mc.LGR(r.r2, r.SPP) + size = STD_FRAME_SIZE_IN_BYTES # f8 through f15 are saved registers (= non volatile) # TODO it would be good to detect if any float is used in the loop @@ -1180,11 +1182,9 @@ # ASSEMBLER EMISSION def emit_label(self, op, arglocs, regalloc): - pass - # POOL - #offset = self.pool.pool_start - self.mc.get_relative_pos() + offset = self.pool.pool_start - self.mc.get_relative_pos() # load the pool address at each label - #self.mc.LARL(r.POOL, l.halfword(offset)) + self.mc.LARL(r.POOL, l.halfword(offset)) def emit_jump(self, op, arglocs, regalloc): # The backend's logic assumes that the target code is in a piece of @@ -1201,7 +1201,7 @@ if descr in self.target_tokens_currently_compiling: # a label has a LARL instruction that does not need # to be executed, thus remove the first opcode - self.mc.b_offset(descr._ll_loop_code) # POOL + self.mc.LARL_byte_count) + self.mc.b_offset(descr._ll_loop_code + self.mc.LARL_byte_count) else: # POOL #offset = self.pool.get_descr_offset(descr) + \ @@ -1249,11 +1249,11 @@ gcmap = self._finish_gcmap else: gcmap = lltype.nullptr(jitframe.GCMAP) - self.load_gcmap(self.mc, r.r2, gcmap) + self.load_gcmap(self.mc, r.r9, gcmap) - self.mc.load_imm(r.r3, fail_descr_loc.getint()) - self.mc.STG(r.r3, l.addr(ofs, r.SPP)) - self.mc.STG(r.r2, l.addr(ofs2, r.SPP)) + self.mc.load_imm(r.r10, fail_descr_loc.getint()) + self.mc.STG(r.r9, l.addr(ofs2, r.SPP)) + self.mc.STG(r.r10, l.addr(ofs, r.SPP)) # exit function self._call_footer() diff --git a/rpython/jit/backend/zarch/codebuilder.py b/rpython/jit/backend/zarch/codebuilder.py --- a/rpython/jit/backend/zarch/codebuilder.py +++ b/rpython/jit/backend/zarch/codebuilder.py @@ -35,7 +35,6 @@ GuardToken.__init__(self, cpu, gcmap, descr, failargs, faillocs, guard_opnum, frame_depth) self.fcond = fcond - # POOL self._pool_offset = -1 class AbstractZARCHBuilder(object): diff --git a/rpython/jit/backend/zarch/helper/assembler.py b/rpython/jit/backend/zarch/helper/assembler.py --- a/rpython/jit/backend/zarch/helper/assembler.py +++ b/rpython/jit/backend/zarch/helper/assembler.py @@ -12,8 +12,7 @@ l1 = arglocs[1] assert not l0.is_imm() # do the comparison - # POOL self.mc.cmp_op(l0, l1, pool=l1.is_in_pool(), imm=l1.is_imm(), signed=signed, fp=fp) - self.mc.cmp_op(l0, l1, imm=l1.is_imm(), signed=signed, fp=fp) + self.mc.cmp_op(l0, l1, pool=l1.is_in_pool(), imm=l1.is_imm(), signed=signed, fp=fp) self.flush_cc(condition, arglocs[2]) @@ -30,31 +29,21 @@ f.name = 'emit_shift_' + func return f -def gen_emit_rr(rr_func): +def gen_emit_rr_rp(rr_func, rp_func): def f(self, op, arglocs, regalloc): l0, l1 = arglocs - getattr(self.mc, rr_func)(l0, l1) + if l1.is_in_pool(): + getattr(self.mc, rp_func)(l0, l1) + else: + getattr(self.mc, rr_func)(l0, l1) return f -# POOL -#def gen_emit_rr_or_rpool(rr_func, rp_func): -# """ the parameters can 
either be both in registers or -# the first is in the register, second in literal pool. -# """ -# def f(self, op, arglocs, regalloc): -# l0, l1 = arglocs -# if l1.is_imm() and not l1.is_in_pool(): -# assert 0, "logical imm must reside in pool!" -# if l1.is_in_pool(): -# getattr(self.mc, rp_func)(l0, l1) -# else: -# getattr(self.mc, rr_func)(l0, l1) -# return f - -def gen_emit_rr_rh_ri(rr_func, rh_func, ri_func): +def gen_emit_rr_rh_ri_rp(rr_func, rh_func, ri_func, rp_func): def emit(self, op, arglocs, regalloc): l0, l1 = arglocs - if l1.is_imm(): + if l1.is_in_pool(): + getattr(self.mc, rp_func)(l0, l1) + elif l1.is_imm(): if check_imm_value(l1.value): getattr(self.mc, rh_func)(l0, l1) else: @@ -63,27 +52,18 @@ getattr(self.mc, rr_func)(l0, l1) return emit -# POOL -#def gen_emit_imm_pool_rr(imm_func, pool_func, rr_func): -# def emit(self, op, arglocs, regalloc): -# l0, l1 = arglocs -# if l1.is_in_pool(): -# getattr(self.mc, pool_func)(l0, l1) -# elif l1.is_imm(): -# getattr(self.mc, imm_func)(l0, l1) -# else: -# getattr(self.mc, rr_func)(l0, l1) -# return emit - -def gen_emit_div_mod(rr_func): +def gen_emit_div_mod(rr_func, rp_func): def emit(self, op, arglocs, regalloc): lr, lq, l1 = arglocs # lr == remainer, lq == quotient # when entering the function lr contains the dividend # after this operation either lr or lq is used further assert not l1.is_imm(), "imm divider not supported" - # remainer is always a even register r0, r2, ... , r14 + # remainer is always an even register r0, r2, ... , r14 assert lr.is_even() assert lq.is_odd() self.mc.XGR(lr, lr) - getattr(self.mc,rr_func)(lr, l1) + if l1.is_in_pool(): + getattr(self.mc,rp_func)(lr, l1) + else: + getattr(self.mc,rr_func)(lr, l1) return emit diff --git a/rpython/jit/backend/zarch/helper/regalloc.py b/rpython/jit/backend/zarch/helper/regalloc.py --- a/rpython/jit/backend/zarch/helper/regalloc.py +++ b/rpython/jit/backend/zarch/helper/regalloc.py @@ -26,8 +26,7 @@ if check_imm32(a1): l1 = imm(a1.getint()) else: - # POOL l1 = self.ensure_reg_or_pool(a1) - l1 = self.ensure_reg(a1) + l1 = self.ensure_reg_or_pool(a1) l0 = self.force_result_in_reg(op, a0) return [l0, l1] @@ -39,7 +38,7 @@ if check_imm32(a1): l1 = imm(a1.getint()) else: - l1 = self.ensure_reg(a1) + l1 = self.ensure_reg_or_pool(a1) l0 = self.force_result_in_reg(op, a0) return [l0, l1] @@ -51,7 +50,7 @@ if check_imm32(a1): l1 = imm(a1.getint()) else: - l1 = self.ensure_reg(a1) + l1 = self.ensure_reg_or_pool(a1) lr,lq = self.rm.ensure_even_odd_pair(a0, op, bind_first=False) return [lr, lq, l1] @@ -61,7 +60,7 @@ a1 = op.getarg(1) l1 = self.ensure_reg(a1) if isinstance(a0, Const): - loc = self.ensure_reg(a0) + loc = self.ensure_reg_or_pool(a0) lr,lq = self.rm.ensure_even_odd_pair(a0, op, bind_first=modulus, must_exist=False, move_regs=False) @@ -78,7 +77,6 @@ a0 = op.getarg(0) a1 = op.getarg(1) # sub is not commotative, thus cannot swap operands - # POOL l1 = self.ensure_reg_or_pool(a1) l0 = self.ensure_reg(a0) l1 = self.ensure_reg(a1) res = self.force_allocate_reg(op) diff --git a/rpython/jit/backend/zarch/opassembler.py b/rpython/jit/backend/zarch/opassembler.py --- a/rpython/jit/backend/zarch/opassembler.py +++ b/rpython/jit/backend/zarch/opassembler.py @@ -3,7 +3,7 @@ STD_FRAME_SIZE_IN_BYTES) from rpython.jit.backend.zarch.arch import THREADLOCAL_ADDR_OFFSET from rpython.jit.backend.zarch.helper.assembler import (gen_emit_cmp_op, - gen_emit_rr, gen_emit_shift, gen_emit_rr_rh_ri, gen_emit_div_mod) + gen_emit_rr_rp, gen_emit_shift, gen_emit_rr_rh_ri_rp, gen_emit_div_mod) from 
rpython.jit.backend.zarch.helper.regalloc import (check_imm, check_imm_value) from rpython.jit.metainterp.history import (ConstInt) @@ -28,7 +28,7 @@ class IntOpAssembler(object): _mixin_ = True - emit_int_add = gen_emit_rr_rh_ri('AGR', 'AGHI', 'AGFI') + emit_int_add = gen_emit_rr_rh_ri_rp('AGR', 'AGHI', 'AGFI', 'AG') emit_int_add_ovf = emit_int_add emit_nursery_ptr_increment = emit_int_add @@ -36,25 +36,16 @@ def emit_int_sub(self, op, arglocs, regalloc): res, l0, l1 = arglocs self.mc.SGRK(res, l0, l1) - # POOL - #if l1.is_imm() and not l1.is_in_pool(): - # assert 0, "logical imm must reside in pool!" - #if l1.is_in_pool(): - # self.mc.SG(l0, l1) - #else: - # self.mc.SGR(l0, l1) emit_int_sub_ovf = emit_int_sub - emit_int_mul = gen_emit_rr_rh_ri('MSGR', 'MGHI', 'MSGFI') + emit_int_mul = gen_emit_rr_rh_ri_rp('MSGR', 'MGHI', 'MSGFI', 'MSG') def emit_int_mul_ovf(self, op, arglocs, regalloc): lr, lq, l1 = arglocs - # POOL - # if l1.is_in_pool(): - # self.mc.LG(r.SCRATCH, l1) - # l1 = r.SCRATCH - # elif - if l1.is_imm(): + if l1.is_in_pool(): + self.mc.LG(r.SCRATCH, l1) + l1 = r.SCRATCH + elif l1.is_imm(): self.mc.LGFI(r.SCRATCH, l1) l1 = r.SCRATCH else: @@ -169,11 +160,11 @@ omc.BRC(c.ANY, l.imm(label_end - jmp_neither_lqlr_overflow)) omc.overwrite() - emit_int_floordiv = gen_emit_div_mod('DSGR') - emit_uint_floordiv = gen_emit_div_mod('DLGR') + emit_int_floordiv = gen_emit_div_mod('DSGR', 'DSG') + emit_uint_floordiv = gen_emit_div_mod('DLGR', 'DLG') # NOTE division sets one register with the modulo value, thus # the regalloc ensures the right register survives. - emit_int_mod = gen_emit_div_mod('DSGR') + emit_int_mod = gen_emit_div_mod('DSGR', 'DSG') def emit_int_invert(self, op, arglocs, regalloc): l0, = arglocs @@ -213,9 +204,9 @@ self.mc.CGHI(l0, l.imm(0)) self.flush_cc(c.NE, res) - emit_int_and = gen_emit_rr("NGR") - emit_int_or = gen_emit_rr("OGR") - emit_int_xor = gen_emit_rr("XGR") + emit_int_and = gen_emit_rr_rp("NGR", "NG") + emit_int_or = gen_emit_rr_rp("OGR", "OG") + emit_int_xor = gen_emit_rr_rp("XGR", "XG") emit_int_rshift = gen_emit_shift("SRAG") emit_int_lshift = gen_emit_shift("SLLG") @@ -242,10 +233,10 @@ class FloatOpAssembler(object): _mixin_ = True - emit_float_add = gen_emit_rr('ADBR') - emit_float_sub = gen_emit_rr('SDBR') - emit_float_mul = gen_emit_rr('MDBR') - emit_float_truediv = gen_emit_rr('DDBR') + emit_float_add = gen_emit_rr_rp('ADBR', 'ADB') + emit_float_sub = gen_emit_rr_rp('SDBR', 'SDB') + emit_float_mul = gen_emit_rr_rp('MDBR', 'MDB') + emit_float_truediv = gen_emit_rr_rp('DDBR', 'DDB') # Support for NaNs: S390X sets condition code to 0x3 (unordered) # whenever any operand is nan. 
@@ -1072,7 +1063,7 @@ self._store_force_index(self._find_nearby_operation(regalloc, +1)) # 'result_loc' is either r2, f0 or None self.call_assembler(op, argloc, vloc, result_loc, r.r2) - # POOL self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - self.mc.get_relative_pos())) + self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - self.mc.get_relative_pos())) emit_call_assembler_i = _genop_call_assembler emit_call_assembler_r = _genop_call_assembler diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -1,5 +1,6 @@ from rpython.jit.backend.zarch import registers as r from rpython.jit.backend.zarch import locations as l +from rpython.rlib import rgil from rpython.jit.metainterp.history import (INT, REF, FLOAT, TargetToken) from rpython.rlib.objectmodel import we_are_translated @@ -19,81 +20,25 @@ self.size = 0 # the offset to index the pool self.pool_start = 0 - self.label_offset = 0 - self.label_count = 0 # for constant offsets self.offset_map = {} # for descriptors self.offset_descr = {} - self.constant_64_zeros = -1 - self.constant_64_ones = -1 - self.constant_64_sign_bit = -1 - self.constant_max_64_positive = -1 + + def reset(self): + self.pool_start = 0 + self.size = 0 + self.offset_map = {} + self.offset_descr = {} def ensure_can_hold_constants(self, asm, op): - opnum = op.getopnum() - if op.is_guard(): - # 1x gcmap pointer - # 1x target address - self.offset_descr[op.getdescr()] = self.size - self.allocate_slot(2*8) - elif op.getopnum() == rop.JUMP: - descr = op.getdescr() - if descr not in asm.target_tokens_currently_compiling: - # this is a 'long' jump instead of a relative jump - self.offset_descr[descr] = self.size - self.allocate_slot(8) - elif op.getopnum() == rop.LABEL: - descr = op.getdescr() - if descr not in asm.target_tokens_currently_compiling: - # this is a 'long' jump instead of a relative jump - self.offset_descr[descr] = self.size - self.allocate_slot(8) - elif op.getopnum() == rop.INT_INVERT: - self.constant_64_ones = 1 # we need constant ones!!! - elif op.getopnum() == rop.INT_MUL_OVF: - self.constant_64_sign_bit = 1 - self.constant_max_64_positive = 1 - elif opnum == rop.INT_RSHIFT or opnum == rop.INT_LSHIFT or \ - opnum == rop.UINT_RSHIFT: - a0 = op.getarg(0) - if a0.is_constant(): - self.reserve_literal(8, a0) + # allocates 8 bytes in memory for pointers, long integers or floats + if op.is_jit_debug(): return - elif opnum == rop.GC_STORE or opnum == rop.GC_STORE_INDEXED: - arg = op.getarg(0) - if arg.is_constant(): - self.reserve_literal(8, arg) - arg = op.getarg(1) - if arg.is_constant(): - self.reserve_literal(8, arg) - arg = op.getarg(2) - if arg.is_constant(): - self.reserve_literal(8, arg) - return - elif opnum in (rop.GC_LOAD_F, - rop.GC_LOAD_I, - rop.GC_LOAD_R,) \ - or opnum in (rop.GC_LOAD_INDEXED_F, - rop.GC_LOAD_INDEXED_R, - rop.GC_LOAD_INDEXED_I,): - arg = op.getarg(0) - if arg.is_constant(): - self.reserve_literal(8, arg) - arg = op.getarg(1) - if arg.is_constant(): - self.reserve_literal(8, arg) - return - elif op.is_call_release_gil(): - for arg in op.getarglist()[1:]: - if arg.is_constant(): - self.reserve_literal(8, arg) - return - elif opnum == rop.COND_CALL_GC_WB_ARRAY: - self.constant_64_ones = 1 # we need constant ones!!! 
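# With the per-opcode special cases above gone, the pool bookkeeping reduces
# to a deduplicating value -> offset map; a rough sketch of what
# _ensure_value/allocate_slot amount to (illustrative only -- it ignores the
# asm.mc.write_i64() side effect and the small-integer fast path that is
# left to LGHI/LGFI):
#
#     offset_map = {}
#     size = 0
#     def ensure_value(uvalue):
#         global size
#         if uvalue not in offset_map:
#             offset_map[uvalue] = size   # first use: hand out a fresh slot
#             size += 8                   # one 8-byte slot per unique constant
#         return offset_map[uvalue]
#
# Each constant is then addressed as a displacement off r.POOL, which
# emit_label reloads with LARL at every label.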
+ for arg in op.getarglist(): if arg.is_constant(): - self.reserve_literal(8, arg) + self.reserve_literal(8, arg, asm) def contains_constant(self, unique_val): return unique_val in self.offset_map @@ -101,6 +46,10 @@ def get_descr_offset(self, descr): return self.offset_descr[descr] + def contains_box(self, box): + uvalue = self.unique_value(box) + return self.contains_constant(uvalue) + def get_offset(self, box): assert box.is_constant() uvalue = self.unique_value(box) @@ -108,11 +57,6 @@ assert self.offset_map[uvalue] >= 0 return self.offset_map[uvalue] - def get_direct_offset(self, unique_val): - """ Get the offset directly using a unique value, - use get_offset if you have a Const box """ - return self.offset_map[unique_val] - def unique_value(self, val): if val.type == FLOAT: if val.getfloat() == 0.0: @@ -124,21 +68,14 @@ assert val.type == REF return rffi.cast(lltype.Signed, val.getref_base()) - def reserve_literal(self, size, box): + def reserve_literal(self, size, box, asm): uvalue = self.unique_value(box) - if uvalue not in self.offset_map: - self.offset_map[uvalue] = self.size - self.allocate_slot(size) - - def reset(self): - self.pool_start = 0 - self.label_offset = 0 - self.size = 0 - self.offset_map = {} - self.constant_64_zeros = -1 - self.constant_64_ones = -1 - self.constant_64_sign_bit = -1 - self.constant_max_64_positive = -1 + if box.type == INT and -2**31 <= uvalue <= 2**31-1: + # we do not allocate non 64 bit values, these + # can be loaded as imm by LGHI/LGFI + return + # + self._ensure_value(uvalue, asm) def check_size(self, size=-1): if size == -1: @@ -149,18 +86,19 @@ llop.debug_print(lltype.Void, msg) raise PoolOverflow(msg) + def _ensure_value(self, uvalue, asm): + if uvalue not in self.offset_map: + self.offset_map[uvalue] = self.size + self.allocate_slot(8) + asm.mc.write_i64(uvalue) + return self.offset_map[uvalue] + def allocate_slot(self, size): val = self.size + size self.check_size(val) self.size = val assert val >= 0 - def ensure_value(self, val): - if val not in self.offset_map: - self.offset_map[val] = self.size - self.allocate_slot(8) - return self.offset_map[val] - def pre_assemble(self, asm, operations, bridge=False): # O(len(operations)). I do not think there is a way # around this. @@ -179,27 +117,34 @@ self.pool_start = asm.mc.get_relative_pos() for op in operations: self.ensure_can_hold_constants(asm, op) - self.ensure_value(asm.cpu.pos_exc_value()) + self._ensure_value(asm.cpu.pos_exc_value(), asm) + # the top of shadow stack + gcrootmap = asm.cpu.gc_ll_descr.gcrootmap + if gcrootmap and gcrootmap.is_shadow_stack: + self._ensure_value(gcrootmap.get_root_stack_top_addr(), asm) + # endaddr of insert stack check + endaddr, lengthaddr, _ = asm.cpu.insert_stack_check() + self._ensure_value(endaddr, asm) + # fast gil + fastgil = rffi.cast(lltype.Signed, rgil.gil_fetch_fastgil()) + self._ensure_value(fastgil, asm) # TODO add more values that are loaded with load_imm - if self.size == 0: - # no pool needed! 
- return - assert self.size % 2 == 0, "not aligned properly" - if self.constant_64_ones != -1: - self.constant_64_ones = self.ensure_value(-1) - if self.constant_64_zeros != -1: - self.constant_64_zeros = self.ensure_value(0x0) - if self.constant_64_sign_bit != -1: - self.constant_64_sign_bit = self.ensure_value(-2**63) # == 0x8000000000000000 - if self.constant_max_64_positive != -1: - self.constant_max_64_positive = self.ensure_value(0x7fffFFFFffffFFFF) - asm.mc.write('\x00' * self.size) - wrote = 0 - for val, offset in self.offset_map.items(): - self.overwrite_64(asm.mc, offset, val) - wrote += 8 - def overwrite_64(self, mc, index, value): + # XXX def post_assemble(self, asm): + # XXX mc = asm.mc + # XXX pending_guard_tokens = asm.pending_guard_tokens + # XXX if self.size == 0: + # XXX return + # XXX for guard_token in pending_guard_tokens: + # XXX descr = guard_token.faildescr + # XXX offset = self.offset_descr[descr] + # XXX assert isinstance(offset, int) + # XXX assert offset >= 0 + # XXX assert guard_token._pool_offset != -1 + # XXX ptr = rffi.cast(lltype.Signed, guard_token.gcmap) + # XXX self._overwrite_64(mc, offset + RECOVERY_GCMAP_POOL_OFFSET, ptr) + + def _overwrite_64(self, mc, index, value): index += self.pool_start mc.overwrite(index, chr(value >> 56 & 0xff)) @@ -210,17 +155,3 @@ mc.overwrite(index+5, chr(value >> 16 & 0xff)) mc.overwrite(index+6, chr(value >> 8 & 0xff)) mc.overwrite(index+7, chr(value & 0xff)) - - def post_assemble(self, asm): - mc = asm.mc - pending_guard_tokens = asm.pending_guard_tokens - if self.size == 0: - return - for guard_token in pending_guard_tokens: - descr = guard_token.faildescr - offset = self.offset_descr[descr] - assert isinstance(offset, int) - assert offset >= 0 - assert guard_token._pool_offset != -1 - ptr = rffi.cast(lltype.Signed, guard_token.gcmap) - self.overwrite_64(mc, offset + RECOVERY_GCMAP_POOL_OFFSET, ptr) diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -62,44 +62,24 @@ assert set(save_around_call_regs).issubset(all_regs) pool = None - def convert_to_adr(self, c): - assert isinstance(c, ConstFloat) - adr = self.assembler.datablockwrapper.malloc_aligned(8, 8) - x = c.getfloatstorage() - rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE), adr)[0] = x - return adr - - def convert_to_imm(self, c): - adr = self.convert_to_adr(c) - return l.ConstFloatLoc(adr) - - # POOL - #def convert_to_imm(self, c): - # off = self.pool.get_offset(c) - # return l.pool(off, float=True) - def __init__(self, longevity, frame_manager=None, assembler=None): RegisterManager.__init__(self, longevity, frame_manager, assembler) def call_result_location(self, v): return r.FPR_RETURN - # POOL - # def place_in_pool(self, var): - # offset = self.assembler.pool.get_offset(var) - # return l.pool(offset, float=True) + def convert_to_imm(self, c): + return l.pool(self.assembler.pool.get_offset(c), float=True) - # POOL - #def ensure_reg_or_pool(self, box): - # if isinstance(box, Const): - # loc = self.get_scratch_reg() - # immvalue = self.convert_to_int(box) - # self.assembler.mc.load_imm(loc, immvalue) - # else: - # assert box in self.temp_boxes - # loc = self.make_sure_var_in_reg(box, - # forbidden_vars=self.temp_boxes) - # return loc + def ensure_reg_or_pool(self, box): + if isinstance(box, Const): + offset = self.assembler.pool.get_offset(box) + return l.pool(offset, float=True) + else: + assert box in self.temp_boxes + loc = 
self.make_sure_var_in_reg(box, + forbidden_vars=self.temp_boxes) + return loc def get_scratch_reg(self): box = TempVar() @@ -109,21 +89,14 @@ def ensure_reg(self, box): if isinstance(box, Const): - # POOL - #poolloc = self.place_in_pool(box) - #tmp = TempVar() - #reg = self.force_allocate_reg(tmp, self.temp_boxes) - #self.temp_boxes.append(tmp) - #assert poolloc.displace >= 0 - #if poolloc.displace <= 2**12-1: - # self.assembler.mc.LD(reg, poolloc) - #else: - # self.assembler.mc.LDY(reg, poolloc) - loc = self.get_scratch_reg() - immadrvalue = self.convert_to_adr(box) - mc = self.assembler.mc - mc.load_imm(r.SCRATCH, immadrvalue) - mc.LD(loc, l.addr(0, r.SCRATCH)) + offset = self.assembler.pool.get_offset(box) + poolloc = l.pool(offset, float=True) + reg = self.get_scratch_reg() + if poolloc.displace <= 2**11-1: + self.assembler.mc.LD(reg, poolloc) + else: + self.assembler.mc.LDY(reg, poolloc) + return reg else: assert box in self.temp_boxes loc = self.make_sure_var_in_reg(box, @@ -159,32 +132,25 @@ assert isinstance(c, ConstPtr) return rffi.cast(lltype.Signed, c.value) + def ensure_reg_or_pool(self, box): + if isinstance(box, Const): + if self.assembler.pool.contains_box(box): + offset = self.assembler.pool.get_offset(box) + return l.pool(offset) + else: + return self.ensure_reg(box) + else: + assert box in self.temp_boxes + loc = self.make_sure_var_in_reg(box, + forbidden_vars=self.temp_boxes) + return loc + def convert_to_imm(self, c): - val = self.convert_to_int(c) - return l.imm(val) + if self.assembler.pool.contains_box(c): + return l.pool(self.assembler.pool.get_offset(c)) + immvalue = self.convert_to_int(c) + return l.imm(immvalue) - # POOL - #def convert_to_imm(self, c): - # off = self.pool.get_offset(c) - # return l.pool(off) - - #def ensure_reg_or_pool(self, box): - # if isinstance(box, Const): - # offset = self.assembler.pool.get_offset(box) - # return l.pool(offset) - # else: - # assert box in self.temp_boxes - # loc = self.make_sure_var_in_reg(box, - # forbidden_vars=self.temp_boxes) - # return loc - - # POOL - #offset = self.assembler.pool.get_offset(box) - #poolloc = l.pool(offset) - #tmp = TempInt() - #reg = self.force_allocate_reg(tmp, forbidden_vars=self.temp_boxes) - #self.temp_boxes.append(tmp) - #self.assembler.mc.LG(reg, poolloc) def ensure_reg(self, box): if isinstance(box, Const): loc = self.get_scratch_reg() @@ -388,10 +354,10 @@ self.rm = ZARCHRegisterManager(self.longevity, frame_manager = self.fm, assembler = self.assembler) - #self.rm.pool = self.assembler.pool + self.rm.pool = self.assembler.pool self.fprm = FPRegisterManager(self.longevity, frame_manager = self.fm, assembler = self.assembler) - #self.fprm.pool = self.assembler.pool + self.fprm.pool = self.assembler.pool return operations def prepare_loop(self, inputargs, operations, looptoken, allgcrefs): @@ -607,12 +573,11 @@ else: return self.rm.call_result_location(v) - # POOL - #def ensure_reg_or_pool(self, box): - # if box.type == FLOAT: - # return self.fprm.ensure_reg_or_pool(box) - # else: - # return self.rm.ensure_reg_or_pool(box) + def ensure_reg_or_pool(self, box): + if box.type == FLOAT: + return self.fprm.ensure_reg_or_pool(box) + else: + return self.rm.ensure_reg_or_pool(box) def ensure_reg(self, box): if box.type == FLOAT: diff --git a/rpython/jit/backend/zarch/registers.py b/rpython/jit/backend/zarch/registers.py --- a/rpython/jit/backend/zarch/registers.py +++ b/rpython/jit/backend/zarch/registers.py @@ -7,7 +7,7 @@ [r0,r1,r2,r3,r4,r5,r6,r7,r8, r9,r10,r11,r12,r13,r14,r15] = registers 
-MANAGED_REGS = [r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r13] # keep this list sorted (asc)! +MANAGED_REGS = [r2,r3,r4,r5,r6,r7,r8,r9,r10,r11] # keep this list sorted (asc)! MANAGED_REG_PAIRS = [(r2,r3), (r4,r5), (r6,r7), (r8,r9), (r10,r11)] VOLATILES = [r2,r3,r4,r5,r6] SP = r15 @@ -39,6 +39,7 @@ for _r in MANAGED_FP_REGS: ALL_REG_INDEXES[_r] = len(ALL_REG_INDEXES) # NOT used, but keeps JITFRAME_FIXED_SIZE even +ALL_REG_INDEXES[f15] = len(ALL_REG_INDEXES) JITFRAME_FIXED_SIZE = len(ALL_REG_INDEXES) def odd_reg(r): diff --git a/rpython/jit/backend/zarch/test/test_pool.py b/rpython/jit/backend/zarch/test/test_pool.py --- a/rpython/jit/backend/zarch/test/test_pool.py +++ b/rpython/jit/backend/zarch/test/test_pool.py @@ -12,13 +12,18 @@ from rpython.jit.backend.detect_cpu import getcpuclass from rpython.jit.tool.oparser import parse +class FakeAsm(object): + def write_i64(self, val): + pass + class TestPoolZARCH(object): def setup_class(self): self.calldescr = None def setup_method(self, name): self.pool = LiteralPool() - self.asm = None + self.asm = FakeAsm() + self.asm.mc = FakeAsm() self.cpu = getcpuclass()(None, None) self.cpu.setup_once() @@ -34,20 +39,20 @@ return False def test_constant_in_call_malloc(self): - c = ConstPtr(rffi.cast(llmemory.GCREF, 0xdeadbeef)) + c = ConstPtr(rffi.cast(llmemory.GCREF, 0xdeadbeef1234)) self.ensure_can_hold(rop.CALL_MALLOC_GC, [c], descr=self.calldescr) assert self.const_in_pool(c) - assert self.const_in_pool(ConstPtr(rffi.cast(llmemory.GCREF, 0xdeadbeef))) + assert self.const_in_pool(ConstPtr(rffi.cast(llmemory.GCREF, 0xdeadbeef1234))) @py.test.mark.parametrize('opnum', [rop.INT_ADD, rop.INT_SUB, rop.INT_MUL]) def test_constants_arith(self, opnum): for c1 in [ConstInt(1), ConstInt(2**44), InputArgInt(1)]: - for c2 in [InputArgInt(1), ConstInt(1), ConstInt(2**55)]: + for c2 in [InputArgInt(1), ConstInt(-2**33), ConstInt(2**55)]: self.ensure_can_hold(opnum, [c1,c2]) - if c1.is_constant(): + if c1.is_constant() and not -2**31 <= c1.getint() <= 2**31-1: assert self.const_in_pool(c1) - if c2.is_constant(): + if c2.is_constant() and not -2**31 <= c1.getint() <= 2**31-1: assert self.const_in_pool(c2) def test_pool_overflow(self): diff --git a/rpython/jit/backend/zarch/test/test_runner.py b/rpython/jit/backend/zarch/test/test_runner.py --- a/rpython/jit/backend/zarch/test/test_runner.py +++ b/rpython/jit/backend/zarch/test/test_runner.py @@ -24,6 +24,6 @@ cpu.setup_once() return cpu - add_loop_instructions = "lg; lgr; agr; cgfi; jge; j;$" - bridge_loop_instructions = "lg; cgfi; jnl; lghi; " \ - "iilf;( iihf;)? iilf;( iihf;)? basr; iilf;( iihf;)? br;$" + add_loop_instructions = "lg; lgr; larl; agr; cgfi; jge; j;$" + bridge_loop_instructions = "larl; lg; cgfi; jnl; lghi; " \ + "(lgfi|iilf);( iihf;)? (lgfi|iilf);( iihf;)? basr; (lgfi|iilf);( iihf;)? 
br;$" From pypy.commits at gmail.com Wed Mar 9 03:53:52 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 09 Mar 2016 00:53:52 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: merged default Message-ID: <56dfe4a0.e853c20a.2f740.34bb@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82900:acc1954c74fa Date: 2016-03-09 09:52 +0100 http://bitbucket.org/pypy/pypy/changeset/acc1954c74fa/ Log: merged default diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -240,6 +240,7 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner + Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -271,6 +272,7 @@ Aaron Tubbs Ben Darnell Roberto De Ioris + Logan Chien Juan Francisco Cantero Hurtado Ruochen Huang Jeong YunWon diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -123,7 +123,7 @@ # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +html_title = 'PyPy documentation' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -210,6 +210,7 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner + Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -241,6 +242,7 @@ Aaron Tubbs Ben Darnell Roberto De Ioris + Logan Chien Juan Francisco Cantero Hurtado Ruochen Huang Jeong YunWon diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -265,7 +265,7 @@ return False def evil(y): - d = {x(): 1} + d = {X(): 1} X.__eq__ = __evil_eq__ d[y] # might trigger a call to __eq__? diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -2,21 +2,21 @@ PyPy 5.0 ======== -We have released PyPy 5.0, about three months after PyPy 4.0.1. +We have released PyPy 5.0, about three months after PyPy 4.0.1. We encourage all users of PyPy to update to this version. Apart from the usual bug fixes, there is an ongoing effort to improve the warmup time and memory usage of JIT-related metadata. The exact effects depend vastly on the program you're running and can range from insignificant to warmup being up to 30% -faster and memory dropping by about 30%. +faster and memory dropping by about 30%. We also merged a major upgrade to our C-API layer (cpyext), simplifying the interaction between c-level objects and PyPy interpreter level objects. As a -result, lxml with its cython compiled component `passes all tests`_ on PyPy +result, lxml (prerelease) with its cython compiled component +`passes all tests`_ on PyPy. The new cpyext is also much faster. -Users who have gotten used to vmprof_ on Linux, and those on other platforms -who have not yet tried its awesomeness, will be happy to hear that vmprof -now just works on MacOS and Windows too, in both PyPy (built-in support) and -CPython (as an installed module). +vmprof_ has been a go-to profiler for PyPy on linux for a few releases +and we're happy to announce that thanks to the cooperation with jetbrains, +vmprof now works on Linux, OS X and Windows on both PyPy and CPython. You can download the PyPy 5.0 release here: @@ -25,20 +25,20 @@ We would like to thank our donors for the continued support of the PyPy project. 
-We would also like to thank our contributors and +We would also like to thank our contributors and encourage new people to join the project. PyPy has many layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation -improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ -with making RPython's JIT even better. +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. CFFI ==== While not applicable only to PyPy, `cffi`_ is arguably our most significant -contribution to the python ecosystem. PyPy 5.0 ships with -`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a c program. +contribution to the python ecosystem. PyPy 5.0 ships with +`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a C program. -.. _`PyPy`: http://doc.pypy.org +.. _`PyPy`: http://doc.pypy.org .. _`RPython`: https://rpython.readthedocs.org .. _`cffi`: https://cffi.readthedocs.org .. _`cffi-1.5.2`: http://cffi.readthedocs.org/en/latest/whatsnew.html#v1-5-2 @@ -52,18 +52,18 @@ ============= PyPy is a very compliant Python interpreter, almost a drop-in replacement for -CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison) +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) due to its integrated tracing JIT compiler. We also welcome developers of other `dynamic languages`_ to see what RPython can do for them. This release supports **x86** machines on most common operating systems -(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, freebsd), +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the -big- and little-endian variants of **ppc64** running Linux. +big- and little-endian variants of **PPC64** running Linux. -.. _`pypy and cpython 2.7.x`: http://speed.pypy.org +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org .. _`dynamic languages`: http://pypyjs.org Other Highlights (since 4.0.1 released in November 2015) @@ -103,7 +103,7 @@ * More completely support datetime, optimize timedelta creation - * Fix for issue 2185 which caused an inconsistent list of operations to be + * Fix for issue #2185 which caused an inconsistent list of operations to be generated by the unroller, appeared in a complicated DJango app * Fix an elusive issue with stacklets on shadowstack which showed up when @@ -128,9 +128,6 @@ * Fix for corner case (likely shown by Krakatau) for consecutive guards with interdependencies - * Fix applevel bare class method comparisons which should fix pretty printing - in IPython - * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy @@ -150,49 +147,60 @@ * Support partition() as an app-level function, together with a cffi wrapper in pypy/numpy, this now provides partial support for partition() -* Performance improvements and refactorings: +* Performance improvements: - * Refactor and improve exception analysis in the annotator - - * Remove unnecessary special handling of space.wrap(). + * Optimize global lookups * Improve the memory signature of numbering instances in the JIT. This should massively decrease the amount of memory consumed by the JIT, which is significant for most programs. 
Also compress the numberings using variable- size encoding + * Optimize string concatenation + + * Use INT_LSHIFT instead of INT_MUL when possible + + * Improve struct.unpack by casting directly from the underlying buffer. + Unpacking floats and doubles is about 15 times faster, and integer types + about 50% faster (on 64 bit integers). This was then subsequently + improved further in optimizeopt.py. + + * Optimize two-tuple lookups in mapdict, which improves warmup of instance + variable access somewhat + + * Reduce all guards from int_floordiv_ovf if one of the arguments is constant + + * Identify permutations of attributes at instance creation, reducing the + number of bridges created + + * Greatly improve re.sub() performance + + +* Internal refactorings: + + * Refactor and improve exception analysis in the annotator + + * Remove unnecessary special handling of space.wrap(). + * Support list-resizing setslice operations in RPython * Tweak the trace-too-long heuristic for multiple jit drivers * Refactor bookkeeping (such a cool word - three double letters) in the annotater - + * Refactor wrappers for OS functions from rtyper to rlib and simplify them * Simplify backend loading instructions to only use four variants - * Optimize string concatination - * Simplify GIL handling in non-jitted code - * Use INT_LSHIFT instead of INT_MUL when possible - - * Improve struct.unpack by casting directly from the underlying buffer. - Unpacking floats and doubles is about 15 times faster, and integer types - about 50% faster (on 64 bit integers). This was then subsequently - improved further in optimizeopt.py. - * Refactor naming in optimizeopt * Change GraphAnalyzer to use a more precise way to recognize external functions and fix null pointer handling, generally clean up external function handling - * Optimize global lookups - - * Optimize two-tuple lookups in mapdict - * Remove pure variants of ``getfield_gc_*`` operations from the JIT by determining purity while tracing @@ -203,17 +211,10 @@ * Refactor rtyper debug code into python.rtyper.debug * Seperate structmember.h from Python.h Also enhance creating api functions - to specify which header file they appear in (previously only pypy_decl.h) - - * Reduce all guards from int_floordiv_ovf if one of the arguments is constant + to specify which header file they appear in (previously only pypy_decl.h) * Fix tokenizer to enforce universal newlines, needed for Python 3 support - * Identify permutations of attributes at instance creation, reducing the - number of bridges created - - * Greatly improve re.sub() performance - .. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html .. _`hypothesis`: http://hypothesis.readthedocs.org .. 
_`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -60,10 +60,10 @@ Must be smaller than 1.0 """ w_modules = space.sys.get('modules') - if space.contains_w(w_modules, space.wrap('_continuation')): - space.warn(space.wrap("Using _continuation/greenlet/stacklet together " - "with vmprof will crash"), - space.w_RuntimeWarning) + #if space.contains_w(w_modules, space.wrap('_continuation')): + # space.warn(space.wrap("Using _continuation/greenlet/stacklet together " + # "with vmprof will crash"), + # space.w_RuntimeWarning) try: rvmprof.enable(fileno, period) except rvmprof.VMProfError, e: diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -1,7 +1,7 @@ # Edit these appropriately before running this script -maj=4 +maj=5 min=0 -rev=1 +rev=0 # This script will download latest builds from the buildmaster, rename the top # level directory, and repackage ready to be uploaded to bitbucket. It will also # download source, assuming a tag for the release already exists, and repackage them. diff --git a/requirements.txt b/requirements.txt --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,3 @@ # hypothesis is used for test generation on untranslated jit tests hypothesis - +enum>=0.4.6 # is a dependency, but old pip does not pick it up diff --git a/rpython/doc/conf.py b/rpython/doc/conf.py --- a/rpython/doc/conf.py +++ b/rpython/doc/conf.py @@ -59,7 +59,7 @@ # General information about the project. project = u'RPython' -copyright = u'2015, The PyPy Project' +copyright = u'2016, The PyPy Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -121,7 +121,7 @@ # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +html_title = 'RPython Documentation' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None diff --git a/rpython/jit/tool/traceviewer.py b/rpython/jit/tool/traceviewer.py --- a/rpython/jit/tool/traceviewer.py +++ b/rpython/jit/tool/traceviewer.py @@ -103,9 +103,9 @@ self.last_guard = -1 else: # guards can be out of order nowadays - groups = sorted(groups) - self.first_guard = guard_number(groups[0]) - self.last_guard = guard_number(groups[-1]) + groups = sorted(map(guard_number, groups)) + self.first_guard = groups[0] + self.last_guard = groups[-1] content = property(get_content, set_content) diff --git a/rpython/rlib/rvmprof/src/vmprof_getpc.h b/rpython/rlib/rvmprof/src/vmprof_getpc.h --- a/rpython/rlib/rvmprof/src/vmprof_getpc.h +++ b/rpython/rlib/rvmprof/src/vmprof_getpc.h @@ -54,6 +54,7 @@ // It will cause problems for FreeBSD though!, because it turns off // the needed __BSD_VISIBLE. 
#ifdef __APPLE__ +#include #define _XOPEN_SOURCE 500 #endif @@ -144,7 +145,11 @@ #else intptr_t GetPC(ucontext_t *signal_ucontext) { #ifdef __APPLE__ +#if ((ULONG_MAX) == (UINT_MAX)) + return (signal_ucontext->uc_mcontext->__ss.__eip); +#else return (signal_ucontext->uc_mcontext->__ss.__rip); +#endif #else return signal_ucontext->PC_FROM_UCONTEXT; // defined in config.h #endif diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -547,6 +547,11 @@ relpypath = localpath.relto(pypkgpath.dirname) assert relpypath, ("%r should be relative to %r" % (localpath, pypkgpath.dirname)) + if len(relpypath.split(os.path.sep)) > 2: + # pypy detail to agregate the c files by directory, + # since the enormous number of files was causing + # memory issues linking on win32 + return os.path.split(relpypath)[0] + '.c' return relpypath.replace('.py', '.c') return None if hasattr(node.obj, 'graph'): diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -37,6 +37,7 @@ # include #endif +RPY_EXPORTED void rpython_startup_code(void) { #ifdef RPY_WITH_GIL diff --git a/rpython/translator/c/test/test_genc.py b/rpython/translator/c/test/test_genc.py --- a/rpython/translator/c/test/test_genc.py +++ b/rpython/translator/c/test/test_genc.py @@ -596,7 +596,7 @@ t.context._graphof(foobar_fn).inhibit_tail_call = True t.source_c() lines = t.driver.cbuilder.c_source_filename.join('..', - 'rpython_translator_c_test_test_genc.c').readlines() + 'rpython_translator_c_test.c').readlines() for i, line in enumerate(lines): if '= pypy_g_foobar_fn' in line: break diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -81,7 +81,7 @@ # # verify that the executable re-export symbols, but not too many if sys.platform.startswith('linux') and not kwds.get('shared', False): - seen_main = False + seen = set() g = os.popen("objdump -T '%s'" % builder.executable_name, 'r') for line in g: if not line.strip(): @@ -91,8 +91,8 @@ name = line.split()[-1] if name.startswith('__'): continue + seen.add(name) if name == 'main': - seen_main = True continue if name == 'pypy_debug_file': # ok to export this one continue @@ -104,7 +104,9 @@ "declaration of this C function or global variable" % (name,)) g.close() - assert seen_main, "did not see 'main' exported" + # list of symbols that we *want* to be exported: + for name in ['main', 'pypy_debug_file', 'rpython_startup_code']: + assert name in seen, "did not see '%r' exported" % name # return t, builder @@ -123,9 +125,9 @@ # Verify that the generated C files have sane names: gen_c_files = [str(f) for f in cbuilder.extrafiles] - for expfile in ('rpython_rlib_rposix.c', - 'rpython_rtyper_lltypesystem_rstr.c', - 'rpython_translator_c_test_test_standalone.c'): + for expfile in ('rpython_rlib.c', + 'rpython_rtyper_lltypesystem.c', + 'rpython_translator_c_test.c'): assert cbuilder.targetdir.join(expfile) in gen_c_files def test_print(self): diff --git a/rpython/translator/interactive.py b/rpython/translator/interactive.py --- a/rpython/translator/interactive.py +++ b/rpython/translator/interactive.py @@ -32,12 +32,6 @@ self.context.viewcg() def ensure_setup(self, argtypes=None, policy=None): - standalone = argtypes is None - if standalone: - 
assert argtypes is None - else: - if argtypes is None: - argtypes = [] self.driver.setup(self.entry_point, argtypes, policy, empty_translator=self.context) self.ann_argtypes = argtypes From pypy.commits at gmail.com Wed Mar 9 04:40:41 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 09 Mar 2016 01:40:41 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: removed skip from signal test Message-ID: <56dfef99.85b01c0a.7a902.5f27@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82901:2d40f0c2257e Date: 2016-03-09 10:39 +0100 http://bitbucket.org/pypy/pypy/changeset/2d40f0c2257e/ Log: removed skip from signal test diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -2,12 +2,6 @@ from pypy.module.thread.test.support import GenericTestThread -import os -if os.uname()[4] == 's390x': - # TMP!!! - import py - py.test.skip("skip for now s390x") - class AppTestMinimal: spaceconfig = dict(usemodules=['__pypy__']) From pypy.commits at gmail.com Wed Mar 9 04:58:13 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 09 Mar 2016 01:58:13 -0800 (PST) Subject: [pypy-commit] buildbot default: replaced python vm for building pypy with pypy! builds for s390x now spawn NOT at the same time, but shifted by 2-3h Message-ID: <56dff3b5.2968c20a.fdbf4.5eb9@mx.google.com> Author: Richard Plangger Branch: Changeset: r991:2041ec4f50e8 Date: 2016-03-09 10:57 +0100 http://bitbucket.org/pypy/buildbot/changeset/2041ec4f50e8/ Log: replaced python vm for building pypy with pypy! builds for s390x now spawn NOT at the same time, but shifted by 2-3h diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -66,8 +66,7 @@ platform='linux64') pypyTranslatedAppLevelTestFactoryS390X = pypybuilds.Translated(lib_python=True, app_tests=True, - platform='s390x', - interpreter='python') + platform='s390x') # these are like the two above: the only difference is that they only run # lib-python tests,not -A tests @@ -156,9 +155,7 @@ targetArgs=[], lib_python=True, pypyjit=True, - app_tests=True, - interpreter='python', # use cpython for now - ) + app_tests=True) pypyJITBenchmarkFactory_tannit = pypybuilds.JITBenchmark(host='tannit') pypyJITBenchmarkFactory64_tannit = pypybuilds.JITBenchmark(platform='linux64', @@ -289,12 +286,6 @@ PYPYBUILDBOT # on cobra ], branch='default', hour=0, minute=0), - Nightly("nightly-0-01", [ - LINUX_S390X, # vm (ibm-research) - JITLINUX_S390X, # vm (ibm-research) - APPLVLLINUX_S390X, # vm (ibm-research) - ], branch='s390x-backend', hour=2, minute=0), - Nightly("nightly-1-00", [ LINUX64, # on speed-old, uses all cores JITBENCH, # on tannit32, uses 1 core (in part exclusively) @@ -316,6 +307,11 @@ APPLVLLINUX64, # on speed-old, uses 1 core ], branch="py3.3", hour=3, minute=0), + # S390X vm (ibm-research) + Nightly("nightly-4-00", [LINUX_S390X], branch='s390x-backend', hour=0, minute=0), + Nightly("nightly-4-01", [JITLINUX_S390X], branch='s390x-backend', hour=2, minute=0), + Nightly("nightly-4-02", [APPLVLLINUX_S390X], branch='s390x-backend', hour=5, minute=0), + # this one has faithfully run every night even though the latest # change to that branch was in January 2013. Re-enable one day. 
#Nightly("nighly-ppc", [ @@ -327,22 +323,18 @@ PYPYBUILDBOT, LINUX32, LINUX64, - LINUX_S390X, MACOSX32, WIN32, APPLVLLINUX32, APPLVLLINUX64, - APPLVLLINUX_S390X, APPLVLWIN32, LIBPYTHON_LINUX32, LIBPYTHON_LINUX64, - LIBPYTHON_LINUX_S390X, JITLINUX32, JITLINUX64, - JITLINUX_S390X, JITMACOSX64, JITWIN32, JITFREEBSD964, @@ -360,6 +352,12 @@ #JITFREEBSD764, #JITFREEBSD864, #JITINDIANA32, + + LINUX_S390X, + APPLVLLINUX_S390X, + LIBPYTHON_LINUX_S390X, + JITLINUX_S390X, + ] + ARM.builderNames, properties=[]), ] + ARM.schedulers, @@ -384,13 +382,6 @@ "category": 'linux64', #"locks": [TannitCPU.access('counting')], }, - {"name": LINUX_S390X, - "slavenames": ["dje"], - "builddir": LINUX_S390X, - "factory": pypyOwnTestFactory, - "category": 's390x', - #"locks": [TannitCPU.access('counting')], - }, {"name": APPLVLLINUX32, #"slavenames": ["allegro32"], "slavenames": ["tannit32"], @@ -406,13 +397,6 @@ "category": "linux64", #"locks": [TannitCPU.access('counting')], }, - {"name": APPLVLLINUX_S390X, - "slavenames": ["dje"], - "builddir": APPLVLLINUX_S390X, - "factory": pypyTranslatedAppLevelTestFactoryS390X, - "category": "s390x", - #"locks": [TannitCPU.access('counting')], - }, {"name": LIBPYTHON_LINUX32, "slavenames": ["tannit32"], #"slavenames": ["allegro32"], @@ -428,13 +412,6 @@ "category": "linux64", #"locks": [TannitCPU.access('counting')], }, - {"name": LIBPYTHON_LINUX_S390X, - "slavenames": ["dje"], - "builddir": LIBPYTHON_LINUX_S390X, - "factory": pypyTranslatedLibPythonTestFactory, - "category": "s390x", - #"locks": [TannitCPU.access('counting')], - }, {"name" : JITLINUX32, #"slavenames": ["allegro32"], "slavenames": ["tannit32"], @@ -548,7 +525,26 @@ 'builddir': PYPYBUILDBOT, 'factory': pypybuilds.PyPyBuildbotTestFactory(), 'category': 'buildbot', - } + }, + # S390X + {"name": LINUX_S390X, + "slavenames": ["dje"], + "builddir": LINUX_S390X, + "factory": pypyOwnTestFactory, + "category": 's390x', + }, + {"name": APPLVLLINUX_S390X, + "slavenames": ["dje"], + "builddir": APPLVLLINUX_S390X, + "factory": pypyTranslatedAppLevelTestFactoryS390X, + "category": "s390x", + }, + {"name": LIBPYTHON_LINUX_S390X, + "slavenames": ["dje"], + "builddir": LIBPYTHON_LINUX_S390X, + "factory": pypyTranslatedLibPythonTestFactory, + "category": "s390x", + }, ] + ARM.builders, From pypy.commits at gmail.com Wed Mar 9 07:13:33 2016 From: pypy.commits at gmail.com (fijal) Date: Wed, 09 Mar 2016 04:13:33 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: do a hack in history to record the operations *earlier* than we have inputargs Message-ID: <56e0136d.aa09c20a.ddddb.ffff88b7@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82903:bd97c8e4e94b Date: 2016-03-09 14:12 +0200 http://bitbucket.org/pypy/pypy/changeset/bd97c8e4e94b/ Log: do a hack in history to record the operations *earlier* than we have inputargs diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -642,17 +642,26 @@ class History(object): ends_with_jump = False + trace = None def __init__(self): self.descr_cache = {} self.descrs = {} self.consts = [] + self._cache = [] def set_inputargs(self, inpargs): from rpython.jit.metainterp.opencoder import Trace self.trace = Trace(inpargs) self.inputargs = inpargs + if self._cache: + # hack to record the ops *after* we know our inputargs + for op in self._cache: + newop = self.trace.record_op(op.getopnum(), op.getarglist(), + op.getdescr()) + op.position = newop.position + 
self._cache = None def length(self): return self.trace._count @@ -668,7 +677,11 @@ @specialize.argtype(3) def record(self, opnum, argboxes, value, descr=None): - op = self.trace.record_op(opnum, argboxes, descr) + if self.trace is None: + op = ResOperation(opnum, argboxes, -1, descr) + self._cache.append(op) + else: + op = self.trace.record_op(opnum, argboxes, descr) if value is None: assert op.type == 'v' elif isinstance(value, bool): diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -63,7 +63,7 @@ if force_inputargs is not None: self.inputargs = [rop.inputarg_from_tp(arg.type) for arg in force_inputargs] - self._inputargs = [None] * len(force_inputargs) + self._inputargs = [None] * len(trace.inputargs) for i, arg in enumerate(force_inputargs): if arg.position >= 0: self._cache[arg.position] = self.inputargs[i] diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -303,8 +303,7 @@ patchguardop = self.optimizer.patchguardop for guard in extra_guards.extra_guards: if isinstance(guard, GuardResOp): - guard.rd_snapshot = patchguardop.rd_snapshot - guard.rd_frame_info_list = patchguardop.rd_frame_info_list + guard.rd_resume_position = patchguardop.rd_resume_position guard.setdescr(compile.ResumeAtPositionDescr()) self.send_extra_operation(guard) except VirtualStatesCantMatch: diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -444,7 +444,7 @@ if other.level == LEVEL_UNKNOWN: if (runtime_box and runtime_box.nonnull() and self.known_class.same_constant(cpu.ts.cls_of_box(runtime_box))): - op = ResOperation(rop.GUARD_NONNULL_CLASS, [box, self.known_class], None) + op = ResOperation(rop.GUARD_NONNULL_CLASS, [box, self.known_class]) extra_guards.append(op) return else: @@ -452,7 +452,7 @@ elif other.level == LEVEL_NONNULL: if (runtime_box and self.known_class.same_constant( cpu.ts.cls_of_box(runtime_box))): - op = ResOperation(rop.GUARD_CLASS, [box, self.known_class], None) + op = ResOperation(rop.GUARD_CLASS, [box, self.known_class]) extra_guards.append(op) return else: @@ -476,7 +476,7 @@ return raise VirtualStatesCantMatch("different constants") if runtime_box is not None and self.constbox.same_constant(runtime_box.constbox()): - op = ResOperation(rop.GUARD_VALUE, [box, self.constbox], None) + op = ResOperation(rop.GUARD_VALUE, [box, self.constbox]) extra_guards.append(op) return else: From pypy.commits at gmail.com Wed Mar 9 07:13:32 2016 From: pypy.commits at gmail.com (fijal) Date: Wed, 09 Mar 2016 04:13:32 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: yay, fix the retracing Message-ID: <56e0136c.29cec20a.2cd69.ffff8d0f@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82902:ed279ae0f4f6 Date: 2016-03-09 13:35 +0200 http://bitbucket.org/pypy/pypy/changeset/ed279ae0f4f6/ Log: yay, fix the retracing diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -338,7 +338,7 @@ end_label = ResOperation(rop.LABEL, inputargs[:], descr=loop_jitcell_token) - cut_pos = history.get_trace_position() + #cut_pos = 
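A deliberately simplified, self-contained sketch of the deferred-recording trick in the history.py hunk above: operations recorded before the inputargs exist are parked in a plain list and replayed into the real trace once set_inputargs() runs. The class names are invented and the position fix-up is reduced to a return value:

    class ToyTrace(object):
        def __init__(self, inputargs):
            self.inputargs = inputargs
            self.ops = []
        def record_op(self, op):
            self.ops.append(op)
            return len(self.ops) - 1      # position of the op in the trace

    class ToyHistory(object):
        def __init__(self):
            self.trace = None
            self._cache = []              # ops recorded before set_inputargs()
        def record(self, op):
            if self.trace is None:
                self._cache.append(op)    # defer: the trace does not exist yet
            else:
                self.trace.record_op(op)
        def set_inputargs(self, inputargs):
            self.trace = ToyTrace(inputargs)
            for op in self._cache:        # replay the early ops into the trace
                self.trace.record_op(op)
            self._cache = None

    h = ToyHistory()
    h.record('int_add')                   # recorded before inputargs are known
    h.set_inputargs(['i0', 'i1'])
    h.record('guard_true')
    assert h.trace.ops == ['int_add', 'guard_true']
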
history.get_trace_position() history.record(rop.JUMP, jumpargs[:], None, descr=loop_jitcell_token) enable_opts = jitdriver_sd.warmstate.enable_opts call_pure_results = metainterp.call_pure_results diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -63,12 +63,16 @@ if force_inputargs is not None: self.inputargs = [rop.inputarg_from_tp(arg.type) for arg in force_inputargs] + self._inputargs = [None] * len(force_inputargs) for i, arg in enumerate(force_inputargs): if arg.position >= 0: self._cache[arg.position] = self.inputargs[i] + else: + self._inputargs[-arg.position-1] = self.inputargs[i] else: self.inputargs = [rop.inputarg_from_tp(arg.type) for arg in self.trace.inputargs] + self._inputargs = self.inputargs[:] self.start = start self.pos = start self._count = 0 @@ -76,7 +80,7 @@ def _get(self, i): if i < 0: - return self.inputargs[-i - 1] + return self._inputargs[-i - 1] res = self._cache[i] assert res is not None return res diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -454,6 +454,10 @@ def log_loop(*args, **kwds): pass + @classmethod + def log_loop_from_trace(*args, **kwds): + pass + class logger_ops: repr_of_resop = repr From pypy.commits at gmail.com Wed Mar 9 07:16:45 2016 From: pypy.commits at gmail.com (fijal) Date: Wed, 09 Mar 2016 04:16:45 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: fix check_history Message-ID: <56e0142d.8b941c0a.4de56.6c6e@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82904:091da4a9fe3a Date: 2016-03-09 14:15 +0200 http://bitbucket.org/pypy/pypy/changeset/091da4a9fe3a/ Log: fix check_history diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -753,7 +753,6 @@ compiled_count = 0 enter_count = 0 aborted_count = 0 - operations = None def __init__(self): self.loops = [] @@ -780,9 +779,7 @@ self.jitcell_token_wrefs.append(weakref.ref(token)) def set_history(self, history): - # XXX think about something - pass - #self.operations = history.operations + self.history = history def aborted(self): self.aborted_count += 1 @@ -819,7 +816,9 @@ def check_history(self, expected=None, **check): insns = {} - for op in self.operations: + t = self.history.trace.get_iter() + while not t.done(): + op = t.next() opname = op.getopname() insns[opname] = insns.get(opname, 0) + 1 if expected is not None: From pypy.commits at gmail.com Wed Mar 9 08:11:10 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 09 Mar 2016 05:11:10 -0800 (PST) Subject: [pypy-commit] pypy default: Test and workaround for issue #2200: tries to remove the rpython-level Message-ID: <56e020ee.cf0b1c0a.629b.ffffcd6a@mx.google.com> Author: Armin Rigo Branch: Changeset: r82905:c4c54cb69aba Date: 2016-03-09 14:04 +0100 http://bitbucket.org/pypy/pypy/changeset/c4c54cb69aba/ Log: Test and workaround for issue #2200: tries to remove the rpython- level recursion that can rarely occur after failing a guard in the jit diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1148,35 +1148,45 @@ @arguments("cpu", "i", "R", "d", returns="i") 
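check_history() in the history.py hunk above now counts operations by walking the linear trace through get_iter()/done()/next() instead of a stored operations list. A toy version of that loop, with plain strings standing in for resoperations:

    class ToyIter(object):
        def __init__(self, ops):
            self._ops = ops
            self._i = 0
        def done(self):
            return self._i >= len(self._ops)
        def next(self):
            op = self._ops[self._i]
            self._i += 1
            return op

    def count_opnames(trace_iter):
        insns = {}
        while not trace_iter.done():
            opname = trace_iter.next()    # the real code calls op.getopname()
            insns[opname] = insns.get(opname, 0) + 1
        return insns

    counts = count_opnames(ToyIter(['int_add', 'guard_true', 'int_add']))
    assert counts == {'int_add': 2, 'guard_true': 1}
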
def bhimpl_residual_call_r_i(cpu, func, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_i(func, None, args_r, None, calldescr) @arguments("cpu", "i", "R", "d", returns="r") def bhimpl_residual_call_r_r(cpu, func, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_r(func, None, args_r, None, calldescr) @arguments("cpu", "i", "R", "d") def bhimpl_residual_call_r_v(cpu, func, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_v(func, None, args_r, None, calldescr) @arguments("cpu", "i", "I", "R", "d", returns="i") def bhimpl_residual_call_ir_i(cpu, func, args_i, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_i(func, args_i, args_r, None, calldescr) @arguments("cpu", "i", "I", "R", "d", returns="r") def bhimpl_residual_call_ir_r(cpu, func, args_i, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_r(func, args_i, args_r, None, calldescr) @arguments("cpu", "i", "I", "R", "d") def bhimpl_residual_call_ir_v(cpu, func, args_i, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_v(func, args_i, args_r, None, calldescr) @arguments("cpu", "i", "I", "R", "F", "d", returns="i") def bhimpl_residual_call_irf_i(cpu, func, args_i,args_r,args_f,calldescr): + workaround2200.active = True return cpu.bh_call_i(func, args_i, args_r, args_f, calldescr) @arguments("cpu", "i", "I", "R", "F", "d", returns="r") def bhimpl_residual_call_irf_r(cpu, func, args_i,args_r,args_f,calldescr): + workaround2200.active = True return cpu.bh_call_r(func, args_i, args_r, args_f, calldescr) @arguments("cpu", "i", "I", "R", "F", "d", returns="f") def bhimpl_residual_call_irf_f(cpu, func, args_i,args_r,args_f,calldescr): + workaround2200.active = True return cpu.bh_call_f(func, args_i, args_r, args_f, calldescr) @arguments("cpu", "i", "I", "R", "F", "d") def bhimpl_residual_call_irf_v(cpu, func, args_i,args_r,args_f,calldescr): + workaround2200.active = True return cpu.bh_call_v(func, args_i, args_r, args_f, calldescr) # conditional calls - note that they cannot return stuff @@ -1204,44 +1214,54 @@ @arguments("cpu", "j", "R", returns="i") def bhimpl_inline_call_r_i(cpu, jitcode, args_r): + workaround2200.active = True return cpu.bh_call_i(jitcode.get_fnaddr_as_int(), None, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "R", returns="r") def bhimpl_inline_call_r_r(cpu, jitcode, args_r): + workaround2200.active = True return cpu.bh_call_r(jitcode.get_fnaddr_as_int(), None, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "R") def bhimpl_inline_call_r_v(cpu, jitcode, args_r): + workaround2200.active = True return cpu.bh_call_v(jitcode.get_fnaddr_as_int(), None, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "I", "R", returns="i") def bhimpl_inline_call_ir_i(cpu, jitcode, args_i, args_r): + workaround2200.active = True return cpu.bh_call_i(jitcode.get_fnaddr_as_int(), args_i, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "I", "R", returns="r") def bhimpl_inline_call_ir_r(cpu, jitcode, args_i, args_r): + workaround2200.active = True return cpu.bh_call_r(jitcode.get_fnaddr_as_int(), args_i, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "I", "R") def bhimpl_inline_call_ir_v(cpu, jitcode, args_i, args_r): + workaround2200.active = True return cpu.bh_call_v(jitcode.get_fnaddr_as_int(), args_i, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "I", "R", "F", returns="i") def bhimpl_inline_call_irf_i(cpu, jitcode, args_i, args_r, args_f): + workaround2200.active = True return 
cpu.bh_call_i(jitcode.get_fnaddr_as_int(), args_i, args_r, args_f, jitcode.calldescr) @arguments("cpu", "j", "I", "R", "F", returns="r") def bhimpl_inline_call_irf_r(cpu, jitcode, args_i, args_r, args_f): + workaround2200.active = True return cpu.bh_call_r(jitcode.get_fnaddr_as_int(), args_i, args_r, args_f, jitcode.calldescr) @arguments("cpu", "j", "I", "R", "F", returns="f") def bhimpl_inline_call_irf_f(cpu, jitcode, args_i, args_r, args_f): + workaround2200.active = True return cpu.bh_call_f(jitcode.get_fnaddr_as_int(), args_i, args_r, args_f, jitcode.calldescr) @arguments("cpu", "j", "I", "R", "F") def bhimpl_inline_call_irf_v(cpu, jitcode, args_i, args_r, args_f): + workaround2200.active = True return cpu.bh_call_v(jitcode.get_fnaddr_as_int(), args_i, args_r, args_f, jitcode.calldescr) @@ -1528,6 +1548,8 @@ if not self.nextblackholeinterp: self._exit_frame_with_exception(current_exc) return current_exc + finally: + workaround2200.active = False # # pass the frame's return value to the caller caller = self.nextblackholeinterp @@ -1701,3 +1723,10 @@ # _run_forever(firstbh, current_exc) convert_and_run_from_pyjitpl._dont_inline_ = True + +# ____________________________________________________________ + +class WorkaroundIssue2200(object): + pass +workaround2200 = WorkaroundIssue2200() +workaround2200.active = False diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -4377,3 +4377,30 @@ assert res == -1 else: assert res == 4294967295 + + def test_issue2200_recursion(self): + # Reproduces issue #2200. This test contains no recursion, + # but due to an unlikely combination of factors it ends up + # creating an RPython-level recursion, one per loop iteration. + # The recursion is: blackhole interp from the failing guard -> + # does the call to enter() as a normal call -> enter() runs + # can_enter_jit() as if we're interpreted -> we enter the JIT + # again from the start of the loop -> the guard fails again + # during the next iteration -> blackhole interp. All arrows + # in the previous sentence are one or more levels of RPython + # function calls. + driver = JitDriver(greens=[], reds=["i"]) + def enter(i): + driver.can_enter_jit(i=i) + def f(): + set_param(None, 'trace_eagerness', 999999) + i = 0 + while True: + driver.jit_merge_point(i=i) + i += 1 + if i >= 300: + return i + promote(i + 1) # a failing guard + enter(i) + + self.meta_interp(f, []) diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -433,6 +433,14 @@ bound_reached(hash, None, *args) return + # Workaround for issue #2200, maybe temporary. This is not + # a proper fix, but only a hack that should work well enough + # for PyPy's main jitdriver... See test_issue2200_recursion + from rpython.jit.metainterp.blackhole import workaround2200 + if workaround2200.active: + workaround2200.active = False + return + # Here, we have found 'cell'. 
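The shape of the issue #2200 hack in the hunks above, reduced to ordinary Python: the blackhole call helpers raise a process-global flag, and the warm-runner entry point consumes that flag instead of re-entering the JIT, which is what breaks the RPython-level recursion described in the log message. The names below are toy stand-ins, not the real classes:

    class Flag(object):
        active = False
    workaround2200 = Flag()                 # one global, mutable flag

    def blackhole_residual_call(func, args):
        workaround2200.active = True        # "we are inside a blackholed call"
        return func(*args)

    def maybe_compile_and_run(*args):
        if workaround2200.active:           # do not re-enter the JIT from here
            workaround2200.active = False
            return
        print 'would start tracing/compiling with', args

    blackhole_residual_call(maybe_compile_and_run, (1, 2))   # suppressed
    maybe_compile_and_run(1, 2)                              # runs normally
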
# if cell.flags & (JC_TRACING | JC_TEMPORARY): From pypy.commits at gmail.com Wed Mar 9 08:11:12 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 09 Mar 2016 05:11:12 -0800 (PST) Subject: [pypy-commit] pypy default: merge heads Message-ID: <56e020f0.49f9c20a.8174f.ffffa0c4@mx.google.com> Author: Armin Rigo Branch: Changeset: r82906:bd53b782ce3d Date: 2016-03-09 14:10 +0100 http://bitbucket.org/pypy/pypy/changeset/bd53b782ce3d/ Log: merge heads diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -60,10 +60,10 @@ Must be smaller than 1.0 """ w_modules = space.sys.get('modules') - if space.contains_w(w_modules, space.wrap('_continuation')): - space.warn(space.wrap("Using _continuation/greenlet/stacklet together " - "with vmprof will crash"), - space.w_RuntimeWarning) + #if space.contains_w(w_modules, space.wrap('_continuation')): + # space.warn(space.wrap("Using _continuation/greenlet/stacklet together " + # "with vmprof will crash"), + # space.w_RuntimeWarning) try: rvmprof.enable(fileno, period) except rvmprof.VMProfError, e: diff --git a/rpython/jit/tool/traceviewer.py b/rpython/jit/tool/traceviewer.py --- a/rpython/jit/tool/traceviewer.py +++ b/rpython/jit/tool/traceviewer.py @@ -103,9 +103,9 @@ self.last_guard = -1 else: # guards can be out of order nowadays - groups = sorted(groups) - self.first_guard = guard_number(groups[0]) - self.last_guard = guard_number(groups[-1]) + groups = sorted(map(guard_number, groups)) + self.first_guard = groups[0] + self.last_guard = groups[-1] content = property(get_content, set_content) From pypy.commits at gmail.com Wed Mar 9 08:38:51 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 09 Mar 2016 05:38:51 -0800 (PST) Subject: [pypy-commit] pypy default: push forward the starting point Message-ID: <56e0276b.84c9c20a.5eb4d.ffffb29b@mx.google.com> Author: Armin Rigo Branch: Changeset: r82907:b238b48f9138 Date: 2016-03-09 14:37 +0100 http://bitbucket.org/pypy/pypy/changeset/b238b48f9138/ Log: push forward the starting point diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,5 +3,5 @@ ========================= .. this is a revision shortly after release-5.0 -.. startrev: 7bb6381d084c +.. startrev: 9c4299dc2d60 From pypy.commits at gmail.com Wed Mar 9 09:57:24 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 09 Mar 2016 06:57:24 -0800 (PST) Subject: [pypy-commit] pypy default: push forward last starting point Message-ID: <56e039d4.c16dc20a.1a4b8.ffffd1c3@mx.google.com> Author: mattip Branch: Changeset: r82909:4dc888d04ebf Date: 2016-03-09 16:41 +0200 http://bitbucket.org/pypy/pypy/changeset/4dc888d04ebf/ Log: push forward last starting point diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,5 +3,5 @@ ========================= .. this is a revision shortly after release-5.0 -.. startrev: 9c4299dc2d60 +.. 
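The traceviewer fix above converts guard labels to numbers before sorting, because the lexicographic order of the labels differs from the numeric order of their ids. An illustration with made-up labels and a simplified stand-in for guard_number():

    def guard_number(label):
        # simplified stand-in for the traceviewer helper: pull out the hex id
        return int(label[len('Guard'):], 16)

    groups = ['Guard0x10', 'Guard0x2', 'Guard0xa']
    assert sorted(groups)[0] == 'Guard0x10'      # text order: 0x10 sorts first
    nums = sorted(map(guard_number, groups))
    assert nums == [0x2, 0xa, 0x10]              # numeric order is what we want
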
startrev: b238b48f9138 From pypy.commits at gmail.com Wed Mar 9 09:57:22 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 09 Mar 2016 06:57:22 -0800 (PST) Subject: [pypy-commit] pypy release-5.x: merge default into release Message-ID: <56e039d2.03dd1c0a.62e34.ffffe124@mx.google.com> Author: mattip Branch: release-5.x Changeset: r82908:fa95fe4dcfe3 Date: 2016-03-09 16:37 +0200 http://bitbucket.org/pypy/pypy/changeset/fa95fe4dcfe3/ Log: merge default into release diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -15,8 +15,8 @@ `passes all tests`_ on PyPy. The new cpyext is also much faster. vmprof_ has been a go-to profiler for PyPy on linux for a few releases -and we're happy to announce that thanks to commercial cooperation, vmprof -now works on Linux, OS X and Windows on both PyPy and CPython. +and we're happy to announce that thanks to the cooperation with jetbrains, +vmprof now works on Linux, OS X and Windows on both PyPy and CPython. You can download the PyPy 5.0 release here: diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,5 +3,5 @@ ========================= .. this is a revision shortly after release-5.0 -.. startrev: 7bb6381d084c +.. startrev: 9c4299dc2d60 diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -60,10 +60,10 @@ Must be smaller than 1.0 """ w_modules = space.sys.get('modules') - if space.contains_w(w_modules, space.wrap('_continuation')): - space.warn(space.wrap("Using _continuation/greenlet/stacklet together " - "with vmprof will crash"), - space.w_RuntimeWarning) + #if space.contains_w(w_modules, space.wrap('_continuation')): + # space.warn(space.wrap("Using _continuation/greenlet/stacklet together " + # "with vmprof will crash"), + # space.w_RuntimeWarning) try: rvmprof.enable(fileno, period) except rvmprof.VMProfError, e: diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1148,35 +1148,45 @@ @arguments("cpu", "i", "R", "d", returns="i") def bhimpl_residual_call_r_i(cpu, func, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_i(func, None, args_r, None, calldescr) @arguments("cpu", "i", "R", "d", returns="r") def bhimpl_residual_call_r_r(cpu, func, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_r(func, None, args_r, None, calldescr) @arguments("cpu", "i", "R", "d") def bhimpl_residual_call_r_v(cpu, func, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_v(func, None, args_r, None, calldescr) @arguments("cpu", "i", "I", "R", "d", returns="i") def bhimpl_residual_call_ir_i(cpu, func, args_i, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_i(func, args_i, args_r, None, calldescr) @arguments("cpu", "i", "I", "R", "d", returns="r") def bhimpl_residual_call_ir_r(cpu, func, args_i, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_r(func, args_i, args_r, None, calldescr) @arguments("cpu", "i", "I", "R", "d") def bhimpl_residual_call_ir_v(cpu, func, args_i, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_v(func, args_i, args_r, None, calldescr) @arguments("cpu", "i", "I", "R", "F", "d", returns="i") def 
bhimpl_residual_call_irf_i(cpu, func, args_i,args_r,args_f,calldescr): + workaround2200.active = True return cpu.bh_call_i(func, args_i, args_r, args_f, calldescr) @arguments("cpu", "i", "I", "R", "F", "d", returns="r") def bhimpl_residual_call_irf_r(cpu, func, args_i,args_r,args_f,calldescr): + workaround2200.active = True return cpu.bh_call_r(func, args_i, args_r, args_f, calldescr) @arguments("cpu", "i", "I", "R", "F", "d", returns="f") def bhimpl_residual_call_irf_f(cpu, func, args_i,args_r,args_f,calldescr): + workaround2200.active = True return cpu.bh_call_f(func, args_i, args_r, args_f, calldescr) @arguments("cpu", "i", "I", "R", "F", "d") def bhimpl_residual_call_irf_v(cpu, func, args_i,args_r,args_f,calldescr): + workaround2200.active = True return cpu.bh_call_v(func, args_i, args_r, args_f, calldescr) # conditional calls - note that they cannot return stuff @@ -1204,44 +1214,54 @@ @arguments("cpu", "j", "R", returns="i") def bhimpl_inline_call_r_i(cpu, jitcode, args_r): + workaround2200.active = True return cpu.bh_call_i(jitcode.get_fnaddr_as_int(), None, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "R", returns="r") def bhimpl_inline_call_r_r(cpu, jitcode, args_r): + workaround2200.active = True return cpu.bh_call_r(jitcode.get_fnaddr_as_int(), None, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "R") def bhimpl_inline_call_r_v(cpu, jitcode, args_r): + workaround2200.active = True return cpu.bh_call_v(jitcode.get_fnaddr_as_int(), None, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "I", "R", returns="i") def bhimpl_inline_call_ir_i(cpu, jitcode, args_i, args_r): + workaround2200.active = True return cpu.bh_call_i(jitcode.get_fnaddr_as_int(), args_i, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "I", "R", returns="r") def bhimpl_inline_call_ir_r(cpu, jitcode, args_i, args_r): + workaround2200.active = True return cpu.bh_call_r(jitcode.get_fnaddr_as_int(), args_i, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "I", "R") def bhimpl_inline_call_ir_v(cpu, jitcode, args_i, args_r): + workaround2200.active = True return cpu.bh_call_v(jitcode.get_fnaddr_as_int(), args_i, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "I", "R", "F", returns="i") def bhimpl_inline_call_irf_i(cpu, jitcode, args_i, args_r, args_f): + workaround2200.active = True return cpu.bh_call_i(jitcode.get_fnaddr_as_int(), args_i, args_r, args_f, jitcode.calldescr) @arguments("cpu", "j", "I", "R", "F", returns="r") def bhimpl_inline_call_irf_r(cpu, jitcode, args_i, args_r, args_f): + workaround2200.active = True return cpu.bh_call_r(jitcode.get_fnaddr_as_int(), args_i, args_r, args_f, jitcode.calldescr) @arguments("cpu", "j", "I", "R", "F", returns="f") def bhimpl_inline_call_irf_f(cpu, jitcode, args_i, args_r, args_f): + workaround2200.active = True return cpu.bh_call_f(jitcode.get_fnaddr_as_int(), args_i, args_r, args_f, jitcode.calldescr) @arguments("cpu", "j", "I", "R", "F") def bhimpl_inline_call_irf_v(cpu, jitcode, args_i, args_r, args_f): + workaround2200.active = True return cpu.bh_call_v(jitcode.get_fnaddr_as_int(), args_i, args_r, args_f, jitcode.calldescr) @@ -1528,6 +1548,8 @@ if not self.nextblackholeinterp: self._exit_frame_with_exception(current_exc) return current_exc + finally: + workaround2200.active = False # # pass the frame's return value to the caller caller = self.nextblackholeinterp @@ -1701,3 +1723,10 @@ # _run_forever(firstbh, current_exc) convert_and_run_from_pyjitpl._dont_inline_ = True + +# 
____________________________________________________________ + +class WorkaroundIssue2200(object): + pass +workaround2200 = WorkaroundIssue2200() +workaround2200.active = False diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -4377,3 +4377,30 @@ assert res == -1 else: assert res == 4294967295 + + def test_issue2200_recursion(self): + # Reproduces issue #2200. This test contains no recursion, + # but due to an unlikely combination of factors it ends up + # creating an RPython-level recursion, one per loop iteration. + # The recursion is: blackhole interp from the failing guard -> + # does the call to enter() as a normal call -> enter() runs + # can_enter_jit() as if we're interpreted -> we enter the JIT + # again from the start of the loop -> the guard fails again + # during the next iteration -> blackhole interp. All arrows + # in the previous sentence are one or more levels of RPython + # function calls. + driver = JitDriver(greens=[], reds=["i"]) + def enter(i): + driver.can_enter_jit(i=i) + def f(): + set_param(None, 'trace_eagerness', 999999) + i = 0 + while True: + driver.jit_merge_point(i=i) + i += 1 + if i >= 300: + return i + promote(i + 1) # a failing guard + enter(i) + + self.meta_interp(f, []) diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -433,6 +433,14 @@ bound_reached(hash, None, *args) return + # Workaround for issue #2200, maybe temporary. This is not + # a proper fix, but only a hack that should work well enough + # for PyPy's main jitdriver... See test_issue2200_recursion + from rpython.jit.metainterp.blackhole import workaround2200 + if workaround2200.active: + workaround2200.active = False + return + # Here, we have found 'cell'. # if cell.flags & (JC_TRACING | JC_TEMPORARY): diff --git a/rpython/jit/tool/traceviewer.py b/rpython/jit/tool/traceviewer.py --- a/rpython/jit/tool/traceviewer.py +++ b/rpython/jit/tool/traceviewer.py @@ -103,9 +103,9 @@ self.last_guard = -1 else: # guards can be out of order nowadays - groups = sorted(groups) - self.first_guard = guard_number(groups[0]) - self.last_guard = guard_number(groups[-1]) + groups = sorted(map(guard_number, groups)) + self.first_guard = groups[0] + self.last_guard = groups[-1] content = property(get_content, set_content) From pypy.commits at gmail.com Wed Mar 9 10:35:39 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 09 Mar 2016 07:35:39 -0800 (PST) Subject: [pypy-commit] pypy default: An arguably bogus check in the stdlib. Message-ID: <56e042cb.d4e41c0a.c6818.0823@mx.google.com> Author: Armin Rigo Branch: Changeset: r82910:8f04b833f59a Date: 2016-03-09 16:34 +0100 http://bitbucket.org/pypy/pypy/changeset/8f04b833f59a/ Log: An arguably bogus check in the stdlib. 
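Background for the diff below: with the Method identity change being reinstated a little further down (Method.is_w / immutable_unique_id), two lookups of the same method can compare identical on PyPy, while CPython 2.7 still builds a fresh bound-method object on every attribute access. A short demonstration:

    class Target(object):
        def doctype(self, name, pubid, system):
            pass

    t = Target()
    # CPython 2.7 creates a new bound-method object per lookup, so this
    # prints False there; PyPy with the new Method identity rules can
    # answer True for the same expression.
    print t.doctype is t.doctype
    # That difference is why the original
    #     elif self.doctype is not self._XMLParser__doctype:
    # test became unreliable and is replaced by "elif 1:" in the diff below.
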
diff --git a/lib-python/2.7/xml/etree/ElementTree.py b/lib-python/2.7/xml/etree/ElementTree.py --- a/lib-python/2.7/xml/etree/ElementTree.py +++ b/lib-python/2.7/xml/etree/ElementTree.py @@ -1606,7 +1606,17 @@ pubid = pubid[1:-1] if hasattr(self.target, "doctype"): self.target.doctype(name, pubid, system[1:-1]) - elif self.doctype is not self._XMLParser__doctype: + elif 1: # XXX PyPy fix, used to be + # elif self.doctype is not self._XMLParser__doctype: + # but that condition is always True on CPython, as far + # as I can tell: self._XMLParser__doctype always + # returns a fresh unbound method object. + # On PyPy, unbound and bound methods have stronger + # unicity guarantees: self._XMLParser__doctype + # can return the same unbound method object, in + # some cases making the test above incorrectly False. + # (My guess would be that the line above is a backport + # from Python 3.) # warn about deprecated call self._XMLParser__doctype(name, pubid, system[1:-1]) self.doctype(name, pubid, system[1:-1]) From pypy.commits at gmail.com Wed Mar 9 11:15:06 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 09 Mar 2016 08:15:06 -0800 (PST) Subject: [pypy-commit] pypy release-5.x: redo changeset 666871b885d9, which was backed out in 1f238bdfca88 Message-ID: <56e04c0a.e6ebc20a.36ac0.ffffee67@mx.google.com> Author: mattip Branch: release-5.x Changeset: r82911:0ae5f56feb15 Date: 2016-03-09 18:12 +0200 http://bitbucket.org/pypy/pypy/changeset/0ae5f56feb15/ Log: redo changeset 666871b885d9, which was backed out in 1f238bdfca88 diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -13,6 +13,9 @@ from pypy.interpreter.argument import Arguments from rpython.rlib import jit +from rpython.rlib.rarithmetic import LONG_BIT +from rpython.rlib.rbigint import rbigint + funccallunrolling = unrolling_iterable(range(4)) @@ -557,6 +560,26 @@ return space.w_False return space.eq(self.w_function, w_other.w_function) + def is_w(self, space, other): + if not isinstance(other, Method): + return False + return (self.w_instance is other.w_instance and + self.w_function is other.w_function and + self.w_class is other.w_class) + + def immutable_unique_id(self, space): + from pypy.objspace.std.util import IDTAG_METHOD as tag + from pypy.objspace.std.util import IDTAG_SHIFT + if self.w_instance is not None: + id = space.bigint_w(space.id(self.w_instance)) + id = id.lshift(LONG_BIT) + else: + id = rbigint.fromint(0) + id = id.or_(space.bigint_w(space.id(self.w_function))) + id = id.lshift(LONG_BIT).or_(space.bigint_w(space.id(self.w_class))) + id = id.lshift(IDTAG_SHIFT).int_or_(tag) + return space.newlong_from_rbigint(id) + def descr_method_hash(self): space = self.space w_result = space.hash(self.w_function) diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -560,6 +560,37 @@ assert A().m == X() assert X() == A().m + @pytest.mark.skipif("config.option.runappdirect") + def test_method_identity(self): + class A(object): + def m(self): + pass + def n(self): + pass + + class B(A): + pass + + class X(object): + def __eq__(self, other): + return True + + a = A() + a2 = A() + assert a.m is a.m + assert id(a.m) == id(a.m) + assert a.m is not a.n + assert id(a.m) != id(a.n) + assert a.m is not a2.m + assert id(a.m) != id(a2.m) + + assert A.m is A.m + assert id(A.m) == id(A.m) + assert A.m is not A.n + assert 
id(A.m) != id(A.n) + assert A.m is not B.m + assert id(A.m) != id(B.m) + class TestMethod: def setup_method(self, method): diff --git a/pypy/objspace/std/util.py b/pypy/objspace/std/util.py --- a/pypy/objspace/std/util.py +++ b/pypy/objspace/std/util.py @@ -2,12 +2,13 @@ from pypy.interpreter.error import OperationError, oefmt -IDTAG_SHIFT = 3 +IDTAG_SHIFT = 4 IDTAG_INT = 1 IDTAG_LONG = 3 IDTAG_FLOAT = 5 IDTAG_COMPLEX = 7 +IDTAG_METHOD = 9 CMP_OPS = dict(lt='<', le='<=', eq='==', ne='!=', gt='>', ge='>=') BINARY_BITWISE_OPS = {'and': '&', 'lshift': '<<', 'or': '|', 'rshift': '>>', From pypy.commits at gmail.com Wed Mar 9 11:15:08 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 09 Mar 2016 08:15:08 -0800 (PST) Subject: [pypy-commit] pypy release-5.x: redo c7cc35224c29 which was backed out in 71586f7de6b0 Message-ID: <56e04c0c.e213c20a.c6806.ffffeef1@mx.google.com> Author: mattip Branch: release-5.x Changeset: r82912:f989e0a0742a Date: 2016-03-09 18:13 +0200 http://bitbucket.org/pypy/pypy/changeset/f989e0a0742a/ Log: redo c7cc35224c29 which was backed out in 71586f7de6b0 diff --git a/pypy/objspace/std/test/test_obj.py b/pypy/objspace/std/test/test_obj.py --- a/pypy/objspace/std/test/test_obj.py +++ b/pypy/objspace/std/test/test_obj.py @@ -172,15 +172,15 @@ def test_id_on_primitives(self): if self.cpython_apptest: skip("cpython behaves differently") - assert id(1) == (1 << 3) + 1 - assert id(1l) == (1 << 3) + 3 + assert id(1) == (1 << 4) + 1 + assert id(1l) == (1 << 4) + 3 class myint(int): pass assert id(myint(1)) != id(1) assert id(1.0) & 7 == 5 assert id(-0.0) != id(0.0) - assert hex(id(2.0)) == '0x20000000000000005L' + assert hex(id(2.0)) == '0x40000000000000005L' assert id(0.0) == 5 def test_id_on_strs(self): From pypy.commits at gmail.com Wed Mar 9 11:15:09 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 09 Mar 2016 08:15:09 -0800 (PST) Subject: [pypy-commit] pypy release-5.x: An arguably bogus check in the stdlib. Message-ID: <56e04c0d.8ee61c0a.a3473.126a@mx.google.com> Author: Armin Rigo Branch: release-5.x Changeset: r82913:246c9cf22037 Date: 2016-03-09 16:34 +0100 http://bitbucket.org/pypy/pypy/changeset/246c9cf22037/ Log: An arguably bogus check in the stdlib. diff --git a/lib-python/2.7/xml/etree/ElementTree.py b/lib-python/2.7/xml/etree/ElementTree.py --- a/lib-python/2.7/xml/etree/ElementTree.py +++ b/lib-python/2.7/xml/etree/ElementTree.py @@ -1606,7 +1606,17 @@ pubid = pubid[1:-1] if hasattr(self.target, "doctype"): self.target.doctype(name, pubid, system[1:-1]) - elif self.doctype is not self._XMLParser__doctype: + elif 1: # XXX PyPy fix, used to be + # elif self.doctype is not self._XMLParser__doctype: + # but that condition is always True on CPython, as far + # as I can tell: self._XMLParser__doctype always + # returns a fresh unbound method object. + # On PyPy, unbound and bound methods have stronger + # unicity guarantees: self._XMLParser__doctype + # can return the same unbound method object, in + # some cases making the test above incorrectly False. + # (My guess would be that the line above is a backport + # from Python 3.) 
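A plain-Python rendering of how immutable_unique_id in the function.py hunk above packs the instance, function and class ids plus a type tag into one integer. IDTAG_SHIFT and IDTAG_METHOD are the values from the util.py hunk; LONG_BIT is assumed to be 64 here (RPython takes it from rarithmetic):

    LONG_BIT = 64            # assumed 64-bit platform for this sketch
    IDTAG_SHIFT = 4          # values from the objspace/std/util.py hunk above
    IDTAG_METHOD = 9

    def method_id(instance_id, function_id, class_id):
        uid = instance_id                        # 0 when the method is unbound
        uid = (uid << LONG_BIT) | function_id
        uid = (uid << LONG_BIT) | class_id
        return (uid << IDTAG_SHIFT) | IDTAG_METHOD

    # the same (instance, function, class) triple always maps to the same id,
    # so id(a.m) == id(a.m) can hold even though each access builds a new Method
    assert method_id(100, 200, 300) == method_id(100, 200, 300)
    assert method_id(101, 200, 300) != method_id(100, 200, 300)
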
# warn about deprecated call self._XMLParser__doctype(name, pubid, system[1:-1]) self.doctype(name, pubid, system[1:-1]) From pypy.commits at gmail.com Wed Mar 9 11:40:44 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 09 Mar 2016 08:40:44 -0800 (PST) Subject: [pypy-commit] pypy default: Playing around with hypothesis Message-ID: <56e0520c.890bc30a.f4844.fffffbcc@mx.google.com> Author: Armin Rigo Branch: Changeset: r82914:d994482a6040 Date: 2016-03-09 17:40 +0100 http://bitbucket.org/pypy/pypy/changeset/d994482a6040/ Log: Playing around with hypothesis diff --git a/rpython/rlib/test/test_rbigint.py b/rpython/rlib/test/test_rbigint.py --- a/rpython/rlib/test/test_rbigint.py +++ b/rpython/rlib/test/test_rbigint.py @@ -15,6 +15,8 @@ from rpython.rtyper.test.test_llinterp import interpret from rpython.translator.c.test.test_standalone import StandaloneTests +from hypothesis import given, strategies + long_vals_not_too_big = range(17) + [ 37, 50, 127, 128, 129, 511, 512, 513, sys.maxint, sys.maxint + 1, @@ -967,6 +969,14 @@ py.test.raises(InvalidSignednessError, i.tobytes, 3, 'little', signed=False) py.test.raises(OverflowError, i.tobytes, 2, 'little', signed=True) + @given(strategies.binary(), strategies.booleans(), strategies.booleans()) + def test_frombytes_tobytes_hypothesis(self, s, big, signed): + # check the roundtrip from binary strings to bigints and back + byteorder = 'big' if big else 'little' + bigint = rbigint.frombytes(s, byteorder=byteorder, signed=signed) + t = bigint.tobytes(len(s), byteorder=byteorder, signed=signed) + assert s == t + class TestTranslated(StandaloneTests): From pypy.commits at gmail.com Wed Mar 9 12:02:17 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 09 Mar 2016 09:02:17 -0800 (PST) Subject: [pypy-commit] pypy default: Backed out changeset: b96f63e5e9fd, the bugfix made it into the release after all Message-ID: <56e05719.46fac20a.bff32.0027@mx.google.com> Author: mattip Branch: Changeset: r82915:92200db9424a Date: 2016-03-09 19:00 +0200 http://bitbucket.org/pypy/pypy/changeset/92200db9424a/ Log: Backed out changeset: b96f63e5e9fd, the bugfix made it into the release after all diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -128,6 +128,9 @@ * Fix for corner case (likely shown by Krakatau) for consecutive guards with interdependencies + * Fix applevel bare class method comparisons which should fix pretty printing + in IPython + * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy From pypy.commits at gmail.com Wed Mar 9 12:52:21 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 09 Mar 2016 09:52:21 -0800 (PST) Subject: [pypy-commit] pypy default: small cleanup Message-ID: <56e062d5.a3f6c20a.71577.168a@mx.google.com> Author: Ronan Lamy Branch: Changeset: r82916:6e82c4562984 Date: 2016-03-09 17:51 +0000 http://bitbucket.org/pypy/pypy/changeset/6e82c4562984/ Log: small cleanup diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py --- a/pypy/interpreter/mixedmodule.py +++ b/pypy/interpreter/mixedmodule.py @@ -3,7 +3,7 @@ from pypy.interpreter import gateway from pypy.interpreter.error import OperationError from pypy.interpreter.baseobjspace import W_Root -import os, sys +import sys class MixedModule(Module): applevel_name = None @@ -60,7 +60,7 @@ def save_module_content_for_future_reload(self): self.w_initialdict = 
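The new rbigint test above uses hypothesis to check a frombytes/tobytes round trip. The same property-test shape, applied to a stock-library round trip so it runs without RPython (requires the hypothesis package):

    from hypothesis import given, strategies
    import base64

    @given(strategies.binary())
    def test_roundtrip(data):
        # encoding followed by decoding must give the original bytes back
        assert base64.b64decode(base64.b64encode(data)) == data

    if __name__ == '__main__':
        test_roundtrip()    # hypothesis feeds it many random byte strings
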
self.space.call_method(self.w_dict, 'items') - + @classmethod def get_applevel_name(cls): """ NOT_RPYTHON """ if cls.applevel_name is not None: @@ -68,7 +68,6 @@ else: pkgroot = cls.__module__ return pkgroot.split('.')[-1] - get_applevel_name = classmethod(get_applevel_name) def get(self, name): space = self.space @@ -103,7 +102,7 @@ # be normal Functions to get the correct binding behaviour func = w_value if (isinstance(func, Function) and - type(func) is not BuiltinFunction): + type(func) is not BuiltinFunction): try: bltin = func._builtinversion_ except AttributeError: @@ -115,7 +114,6 @@ space.setitem(self.w_dict, w_name, w_value) return w_value - def getdict(self, space): if self.lazy: for name in self.loaders: @@ -131,6 +129,7 @@ self.startup_called = False self._frozen = True + @classmethod def buildloaders(cls): """ NOT_RPYTHON """ if not hasattr(cls, 'loaders'): @@ -149,8 +148,6 @@ if '__doc__' not in loaders: loaders['__doc__'] = cls.get__doc__ - buildloaders = classmethod(buildloaders) - def extra_interpdef(self, name, spec): cls = self.__class__ pkgroot = cls.__module__ @@ -159,21 +156,21 @@ w_obj = loader(space) space.setattr(space.wrap(self), space.wrap(name), w_obj) + @classmethod def get__doc__(cls, space): return space.wrap(cls.__doc__) - get__doc__ = classmethod(get__doc__) def getinterpevalloader(pkgroot, spec): """ NOT_RPYTHON """ def ifileloader(space): - d = {'space' : space} + d = {'space':space} # EVIL HACK (but it works, and this is not RPython :-) while 1: try: value = eval(spec, d) except NameError, ex: - name = ex.args[0].split("'")[1] # super-Evil + name = ex.args[0].split("'")[1] # super-Evil if name in d: raise # propagate the NameError try: From pypy.commits at gmail.com Wed Mar 9 13:16:20 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 09 Mar 2016 10:16:20 -0800 (PST) Subject: [pypy-commit] pypy default: Added tag release-5.0 for changeset 246c9cf22037 Message-ID: <56e06874.465ec20a.11873.2095@mx.google.com> Author: mattip Branch: Changeset: r82917:8bf6238bab25 Date: 2016-03-09 20:14 +0200 http://bitbucket.org/pypy/pypy/changeset/8bf6238bab25/ Log: Added tag release-5.0 for changeset 246c9cf22037 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -18,3 +18,4 @@ f3ad1e1e1d6215e20d34bb65ab85ff9188c9f559 release-2.6.1 850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0 5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1 +246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0 From pypy.commits at gmail.com Wed Mar 9 13:49:27 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 09 Mar 2016 10:49:27 -0800 (PST) Subject: [pypy-commit] pypy default: update Message-ID: <56e07037.d4e41c0a.c6818.575f@mx.google.com> Author: Armin Rigo Branch: Changeset: r82918:558191c38db3 Date: 2016-03-09 19:48 +0100 http://bitbucket.org/pypy/pypy/changeset/558191c38db3/ Log: update diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -2,6 +2,8 @@ maj=5 min=0 rev=0 +branchname=release-$maj.x # ==OR== release-$maj.$min.x +tagname=release-$maj.$min # ==OR== release-$maj.$min.$rev # This script will download latest builds from the buildmaster, rename the top # level directory, and repackage ready to be uploaded to bitbucket. It will also # download source, assuming a tag for the release already exists, and repackage them. 
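The repackage.sh change above introduces separate branchname/tagname knobs because the release branch (release-5.x) no longer matches the release tag (release-5.0); the hunks that follow substitute them into the download URLs. A rough Python rendering of the resulting naming scheme, for illustration only (the real script is bash and fetches the files with wget):

    maj, minor, rev = 5, 0, 0
    branchname = 'release-%d.x' % maj            # ==OR== 'release-%d.%d.x'
    tagname = 'release-%d.%d' % (maj, minor)     # ==OR== 'release-%d.%d.%d'

    for plat in ['linux', 'linux64', 'osx64']:
        print ('http://buildbot.pypy.org/nightly/%s/pypy-c-jit-latest-%s.tar.bz2'
               % (branchname, plat))
    print 'https://bitbucket.org/pypy/pypy/get/%s.tar.bz2' % tagname
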
@@ -9,7 +11,7 @@ for plat in linux linux64 linux-armhf-raspbian linux-armhf-raring linux-armel osx64 freebsd64 do - wget http://buildbot.pypy.org/nightly/release-$maj.$min.x/pypy-c-jit-latest-$plat.tar.bz2 + wget http://buildbot.pypy.org/nightly/$branchname/pypy-c-jit-latest-$plat.tar.bz2 tar -xf pypy-c-jit-latest-$plat.tar.bz2 rm pypy-c-jit-latest-$plat.tar.bz2 mv pypy-c-jit-*-$plat pypy-$maj.$min.$rev-$plat @@ -18,7 +20,7 @@ done plat=win32 -wget http://buildbot.pypy.org/nightly/release-$maj.$min.x/pypy-c-jit-latest-$plat.zip +wget http://buildbot.pypy.org/nightly/$branchname/pypy-c-jit-latest-$plat.zip unzip pypy-c-jit-latest-$plat.zip mv pypy-c-jit-*-$plat pypy-$maj.$min.$rev-$plat zip -r pypy-$maj.$min.$rev-$plat.zip pypy-$maj.$min.$rev-$plat @@ -26,7 +28,7 @@ # Do this after creating a tag, note the untarred directory is pypy-pypy- # so make sure there is not another one -wget https://bitbucket.org/pypy/pypy/get/release-$maj.$min.$rev.tar.bz2 +wget https://bitbucket.org/pypy/pypy/get/$tagname.tar.bz2 tar -xf release-$maj.$min.$rev.tar.bz2 mv pypy-pypy-* pypy-$maj.$min.$rev-src tar --owner=root --group=root --numeric-owner -cvjf pypy-$maj.$min.$rev-src.tar.bz2 pypy-$maj.$min.$rev-src From pypy.commits at gmail.com Wed Mar 9 14:11:43 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 09 Mar 2016 11:11:43 -0800 (PST) Subject: [pypy-commit] pypy default: hack test runner to log wall time to debug buildbot getting kill signal Message-ID: <56e0756f.e853c20a.2f740.2b79@mx.google.com> Author: mattip Branch: Changeset: r82919:380503fd3fa7 Date: 2016-03-09 21:10 +0200 http://bitbucket.org/pypy/pypy/changeset/380503fd3fa7/ Log: hack test runner to log wall time to debug buildbot getting kill signal diff --git a/testrunner/runner.py b/testrunner/runner.py --- a/testrunner/runner.py +++ b/testrunner/runner.py @@ -259,7 +259,8 @@ if res[0] == 'start': started += 1 - out.write("++ starting %s [%d started in total]\n" % (res[1], + now = time.strftime('%H:%M:%S') + out.write("++ %s starting %s [%d started in total]\n" % (now, res[1], started)) continue From pypy.commits at gmail.com Wed Mar 9 14:23:34 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 09 Mar 2016 11:23:34 -0800 (PST) Subject: [pypy-commit] jitviewer default: Added tag pypy-5.0 for changeset e49b88557c99 Message-ID: <56e07836.07b71c0a.de980.07f1@mx.google.com> Author: mattip Branch: Changeset: r278:e792c6a807c3 Date: 2016-03-09 21:23 +0200 http://bitbucket.org/pypy/jitviewer/changeset/e792c6a807c3/ Log: Added tag pypy-5.0 for changeset e49b88557c99 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -7,3 +7,4 @@ 3a0152b4ac6b8f930c493ef357fc5e9d8f4b91b7 pypy-2.6.0 01dbb2d473162ccf7f2efe47888e86429c2498ad pypy-4.0.0 6b11394062835e86ca53bd7ec34d427a0485acd5 pypy-4.0.1 +e49b88557c99c38e7698a6dced70a8739a36ec9b pypy-5.0 From pypy.commits at gmail.com Wed Mar 9 14:32:48 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 09 Mar 2016 11:32:48 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: merge default into branch Message-ID: <56e07a60.6bb8c20a.7652f.07ff@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r82920:48d4c2cf24e3 Date: 2016-03-09 21:18 +0200 http://bitbucket.org/pypy/pypy/changeset/48d4c2cf24e3/ Log: merge default into branch diff too long, truncating to 2000 out of 4911 lines diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -18,3 +18,4 @@ f3ad1e1e1d6215e20d34bb65ab85ff9188c9f559 release-2.6.1 850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0 
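A minimal sketch of the timestamped progress line added to testrunner/runner.py above, so a hanging test run can be lined up against the buildbot clock; the test name below is made up:

    import sys, time

    def log_start(out, test_name, started):
        now = time.strftime('%H:%M:%S')
        out.write("++ %s starting %s [%d started in total]\n"
                  % (now, test_name, started))

    log_start(sys.stdout, 'pypy/module/_cffi_backend', 1)
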
5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1 +246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -41,29 +41,29 @@ Amaury Forgeot d'Arc Antonio Cuni Samuele Pedroni + Matti Picus Alex Gaynor Brian Kearns - Matti Picus Philip Jenvey Michael Hudson + Ronan Lamy David Schneider + Manuel Jacob Holger Krekel Christian Tismer Hakan Ardo - Manuel Jacob - Ronan Lamy Benjamin Peterson + Richard Plangger Anders Chrigstrom Eric van Riet Paap Wim Lavrijsen - Richard Plangger Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen + Remi Meier Lukas Diekmann Sven Hager Anders Lehmann - Remi Meier Aurelien Campeas Niklaus Haldimann Camillo Bruni @@ -72,8 +72,8 @@ Romain Guillebert Leonardo Santagada Seo Sanghyeon + Ronny Pfannschmidt Justin Peel - Ronny Pfannschmidt David Edelsohn Anders Hammarquist Jakub Gustak @@ -95,6 +95,7 @@ Tyler Wade Michael Foord Stephan Diehl + Vincent Legoll Stefan Schwarzer Valentino Volonghi Tomek Meka @@ -105,9 +106,9 @@ Jean-Paul Calderone Timo Paulssen Squeaky + Marius Gedminas Alexandre Fayolle Simon Burton - Marius Gedminas Martin Matusiak Konstantin Lopuhin Wenzhu Man @@ -116,16 +117,20 @@ Ivan Sichmann Freitas Greg Price Dario Bertini + Stefano Rivera Mark Pearse Simon Cross Andreas Stührk - Stefano Rivera + Edd Barrett Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Jeremy Thurgood Paweł Piotr Przeradowski + Spenser Bauman Paul deGrandis Ilya Osadchiy + marky1991 Tobias Oberstein Adrian Kuhn Boris Feigin @@ -134,14 +139,12 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Edd Barrett + Tobias Pape Wanja Saatkamp Gerald Klix Mike Blume - Tobias Pape Oscar Nierstrasz Stefan H. Muller - Jeremy Thurgood Rami Chowdhury Eugene Oden Henry Mason @@ -153,6 +156,8 @@ Lukas Renggli Guenter Jantzen Ned Batchelder + Tim Felgentreff + Anton Gulenko Amit Regmi Ben Young Nicolas Chauvat @@ -162,12 +167,12 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira - Tim Felgentreff + Yichao Yu Rocco Moretti Gintautas Miliauskas Michael Twomey Lucian Branescu Mihaila - Yichao Yu + Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -191,33 +196,33 @@ Stanislaw Halik Mikael Schönenberg Berkin Ilbeyi - Elmo M?ntynen + Elmo Mäntynen + Faye Zhao Jonathan David Riehl Anders Qvist Corbin Simpson Chirag Jadwani Beatrice During Alex Perry - Vincent Legoll + Vaibhav Sood Alan McIntyre - Spenser Bauman + William Leslie Alexander Sedov Attila Gobi + Jasper.Schulz Christopher Pope - Devin Jeanpierre - Vaibhav Sood Christian Tismer Marc Abramowitz Dan Stromberg Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan + Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang - Anton Gulenko Gabriel Lukas Vacek Andrew Dalke @@ -225,6 +230,7 @@ Jakub Stasiak Nathan Taylor Vladimir Kryachko + Omer Katz Jacek Generowicz Alejandro J. Cura Jacob Oscarson @@ -234,11 +240,13 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner + Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann Philipp Rustemeuer Henrik Vendelbo + Richard Lancaster Dan Buch Miguel de Val Borro Artur Lisiecki @@ -250,20 +258,21 @@ Tomo Cocoa Kim Jin Su Toni Mattis + Amber Brown Lucas Stadler Julian Berman Markus Holtermann roberto at goyle Yury V. 
Zaytsev Anna Katrina Dominguez - William Leslie Bobby Impollonia - Faye Zhao timo at eistee.fritz.box Andrew Thompson Yusei Tahara + Aaron Tubbs Ben Darnell Roberto De Ioris + Logan Chien Juan Francisco Cantero Hurtado Ruochen Huang Jeong YunWon @@ -273,6 +282,7 @@ Christopher Armstrong Michael Hudson-Doyle Anders Sigfridsson + Nikolay Zinov Yasir Suhail Jason Michalski rafalgalczynski at gmail.com @@ -282,6 +292,7 @@ Gustavo Niemeyer Stephan Busemann Rafał Gałczyński + Matt Bogosian Christian Muirhead Berker Peksag James Lan @@ -316,9 +327,9 @@ Stefan Marr jiaaro Mads Kiilerich - Richard Lancaster opassembler.py Antony Lee + Jason Madden Yaroslav Fedevych Jim Hunziker Markus Unterwaditzer @@ -327,6 +338,7 @@ squeaky Zearin soareschen + Jonas Pfannschmidt Kurt Griffiths Mike Bayer Matthew Miller diff --git a/lib-python/2.7/xml/etree/ElementTree.py b/lib-python/2.7/xml/etree/ElementTree.py --- a/lib-python/2.7/xml/etree/ElementTree.py +++ b/lib-python/2.7/xml/etree/ElementTree.py @@ -1606,7 +1606,17 @@ pubid = pubid[1:-1] if hasattr(self.target, "doctype"): self.target.doctype(name, pubid, system[1:-1]) - elif self.doctype is not self._XMLParser__doctype: + elif 1: # XXX PyPy fix, used to be + # elif self.doctype is not self._XMLParser__doctype: + # but that condition is always True on CPython, as far + # as I can tell: self._XMLParser__doctype always + # returns a fresh unbound method object. + # On PyPy, unbound and bound methods have stronger + # unicity guarantees: self._XMLParser__doctype + # can return the same unbound method object, in + # some cases making the test above incorrectly False. + # (My guess would be that the line above is a backport + # from Python 3.) # warn about deprecated call self._XMLParser__doctype(name, pubid, system[1:-1]) self.doctype(name, pubid, system[1:-1]) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -170,12 +170,8 @@ cmdline="--translationmodules", suggests=[("objspace.allworkingmodules", False)]), - BoolOption("usepycfiles", "Write and read pyc files when importing", - default=True), - BoolOption("lonepycfiles", "Import pyc files with no matching py file", - default=False, - requires=[("objspace.usepycfiles", True)]), + default=False), StrOption("soabi", "Tag to differentiate extension modules built for different Python interpreters", diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -123,7 +123,7 @@ # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +html_title = 'PyPy documentation' # A shorter title for the navigation bar. Default is the same as html_title. 
#html_short_title = None diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -11,29 +11,29 @@ Amaury Forgeot d'Arc Antonio Cuni Samuele Pedroni + Matti Picus Alex Gaynor Brian Kearns - Matti Picus Philip Jenvey Michael Hudson + Ronan Lamy David Schneider + Manuel Jacob Holger Krekel Christian Tismer Hakan Ardo - Manuel Jacob - Ronan Lamy Benjamin Peterson + Richard Plangger Anders Chrigstrom Eric van Riet Paap Wim Lavrijsen - Richard Plangger Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen + Remi Meier Lukas Diekmann Sven Hager Anders Lehmann - Remi Meier Aurelien Campeas Niklaus Haldimann Camillo Bruni @@ -42,8 +42,8 @@ Romain Guillebert Leonardo Santagada Seo Sanghyeon + Ronny Pfannschmidt Justin Peel - Ronny Pfannschmidt David Edelsohn Anders Hammarquist Jakub Gustak @@ -65,6 +65,7 @@ Tyler Wade Michael Foord Stephan Diehl + Vincent Legoll Stefan Schwarzer Valentino Volonghi Tomek Meka @@ -75,9 +76,9 @@ Jean-Paul Calderone Timo Paulssen Squeaky + Marius Gedminas Alexandre Fayolle Simon Burton - Marius Gedminas Martin Matusiak Konstantin Lopuhin Wenzhu Man @@ -86,16 +87,20 @@ Ivan Sichmann Freitas Greg Price Dario Bertini + Stefano Rivera Mark Pearse Simon Cross Andreas Stührk - Stefano Rivera + Edd Barrett Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Jeremy Thurgood Paweł Piotr Przeradowski + Spenser Bauman Paul deGrandis Ilya Osadchiy + marky1991 Tobias Oberstein Adrian Kuhn Boris Feigin @@ -104,14 +109,12 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Edd Barrett + Tobias Pape Wanja Saatkamp Gerald Klix Mike Blume - Tobias Pape Oscar Nierstrasz Stefan H. Muller - Jeremy Thurgood Rami Chowdhury Eugene Oden Henry Mason @@ -123,6 +126,8 @@ Lukas Renggli Guenter Jantzen Ned Batchelder + Tim Felgentreff + Anton Gulenko Amit Regmi Ben Young Nicolas Chauvat @@ -132,12 +137,12 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira - Tim Felgentreff + Yichao Yu Rocco Moretti Gintautas Miliauskas Michael Twomey Lucian Branescu Mihaila - Yichao Yu + Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -161,33 +166,33 @@ Stanislaw Halik Mikael Schönenberg Berkin Ilbeyi - Elmo M?ntynen + Elmo Mäntynen + Faye Zhao Jonathan David Riehl Anders Qvist Corbin Simpson Chirag Jadwani Beatrice During Alex Perry - Vincent Legoll + Vaibhav Sood Alan McIntyre - Spenser Bauman + William Leslie Alexander Sedov Attila Gobi + Jasper.Schulz Christopher Pope - Devin Jeanpierre - Vaibhav Sood Christian Tismer Marc Abramowitz Dan Stromberg Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan + Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang - Anton Gulenko Gabriel Lukas Vacek Andrew Dalke @@ -195,6 +200,7 @@ Jakub Stasiak Nathan Taylor Vladimir Kryachko + Omer Katz Jacek Generowicz Alejandro J. Cura Jacob Oscarson @@ -204,11 +210,13 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner + Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann Philipp Rustemeuer Henrik Vendelbo + Richard Lancaster Dan Buch Miguel de Val Borro Artur Lisiecki @@ -220,20 +228,21 @@ Tomo Cocoa Kim Jin Su Toni Mattis + Amber Brown Lucas Stadler Julian Berman Markus Holtermann roberto at goyle Yury V. 
Zaytsev Anna Katrina Dominguez - William Leslie Bobby Impollonia - Faye Zhao timo at eistee.fritz.box Andrew Thompson Yusei Tahara + Aaron Tubbs Ben Darnell Roberto De Ioris + Logan Chien Juan Francisco Cantero Hurtado Ruochen Huang Jeong YunWon @@ -243,6 +252,7 @@ Christopher Armstrong Michael Hudson-Doyle Anders Sigfridsson + Nikolay Zinov Yasir Suhail Jason Michalski rafalgalczynski at gmail.com @@ -252,6 +262,7 @@ Gustavo Niemeyer Stephan Busemann Rafał Gałczyński + Matt Bogosian Christian Muirhead Berker Peksag James Lan @@ -286,9 +297,9 @@ Stefan Marr jiaaro Mads Kiilerich - Richard Lancaster opassembler.py Antony Lee + Jason Madden Yaroslav Fedevych Jim Hunziker Markus Unterwaditzer @@ -297,6 +308,7 @@ squeaky Zearin soareschen + Jonas Pfannschmidt Kurt Griffiths Mike Bayer Matthew Miller @@ -311,4 +323,3 @@ Julien Phalip Roman Podoliaka Dan Loewenherz - diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -265,7 +265,7 @@ return False def evil(y): - d = {x(): 1} + d = {X(): 1} X.__eq__ = __evil_eq__ d[y] # might trigger a call to __eq__? diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -1,5 +1,20 @@ -Making a PyPy Release -===================== +The PyPy Release Process +======================== + +Release Policy +++++++++++++++ + +We try to create a stable release a few times a year. These are released on +a branch named like release-2.x or release-4.x, and each release is tagged, +for instance release-4.0.1. + +After release, inevitably there are bug fixes. It is the responsibility of +the commiter who fixes a bug to make sure this fix is on the release branch, +so that we can then create a tagged bug-fix release, which will hopefully +happen more often than stable releases. + +How to Create a PyPy Release +++++++++++++++++++++++++++++ Overview -------- diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-5.0.0.rst release-4.0.1.rst release-4.0.0.rst release-2.6.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. toctree:: whatsnew-head.rst + whatsnew-5.0.0.rst whatsnew-4.0.1.rst whatsnew-4.0.0.rst whatsnew-2.6.1.rst diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-5.0.0.rst @@ -0,0 +1,230 @@ +======== +PyPy 5.0 +======== + +We have released PyPy 5.0, about three months after PyPy 4.0.1. +We encourage all users of PyPy to update to this version. Apart from the usual +bug fixes, there is an ongoing effort to improve the warmup time and memory +usage of JIT-related metadata. The exact effects depend vastly on the program +you're running and can range from insignificant to warmup being up to 30% +faster and memory dropping by about 30%. + +We also merged a major upgrade to our C-API layer (cpyext), simplifying the +interaction between c-level objects and PyPy interpreter level objects. As a +result, lxml (prerelease) with its cython compiled component +`passes all tests`_ on PyPy. The new cpyext is also much faster. 
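As a purely illustrative sketch of what the cpyext upgrade is aiming at, the snippet below assumes the prerelease lxml build mentioned above is already installed; the point is that the same compiled C extension is expected to import and behave the same way on CPython 2.7 and on PyPy 5.0 (the XML content is arbitrary):

    import sys
    print sys.version                 # identifies which interpreter is running
    import lxml.etree as etree        # a large C extension built against the C-API
    root = etree.fromstring("<doc><item/></doc>")
    assert root.tag == "doc"
    print etree.tostring(root)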
+ +vmprof_ has been a go-to profiler for PyPy on linux for a few releases +and we're happy to announce that thanks to the cooperation with jetbrains, +vmprof now works on Linux, OS X and Windows on both PyPy and CPython. + +You can download the PyPy 5.0 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + +CFFI +==== + +While not applicable only to PyPy, `cffi`_ is arguably our most significant +contribution to the python ecosystem. PyPy 5.0 ships with +`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a C program. + +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`cffi`: https://cffi.readthedocs.org +.. _`cffi-1.5.2`: http://cffi.readthedocs.org/en/latest/whatsnew.html#v1-5-2 +.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html +.. _`numpy`: https://bitbucket.org/pypy/numpy +.. _`passes all tests`: https://bitbucket.org/pypy/compatibility/wiki/lxml +.. _vmprof: http://vmprof.readthedocs.org + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. + +This release supports **x86** machines on most common operating systems +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), +newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the +big- and little-endian variants of **PPC64** running Linux. + +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Other Highlights (since 4.0.1 released in November 2015) +========================================================= + +* New features: + + * Support embedding PyPy in a C-program via cffi and static callbacks in cffi. + This deprecates the old method of embedding PyPy + + * Refactor vmprof to work cross-operating-system, deprecate using buggy + libunwind on Linux platforms. Vmprof even works on Windows now. + + * Support more of the C-API type slots, like tp_getattro, and fix C-API + macros, functions, and structs such as _PyLong_FromByteArray(), + PyString_GET_SIZE, f_locals in PyFrameObject, Py_NAN, co_filename in + PyCodeObject + + * Use a more stable approach for allocating PyObjects in cpyext. (see + `blog post`_). Once the PyObject corresponding to a PyPy object is created, + it stays around at the same location until the death of the PyPy object. + Done with a little bit of custom GC support. 
It allows us to kill the + notion of "borrowing" inside cpyext, reduces 4 dictionaries down to 1, and + significantly simplifies the whole approach (which is why it is a new + feature while technically a refactoring) and allows PyPy to support the + popular lxml module (as of the *next* release) with no PyPy specific + patches needed + + * Make the default filesystem encoding ASCII, like CPython + + * Use `hypothesis`_ in test creation, which is great for randomizing tests + +* Bug Fixes + + * Backport always using os.urandom for uuid4 from cpython and fix the JIT as well + (issue #2202) + + * More completely support datetime, optimize timedelta creation + + * Fix for issue #2185 which caused an inconsistent list of operations to be + generated by the unroller, appeared in a complicated Django app + + * Fix an elusive issue with stacklets on shadowstack which showed up when + forgetting stacklets without resuming them + + * Fix entrypoint() which now acquires the GIL + + * Fix direct_ffi_call() so failure does not bail out before setting CALL_MAY_FORCE + + * Fix (de)pickling long values by simplifying the implementation + + * Fix RPython rthread so that objects stored as threadlocal do not force minor + GC collection and are kept alive automatically. This improves performance of + short-running Python callbacks and prevents resetting such object between + calls + + * Support floats as parameters to itertools.islice() + + * Check for the existence of CODESET, ignoring it should have prevented PyPy + from working on FreeBSD + + * Fix for corner case (likely shown by Krakatau) for consecutive guards with + interdependencies + + * Fix applevel bare class method comparisons which should fix pretty printing + in IPython + + * Issues reported with our previous release were resolved_ after reports from users on + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy + +* Numpy: + + * Updates to numpy 1.10.2 (incompatibilities and not-implemented features + still exist) + + * Support dtype=(('O', spec)) union while disallowing record arrays with + mixed object, non-object values + + * Remove all traces of micronumpy from cpyext if --withoutmod-micronumpy option used + + * Support indexing filtering with a boolean ndarray + + * Support partition() as an app-level function, together with a cffi wrapper + in pypy/numpy, this now provides partial support for partition() + +* Performance improvements: + + * Optimize global lookups + + * Improve the memory signature of numbering instances in the JIT. This should + massively decrease the amount of memory consumed by the JIT, which is + significant for most programs. Also compress the numberings using variable- + size encoding + + * Optimize string concatenation + + * Use INT_LSHIFT instead of INT_MUL when possible + + * Improve struct.unpack by casting directly from the underlying buffer. + Unpacking floats and doubles is about 15 times faster, and integer types + about 50% faster (on 64 bit integers). This was then subsequently + improved further in optimizeopt.py.
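The struct.unpack item above is easy to try on any build; a minimal sketch of the code path that got faster (the format string and values here are arbitrary):

    import struct
    # pack four doubles and two 64-bit integers into a plain string buffer
    payload = struct.pack("<4d2q", 1.0, 2.0, 3.0, 4.0, 5, 6)
    # unpacking casts directly from the underlying buffer on PyPy 5.0,
    # which is the fast path described in the bullet above
    values = struct.unpack("<4d2q", payload)
    assert values == (1.0, 2.0, 3.0, 4.0, 5, 6)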
+ + * Optimize two-tuple lookups in mapdict, which improves warmup of instance + variable access somewhat + + * Reduce all guards from int_floordiv_ovf if one of the arguments is constant + + * Identify permutations of attributes at instance creation, reducing the + number of bridges created + + * Greatly improve re.sub() performance + + +* Internal refactorings: + + * Refactor and improve exception analysis in the annotator + + * Remove unnecessary special handling of space.wrap(). + + * Support list-resizing setslice operations in RPython + + * Tweak the trace-too-long heuristic for multiple jit drivers + + * Refactor bookkeeping (such a cool word - three double letters) in the + annotator + + * Refactor wrappers for OS functions from rtyper to rlib and simplify them + + * Simplify backend loading instructions to only use four variants + + * Simplify GIL handling in non-jitted code + + * Refactor naming in optimizeopt + + * Change GraphAnalyzer to use a more precise way to recognize external + functions and fix null pointer handling, generally clean up external + function handling + + * Remove pure variants of ``getfield_gc_*`` operations from the JIT by + determining purity while tracing + + * Refactor databasing + + * Simplify bootstrapping in cpyext + + * Refactor rtyper debug code into rpython.rtyper.debug + + * Separate structmember.h from Python.h. Also enhance creating api functions + to specify which header file they appear in (previously only pypy_decl.h) + + * Fix tokenizer to enforce universal newlines, needed for Python 3 support + +.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html +.. _`hypothesis`: http://hypothesis.readthedocs.org +.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html + +Please update, and continue to help us make PyPy better. + +Cheers + +The PyPy Team + diff --git a/pypy/doc/tool/makecontributor.py b/pypy/doc/tool/makecontributor.py --- a/pypy/doc/tool/makecontributor.py +++ b/pypy/doc/tool/makecontributor.py @@ -72,6 +72,7 @@ 'Anton Gulenko':['anton gulenko', 'anton_gulenko'], 'Richard Lancaster':['richardlancaster'], 'William Leslie':['William ML Leslie'], + 'Spenser Bauman':['Spenser Andrew Bauman'], } alias_map = {} diff --git a/pypy/doc/whatsnew-5.0.0.rst b/pypy/doc/whatsnew-5.0.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-5.0.0.rst @@ -0,0 +1,197 @@ +====================== +What's new in PyPy 5.0 +====================== + +.. this is a revision shortly after release-4.0.1 +.. startrev: 4b5c840d0da2 + +Fixed ``_PyLong_FromByteArray()``, which was buggy. + +Fixed a crash with stacklets (or greenlets) on non-Linux machines +which showed up if you forget stacklets without resuming them. + +.. branch: numpy-1.10 + +Fix tests to run cleanly with -A and start to fix micronumpy for upstream numpy +which is now 1.10.2 + +.. branch: osx-flat-namespace + +Fix the cpyext tests on OSX by linking with -flat_namespace + +.. branch: anntype + +Refactor and improve exception analysis in the annotator. + +.. branch: posita/2193-datetime-timedelta-integrals + +Fix issue #2193. ``isinstance(..., int)`` => ``isinstance(..., numbers.Integral)`` +to allow for alternate ``int``-like implementations (e.g., ``future.types.newint``) + +.. branch: faster-rstruct + +Improve the performance of struct.unpack, which now directly reads inside the +string buffer and directly casts the bytes to the appropriate type, when +allowed.
Unpacking of floats and doubles is about 15 times faster now, while +for integer types it's up to ~50% faster for 64bit integers. + +.. branch: wrap-specialisation + +Remove unnecessary special handling of space.wrap(). + +.. branch: compress-numbering + +Improve the memory signature of numbering instances in the JIT. This should massively +decrease the amount of memory consumed by the JIT, which is significant for most programs. + +.. branch: fix-trace-too-long-heuristic + +Improve the heuristic when disable trace-too-long + +.. branch: fix-setslice-can-resize + +Make rlist's ll_listsetslice() able to resize the target list to help +simplify objspace/std/listobject.py. Was issue #2196. + +.. branch: anntype2 + +A somewhat random bunch of changes and fixes following up on branch 'anntype'. Highlights: + +- Implement @doubledispatch decorator and use it for intersection() and difference(). + +- Turn isinstance into a SpaceOperation + +- Create a few direct tests of the fundamental annotation invariant in test_model.py + +- Remove bookkeeper attribute from DictDef and ListDef. + +.. branch: cffi-static-callback + +.. branch: vecopt-absvalue + +- Enhancement. Removed vector fields from AbstractValue. + +.. branch: memop-simplify2 + +Simplification. Backends implement too many loading instructions, only having a slightly different interface. +Four new operations (gc_load/gc_load_indexed, gc_store/gc_store_indexed) replace all the +commonly known loading operations + +.. branch: more-rposix + +Move wrappers for OS functions from `rpython/rtyper` to `rpython/rlib` and +turn them into regular RPython functions. Most RPython-compatible `os.*` +functions are now directly accessible as `rpython.rposix.*`. + +.. branch: always-enable-gil + +Simplify a bit the GIL handling in non-jitted code. Fixes issue #2205. + +.. branch: flowspace-cleanups + +Trivial cleanups in flowspace.operation : fix comment & duplicated method + +.. branch: test-AF_NETLINK + +Add a test for pre-existing AF_NETLINK support. Was part of issue #1942. + +.. branch: small-cleanups-misc + +Trivial misc cleanups: typo, whitespace, obsolete comments + +.. branch: cpyext-slotdefs +.. branch: fix-missing-canraise +.. branch: whatsnew + +.. branch: fix-2211 + +Fix the cryptic exception message when attempting to use extended slicing +in rpython. Was issue #2211. + +.. branch: ec-keepalive + +Optimize the case where, in a new C-created thread, we keep invoking +short-running Python callbacks. (CFFI on CPython has a hack to achieve +the same result.) This can also be seen as a bug fix: previously, +thread-local objects would be reset between two such calls. + +.. branch: globals-quasiimmut + +Optimize global lookups. + +.. branch: cffi-static-callback-embedding + +Updated to CFFI 1.5, which supports a new way to do embedding. +Deprecates http://pypy.readthedocs.org/en/latest/embedding.html. + +.. branch: fix-cpython-ssl-tests-2.7 + +Fix SSL tests by importing cpython's patch + + +.. branch: remove-getfield-pure + +Remove pure variants of ``getfield_gc_*`` operations from the JIT. Relevant +optimizations instead consult the field descriptor to determine the purity of +the operation. Additionally, pure ``getfield`` operations are now handled +entirely by `rpython/jit/metainterp/optimizeopt/heap.py` rather than +`rpython/jit/metainterp/optimizeopt/pure.py`, which can result in better codegen +for traces containing a large number of pure getfield operations. + +.. 
branch: exctrans + +Try to ensure that no new functions get annotated during the 'source_c' phase. +Refactor sandboxing to operate at a higher level. + +.. branch: cpyext-bootstrap + +.. branch: vmprof-newstack + +Refactor vmprof to work cross-operating-system. + +.. branch: seperate-strucmember_h + +Seperate structmember.h from Python.h Also enhance creating api functions +to specify which header file they appear in (previously only pypy_decl.h) + +.. branch: llimpl + +Refactor register_external(), remove running_on_llinterp mechanism and +apply sandbox transform on externals at the end of annotation. + +.. branch: cffi-embedding-win32 + +.. branch: windows-vmprof-support + +vmprof should work on Windows. + + +.. branch: reorder-map-attributes + +When creating instances and adding attributes in several different orders +depending on some condition, the JIT would create too much code. This is now +fixed. + +.. branch: cpyext-gc-support-2 + +Improve CPython C API support, which means lxml now runs unmodified +(after removing pypy hacks, pending pull request) + +.. branch: look-inside-tuple-hash + +Look inside tuple hash, improving mdp benchmark + +.. branch: vlen-resume + +Compress resume data, saving 10-20% of memory consumed by the JIT + +.. branch: issue-2248 + +.. branch: ndarray-setitem-filtered + +Fix boolean-array indexing in micronumpy + +.. branch: numpy_partition +Support ndarray.partition() as an app-level function numpy.core._partition_use, +provided as a cffi wrapper to upstream's implementation in the pypy/numpy repo + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,173 +1,7 @@ ========================= -What's new in PyPy 4.1.+ +What's new in PyPy 5.0.+ ========================= -.. this is a revision shortly after release-4.0.1 -.. startrev: 4b5c840d0da2 +.. this is a revision shortly after release-5.0 +.. startrev: b238b48f9138 -Fixed ``_PyLong_FromByteArray()``, which was buggy. - -Fixed a crash with stacklets (or greenlets) on non-Linux machines -which showed up if you forget stacklets without resuming them. - -.. branch: numpy-1.10 - -Fix tests to run cleanly with -A and start to fix micronumpy for upstream numpy -which is now 1.10.2 - -.. branch: osx-flat-namespace - -Fix the cpyext tests on OSX by linking with -flat_namespace - -.. branch: anntype - -Refactor and improve exception analysis in the annotator. - -.. branch: posita/2193-datetime-timedelta-integrals - -Fix issue #2193. ``isinstance(..., int)`` => ``isinstance(..., numbers.Integral)`` -to allow for alternate ``int``-like implementations (e.g., ``future.types.newint``) - -.. branch: faster-rstruct - -Improve the performace of struct.unpack, which now directly reads inside the -string buffer and directly casts the bytes to the appropriate type, when -allowed. Unpacking of floats and doubles is about 15 times faster now, while -for integer types it's up to ~50% faster for 64bit integers. - -.. branch: wrap-specialisation - -Remove unnecessary special handling of space.wrap(). - -.. branch: compress-numbering - -Improve the memory signature of numbering instances in the JIT. This should massively -decrease the amount of memory consumed by the JIT, which is significant for most programs. - -.. branch: fix-trace-too-long-heuristic - -Improve the heuristic when disable trace-too-long - -.. 
branch: fix-setslice-can-resize - -Make rlist's ll_listsetslice() able to resize the target list to help -simplify objspace/std/listobject.py. Was issue #2196. - -.. branch: anntype2 - -A somewhat random bunch of changes and fixes following up on branch 'anntype'. Highlights: - -- Implement @doubledispatch decorator and use it for intersection() and difference(). - -- Turn isinstance into a SpaceOperation - -- Create a few direct tests of the fundamental annotation invariant in test_model.py - -- Remove bookkeeper attribute from DictDef and ListDef. - -.. branch: cffi-static-callback - -.. branch: vecopt-absvalue - -- Enhancement. Removed vector fields from AbstractValue. - -.. branch: memop-simplify2 - -Simplification. Backends implement too many loading instructions, only having a slightly different interface. -Four new operations (gc_load/gc_load_indexed, gc_store/gc_store_indexed) replace all the -commonly known loading operations - -.. branch: more-rposix - -Move wrappers for OS functions from `rpython/rtyper` to `rpython/rlib` and -turn them into regular RPython functions. Most RPython-compatible `os.*` -functions are now directly accessible as `rpython.rposix.*`. - -.. branch: always-enable-gil - -Simplify a bit the GIL handling in non-jitted code. Fixes issue #2205. - -.. branch: flowspace-cleanups - -Trivial cleanups in flowspace.operation : fix comment & duplicated method - -.. branch: test-AF_NETLINK - -Add a test for pre-existing AF_NETLINK support. Was part of issue #1942. - -.. branch: small-cleanups-misc - -Trivial misc cleanups: typo, whitespace, obsolete comments - -.. branch: cpyext-slotdefs -.. branch: fix-missing-canraise -.. branch: whatsnew - -.. branch: fix-2211 - -Fix the cryptic exception message when attempting to use extended slicing -in rpython. Was issue #2211. - -.. branch: ec-keepalive - -Optimize the case where, in a new C-created thread, we keep invoking -short-running Python callbacks. (CFFI on CPython has a hack to achieve -the same result.) This can also be seen as a bug fix: previously, -thread-local objects would be reset between two such calls. - -.. branch: globals-quasiimmut - -Optimize global lookups. - -.. branch: cffi-static-callback-embedding - -Updated to CFFI 1.5, which supports a new way to do embedding. -Deprecates http://pypy.readthedocs.org/en/latest/embedding.html. - -.. branch: fix-cpython-ssl-tests-2.7 - -Fix SSL tests by importing cpython's patch - - -.. branch: remove-getfield-pure - -Remove pure variants of ``getfield_gc_*`` operations from the JIT. Relevant -optimizations instead consult the field descriptor to determine the purity of -the operation. Additionally, pure ``getfield`` operations are now handled -entirely by `rpython/jit/metainterp/optimizeopt/heap.py` rather than -`rpython/jit/metainterp/optimizeopt/pure.py`, which can result in better codegen -for traces containing a large number of pure getfield operations. - -.. branch: exctrans - -Try to ensure that no new functions get annotated during the 'source_c' phase. -Refactor sandboxing to operate at a higher level. - -.. branch: cpyext-bootstrap - -.. branch: vmprof-newstack - -Refactor vmprof to work cross-operating-system. - -.. branch: seperate-strucmember_h - -Seperate structmember.h from Python.h Also enhance creating api functions -to specify which header file they appear in (previously only pypy_decl.h) - -.. branch: llimpl - -Refactor register_external(), remove running_on_llinterp mechanism and -apply sandbox transform on externals at the end of annotation. - -.. 
branch: cffi-embedding-win32 - -.. branch: windows-vmprof-support - -vmprof should work on Windows. - - -.. branch: reorder-map-attributes - -When creating instances and adding attributes in several different orders -depending on some condition, the JIT would create too much code. This is now -fixed. \ No newline at end of file diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -277,7 +277,6 @@ if config.translation.sandbox: config.objspace.lonepycfiles = False - config.objspace.usepycfiles = False config.translating = True diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -13,6 +13,9 @@ from pypy.interpreter.argument import Arguments from rpython.rlib import jit +from rpython.rlib.rarithmetic import LONG_BIT +from rpython.rlib.rbigint import rbigint + funccallunrolling = unrolling_iterable(range(4)) @@ -557,6 +560,26 @@ return space.w_False return space.eq(self.w_function, w_other.w_function) + def is_w(self, space, other): + if not isinstance(other, Method): + return False + return (self.w_instance is other.w_instance and + self.w_function is other.w_function and + self.w_class is other.w_class) + + def immutable_unique_id(self, space): + from pypy.objspace.std.util import IDTAG_METHOD as tag + from pypy.objspace.std.util import IDTAG_SHIFT + if self.w_instance is not None: + id = space.bigint_w(space.id(self.w_instance)) + id = id.lshift(LONG_BIT) + else: + id = rbigint.fromint(0) + id = id.or_(space.bigint_w(space.id(self.w_function))) + id = id.lshift(LONG_BIT).or_(space.bigint_w(space.id(self.w_class))) + id = id.lshift(IDTAG_SHIFT).int_or_(tag) + return space.newlong_from_rbigint(id) + def descr_method_hash(self): space = self.space w_result = space.hash(self.w_function) diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py --- a/pypy/interpreter/mixedmodule.py +++ b/pypy/interpreter/mixedmodule.py @@ -3,7 +3,7 @@ from pypy.interpreter import gateway from pypy.interpreter.error import OperationError from pypy.interpreter.baseobjspace import W_Root -import os, sys +import sys class MixedModule(Module): applevel_name = None @@ -60,7 +60,7 @@ def save_module_content_for_future_reload(self): self.w_initialdict = self.space.call_method(self.w_dict, 'items') - + @classmethod def get_applevel_name(cls): """ NOT_RPYTHON """ if cls.applevel_name is not None: @@ -68,7 +68,6 @@ else: pkgroot = cls.__module__ return pkgroot.split('.')[-1] - get_applevel_name = classmethod(get_applevel_name) def get(self, name): space = self.space @@ -103,7 +102,7 @@ # be normal Functions to get the correct binding behaviour func = w_value if (isinstance(func, Function) and - type(func) is not BuiltinFunction): + type(func) is not BuiltinFunction): try: bltin = func._builtinversion_ except AttributeError: @@ -115,7 +114,6 @@ space.setitem(self.w_dict, w_name, w_value) return w_value - def getdict(self, space): if self.lazy: for name in self.loaders: @@ -131,6 +129,7 @@ self.startup_called = False self._frozen = True + @classmethod def buildloaders(cls): """ NOT_RPYTHON """ if not hasattr(cls, 'loaders'): @@ -149,8 +148,6 @@ if '__doc__' not in loaders: loaders['__doc__'] = cls.get__doc__ - buildloaders = classmethod(buildloaders) - def extra_interpdef(self, name, spec): cls = self.__class__ pkgroot = cls.__module__ @@ -159,21 +156,21 @@ w_obj = loader(space) 
space.setattr(space.wrap(self), space.wrap(name), w_obj) + @classmethod def get__doc__(cls, space): return space.wrap(cls.__doc__) - get__doc__ = classmethod(get__doc__) def getinterpevalloader(pkgroot, spec): """ NOT_RPYTHON """ def ifileloader(space): - d = {'space' : space} + d = {'space':space} # EVIL HACK (but it works, and this is not RPython :-) while 1: try: value = eval(spec, d) except NameError, ex: - name = ex.args[0].split("'")[1] # super-Evil + name = ex.args[0].split("'")[1] # super-Evil if name in d: raise # propagate the NameError try: diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -133,7 +133,7 @@ self.check(['-S', '-tO', '--info'], {}, output_contains='translation') self.check(['-S', '-tO', '--version'], {}, output_contains='Python') self.check(['-S', '-tOV'], {}, output_contains='Python') - self.check(['--jit', 'foobar', '-S'], {}, sys_argv=[''], + self.check(['--jit', 'off', '-S'], {}, sys_argv=[''], run_stdin=True, no_site=1) self.check(['-c', 'pass'], {}, sys_argv=['-c'], run_command='pass') self.check(['-cpass'], {}, sys_argv=['-c'], run_command='pass') diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -1,5 +1,4 @@ - -import unittest +import pytest from pypy.interpreter import eval from pypy.interpreter.function import Function, Method, descr_function_get from pypy.interpreter.pycode import PyCode @@ -187,6 +186,7 @@ raises( TypeError, func, 42, {'arg1': 23}) + @pytest.mark.skipif("config.option.runappdirect") def test_kwargs_nondict_mapping(self): class Mapping: def keys(self): @@ -257,6 +257,14 @@ meth = func.__get__(obj, object) assert meth() == obj + def test_none_get_interaction(self): + skip("XXX issue #2083") + assert type(None).__repr__(None) == 'None' + + def test_none_get_interaction_2(self): + f = None.__repr__ + assert f() == 'None' + def test_no_get_builtin(self): assert not hasattr(dir, '__get__') class A(object): @@ -284,6 +292,7 @@ raises(TypeError, len, s, some_unknown_keyword=s) raises(TypeError, len, s, s, some_unknown_keyword=s) + @pytest.mark.skipif("config.option.runappdirect") def test_call_error_message(self): try: len() @@ -325,6 +334,7 @@ f = lambda: 42 assert f.func_doc is None + @pytest.mark.skipif("config.option.runappdirect") def test_setstate_called_with_wrong_args(self): f = lambda: 42 # not sure what it should raise, since CPython doesn't have setstate @@ -550,6 +560,37 @@ assert A().m == X() assert X() == A().m + @pytest.mark.skipif("config.option.runappdirect") + def test_method_identity(self): + class A(object): + def m(self): + pass + def n(self): + pass + + class B(A): + pass + + class X(object): + def __eq__(self, other): + return True + + a = A() + a2 = A() + assert a.m is a.m + assert id(a.m) == id(a.m) + assert a.m is not a.n + assert id(a.m) != id(a.n) + assert a.m is not a2.m + assert id(a.m) != id(a2.m) + + assert A.m is A.m + assert id(A.m) == id(A.m) + assert A.m is not A.n + assert id(A.m) != id(A.n) + assert A.m is not B.m + assert id(A.m) != id(B.m) + class TestMethod: def setup_method(self, method): diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py --- a/pypy/module/__builtin__/descriptor.py +++ b/pypy/module/__builtin__/descriptor.py @@ -79,6 +79,7 @@ W_Super.typedef = TypeDef( 'super', __new__ = 
interp2app(descr_new_super), + __thisclass__ = interp_attrproperty_w("w_starttype", W_Super), __getattribute__ = interp2app(W_Super.getattribute), __get__ = interp2app(W_Super.get), __doc__ = """super(type) -> unbound super object diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -452,7 +452,6 @@ assert a + 1 == 2 assert a + 1.1 == 2 - def test_binaryop_calls_coerce_always(self): l = [] class A: @@ -1076,6 +1075,16 @@ assert (D() > A()) == 'D:A.gt' assert (D() >= A()) == 'D:A.ge' + def test_override___int__(self): + class F(float): + def __int__(self): + return 666 + f = F(-12.3) + assert int(f) == 666 + # on cpython, this calls float_trunc() in floatobject.c + # which ends up calling PyFloat_AS_DOUBLE((PyFloatObject*) f) + assert float.__int__(f) == -12 + class AppTestOldStyleClassBytesDict(object): def setup_class(cls): diff --git a/pypy/module/__builtin__/test/test_descriptor.py b/pypy/module/__builtin__/test/test_descriptor.py --- a/pypy/module/__builtin__/test/test_descriptor.py +++ b/pypy/module/__builtin__/test/test_descriptor.py @@ -214,7 +214,7 @@ c = C() assert C.goo(1) == (C, 1) assert c.goo(1) == (C, 1) - + assert c.foo(1) == (c, 1) class D(C): pass @@ -238,6 +238,17 @@ meth = classmethod(1).__get__(1) raises(TypeError, meth) + def test_super_thisclass(self): + class A(object): + pass + + assert super(A, A()).__thisclass__ is A + + class B(A): + pass + + assert super(B, B()).__thisclass__ is B + assert super(A, B()).__thisclass__ is A def test_property_docstring(self): assert property.__doc__.startswith('property') diff --git a/pypy/module/_collections/app_defaultdict.py b/pypy/module/_collections/app_defaultdict.py --- a/pypy/module/_collections/app_defaultdict.py +++ b/pypy/module/_collections/app_defaultdict.py @@ -12,6 +12,7 @@ class defaultdict(dict): __slots__ = ['default_factory'] + __module__ = 'collections' def __init__(self, *args, **kwds): if len(args) > 0: diff --git a/pypy/module/_collections/test/test_defaultdict.py b/pypy/module/_collections/test/test_defaultdict.py --- a/pypy/module/_collections/test/test_defaultdict.py +++ b/pypy/module/_collections/test/test_defaultdict.py @@ -14,6 +14,12 @@ d[5].append(44) assert l == [42, 43] and l2 == [44] + def test_module(self): + from _collections import defaultdict + assert repr(defaultdict) in ( + "", # on PyPy + "") # on CPython + def test_keyerror_without_factory(self): from _collections import defaultdict for d1 in [defaultdict(), defaultdict(None)]: diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -7,6 +7,7 @@ from pypy.interpreter.error import OperationError from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit +from rpython.rlib.rstring import StringBuilder, UnicodeBuilder # ____________________________________________________________ # @@ -36,6 +37,8 @@ if 0 <= start <= end: if isinstance(ctx, rsre_core.BufMatchContext): return space.wrap(ctx._buffer.getslice(start, end, 1, end-start)) + if isinstance(ctx, rsre_core.StrMatchContext): + return space.wrap(ctx._string[start:end]) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr[start:end]) else: @@ -98,8 +101,8 @@ space.wrap("cannot copy this pattern object")) def make_ctx(self, w_string, pos=0, endpos=sys.maxint): - """Make a BufMatchContext 
or a UnicodeMatchContext for searching - in the given w_string object.""" + """Make a StrMatchContext, BufMatchContext or a UnicodeMatchContext for + searching in the given w_string object.""" space = self.space if pos < 0: pos = 0 @@ -113,6 +116,14 @@ endpos = len(unicodestr) return rsre_core.UnicodeMatchContext(self.code, unicodestr, pos, endpos, self.flags) + elif space.isinstance_w(w_string, space.w_str): + str = space.str_w(w_string) + if pos > len(str): + pos = len(str) + if endpos > len(str): + endpos = len(str) + return rsre_core.StrMatchContext(self.code, str, + pos, endpos, self.flags) else: buf = space.readbuf_w(w_string) size = buf.getlength() @@ -216,6 +227,11 @@ def subx(self, w_ptemplate, w_string, count): space = self.space + # use a (much faster) string/unicode builder if w_ptemplate and + # w_string are both string or both unicode objects, and if w_ptemplate + # is a literal + use_builder = False + filter_as_unicode = filter_as_string = None if space.is_true(space.callable(w_ptemplate)): w_filter = w_ptemplate filter_is_callable = True @@ -223,6 +239,8 @@ if space.isinstance_w(w_ptemplate, space.w_unicode): filter_as_unicode = space.unicode_w(w_ptemplate) literal = u'\\' not in filter_as_unicode + use_builder = ( + space.isinstance_w(w_string, space.w_unicode) and literal) else: try: filter_as_string = space.str_w(w_ptemplate) @@ -232,6 +250,8 @@ literal = False else: literal = '\\' not in filter_as_string + use_builder = ( + space.isinstance_w(w_string, space.w_str) and literal) if literal: w_filter = w_ptemplate filter_is_callable = False @@ -242,19 +262,44 @@ space.wrap(self), w_ptemplate) filter_is_callable = space.is_true(space.callable(w_filter)) # + # XXX this is a bit of a mess, but it improves performance a lot ctx = self.make_ctx(w_string) - sublist_w = [] + sublist_w = strbuilder = unicodebuilder = None + if use_builder: + if filter_as_unicode is not None: + unicodebuilder = UnicodeBuilder(ctx.end) + else: + assert filter_as_string is not None + strbuilder = StringBuilder(ctx.end) + else: + sublist_w = [] n = last_pos = 0 while not count or n < count: + sub_jitdriver.jit_merge_point( + self=self, + use_builder=use_builder, + filter_is_callable=filter_is_callable, + filter_type=type(w_filter), + ctx=ctx, + w_filter=w_filter, + strbuilder=strbuilder, + unicodebuilder=unicodebuilder, + filter_as_string=filter_as_string, + filter_as_unicode=filter_as_unicode, + count=count, + w_string=w_string, + n=n, last_pos=last_pos, sublist_w=sublist_w + ) + space = self.space if not searchcontext(space, ctx): break if last_pos < ctx.match_start: - sublist_w.append(slice_w(space, ctx, last_pos, - ctx.match_start, space.w_None)) + _sub_append_slice( + ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, last_pos, ctx.match_start) start = ctx.match_end if start == ctx.match_start: start += 1 - nextctx = ctx.fresh_copy(start) if not (last_pos == ctx.match_start == ctx.match_end and n > 0): # the above ignores empty matches on latest position @@ -262,28 +307,71 @@ w_match = self.getmatch(ctx, True) w_piece = space.call_function(w_filter, w_match) if not space.is_w(w_piece, space.w_None): + assert strbuilder is None and unicodebuilder is None + assert not use_builder sublist_w.append(w_piece) else: - sublist_w.append(w_filter) + if use_builder: + if strbuilder is not None: + assert filter_as_string is not None + strbuilder.append(filter_as_string) + else: + assert unicodebuilder is not None + assert filter_as_unicode is not None + 
unicodebuilder.append(filter_as_unicode) + else: + sublist_w.append(w_filter) last_pos = ctx.match_end n += 1 elif last_pos >= ctx.end: break # empty match at the end: finished - ctx = nextctx + ctx.reset(start) if last_pos < ctx.end: - sublist_w.append(slice_w(space, ctx, last_pos, ctx.end, - space.w_None)) + _sub_append_slice(ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, last_pos, ctx.end) + if use_builder: + if strbuilder is not None: + return space.wrap(strbuilder.build()), n + else: + assert unicodebuilder is not None + return space.wrap(unicodebuilder.build()), n + else: + if space.isinstance_w(w_string, space.w_unicode): + w_emptystr = space.wrap(u'') + else: + w_emptystr = space.wrap('') + w_item = space.call_method(w_emptystr, 'join', + space.newlist(sublist_w)) + return w_item, n - if space.isinstance_w(w_string, space.w_unicode): - w_emptystr = space.wrap(u'') - else: - w_emptystr = space.wrap('') - w_item = space.call_method(w_emptystr, 'join', - space.newlist(sublist_w)) - return w_item, n +sub_jitdriver = jit.JitDriver( + reds="""count n last_pos + ctx w_filter + strbuilder unicodebuilder + filter_as_string + filter_as_unicode + w_string sublist_w + self""".split(), + greens=["filter_is_callable", "use_builder", "filter_type", "ctx.pattern"]) +def _sub_append_slice(ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, start, end): + if use_builder: + if isinstance(ctx, rsre_core.BufMatchContext): + assert strbuilder is not None + return strbuilder.append(ctx._buffer.getslice(start, end, 1, end-start)) + if isinstance(ctx, rsre_core.StrMatchContext): + assert strbuilder is not None + return strbuilder.append_slice(ctx._string, start, end) + elif isinstance(ctx, rsre_core.UnicodeMatchContext): + assert unicodebuilder is not None + return unicodebuilder.append_slice(ctx._unicodestr, start, end) + assert 0, "unreachable" + else: + sublist_w.append(slice_w(space, ctx, start, end, space.w_None)) + @unwrap_spec(flags=int, groups=int, w_groupindex=WrappedDefault(None), w_indexgroup=WrappedDefault(None)) def SRE_Pattern__new__(space, w_subtype, w_pattern, flags, w_code, @@ -482,6 +570,8 @@ ctx = self.ctx if isinstance(ctx, rsre_core.BufMatchContext): return space.wrap(ctx._buffer.as_str()) + elif isinstance(ctx, rsre_core.StrMatchContext): + return space.wrap(ctx._string) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr) else: diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -60,10 +60,10 @@ Must be smaller than 1.0 """ w_modules = space.sys.get('modules') - if space.contains_w(w_modules, space.wrap('_continuation')): - space.warn(space.wrap("Using _continuation/greenlet/stacklet together " - "with vmprof will crash"), - space.w_RuntimeWarning) + #if space.contains_w(w_modules, space.wrap('_continuation')): + # space.warn(space.wrap("Using _continuation/greenlet/stacklet together " + # "with vmprof will crash"), + # space.w_RuntimeWarning) try: rvmprof.enable(fileno, period) except rvmprof.VMProfError, e: diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py --- a/pypy/module/_vmprof/test/test__vmprof.py +++ b/pypy/module/_vmprof/test/test__vmprof.py @@ -5,14 +5,15 @@ class AppTestVMProf(object): def setup_class(cls): cls.space = gettestobjspace(usemodules=['_vmprof', 'struct']) - cls.tmpfile = udir.join('test__vmprof.1').open('wb') - 
cls.w_tmpfileno = cls.space.wrap(cls.tmpfile.fileno()) - cls.w_tmpfilename = cls.space.wrap(cls.tmpfile.name) - cls.tmpfile2 = udir.join('test__vmprof.2').open('wb') - cls.w_tmpfileno2 = cls.space.wrap(cls.tmpfile2.fileno()) - cls.w_tmpfilename2 = cls.space.wrap(cls.tmpfile2.name) + cls.w_tmpfilename = cls.space.wrap(str(udir.join('test__vmprof.1'))) + cls.w_tmpfilename2 = cls.space.wrap(str(udir.join('test__vmprof.2'))) def test_import_vmprof(self): + tmpfile = open(self.tmpfilename, 'wb') + tmpfileno = tmpfile.fileno() + tmpfile2 = open(self.tmpfilename2, 'wb') + tmpfileno2 = tmpfile2.fileno() + import struct, sys WORD = struct.calcsize('l') @@ -45,7 +46,7 @@ return count import _vmprof - _vmprof.enable(self.tmpfileno, 0.01) + _vmprof.enable(tmpfileno, 0.01) _vmprof.disable() s = open(self.tmpfilename, 'rb').read() no_of_codes = count(s) @@ -56,7 +57,7 @@ pass """ in d - _vmprof.enable(self.tmpfileno2, 0.01) + _vmprof.enable(tmpfileno2, 0.01) exec """def foo2(): pass diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py --- a/pypy/module/cpyext/__init__.py +++ b/pypy/module/cpyext/__init__.py @@ -34,7 +34,7 @@ import pypy.module.cpyext.pyerrors import pypy.module.cpyext.typeobject import pypy.module.cpyext.object -import pypy.module.cpyext.stringobject +import pypy.module.cpyext.bytesobject import pypy.module.cpyext.tupleobject import pypy.module.cpyext.setobject import pypy.module.cpyext.dictobject diff --git a/pypy/module/cpyext/stringobject.py b/pypy/module/cpyext/bytesobject.py rename from pypy/module/cpyext/stringobject.py rename to pypy/module/cpyext/bytesobject.py diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,8 +29,8 @@ #define PY_VERSION "2.7.10" /* PyPy version as a string */ -#define PYPY_VERSION "4.1.0-alpha0" -#define PYPY_VERSION_NUM 0x04010000 +#define PYPY_VERSION "5.1.0-alpha0" +#define PYPY_VERSION_NUM 0x05010000 /* Defined to mean a PyPy where cpyext holds more regular references to PyObjects, e.g. staying alive as long as the internal PyPy object diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -192,7 +192,7 @@ def from_ref(space, ref): """ Finds the interpreter object corresponding to the given reference. If the - object is not yet realized (see stringobject.py), creates it. + object is not yet realized (see bytesobject.py), creates it. 
""" assert is_pyobj(ref) if not ref: diff --git a/pypy/module/cpyext/structmember.py b/pypy/module/cpyext/structmember.py --- a/pypy/module/cpyext/structmember.py +++ b/pypy/module/cpyext/structmember.py @@ -6,7 +6,7 @@ from pypy.module.cpyext.intobject import PyInt_AsLong, PyInt_AsUnsignedLong from pypy.module.cpyext.pyerrors import PyErr_Occurred from pypy.module.cpyext.pyobject import PyObject, Py_DecRef, from_ref, make_ref -from pypy.module.cpyext.stringobject import ( +from pypy.module.cpyext.bytesobject import ( PyString_FromString, PyString_FromStringAndSize) from pypy.module.cpyext.floatobject import PyFloat_AsDouble from pypy.module.cpyext.longobject import ( diff --git a/pypy/module/cpyext/test/test_stringobject.py b/pypy/module/cpyext/test/test_bytesobject.py rename from pypy/module/cpyext/test/test_stringobject.py rename to pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_stringobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -1,7 +1,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase -from pypy.module.cpyext.stringobject import new_empty_str, PyStringObject +from pypy.module.cpyext.bytesobject import new_empty_str, PyStringObject from pypy.module.cpyext.api import PyObjectP, PyObject, Py_ssize_tP from pypy.module.cpyext.pyobject import Py_DecRef, from_ref, make_ref from pypy.module.cpyext.typeobjectdefs import PyTypeObjectPtr diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -422,7 +422,7 @@ @cpython_api([PyObject, Py_ssize_t, rffi.VOIDPP], lltype.Signed, header=None, error=-1) def str_getreadbuffer(space, w_str, segment, ref): - from pypy.module.cpyext.stringobject import PyString_AsString + from pypy.module.cpyext.bytesobject import PyString_AsString if segment != 0: raise OperationError(space.w_SystemError, space.wrap ("accessing non-existent string segment")) @@ -435,7 +435,7 @@ @cpython_api([PyObject, Py_ssize_t, rffi.CCHARPP], lltype.Signed, header=None, error=-1) def str_getcharbuffer(space, w_str, segment, ref): - from pypy.module.cpyext.stringobject import PyString_AsString + from pypy.module.cpyext.bytesobject import PyString_AsString if segment != 0: raise OperationError(space.w_SystemError, space.wrap ("accessing non-existent string segment")) @@ -552,7 +552,7 @@ w_typename = space.getattr(w_type, space.wrap('__name__')) heaptype = rffi.cast(PyHeapTypeObject, pto) heaptype.c_ht_name = make_ref(space, w_typename) - from pypy.module.cpyext.stringobject import PyString_AsString + from pypy.module.cpyext.bytesobject import PyString_AsString pto.c_tp_name = PyString_AsString(space, heaptype.c_ht_name) else: pto.c_tp_name = rffi.str2charp(w_type.name) diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -9,7 +9,7 @@ from pypy.module.cpyext.pyobject import ( PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, make_typedescr, get_typedescr, as_pyobj) -from pypy.module.cpyext.stringobject import PyString_Check +from pypy.module.cpyext.bytesobject import PyString_Check from pypy.module.sys.interp_encoding import setdefaultencoding from pypy.module._codecs.interp_codecs import CodecState from pypy.objspace.std import unicodeobject @@ -17,7 +17,7 @@ 
from rpython.tool.sourcetools import func_renamer import sys -## See comment in stringobject.py. +## See comment in bytesobject.py. PyUnicodeObjectStruct = lltype.ForwardReference() PyUnicodeObject = lltype.Ptr(PyUnicodeObjectStruct) diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -85,7 +85,7 @@ # The "imp" module does not respect this, and is allowed to find # lone .pyc files. # check the .pyc file - if space.config.objspace.usepycfiles and space.config.objspace.lonepycfiles: + if space.config.objspace.lonepycfiles: pycfile = filepart + ".pyc" if file_exists(pycfile): # existing .pyc file @@ -888,17 +888,11 @@ """ w = space.wrap - if space.config.objspace.usepycfiles: From pypy.commits at gmail.com Wed Mar 9 14:32:50 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 09 Mar 2016 11:32:50 -0800 (PST) Subject: [pypy-commit] pypy default: tweak, removed the release version from html page titles in 181dc3529afd Message-ID: <56e07a62.02931c0a.ba4dc.0673@mx.google.com> Author: mattip Branch: Changeset: r82921:502d06a17d85 Date: 2016-03-09 21:32 +0200 http://bitbucket.org/pypy/pypy/changeset/502d06a17d85/ Log: tweak, removed the release version from html page titles in 181dc3529afd diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -76,5 +76,4 @@ * add a tag on the pypy/jitviewer repo that corresponds to pypy release * add a tag on the codespeed web site that corresponds to pypy release -* update the version number in {rpython,pypy}/doc/conf.py. * revise versioning at https://readthedocs.org/projects/pypy From pypy.commits at gmail.com Wed Mar 9 15:32:27 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 09 Mar 2016 12:32:27 -0800 (PST) Subject: [pypy-commit] pypy default: update after cpyext-gc-support-2, cffi-embedding Message-ID: <56e0885b.85371c0a.fc4e6.1b8f@mx.google.com> Author: mattip Branch: Changeset: r82922:b1391819cc98 Date: 2016-03-09 22:31 +0200 http://bitbucket.org/pypy/pypy/changeset/b1391819cc98/ Log: update after cpyext-gc-support-2, cffi-embedding diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -167,22 +167,13 @@ * `hg` -Embedding PyPy and improving CFFI ---------------------------------- - -PyPy has some basic :doc:`embedding infrastructure `. The idea would be to improve -upon that with cffi hacks that can automatically generate embeddable .so/.dll -library - - Optimising cpyext (CPython C-API compatibility layer) ----------------------------------------------------- A lot of work has gone into PyPy's implementation of CPython's C-API over the last years to let it reach a practical level of compatibility, so that C extensions for CPython work on PyPy without major rewrites. However, -there are still many edges and corner cases where it misbehaves, and it has -not received any substantial optimisation so far. +there are still many edges and corner cases where it misbehaves. 
The objective of this project is to fix bugs in cpyext and to optimise several performance critical parts of it, such as the reference counting From pypy.commits at gmail.com Wed Mar 9 15:45:39 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 09 Mar 2016 12:45:39 -0800 (PST) Subject: [pypy-commit] pypy pypy3.3-bootstrap: Do not store the absolute path of the source into the translated executable (breaks stuff) Message-ID: <56e08b73.c711c30a.d2f7e.2437@mx.google.com> Author: Ronan Lamy Branch: pypy3.3-bootstrap Changeset: r82923:6a5e412892d7 Date: 2016-03-09 20:43 +0000 http://bitbucket.org/pypy/pypy/changeset/6a5e412892d7/ Log: Do not store the absolute path of the source into the translated executable (breaks stuff) diff --git a/pypy/module/sys/state.py b/pypy/module/sys/state.py --- a/pypy/module/sys/state.py +++ b/pypy/module/sys/state.py @@ -14,16 +14,7 @@ self.w_modules = space.newdict(module=True) self.w_warnoptions = space.newlist([]) self.w_argv = space.newlist([]) - - self.setinitialpath(space) - - def setinitialpath(self, space): - from pypy.module.sys.initpath import compute_stdlib_path - # Initialize the default path - pypydir = os.path.dirname(os.path.abspath(pypy.__file__)) - srcdir = os.path.dirname(pypydir) - path = compute_stdlib_path(self, srcdir) - self.w_path = space.newlist([space.wrap(p) for p in path]) + self.w_path = space.newlist([]) def get(space): return space.fromcache(State) From pypy.commits at gmail.com Wed Mar 9 16:32:53 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 09 Mar 2016 13:32:53 -0800 (PST) Subject: [pypy-commit] pypy default: We made a vailiant effort to add the freebsd64 buildslave to the release Message-ID: <56e09685.463f1c0a.4d645.2f77@mx.google.com> Author: Armin Rigo Branch: Changeset: r82924:a06076d31e8d Date: 2016-03-09 22:32 +0100 http://bitbucket.org/pypy/pypy/changeset/a06076d31e8d/ Log: We made a vailiant effort to add the freebsd64 buildslave to the release script the 23-Aug-15, hoping it would stay around this time, but alas, it disappeared 27-Oct-15 and never showed up again diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -9,7 +9,7 @@ # download source, assuming a tag for the release already exists, and repackage them. # The script should be run in an empty directory, i.e. 
/tmp/release_xxx -for plat in linux linux64 linux-armhf-raspbian linux-armhf-raring linux-armel osx64 freebsd64 +for plat in linux linux64 linux-armhf-raspbian linux-armhf-raring linux-armel osx64 do wget http://buildbot.pypy.org/nightly/$branchname/pypy-c-jit-latest-$plat.tar.bz2 tar -xf pypy-c-jit-latest-$plat.tar.bz2 From pypy.commits at gmail.com Thu Mar 10 03:45:28 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 10 Mar 2016 00:45:28 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: use more hypothesis and fix the next problem Message-ID: <56e13428.d4e01c0a.cdf5a.ffffba23@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82925:7fef527aef0b Date: 2016-03-10 10:44 +0200 http://bitbucket.org/pypy/pypy/changeset/7fef527aef0b/ Log: use more hypothesis and fix the next problem diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -47,6 +47,7 @@ def get_size_jitcode_pc(self): if self.save_pos >= 0: self.pos = self.save_pos + self.save_pos = -1 size = self._next() if size < 0: self.save_pos = self.pos diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1581,6 +1581,7 @@ effectinfo = descr.get_extra_info() if effectinfo.oopspecindex == effectinfo.OS_NOT_IN_TRACE: return self.metainterp.do_not_in_trace_call(allboxes, descr) + cut_pos = self.metainterp.history.get_trace_position() if (assembler_call or effectinfo.check_forces_virtual_or_virtualizable()): @@ -1618,8 +1619,8 @@ self.metainterp.vrefs_after_residual_call() vablebox = None if assembler_call: - vablebox, resbox = self.metainterp.direct_assembler_call( - assembler_call_jd) + vablebox, resbox = self.metainterp.direct_assembler_call(resbox, + assembler_call_jd, cut_pos) if resbox and resbox.type != 'v': self.make_result_of_lastop(resbox) self.metainterp.vable_after_residual_call(funcbox) @@ -3007,11 +3008,11 @@ newop.copy_value_from(op) return newop - def direct_assembler_call(self, targetjitdriver_sd): + def direct_assembler_call(self, op, targetjitdriver_sd, cut_pos): """ Generate a direct call to assembler for portal entry point, patching the CALL_MAY_FORCE that occurred just now. 
""" - op = self.history.operations.pop() + self.history.cut(cut_pos) assert op.is_call_may_force() num_green_args = targetjitdriver_sd.num_green_args arglist = op.getarglist() @@ -3021,8 +3022,9 @@ warmrunnerstate = targetjitdriver_sd.warmstate token = warmrunnerstate.get_assembler_token(greenargs) opnum = OpHelpers.call_assembler_for_descr(op.getdescr()) - op = op.copy_and_change(opnum, args=args, descr=token) - self.history.operations.append(op) + oldop = op + op = self.history.record_nospec(opnum, args, descr=token) + op.copy_value_from(oldop) if opnum == rop.CALL_ASSEMBLER_N: op = None # diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -200,14 +200,12 @@ class NumberingState(object): def __init__(self, size): self.liveboxes = {} - self.current = [rffi.cast(rffi.SHORT, 0)] * (size + 2) - self.position = 0 + self.current = [] self.n = 0 self.v = 0 def append(self, item): - self.current[self.position] = item - self.position += 1 + self.current.append(item) class ResumeDataLoopMemo(object): @@ -317,15 +315,13 @@ #self._number_boxes(topsnapshot.boxes, optimizer, state) #assert state.position == special_boxes_size - total = 2 while not snapshot_iter.done(): size, jitcode_index, pc = snapshot_iter.get_size_jitcode_pc() - total += 2 + size state.append(rffi.cast(rffi.SHORT, jitcode_index)) state.append(rffi.cast(rffi.SHORT, pc)) self._number_boxes(snapshot_iter, size, optimizer, state) - numb = resumecode.create_numbering(state.current, total) + numb = resumecode.create_numbering(state.current) return numb, state.liveboxes, state.v def forget_numberings(self): diff --git a/rpython/jit/metainterp/test/strategies.py b/rpython/jit/metainterp/test/strategies.py --- a/rpython/jit/metainterp/test/strategies.py +++ b/rpython/jit/metainterp/test/strategies.py @@ -12,27 +12,64 @@ boxlists = strategies.lists(boxes, min_size=1).flatmap( lambda cis: strategies.lists(strategies.sampled_from(cis))) +const_or_box = strategies.sampled_from(['const', 'box']) + +class JitCode(object): + def __init__(self, index): + self.index = index + +class Frame(object): + parent_resumedata_position = -1 + + def __init__(self, jitcode, pc, boxes): + self.jitcode = jitcode + self.pc = pc + self.boxes = boxes + + def get_list_of_active_boxes(self, flag): + return self.boxes + +def get_arg(draw, all_ops, allow_const=True): + if allow_const: + tp = draw(const_or_box) + if tp == 'const': + return draw(intconsts) + return draw(strategies.sampled_from(all_ops)) + +def gen_int_add(draw, all_ops, framestack): + arg0 = get_arg(draw, all_ops) + arg1 = get_arg(draw, all_ops) + res = ResOperation(rop.INT_ADD, [arg0, arg1]) + all_ops.add(res) + return res + +def gen_guard_true(draw, all_ops, framestack): + arg = get_arg(draw, all_ops, allow_const=False) + res = ResOperation(rop.GUARD_TRUE, [arg]) + if draw(strategies.booleans()): + s = [] + for i in range(10): + s.append(get_arg(draw, all_ops, allow_const=False)) + boxes = list(set(s)) + framestack.append(Frame(JitCode(1), 2, boxes)) + res.framestack = framestack[:] + return res + +resops = strategies.sampled_from([gen_int_add, gen_guard_true]) + @strategies.composite def lists_of_operations(draw, inputboxes=intboxes): - def get(draw, l1, l2, index): - if index < len(l1): - return l1[index] - index -= len(l1) - if index >= len(l2): - return draw(intconsts) - return l2[index] - size = draw(strategies.integers(min_value=1, max_value=100)) inputargs = [] for i in range(size): 
inputargs.append(draw(inputboxes)) size = draw(strategies.integers(min_value=1, max_value=100)) ops = [] + all_ops = set(inputargs) + framestack = [Frame(JitCode(1), 2, [])] for i in range(size): - s = strategies.integers(min_value=0, max_value=len(inputargs) + 2 * len(ops)) - arg0 = get(draw, inputargs, ops, draw(s)) - arg1 = get(draw, inputargs, ops, draw(s)) - ops.append(ResOperation(rop.INT_ADD, [arg0, arg1], -1)) + opgen = draw(resops) + ops.append(opgen(draw, all_ops, framestack)) return inputargs, ops if __name__ == '__main__': diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -43,7 +43,17 @@ iter = t.get_iter() l = [] while not iter.done(): - l.append(iter.next()) + op = iter.next() + if op.is_guard(): + op.framestack = [] + si = iter.get_snapshot_iter(op.rd_resume_position) + while not si.done(): + size, jitcode, pc = si.get_size_jitcode_pc() + boxes = [] + for i in range(size): + boxes.append(si.next()) + op.framestack.append(FakeFrame(JitCode(jitcode), pc, boxes)) + l.append(op) return iter.inputargs, l, iter def test_simple_iterator(self): @@ -121,6 +131,10 @@ t = Trace(inputargs) for op in ops: newop = t.record_op(op.getopnum(), op.getarglist()) + newop.orig_op = op + if newop.is_guard(): + resume.capture_resumedata(op.framestack, + None, [], t) op.position = newop.position inpargs, l, iter = self.unpack(t) loop1 = TreeLoop("loop1") From pypy.commits at gmail.com Thu Mar 10 03:48:48 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 10 Mar 2016 00:48:48 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: test fixes broken by merge Message-ID: <56e134f0.6672c20a.19861.ffffc3c8@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82926:8cdcda35c815 Date: 2016-03-10 08:47 +0100 http://bitbucket.org/pypy/pypy/changeset/8cdcda35c815/ Log: test fixes broken by merge diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -12,3 +12,8 @@ .. branch: s390x-backend The jit compiler backend implementation for the s390x architecutre. + +.. branch: s390x-enhance-speedup + +Refactoring to only store 64-bit values in the literal pool of the assembly. Generated machine code uses less space and runs faster. 
+ diff --git a/rpython/jit/backend/zarch/test/test_assembler.py b/rpython/jit/backend/zarch/test/test_assembler.py --- a/rpython/jit/backend/zarch/test/test_assembler.py +++ b/rpython/jit/backend/zarch/test/test_assembler.py @@ -605,7 +605,7 @@ # ensure there is just on instruction for the 'best case' self.pushpop_jitframe(r.MANAGED_REGS) - assert stored == [(r.r2, r.r11), (r.r13,)] + assert stored == [(r.r2, r.r11)] assert stored == loaded stored = [] loaded = [] From pypy.commits at gmail.com Thu Mar 10 03:48:50 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 10 Mar 2016 00:48:50 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: added print statement to test Message-ID: <56e134f2.04371c0a.e08b4.ffffbedf@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82927:290a4a57f4ac Date: 2016-03-10 09:48 +0100 http://bitbucket.org/pypy/pypy/changeset/290a4a57f4ac/ Log: added print statement to test diff --git a/rpython/rlib/test/test_rthread.py b/rpython/rlib/test/test_rthread.py --- a/rpython/rlib/test/test_rthread.py +++ b/rpython/rlib/test/test_rthread.py @@ -150,6 +150,7 @@ willing_to_wait_more -= 1 done = len(state.answers) == expected + print "waitting %d more iterations" % willing_to_wait_more time.sleep(0.01) time.sleep(0.1) From pypy.commits at gmail.com Thu Mar 10 03:52:37 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 10 Mar 2016 00:52:37 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: fix some typos and details Message-ID: <56e135d5.0775c20a.7b5df.ffffc16e@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82928:96773552cf51 Date: 2016-03-10 10:51 +0200 http://bitbucket.org/pypy/pypy/changeset/96773552cf51/ Log: fix some typos and details diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -274,6 +274,15 @@ def get_iter(self): return TraceIterator(self, 0, len(self._ops)) + def unpack(self): + """ NOT_RPYTHON - really rpython, but only use for debugging + """ + iter = self.get_iter() + ops = [] + while not iter.done(): + ops.append(iter.next()) + return ops + def _get_operations(self): """ NOT_RPYTHON """ diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -424,7 +424,7 @@ elif self.level == LEVEL_NONNULL: if other.level == LEVEL_UNKNOWN: if runtime_box is not None and runtime_box.nonnull(): - op = ResOperation(rop.GUARD_NONNULL, [box], None) + op = ResOperation(rop.GUARD_NONNULL, [box]) extra_guards.append(op) return else: diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1387,7 +1387,7 @@ @arguments() def opimpl_current_trace_length(self): - trace_length = len(self.metainterp.history.operations) + trace_length = self.metainterp.history.length() return ConstInt(trace_length) @arguments("box") @@ -2618,6 +2618,7 @@ live_arg_boxes[num_green_args:]) finally: self.history.cut(cut_at) # pop the jump + self.history.ends_with_jump = False if target_token is not None: # raise if it *worked* correctly assert isinstance(target_token, TargetToken) jitcell_token = target_token.targeting_jitcell_token diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- 
a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -2007,7 +2007,7 @@ res = self.meta_interp(g, [3, 14]) assert res == g(3, 14) - def test_specialied_bridge(self): + def test_specialized_bridge(self): myjitdriver = JitDriver(greens = [], reds = ['y', 'x', 'res']) class A: def __init__(self, val): From pypy.commits at gmail.com Thu Mar 10 04:04:28 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 10 Mar 2016 01:04:28 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: a hack to allow dance around handling exception and resume data before writing actual history Message-ID: <56e1389c.a151c20a.b61d7.ffffc7eb@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82929:43e4af36f9a3 Date: 2016-03-10 11:03 +0200 http://bitbucket.org/pypy/pypy/changeset/43e4af36f9a3/ Log: a hack to allow dance around handling exception and resume data before writing actual history diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2344,22 +2344,22 @@ if self.resumekey_original_loop_token is None: raise compile.giveup() # should be rare self.staticdata.try_to_free_some_loops() - self.initialize_state_from_guard_failure(key, deadframe) + inputargs = self.initialize_state_from_guard_failure(key, deadframe) try: - return self._handle_guard_failure(resumedescr, key, deadframe) + return self._handle_guard_failure(resumedescr, key, inputargs, deadframe) finally: self.resumekey_original_loop_token = None self.staticdata.profiler.end_tracing() debug_stop('jit-tracing') - def _handle_guard_failure(self, resumedescr, key, deadframe): + def _handle_guard_failure(self, resumedescr, key, inputargs, deadframe): self.current_merge_points = [] self.resumekey = resumedescr self.seen_loop_header_for_jdindex = -1 if isinstance(key, compile.ResumeAtPositionDescr): self.seen_loop_header_for_jdindex = self.jitdriver_sd.index try: - self.prepare_resume_from_failure(deadframe, resumedescr) + self.prepare_resume_from_failure(deadframe, inputargs, resumedescr) if self.resumekey_original_loop_token is None: # very rare case raise SwitchToBlackhole(Counters.ABORT_BRIDGE) self.interpret() @@ -2502,7 +2502,7 @@ else: assert 0 self.jitdriver_sd.warmstate.execute_assembler(loop_token, *args) - def prepare_resume_from_failure(self, deadframe, resumedescr): + def prepare_resume_from_failure(self, deadframe, inputargs, resumedescr): exception = self.cpu.grab_exc_value(deadframe) if (isinstance(resumedescr, compile.ResumeGuardExcDescr) or isinstance(resumedescr, compile.ResumeGuardCopiedExcDescr)): @@ -2526,13 +2526,15 @@ llmemory.cast_ptr_to_adr(exception_obj.typeptr)) else: exc_class = 0 - i = len(self.history.operations) + assert self.history.trace is None + i = len(self.history._cache) op1 = self.history.record(rop.SAVE_EXC_CLASS, [], exc_class) op2 = self.history.record(rop.SAVE_EXCEPTION, [], exception) - assert op1 is self.history.operations[i] - assert op2 is self.history.operations[i + 1] - self.history.operations = [op1, op2] + self.history.operations[:i] + assert op1 is self.history._cache[i] + assert op2 is self.history._cache[i + 1] + self.history._cache = [op1, op2] + self.history._cache[:i] self.history.record(rop.RESTORE_EXCEPTION, [op1, op2], None) + self.history.set_inputargs(inputargs) if exception_obj: self.execute_ll_raised(exception_obj) else: @@ -2542,6 +2544,7 @@ except ChangeFrame: pass else: + self.history.set_inputargs(inputargs) assert not 
exception def get_procedure_token(self, greenkey, with_compiled_targets=False): @@ -2673,7 +2676,7 @@ sd = self.staticdata token = sd.loop_tokens_exit_frame_with_exception_ref[0].finishdescr self.history.record(rop.FINISH, [valuebox], None, descr=token) - target_token = compile.compile_trace(self, self.resumekey) + target_token = compile.compile_trace(self, self.resumekey, [valuebox]) if target_token is not token: compile.giveup() @@ -2715,7 +2718,7 @@ self.history = history.History() inputargs_and_holes = self.rebuild_state_after_failure(resumedescr, deadframe) - self.history.set_inputargs([box for box in inputargs_and_holes if box]) + return [box for box in inputargs_and_holes if box] finally: rstack._stack_criticalcode_stop() From pypy.commits at gmail.com Thu Mar 10 05:26:37 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 10 Mar 2016 02:26:37 -0800 (PST) Subject: [pypy-commit] pypy default: oops Message-ID: <56e14bdd.c711c30a.d2f7e.ffffe547@mx.google.com> Author: Armin Rigo Branch: Changeset: r82930:f7c9e212e5f3 Date: 2016-03-10 11:25 +0100 http://bitbucket.org/pypy/pypy/changeset/f7c9e212e5f3/ Log: oops diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -29,7 +29,7 @@ # Do this after creating a tag, note the untarred directory is pypy-pypy- # so make sure there is not another one wget https://bitbucket.org/pypy/pypy/get/$tagname.tar.bz2 -tar -xf release-$maj.$min.$rev.tar.bz2 +tar -xf $tagname.tar.bz2 mv pypy-pypy-* pypy-$maj.$min.$rev-src tar --owner=root --group=root --numeric-owner -cvjf pypy-$maj.$min.$rev-src.tar.bz2 pypy-$maj.$min.$rev-src zip -r pypy-$maj.$min.$rev-src.zip pypy-$maj.$min.$rev-src From pypy.commits at gmail.com Thu Mar 10 05:33:27 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 10 Mar 2016 02:33:27 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: condition code to jmp param included a case it should not have! __sync_lock_test_and_set changed to do exactly the same as emitted by gcc Message-ID: <56e14d77.12871c0a.4cac2.ffffec6b@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82931:87b9598ed2c7 Date: 2016-03-10 11:32 +0100 http://bitbucket.org/pypy/pypy/changeset/87b9598ed2c7/ Log: condition code to jmp param included a case it should not have! __sync_lock_test_and_set changed to do exactly the same as emitted by gcc diff --git a/rpython/jit/backend/zarch/callbuilder.py b/rpython/jit/backend/zarch/callbuilder.py --- a/rpython/jit/backend/zarch/callbuilder.py +++ b/rpython/jit/backend/zarch/callbuilder.py @@ -216,15 +216,16 @@ # Equivalent of 'r13 = __sync_lock_test_and_set(&rpy_fastgil, 1);' self.mc.LGHI(r.SCRATCH, l.imm(1)) + self.mc.LG(r.r13, l.addr(0, RFASTGILPTR)) retry_label = self.mc.currpos() - self.mc.LG(r.r13, l.addr(0, RFASTGILPTR)) + self.mc.LGR(r.r14, r.r13) self.mc.CSG(r.r13, r.SCRATCH, l.addr(0, RFASTGILPTR)) # try to claim lock - self.mc.BRC(c.NE, l.imm(retry_label - self.mc.currpos())) # retry if failed + self.mc.BRC(c.LT, l.imm(retry_label - self.mc.currpos())) # retry if failed # CSG performs a serialization # zarch is sequential consistent! - self.mc.CGHI(r.r13, l.imm0) + self.mc.CGHI(r.r14, l.imm0) b1_location = self.mc.currpos() # boehm: patched with a BEQ: jump if r13 is zero # shadowstack: patched with BNE instead @@ -246,8 +247,8 @@ # revert the rpy_fastgil acquired above, so that the # general 'reacqgil_addr' below can acquire it again... 
- # (here, r13 is conveniently zero) - self.mc.STG(r.r13, l.addr(0, RFASTGILPTR)) + # (here, r14 is conveniently zero) + self.mc.STG(r.r14, l.addr(0, RFASTGILPTR)) pmc = OverwritingBuilder(self.mc, bne_location, 1) pmc.BRCL(c.NE, l.imm(self.mc.currpos() - bne_location)) From pypy.commits at gmail.com Thu Mar 10 05:35:06 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 10 Mar 2016 02:35:06 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: write checksums Message-ID: <56e14dda.a3abc20a.7a71e.ffffe226@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r716:936b6cb4040c Date: 2016-03-10 11:34 +0100 http://bitbucket.org/pypy/pypy.org/changeset/936b6cb4040c/ Log: write checksums diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -417,6 +417,20 @@ d4492e65201bb09dca5f97601113dc57 pypy-4.0.1-ppc64le.tar.bz2 2aadbb7638153b9d7c2a832888ed3c1e pypy-4.0.1-ppc64.tar.bz2 +pypy-5.0.0 md5:: + + bcb5c830d6380ff78b759dbe075dfc14 pypy-5.0.0-linux-armel.tar.bz2 + 5649117ba754bef14550b6abc2135eab pypy-5.0.0-linux-armhf-raring.tar.bz2 + 949344fff9c6942713f34e1a1fcbc7aa pypy-5.0.0-linux-armhf-raspbian.tar.bz2 + 5e005cf26a3a58552fd77f9aaae6f614 pypy-5.0.0-linux.tar.bz2 + 133530cb9957a67807b25d23bb74ac24 pypy-5.0.0-linux64.tar.bz2 + a091398908bf525149a1fdea8bf48ec2 pypy-5.0.0-osx64.tar.bz2 + f243ff399a55f4370b6d1dc0a3650f1d pypy-5.0.0-ppc64.tar.bz2 + 51fb75ae0a143faa9a5b39f094965050 pypy-5.0.0-ppc64le.tar.bz2 + 6a26f735cb45a10255076fdd6cebee84 pypy-5.0.0-src.tar.bz2 + 1be14cf3ffc97da7521637f8f81abc3c pypy-5.0.0-src.zip + d2c8237e8106b535850596f0e9762246 pypy-5.0.0-win32.zip + pypy3-2.4.0 md5:: eadbc9790823fc0ae40c943087cd7cb3 pypy3-2.4.0-linux64.tar.bz2 @@ -450,6 +464,34 @@ e344b383e8c745cc7c26bbcb0a43958e768fdd1d29dd0799cc148e0518d8d36f pypy-4.0.1-src.zip 9a350a5e6f9b86fb525c6f1300b0c97c021ea8b1e37bfd32a8c4bb7a415d5329 pypy-4.0.1-win32.zip +pypy-5.0.0 sha1:: + + b7c82d437086660759ec18582dbdaf198b77e467 pypy-5.0.0-linux-armel.tar.bz2 + 85978b1d33b0db8b512eebb1558200c3ab76d462 pypy-5.0.0-linux-armhf-raring.tar.bz2 + 271472d0362ce02fd656024b64f0343cc8193f9d pypy-5.0.0-linux-armhf-raspbian.tar.bz2 + 88ac71eebd65c35032325497cc450b4d184be005 pypy-5.0.0-linux.tar.bz2 + 22d32d92899a07cb8cbba4b8918a7919e34246c4 pypy-5.0.0-linux64.tar.bz2 + f652b264ba063a8c472b753baaaacf63690be6c5 pypy-5.0.0-osx64.tar.bz2 + 5620cead511ad33f9fface224544b70d72d9e4c9 pypy-5.0.0-ppc64.tar.bz2 + 6ee6b0eb574f3d29a5eaf29fdae8745fd9fe3c38 pypy-5.0.0-ppc64le.tar.bz2 + 62ce000b887ea22f5bdddcc0f24dd571ca534f57 pypy-5.0.0-src.tar.bz2 + 6dcbde8242e0ee985ffed63c5bf204e7fd74ac2c pypy-5.0.0-src.zip + 62cef0e0dd8849c224c647e53b13d3c47c99807d pypy-5.0.0-win32.zip + +pypy-5.0.0 sha256:: + + 87bd85441b16ecca0d45ba6e9c0e9d26bb7bd8867afbf79d80312cf79b032dc1 pypy-5.0.0-linux-armel.tar.bz2 + 5bb52cf5db4ae8497c4e03cd8a70e49867e6b93d9f29ad335d030fcd3a375769 pypy-5.0.0-linux-armhf-raring.tar.bz2 + 8033c0cc39e9f6771688f2eda95c726595f5453b3e73e1cd5f7ebbe3dae1f685 pypy-5.0.0-linux-armhf-raspbian.tar.bz2 + a9cc9afa94ff1cde811626a70081c477c9840e7816c0562d1903fd823d222ceb pypy-5.0.0-linux.tar.bz2 + b9c73be8e3c3b0835df83bdb86335712005240071cdd4dc245ac30b457063ae0 pypy-5.0.0-linux64.tar.bz2 + 45ed8bf799d0fd8eb051cbcc427173fba74dc9c2f6c309d7a3cc90f4917e6a10 pypy-5.0.0-osx64.tar.bz2 + 334a37e68cb543cf2cbcdd12379b9b770064bb70ba7fd104f1e451cfa10cdda5 pypy-5.0.0-ppc64.tar.bz2 + e72fe5c094186f79c997000ddbaa01616def652a8d1338b75a27dfa3755eb86c pypy-5.0.0-ppc64le.tar.bz2 + 
89027b1b33553b53ff7733dc4838f0a76af23552c0d915d9f6de5875b8d7d4ab pypy-5.0.0-src.tar.bz2 + 03e19e9bafccf5b2f4dd422699f3fe42da754c3fcc1d1fd4c8d585d7c9d1849d pypy-5.0.0-src.zip + c53f0946703f5e4885484c7cde2554a0320537135bf8965e054757c214412438 pypy-5.0.0-win32.zip + pypy3-2.4.0 sha1:: 7d715742f6929351b310a2ca3b924cab35913089 pypy3-2.4.0-linux64.tar.bz2 From pypy.commits at gmail.com Thu Mar 10 05:41:26 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 10 Mar 2016 02:41:26 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: added serialization point (to test if this is a problem for the threading issues) Message-ID: <56e14f56.49f9c20a.dd60d.ffffe5d3@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82932:632d518f2072 Date: 2016-03-10 11:40 +0100 http://bitbucket.org/pypy/pypy/changeset/632d518f2072/ Log: added serialization point (to test if this is a problem for the threading issues) diff --git a/rpython/jit/backend/zarch/callbuilder.py b/rpython/jit/backend/zarch/callbuilder.py --- a/rpython/jit/backend/zarch/callbuilder.py +++ b/rpython/jit/backend/zarch/callbuilder.py @@ -203,6 +203,7 @@ self.mc.XGR(r.SCRATCH, r.SCRATCH) # zarch is sequentially consistent self.mc.STG(r.SCRATCH, l.addr(0, RFASTGILPTR)) + self.mc.BCR_rr(0xe, 0x0) def move_real_result_and_call_reacqgil_addr(self, fastgil): From pypy.commits at gmail.com Thu Mar 10 05:56:43 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 10 Mar 2016 02:56:43 -0800 (PST) Subject: [pypy-commit] pypy default: also prints sha256 Message-ID: <56e152eb.49f9c20a.dd60d.ffffeb91@mx.google.com> Author: Armin Rigo Branch: Changeset: r82933:3af1e20d2bd1 Date: 2016-03-10 11:55 +0100 http://bitbucket.org/pypy/pypy/changeset/3af1e20d2bd1/ Log: also prints sha256 diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -35,9 +35,10 @@ zip -r pypy-$maj.$min.$rev-src.zip pypy-$maj.$min.$rev-src rm -rf pypy-$maj.$min.$rev-src -# Print out the md5, sha1 +# Print out the md5, sha1, sha256 md5sum *.bz2 *.zip sha1sum *.bz2 *.zip +sha256sum *.bz2 *.zip # Now upload all the bz2 and zip From pypy.commits at gmail.com Thu Mar 10 05:57:23 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 10 Mar 2016 02:57:23 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: Fix branch name Message-ID: <56e15313.e5ecc20a.f9fb4.fffff1a4@mx.google.com> Author: Armin Rigo Branch: s390x-backend Changeset: r82934:5eb1d600077e Date: 2016-03-10 11:56 +0100 http://bitbucket.org/pypy/pypy/changeset/5eb1d600077e/ Log: Fix branch name diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -13,7 +13,7 @@ The jit compiler backend implementation for the s390x architecutre. -.. branch: s390x-enhance-speedup +.. branch: s390x-enhance-speed Refactoring to only store 64-bit values in the literal pool of the assembly. Generated machine code uses less space and runs faster. 
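For readers who do not speak s390x assembly: the two callbuilder changesets above (87b9598ed2c7 and 632d518f2072) hand-emit what gcc would generate for the atomic builtin named in the log message. Below is a minimal C sketch of that rpy_fastgil handoff, assuming the usual GCC builtins; only the variable name comes from the diff, while the function names and control flow are illustrative:

    extern long rpy_fastgil;               /* 0 = free, nonzero = held */

    static void release_fastgil(void)
    {
        /* backend: XGR + STG of zero, then BCR 14,0 as a serialization point */
        __sync_lock_release(&rpy_fastgil); /* atomically store 0, release barrier */
    }

    static int reacquire_fastgil_fast_path(void)
    {
        /* backend: LG the old value, then a CSG compare-and-swap retry loop */
        long previous = __sync_lock_test_and_set(&rpy_fastgil, 1);
        return previous == 0;              /* fast path taken only if the lock was free */
    }

The CSG loop already acts as a serialization point (the diff's own comment says so), which is why only the release path gained the extra BCR 14,0 in 632d518f2072 while investigating the threading issue.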
From pypy.commits at gmail.com Thu Mar 10 06:28:52 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 10 Mar 2016 03:28:52 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: lots of random progress + implement virtualref and virtualizable support Message-ID: <56e15a74.4d0d1c0a.de497.fffffb24@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82935:ca0177a1db3a Date: 2016-03-10 13:28 +0200 http://bitbucket.org/pypy/pypy/changeset/ca0177a1db3a/ Log: lots of random progress + implement virtualref and virtualizable support diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -431,13 +431,13 @@ box = inputargs[i] opnum = OpHelpers.getfield_for_descr(descr) emit_op(extra_ops, - ResOperation(opnum, [vable_box], descr)) + ResOperation(opnum, [vable_box], descr=descr)) box.set_forwarded(extra_ops[-1]) i += 1 arrayindex = 0 for descr in vinfo.array_field_descrs: arraylen = vinfo.get_array_length(vable, arrayindex) - arrayop = ResOperation(rop.GETFIELD_GC_R, [vable_box], descr) + arrayop = ResOperation(rop.GETFIELD_GC_R, [vable_box], descr=descr) emit_op(extra_ops, arrayop) arraydescr = vinfo.array_descrs[arrayindex] assert i + arraylen <= len(inputargs) @@ -1005,7 +1005,7 @@ def compile_trace(metainterp, resumekey, runtime_boxes): """Try to compile a new bridge leading from the beginning of the history - to some existing place. + to some existging place. """ from rpython.jit.metainterp.optimizeopt import optimize_trace diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py --- a/rpython/jit/metainterp/logger.py +++ b/rpython/jit/metainterp/logger.py @@ -15,11 +15,15 @@ def log_loop_from_trace(self, trace, memo): if not have_debug_prints(): return + inputargs, ops = self._unpack_trace(trace) + self.log_loop(inputargs, ops, memo=memo) + + def _unpack_trace(self, trace): ops = [] i = trace.get_iter() while not i.done(): ops.append(i.next()) - self.log_loop(i.inputargs, ops, memo=memo) + return i.inputargs, ops def log_loop(self, inputargs, operations, number=0, type=None, ops_offset=None, name='', memo=None): @@ -91,8 +95,11 @@ debug_stop("jit-log-short-preamble") return logops - def log_abort_loop(self, inputargs, operations, memo=None): + def log_abort_loop(self, trace, memo=None): debug_start("jit-abort-log") + if not have_debug_prints(): + return + inputargs, operations = self._unpack_trace(trace) logops = self._log_operations(inputargs, operations, ops_offset=None, memo=memo) debug_stop("jit-abort-log") diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -1,5 +1,10 @@ """ Storage format: +for each operation (inputargs numbered with negative numbers) + [size-if-unknown-arity] [ ...] [descr] [potential snapshot] +snapshot is as follows + + [ ...] 
""" from rpython.jit.metainterp.history import ConstInt, Const @@ -57,6 +62,13 @@ assert size >= 0 return size, self._next(), self._next() + def get_list_of_boxes(self): + size = self._next() + l = [] + for i in range(size): + l.append(self.next()) + return l + class TraceIterator(object): def __init__(self, trace, start, end, force_inputargs=None): self.trace = trace @@ -251,6 +263,11 @@ self._ops.append(self._encode(box)) # not tagged, as it must be boxes return pos + def record_list_of_boxes(self, boxes): + self._ops.append(len(boxes)) + for box in boxes: + self._ops.append(self._encode(box)) + def get_patchable_position(self): p = len(self._ops) if not we_are_translated(): diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1619,8 +1619,8 @@ self.metainterp.vrefs_after_residual_call() vablebox = None if assembler_call: - vablebox, resbox = self.metainterp.direct_assembler_call(resbox, - assembler_call_jd, cut_pos) + vablebox, resbox = self.metainterp.direct_assembler_call( + self.metainterp._last_op, assembler_call_jd, cut_pos) if resbox and resbox.type != 'v': self.make_result_of_lastop(resbox) self.metainterp.vable_after_residual_call(funcbox) @@ -1893,6 +1893,7 @@ cancel_count = 0 exported_state = None last_exc_box = None + _last_op = None def __init__(self, staticdata, jitdriver_sd): self.staticdata = staticdata @@ -1949,7 +1950,7 @@ self.current_call_id += 1 if greenkey is not None and self.is_main_jitcode(jitcode): self.portal_trace_positions.append( - (jitcode.jitdriver_sd, greenkey, len(self.history.operations))) + (jitcode.jitdriver_sd, greenkey, self.history.get_trace_position())) if len(self.free_frames_list) > 0: f = self.free_frames_list.pop() else: @@ -1976,7 +1977,7 @@ self.call_ids.pop() if frame.greenkey is not None and self.is_main_jitcode(jitcode): self.portal_trace_positions.append( - (jitcode.jitdriver_sd, None, len(self.history.operations))) + (jitcode.jitdriver_sd, None, self.history.get_trace_position())) # we save the freed MIFrames to avoid needing to re-create new # MIFrame objects all the time; they are a bit big, with their # 3*256 register entries. @@ -2162,6 +2163,7 @@ profiler.count_ops(opnum, Counters.RECORDED_OPS) self.heapcache.invalidate_caches(opnum, descr, argboxes) op = self.history.record(opnum, argboxes, resvalue, descr) + self._last_op = op self.attach_debug_info(op) if op.type != 'v': return op @@ -2962,7 +2964,7 @@ else: jitdriver_sd, greenkey, startpos = start_stack.pop() warmstate = jitdriver_sd.warmstate - size = pos - startpos + size = pos[0] - startpos[0] if size > max_size: if warmstate is not None: r = warmstate.get_location_str(greenkey) @@ -2982,8 +2984,7 @@ max_jdsd = jitdriver_sd max_key = key if self.portal_trace_positions: # tests - self.staticdata.logger_ops.log_abort_loop(self.history.inputargs, - self.history.operations, + self.staticdata.logger_ops.log_abort_loop(self.history.trace, self.box_names_memo) debug_stop("jit-abort-longest-function") return max_jdsd, max_key @@ -3028,9 +3029,10 @@ opnum = OpHelpers.call_assembler_for_descr(op.getdescr()) oldop = op op = self.history.record_nospec(opnum, args, descr=token) - op.copy_value_from(oldop) if opnum == rop.CALL_ASSEMBLER_N: op = None + else: + op.copy_value_from(oldop) # # To fix an obscure issue, make sure the vable stays alive # longer than the CALL_ASSEMBLER operation. 
We do it by diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -110,7 +110,7 @@ return VecOperationNew(opnum, args, datatype, bytesize, signed, count, descr) def VecOperationNew(opnum, args, datatype, bytesize, signed, count, descr=None): - op = ResOperation(opnum, args, descr) + op = ResOperation(opnum, args, descr=descr) vecinfo = VectorizationInfo(None) vecinfo.setinfo(datatype, bytesize, signed) vecinfo.count = count diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -136,18 +136,15 @@ else: virtualizable_boxes = [] virtualref_boxes = virtualref_boxes[:] + pos = t.get_patchable_position() + t.record_list_of_boxes(virtualizable_boxes) + t.record_list_of_boxes(virtualref_boxes) if n >= 0: top = framestack[n] - pos = t.get_patchable_position() _ensure_parent_resumedata(framestack, n, t) t.record_snapshot(top.jitcode, top.pc, top.get_list_of_active_boxes(False)) - t.patch_position_to_current(pos) - else: - yyy - snapshot_storage.rd_frame_info_list = None - snapshot_storage.rd_snapshot = TopSnapshot(None, virtualref_boxes, - virtualizable_boxes) + t.patch_position_to_current(pos) return result PENDINGFIELDSTRUCT = lltype.Struct('PendingField', @@ -295,25 +292,16 @@ snapshot_iter = trace.get_snapshot_iter(position) state = NumberingState(snapshot_iter.length()) - state.append(rffi.cast(rffi.SHORT, 0)) - n = 0 # len(topsnapshot.boxes) + virtualizable_length = snapshot_iter._next() + + state.append(rffi.cast(rffi.SHORT, virtualizable_length)) + self._number_boxes(snapshot_iter, virtualizable_length, optimizer, state) + + n = snapshot_iter._next() assert not (n & 1) state.append(rffi.cast(rffi.SHORT, n >> 1)) - # - # XXX ignore vables and virtualrefs for now - #assert isinstance(topsnapshot, TopSnapshot) - #special_boxes_size = (1 + len(topsnapshot.vable_boxes) + - # 1 + len(topsnapshot.boxes)) - #assert state.position == special_boxes_size - #state.position = 0 - #state.append(rffi.cast(rffi.SHORT, len(topsnapshot.vable_boxes))) - #self._number_boxes(topsnapshot.vable_boxes, optimizer, state) - #n = len(topsnapshot.boxes) - #assert not (n & 1) - #state.append(rffi.cast(rffi.SHORT, n >> 1)) - #self._number_boxes(topsnapshot.boxes, optimizer, state) - #assert state.position == special_boxes_size + self._number_boxes(snapshot_iter, n, optimizer, state) while not snapshot_iter.done(): size, jitcode_index, pc = snapshot_iter.get_size_jitcode_pc() diff --git a/rpython/jit/metainterp/test/test_compile.py b/rpython/jit/metainterp/test/test_compile.py --- a/rpython/jit/metainterp/test/test_compile.py +++ b/rpython/jit/metainterp/test/test_compile.py @@ -31,6 +31,9 @@ def log_loop(self, inputargs, operations, number=0, type=None, ops_offset=None, name='', memo=None): pass + def log_loop_from_trace(self, *args, **kwds): + pass + def repr_of_resop(self, op): return repr(op) @@ -91,11 +94,13 @@ metainterp.staticdata = staticdata metainterp.cpu = cpu metainterp.history = History() - metainterp.history.operations = loop.operations[:-1] - metainterp.history.inputargs = loop.inputargs[:] + metainterp.history.set_inputargs(loop.inputargs[:]) + for op in loop.operations: + newop = metainterp.history.record_nospec(op.getopnum(), op.getarglist(), op.getdescr()) + op.position = newop.position # greenkey = 'faked' - target_token = compile_loop(metainterp, greenkey, 0, + 
target_token = compile_loop(metainterp, greenkey, (0, 0), loop.inputargs, loop.operations[-1].getarglist(), None) diff --git a/rpython/jit/metainterp/test/test_history.py b/rpython/jit/metainterp/test/test_history.py --- a/rpython/jit/metainterp/test/test_history.py +++ b/rpython/jit/metainterp/test/test_history.py @@ -62,17 +62,6 @@ assert c5.nonnull() assert c6.nonnull() -class TestHistoryEncoding(object): - def test_encode_basic(self): - history = History() - i0 = InputArgInt() - i1 = InputArgInt() - history.set_inputargs([i0, i1]) - history.record(rop.INT_ADD, [i0, i1], 13) - rh = history.get_recorded_history() - op = rh.get_next_op() - assert op.getopnum() == rop.INT_ADD - class TestZTranslated(StandaloneTests): def test_ztranslated_same_constant_float(self): def fn(args): diff --git a/rpython/jit/metainterp/test/test_jitiface.py b/rpython/jit/metainterp/test/test_jitiface.py --- a/rpython/jit/metainterp/test/test_jitiface.py +++ b/rpython/jit/metainterp/test/test_jitiface.py @@ -18,12 +18,12 @@ reasons = [] class MyJitIface(JitHookInterface): - def on_abort(self, reason, jitdriver, greenkey, greenkey_repr, logops, operations): + def on_abort(self, reason, jitdriver, greenkey, greenkey_repr, logops, trace): assert jitdriver is myjitdriver assert len(greenkey) == 1 reasons.append(reason) assert greenkey_repr == 'blah' - assert len(operations) > 1 + assert trace.length() > 1 iface = MyJitIface() diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -6,7 +6,7 @@ from rpython.jit.metainterp import resume from rpython.jit.metainterp.test.strategies import lists_of_operations from rpython.jit.metainterp.optimizeopt.test.test_util import BaseTest -from rpython.jit.metainterp.history import TreeLoop +from rpython.jit.metainterp.history import TreeLoop, AbstractDescr from hypothesis import given class JitCode(object): @@ -24,19 +24,19 @@ def get_list_of_active_boxes(self, flag): return self.boxes -def unpack_snapshot(t, pos): - trace = t.trace - first = trace._ops[pos] # this is the size - pos += 1 - boxes = [] - while first > pos + 1: - snapshot_size = trace._ops[pos] - # 2 for jitcode and pc - pos += 1 + 2 - boxes += [t._untag(trace._ops[i + pos]) for i in range(snapshot_size)] - pos += len(boxes) - return boxes - +def unpack_snapshot(t, op, pos): + op.framestack = [] + si = t.get_snapshot_iter(op.rd_resume_position) + virtualizables = si.get_list_of_boxes() + vref_boxes = si.get_list_of_boxes() + while not si.done(): + size, jitcode, pc = si.get_size_jitcode_pc() + boxes = [] + for i in range(size): + boxes.append(si.next()) + op.framestack.append(FakeFrame(JitCode(jitcode), pc, boxes)) + op.virtualizables = virtualizables + op.vref_boxes = vref_boxes class TestOpencoder(object): def unpack(self, t): @@ -45,14 +45,7 @@ while not iter.done(): op = iter.next() if op.is_guard(): - op.framestack = [] - si = iter.get_snapshot_iter(op.rd_resume_position) - while not si.done(): - size, jitcode, pc = si.get_size_jitcode_pc() - boxes = [] - for i in range(size): - boxes.append(si.next()) - op.framestack.append(FakeFrame(JitCode(jitcode), pc, boxes)) + unpack_snapshot(iter, op, op.rd_resume_position) l.append(op) return iter.inputargs, l, iter @@ -82,18 +75,17 @@ resume.capture_resumedata(framestack, None, [], t) (i0, i1), l, iter = self.unpack(t) assert l[1].opnum == rop.GUARD_FALSE - boxes = unpack_snapshot(iter, l[1].rd_resume_position) - 
assert boxes == [i0, i1] + assert l[1].framestack[0].boxes == [i0, i1] t.record_op(rop.GUARD_FALSE, [add]) resume.capture_resumedata([frame0, frame1], None, [], t) t.record_op(rop.INT_ADD, [add, add]) (i0, i1), l, iter = self.unpack(t) assert l[1].opnum == rop.GUARD_FALSE - boxes = unpack_snapshot(iter, l[1].rd_resume_position) - assert boxes == [i0, i1] + assert l[1].framestack[0].boxes == [i0, i1] assert l[2].opnum == rop.GUARD_FALSE - boxes = unpack_snapshot(iter, l[2].rd_resume_position) - assert boxes == [i0, i1, i0, i0, l[0]] + fstack = l[2].framestack + assert fstack[0].boxes == [i0, i1] + assert fstack[1].boxes == [i0, i0, l[0]] def test_read_snapshot_interface(self): i0, i1, i2 = InputArgInt(), InputArgInt(), InputArgInt() @@ -107,6 +99,8 @@ (i0, i1, i2), l, iter = self.unpack(t) pos = l[0].rd_resume_position snapshot_iter = iter.get_snapshot_iter(pos) + assert snapshot_iter.get_list_of_boxes() == [] + assert snapshot_iter.get_list_of_boxes() == [] size, jc_index, pc = snapshot_iter.get_size_jitcode_pc() assert size == 2 assert jc_index == 2 @@ -119,6 +113,8 @@ assert [snapshot_iter.next() for i in range(2)] == [i2, i2] pos = l[1].rd_resume_position snapshot_iter = iter.get_snapshot_iter(pos) + assert snapshot_iter.get_list_of_boxes() == [] + assert snapshot_iter.get_list_of_boxes() == [] size, jc_index, pc = snapshot_iter.get_size_jitcode_pc() assert size == 2 assert jc_index == 2 @@ -158,4 +154,18 @@ t2 = t.cut_trace_from(cut_point, [add1, i1]) (i0, i1), l, iter = self.unpack(t2) assert len(l) == 3 - assert l[0].getarglist() == [i0, i1] \ No newline at end of file + assert l[0].getarglist() == [i0, i1] + + def test_virtualizable_virtualref(self): + class SomeDescr(AbstractDescr): + pass + + i0, i1, i2 = InputArgInt(), InputArgInt(), InputArgInt() + t = Trace([i0, i1, i2]) + p0 = t.record_op(rop.NEW_WITH_VTABLE, [], descr=SomeDescr()) + t.record_op(rop.GUARD_TRUE, [i0]) + resume.capture_resumedata([], [i1, i2, p0], [p0, i1], t) + (i0, i1, i2), l, iter = self.unpack(t) + assert not l[1].framestack + assert l[1].virtualizables == [l[0], i1, i2] + assert l[1].vref_boxes == [l[0], i1] \ No newline at end of file diff --git a/rpython/jit/metainterp/test/test_pyjitpl.py b/rpython/jit/metainterp/test/test_pyjitpl.py --- a/rpython/jit/metainterp/test/test_pyjitpl.py +++ b/rpython/jit/metainterp/test/test_pyjitpl.py @@ -87,6 +87,7 @@ c3 = ConstInt(3) boxes = [b1, b2, b1, c3] dup = {} + metainterp.history.set_inputargs([b1, b2]) metainterp.remove_consts_and_duplicates(boxes, 4, dup) assert boxes[0] is b1 assert boxes[1] is b2 From pypy.commits at gmail.com Thu Mar 10 08:01:08 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 10 Mar 2016 05:01:08 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: fix what we pass to runtime boxes Message-ID: <56e17014.03dd1c0a.3a902.23d9@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82936:b62dddb64712 Date: 2016-03-10 15:00 +0200 http://bitbucket.org/pypy/pypy/changeset/b62dddb64712/ Log: fix what we pass to runtime boxes diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -261,7 +261,7 @@ return compile_simple_loop(metainterp, greenkey, trace, enable_opts) call_pure_results = metainterp.call_pure_results - preamble_data = LoopCompileData(trace, inputargs, + preamble_data = LoopCompileData(trace, jumpargs, call_pure_results=call_pure_results, enable_opts=enable_opts) try: From pypy.commits at gmail.com Thu 
Mar 10 08:59:56 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 10 Mar 2016 05:59:56 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: fix fix fix fix Message-ID: <56e17ddc.857ac20a.8280b.33c4@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82937:829e272ed683 Date: 2016-03-10 15:59 +0200 http://bitbucket.org/pypy/pypy/changeset/829e272ed683/ Log: fix fix fix fix diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -331,7 +331,7 @@ counter = self._register_counter(tp, number, token) c_adr = ConstInt(rffi.cast(lltype.Signed, counter)) operations.append( - ResOperation(rop.INCREMENT_DEBUG_COUNTER, [c_adr], None)) + ResOperation(rop.INCREMENT_DEBUG_COUNTER, [c_adr])) def _register_counter(self, tp, number, token): # YYY very minor leak -- we need the counters to stay alive diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -361,8 +361,8 @@ # return True in cases where the operation and the following guard # should likely remain together. Simplified version of # can_merge_with_next_guard() in llsupport/regalloc.py. - if not op.is_comparison(): - return op.is_ovf() # int_xxx_ovf() / guard_no_overflow() + if not rop.is_comparison(op.opnum): + return rop.is_ovf(op.opnum) # int_xxx_ovf() / guard_no_overflow() if i + 1 >= len(operations): return False next_op = operations[i + 1] @@ -614,8 +614,7 @@ args = [frame, arglist[jd.index_of_virtualizable]] else: args = [frame] - call_asm = ResOperation(op.getopnum(), args, - op.getdescr()) + call_asm = ResOperation(op.getopnum(), args, descr=op.getdescr()) self.replace_op_with(self.get_box_replacement(op), call_asm) self.emit_op(call_asm) @@ -673,7 +672,7 @@ def _gen_call_malloc_gc(self, args, v_result, descr): """Generate a CALL_MALLOC_GC with the given args.""" self.emitting_an_operation_that_can_collect() - op = ResOperation(rop.CALL_MALLOC_GC, args, descr) + op = ResOperation(rop.CALL_MALLOC_GC, args, descr=descr) self.replace_op_with(v_result, op) self.emit_op(op) # In general, don't add v_result to write_barrier_applied: diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -351,6 +351,7 @@ metainterp.box_names_memo) except InvalidLoop: # Fall back on jumping directly to preamble + raise InvalidLoop xxxx jump_op = ResOperation(rop.JUMP, inputargs[:], descr=loop_jitcell_token) loop_data = UnrolledLoopData(end_label, jump_op, [jump_op], start_state, diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -9,7 +9,7 @@ from rpython.jit.metainterp.history import ConstInt, Const from rpython.jit.metainterp.resoperation import AbstractResOp, AbstractInputArg,\ - ResOperation, oparity, rop, opwithdescr + ResOperation, oparity, rop, opwithdescr, GuardResOp from rpython.rlib.rarithmetic import intmask from rpython.rlib.objectmodel import we_are_translated @@ -21,6 +21,9 @@ class Sentinel(object): pass +class BaseTrace(object): + pass + class SnapshotIterator(object): def __init__(self, main_iter, pos, end_pos): self.trace = main_iter.trace @@ -69,7 +72,7 @@ l.append(self.next()) return l -class 
TraceIterator(object): +class TraceIterator(BaseTrace): def __init__(self, trace, start, end, force_inputargs=None): self.trace = trace self._cache = [None] * trace._count @@ -78,10 +81,10 @@ arg in force_inputargs] self._inputargs = [None] * len(trace.inputargs) for i, arg in enumerate(force_inputargs): - if arg.position >= 0: - self._cache[arg.position] = self.inputargs[i] + if arg.get_position() >= 0: + self._cache[arg.get_position()] = self.inputargs[i] else: - self._inputargs[-arg.position-1] = self.inputargs[i] + self._inputargs[-arg.get_position()-1] = self.inputargs[i] else: self.inputargs = [rop.inputarg_from_tp(arg.type) for arg in self.trace.inputargs] @@ -115,7 +118,7 @@ elif tag == TAGCONST: return self.trace._consts[v] else: - yyyy + assert False def skip_resume_data(self): pos = self.pos @@ -145,14 +148,12 @@ descr = None res = ResOperation(opnum, args, -1, descr=descr) if rop.is_guard(opnum): + assert isinstance(res, GuardResOp) res.rd_resume_position = self.skip_resume_data() self._cache[self._count] = res self._count += 1 return res -class BaseTrace(object): - pass - class CutTrace(BaseTrace): def __init__(self, trace, start, count, inputargs): self.trace = trace @@ -172,6 +173,7 @@ self._descrs = [None] self._consts = [None] for i, inparg in enumerate(inputargs): + assert isinstance(inparg, AbstractInputArg) inparg.position = -i - 1 self._count = 0 self.inputargs = inputargs @@ -199,9 +201,9 @@ self._consts.append(box) return tag(TAGCONST, len(self._consts) - 1) elif isinstance(box, AbstractResOp): - return tag(TAGBOX, box.position) + return tag(TAGBOX, box.get_position()) elif isinstance(box, AbstractInputArg): - return tag(TAGBOX, box.position) + return tag(TAGBOX, box.get_position()) else: assert False, "unreachable code" diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -348,7 +348,7 @@ opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array return if op.is_call(): - if op.is_call_assembler(): + if rop.is_call_assembler(op.getopnum()): self._seen_guard_not_invalidated = False else: effectinfo = op.getdescr().get_extra_info() diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -553,7 +553,7 @@ @specialize.argtype(0) def _emit_operation(self, op): - assert not op.is_call_pure() + assert not rop.is_call_pure(op.getopnum()) orig_op = op op = self.get_box_replacement(op) if op.is_constant(): @@ -660,7 +660,7 @@ return self._really_emitted_operation def is_call_pure_pure_canraise(self, op): - if not op.is_call_pure(): + if not rop.is_call_pure(op.getopnum()): return False effectinfo = op.getdescr().get_extra_info() if effectinfo.check_can_raise(ignore_memoryerror=True): diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1616,7 +1616,7 @@ resbox = None else: assert False - self.metainterp.vrefs_after_residual_call() + self.metainterp.vrefs_after_residual_call(self.metainterp._last_op, cut_pos) vablebox = None if assembler_call: vablebox, resbox = self.metainterp.direct_assembler_call( @@ -1907,7 +1907,7 @@ self.last_exc_value = lltype.nullptr(rclass.OBJECT) self.forced_virtualizable = None self.partial_trace = None - 
self.retracing_from = -1 + self.retracing_from = (-1, -1) self.call_pure_results = args_dict() self.heapcache = HeapCache() @@ -2770,7 +2770,7 @@ force_token], None, descr=vinfo.vable_token_descr) - def vrefs_after_residual_call(self): + def vrefs_after_residual_call(self, op, cut_pos): vrefinfo = self.staticdata.virtualref_info for i in range(0, len(self.virtualref_boxes), 2): vrefbox = self.virtualref_boxes[i+1] @@ -2780,7 +2780,7 @@ # during this CALL_MAY_FORCE. Mark this fact by # generating a VIRTUAL_REF_FINISH on it and replacing # it by ConstPtr(NULL). - self.stop_tracking_virtualref(i) + self.stop_tracking_virtualref(i, op, cut_pos) def vable_after_residual_call(self, funcbox): vinfo = self.jitdriver_sd.virtualizable_info @@ -2804,15 +2804,17 @@ # have the eventual exception raised (this is normally done # after the call to vable_after_residual_call()). - def stop_tracking_virtualref(self, i): + def stop_tracking_virtualref(self, i, op, cut_pos): virtualbox = self.virtualref_boxes[i] vrefbox = self.virtualref_boxes[i+1] # record VIRTUAL_REF_FINISH just before the current CALL_MAY_FORCE - call_may_force_op = self.history.operations.pop() - assert call_may_force_op.is_call_may_force() - self.history.record(rop.VIRTUAL_REF_FINISH, + self.history.cut(cut_pos) # pop the CALL + assert rop.is_call_may_force(op.getopnum()) + self.history.record_nospec(rop.VIRTUAL_REF_FINISH, [vrefbox, virtualbox], None) - self.history.operations.append(call_may_force_op) + newop = self.history.record_nospec(op.getopnum(), op.getarglist(), + op.getdescr()) + op.position = newop.position # mark by replacing it with ConstPtr(NULL) self.virtualref_boxes[i+1] = self.cpu.ts.CONST_NULL @@ -3018,7 +3020,7 @@ patching the CALL_MAY_FORCE that occurred just now. """ self.history.cut(cut_pos) - assert op.is_call_may_force() + assert rop.is_call_may_force(op.getopnum()) num_green_args = targetjitdriver_sd.num_green_args arglist = op.getarglist() greenargs = arglist[1:num_green_args+1] diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -232,12 +232,15 @@ class AbstractResOpOrInputArg(AbstractValue): - _attrs_ = ('_forwarded',) + _attrs_ = ('_forwarded', 'position') _forwarded = None # either another resop or OptInfo def get_forwarded(self): return self._forwarded + def get_position(self): + return self.position + def set_forwarded(self, forwarded_to): assert forwarded_to is not self self._forwarded = forwarded_to @@ -405,12 +408,11 @@ def can_raise(self): return rop.can_raise(self.getopnum()) - # XXX fix - def is_foldable_guard(self): - return rop._GUARD_FOLDABLE_FIRST <= self.getopnum() <= rop._GUARD_FOLDABLE_LAST + return rop.is_foldable_guard(self.getopnun()) def is_guard_exception(self): + return rop.is_guard_ return (self.getopnum() == rop.GUARD_EXCEPTION or self.getopnum() == rop.GUARD_NO_EXCEPTION) @@ -451,41 +453,6 @@ rop.GETARRAYITEM_GC_PURE_F, rop.GETARRAYITEM_GC_PURE_R) - @staticmethod - def is_real_call(opnum): - return (opnum == rop.CALL_I or - opnum == rop.CALL_R or - opnum == rop.CALL_F or - opnum == rop.CALL_N) - - def is_call_assembler(self): - opnum = self.opnum - return (opnum == rop.CALL_ASSEMBLER_I or - opnum == rop.CALL_ASSEMBLER_R or - opnum == rop.CALL_ASSEMBLER_N or - opnum == rop.CALL_ASSEMBLER_F) - - def is_call_may_force(self): - opnum = self.opnum - return (opnum == rop.CALL_MAY_FORCE_I or - opnum == rop.CALL_MAY_FORCE_R or - opnum == rop.CALL_MAY_FORCE_N or 
- opnum == rop.CALL_MAY_FORCE_F) - - def is_call_pure(self): - opnum = self.opnum - return (opnum == rop.CALL_PURE_I or - opnum == rop.CALL_PURE_R or - opnum == rop.CALL_PURE_N or - opnum == rop.CALL_PURE_F) - - def is_call_release_gil(self): - opnum = self.opnum - # no R returning call_release_gil - return (opnum == rop.CALL_RELEASE_GIL_I or - opnum == rop.CALL_RELEASE_GIL_F or - opnum == rop.CALL_RELEASE_GIL_N) - def is_vector_arithmetic(self): return rop._VEC_ARITHMETIC_FIRST <= self.getopnum() <= rop._VEC_ARITHMETIC_LAST @@ -1492,8 +1459,8 @@ opnum == rop.CALL_F or opnum == rop.CALL_N) - def is_call_assembler(self): - opnum = self.opnum + @staticmethod + def is_call_assembler(opnum): return (opnum == rop.CALL_ASSEMBLER_I or opnum == rop.CALL_ASSEMBLER_R or opnum == rop.CALL_ASSEMBLER_N or @@ -1513,8 +1480,8 @@ opnum == rop.CALL_PURE_N or opnum == rop.CALL_PURE_F) - def is_call_release_gil(self): - opnum = self.opnum + @staticmethod + def is_call_release_gil(opnum): # no R returning call_release_gil return (opnum == rop.CALL_RELEASE_GIL_I or opnum == rop.CALL_RELEASE_GIL_F or @@ -1580,6 +1547,26 @@ opnum == rop.CALL_LOOPINVARIANT_N) @staticmethod + def get_gc_load(tp): + if tp == 'i': + return rop.GC_LOAD_I + elif tp == 'f': + return rop.GC_LOAD_F + else: + assert tp == 'r' + return rop.GC_LOAD_R + + @staticmethod + def get_gc_load_indexed(tp): + if tp == 'i': + return rop.GC_LOAD_INDEXED_I + elif tp == 'f': + return rop.GC_LOAD_INDEXED_F + else: + assert tp == 'r' + return rop.GC_LOAD_INDEXED_R + + @staticmethod def inputarg_from_tp(tp): if tp == 'i': return InputArgInt() From pypy.commits at gmail.com Thu Mar 10 09:37:17 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 10 Mar 2016 06:37:17 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: rename MAXINT, fix its value, add a few extra checks Message-ID: <56e1869d.a3f6c20a.c5d46.3f7d@mx.google.com> Author: Armin Rigo Branch: jit-leaner-frontend Changeset: r82938:2e4ec6776425 Date: 2016-03-10 15:36 +0100 http://bitbucket.org/pypy/pypy/changeset/2e4ec6776425/ Log: rename MAXINT, fix its value, add a few extra checks diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -16,7 +16,7 @@ TAGINT, TAGCONST, TAGBOX = range(3) TAGMASK = 0x3 TAGSHIFT = 2 -MAXINT = 65536 +NUM_SMALL_INTS = 2 ** (16 - TAGSHIFT) class Sentinel(object): pass @@ -195,7 +195,7 @@ if isinstance(box, Const): if (isinstance(box, ConstInt) and isinstance(box.getint(), int) and # symbolics - 0 <= box.getint() < MAXINT): + 0 <= box.getint() < NUM_SMALL_INTS): return tag(TAGINT, box.getint()) else: self._consts.append(box) @@ -211,8 +211,11 @@ operations = self._ops pos = self._count operations.append(opnum) - if oparity[opnum] == -1: + expected_arity = oparity[opnum] + if expected_arity == -1: operations.append(len(argboxes)) + else: + assert len(argboxes) == expected_arity operations.extend([self._encode(box) for box in argboxes]) if opwithdescr[opnum]: if descr is None: @@ -226,8 +229,11 @@ operations = self._ops pos = self._count operations.append(opnum) - if oparity[opnum] == -1: + expected_arity = oparity[opnum] + if expected_arity == -1: operations.append(len(tagged_args)) + else: + assert len(argboxes) == expected_arity operations.extend(tagged_args) if tagged_descr != -1: operations.append(tagged_descr) From pypy.commits at gmail.com Thu Mar 10 09:41:58 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 10 Mar 2016 06:41:58 
-0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: some rpythonization Message-ID: <56e187b6.418f1c0a.96723.4338@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82939:7d0317450958 Date: 2016-03-10 16:41 +0200 http://bitbucket.org/pypy/pypy/changeset/7d0317450958/ Log: some rpythonization diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -249,7 +249,7 @@ ofs = debug_info.asminfo.ops_offset else: ofs = {} - ops = debug_info.operations + _, ops = debug_info.trace.unpack() self.w_ops = space.newlist(wrap_oplist(space, logops, ops, ofs)) else: self.w_ops = space.w_None diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -287,6 +287,7 @@ return None if ((warmstate.vec and jitdriver_sd.vec) or warmstate.vec_all): + assert False, "vectorization disabled" from rpython.jit.metainterp.optimizeopt.vector import optimize_vector loop_info, loop_ops = optimize_vector(metainterp_sd, jitdriver_sd, warmstate, diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -300,8 +300,6 @@ return TraceIterator(self, 0, len(self._ops)) def unpack(self): - """ NOT_RPYTHON - really rpython, but only use for debugging - """ iter = self.get_iter() ops = [] while not iter.done(): diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2977,7 +2977,7 @@ if start_stack: jitdriver_sd, key, pos = start_stack[0] warmstate = jitdriver_sd.warmstate - size = len(self.history.operations) - pos + size = self.history.get_trace_position()[0] - pos[0] if size > max_size: if warmstate is not None: r = warmstate.get_location_str(key) From pypy.commits at gmail.com Thu Mar 10 09:42:10 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 10 Mar 2016 06:42:10 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: These two methods are not used and not tested Message-ID: <56e187c2.6672c20a.19861.49e5@mx.google.com> Author: Armin Rigo Branch: jit-leaner-frontend Changeset: r82940:d8fb92d44ac9 Date: 2016-03-10 15:41 +0100 http://bitbucket.org/pypy/pypy/changeset/d8fb92d44ac9/ Log: These two methods are not used and not tested diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -226,6 +226,7 @@ return pos def _record_raw(self, opnum, tagged_args, tagged_descr=-1): + NOT_USED operations = self._ops pos = self._count operations.append(opnum) @@ -260,6 +261,7 @@ return ResOperation(opnum, argboxes, pos, descr) def record_op_tag(self, opnum, tagged_args, descr=None): + NOT_USED return tag(TAGBOX, self._record_raw(opnum, tagged_args, descr)) def record_snapshot(self, jitcode, pc, active_boxes): From pypy.commits at gmail.com Thu Mar 10 09:43:20 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 10 Mar 2016 06:43:20 -0800 (PST) Subject: [pypy-commit] pypy default: merge the new s390x backend Message-ID: <56e18808.838d1c0a.6e01f.42d7@mx.google.com> Author: Richard Plangger Branch: Changeset: r82941:308a29538cda Date: 2016-03-10 15:40 +0100 http://bitbucket.org/pypy/pypy/changeset/308a29538cda/ Log: merge the new s390x 
backend diff too long, truncating to 2000 out of 11876 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -74,5 +74,6 @@ ^rpython/doc/_build/.*$ ^compiled ^.git/ +^.hypothesis/ ^release/ ^rpython/_cache$ diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,3 +5,15 @@ .. this is a revision shortly after release-5.0 .. startrev: b238b48f9138 +.. branch: memop-simplify3 + +Simplification of zero_array. Start and end index are scaled using res ops (or cpu scaling) rather than doing it manually. + +.. branch: s390x-backend + +The jit compiler backend implementation for the s390x architecutre. + +.. branch: s390x-enhance-speed + +Refactoring to only store 64-bit values in the literal pool of the assembly. Generated machine code uses less space and runs faster. + diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -2,7 +2,6 @@ from pypy.module.thread.test.support import GenericTestThread - class AppTestMinimal: spaceconfig = dict(usemodules=['__pypy__']) diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -389,6 +389,7 @@ def test_writelines(self): import array + import sys fn = self.temptestfile with file(fn, 'w') as f: f.writelines(['abc']) @@ -406,7 +407,10 @@ exc = raises(TypeError, f.writelines, [memoryview('jkl')]) assert str(exc.value) == "writelines() argument must be a sequence of strings" out = open(fn, 'rb').readlines()[0] - assert out[0:5] == 'abcd\x00' + if sys.byteorder == 'big': + assert out[0:7] == 'abc\x00\x00\x00d' + else: + assert out[0:5] == 'abcd\x00' assert out[-3:] == 'ghi' with file(fn, 'wb') as f: diff --git a/pypy/module/_rawffi/callback.py b/pypy/module/_rawffi/callback.py --- a/pypy/module/_rawffi/callback.py +++ b/pypy/module/_rawffi/callback.py @@ -1,17 +1,23 @@ - +import sys from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module._rawffi.interp_rawffi import write_ptr from pypy.module._rawffi.structure import W_Structure from pypy.module._rawffi.interp_rawffi import (W_DataInstance, letter2tp, - unwrap_value, unpack_argshapes, got_libffi_error) + unwrap_value, unpack_argshapes, got_libffi_error, is_narrow_integer_type, + LL_TYPEMAP, NARROW_INTEGER_TYPES) from rpython.rlib.clibffi import USERDATA_P, CallbackFuncPtr, FUNCFLAG_CDECL from rpython.rlib.clibffi import ffi_type_void, LibFFIError from rpython.rlib import rweakref from pypy.module._rawffi.tracker import tracker from pypy.interpreter.error import OperationError from pypy.interpreter import gateway +from rpython.rlib.unroll import unrolling_iterable + +BIGENDIAN = sys.byteorder == 'big' + +unroll_narrow_integer_types = unrolling_iterable(NARROW_INTEGER_TYPES) app = gateway.applevel(''' def tbprint(tb, err): @@ -42,8 +48,17 @@ args_w[i] = space.wrap(rffi.cast(rffi.ULONG, ll_args[i])) w_res = space.call(w_callable, space.newtuple(args_w)) if callback_ptr.result is not None: # don't return void - unwrap_value(space, write_ptr, ll_res, 0, - callback_ptr.result, w_res) + ptr = ll_res + letter = callback_ptr.result + if BIGENDIAN: + # take care of narrow integers! 
+ for int_type in unroll_narrow_integer_types: + if int_type == letter: + T = LL_TYPEMAP[int_type] + n = rffi.sizeof(lltype.Signed) - rffi.sizeof(T) + ptr = rffi.ptradd(ptr, n) + break + unwrap_value(space, write_ptr, ptr, 0, letter, w_res) except OperationError, e: tbprint(space, space.wrap(e.get_traceback()), space.wrap(e.errorstr(space))) diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -1,3 +1,4 @@ +import sys from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -19,6 +20,8 @@ from pypy.module._rawffi.buffer import RawFFIBuffer from pypy.module._rawffi.tracker import tracker +BIGENDIAN = sys.byteorder == 'big' + TYPEMAP = { # XXX A mess with unsigned/signed/normal chars :-/ 'c' : ffi_type_uchar, @@ -331,10 +334,14 @@ if tracker.DO_TRACING: ll_buf = rffi.cast(lltype.Signed, self.ll_buffer) tracker.trace_allocation(ll_buf, self) + self._ll_buffer = self.ll_buffer def getbuffer(self, space): return space.wrap(rffi.cast(lltype.Unsigned, self.ll_buffer)) + def buffer_advance(self, n): + self.ll_buffer = rffi.ptradd(self.ll_buffer, n) + def byptr(self, space): from pypy.module._rawffi.array import ARRAY_OF_PTRS array = ARRAY_OF_PTRS.allocate(space, 1) @@ -342,16 +349,17 @@ return space.wrap(array) def free(self, space): - if not self.ll_buffer: + if not self._ll_buffer: raise segfault_exception(space, "freeing NULL pointer") self._free() def _free(self): if tracker.DO_TRACING: - ll_buf = rffi.cast(lltype.Signed, self.ll_buffer) + ll_buf = rffi.cast(lltype.Signed, self._ll_buffer) tracker.trace_free(ll_buf) - lltype.free(self.ll_buffer, flavor='raw') + lltype.free(self._ll_buffer, flavor='raw') self.ll_buffer = lltype.nullptr(rffi.VOIDP.TO) + self._ll_buffer = self.ll_buffer def buffer_w(self, space, flags): return RawFFIBuffer(self) @@ -432,12 +440,19 @@ space.wrap("cannot directly read value")) wrap_value._annspecialcase_ = 'specialize:arg(1)' +NARROW_INTEGER_TYPES = 'cbhiBIH?' 
+ +def is_narrow_integer_type(letter): + return letter in NARROW_INTEGER_TYPES class W_FuncPtr(W_Root): def __init__(self, space, ptr, argshapes, resshape): self.ptr = ptr self.argshapes = argshapes self.resshape = resshape + self.narrow_integer = False + if resshape is not None: + self.narrow_integer = is_narrow_integer_type(resshape.itemcode.lower()) def getbuffer(self, space): return space.wrap(rffi.cast(lltype.Unsigned, self.ptr.funcsym)) @@ -497,6 +512,10 @@ result = self.resshape.allocate(space, 1, autofree=True) # adjust_return_size() was used here on result.ll_buffer self.ptr.call(args_ll, result.ll_buffer) + if BIGENDIAN and self.narrow_integer: + # we get a 8 byte value in big endian + n = rffi.sizeof(lltype.Signed) - result.shape.size + result.buffer_advance(n) return space.wrap(result) else: self.ptr.call(args_ll, lltype.nullptr(rffi.VOIDP.TO)) diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -18,6 +18,9 @@ from rpython.rlib.rarithmetic import intmask, signedtype, r_uint, \ r_ulonglong from rpython.rtyper.lltypesystem import lltype, rffi +import sys + +IS_BIG_ENDIAN = sys.byteorder == 'big' @@ -114,20 +117,32 @@ size += intmask(fieldsize) bitsizes.append(fieldsize) elif field_type == NEW_BITFIELD: - bitsizes.append((bitsize << 16) + bitoffset) + if IS_BIG_ENDIAN: + off = last_size - bitoffset - bitsize + else: + off = bitoffset + bitsizes.append((bitsize << 16) + off) bitoffset = bitsize size = round_up(size, fieldalignment) pos.append(size) size += fieldsize elif field_type == CONT_BITFIELD: - bitsizes.append((bitsize << 16) + bitoffset) + if IS_BIG_ENDIAN: + off = last_size - bitoffset - bitsize + else: + off = bitoffset + bitsizes.append((bitsize << 16) + off) bitoffset += bitsize # offset is already updated for the NEXT field pos.append(size - fieldsize) elif field_type == EXPAND_BITFIELD: size += fieldsize - last_size / 8 last_size = fieldsize * 8 - bitsizes.append((bitsize << 16) + bitoffset) + if IS_BIG_ENDIAN: + off = last_size - bitoffset - bitsize + else: + off = bitoffset + bitsizes.append((bitsize << 16) + off) bitoffset += bitsize # offset is already updated for the NEXT field pos.append(size - fieldsize) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -704,7 +704,6 @@ def compare(a, b): a1 = _rawffi.Array('i').fromaddress(_rawffi.Array('P').fromaddress(a, 1)[0], 1) a2 = _rawffi.Array('i').fromaddress(_rawffi.Array('P').fromaddress(b, 1)[0], 1) - print "comparing", a1[0], "with", a2[0] if a1[0] not in [1,2,3,4] or a2[0] not in [1,2,3,4]: bogus_args.append((a1[0], a2[0])) if a1[0] > a2[0]: @@ -715,7 +714,7 @@ a2[0] = len(ll_to_sort) a3 = _rawffi.Array('l')(1) a3[0] = struct.calcsize('i') - cb = _rawffi.CallbackPtr(compare, ['P', 'P'], 'i') + cb = _rawffi.CallbackPtr(compare, ['P', 'P'], 'l') a4 = cb.byptr() qsort(a1, a2, a3, a4) res = [ll_to_sort[i] for i in range(len(ll_to_sort))] @@ -896,11 +895,21 @@ b = _rawffi.Array('c').fromaddress(a.buffer, 38) if sys.maxunicode > 65535: # UCS4 build - assert b[0] == 'x' - assert b[1] == '\x00' - assert b[2] == '\x00' - assert b[3] == '\x00' - assert b[4] == 'y' + if sys.byteorder == 'big': + assert b[0] == '\x00' + assert b[1] == '\x00' + assert b[2] == '\x00' + assert b[3] == 'x' + assert b[4] == '\x00' + assert b[5] == '\x00' + assert b[6] == '\x00' + assert b[7] == 
'y' + else: + assert b[0] == 'x' + assert b[1] == '\x00' + assert b[2] == '\x00' + assert b[3] == '\x00' + assert b[4] == 'y' else: # UCS2 build assert b[0] == 'x' diff --git a/pypy/module/_rawffi/test/test_struct.py b/pypy/module/_rawffi/test/test_struct.py --- a/pypy/module/_rawffi/test/test_struct.py +++ b/pypy/module/_rawffi/test/test_struct.py @@ -1,4 +1,4 @@ - +import sys from pypy.module._rawffi.structure import size_alignment_pos from pypy.module._rawffi.interp_rawffi import TYPEMAP, letter2tp @@ -63,4 +63,7 @@ for (name, t, size) in fields]) assert size == 8 assert pos == [0, 0, 0] - assert bitsizes == [0x10000, 0x3e0001, 0x1003f] + if sys.byteorder == 'little': + assert bitsizes == [0x10000, 0x3e0001, 0x1003f] + else: + assert bitsizes == [0x1003f, 0x3e0001, 0x10000] diff --git a/pypy/module/_vmprof/conftest.py b/pypy/module/_vmprof/conftest.py new file mode 100644 --- /dev/null +++ b/pypy/module/_vmprof/conftest.py @@ -0,0 +1,6 @@ +import py, os + +def pytest_collect_directory(path, parent): + if os.uname()[4] == 's390x': + py.test.skip("zarch tests skipped") +pytest_collect_file = pytest_collect_directory diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -390,7 +390,7 @@ ((dummy::cppyy_test_data*)self)->destroy_arrays(); } else if (idx == s_methods["cppyy_test_data::set_bool"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_bool((bool)((CPPYY_G__value*)args)[0].obj.in); + ((dummy::cppyy_test_data*)self)->set_bool((bool)((CPPYY_G__value*)args)[0].obj.i); } else if (idx == s_methods["cppyy_test_data::set_char"]) { assert(self && nargs == 1); ((dummy::cppyy_test_data*)self)->set_char(((CPPYY_G__value*)args)[0].obj.ch); diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -51,13 +51,19 @@ assert arr.tolist() == [1, 23, 4] def test_buffer(self): + import sys module = self.import_module(name='array') arr = module.array('i', [1,2,3,4]) buf = buffer(arr) exc = raises(TypeError, "buf[1] = '1'") assert str(exc.value) == "buffer is read-only" - # XXX big-endian - assert str(buf) == ('\x01\0\0\0' - '\x02\0\0\0' - '\x03\0\0\0' - '\x04\0\0\0') + if sys.byteorder == 'big': + assert str(buf) == ('\0\0\0\x01' + '\0\0\0\x02' + '\0\0\0\x03' + '\0\0\0\x04') + else: + assert str(buf) == ('\x01\0\0\0' + '\x02\0\0\0' + '\x03\0\0\0' + '\x04\0\0\0') diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -606,7 +606,7 @@ long intval; PyObject *name; - if (!PyArg_ParseTuple(args, "i", &intval)) + if (!PyArg_ParseTuple(args, "l", &intval)) return NULL; IntLike_Type.tp_as_number = &intlike_as_number; diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -386,11 +386,11 @@ lltype.free(pendian, flavor='raw') test("\x61\x00\x62\x00\x63\x00\x64\x00", -1) - - test("\x61\x00\x62\x00\x63\x00\x64\x00", None) - + if sys.byteorder == 'big': + test("\x00\x61\x00\x62\x00\x63\x00\x64", None) + else: + test("\x61\x00\x62\x00\x63\x00\x64\x00", None) test("\x00\x61\x00\x62\x00\x63\x00\x64", 1) - 
test("\xFE\xFF\x00\x61\x00\x62\x00\x63\x00\x64", 0, 1) test("\xFF\xFE\x61\x00\x62\x00\x63\x00\x64\x00", 0, -1) @@ -423,7 +423,10 @@ test("\x61\x00\x00\x00\x62\x00\x00\x00", -1) - test("\x61\x00\x00\x00\x62\x00\x00\x00", None) + if sys.byteorder == 'big': + test("\x00\x00\x00\x61\x00\x00\x00\x62", None) + else: + test("\x61\x00\x00\x00\x62\x00\x00\x00", None) test("\x00\x00\x00\x61\x00\x00\x00\x62", 1) diff --git a/pypy/module/marshal/test/test_marshalimpl.py b/pypy/module/marshal/test/test_marshalimpl.py --- a/pypy/module/marshal/test/test_marshalimpl.py +++ b/pypy/module/marshal/test/test_marshalimpl.py @@ -64,14 +64,17 @@ import marshal, struct class FakeM: + # NOTE: marshal is platform independent, running this test must assume + # that self.seen gets values from the endianess of the marshal module. + # (which is little endian!) def __init__(self): self.seen = [] def start(self, code): self.seen.append(code) def put_int(self, value): - self.seen.append(struct.pack("i", value)) + self.seen.append(struct.pack("i4'), ('y', '>f4')]" in repr(a) + else: + assert "[('x', 'i4" + E = '<' if sys.byteorder == 'little' else '>' + b = np.dtype((xyz, [("col1", E+"i4"), ("col2", E+"i4"), ("col3", E+"i4")])) data = [(1, 2,3), (4, 5, 6)] a = np.array(data, dtype=b) x = pickle.loads(pickle.dumps(a)) @@ -423,18 +429,20 @@ assert hash(t5) != hash(t6) def test_pickle(self): + import sys import numpy as np from numpy import array, dtype from cPickle import loads, dumps a = array([1,2,3]) + E = '<' if sys.byteorder == 'little' else '>' if self.ptr_size == 8: - assert a.dtype.__reduce__() == (dtype, ('i8', 0, 1), (3, '<', None, None, None, -1, -1, 0)) + assert a.dtype.__reduce__() == (dtype, ('i8', 0, 1), (3, E, None, None, None, -1, -1, 0)) else: - assert a.dtype.__reduce__() == (dtype, ('i4', 0, 1), (3, '<', None, None, None, -1, -1, 0)) + assert a.dtype.__reduce__() == (dtype, ('i4', 0, 1), (3, E, None, None, None, -1, -1, 0)) assert loads(dumps(a.dtype)) == a.dtype assert np.dtype('bool').__reduce__() == (dtype, ('b1', 0, 1), (3, '|', None, None, None, -1, -1, 0)) assert np.dtype('|V16').__reduce__() == (dtype, ('V16', 0, 1), (3, '|', None, None, None, 16, 1, 0)) - assert np.dtype(('')+'U7' assert dtype([('', 'f8')]).str == "|V8" assert dtype(('f8', 2)).str == "|V16" @@ -968,8 +978,12 @@ def test_isnative(self): from numpy import dtype + import sys assert dtype('i4').isnative == True - assert dtype('>i8').isnative == False + if sys.byteorder == 'big': + assert dtype('i8').isnative == False def test_any_all_nonzero(self): import numpy @@ -1185,6 +1199,7 @@ def test_setstate(self): import numpy as np import sys + E = '<' if sys.byteorder == 'little' else '>' d = np.dtype('f8') d.__setstate__((3, '|', (np.dtype('float64'), (2,)), None, None, 20, 1, 0)) assert d.str == ('<' if sys.byteorder == 'little' else '>') + 'f8' @@ -1201,7 +1216,7 @@ assert d.shape == (2,) assert d.itemsize == 8 assert d.subdtype is not None - assert repr(d) == "dtype(('' + assert str(dt) == "{'names':['f0','f1'], 'formats':['%si4','u1'], 'offsets':[0,4], 'itemsize':8, 'aligned':True}" % E dt = np.dtype([('f1', 'u1'), ('f0', 'i4')], align=True) - assert str(dt) == "{'names':['f1','f0'], 'formats':['u1',' 2 ** 31 - 1: - assert (u == [1]).all() + if sys.byteorder == 'big': + assert (u == [0x0100000000000000]).all() + else: + assert (u == [1]).all() else: - assert (u == [1, 0]).all() + if sys.byteorder == 'big': + assert (u == [0x01000000, 0]).all() + else: + assert (u == [1, 0]).all() v = fromstring("abcd", dtype="|S2") assert v[0] == 
"ab" assert v[1] == "cd" @@ -3668,9 +3718,15 @@ k = fromstring(self.float16val, dtype='float16') assert k[0] == dtype('float16').type(5.) dt = array([5], dtype='longfloat').dtype + print(dt.itemsize) if dt.itemsize == 8: - m = fromstring('\x00\x00\x00\x00\x00\x00\x14@', - dtype='float64') + import sys + if sys.byteorder == 'big': + m = fromstring('@\x14\x00\x00\x00\x00\x00\x00', + dtype='float64') + else: + m = fromstring('\x00\x00\x00\x00\x00\x00\x14@', + dtype='float64') elif dt.itemsize == 12: m = fromstring('\x00\x00\x00\x00\x00\x00\x00\xa0\x01@\x00\x00', dtype='float96') @@ -3692,8 +3748,13 @@ def test_tostring(self): from numpy import array - assert array([1, 2, 3], 'i2').tostring() == '\x01\x00\x02\x00\x03\x00' - assert array([1, 2, 3], 'i2')[::2].tostring() == '\x01\x00\x03\x00' + import sys + if sys.byteorder == 'big': + assert array([1, 2, 3], 'i2').tostring() == '\x00\x01\x00\x02\x00\x03' + assert array([1, 2, 3], 'i2')[::2].tostring() == '\x00\x01\x00\x03' + else: + assert array([1, 2, 3], 'i2').tostring() == '\x01\x00\x02\x00\x03\x00' + assert array([1, 2, 3], 'i2')[::2].tostring() == '\x01\x00\x03\x00' assert array([1, 2, 3], 'i2')[::2].tostring() == '\x00\x01\x00\x03' assert array(0, dtype='i2').tostring() == '\x00\x00' @@ -4189,7 +4250,11 @@ v = a.view(('float32', 4)) assert v.dtype == np.dtype('float32') assert v.shape == (10, 4) - assert v[0][-1] == 2.53125 + import sys + if sys.byteorder == 'big': + assert v[0][-2] == 2.53125 + else: + assert v[0][-1] == 2.53125 exc = raises(ValueError, "a.view(('float32', 2))") assert exc.value[0] == 'new type not compatible with array.' diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -109,6 +109,7 @@ def test_pickle(self): from numpy import dtype, zeros + import sys try: from numpy.core.multiarray import scalar except ImportError: @@ -119,9 +120,11 @@ f = dtype('float64').type(13.37) c = dtype('complex128').type(13 + 37.j) - assert i.__reduce__() == (scalar, (dtype('int32'), '9\x05\x00\x00')) - assert f.__reduce__() == (scalar, (dtype('float64'), '=\n\xd7\xa3p\xbd*@')) - assert c.__reduce__() == (scalar, (dtype('complex128'), '\x00\x00\x00\x00\x00\x00*@\x00\x00\x00\x00\x00\x80B@')) + swap = lambda s: (''.join(reversed(s))) if sys.byteorder == 'big' else s + assert i.__reduce__() == (scalar, (dtype('int32'), swap('9\x05\x00\x00'))) + assert f.__reduce__() == (scalar, (dtype('float64'), swap('=\n\xd7\xa3p\xbd*@'))) + assert c.__reduce__() == (scalar, (dtype('complex128'), swap('\x00\x00\x00\x00\x00\x00*@') + \ + swap('\x00\x00\x00\x00\x00\x80B@'))) assert loads(dumps(i)) == i assert loads(dumps(f)) == f @@ -256,13 +259,20 @@ assert t < 7e-323 t = s.view('complex64') assert type(t) is np.complex64 - assert 0 < t.real < 1 - assert t.imag == 0 + if sys.byteorder == 'big': + assert 0 < t.imag < 1 + assert t.real == 0 + else: + assert 0 < t.real < 1 + assert t.imag == 0 exc = raises(TypeError, s.view, 'string') assert exc.value[0] == "data-type must not be 0-sized" t = s.view('S8') assert type(t) is np.string_ - assert t == '\x0c' + if sys.byteorder == 'big': + assert t == '\x00' * 7 + '\x0c' + else: + assert t == '\x0c' s = np.dtype('string').type('abc1') assert s.view('S4') == 'abc1' if '__pypy__' in sys.builtin_module_names: diff --git a/pypy/module/micronumpy/test/test_selection.py b/pypy/module/micronumpy/test/test_selection.py --- a/pypy/module/micronumpy/test/test_selection.py +++ 
b/pypy/module/micronumpy/test/test_selection.py @@ -327,10 +327,15 @@ # tests from numpy/core/tests/test_regression.py def test_sort_bigendian(self): from numpy import array, dtype - a = array(range(11), dtype='float64') - c = a.astype(dtype('' D.__module__ = 'mod' mod = new.module('mod') mod.D = D @@ -510,7 +511,7 @@ tp9 Rp10 (I3 - S'<' + S'{E}' p11 NNNI-1 I-1 @@ -520,7 +521,7 @@ S'\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@' p13 tp14 - b.'''.replace(' ','') + b.'''.replace(' ','').format(E=E) for ss,sn in zip(s.split('\n')[1:],s_from_numpy.split('\n')[1:]): if len(ss)>10: # ignore binary data, it will be checked later diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py --- a/pypy/module/pypyjit/test_pypy_c/test_buffers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -34,7 +34,7 @@ i = 0 while i < n: i += 1 - struct.unpack('i', a) # ID: unpack + struct.unpack('') + else: + bit = ord('<') assert loop.match(""" guard_class(p1, #, descr=...) p4 = getfield_gc_r(p1, descr=) @@ -109,7 +113,7 @@ i9 = getfield_gc_i(p4, descr=) i10 = getfield_gc_i(p6, descr=) i12 = int_eq(i10, 61) - i14 = int_eq(i10, 60) + i14 = int_eq(i10, %d) i15 = int_or(i12, i14) f16 = raw_load_f(i9, i5, descr=) guard_true(i15, descr=...) @@ -142,7 +146,7 @@ setfield_gc(p34, i30, descr=) }}} jump(..., descr=...) - """) + """ % (bit,)) def test_reduce_logical_and(self): def main(): diff --git a/pypy/module/pypyjit/test_pypy_c/test_struct.py b/pypy/module/pypyjit/test_pypy_c/test_struct.py --- a/pypy/module/pypyjit/test_pypy_c/test_struct.py +++ b/pypy/module/pypyjit/test_pypy_c/test_struct.py @@ -19,8 +19,8 @@ import struct i = 1 while i < n: - buf = struct.pack("i", i) # ID: pack - x = struct.unpack("i", buf)[0] # ID: unpack + buf = struct.pack(" 3.0.+). +CPython should be version 2.7.+. diff --git a/rpython/jit/backend/arm/locations.py b/rpython/jit/backend/arm/locations.py --- a/rpython/jit/backend/arm/locations.py +++ b/rpython/jit/backend/arm/locations.py @@ -1,7 +1,6 @@ from rpython.jit.metainterp.history import INT, FLOAT from rpython.jit.backend.arm.arch import WORD, DOUBLE_WORD, JITFRAME_FIXED_SIZE - class AssemblerLocation(object): _immutable_ = True type = INT diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -1143,35 +1143,42 @@ def emit_op_zero_array(self, op, arglocs, regalloc, fcond): from rpython.jit.backend.llsupport.descr import unpack_arraydescr assert len(arglocs) == 0 - length_box = op.getarg(2) - if isinstance(length_box, ConstInt) and length_box.getint() == 0: + size_box = op.getarg(2) + if isinstance(size_box, ConstInt) and size_box.getint() == 0: return fcond # nothing to do itemsize, baseofs, _ = unpack_arraydescr(op.getdescr()) args = op.getarglist() + # + # ZERO_ARRAY(base_loc, start, size, 1, 1) + # 'start' and 'size' are both expressed in bytes, + # and the two scaling arguments should always be ConstInt(1) on ARM. 
+ assert args[3].getint() == 1 + assert args[4].getint() == 1 + # base_loc = regalloc.rm.make_sure_var_in_reg(args[0], args) - sibox = args[1] - if isinstance(sibox, ConstInt): - startindex_loc = None - startindex = sibox.getint() - assert startindex >= 0 + startbyte_box = args[1] + if isinstance(startbyte_box, ConstInt): + startbyte_loc = None + startbyte = startbyte_box.getint() + assert startbyte >= 0 else: - startindex_loc = regalloc.rm.make_sure_var_in_reg(sibox, args) - startindex = -1 + startbyte_loc = regalloc.rm.make_sure_var_in_reg(startbyte_box, + args) + startbyte = -1 - # base_loc and startindex_loc are in two regs here (or they are - # immediates). Compute the dstaddr_loc, which is the raw + # base_loc and startbyte_loc are in two regs here (or startbyte_loc + # is an immediate). Compute the dstaddr_loc, which is the raw # address that we will pass as first argument to memset(). # It can be in the same register as either one, but not in # args[2], because we're still needing the latter. dstaddr_box = TempVar() dstaddr_loc = regalloc.rm.force_allocate_reg(dstaddr_box, [args[2]]) - if startindex >= 0: # a constant - ofs = baseofs + startindex * itemsize + if startbyte >= 0: # a constant + ofs = baseofs + startbyte reg = base_loc.value else: - self.mc.gen_load_int(r.ip.value, itemsize) - self.mc.MLA(dstaddr_loc.value, r.ip.value, - startindex_loc.value, base_loc.value) + self.mc.ADD_rr(dstaddr_loc.value, + base_loc.value, startbyte_loc.value) ofs = baseofs reg = dstaddr_loc.value if check_imm_arg(ofs): @@ -1180,20 +1187,27 @@ self.mc.gen_load_int(r.ip.value, ofs) self.mc.ADD_rr(dstaddr_loc.value, reg, r.ip.value) - if (isinstance(length_box, ConstInt) and - length_box.getint() <= 14 and # same limit as GCC - itemsize in (4, 2, 1)): + # We use STRB, STRH or STR based on whether we know the array + # item size is a multiple of 1, 2 or 4. + if itemsize & 1: itemsize = 1 + elif itemsize & 2: itemsize = 2 + else: itemsize = 4 + limit = itemsize + next_group = -1 + if itemsize < 4 and startbyte >= 0: + # we optimize STRB/STRH into STR, but this needs care: + # it only works if startindex_loc is a constant, otherwise + # we'd be doing unaligned accesses. + next_group = (-startbyte) & 3 + limit = 4 + + if (isinstance(size_box, ConstInt) and + size_box.getint() <= 14 * limit): # same limit as GCC # Inline a series of STR operations, starting at 'dstaddr_loc'. - next_group = -1 - if itemsize < 4 and startindex >= 0: - # we optimize STRB/STRH into STR, but this needs care: - # it only works if startindex_loc is a constant, otherwise - # we'd be doing unaligned accesses. 
- next_group = (-startindex * itemsize) & 3 # self.mc.gen_load_int(r.ip.value, 0) i = 0 - total_size = length_box.getint() * itemsize + total_size = size_box.getint() while i < total_size: sz = itemsize if i == next_group: @@ -1209,29 +1223,18 @@ i += sz else: - if isinstance(length_box, ConstInt): - length_loc = imm(length_box.getint() * itemsize) + if isinstance(size_box, ConstInt): + size_loc = imm(size_box.getint()) else: - # load length_loc in a register different than dstaddr_loc - length_loc = regalloc.rm.make_sure_var_in_reg(length_box, - [dstaddr_box]) - if itemsize > 1: - # we need a register that is different from dstaddr_loc, - # but which can be identical to length_loc (as usual, - # only if the length_box is not used by future operations) - bytes_box = TempVar() - bytes_loc = regalloc.rm.force_allocate_reg(bytes_box, - [dstaddr_box]) - self.mc.gen_load_int(r.ip.value, itemsize) - self.mc.MUL(bytes_loc.value, r.ip.value, length_loc.value) - length_box = bytes_box - length_loc = bytes_loc + # load size_loc in a register different than dstaddr_loc + size_loc = regalloc.rm.make_sure_var_in_reg(size_box, + [dstaddr_box]) # # call memset() regalloc.before_call() self.simple_call_no_collect(imm(self.memset_addr), - [dstaddr_loc, imm(0), length_loc]) - regalloc.rm.possibly_free_var(length_box) + [dstaddr_loc, imm(0), size_loc]) + regalloc.rm.possibly_free_var(size_box) regalloc.rm.possibly_free_var(dstaddr_box) return fcond diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -16,6 +16,7 @@ MODEL_X86_64_SSE4 = 'x86-64-sse4' MODEL_ARM = 'arm' MODEL_PPC_64 = 'ppc-64' +MODEL_S390_64 = 's390x' # don't use '_' in the model strings; they are replaced by '-' @@ -27,6 +28,7 @@ MODEL_ARM: ['__arm__', '__thumb__','_M_ARM_EP'], MODEL_X86: ['i386', '__i386', '__i386__', '__i686__','_M_IX86'], MODEL_PPC_64: ['__powerpc64__'], + MODEL_S390_64:['__s390x__'], } for k, v in mapping.iteritems(): for macro in v: @@ -67,6 +69,7 @@ 'armv7l': MODEL_ARM, 'armv6l': MODEL_ARM, 'arm': MODEL_ARM, # freebsd + 's390x': MODEL_S390_64 }.get(mach) if result is None: @@ -88,7 +91,6 @@ if feature.detect_x32_mode(): raise ProcessorAutodetectError( 'JITting in x32 mode is not implemented') - # if result.startswith('arm'): from rpython.jit.backend.arm.detect import detect_float @@ -122,6 +124,8 @@ return "rpython.jit.backend.arm.runner", "CPU_ARM" elif backend_name == MODEL_PPC_64: return "rpython.jit.backend.ppc.runner", "PPC_CPU" + elif backend_name == MODEL_S390_64: + return "rpython.jit.backend.zarch.runner", "CPU_S390_64" else: raise ProcessorAutodetectError, ( "we have no JIT backend for this cpu: '%s'" % backend_name) @@ -142,6 +146,7 @@ MODEL_X86_64_SSE4: ['floats', 'singlefloats'], MODEL_ARM: ['floats', 'singlefloats', 'longlong'], MODEL_PPC_64: [], # we don't even have PPC directory, so no + MODEL_S390_64: ['floats'], }[backend_name] if __name__ == '__main__': diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -164,13 +164,11 @@ array_index = moving_obj_tracker.get_array_index(v) size, offset, _ = unpack_arraydescr(moving_obj_tracker.ptr_array_descr) - scale = size + array_index = array_index * size + offset args = [moving_obj_tracker.const_ptr_gcref_array, ConstInt(array_index), - ConstInt(scale), - ConstInt(offset), ConstInt(size)] - load_op = 
ResOperation(rop.GC_LOAD_INDEXED_R, args) + load_op = ResOperation(rop.GC_LOAD_R, args) newops.append(load_op) op.setarg(arg_i, load_op) # diff --git a/rpython/jit/backend/llsupport/jump.py b/rpython/jit/backend/llsupport/jump.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/llsupport/jump.py @@ -0,0 +1,107 @@ +def remap_frame_layout(assembler, src_locations, dst_locations, tmpreg): + pending_dests = len(dst_locations) + srccount = {} # maps dst_locations to how many times the same + # location appears in src_locations + for dst in dst_locations: + key = dst.as_key() + assert key not in srccount, "duplicate value in dst_locations!" + srccount[key] = 0 + for i in range(len(dst_locations)): + src = src_locations[i] + if src.is_imm(): + continue + key = src.as_key() + if key in srccount: + if key == dst_locations[i].as_key(): + # ignore a move "x = x" + # setting any "large enough" negative value is ok, but + # be careful of overflows, don't use -sys.maxint + srccount[key] = -len(dst_locations) - 1 + pending_dests -= 1 + else: + srccount[key] += 1 + + while pending_dests > 0: + progress = False + for i in range(len(dst_locations)): + dst = dst_locations[i] + key = dst.as_key() + if srccount[key] == 0: + srccount[key] = -1 # means "it's done" + pending_dests -= 1 + src = src_locations[i] + if not src.is_imm(): + key = src.as_key() + if key in srccount: + srccount[key] -= 1 + _move(assembler, src, dst, tmpreg) + progress = True + if not progress: + # we are left with only pure disjoint cycles + sources = {} # maps dst_locations to src_locations + for i in range(len(dst_locations)): + src = src_locations[i] + dst = dst_locations[i] + sources[dst.as_key()] = src + # + for i in range(len(dst_locations)): + dst = dst_locations[i] + originalkey = dst.as_key() + if srccount[originalkey] >= 0: + assembler.regalloc_push(dst, 0) + while True: + key = dst.as_key() + assert srccount[key] == 1 + # ^^^ because we are in a simple cycle + srccount[key] = -1 + pending_dests -= 1 + src = sources[key] + if src.as_key() == originalkey: + break + _move(assembler, src, dst, tmpreg) + dst = src + assembler.regalloc_pop(dst, 0) + assert pending_dests == 0 + +def _move(assembler, src, dst, tmpreg): + # some assembler cannot handle memory to memory moves without + # a tmp register, thus prepare src according to the ISA capabilities + src = assembler.regalloc_prepare_move(src, dst, tmpreg) + assembler.regalloc_mov(src, dst) + +def remap_frame_layout_mixed(assembler, + src_locations1, dst_locations1, tmpreg1, + src_locations2, dst_locations2, tmpreg2, WORD): + # find and push the fp stack locations from src_locations2 that + # are going to be overwritten by dst_locations1 + extrapushes = [] + dst_keys = {} + for loc in dst_locations1: + dst_keys[loc.as_key()] = None + src_locations2red = [] + dst_locations2red = [] + for i in range(len(src_locations2)): + loc = src_locations2[i] + dstloc = dst_locations2[i] + if loc.is_stack(): + key = loc.as_key() + if (key in dst_keys or (loc.width > WORD and + (key + 1) in dst_keys)): + assembler.regalloc_push(loc, len(extrapushes)) + extrapushes.append(dstloc) + continue + src_locations2red.append(loc) + dst_locations2red.append(dstloc) + src_locations2 = src_locations2red + dst_locations2 = dst_locations2red + # + # remap the integer and pointer registers and stack locations + remap_frame_layout(assembler, src_locations1, dst_locations1, tmpreg1) + # + # remap the fp registers and stack locations + remap_frame_layout(assembler, src_locations2, dst_locations2, tmpreg2) 
+ # + # finally, pop the extra fp stack locations + while len(extrapushes) > 0: + loc = extrapushes.pop() + assembler.regalloc_pop(loc, len(extrapushes)) diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -203,38 +203,47 @@ def transform_to_gc_load(self, op): NOT_SIGNED = 0 CINT_ZERO = ConstInt(0) + opnum = op.getopnum() + #if opnum == rop.CALL_MALLOC_NURSERY_VARSIZE: + # v_length = op.getarg(2) + # scale = op.getarg(1).getint() + # if scale not in self.cpu.load_supported_factors: + # scale, offset, v_length = \ + # self._emit_mul_if_factor_offset_not_supported(v_length, scale, 0) + # op.setarg(1, ConstInt(scale)) + # op.setarg(2, v_length) if op.is_getarrayitem() or \ - op.getopnum() in (rop.GETARRAYITEM_RAW_I, - rop.GETARRAYITEM_RAW_F): + opnum in (rop.GETARRAYITEM_RAW_I, + rop.GETARRAYITEM_RAW_F): self.handle_getarrayitem(op) - elif op.getopnum() in (rop.SETARRAYITEM_GC, rop.SETARRAYITEM_RAW): + elif opnum in (rop.SETARRAYITEM_GC, rop.SETARRAYITEM_RAW): self.handle_setarrayitem(op) - elif op.getopnum() == rop.RAW_STORE: + elif opnum == rop.RAW_STORE: itemsize, ofs, _ = unpack_arraydescr(op.getdescr()) ptr_box = op.getarg(0) index_box = op.getarg(1) value_box = op.getarg(2) self.emit_gc_store_or_indexed(op, ptr_box, index_box, value_box, itemsize, 1, ofs) - elif op.getopnum() in (rop.RAW_LOAD_I, rop.RAW_LOAD_F): + elif opnum in (rop.RAW_LOAD_I, rop.RAW_LOAD_F): itemsize, ofs, sign = unpack_arraydescr(op.getdescr()) ptr_box = op.getarg(0) index_box = op.getarg(1) self.emit_gc_load_or_indexed(op, ptr_box, index_box, itemsize, 1, ofs, sign) - elif op.getopnum() in (rop.GETINTERIORFIELD_GC_I, rop.GETINTERIORFIELD_GC_R, - rop.GETINTERIORFIELD_GC_F): + elif opnum in (rop.GETINTERIORFIELD_GC_I, rop.GETINTERIORFIELD_GC_R, + rop.GETINTERIORFIELD_GC_F): ofs, itemsize, fieldsize, sign = unpack_interiorfielddescr(op.getdescr()) ptr_box = op.getarg(0) index_box = op.getarg(1) self.emit_gc_load_or_indexed(op, ptr_box, index_box, fieldsize, itemsize, ofs, sign) - elif op.getopnum() in (rop.SETINTERIORFIELD_RAW, rop.SETINTERIORFIELD_GC): + elif opnum in (rop.SETINTERIORFIELD_RAW, rop.SETINTERIORFIELD_GC): ofs, itemsize, fieldsize, sign = unpack_interiorfielddescr(op.getdescr()) ptr_box = op.getarg(0) index_box = op.getarg(1) value_box = op.getarg(2) self.emit_gc_store_or_indexed(op, ptr_box, index_box, value_box, fieldsize, itemsize, ofs) - elif op.getopnum() in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R, - rop.GETFIELD_RAW_I, rop.GETFIELD_RAW_F, rop.GETFIELD_RAW_R): + elif opnum in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R, + rop.GETFIELD_RAW_I, rop.GETFIELD_RAW_F, rop.GETFIELD_RAW_R): ofs, itemsize, sign = unpack_fielddescr(op.getdescr()) ptr_box = op.getarg(0) if op.getopnum() in (rop.GETFIELD_GC_F, rop.GETFIELD_GC_I, rop.GETFIELD_GC_R): @@ -249,45 +258,45 @@ self.emit_op(op) return True self.emit_gc_load_or_indexed(op, ptr_box, ConstInt(0), itemsize, 1, ofs, sign) - elif op.getopnum() in (rop.SETFIELD_GC, rop.SETFIELD_RAW): + elif opnum in (rop.SETFIELD_GC, rop.SETFIELD_RAW): ofs, itemsize, sign = unpack_fielddescr(op.getdescr()) ptr_box = op.getarg(0) value_box = op.getarg(1) self.emit_gc_store_or_indexed(op, ptr_box, ConstInt(0), value_box, itemsize, 1, ofs) - elif op.getopnum() == rop.ARRAYLEN_GC: + elif opnum == rop.ARRAYLEN_GC: descr = op.getdescr() assert isinstance(descr, ArrayDescr) ofs = descr.lendescr.offset 
self.emit_gc_load_or_indexed(op, op.getarg(0), ConstInt(0), WORD, 1, ofs, NOT_SIGNED) - elif op.getopnum() == rop.STRLEN: + elif opnum == rop.STRLEN: basesize, itemsize, ofs_length = get_array_token(rstr.STR, self.cpu.translate_support_code) self.emit_gc_load_or_indexed(op, op.getarg(0), ConstInt(0), WORD, 1, ofs_length, NOT_SIGNED) - elif op.getopnum() == rop.UNICODELEN: + elif opnum == rop.UNICODELEN: basesize, itemsize, ofs_length = get_array_token(rstr.UNICODE, self.cpu.translate_support_code) self.emit_gc_load_or_indexed(op, op.getarg(0), ConstInt(0), WORD, 1, ofs_length, NOT_SIGNED) - elif op.getopnum() == rop.STRGETITEM: + elif opnum == rop.STRGETITEM: basesize, itemsize, ofs_length = get_array_token(rstr.STR, self.cpu.translate_support_code) assert itemsize == 1 self.emit_gc_load_or_indexed(op, op.getarg(0), op.getarg(1), itemsize, itemsize, basesize, NOT_SIGNED) - elif op.getopnum() == rop.UNICODEGETITEM: + elif opnum == rop.UNICODEGETITEM: basesize, itemsize, ofs_length = get_array_token(rstr.UNICODE, self.cpu.translate_support_code) self.emit_gc_load_or_indexed(op, op.getarg(0), op.getarg(1), itemsize, itemsize, basesize, NOT_SIGNED) - elif op.getopnum() == rop.STRSETITEM: + elif opnum == rop.STRSETITEM: basesize, itemsize, ofs_length = get_array_token(rstr.STR, self.cpu.translate_support_code) assert itemsize == 1 self.emit_gc_store_or_indexed(op, op.getarg(0), op.getarg(1), op.getarg(2), itemsize, itemsize, basesize) - elif op.getopnum() == rop.UNICODESETITEM: + elif opnum == rop.UNICODESETITEM: basesize, itemsize, ofs_length = get_array_token(rstr.UNICODE, self.cpu.translate_support_code) self.emit_gc_store_or_indexed(op, op.getarg(0), op.getarg(1), op.getarg(2), @@ -488,7 +497,7 @@ total_size = arraydescr.basesize elif (self.gc_ll_descr.can_use_nursery_malloc(1) and self.gen_malloc_nursery_varsize(arraydescr.itemsize, - v_length, op, arraydescr, kind=kind)): + v_length, op, arraydescr, kind=kind)): # note that we cannot initialize tid here, because the array # might end up being allocated by malloc_external or some # stuff that initializes GC header fields differently @@ -524,8 +533,18 @@ # See emit_pending_zeros(). (This optimization is done by # hacking the object 'o' in-place: e.g., o.getarg(1) may be # replaced with another constant greater than 0.) - o = ResOperation(rop.ZERO_ARRAY, [v_arr, self.c_zero, v_length], - descr=arraydescr) + assert isinstance(arraydescr, ArrayDescr) + scale = arraydescr.itemsize + v_length_scaled = v_length + if not isinstance(v_length, ConstInt): + scale, offset, v_length_scaled = \ + self._emit_mul_if_factor_offset_not_supported(v_length, scale, 0) + v_scale = ConstInt(scale) + # there is probably no point in doing _emit_mul_if.. for c_zero! + # NOTE that the scale might be != 1 for e.g. v_length_scaled if it is a constant + # it is later applied in emit_pending_zeros + args = [v_arr, self.c_zero, v_length_scaled, ConstInt(scale), v_scale] + o = ResOperation(rop.ZERO_ARRAY, args, descr=arraydescr) self.emit_op(o) if isinstance(v_length, ConstInt): self.last_zero_arrays.append(self._newops[-1]) @@ -643,22 +662,38 @@ # are also already in 'newops', which is the point. 
for op in self.last_zero_arrays: assert op.getopnum() == rop.ZERO_ARRAY + descr = op.getdescr() + assert isinstance(descr, ArrayDescr) + scale = descr.itemsize box = op.getarg(0) try: intset = self.setarrayitems_occurred(box) except KeyError: + start_box = op.getarg(1) + length_box = op.getarg(2) + if isinstance(start_box, ConstInt): + start = start_box.getint() + op.setarg(1, ConstInt(start * scale)) + op.setarg(3, ConstInt(1)) + if isinstance(length_box, ConstInt): + stop = length_box.getint() + scaled_len = stop * scale + op.setarg(2, ConstInt(scaled_len)) + op.setarg(4, ConstInt(1)) continue assert op.getarg(1).getint() == 0 # always 'start=0' initially start = 0 while start in intset: start += 1 - op.setarg(1, ConstInt(start)) + op.setarg(1, ConstInt(start * scale)) stop = op.getarg(2).getint() assert start <= stop while stop > start and (stop - 1) in intset: stop -= 1 - op.setarg(2, ConstInt(stop - start)) + op.setarg(2, ConstInt((stop - start) * scale)) # ^^ may be ConstInt(0); then the operation becomes a no-op + op.setarg(3, ConstInt(1)) # set scale to 1 + op.setarg(4, ConstInt(1)) # set scale to 1 del self.last_zero_arrays[:] self._setarrayitems_occurred.clear() # @@ -759,6 +794,10 @@ arraydescr.lendescr.offset != gc_descr.standard_array_length_ofs)): return False self.emitting_an_operation_that_can_collect() + #scale = itemsize + #if scale not in self.cpu.load_supported_factors: + # scale, offset, v_length = \ + # self._emit_mul_if_factor_offset_not_supported(v_length, scale, 0) op = ResOperation(rop.CALL_MALLOC_NURSERY_VARSIZE, [ConstInt(kind), ConstInt(itemsize), v_length], descr=arraydescr) diff --git a/rpython/jit/backend/llsupport/test/test_gc_integration.py b/rpython/jit/backend/llsupport/test/test_gc_integration.py --- a/rpython/jit/backend/llsupport/test/test_gc_integration.py +++ b/rpython/jit/backend/llsupport/test/test_gc_integration.py @@ -91,6 +91,8 @@ assert nos == [0, 1, 47] elif self.cpu.backend_name.startswith('ppc64'): assert nos == [0, 1, 33] + elif self.cpu.backend_name.startswith('zarch'): + assert nos == [0, 1, 29] else: raise Exception("write the data here") assert frame.jf_frame[nos[0]] @@ -313,7 +315,9 @@ 'strdescr': arraydescr}) # check the returned pointers gc_ll_descr = self.cpu.gc_ll_descr - assert gc_ll_descr.calls == [(8, 15, 10), (5, 15, 3), ('str', 3)] + assert gc_ll_descr.calls == [(8, 15, 10), + (5, 15, 3), + ('str', 3)] # one fit, one was too large, one was not fitting def test_malloc_slowpath(self): @@ -641,11 +645,13 @@ gcmap = unpack_gcmap(frame) if self.cpu.backend_name.startswith('ppc64'): assert gcmap == [30, 31, 32] + elif self.cpu.backend_name.startswith('zarch'): + # 10 gpr, 14 fpr -> 25 is the first slot + assert gcmap == [26, 27, 28] elif self.cpu.IS_64_BIT: assert gcmap == [28, 29, 30] elif self.cpu.backend_name.startswith('arm'): assert gcmap == [44, 45, 46] - pass else: assert gcmap == [22, 23, 24] for item, s in zip(gcmap, new_items): diff --git a/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py b/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_pinned_object_rewrite.py @@ -127,7 +127,7 @@ i0 = getfield_gc_i(ConstPtr(pinned_obj_gcref), descr=pinned_obj_my_int_descr) """, """ [] - p1 = gc_load_indexed_r(ConstPtr(ptr_array_gcref), 0, %(ptr_array_descr.itemsize)s, 1, %(ptr_array_descr.itemsize)s) + p1 = gc_load_r(ConstPtr(ptr_array_gcref), %(0 * ptr_array_descr.itemsize + 1)s, 
%(ptr_array_descr.itemsize)s) i0 = gc_load_i(p1, 0, -%(pinned_obj_my_int_descr.field_size)s) """) assert len(self.gc_ll_descr.last_moving_obj_tracker._indexes) == 1 @@ -140,10 +140,10 @@ i2 = getfield_gc_i(ConstPtr(pinned_obj_gcref), descr=pinned_obj_my_int_descr) """, """ [] - p1 = gc_load_indexed_r(ConstPtr(ptr_array_gcref), 0, %(ptr_array_descr.itemsize)s, 1, %(ptr_array_descr.itemsize)s) + p1 = gc_load_r(ConstPtr(ptr_array_gcref), %(0 * ptr_array_descr.itemsize + 1)s, %(ptr_array_descr.itemsize)s) i0 = gc_load_i(p1, 0, -%(pinned_obj_my_int_descr.field_size)s) i1 = gc_load_i(ConstPtr(notpinned_obj_gcref), 0, -%(notpinned_obj_my_int_descr.field_size)s) - p2 = gc_load_indexed_r(ConstPtr(ptr_array_gcref), 1, %(ptr_array_descr.itemsize)s, 1, %(ptr_array_descr.itemsize)s) + p2 = gc_load_r(ConstPtr(ptr_array_gcref), %(1 * ptr_array_descr.itemsize + 1)s, %(ptr_array_descr.itemsize)s) i2 = gc_load_i(p2, 0, -%(pinned_obj_my_int_descr.field_size)s) """) assert len(self.gc_ll_descr.last_moving_obj_tracker._indexes) == 2 diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -36,6 +36,21 @@ assert not isinstance(descr, (str, int)) return 'gc_store(%s, %d, %s, %d)' % (baseptr, descr.offset, newvalue, descr.field_size) + def zero_array(baseptr, start, length, descr_name, descr): + assert isinstance(baseptr, str) + assert isinstance(start, (str, int)) + assert isinstance(length, (str, int)) + assert isinstance(descr_name, str) + assert not isinstance(descr, (str,int)) + itemsize = descr.itemsize + start = start * itemsize + length_scale = 1 + if isinstance(length, str): + length_scale = itemsize + else: + length = length * itemsize + return 'zero_array(%s, %s, %s, 1, %d, descr=%s)' % \ + (baseptr, start, length, length_scale, descr_name) def setarrayitem(baseptr, index, newvalue, descr): assert isinstance(baseptr, str) assert isinstance(index, (str, int)) @@ -681,7 +696,7 @@ %(cdescr.basesize + 129 * cdescr.itemsize)d) gc_store(p1, 0, 8111, %(tiddescr.field_size)s) gc_store(p1, 0, 129, %(clendescr.field_size)s) - zero_array(p1, 0, 129, descr=cdescr) + %(zero_array('p1', 0, 129, 'cdescr', cdescr))s call_n(123456) cond_call_gc_wb(p1, descr=wbdescr) %(setarrayitem('p1', 'i2', 'p3', cdescr))s @@ -703,7 +718,7 @@ %(cdescr.basesize + 130 * cdescr.itemsize)d) gc_store(p1, 0, 8111, %(tiddescr.field_size)s) gc_store(p1, 0, 130, %(clendescr.field_size)s) - zero_array(p1, 0, 130, descr=cdescr) + %(zero_array('p1', 0, 130, 'cdescr', cdescr))s call_n(123456) cond_call_gc_wb_array(p1, i2, descr=wbdescr) %(setarrayitem('p1', 'i2', 'p3', cdescr))s @@ -735,7 +750,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) gc_store(p1, 0, 8111, %(tiddescr.field_size)s) gc_store(p1, 0, 5, %(clendescr.field_size)s) - zero_array(p1, 0, 5, descr=cdescr) + %(zero_array('p1', 0, 5, 'cdescr', cdescr))s label(p1, i2, p3) cond_call_gc_wb_array(p1, i2, descr=wbdescr) %(setarrayitem('p1', 'i2', 'p3', cdescr))s @@ -810,7 +825,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) - zero_array(p0, 0, 5, descr=cdescr) + %(zero_array('p0', 0, 5, 'cdescr', cdescr))s %(setarrayitem('p0', 'i2', 'p1', cdescr))s jump() """) @@ -828,7 +843,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) - 
zero_array(p0, 2, 3, descr=cdescr) + %(zero_array('p0', 2, 3, 'cdescr', cdescr))s %(setarrayitem('p0', 1, 'p1', cdescr))s %(setarrayitem('p0', 0, 'p2', cdescr))s jump() @@ -847,7 +862,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) - zero_array(p0, 0, 3, descr=cdescr) + %(zero_array('p0', 0, 3, 'cdescr', cdescr))s %(setarrayitem('p0', 3, 'p1', cdescr))s %(setarrayitem('p0', 4, 'p2', cdescr))s jump() @@ -867,7 +882,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) - zero_array(p0, 0, 5, descr=cdescr) + %(zero_array('p0', 0, 5, 'cdescr', cdescr))s %(setarrayitem('p0', 3, 'p1', cdescr))s %(setarrayitem('p0', 2, 'p2', cdescr))s %(setarrayitem('p0', 1, 'p2', cdescr))s @@ -890,7 +905,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) - zero_array(p0, 5, 0, descr=cdescr) + %(zero_array('p0', 5, 0, 'cdescr', cdescr))s %(setarrayitem('p0', 3, 'p1', cdescr))s %(setarrayitem('p0', 4, 'p2', cdescr))s %(setarrayitem('p0', 0, 'p1', cdescr))s @@ -913,7 +928,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) - zero_array(p0, 1, 4, descr=cdescr) + %(zero_array('p0', 1, 4, 'cdescr', cdescr))s %(setarrayitem('p0', 0, 'p1', cdescr))s call_n(321321) cond_call_gc_wb(p0, descr=wbdescr) @@ -935,7 +950,7 @@ %(cdescr.basesize + 5 * cdescr.itemsize)d) gc_store(p0, 0, 8111, %(tiddescr.field_size)s) gc_store(p0, 0, 5, %(clendescr.field_size)s) - zero_array(p0, 1, 4, descr=cdescr) + %(zero_array('p0', 1, 4, 'cdescr', cdescr))s %(setarrayitem('p0', 0, 'p1', cdescr))s label(p0, p2) cond_call_gc_wb_array(p0, 1, descr=wbdescr) @@ -952,7 +967,7 @@ [p1, p2, i3] p0 = call_malloc_nursery_varsize(0, 1, i3, descr=bdescr) gc_store(p0, 0, i3, %(blendescr.field_size)s) - zero_array(p0, 0, i3, descr=bdescr) + %(zero_array('p0', 0, 'i3', 'bdescr', bdescr))s jump() """) @@ -966,7 +981,7 @@ [p1, p2, i3] p0 = call_malloc_nursery_varsize(0, 1, i3, descr=bdescr) gc_store(p0, 0, i3, %(blendescr.field_size)s) - zero_array(p0, 0, i3, descr=bdescr) + %(zero_array('p0', 0, 'i3', 'bdescr', bdescr))s cond_call_gc_wb_array(p0, 0, descr=wbdescr) %(setarrayitem('p0', 0, 'p1', bdescr))s jump() diff --git a/rpython/jit/backend/ppc/callbuilder.py b/rpython/jit/backend/ppc/callbuilder.py --- a/rpython/jit/backend/ppc/callbuilder.py +++ b/rpython/jit/backend/ppc/callbuilder.py @@ -98,7 +98,7 @@ # We must also copy fnloc into FNREG non_float_locs.append(self.fnloc) - non_float_regs.append(self.mc.RAW_CALL_REG) # r2 or r12 + non_float_regs.append(self.mc.RAW_CALL_REG) if float_locs: assert len(float_locs) <= len(self.FPR_ARGS) diff --git a/rpython/jit/backend/ppc/opassembler.py b/rpython/jit/backend/ppc/opassembler.py --- a/rpython/jit/backend/ppc/opassembler.py +++ b/rpython/jit/backend/ppc/opassembler.py @@ -851,40 +851,6 @@ self.mc.sldi(scratch_loc.value, loc.value, scale) return scratch_loc - def _apply_scale(self, ofs, index_loc, itemsize): - # XXX should die now that getarrayitem and getinteriorfield are gone - # but can't because of emit_zero_array() at the moment - - # For arrayitem and interiorfield reads and writes: this returns an - # offset suitable for use in ld/ldx or similar instructions. - # The result will be either the register r2 or a 16-bit immediate. 
- # The arguments stand for "ofs + index_loc * itemsize", - # with the following constrains: - assert ofs.is_imm() # must be an immediate... - assert _check_imm_arg(ofs.getint()) # ...that fits 16 bits - assert index_loc is not r.SCRATCH2 # can be a reg or imm (any size) - assert itemsize.is_imm() # must be an immediate (any size) - - multiply_by = itemsize.value - offset = ofs.getint() - if index_loc.is_imm(): - offset += index_loc.getint() * multiply_by - if _check_imm_arg(offset): - return imm(offset) - else: - self.mc.load_imm(r.SCRATCH2, offset) - return r.SCRATCH2 - else: - index_loc = self._multiply_by_constant(index_loc, multiply_by, - r.SCRATCH2) - # here, the new index_loc contains 'index_loc * itemsize'. - # If offset != 0 then we have to add it here. Note that - # mc.addi() would not be valid with operand r0. - if offset != 0: - self.mc.addi(r.SCRATCH2.value, index_loc.value, offset) - index_loc = r.SCRATCH2 - return index_loc - def _copy_in_scratch2(self, loc): if loc.is_imm(): self.mc.li(r.SCRATCH2.value, loc.value) @@ -903,86 +869,94 @@ elif itemsize & 2: self.mc.sthu(a, b, c) From pypy.commits at gmail.com Thu Mar 10 09:45:45 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 10 Mar 2016 06:45:45 -0800 (PST) Subject: [pypy-commit] pypy s390x-enhance-speed: close branch Message-ID: <56e18899.0f941c0a.e504b.ffff9003@mx.google.com> Author: Richard Plangger Branch: s390x-enhance-speed Changeset: r82943:a0123b0e67f5 Date: 2016-03-10 15:44 +0100 http://bitbucket.org/pypy/pypy/changeset/a0123b0e67f5/ Log: close branch From pypy.commits at gmail.com Thu Mar 10 09:45:43 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 10 Mar 2016 06:45:43 -0800 (PST) Subject: [pypy-commit] pypy s390x-backend: close branch Message-ID: <56e18897.10921c0a.9b57c.4b06@mx.google.com> Author: Richard Plangger Branch: s390x-backend Changeset: r82942:9a366b997dcc Date: 2016-03-10 15:44 +0100 http://bitbucket.org/pypy/pypy/changeset/9a366b997dcc/ Log: close branch From pypy.commits at gmail.com Thu Mar 10 10:01:11 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 10 Mar 2016 07:01:11 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: minor fixes Message-ID: <56e18c37.857ac20a.8280b.4cbe@mx.google.com> Author: Armin Rigo Branch: jit-leaner-frontend Changeset: r82944:a8445a6ab102 Date: 2016-03-10 16:00 +0100 http://bitbucket.org/pypy/pypy/changeset/a8445a6ab102/ Log: minor fixes diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -16,7 +16,8 @@ TAGINT, TAGCONST, TAGBOX = range(3) TAGMASK = 0x3 TAGSHIFT = 2 -NUM_SMALL_INTS = 2 ** (16 - TAGSHIFT) +SMALL_INT_STOP = 2 ** (15 - TAGSHIFT) +SMALL_INT_START = -SMALL_INT_STOP class Sentinel(object): pass @@ -195,7 +196,7 @@ if isinstance(box, Const): if (isinstance(box, ConstInt) and isinstance(box.getint(), int) and # symbolics - 0 <= box.getint() < NUM_SMALL_INTS): + SMALL_INT_START <= box.getint() < SMALL_INT_STOP): return tag(TAGINT, box.getint()) else: self._consts.append(box) @@ -310,16 +311,9 @@ ops.append(iter.next()) return ops - def _get_operations(self): - """ NOT_RPYTHON - """ - l = [] - i = self.get_iter() - while not i.done(): - l.append(i.next()) - return l - def tag(kind, pos): + #if not SMALL_INT_START <= pos < SMALL_INT_STOP: + # raise some error return (pos << TAGSHIFT) | kind def untag(tagged): From pypy.commits at gmail.com Thu Mar 10 10:04:32 2016 From: pypy.commits at gmail.com (arigo) Date: 
Thu, 10 Mar 2016 07:04:32 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: merge heads Message-ID: <56e18d00.080a1c0a.1fc8e.4ea9@mx.google.com> Author: Armin Rigo Branch: jit-leaner-frontend Changeset: r82945:6ab28e9da460 Date: 2016-03-10 16:03 +0100 http://bitbucket.org/pypy/pypy/changeset/6ab28e9da460/ Log: merge heads diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py --- a/pypy/module/pypyjit/interp_resop.py +++ b/pypy/module/pypyjit/interp_resop.py @@ -249,7 +249,7 @@ ofs = debug_info.asminfo.ops_offset else: ofs = {} - ops = debug_info.operations + _, ops = debug_info.trace.unpack() self.w_ops = space.newlist(wrap_oplist(space, logops, ops, ofs)) else: self.w_ops = space.w_None diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -287,6 +287,7 @@ return None if ((warmstate.vec and jitdriver_sd.vec) or warmstate.vec_all): + assert False, "vectorization disabled" from rpython.jit.metainterp.optimizeopt.vector import optimize_vector loop_info, loop_ops = optimize_vector(metainterp_sd, jitdriver_sd, warmstate, diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -303,8 +303,6 @@ return TraceIterator(self, 0, len(self._ops)) def unpack(self): - """ NOT_RPYTHON - really rpython, but only use for debugging - """ iter = self.get_iter() ops = [] while not iter.done(): diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2977,7 +2977,7 @@ if start_stack: jitdriver_sd, key, pos = start_stack[0] warmstate = jitdriver_sd.warmstate - size = len(self.history.operations) - pos + size = self.history.get_trace_position()[0] - pos[0] if size > max_size: if warmstate is not None: r = warmstate.get_location_str(key) From pypy.commits at gmail.com Thu Mar 10 10:13:01 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 10 Mar 2016 07:13:01 -0800 (PST) Subject: [pypy-commit] buildbot default: s390x buildbot now nightly builds default Message-ID: <56e18efd.82561c0a.996ee.510b@mx.google.com> Author: Richard Plangger Branch: Changeset: r992:c21ef5322a5a Date: 2016-03-10 16:12 +0100 http://bitbucket.org/pypy/buildbot/changeset/c21ef5322a5a/ Log: s390x buildbot now nightly builds default diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -308,9 +308,9 @@ ], branch="py3.3", hour=3, minute=0), # S390X vm (ibm-research) - Nightly("nightly-4-00", [LINUX_S390X], branch='s390x-backend', hour=0, minute=0), - Nightly("nightly-4-01", [JITLINUX_S390X], branch='s390x-backend', hour=2, minute=0), - Nightly("nightly-4-02", [APPLVLLINUX_S390X], branch='s390x-backend', hour=5, minute=0), + Nightly("nightly-4-00", [LINUX_S390X], branch='default', hour=0, minute=0), + Nightly("nightly-4-01", [JITLINUX_S390X], branch='default', hour=2, minute=0), + Nightly("nightly-4-02", [APPLVLLINUX_S390X], branch='default', hour=5, minute=0), # this one has faithfully run every night even though the latest # change to that branch was in January 2013. Re-enable one day. 
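A self-contained sketch of the tagged small-int encoding adjusted by r82944:a8445a6ab102 above (SMALL_INT_START/SMALL_INT_STOP replacing NUM_SMALL_INTS). Illustration only, not part of any changeset: the truncated diff does not show the body of untag, so it is written below as the assumed inverse of tag, and plain Python integers stand in for RPython's machine-sized ones.

    TAGINT, TAGCONST, TAGBOX = range(3)    # 2-bit kind field, as in opencoder.py
    TAGMASK = 0x3
    TAGSHIFT = 2
    SMALL_INT_STOP = 2 ** (15 - TAGSHIFT)  # new symmetric range from the diff
    SMALL_INT_START = -SMALL_INT_STOP

    def tag(kind, pos):
        # pack the payload above the 2-bit kind field (body shown in the diff)
        return (pos << TAGSHIFT) | kind

    def untag(tagged):
        # assumed inverse: arithmetic shift preserves the payload's sign
        return tagged & TAGMASK, tagged >> TAGSHIFT

    # a ConstInt is inlined into the trace only while it stays inside the small
    # range; anything else is appended to the trace's consts list instead
    for value in (0, 1, -1, SMALL_INT_START, SMALL_INT_STOP - 1):
        kind, payload = untag(tag(TAGINT, value))
        assert (kind, payload) == (TAGINT, value)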
From pypy.commits at gmail.com Thu Mar 10 10:33:22 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 10 Mar 2016 07:33:22 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: disable jit hooks Message-ID: <56e193c2.552f1c0a.98222.587c@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82946:8e821f827b97 Date: 2016-03-10 17:32 +0200 http://bitbucket.org/pypy/pypy/changeset/8e821f827b97/ Log: disable jit hooks diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -336,7 +336,7 @@ def jitpolicy(self, driver): from pypy.module.pypyjit.policy import PyPyJitPolicy from pypy.module.pypyjit.hooks import pypy_hooks - return PyPyJitPolicy(pypy_hooks) + return PyPyJitPolicy() #pypy_hooks) def get_entry_point(self, config): from pypy.tool.lib_pypy import import_from_lib_pypy diff --git a/pypy/module/pypyjit/__init__.py b/pypy/module/pypyjit/__init__.py --- a/pypy/module/pypyjit/__init__.py +++ b/pypy/module/pypyjit/__init__.py @@ -12,9 +12,9 @@ 'dont_trace_here': 'interp_jit.dont_trace_here', 'trace_next_iteration': 'interp_jit.trace_next_iteration', 'trace_next_iteration_hash': 'interp_jit.trace_next_iteration_hash', - 'set_compile_hook': 'interp_resop.set_compile_hook', - 'set_abort_hook': 'interp_resop.set_abort_hook', - 'set_trace_too_long_hook': 'interp_resop.set_trace_too_long_hook', + #'set_compile_hook': 'interp_resop.set_compile_hook', + #'set_abort_hook': 'interp_resop.set_abort_hook', + #'set_trace_too_long_hook': 'interp_resop.set_trace_too_long_hook', 'get_stats_snapshot': 'interp_resop.get_stats_snapshot', 'get_stats_asmmemmgr': 'interp_resop.get_stats_asmmemmgr', # those things are disabled because they have bugs, but if @@ -23,10 +23,10 @@ # correct loop_runs if PYPYLOG is correct #'enable_debug': 'interp_resop.enable_debug', #'disable_debug': 'interp_resop.disable_debug', - 'ResOperation': 'interp_resop.WrappedOp', - 'GuardOp': 'interp_resop.GuardOp', - 'DebugMergePoint': 'interp_resop.DebugMergePoint', - 'JitLoopInfo': 'interp_resop.W_JitLoopInfo', + #'ResOperation': 'interp_resop.WrappedOp', + #'GuardOp': 'interp_resop.GuardOp', + #'DebugMergePoint': 'interp_resop.DebugMergePoint', + #'JitLoopInfo': 'interp_resop.W_JitLoopInfo', 'PARAMETER_DOCS': 'space.wrap(rpython.rlib.jit.PARAMETER_DOCS)', } From pypy.commits at gmail.com Thu Mar 10 11:39:52 2016 From: pypy.commits at gmail.com (mattip) Date: Thu, 10 Mar 2016 08:39:52 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: release 5.0 update Message-ID: <56e1a358.05de1c0a.5f6cb.734d@mx.google.com> Author: mattip Branch: extradoc Changeset: r717:983d67d97553 Date: 2016-03-10 18:38 +0200 http://bitbucket.org/pypy/pypy.org/changeset/983d67d97553/ Log: release 5.0 update diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -74,7 +74,7 @@ performance improvements.

    We provide binaries for x86 and ARM Linux, Mac OS/X and Windows for:

    @@ -113,21 +113,21 @@ degrees of being up-to-date.
-Python2.7 compatible PyPy 4.0.1
+Python2.7 compatible PyPy 5.0

    @@ -196,7 +196,7 @@ uncompressed, they run in-place. For now you can uncompress them either somewhere in your home directory or, say, in /opt, and if you want, put a symlink from somewhere like -/usr/local/bin/pypy to /path/to/pypy-4.0.1/bin/pypy. Do +/usr/local/bin/pypy to /path/to/pypy-5.0.0/bin/pypy. Do not move or copy the executable pypy outside the tree – put a symlink to it, otherwise it will not find its libraries.

    @@ -217,7 +217,7 @@

    If you have pip:

     pypy -m pip install git+https://bitbucket.org/pypy/numpy.git
    -pypy -m pip install git+https://bitbucket.org/pypy/numpy.git@pypy-4.0.1
    +pypy -m pip install git+https://bitbucket.org/pypy/numpy.git@pypy-5.0
     

    (the second version selects a particular tag, which may be needed if your pypy is not the latest development version.)

    @@ -241,7 +241,7 @@
  • Get the source code. The following packages contain the source at the same revision as the above binaries:

    Or you can checkout the current trunk using Mercurial (the trunk usually works and is of course more up-to-date):

    @@ -369,19 +369,19 @@

    Checksums

    Here are the checksums for each of the downloads

-pypy-4.0.1 md5:
+pypy-5.0.0 md5:

    -f6721e62f4ba1cdc4cc5ad719369e359  pypy-4.0.1-linux64.tar.bz2
    -fe8106ac3919c7b4be2766944326a624  pypy-4.0.1-linux-armel.tar.bz2
    -823b8a457f4c48ebdb8e1ee607b0a893  pypy-4.0.1-linux-armhf-raring.tar.bz2
    -e45728d413aa88963d4462ebcfaff6ea  pypy-4.0.1-linux-armhf-raspbian.tar.bz2
    -d1d03aa44df354a3f589473a51406795  pypy-4.0.1-linux.tar.bz2
    -67ac82d88aaaef8c3074e68d700f3968  pypy-4.0.1-osx64.tar.bz2
    -f5b35ebedee2fa4fdfee82733be59996  pypy-4.0.1-src.tar.bz2
    -6b81d79886b215a877fee7624883ee94  pypy-4.0.1-src.zip
    -dc6573828ee5c82df18f9035f3b19edb  pypy-4.0.1-win32.zip
    -d4492e65201bb09dca5f97601113dc57  pypy-4.0.1-ppc64le.tar.bz2
    -2aadbb7638153b9d7c2a832888ed3c1e  pypy-4.0.1-ppc64.tar.bz2
    +bcb5c830d6380ff78b759dbe075dfc14  pypy-5.0.0-linux-armel.tar.bz2
    +5649117ba754bef14550b6abc2135eab  pypy-5.0.0-linux-armhf-raring.tar.bz2
    +949344fff9c6942713f34e1a1fcbc7aa  pypy-5.0.0-linux-armhf-raspbian.tar.bz2
    +5e005cf26a3a58552fd77f9aaae6f614  pypy-5.0.0-linux.tar.bz2
    +133530cb9957a67807b25d23bb74ac24  pypy-5.0.0-linux64.tar.bz2
    +a091398908bf525149a1fdea8bf48ec2  pypy-5.0.0-osx64.tar.bz2
    +f243ff399a55f4370b6d1dc0a3650f1d  pypy-5.0.0-ppc64.tar.bz2
    +51fb75ae0a143faa9a5b39f094965050  pypy-5.0.0-ppc64le.tar.bz2
    +6a26f735cb45a10255076fdd6cebee84  pypy-5.0.0-src.tar.bz2
    +1be14cf3ffc97da7521637f8f81abc3c  pypy-5.0.0-src.zip
    +d2c8237e8106b535850596f0e9762246  pypy-5.0.0-win32.zip
     

    pypy3-2.4.0 md5:

    @@ -400,19 +400,33 @@
     2c9f0054f3b93a6473f10be35277825a  pypy-1.8-sandbox-linux64.tar.bz2
     009c970b5fa75754ae4c32a5d108a8d4  pypy-1.8-sandbox-linux.tar.bz2
     
-pypy-4.0.1 sha256:
+pypy-5.0.0 sha1:

    -0d6090cee59f4b9bab91ddbea76580d0c232b78dae65aaa9e8fa8d4449ba25b4  pypy-4.0.1-linux64.tar.bz2
    -d1acdd45ebd34580dd632c63c95211f6bae5e9a8f7a46ffa6f0443286ff9f61b  pypy-4.0.1-linux-armel.tar.bz2
    -e67278ce7423aa7bf99a95fd271cb76763eae3106930f4b7de1fba6a70a3f383  pypy-4.0.1-linux-armhf-raring.tar.bz2
    -52eef495f560af59a787b9935367cb5f8c83b48e32a80ec3e7060bffac011ecc  pypy-4.0.1-linux-armhf-raspbian.tar.bz2
    -721920fcbb6aefc9a98e868e32b7f4ea5fd68b7f9305d08d0a2595327c9c0611  pypy-4.0.1-linux.tar.bz2
    -06be1299691f7ea558bf8e3bdf3d20debb8ba03cd7cadf04f2d6cbd5fd084430  pypy-4.0.1-osx64.tar.bz2
    -43be0b04bcbde1e24d5f39875c0471cdc7bdb44549e5618d32e49bccaa778111  pypy-4.0.1-ppc64le.tar.bz2
    -63c0a1614ffc94f94a64790df5ad193193b378f2cf8729213db06fbd64052911  pypy-4.0.1-ppc64.tar.bz2
    -29f5aa6ba17b34fd980e85172dfeb4086fdc373ad392b1feff2677d2d8aea23c  pypy-4.0.1-src.tar.bz2
    -e344b383e8c745cc7c26bbcb0a43958e768fdd1d29dd0799cc148e0518d8d36f  pypy-4.0.1-src.zip
    -9a350a5e6f9b86fb525c6f1300b0c97c021ea8b1e37bfd32a8c4bb7a415d5329  pypy-4.0.1-win32.zip
    +b7c82d437086660759ec18582dbdaf198b77e467  pypy-5.0.0-linux-armel.tar.bz2
    +85978b1d33b0db8b512eebb1558200c3ab76d462  pypy-5.0.0-linux-armhf-raring.tar.bz2
    +271472d0362ce02fd656024b64f0343cc8193f9d  pypy-5.0.0-linux-armhf-raspbian.tar.bz2
    +88ac71eebd65c35032325497cc450b4d184be005  pypy-5.0.0-linux.tar.bz2
    +22d32d92899a07cb8cbba4b8918a7919e34246c4  pypy-5.0.0-linux64.tar.bz2
    +f652b264ba063a8c472b753baaaacf63690be6c5  pypy-5.0.0-osx64.tar.bz2
    +5620cead511ad33f9fface224544b70d72d9e4c9  pypy-5.0.0-ppc64.tar.bz2
    +6ee6b0eb574f3d29a5eaf29fdae8745fd9fe3c38  pypy-5.0.0-ppc64le.tar.bz2
    +62ce000b887ea22f5bdddcc0f24dd571ca534f57  pypy-5.0.0-src.tar.bz2
    +6dcbde8242e0ee985ffed63c5bf204e7fd74ac2c  pypy-5.0.0-src.zip
    +62cef0e0dd8849c224c647e53b13d3c47c99807d  pypy-5.0.0-win32.zip
+pypy-5.0.0 sha256:
    +87bd85441b16ecca0d45ba6e9c0e9d26bb7bd8867afbf79d80312cf79b032dc1  pypy-5.0.0-linux-armel.tar.bz2
    +5bb52cf5db4ae8497c4e03cd8a70e49867e6b93d9f29ad335d030fcd3a375769  pypy-5.0.0-linux-armhf-raring.tar.bz2
    +8033c0cc39e9f6771688f2eda95c726595f5453b3e73e1cd5f7ebbe3dae1f685  pypy-5.0.0-linux-armhf-raspbian.tar.bz2
    +a9cc9afa94ff1cde811626a70081c477c9840e7816c0562d1903fd823d222ceb  pypy-5.0.0-linux.tar.bz2
    +b9c73be8e3c3b0835df83bdb86335712005240071cdd4dc245ac30b457063ae0  pypy-5.0.0-linux64.tar.bz2
    +45ed8bf799d0fd8eb051cbcc427173fba74dc9c2f6c309d7a3cc90f4917e6a10  pypy-5.0.0-osx64.tar.bz2
    +334a37e68cb543cf2cbcdd12379b9b770064bb70ba7fd104f1e451cfa10cdda5  pypy-5.0.0-ppc64.tar.bz2
    +e72fe5c094186f79c997000ddbaa01616def652a8d1338b75a27dfa3755eb86c  pypy-5.0.0-ppc64le.tar.bz2
    +89027b1b33553b53ff7733dc4838f0a76af23552c0d915d9f6de5875b8d7d4ab  pypy-5.0.0-src.tar.bz2
    +03e19e9bafccf5b2f4dd422699f3fe42da754c3fcc1d1fd4c8d585d7c9d1849d  pypy-5.0.0-src.zip
    +c53f0946703f5e4885484c7cde2554a0320537135bf8965e054757c214412438  pypy-5.0.0-win32.zip
     

    pypy3-2.4.0 sha1:

    diff --git a/source/download.txt b/source/download.txt
    --- a/source/download.txt
    +++ b/source/download.txt
    @@ -14,12 +14,12 @@
     
     We provide binaries for x86 and ARM Linux, Mac OS/X and Windows for:
     
    -* the Python2.7 compatible release — **PyPy 4.0.1** — (`what's new in PyPy 4.0.1?`_)
    +* the Python2.7 compatible release — **PyPy 5.0** — (`what's new in PyPy 5.0?`_)
     * the Python3.2.5 compatible release — **PyPy3 2.4.0** — (`what's new in PyPy3 2.4.0?`_).
     
     * the Python2.7 Software Transactional Memory special release — **PyPy-STM 2.5.1** (Linux x86-64 only)
     
    -.. _what's new in PyPy 4.0.1?: http://doc.pypy.org/en/latest/release-4.0.1.html
    +.. _what's new in PyPy 5.0?: http://doc.pypy.org/en/latest/release-5.0.0.html
     .. _what's new in PyPy3 2.4.0?: http://doc.pypy.org/en/latest/release-pypy3-2.4.0.html
     
     
    @@ -73,7 +73,7 @@
     .. _`portable Linux binaries`: https://github.com/squeaky-pl/portable-pypy#portable-pypy-distribution-for-linux
     
     
    -Python2.7 compatible PyPy 4.0.1
    +Python2.7 compatible PyPy 5.0
     -----------------------------------
     
     * `Linux x86 binary (32bit, tar.bz2 built on Ubuntu 12.04 - 14.04)`__ (see ``[1]`` below)
    @@ -91,17 +91,17 @@
     * `All our downloads,`__ including previous versions.  We also have a
       mirror_, but please use only if you have troubles accessing the links above
     
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.1-linux.tar.bz2
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.1-linux64.tar.bz2
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.1-linux-armhf-raspbian.tar.bz2
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.1-linux-armhf-raring.tar.bz2
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.1-linux-armel.tar.bz2
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.1-osx64.tar.bz2
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.1-win32.zip
    -.. __: http://cobra.cs.uni-duesseldorf.de/~buildmaster/mirror/pypy-4.0.1-ppc64.tar.bz2
    -.. __: http://cobra.cs.uni-duesseldorf.de/~buildmaster/mirror/pypy-4.0.1-ppc64le.tar.bz2
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.1-src.tar.bz2
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.1-src.zip
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-linux.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-linux64.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-linux-armhf-raspbian.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-linux-armhf-raring.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-linux-armel.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-osx64.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-win32.zip
    +.. __: http://cobra.cs.uni-duesseldorf.de/~buildmaster/mirror/pypy-5.0.0-ppc64.tar.bz2
    +.. __: http://cobra.cs.uni-duesseldorf.de/~buildmaster/mirror/pypy-5.0.0-ppc64le.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-src.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-src.zip
     .. _`vcredist_x86.exe`: http://www.microsoft.com/en-us/download/details.aspx?id=5582
     .. __: https://bitbucket.org/pypy/pypy/downloads
     .. _mirror: http://cobra.cs.uni-duesseldorf.de/~buildmaster/mirror/
    @@ -201,7 +201,7 @@
     uncompressed, they run in-place.  For now you can uncompress them
     either somewhere in your home directory or, say, in ``/opt``, and
     if you want, put a symlink from somewhere like
    -``/usr/local/bin/pypy`` to ``/path/to/pypy-4.0.1/bin/pypy``.  Do
    +``/usr/local/bin/pypy`` to ``/path/to/pypy-5.0.0/bin/pypy``.  Do
     not move or copy the executable ``pypy`` outside the tree --- put
     a symlink to it, otherwise it will not find its libraries.
     
    @@ -231,7 +231,7 @@
     If you have pip::
     
         pypy -m pip install git+https://bitbucket.org/pypy/numpy.git
-    pypy -m pip install git+https://bitbucket.org/pypy/numpy.git@pypy-4.0.1
+    pypy -m pip install git+https://bitbucket.org/pypy/numpy.git@pypy-5.0
     
     (the second version selects a particular tag, which may be needed if your
     pypy is not the latest development version.)
    @@ -261,9 +261,9 @@
     1. Get the source code.  The following packages contain the source at
        the same revision as the above binaries:
     
    -   * `pypy-4.0.1-src.tar.bz2`__ (sources)
    +   * `pypy-5.0.0-src.tar.bz2`__ (sources)
     
    -   .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.1-src.tar.bz2
    +   .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-src.tar.bz2
     
        Or you can checkout the current trunk using Mercurial_ (the trunk
        usually works and is of course more up-to-date)::
    @@ -403,20 +403,6 @@
     
     Here are the checksums for each of the downloads
     
    -pypy-4.0.1 md5::
    -
    -    f6721e62f4ba1cdc4cc5ad719369e359  pypy-4.0.1-linux64.tar.bz2
    -    fe8106ac3919c7b4be2766944326a624  pypy-4.0.1-linux-armel.tar.bz2
    -    823b8a457f4c48ebdb8e1ee607b0a893  pypy-4.0.1-linux-armhf-raring.tar.bz2
    -    e45728d413aa88963d4462ebcfaff6ea  pypy-4.0.1-linux-armhf-raspbian.tar.bz2
    -    d1d03aa44df354a3f589473a51406795  pypy-4.0.1-linux.tar.bz2
    -    67ac82d88aaaef8c3074e68d700f3968  pypy-4.0.1-osx64.tar.bz2
    -    f5b35ebedee2fa4fdfee82733be59996  pypy-4.0.1-src.tar.bz2
    -    6b81d79886b215a877fee7624883ee94  pypy-4.0.1-src.zip
    -    dc6573828ee5c82df18f9035f3b19edb  pypy-4.0.1-win32.zip
    -    d4492e65201bb09dca5f97601113dc57  pypy-4.0.1-ppc64le.tar.bz2
    -    2aadbb7638153b9d7c2a832888ed3c1e  pypy-4.0.1-ppc64.tar.bz2
    -
     pypy-5.0.0 md5::
     
         bcb5c830d6380ff78b759dbe075dfc14  pypy-5.0.0-linux-armel.tar.bz2
    @@ -450,20 +436,6 @@
        009c970b5fa75754ae4c32a5d108a8d4  pypy-1.8-sandbox-linux.tar.bz2
     
     
    -pypy-4.0.1 sha256::
    -
    -    0d6090cee59f4b9bab91ddbea76580d0c232b78dae65aaa9e8fa8d4449ba25b4  pypy-4.0.1-linux64.tar.bz2
    -    d1acdd45ebd34580dd632c63c95211f6bae5e9a8f7a46ffa6f0443286ff9f61b  pypy-4.0.1-linux-armel.tar.bz2
    -    e67278ce7423aa7bf99a95fd271cb76763eae3106930f4b7de1fba6a70a3f383  pypy-4.0.1-linux-armhf-raring.tar.bz2
    -    52eef495f560af59a787b9935367cb5f8c83b48e32a80ec3e7060bffac011ecc  pypy-4.0.1-linux-armhf-raspbian.tar.bz2
    -    721920fcbb6aefc9a98e868e32b7f4ea5fd68b7f9305d08d0a2595327c9c0611  pypy-4.0.1-linux.tar.bz2
    -    06be1299691f7ea558bf8e3bdf3d20debb8ba03cd7cadf04f2d6cbd5fd084430  pypy-4.0.1-osx64.tar.bz2
    -    43be0b04bcbde1e24d5f39875c0471cdc7bdb44549e5618d32e49bccaa778111  pypy-4.0.1-ppc64le.tar.bz2
    -    63c0a1614ffc94f94a64790df5ad193193b378f2cf8729213db06fbd64052911  pypy-4.0.1-ppc64.tar.bz2
    -    29f5aa6ba17b34fd980e85172dfeb4086fdc373ad392b1feff2677d2d8aea23c  pypy-4.0.1-src.tar.bz2
    -    e344b383e8c745cc7c26bbcb0a43958e768fdd1d29dd0799cc148e0518d8d36f  pypy-4.0.1-src.zip
    -    9a350a5e6f9b86fb525c6f1300b0c97c021ea8b1e37bfd32a8c4bb7a415d5329  pypy-4.0.1-win32.zip
    -
     pypy-5.0.0 sha1::
         
         b7c82d437086660759ec18582dbdaf198b77e467  pypy-5.0.0-linux-armel.tar.bz2
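As a quick aside on using these tables: a downloaded archive can be checked against the published digest with a few lines of stdlib Python. The filename below is a placeholder, the expected value is the linux64 entry from the pypy-5.0.0 md5 list above, and the same hashlib call works for the sha1/sha256 tables as well.

    import hashlib

    def file_digest(path, algo='md5', chunk_size=1 << 20):
        # stream the file so large tarballs do not have to fit in memory
        h = hashlib.new(algo)
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(chunk_size), b''):
                h.update(chunk)
        return h.hexdigest()

    # placeholder filename; digest copied from the pypy-5.0.0 md5 table above
    expected = '133530cb9957a67807b25d23bb74ac24'
    actual = file_digest('pypy-5.0.0-linux64.tar.bz2', 'md5')
    print('OK' if actual == expected else 'MISMATCH: %s' % actual)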
    
    From pypy.commits at gmail.com  Thu Mar 10 11:45:31 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Thu, 10 Mar 2016 08:45:31 -0800 (PST)
    Subject: [pypy-commit] pypy.org extradoc: update
    Message-ID: <56e1a4ab.4577c20a.bd04d.7990@mx.google.com>
    
    Author: Armin Rigo 
    Branch: extradoc
    Changeset: r718:23e34f63d9d4
    Date: 2016-03-10 17:45 +0100
    http://bitbucket.org/pypy/pypy.org/changeset/23e34f63d9d4/
    
    Log:	update
    
    diff --git a/source/download.txt b/source/download.txt
    --- a/source/download.txt
    +++ b/source/download.txt
    @@ -98,8 +98,8 @@
     .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-linux-armel.tar.bz2
     .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-osx64.tar.bz2
     .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-win32.zip
    -.. __: http://cobra.cs.uni-duesseldorf.de/~buildmaster/mirror/pypy-5.0.0-ppc64.tar.bz2
    -.. __: http://cobra.cs.uni-duesseldorf.de/~buildmaster/mirror/pypy-5.0.0-ppc64le.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-ppc64.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-ppc64le.tar.bz2
     .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-src.tar.bz2
     .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-src.zip
     .. _`vcredist_x86.exe`: http://www.microsoft.com/en-us/download/details.aspx?id=5582
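Since this changeset only swaps link targets, a quick sanity check that a relocated URL still resolves can be done from the stdlib. The snippet below is illustrative only; the URL is one of the bitbucket targets from the diff above, and the body of the file is not saved.

    import urllib2

    url = 'https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-ppc64.tar.bz2'
    try:
        resp = urllib2.urlopen(url, timeout=30)
        # report the HTTP status and the final URL after any redirects
        print('%s %s' % (resp.getcode(), resp.geturl()))
        resp.close()
    except urllib2.HTTPError as e:
        print('broken link: %s' % e.code)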
    
    From pypy.commits at gmail.com  Thu Mar 10 11:49:22 2016
    From: pypy.commits at gmail.com (mattip)
    Date: Thu, 10 Mar 2016 08:49:22 -0800 (PST)
    Subject: [pypy-commit] pypy.org extradoc: update html
    Message-ID: <56e1a592.034cc20a.ad485.774c@mx.google.com>
    
    Author: mattip 
    Branch: extradoc
    Changeset: r719:736bf9456283
    Date: 2016-03-10 18:49 +0200
    http://bitbucket.org/pypy/pypy.org/changeset/736bf9456283/
    
    Log:	update html
    
    diff --git a/download.html b/download.html
    --- a/download.html
    +++ b/download.html
    @@ -125,8 +125,8 @@
     
 FreeBSD 9.2 x86 64 bit (hopefully availabe soon) (see [1] below)
 Windows binary (32bit) (you might need the VS 2008 runtime library installer vcredist_x86.exe.)
-PowerPC64 Linux binary (64bit big-endian, Fedora 20) (see [1] below)
-PowerPC64le Linux binary (64bit little-endian, Fedora 21) (see [1] below)
+PowerPC64 Linux binary (64bit big-endian, Fedora 20) (see [1] below)
+PowerPC64le Linux binary (64bit little-endian, Fedora 21) (see [1] below)
 Source (tar.bz2); Source (zip). See below for more about the sources.
 All our downloads, including previous versions. We also have a mirror, but please use only if you have troubles accessing the links above
  • From pypy.commits at gmail.com Thu Mar 10 12:01:47 2016 From: pypy.commits at gmail.com (mattip) Date: Thu, 10 Mar 2016 09:01:47 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: mention PPC Message-ID: <56e1a87b.e853c20a.2952d.7ae6@mx.google.com> Author: mattip Branch: extradoc Changeset: r720:cb470d7e673f Date: 2016-03-10 19:01 +0200 http://bitbucket.org/pypy/pypy.org/changeset/cb470d7e673f/ Log: mention PPC diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -72,7 +72,7 @@

    There are nightly binary builds available. Those builds are not always as stable as the release, but they contain numerous bugfixes and performance improvements.

    -

    We provide binaries for x86 and ARM Linux, Mac OS/X and Windows for:

    +

    We provide binaries for x86, ARM, and PPC Linux, Mac OS/X and Windows for:

    • the Python2.7 compatible release — PyPy 5.0 — (what's new in PyPy 5.0?)
    • the Python3.2.5 compatible release — PyPy3 2.4.0 — (what's new in PyPy3 2.4.0?).
    • diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -12,7 +12,7 @@ as stable as the release, but they contain numerous bugfixes and performance improvements. -We provide binaries for x86 and ARM Linux, Mac OS/X and Windows for: +We provide binaries for x86, ARM, and PPC Linux, Mac OS/X and Windows for: * the Python2.7 compatible release — **PyPy 5.0** — (`what's new in PyPy 5.0?`_) * the Python3.2.5 compatible release — **PyPy3 2.4.0** — (`what's new in PyPy3 2.4.0?`_). From pypy.commits at gmail.com Thu Mar 10 13:13:34 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 10 Mar 2016 10:13:34 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: make the list use constant size (a bit hardcoded for now) and shorts Message-ID: <56e1b94e.8ee61c0a.47531.ffff952a@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82947:073167872e79 Date: 2016-03-10 20:12 +0200 http://bitbucket.org/pypy/pypy/changeset/073167872e79/ Log: make the list use constant size (a bit hardcoded for now) and shorts diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -12,6 +12,7 @@ ResOperation, oparity, rop, opwithdescr, GuardResOp from rpython.rlib.rarithmetic import intmask from rpython.rlib.objectmodel import we_are_translated +from rpython.rtyper.lltypesystem import rffi, lltype TAGINT, TAGCONST, TAGBOX = range(3) TAGMASK = 0x3 @@ -19,9 +20,6 @@ SMALL_INT_STOP = 2 ** (15 - TAGSHIFT) SMALL_INT_START = -SMALL_INT_STOP -class Sentinel(object): - pass - class BaseTrace(object): pass @@ -41,7 +39,7 @@ return self.pos >= self.end def _next(self): - res = self.trace._ops[self.pos] + res = rffi.cast(lltype.Signed, self.trace._ops[self.pos]) self.pos += 1 return res @@ -106,7 +104,7 @@ return self.pos >= self.end def _next(self): - res = self.trace._ops[self.pos] + res = rffi.cast(lltype.Signed, self.trace._ops[self.pos]) self.pos += 1 return res @@ -127,7 +125,7 @@ return pos def get_snapshot_iter(self, pos): - end = self.trace._ops[pos] + end = rffi.cast(lltype.Signed, self.trace._ops[pos]) return SnapshotIterator(self, pos + 1, end) def next(self): @@ -163,14 +161,15 @@ self.count = count def get_iter(self): - iter = TraceIterator(self.trace, self.start, len(self.trace._ops), + iter = TraceIterator(self.trace, self.start, self.trace._pos, self.inputargs) iter._count = self.count return iter class Trace(BaseTrace): def __init__(self, inputargs): - self._ops = [] + self._ops = [rffi.cast(rffi.SHORT, -15)] * 30000 + self._pos = 0 self._descrs = [None] self._consts = [None] for i, inparg in enumerate(inputargs): @@ -179,14 +178,18 @@ self._count = 0 self.inputargs = inputargs + def append(self, v): + self._ops[self._pos] = rffi.cast(rffi.SHORT, v) + self._pos += 1 + def length(self): - return len(self._ops) + return self._pos def cut_point(self): - return len(self._ops), self._count + return self._pos, self._count def cut_at(self, end): - self._ops = self._ops[:end[0]] + self._pos = end[0] self._count = end[1] def cut_trace_from(self, (start, count), inputargs): @@ -209,20 +212,20 @@ assert False, "unreachable code" def _record_op(self, opnum, argboxes, descr=None): - operations = self._ops pos = self._count - operations.append(opnum) + self.append(opnum) expected_arity = oparity[opnum] if expected_arity == -1: - operations.append(len(argboxes)) + self.append(len(argboxes)) else: assert len(argboxes) == expected_arity 
- operations.extend([self._encode(box) for box in argboxes]) + for box in argboxes: + self.append(self._encode(box)) if opwithdescr[opnum]: if descr is None: - operations.append(-1) + self.append(-1) else: - operations.append(self._encode_descr(descr)) + self.append(self._encode_descr(descr)) self._count += 1 return pos @@ -253,7 +256,7 @@ # self._ops[index] = -newtag - 1 def record_snapshot_link(self, pos): - self._ops.append(-pos - 1) + self.append(-pos - 1) def record_op(self, opnum, argboxes, descr=None): # return an ResOperation instance, ideally die in hell @@ -266,41 +269,36 @@ return tag(TAGBOX, self._record_raw(opnum, tagged_args, descr)) def record_snapshot(self, jitcode, pc, active_boxes): - pos = len(self._ops) - self._ops.append(len(active_boxes)) # unnecessary, can be read from - self._ops.append(jitcode.index) - self._ops.append(pc) + pos = self._pos + self.append(len(active_boxes)) # unnecessary, can be read from + self.append(jitcode.index) + self.append(pc) for box in active_boxes: - self._ops.append(self._encode(box)) # not tagged, as it must be boxes + self.append(self._encode(box)) # not tagged, as it must be boxes return pos def record_list_of_boxes(self, boxes): - self._ops.append(len(boxes)) + self.append(len(boxes)) for box in boxes: - self._ops.append(self._encode(box)) + self.append(self._encode(box)) def get_patchable_position(self): - p = len(self._ops) - if not we_are_translated(): - self._ops.append(Sentinel()) - else: - self._ops.append(-1) + p = self._pos + self.append(-1) return p def patch_position_to_current(self, p): prev = self._ops[p] - if we_are_translated(): - assert prev == -1 - else: - assert isinstance(prev, Sentinel) - self._ops[p] = len(self._ops) + assert prev == rffi.cast(rffi.SHORT, -1) + self._ops[p] = rffi.cast(rffi.SHORT, self._pos) def check_snapshot_jitcode_pc(self, jitcode, pc, resumedata_pos): - assert self._ops[resumedata_pos + 1] == jitcode.index - assert self._ops[resumedata_pos + 2] == pc + # XXX expensive? 
+ assert self._ops[resumedata_pos + 1] == rffi.cast(rffi.SHORT, jitcode.index) + assert self._ops[resumedata_pos + 2] == rffi.cast(rffi.SHORT, pc) def get_iter(self): - return TraceIterator(self, 0, len(self._ops)) + return TraceIterator(self, 0, self._pos) def unpack(self): iter = self.get_iter() diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -119,8 +119,9 @@ target = framestack[n] back = framestack[n - 1] if target.parent_resumedata_position != -1: - t.check_snapshot_jitcode_pc(back.jitcode, back.pc, - target.parent_resumedata_position) + if not we_are_translated(): + t.check_snapshot_jitcode_pc(back.jitcode, back.pc, + target.parent_resumedata_position) t.record_snapshot_link(target.parent_resumedata_position) return pos = t.record_snapshot(back.jitcode, back.pc, From pypy.commits at gmail.com Thu Mar 10 13:54:23 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 10 Mar 2016 10:54:23 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: fix the arithmetics Message-ID: <56e1c2df.030f1c0a.90c18.ffffa6e2@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82948:8b0309662ba1 Date: 2016-03-10 20:53 +0200 http://bitbucket.org/pypy/pypy/changeset/8b0309662ba1/ Log: fix the arithmetics diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -289,7 +289,7 @@ def patch_position_to_current(self, p): prev = self._ops[p] - assert prev == rffi.cast(rffi.SHORT, -1) + assert rffi.cast(lltype.Signed, prev) == -1 self._ops[p] = rffi.cast(rffi.SHORT, self._pos) def check_snapshot_jitcode_pc(self, jitcode, pc, resumedata_pos): From pypy.commits at gmail.com Thu Mar 10 14:02:55 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 10 Mar 2016 11:02:55 -0800 (PST) Subject: [pypy-commit] pypy py3.3-bootstrap-hack: Initialise the filesystem encoding only after imports have been bootstrapped. Message-ID: <56e1c4df.465ec20a.25769.ffffa5cd@mx.google.com> Author: Ronan Lamy Branch: py3.3-bootstrap-hack Changeset: r82949:eb02742ce71d Date: 2016-03-10 19:01 +0000 http://bitbucket.org/pypy/pypy/changeset/eb02742ce71d/ Log: Initialise the filesystem encoding only after imports have been bootstrapped. Creating it requires importing the 'encodings' module from the stdlib, so the stdlib path needs to have been computed first. 
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -769,6 +769,7 @@ # import os, which is used a bit everywhere in app_main, but only imported # *after* setup_bootstrap_path setup_bootstrap_path(executable) + sys.pypy_initfsencoding() try: cmdline = parse_command_line(argv) except CommandLineError as e: @@ -862,7 +863,7 @@ sys.pypy_find_stdlib = pypy_find_stdlib sys.pypy_resolvedirof = pypy_resolvedirof sys.cpython_path = sys.path[:] - + try: sys.exit(int(entry_point(sys.argv[0], sys.argv[1:]))) finally: diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py --- a/pypy/module/sys/__init__.py +++ b/pypy/module/sys/__init__.py @@ -40,6 +40,7 @@ 'pypy_find_stdlib' : 'initpath.pypy_find_stdlib', 'pypy_find_executable' : 'initpath.pypy_find_executable', 'pypy_resolvedirof' : 'initpath.pypy_resolvedirof', + 'pypy_initfsencoding' : 'initpath.pypy_initfsencoding', '_getframe' : 'vm._getframe', '_current_frames' : 'currentframes._current_frames', @@ -97,12 +98,7 @@ def startup(self, space): if space.config.translating: - if not we_are_translated(): - # don't get the filesystemencoding at translation time - assert self.filesystemencoding is None - else: - from pypy.module.sys.interp_encoding import _getfilesystemencoding - self.filesystemencoding = _getfilesystemencoding(space) + assert self.filesystemencoding is None if not space.config.translating or we_are_translated(): from pypy.module.sys import version diff --git a/pypy/module/sys/initpath.py b/pypy/module/sys/initpath.py --- a/pypy/module/sys/initpath.py +++ b/pypy/module/sys/initpath.py @@ -12,6 +12,7 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.module.sys.state import get as get_state +from pypy.module.sys.interp_encoding import _getfilesystemencoding PLATFORM = sys.platform _MACOSX = sys.platform == 'darwin' @@ -166,7 +167,9 @@ space.setitem(space.sys.w_dict, space.wrap('base_exec_prefix'), w_prefix) return space.newlist([_w_fsdecode(space, p) for p in path]) +def pypy_initfsencoding(space): + space.sys.filesystemencoding = _getfilesystemencoding(space) + def _w_fsdecode(space, b): return space.fsdecode(space.wrapbytes(b)) - From pypy.commits at gmail.com Thu Mar 10 14:58:17 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 10 Mar 2016 11:58:17 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: fix Message-ID: <56e1d1d9.6718c20a.b2f2.ffffc570@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82950:fe32c26b09b0 Date: 2016-03-10 21:57 +0200 http://bitbucket.org/pypy/pypy/changeset/fe32c26b09b0/ Log: fix diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -200,7 +200,7 @@ # ____________________________________________________________ -def compile_simple_loop(metainterp, greenkey, trace, enable_opts): +def compile_simple_loop(metainterp, greenkey, trace, runtime_args, enable_opts): from rpython.jit.metainterp.optimizeopt import optimize_trace jitdriver_sd = metainterp.jitdriver_sd @@ -229,7 +229,7 @@ loop.check_consistency() jitcell_token.target_tokens = [target_token] send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, "loop", - loop_info.inputargs, metainterp.box_names_memo) + runtime_args, metainterp.box_names_memo) record_loop_or_bridge(metainterp_sd, loop) return target_token @@ -258,7 +258,7 @@ if start != (0, 0): trace = trace.cut_trace_from(start, 
inputargs) if 'unroll' not in enable_opts or not metainterp.cpu.supports_guard_gc_type: - return compile_simple_loop(metainterp, greenkey, trace, + return compile_simple_loop(metainterp, greenkey, trace, jumpargs, enable_opts) call_pure_results = metainterp.call_pure_results preamble_data = LoopCompileData(trace, jumpargs, From pypy.commits at gmail.com Thu Mar 10 16:44:22 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 10 Mar 2016 13:44:22 -0800 (PST) Subject: [pypy-commit] pypy.org extradoc: update the mirror's location, fix baroquesoftware to provide it, add Message-ID: <56e1eab6.4412c30a.6f1b7.ffffe3eb@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r721:f693e341f922 Date: 2016-03-10 22:44 +0100 http://bitbucket.org/pypy/pypy.org/changeset/f693e341f922/ Log: update the mirror's location, fix baroquesoftware to provide it, add pypy-5.0.0 to it diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -129,7 +129,7 @@
 PowerPC64le Linux binary (64bit little-endian, Fedora 21) (see [1] below)
 Source (tar.bz2); Source (zip). See below for more about the sources.
 All our downloads, including previous versions. We also have a
-mirror, but please use only if you have troubles accessing the links above
+mirror, but please use only if you have troubles accessing the links above
@@ -149,7 +149,7 @@
 Source (tar.bz2)
 Source (zip)
 All our downloads, including previous versions. We also have a
-mirror, but please use only if you have troubles accessing the links above
+mirror, but please use only if you have troubles accessing the links above
  • If your CPU is really, really old, it may be a x86-32 without SSE2. There is untested support for manually translating PyPy's JIT without diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -104,7 +104,7 @@ .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-src.zip .. _`vcredist_x86.exe`: http://www.microsoft.com/en-us/download/details.aspx?id=5582 .. __: https://bitbucket.org/pypy/pypy/downloads -.. _mirror: http://cobra.cs.uni-duesseldorf.de/~buildmaster/mirror/ +.. _mirror: http://buildbot.pypy.org/mirror/ Python 3.2.5 compatible PyPy3 2.4.0 ----------------------------------- From pypy.commits at gmail.com Thu Mar 10 16:52:13 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 10 Mar 2016 13:52:13 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: grow this stuff, just because Message-ID: <56e1ec8d.29cec20a.6211c.ffffdf29@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82951:52a053f8341b Date: 2016-03-10 23:51 +0200 http://bitbucket.org/pypy/pypy/changeset/52a053f8341b/ Log: grow this stuff, just because diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -179,6 +179,9 @@ self.inputargs = inputargs def append(self, v): + if self._pos >= len(self._ops): + # grow by 2X + self._ops = self._ops + [rffi.cast(rffi.SHORT, -15)] * len(self._ops) self._ops[self._pos] = rffi.cast(rffi.SHORT, v) self._pos += 1 From pypy.commits at gmail.com Fri Mar 11 01:05:55 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 10 Mar 2016 22:05:55 -0800 (PST) Subject: [pypy-commit] pypy default: these branches where not merged to default, so do not document them (added docu to s390x-backend) Message-ID: <56e26043.890bc30a.12110.33e5@mx.google.com> Author: Richard Plangger Branch: Changeset: r82952:ea3532fc750b Date: 2016-03-11 07:05 +0100 http://bitbucket.org/pypy/pypy/changeset/ea3532fc750b/ Log: these branches where not merged to default, so do not document them (added docu to s390x-backend) diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,15 +5,9 @@ .. this is a revision shortly after release-5.0 .. startrev: b238b48f9138 -.. branch: memop-simplify3 - -Simplification of zero_array. Start and end index are scaled using res ops (or cpu scaling) rather than doing it manually. - .. branch: s390x-backend The jit compiler backend implementation for the s390x architecutre. +The backend manages 64-bit values in the literal pool of the assembly instead of loading them as immediates. +It includes a simplification for the operation 'zero_array'. Start and length parameters are bytes instead of size. -.. branch: s390x-enhance-speed - -Refactoring to only store 64-bit values in the literal pool of the assembly. Generated machine code uses less space and runs faster. 
- From pypy.commits at gmail.com Fri Mar 11 01:21:02 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 10 Mar 2016 22:21:02 -0800 (PST) Subject: [pypy-commit] pypy default: removed some dead code in the literal pool, removed some comments, unnecessary instruction Message-ID: <56e263ce.a2afc20a.6000b.3b92@mx.google.com> Author: Richard Plangger Branch: Changeset: r82953:253deb8fdde5 Date: 2016-03-11 07:19 +0100 http://bitbucket.org/pypy/pypy/changeset/253deb8fdde5/ Log: removed some dead code in the literal pool, removed some comments, unnecessary instruction diff --git a/rpython/jit/backend/zarch/assembler.py b/rpython/jit/backend/zarch/assembler.py --- a/rpython/jit/backend/zarch/assembler.py +++ b/rpython/jit/backend/zarch/assembler.py @@ -602,7 +602,7 @@ def patch_stack_checks(self, frame_depth): if frame_depth > 0x7fff: - raise JitFrameTooDeep # XXX + raise JitFrameTooDeep for traps_pos, jmp_target in self.frame_depth_to_patch: pmc = OverwritingBuilder(self.mc, traps_pos, 3) # patch 3 instructions as shown above @@ -1022,8 +1022,6 @@ self.mc.STMG(r.r6, r.r15, l.addr(-fpoff+6*WORD, r.SP)) self.mc.LARL(r.POOL, l.halfword(self.pool.pool_start - self.mc.get_relative_pos())) # f8 through f15 are saved registers (= non volatile) - # TODO it would be good to detect if any float is used in the loop - # and to skip this push/pop whenever no float operation occurs for i,reg in enumerate([r.f8, r.f9, r.f10, r.f11, r.f12, r.f13, r.f14, r.f15]): off = -fpoff + STD_FRAME_SIZE_IN_BYTES @@ -1082,8 +1080,6 @@ size = STD_FRAME_SIZE_IN_BYTES # f8 through f15 are saved registers (= non volatile) - # TODO it would be good to detect if any float is used in the loop - # and to skip this push/pop whenever no float operation occurs for i,reg in enumerate([r.f8, r.f9, r.f10, r.f11, r.f12, r.f13, r.f14, r.f15]): self.mc.LD(reg, l.addr(size + size + i*8, r.SP)) @@ -1369,8 +1365,6 @@ SIZE2SCALE = dict([(1<<_i, _i) for _i in range(32)]) def _multiply_by_constant(self, loc, multiply_by, scratch_loc): - # XXX should die together with _apply_scale() but can't because - # of emit_zero_array() and malloc_cond_varsize() at the moment assert loc.is_reg() if multiply_by == 1: return loc diff --git a/rpython/jit/backend/zarch/callbuilder.py b/rpython/jit/backend/zarch/callbuilder.py --- a/rpython/jit/backend/zarch/callbuilder.py +++ b/rpython/jit/backend/zarch/callbuilder.py @@ -203,7 +203,6 @@ self.mc.XGR(r.SCRATCH, r.SCRATCH) # zarch is sequentially consistent self.mc.STG(r.SCRATCH, l.addr(0, RFASTGILPTR)) - self.mc.BCR_rr(0xe, 0x0) def move_real_result_and_call_reacqgil_addr(self, fastgil): diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -128,30 +128,3 @@ # fast gil fastgil = rffi.cast(lltype.Signed, rgil.gil_fetch_fastgil()) self._ensure_value(fastgil, asm) - # TODO add more values that are loaded with load_imm - - # XXX def post_assemble(self, asm): - # XXX mc = asm.mc - # XXX pending_guard_tokens = asm.pending_guard_tokens - # XXX if self.size == 0: - # XXX return - # XXX for guard_token in pending_guard_tokens: - # XXX descr = guard_token.faildescr - # XXX offset = self.offset_descr[descr] - # XXX assert isinstance(offset, int) - # XXX assert offset >= 0 - # XXX assert guard_token._pool_offset != -1 - # XXX ptr = rffi.cast(lltype.Signed, guard_token.gcmap) - # XXX self._overwrite_64(mc, offset + RECOVERY_GCMAP_POOL_OFFSET, ptr) - - def _overwrite_64(self, mc, index, value): - index += 
self.pool_start - - mc.overwrite(index, chr(value >> 56 & 0xff)) - mc.overwrite(index+1, chr(value >> 48 & 0xff)) - mc.overwrite(index+2, chr(value >> 40 & 0xff)) - mc.overwrite(index+3, chr(value >> 32 & 0xff)) - mc.overwrite(index+4, chr(value >> 24 & 0xff)) - mc.overwrite(index+5, chr(value >> 16 & 0xff)) - mc.overwrite(index+6, chr(value >> 8 & 0xff)) - mc.overwrite(index+7, chr(value & 0xff)) From pypy.commits at gmail.com Fri Mar 11 04:28:00 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 11 Mar 2016 01:28:00 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: don't display huge lists either Message-ID: <56e28fa0.55031c0a.6d9d3.6749@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82954:274d9fa82bf0 Date: 2016-03-11 11:26 +0200 http://bitbucket.org/pypy/pypy/changeset/274d9fa82bf0/ Log: don't display huge lists either diff --git a/pypy/tool/gdb_pypy.py b/pypy/tool/gdb_pypy.py --- a/pypy/tool/gdb_pypy.py +++ b/pypy/tool/gdb_pypy.py @@ -288,9 +288,11 @@ RPyListPrinter.recursive = True try: itemlist = [] - for i in range(length): + for i in range(min(length, MAX_DISPLAY_LENGTH)): item = items[i] itemlist.append(str(item)) # may recurse here + if length > MAX_DISPLAY_LENGTH: + itemlist.append("...") str_items = ', '.join(itemlist) finally: RPyListPrinter.recursive = False From pypy.commits at gmail.com Fri Mar 11 04:35:24 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 11 Mar 2016 01:35:24 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: try harder to fix stuff into SHORT Message-ID: <56e2915c.2968c20a.2ab0.7d6f@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82955:f44ec35b6c0b Date: 2016-03-11 11:34 +0200 http://bitbucket.org/pypy/pypy/changeset/f44ec35b6c0b/ Log: try harder to fix stuff into SHORT diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -19,6 +19,8 @@ TAGSHIFT = 2 SMALL_INT_STOP = 2 ** (15 - TAGSHIFT) SMALL_INT_START = -SMALL_INT_STOP +MIN_SHORT = -2**15 + 1 +MAX_SHORT = 2**15 - 1 class BaseTrace(object): pass @@ -121,11 +123,11 @@ def skip_resume_data(self): pos = self.pos - self.pos = self._next() + self.pos += self._next() return pos def get_snapshot_iter(self, pos): - end = rffi.cast(lltype.Signed, self.trace._ops[pos]) + end = rffi.cast(lltype.Signed, self.trace._ops[pos]) + pos return SnapshotIterator(self, pos + 1, end) def next(self): @@ -182,6 +184,7 @@ if self._pos >= len(self._ops): # grow by 2X self._ops = self._ops + [rffi.cast(rffi.SHORT, -15)] * len(self._ops) + assert MIN_SHORT < v < MAX_SHORT self._ops[self._pos] = rffi.cast(rffi.SHORT, v) self._pos += 1 @@ -293,7 +296,7 @@ def patch_position_to_current(self, p): prev = self._ops[p] assert rffi.cast(lltype.Signed, prev) == -1 - self._ops[p] = rffi.cast(rffi.SHORT, self._pos) + self._ops[p] = rffi.cast(rffi.SHORT, self._pos - p) def check_snapshot_jitcode_pc(self, jitcode, pc, resumedata_pos): # XXX expensive? 
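Together with the earlier "make the list use constant size ... and shorts" changeset, the opencoder changes above keep every trace entry inside a signed 16-bit short: append() now asserts the value is in range, and the patched resume-data positions are stored as relative offsets so they stay small even in long traces. A plain-Python sketch of the idea follows; MIN_SHORT and MAX_SHORT are copied from opencoder.py, the rest is illustrative and not the RPython code itself.

    MIN_SHORT = -2**15 + 1
    MAX_SHORT = 2**15 - 1

    trace = []

    def append(v):
        # same guard as Trace.append() in the changeset above
        assert MIN_SHORT < v < MAX_SHORT
        trace.append(v)          # stored as a 16-bit short in the real code

    def get_patchable_position():
        p = len(trace)
        append(-1)               # placeholder, patched once the size is known
        return p

    def patch_position_to_current(p):
        assert trace[p] == -1
        # store the distance rather than the absolute position: it stays
        # small even when the trace has grown far past 2**15 entries
        trace[p] = len(trace) - p

    p = get_patchable_position()
    for _ in range(10):
        append(7)                # stand-ins for opcodes/arguments
    patch_position_to_current(p)

    reader_pos = p
    reader_pos += trace[reader_pos]   # a reader skips the block relatively,
    assert reader_pos == len(trace)   # much like skip_resume_data() above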
From pypy.commits at gmail.com Fri Mar 11 05:06:56 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 11 Mar 2016 02:06:56 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: force Python.h to be included first, silences warnings during compilation Message-ID: <56e298c0.cf0b1c0a.572ba.7a3c@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r82956:dafdc7b5af5e Date: 2016-03-11 12:05 +0200 http://bitbucket.org/pypy/pypy/changeset/dafdc7b5af5e/ Log: force Python.h to be included first, silences warnings during compilation diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -59,8 +59,9 @@ class CConfig: _compilation_info_ = ExternalCompilationInfo( include_dirs=include_dirs, - includes=['Python.h', 'stdarg.h', 'structmember.h'], + includes=['stdarg.h', 'structmember.h'], compile_extra=['-DPy_BUILD_CORE'], + pre_include_bits = ['#include "Python.h"'], ) class CConfig2: From pypy.commits at gmail.com Fri Mar 11 05:06:58 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 11 Mar 2016 02:06:58 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: ml_name and ml_doc are const char * Message-ID: <56e298c2.a8c0c20a.f5869.ffff8014@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r82957:1e56856d07a0 Date: 2016-03-11 12:05 +0200 http://bitbucket.org/pypy/pypy/changeset/1e56856d07a0/ Log: ml_name and ml_doc are const char * diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -21,10 +21,10 @@ PyMethodDef = cpython_struct( 'PyMethodDef', - [('ml_name', rffi.CCHARP), + [('ml_name', rffi.CONST_CCHARP), ('ml_meth', PyCFunction_typedef), ('ml_flags', rffi.INT_real), - ('ml_doc', rffi.CCHARP), + ('ml_doc', rffi.CONST_CCHARP), ]) PyCFunctionObjectStruct = cpython_struct( diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -19,7 +19,7 @@ Py_TPFLAGS_HAVE_GETCHARBUFFER, build_type_checkers, StaticObjectBuilder, PyObjectFields, Py_TPFLAGS_BASETYPE) from pypy.module.cpyext.methodobject import ( - PyDescr_NewWrapper, PyCFunction_NewEx, PyCFunction_typedef) + PyDescr_NewWrapper, PyCFunction_NewEx, PyCFunction_typedef, PyMethodDef) from pypy.module.cpyext.modsupport import convert_method_defs from pypy.module.cpyext.pyobject import ( PyObject, make_ref, create_ref, from_ref, get_typedescr, make_typedescr, @@ -289,14 +289,13 @@ state = space.fromcache(State) if state.new_method_def: return state.new_method_def - from pypy.module.cpyext.modsupport import PyMethodDef ptr = lltype.malloc(PyMethodDef, flavor="raw", zero=True, immortal=True) - ptr.c_ml_name = rffi.str2charp("__new__") + ptr.c_ml_name = rffi.cast(rffi.CONST_CCHARP, rffi.str2charp("__new__")) lltype.render_immortal(ptr.c_ml_name) rffi.setintfield(ptr, 'c_ml_flags', METH_VARARGS | METH_KEYWORDS) - ptr.c_ml_doc = rffi.str2charp( - "T.__new__(S, ...) -> a new object with type S, a subtype of T") + ptr.c_ml_doc = rffi.cast(rffi.CONST_CCHARP, rffi.str2charp( + "T.__new__(S, ...) 
-> a new object with type S, a subtype of T")) lltype.render_immortal(ptr.c_ml_doc) state.new_method_def = ptr return ptr From pypy.commits at gmail.com Fri Mar 11 05:15:05 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 11 Mar 2016 02:15:05 -0800 (PST) Subject: [pypy-commit] pypy default: Trying to use "FILE *" correctly to avoid some compilation warnings. Message-ID: <56e29aa9.07b71c0a.f9d29.78cc@mx.google.com> Author: Armin Rigo Branch: Changeset: r82958:2065f708bf55 Date: 2016-03-11 11:14 +0100 http://bitbucket.org/pypy/pypy/changeset/2065f708bf55/ Log: Trying to use "FILE *" correctly to avoid some compilation warnings. diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -353,10 +353,11 @@ # ____________________________________________________________ -rffi_fdopen = rffi.llexternal("fdopen", [rffi.INT, rffi.CCHARP], rffi.CCHARP, +FILEP = rffi.COpaquePtr("FILE") +rffi_fdopen = rffi.llexternal("fdopen", [rffi.INT, rffi.CCHARP], FILEP, save_err=rffi.RFFI_SAVE_ERRNO) -rffi_setbuf = rffi.llexternal("setbuf", [rffi.CCHARP, rffi.CCHARP], lltype.Void) -rffi_fclose = rffi.llexternal("fclose", [rffi.CCHARP], rffi.INT) +rffi_setbuf = rffi.llexternal("setbuf", [FILEP, rffi.CCHARP], lltype.Void) +rffi_fclose = rffi.llexternal("fclose", [FILEP], rffi.INT) class CffiFileObj(object): _immutable_ = True @@ -382,4 +383,4 @@ fileobj.cffi_fileobj = CffiFileObj(fd, fileobj.mode) except OSError, e: raise wrap_oserror(space, e) - return fileobj.cffi_fileobj.llf + return rffi.cast(rffi.CCHARP, fileobj.cffi_fileobj.llf) From pypy.commits at gmail.com Fri Mar 11 05:20:22 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 11 Mar 2016 02:20:22 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: encode positions in link in 2 shorts Message-ID: <56e29be6.657bc20a.8d8be.ffff9276@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82959:a0468c0d2dca Date: 2016-03-11 12:19 +0200 http://bitbucket.org/pypy/pypy/changeset/a0468c0d2dca/ Log: encode positions in link in 2 shorts diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -59,8 +59,8 @@ self.save_pos = -1 size = self._next() if size < 0: - self.save_pos = self.pos - self.pos = -size - 1 + self.save_pos = self.pos + 1 + self.pos = ((-size - 1) << 15) | (self._next()) assert self.pos >= 0 size = self._next() assert size >= 0 @@ -262,7 +262,10 @@ # self._ops[index] = -newtag - 1 def record_snapshot_link(self, pos): - self.append(-pos - 1) + lower = pos & 0x7fff + upper = pos >> 15 + self.append(-upper-1) + self.append(lower) def record_op(self, opnum, argboxes, descr=None): # return an ResOperation instance, ideally die in hell diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -7,7 +7,7 @@ from rpython.jit.metainterp.test.strategies import lists_of_operations from rpython.jit.metainterp.optimizeopt.test.test_util import BaseTest from rpython.jit.metainterp.history import TreeLoop, AbstractDescr -from hypothesis import given +from hypothesis import given, strategies class JitCode(object): def __init__(self, index): @@ -141,6 +141,13 @@ loop2.operations = l BaseTest.assert_equal(loop1, loop2) + 
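    # (Illustration only, not part of the changeset: the two-short packing
    #  used by record_snapshot_link() above, spelled out in plain Python.
    #  A snapshot position no longer fits into one 15-bit payload, so it is
    #  split across two shorts; the first one is stored negative so a reader
    #  can tell a link from an ordinary, non-negative size field.)
    def encode_link(pos):
        lower = pos & 0x7fff          # low 15 bits
        upper = pos >> 15             # remaining high bits
        return -upper - 1, lower

    def decode_link(first, second):
        return ((-first - 1) << 15) | second

    for pos in (0, 1, 0x7fff, 0x8000, 12345678, 2**25):
        assert decode_link(*encode_link(pos)) == pos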
@given(strategies.integers(min_value=0, max_value=2**25)) + def test_packing(self, i): + t = Trace([]) + t.record_snapshot_link(i) + iter = t.get_iter() + assert (((-iter._next() - 1) << 15) | (iter._next())) == i + def test_cut_trace_from(self): i0, i1, i2 = InputArgInt(), InputArgInt(), InputArgInt() t = Trace([i0, i1, i2]) From pypy.commits at gmail.com Fri Mar 11 06:10:58 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 11 Mar 2016 03:10:58 -0800 (PST) Subject: [pypy-commit] pypy default: added enum34 package which is otherwise not picked up by pip on s390x build bot Message-ID: <56e2a7c2.c711c30a.d2f7e.ffffa2e8@mx.google.com> Author: Richard Plangger Branch: Changeset: r82960:4475205e86ba Date: 2016-03-11 07:55 +0100 http://bitbucket.org/pypy/pypy/changeset/4475205e86ba/ Log: added enum34 package which is otherwise not picked up by pip on s390x build bot diff --git a/requirements.txt b/requirements.txt --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,4 @@ # hypothesis is used for test generation on untranslated jit tests hypothesis enum>=0.4.6 # is a dependency, but old pip does not pick it up +enum34>=1.1.2 From pypy.commits at gmail.com Fri Mar 11 06:11:00 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 11 Mar 2016 03:11:00 -0800 (PST) Subject: [pypy-commit] pypy default: os.uname not avail. on windows (thx matti for pointing it out) Message-ID: <56e2a7c4.04371c0a.9ea5f.ffff91dd@mx.google.com> Author: Richard Plangger Branch: Changeset: r82961:3cab2735d806 Date: 2016-03-11 12:07 +0100 http://bitbucket.org/pypy/pypy/changeset/3cab2735d806/ Log: os.uname not avail. on windows (thx matti for pointing it out) diff --git a/pypy/module/_vmprof/conftest.py b/pypy/module/_vmprof/conftest.py --- a/pypy/module/_vmprof/conftest.py +++ b/pypy/module/_vmprof/conftest.py @@ -1,6 +1,6 @@ -import py, os +import py, platform def pytest_collect_directory(path, parent): - if os.uname()[4] == 's390x': + if platform.machine() == 's390x': py.test.skip("zarch tests skipped") pytest_collect_file = pytest_collect_directory From pypy.commits at gmail.com Fri Mar 11 06:58:06 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 11 Mar 2016 03:58:06 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: an attempt to compress list of consts Message-ID: <56e2b2ce.c711c30a.d2f7e.ffffb49c@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82962:5008701233ab Date: 2016-03-11 13:57 +0200 http://bitbucket.org/pypy/pypy/changeset/5008701233ab/ Log: an attempt to compress list of consts diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -205,7 +205,7 @@ class Const(AbstractValue): - __slots__ = () + __attrs__ = () @staticmethod def _new(x): diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -7,14 +7,14 @@ [ ...] 
""" -from rpython.jit.metainterp.history import ConstInt, Const +from rpython.jit.metainterp.history import ConstInt, Const, ConstFloat, ConstPtr from rpython.jit.metainterp.resoperation import AbstractResOp, AbstractInputArg,\ ResOperation, oparity, rop, opwithdescr, GuardResOp from rpython.rlib.rarithmetic import intmask from rpython.rlib.objectmodel import we_are_translated -from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rtyper.lltypesystem import rffi, lltype, llmemory -TAGINT, TAGCONST, TAGBOX = range(3) +TAGINT, TAGCONSTPTR, TAGCONSTOTHER, TAGBOX = range(4) TAGMASK = 0x3 TAGSHIFT = 2 SMALL_INT_STOP = 2 ** (15 - TAGSHIFT) @@ -116,8 +116,13 @@ return self._get(v) elif tag == TAGINT: return ConstInt(v) - elif tag == TAGCONST: - return self.trace._consts[v] + elif tag == TAGCONSTPTR: + return ConstPtr(self.trace._refs[v]) + elif tag == TAGCONSTOTHER: + if v & 1: + return ConstFloat(self.trace._floats[v >> 1]) + else: + return ConstInt(self.trace._bigints[v >> 1]) else: assert False @@ -173,7 +178,12 @@ self._ops = [rffi.cast(rffi.SHORT, -15)] * 30000 self._pos = 0 self._descrs = [None] - self._consts = [None] + self._refs = [] + self._refs_dict = {} + self._bigints = [] + self._bigints_dict = {} + self._floats = [] + self._floats_dict = {} for i, inparg in enumerate(inputargs): assert isinstance(inparg, AbstractInputArg) inparg.position = -i - 1 @@ -188,6 +198,11 @@ self._ops[self._pos] = rffi.cast(rffi.SHORT, v) self._pos += 1 + def done(self): + self._bigints_dict = {} + self._refs_dict = {} + self._floats_dict = {} + def length(self): return self._pos @@ -207,9 +222,34 @@ isinstance(box.getint(), int) and # symbolics SMALL_INT_START <= box.getint() < SMALL_INT_STOP): return tag(TAGINT, box.getint()) + elif isinstance(box, ConstInt): + if not isinstance(box.getint(), int): + # symbolics, for tests, don't worry about caching + v = len(self._bigints) << 1 + self._bigints.append(box.getint()) + else: + v = self._bigints_dict.get(box.getint(), -1) + if v == -1: + v = len(self._bigints) << 1 + self._bigints_dict[box.getint()] = v + self._bigints.append(box.getint()) + return tag(TAGCONSTOTHER, v) + elif isinstance(box, ConstFloat): + v = self._floats_dict.get(box.getfloat(), -1) + if v == -1: + v = (len(self._floats) << 1) | 1 + self._floats_dict[box.getfloat()] = v + self._floats.append(box.getfloat()) + return tag(TAGCONSTOTHER, v) else: - self._consts.append(box) - return tag(TAGCONST, len(self._consts) - 1) + assert isinstance(box, ConstPtr) + addr = llmemory.cast_ptr_to_adr(box.getref_base()) + v = self._refs_dict.get(addr, -1) + if v == -1: + v = len(self._refs) + self._refs_dict[addr] = v + self._refs.append(box.getref_base()) + return tag(TAGCONSTPTR, v) elif isinstance(box, AbstractResOp): return tag(TAGBOX, box.get_position()) elif isinstance(box, AbstractInputArg): diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2566,6 +2566,7 @@ try_disabling_unroll=False, exported_state=None): num_green_args = self.jitdriver_sd.num_green_args greenkey = original_boxes[:num_green_args] + self.history.trace.done() if not self.partial_trace: ptoken = self.get_procedure_token(greenkey) if ptoken is not None and ptoken.target_tokens is not None: @@ -2618,6 +2619,7 @@ self.history.record(rop.JUMP, live_arg_boxes[num_green_args:], None, descr=target_jitcell_token) self.history.ends_with_jump = True + self.history.trace.done() try: target_token = 
compile.compile_trace(self, self.resumekey, live_arg_boxes[num_green_args:]) From pypy.commits at gmail.com Fri Mar 11 07:24:06 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 11 Mar 2016 04:24:06 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: ups Message-ID: <56e2b8e6.0775c20a.7b5df.ffffbb26@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82963:b591405b96fe Date: 2016-03-11 14:23 +0200 http://bitbucket.org/pypy/pypy/changeset/b591405b96fe/ Log: ups diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -205,7 +205,7 @@ class Const(AbstractValue): - __attrs__ = () + _attrs_ = () @staticmethod def _new(x): From pypy.commits at gmail.com Fri Mar 11 07:39:41 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 11 Mar 2016 04:39:41 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: fix rpython Message-ID: <56e2bc8d.02f0c20a.e0411.ffffbef1@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82964:49a54211e9fc Date: 2016-03-11 14:38 +0200 http://bitbucket.org/pypy/pypy/changeset/49a54211e9fc/ Log: fix rpython diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -202,6 +202,8 @@ self._bigints_dict = {} self._refs_dict = {} self._floats_dict = {} + return 0 # completely different than TraceIter.done, but we have to + # share the base class def length(self): return self._pos From pypy.commits at gmail.com Fri Mar 11 07:40:34 2016 From: pypy.commits at gmail.com (alex_gaynor) Date: Fri, 11 Mar 2016 04:40:34 -0800 (PST) Subject: [pypy-commit] pypy default: split the endswith([tuple]) path into its own function so the JIT can look inside Message-ID: <56e2bcc2.019e1c0a.e2824.ffffadb1@mx.google.com> Author: Alex Gaynor Branch: Changeset: r82965:f20ce35486b5 Date: 2016-03-11 07:39 -0500 http://bitbucket.org/pypy/pypy/changeset/f20ce35486b5/ Log: split the endswith([tuple]) path into its own function so the JIT can look inside diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -629,13 +629,16 @@ def descr_endswith(self, space, w_suffix, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end) if space.isinstance_w(w_suffix, space.w_tuple): - for w_suffix in space.fixedview(w_suffix): - if self._endswith(space, value, w_suffix, start, end): - return space.w_True - return space.w_False + return self._endswith_tuple(space, value, w_suffix, start, end) return space.newbool(self._endswith(space, value, w_suffix, start, end)) + def _endswith_tuple(self, space, value, w_suffix, start, end): + for w_suffix in space.fixedview(w_suffix): + if self._endswith(space, value, w_suffix, start, end): + return space.w_True + return space.w_False + def _endswith(self, space, value, w_prefix, start, end): prefix = self._op_val(space, w_prefix) if start > len(value): @@ -795,5 +798,3 @@ def _get_buffer(space, w_obj): return space.buffer_w(w_obj, space.BUF_SIMPLE) - - From pypy.commits at gmail.com Fri Mar 11 07:40:36 2016 From: pypy.commits at gmail.com (alex_gaynor) Date: Fri, 11 Mar 2016 04:40:36 -0800 (PST) Subject: [pypy-commit] pypy default: merged upstream Message-ID: <56e2bcc4.8216c20a.e1784.ffffba59@mx.google.com> Author: Alex Gaynor Branch: Changeset: 
r82966:4337613701b7 Date: 2016-03-11 07:39 -0500 http://bitbucket.org/pypy/pypy/changeset/4337613701b7/ Log: merged upstream diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -629,13 +629,16 @@ def descr_endswith(self, space, w_suffix, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end) if space.isinstance_w(w_suffix, space.w_tuple): - for w_suffix in space.fixedview(w_suffix): - if self._endswith(space, value, w_suffix, start, end): - return space.w_True - return space.w_False + return self._endswith_tuple(space, value, w_suffix, start, end) return space.newbool(self._endswith(space, value, w_suffix, start, end)) + def _endswith_tuple(self, space, value, w_suffix, start, end): + for w_suffix in space.fixedview(w_suffix): + if self._endswith(space, value, w_suffix, start, end): + return space.w_True + return space.w_False + def _endswith(self, space, value, w_prefix, start, end): prefix = self._op_val(space, w_prefix) if start > len(value): @@ -795,5 +798,3 @@ def _get_buffer(space, w_obj): return space.buffer_w(w_obj, space.BUF_SIMPLE) - - From pypy.commits at gmail.com Fri Mar 11 08:22:00 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 11 Mar 2016 05:22:00 -0800 (PST) Subject: [pypy-commit] pypy default: when running tests in parallel, restrict make to a single core Message-ID: <56e2c678.838d1c0a.6474b.ffffc26b@mx.google.com> Author: mattip Branch: Changeset: r82967:b2f40ec51e8b Date: 2016-03-11 15:21 +0200 http://bitbucket.org/pypy/pypy/changeset/b2f40ec51e8b/ Log: when running tests in parallel, restrict make to a single core diff --git a/testrunner/runner.py b/testrunner/runner.py --- a/testrunner/runner.py +++ b/testrunner/runner.py @@ -237,6 +237,11 @@ N = run_param.parallel_runs if N > 1: out.write("running %d parallel test workers\n" % N) + s = 'setting' + if os.environ.get('MAKEFLAGS'): + s = 'overriding' + out.write("%s MAKEFLAGS to '-j1'\n" % s) + os.environ['MAKEFLAGS'] = '-j1' failure = False for testname in testdirs: From pypy.commits at gmail.com Fri Mar 11 08:22:04 2016 From: pypy.commits at gmail.com (alex_gaynor) Date: Fri, 11 Mar 2016 05:22:04 -0800 (PST) Subject: [pypy-commit] pypy default: Do the same for startswith(tuple) Message-ID: <56e2c67c.2179c20a.fd8e2.ffffccff@mx.google.com> Author: Alex Gaynor Branch: Changeset: r82968:59636e6b392b Date: 2016-03-11 08:21 -0500 http://bitbucket.org/pypy/pypy/changeset/59636e6b392b/ Log: Do the same for startswith(tuple) diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -609,13 +609,16 @@ def descr_startswith(self, space, w_prefix, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end) if space.isinstance_w(w_prefix, space.w_tuple): - for w_prefix in space.fixedview(w_prefix): - if self._startswith(space, value, w_prefix, start, end): - return space.w_True - return space.w_False + return self._startswith_tuple(space, value, w_prefix, start, end) return space.newbool(self._startswith(space, value, w_prefix, start, end)) + def _startswith_tuple(self, space, value, w_prefix, start, end): + for w_prefix in space.fixedview(w_prefix): + if self._startswith(space, value, w_prefix, start, end): + return space.w_True + return space.w_False + def _startswith(self, space, value, w_prefix, start, end): prefix 
= self._op_val(space, w_prefix) if start > len(value): From pypy.commits at gmail.com Fri Mar 11 08:28:41 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 11 Mar 2016 05:28:41 -0800 (PST) Subject: [pypy-commit] pypy default: merge heads, not clear what happened Message-ID: <56e2c809.c96cc20a.a5021.ffffd0f0@mx.google.com> Author: mattip Branch: Changeset: r82969:8357c9e1778f Date: 2016-03-11 15:25 +0200 http://bitbucket.org/pypy/pypy/changeset/8357c9e1778f/ Log: merge heads, not clear what happened diff --git a/pypy/objspace/std/stringmethods.py b/pypy/objspace/std/stringmethods.py --- a/pypy/objspace/std/stringmethods.py +++ b/pypy/objspace/std/stringmethods.py @@ -609,13 +609,16 @@ def descr_startswith(self, space, w_prefix, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end) if space.isinstance_w(w_prefix, space.w_tuple): - for w_prefix in space.fixedview(w_prefix): - if self._startswith(space, value, w_prefix, start, end): - return space.w_True - return space.w_False + return self._startswith_tuple(space, value, w_prefix, start, end) return space.newbool(self._startswith(space, value, w_prefix, start, end)) + def _startswith_tuple(self, space, value, w_prefix, start, end): + for w_prefix in space.fixedview(w_prefix): + if self._startswith(space, value, w_prefix, start, end): + return space.w_True + return space.w_False + def _startswith(self, space, value, w_prefix, start, end): prefix = self._op_val(space, w_prefix) if start > len(value): From pypy.commits at gmail.com Fri Mar 11 08:42:34 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 11 Mar 2016 05:42:34 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: use a correct dict Message-ID: <56e2cb4a.500f1c0a.aa328.ffffc712@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82970:33be2ad6b87a Date: 2016-03-11 15:41 +0200 http://bitbucket.org/pypy/pypy/changeset/33be2ad6b87a/ Log: use a correct dict diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -13,6 +13,7 @@ from rpython.rlib.rarithmetic import intmask from rpython.rlib.objectmodel import we_are_translated from rpython.rtyper.lltypesystem import rffi, lltype, llmemory +from rpython.jit.metainterp.typesystem import llhelper TAGINT, TAGCONSTPTR, TAGCONSTOTHER, TAGBOX = range(4) TAGMASK = 0x3 @@ -177,9 +178,13 @@ def __init__(self, inputargs): self._ops = [rffi.cast(rffi.SHORT, -15)] * 30000 self._pos = 0 + self._snapshot_lgt = 0 + self._consts_bigint = 0 + self._consts_float = 0 + self._consts_ptr = 0 self._descrs = [None] - self._refs = [] - self._refs_dict = {} + self._refs = [lltype.nullptr(llmemory.GCREF.TO)] + self._refs_dict = llhelper.new_ref_dict_3() self._bigints = [] self._bigints_dict = {} self._floats = [] @@ -199,9 +204,18 @@ self._pos += 1 def done(self): + from rpython.rlib.debug import debug_start, debug_stop, debug_print + self._bigints_dict = {} - self._refs_dict = {} + self._refs_dict = llhelper.new_ref_dict_3() self._floats_dict = {} + debug_start("jit-trace-done") + debug_print("trace length: " + str(self._pos)) + debug_print(" snapshots: " + str(self._snapshot_lgt)) + debug_print(" bigint consts: " + str(self._consts_bigint) + " " + str(len(self._bigints))) + debug_print(" float consts: " + str(self._consts_float) + " " + str(len(self._floats))) + debug_print(" ref consts: " + str(self._consts_ptr) + " " + str(len(self._refs))) + 
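Editorial note: the counters added to the trace builder in the changeset above are emitted through RPython's standard debug-section machinery, so in a translated binary they only appear when the PYPYLOG selection matches the section name. A minimal sketch of the same pattern follows; the function name and counters are illustrative, not part of the changeset. Note that every operand has to be stringified explicitly before concatenation, which is exactly the detail a later changeset in this thread fixes.

    from rpython.rlib.debug import debug_start, debug_print, debug_stop

    def report_trace_stats(trace_length, num_ref_consts):
        # everything between start/stop belongs to the "jit-trace-done" section
        debug_start("jit-trace-done")
        debug_print("trace length: " + str(trace_length))
        debug_print(" ref consts: " + str(num_ref_consts))
        debug_stop("jit-trace-done")
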
debug_stop("jit-trace-done") return 0 # completely different than TraceIter.done, but we have to # share the base class @@ -225,6 +239,7 @@ SMALL_INT_START <= box.getint() < SMALL_INT_STOP): return tag(TAGINT, box.getint()) elif isinstance(box, ConstInt): + self._consts_bigint += 1 if not isinstance(box.getint(), int): # symbolics, for tests, don't worry about caching v = len(self._bigints) << 1 @@ -237,6 +252,7 @@ self._bigints.append(box.getint()) return tag(TAGCONSTOTHER, v) elif isinstance(box, ConstFloat): + self._consts_float += 1 v = self._floats_dict.get(box.getfloat(), -1) if v == -1: v = (len(self._floats) << 1) | 1 @@ -244,7 +260,10 @@ self._floats.append(box.getfloat()) return tag(TAGCONSTOTHER, v) else: + self._consts_ptr += 1 assert isinstance(box, ConstPtr) + if not box.getref_base(): + return tag(TAGCONSTPTR, 0) addr = llmemory.cast_ptr_to_adr(box.getref_base()) v = self._refs_dict.get(addr, -1) if v == -1: @@ -341,6 +360,7 @@ def patch_position_to_current(self, p): prev = self._ops[p] assert rffi.cast(lltype.Signed, prev) == -1 + self._snapshot_lgt += self._pos - p self._ops[p] = rffi.cast(rffi.SHORT, self._pos - p) def check_snapshot_jitcode_pc(self, jitcode, pc, resumedata_pos): diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2265,6 +2265,7 @@ warmrunnerstate = self.jitdriver_sd.warmstate if self.history.length() > warmrunnerstate.trace_limit: jd_sd, greenkey_of_huge_function = self.find_biggest_function() + self.history.trace.done() self.staticdata.stats.record_aborted(greenkey_of_huge_function) self.portal_trace_positions = None if greenkey_of_huge_function is not None: @@ -2566,6 +2567,8 @@ try_disabling_unroll=False, exported_state=None): num_green_args = self.jitdriver_sd.num_green_args greenkey = original_boxes[:num_green_args] + import pdb + pdb.set_trace() self.history.trace.done() if not self.partial_trace: ptoken = self.get_procedure_token(greenkey) diff --git a/rpython/jit/metainterp/typesystem.py b/rpython/jit/metainterp/typesystem.py --- a/rpython/jit/metainterp/typesystem.py +++ b/rpython/jit/metainterp/typesystem.py @@ -109,6 +109,8 @@ return r_dict(rd_eq, rd_hash) def new_ref_dict_2(self): return r_dict(rd_eq, rd_hash) + def new_ref_dict_3(self): + return r_dict(rd_eq, rd_hash) def cast_vtable_to_hashable(self, cpu, ptr): adr = llmemory.cast_ptr_to_adr(ptr) From pypy.commits at gmail.com Fri Mar 11 08:45:33 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 11 Mar 2016 05:45:33 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: add more debug prints Message-ID: <56e2cbfd.aa0ac20a.1b4be.ffffd699@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82971:5645eb6ac117 Date: 2016-03-11 15:44 +0200 http://bitbucket.org/pypy/pypy/changeset/5645eb6ac117/ Log: add more debug prints diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -215,6 +215,7 @@ debug_print(" bigint consts: " + str(self._consts_bigint) + " " + str(len(self._bigints))) debug_print(" float consts: " + str(self._consts_float) + " " + str(len(self._floats))) debug_print(" ref consts: " + str(self._consts_ptr) + " " + str(len(self._refs))) + debug_print(" descrs: " + len(self._descrs)) debug_stop("jit-trace-done") return 0 # completely different than TraceIter.done, but we have to # share the base class From pypy.commits at 
gmail.com Fri Mar 11 08:59:53 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 11 Mar 2016 05:59:53 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: grrr, no complaints untraslated Message-ID: <56e2cf59.84c9c20a.96d6d.ffffd949@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82972:a1a530abab98 Date: 2016-03-11 15:59 +0200 http://bitbucket.org/pypy/pypy/changeset/a1a530abab98/ Log: grrr, no complaints untraslated diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -265,7 +265,7 @@ assert isinstance(box, ConstPtr) if not box.getref_base(): return tag(TAGCONSTPTR, 0) - addr = llmemory.cast_ptr_to_adr(box.getref_base()) + addr = box.getref_base() v = self._refs_dict.get(addr, -1) if v == -1: v = len(self._refs) From pypy.commits at gmail.com Fri Mar 11 09:07:30 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 11 Mar 2016 06:07:30 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: kill a pdv Message-ID: <56e2d122.05de1c0a.4b2c3.ffffcdeb@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82973:919e5be3f5d5 Date: 2016-03-11 16:06 +0200 http://bitbucket.org/pypy/pypy/changeset/919e5be3f5d5/ Log: kill a pdv diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2567,8 +2567,6 @@ try_disabling_unroll=False, exported_state=None): num_green_args = self.jitdriver_sd.num_green_args greenkey = original_boxes[:num_green_args] - import pdb - pdb.set_trace() self.history.trace.done() if not self.partial_trace: ptoken = self.get_procedure_token(greenkey) From pypy.commits at gmail.com Fri Mar 11 09:16:11 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 11 Mar 2016 06:16:11 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: getting tired, clearly Message-ID: <56e2d32b.01adc20a.1ebbb.ffffe6b8@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82974:48ee9f914fee Date: 2016-03-11 16:15 +0200 http://bitbucket.org/pypy/pypy/changeset/48ee9f914fee/ Log: getting tired, clearly diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -215,7 +215,7 @@ debug_print(" bigint consts: " + str(self._consts_bigint) + " " + str(len(self._bigints))) debug_print(" float consts: " + str(self._consts_float) + " " + str(len(self._floats))) debug_print(" ref consts: " + str(self._consts_ptr) + " " + str(len(self._refs))) - debug_print(" descrs: " + len(self._descrs)) + debug_print(" descrs: " + str(len(self._descrs))) debug_stop("jit-trace-done") return 0 # completely different than TraceIter.done, but we have to # share the base class From pypy.commits at gmail.com Fri Mar 11 10:07:40 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 11 Mar 2016 07:07:40 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: call done in missing places Message-ID: <56e2df3c.0e2e1c0a.151a.ffffe7e3@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82975:88e0a5940b9c Date: 2016-03-11 17:06 +0200 http://bitbucket.org/pypy/pypy/changeset/88e0a5940b9c/ Log: call done in missing places diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2656,6 +2656,7 @@ # 
FIXME: can we call compile_trace? token = loop_tokens[0].finishdescr self.history.record(rop.FINISH, exits, None, descr=token) + self.history.trace.done() target_token = compile.compile_trace(self, self.resumekey, exits) if target_token is not token: compile.giveup() @@ -2681,6 +2682,7 @@ sd = self.staticdata token = sd.loop_tokens_exit_frame_with_exception_ref[0].finishdescr self.history.record(rop.FINISH, [valuebox], None, descr=token) + self.history.trace.done() target_token = compile.compile_trace(self, self.resumekey, [valuebox]) if target_token is not token: compile.giveup() From pypy.commits at gmail.com Fri Mar 11 10:10:16 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 11 Mar 2016 07:10:16 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: improve statistics Message-ID: <56e2dfd8.02f0c20a.e0411.fffff58d@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82976:c9d8c9308b69 Date: 2016-03-11 17:09 +0200 http://bitbucket.org/pypy/pypy/changeset/c9d8c9308b69/ Log: improve statistics diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -181,6 +181,8 @@ self._snapshot_lgt = 0 self._consts_bigint = 0 self._consts_float = 0 + self._sharings = 0 + self._total_snapshots = 0 self._consts_ptr = 0 self._descrs = [None] self._refs = [lltype.nullptr(llmemory.GCREF.TO)] @@ -212,6 +214,8 @@ debug_start("jit-trace-done") debug_print("trace length: " + str(self._pos)) debug_print(" snapshots: " + str(self._snapshot_lgt)) + debug_print(" sharings: " + str(self._sharings)) + debug_print(" total snapshots: " + str(self._total_snapshots)) debug_print(" bigint consts: " + str(self._consts_bigint) + " " + str(len(self._bigints))) debug_print(" float consts: " + str(self._consts_float) + " " + str(len(self._floats))) debug_print(" ref consts: " + str(self._consts_ptr) + " " + str(len(self._refs))) @@ -324,6 +328,7 @@ # self._ops[index] = -newtag - 1 def record_snapshot_link(self, pos): + self._sharings += 1 lower = pos & 0x7fff upper = pos >> 15 self.append(-upper-1) @@ -340,6 +345,7 @@ return tag(TAGBOX, self._record_raw(opnum, tagged_args, descr)) def record_snapshot(self, jitcode, pc, active_boxes): + self._total_snapshots += 1 pos = self._pos self.append(len(active_boxes)) # unnecessary, can be read from self.append(jitcode.index) From pypy.commits at gmail.com Fri Mar 11 10:50:33 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 11 Mar 2016 07:50:33 -0800 (PST) Subject: [pypy-commit] pypy remove-py-log: Start Message-ID: <56e2e949.d3921c0a.6ff3a.fffff611@mx.google.com> Author: Armin Rigo Branch: remove-py-log Changeset: r82978:55c3c232ac56 Date: 2016-03-11 16:49 +0100 http://bitbucket.org/pypy/pypy/changeset/55c3c232ac56/ Log: Start diff --git a/rpython/tool/ansi_print.py b/rpython/tool/ansi_print.py --- a/rpython/tool/ansi_print.py +++ b/rpython/tool/ansi_print.py @@ -1,11 +1,44 @@ """ -A color print. +A simple color logger. 
""" import sys from py.io import ansi_print from rpython.tool.ansi_mandelbrot import Driver + +isatty = getattr(sys.stderr, 'isatty', lambda: False) +mandelbrot_driver = Driver() + + +class Logger(object): + + def __init__(self, name): + self.name = name + + def _make_method(subname, colors): + # + def logger_method(self, text): + text = "[%s%s] %s" % (self.name, subname, text) + if isatty(): + col = colors + else: + col = () + ansi_print(text, col) + # + return logger_method + + red = _make_method('', (31,)) + bold = _make_method('', (1,)) + WARNING = _make_method(':WARNING', (31,)) + event = _make_method('', (1,)) + ERROR = _make_method(':ERROR', (1, 31)) + Error = _make_method(':Error', (1, 31)) + info = _make_method(':info', (35,)) + stub = _make_method(':stub', (34,)) + __call__ = _make_method('', ()) + + class AnsiLog: wrote_dot = False # XXX sharing state with all instances @@ -70,5 +103,3 @@ for line in msg.content().splitlines(): ansi_print("[%s] %s" %(":".join(keywords), line), esc, file=self.file, newline=newline, flush=flush) - -ansi_log = AnsiLog() diff --git a/rpython/tool/test/test_ansi_print.py b/rpython/tool/test/test_ansi_print.py new file mode 100644 --- /dev/null +++ b/rpython/tool/test/test_ansi_print.py @@ -0,0 +1,39 @@ +from _pytest.monkeypatch import monkeypatch +from rpython.tool import ansi_print + + +class FakeOutput(object): + def __init__(self, tty=True): + self.monkey = monkeypatch() + self.tty = tty + self.output = [] + def __enter__(self, *args): + self.monkey.setattr(ansi_print, 'ansi_print', self._print) + self.monkey.setattr(ansi_print, 'isatty', self._isatty) + return self.output + def __exit__(self, *args): + self.monkey.undo() + + def _print(self, text, colors): + self.output.append((text, colors)) + def _isatty(self): + return self.tty + + +def test_simple(): + log = ansi_print.Logger('test') + with FakeOutput() as output: + log('Hello') + assert output == [('[test] Hello', ())] + +def test_bold(): + log = ansi_print.Logger('test') + with FakeOutput() as output: + log.bold('Hello') + assert output == [('[test] Hello', (1,))] + +def test_not_a_tty(): + log = ansi_print.Logger('test') + with FakeOutput(tty=False) as output: + log.bold('Hello') + assert output == [('[test] Hello', ())] From pypy.commits at gmail.com Fri Mar 11 10:50:31 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 11 Mar 2016 07:50:31 -0800 (PST) Subject: [pypy-commit] pypy remove-py-log: py.log seems to come with high overhead. a branch to use something custom and simpler Message-ID: <56e2e947.030f1c0a.dc48a.fffff432@mx.google.com> Author: Armin Rigo Branch: remove-py-log Changeset: r82977:cacfb876d1d9 Date: 2016-03-11 16:09 +0100 http://bitbucket.org/pypy/pypy/changeset/cacfb876d1d9/ Log: py.log seems to come with high overhead. 
a branch to use something custom and simpler From pypy.commits at gmail.com Fri Mar 11 11:01:05 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 11 Mar 2016 08:01:05 -0800 (PST) Subject: [pypy-commit] pypy remove-py-log: log.dot() Message-ID: <56e2ebc1.e6bbc20a.265af.0906@mx.google.com> Author: Armin Rigo Branch: remove-py-log Changeset: r82979:2fd6f2c58dda Date: 2016-03-11 17:00 +0100 http://bitbucket.org/pypy/pypy/changeset/2fd6f2c58dda/ Log: log.dot() diff --git a/rpython/tool/ansi_print.py b/rpython/tool/ansi_print.py --- a/rpython/tool/ansi_print.py +++ b/rpython/tool/ansi_print.py @@ -9,6 +9,7 @@ isatty = getattr(sys.stderr, 'isatty', lambda: False) mandelbrot_driver = Driver() +wrote_dot = False # global shared state class Logger(object): @@ -19,12 +20,16 @@ def _make_method(subname, colors): # def logger_method(self, text): + global wrote_dot text = "[%s%s] %s" % (self.name, subname, text) if isatty(): col = colors else: col = () + if wrote_dot: + text = '\n' + text ansi_print(text, col) + wrote_dot = False # return logger_method @@ -38,68 +43,9 @@ stub = _make_method(':stub', (34,)) __call__ = _make_method('', ()) - -class AnsiLog: - wrote_dot = False # XXX sharing state with all instances - - KW_TO_COLOR = { - # color supress - 'red': ((31,), True), - 'bold': ((1,), True), - 'WARNING': ((31,), False), - 'event': ((1,), True), - 'ERROR': ((1, 31), False), - 'Error': ((1, 31), False), - 'info': ((35,), False), - 'stub': ((34,), False), - } - - def __init__(self, kw_to_color={}, file=None): - self.kw_to_color = self.KW_TO_COLOR.copy() - self.kw_to_color.update(kw_to_color) - self.file = file - self.fancy = True - self.isatty = getattr(sys.stderr, 'isatty', lambda: False) - if self.fancy and self.isatty(): - self.mandelbrot_driver = Driver() - else: - self.mandelbrot_driver = None - - def __call__(self, msg): - tty = self.isatty() - flush = False - newline = True - keywords = [] - esc = [] - for kw in msg.keywords: - color, supress = self.kw_to_color.get(kw, (None, False)) - if color: - esc.extend(color) - if not supress: - keywords.append(kw) - if 'start' in keywords: - if tty: - newline = False - flush = True - keywords.remove('start') - elif 'done' in keywords: - if tty: - print >> sys.stderr - return - elif 'dot' in keywords: - if tty: - if self.fancy: - if not AnsiLog.wrote_dot: - self.mandelbrot_driver.reset() - self.mandelbrot_driver.dot() - else: - ansi_print(".", tuple(esc), file=self.file, newline=False, flush=flush) - AnsiLog.wrote_dot = True - return - if AnsiLog.wrote_dot: - AnsiLog.wrote_dot = False - sys.stderr.write("\n") - esc = tuple(esc) - for line in msg.content().splitlines(): - ansi_print("[%s] %s" %(":".join(keywords), line), esc, - file=self.file, newline=newline, flush=flush) + def dot(self): + global wrote_dot + if not wrote_dot: + mandelbrot_driver.reset() + wrote_dot = True + mandelbrot_driver.dot() diff --git a/rpython/tool/test/test_ansi_print.py b/rpython/tool/test/test_ansi_print.py --- a/rpython/tool/test/test_ansi_print.py +++ b/rpython/tool/test/test_ansi_print.py @@ -1,5 +1,5 @@ from _pytest.monkeypatch import monkeypatch -from rpython.tool import ansi_print +from rpython.tool import ansi_print, ansi_mandelbrot class FakeOutput(object): @@ -10,11 +10,14 @@ def __enter__(self, *args): self.monkey.setattr(ansi_print, 'ansi_print', self._print) self.monkey.setattr(ansi_print, 'isatty', self._isatty) + self.monkey.setattr(ansi_mandelbrot, 'ansi_print', self._print) return self.output def __exit__(self, *args): self.monkey.undo() - def 
_print(self, text, colors): + def _print(self, text, colors, newline=True, flush=True): + if newline: + text += '\n' self.output.append((text, colors)) def _isatty(self): return self.tty @@ -24,16 +27,39 @@ log = ansi_print.Logger('test') with FakeOutput() as output: log('Hello') - assert output == [('[test] Hello', ())] + assert output == [('[test] Hello\n', ())] def test_bold(): log = ansi_print.Logger('test') with FakeOutput() as output: log.bold('Hello') - assert output == [('[test] Hello', (1,))] + assert output == [('[test] Hello\n', (1,))] def test_not_a_tty(): log = ansi_print.Logger('test') with FakeOutput(tty=False) as output: log.bold('Hello') - assert output == [('[test] Hello', ())] + assert output == [('[test] Hello\n', ())] + +def test_dot_1(): + log = ansi_print.Logger('test') + with FakeOutput() as output: + log.dot() + assert len(output) == 1 + assert len(output[0][0]) == 1 # single character + # output[0][1] is some ansi color code from mandelbort_driver + +def test_dot_mixing_with_regular_lines(): + log = ansi_print.Logger('test') + with FakeOutput() as output: + log.dot() + log.dot() + log.WARNING('oops') + log.WARNING('maybe?') + log.dot() + assert len(output) == 5 + assert len(output[0][0]) == 1 # single character + assert len(output[1][0]) == 1 # single character + assert output[2] == ('\n[test:WARNING] oops\n', (31,)) + assert output[3] == ('[test:WARNING] maybe?\n', (31,)) + assert len(output[4][0]) == 1 # single character From pypy.commits at gmail.com Fri Mar 11 11:05:00 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 11 Mar 2016 08:05:00 -0800 (PST) Subject: [pypy-commit] pypy gcstress-hypothesis: finally a basic block of instructions can be generated, list strategy may not append some drawn values (critical here) Message-ID: <56e2ecac.4c181c0a.aca25.fffffb39@mx.google.com> Author: Richard Plangger Branch: gcstress-hypothesis Changeset: r82980:2eb2117bac14 Date: 2016-03-11 17:04 +0100 http://bitbucket.org/pypy/pypy/changeset/2eb2117bac14/ Log: finally a basic block of instructions can be generated, list strategy may not append some drawn values (critical here) diff --git a/rpython/jit/backend/llsupport/tl/code.py b/rpython/jit/backend/llsupport/tl/code.py --- a/rpython/jit/backend/llsupport/tl/code.py +++ b/rpython/jit/backend/llsupport/tl/code.py @@ -1,22 +1,34 @@ +import struct -import struct +from hypothesis.stateful import rule, precondition class ByteCode(object): def encode(self, ctx): ctx.append_byte(self.BYTE_CODE) @classmethod - def create_from(self, draw, get_strategy_for): - pt = getattr(self.__init__, '_param_types', []) - return self(*[draw(get_strategy_for(t)) for t in pt]) - def filter_bytecode(self, stack): """ filter this byte code if the stack does not contain the right values on the stack. 
This should only be used for values hypothesis cannot forsee (like list manipulation) """ - return False + required_types = self._stack_types + if len(required_types) > stack.size(): + # more needed types than available + return False + # each type should match the stack entry + for i in range(len(required_types)): + item = stack.peek(i) + j = len(required_types) - i - 1 + rt = required_types[j] + if not item.is_of_type(rt): + return False + return True + + def __repr__(self): + name = self.__class__.__name__ + return name _c = 0 @@ -152,6 +164,7 @@ @requires_param(BYTE_TYP) def __init__(self, size=8): self.size = size + def encode(self, ctx): ctx.append_byte(self.BYTE_CODE) ctx.append_short(self.size) @@ -162,13 +175,16 @@ BYTE_CODE = unique_code() def __init__(self): pass + @classmethod def filter_bytecode(self, stack): + if not ByteCode.filter_bytecode.im_func(self, stack): + return False w_idx = stack.peek(1) w_list = stack.peek(2) if w_idx.value >= len(w_list.items) or \ w_idx.value < 0: - return True - return False + return False + return True @requires_stack(LIST_TYP, IDX_TYP) @leaves_on_stack(LIST_TYP) @@ -176,13 +192,16 @@ BYTE_CODE = unique_code() def __init__(self): pass + @classmethod def filter_bytecode(self, stack): + if not ByteCode.filter_bytecode.im_func(self, stack): + return False w_idx = stack.peek(0) w_list = stack.peek(1) if w_idx.value >= len(w_list.items) or \ w_idx.value < 0: - return True - return False + return False + return True @requires_stack(LIST_TYP, INT_TYP) # TODO VAL_TYP) @leaves_on_stack(LIST_TYP) @@ -192,8 +211,18 @@ pass def op_modifies_list(clazz): + """ NOT_RPYTHON """ return clazz in (DelList, InsertList) +BC_CLASSES = [] +BC_NUM_TO_CLASS = {} + +for name, clazz in locals().items(): + if hasattr(clazz, 'BYTE_CODE'): + BC_CLASSES.append(clazz) + assert clazz.BYTE_CODE not in BC_NUM_TO_CLASS + BC_NUM_TO_CLASS[clazz.BYTE_CODE] = clazz + # remove comment one by one! 
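Editorial note: filter_bytecode is what the generation strategies call on each candidate opcode class to decide whether it may be drawn for the current runtime stack; the inherited check compares the declared _stack_types against the top of the stack, and the list opcodes add an index-bounds check on top of that. Roughly, and reusing the Space/Stack helpers the same way the tests in this branch do (the concrete stack contents here are made up):

    from rpython.jit.backend.llsupport.tl import code, interp, stack

    space = interp.Space()
    run_stack = stack.Stack(0)
    run_stack.append(space.wrap([1, 2, 3]))   # LIST_TYP below ...
    run_stack.append(space.wrap(7))           # ... INT_TYP on top

    # AppendList declares (LIST_TYP, INT_TYP), so it survives the filter
    assert code.AppendList.filter_bytecode(run_stack)
    # an empty stack has too few operands, so the same opcode is filtered out
    assert not code.AppendList.filter_bytecode(stack.Stack(0))
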
#@requires_stack() diff --git a/rpython/jit/backend/llsupport/tl/stack.py b/rpython/jit/backend/llsupport/tl/stack.py --- a/rpython/jit/backend/llsupport/tl/stack.py +++ b/rpython/jit/backend/llsupport/tl/stack.py @@ -48,7 +48,9 @@ if stackpos < 0: raise IndexError self.stackpos = stackpos # always store a known-nonneg integer here - return self.stack[stackpos] + elem = self.stack[stackpos] + self.stack[stackpos] = None + return elem def pick(self, i): n = self.stackpos - i - 1 @@ -91,6 +93,6 @@ def __repr__(self): """ NOT_RPYTHON """ - entry_types = [e.TYPE for e in self.stack] + entry_types = [e.TYPE for e in self.stack[:self.stackpos]] return "Stack(%s)" % ','.join(entry_types) diff --git a/rpython/jit/backend/llsupport/tl/test/code_strategies.py b/rpython/jit/backend/llsupport/tl/test/code_strategies.py --- a/rpython/jit/backend/llsupport/tl/test/code_strategies.py +++ b/rpython/jit/backend/llsupport/tl/test/code_strategies.py @@ -1,27 +1,24 @@ from hypothesis import strategies as st from hypothesis.control import assume -from hypothesis.strategies import defines_strategy, composite +from hypothesis.strategies import composite from rpython.jit.backend.llsupport.tl import code, interp, stack -from rpython.jit.backend.llsupport.tl.code import (all_types, - INT_TYP, STR_TYP, LIST_TYP, SHORT_TYP, BYTE_TYP, - COND_TYP, IDX_TYP) -from hypothesis.searchstrategy.strategies import OneOfStrategy -from hypothesis.searchstrategy.collections import TupleStrategy +from hypothesis.searchstrategy.collections import TupleStrategy, ListStrategy +import hypothesis.internal.conjecture.utils as cu def get_strategy_for(typ): - if typ == INT_TYP: + if typ == code.INT_TYP: return st.integers(min_value=-2**31, max_value=2**31-1) - elif typ == IDX_TYP: + elif typ == code.IDX_TYP: return st.integers(min_value=-2**31, max_value=2**31-1) - elif typ == SHORT_TYP: + elif typ == code.SHORT_TYP: return st.integers(min_value=-2**15, max_value=2**15-1) - elif typ == BYTE_TYP: + elif typ == code.BYTE_TYP: return st.integers(min_value=-2**7, max_value=2**7-1) - elif typ == COND_TYP: + elif typ == code.COND_TYP: return st.integers(min_value=0, max_value=4) - elif typ == STR_TYP: + elif typ == code.STR_TYP: return st.text().filter(lambda x: x is not None) - elif typ == LIST_TYP: + elif typ == code.LIST_TYP: # TODO recursive result = st.lists(elements=st.one_of(get_strategy_for('i'))) return result.filter(lambda x: x is not None) @@ -30,13 +27,10 @@ STD_SPACE = interp.Space() - at defines_strategy -def stack_entry(types=all_types): +def stack_entry(types=code.all_types): return st.one_of(*[get_strategy_for(t) for t in types]) - at defines_strategy -def runtime_stack(min_size=0, average_size=5, max_size=4096, - types=all_types): +def runtime_stack(min_size=0, average_size=5, max_size=4096, types=code.all_types): if max_size == 0: return st.just(stack.Stack(0)) stack_entries = st.lists(stack_entry(all_types), min_size=min_size, @@ -45,62 +39,86 @@ return stack_entries.map(lambda elems: \ stack.Stack.from_items(STD_SPACE, elems)) -def byte_code_classes(): - for name, clazz in code.__dict__.items(): - if hasattr(clazz, 'BYTE_CODE'): - yield clazz - def get_byte_code_class(num): - for clazz in byte_code_classes(): - if clazz.BYTE_CODE == num: - return clazz - return None + return code.BC_NUM_TO_CLASS[num] def find_next(stack, type, off=0): i = off while i < stack.size(): - if stack.peek(i).is_of_type(LIST_TYP): + if stack.peek(i).is_of_type(type): break i += 1 else: return None return stack.peek(i) - at defines_strategy +class 
BasicBlockStrategy(ListStrategy): + """ Generates a list of values, but does not throw away elements. + See XXX """ + + def do_draw(self, data): + if self.max_size == self.min_size: + return [ + data.draw(self.element_strategy) + for _ in range(self.min_size) + ] + + stopping_value = 1 - 1.0 / (1 + self.average_length) + result = [] + while True: + data.start_example() + more = cu.biased_coin(data, stopping_value) + value = data.draw(self.element_strategy) + data.stop_example() + if not more: + if len(result) < self.min_size: + # XXX if not appended the resulting list will have + # a bigger stack but a missing op code + result.append(value) + continue + else: + break + result.append(value) + if self.max_size < float('inf'): + result = result[:self.max_size] + return result + + def __repr__(self): + return ( + 'BasicBlockStrategy(%r, min_size=%r, average_size=%r, max_size=%r)' + ) % ( + self.element_strategy, self.min_size, self.average_length, + self.max_size + ) + + at st.defines_strategy +def basic_block(strategy, min_size=1, average_size=8, max_size=128): + return BasicBlockStrategy([strategy], min_size=min_size, + average_length=average_size, + max_size=max_size) + + at st.defines_strategy def bytecode_class(stack): - def filter_using_stack(bytecode_class): - required_types = bytecode_class._stack_types - if len(required_types) > stack.size(): - return False - for i in range(len(required_types)): - item = stack.peek(i) - j = len(required_types) - i - 1 - rt = required_types[j] - if not item.is_of_type(rt): - return False - if code.op_modifies_list(bytecode_class): - w_list = find_next(stack, LIST_TYP) - if w_list is None or len(w_list.items) == 0: - # on an empty list we cannot insert or delete - return False - return True - clazzes = filter(filter_using_stack, byte_code_classes()) - return st.sampled_from(clazzes) + # get a byte code class, only allow what is valid for the run_stack + return st.sampled_from(code.BC_CLASSES).filter(lambda clazz: clazz.filter_bytecode(stack)) + @composite def bytecode(draw, max_stack_size=4096): # get a stack that is the same for one test run - stack_strat = runtime_stack(max_size=max_stack_size) - run_stack = draw(st.shared(stack_strat, 'stack')) + run_stack = draw(st.shared(st.just(stack.Stack(0)), 'stack2')) + + # get a byte code class, only allow what is valid for the run_stack + clazz = draw(st.sampled_from(code.BC_CLASSES).filter(lambda clazz: clazz.filter_bytecode(run_stack))) + + # create an instance of the chosen class + pt = getattr(clazz.__init__, '_param_types', []) + args = [draw(get_strategy_for(t)) for t in pt] + inst = clazz(*args) + # propagate the changes to the stack - orig_stack = run_stack.copy(values=True) - assert orig_stack is not run_stack + bytecode, consts = code.Context().transform([inst]) + interp.dispatch_once(STD_SPACE, 0, bytecode, consts, run_stack) - # get a byte code class - clazz = draw(bytecode_class(run_stack)) - inst = clazz.create_from(draw, get_strategy_for) - assume(not inst.filter_bytecode(run_stack)) - bytecode, consts = code.Context().transform([inst]) + return inst - interp.dispatch_once(STD_SPACE, 0, bytecode, consts, run_stack) - return inst, orig_stack diff --git a/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py b/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py --- a/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py +++ b/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py @@ -1,5 +1,5 @@ import py -from hypothesis import given +from hypothesis import given, settings, Verbosity 
from hypothesis.strategies import lists, data from rpython.jit.backend.llsupport.tl import code, interp from rpython.jit.backend.llsupport.tl.stack import Stack @@ -23,7 +23,6 @@ assert c.get_short(3) == 1 class TestCodeStrategies(object): - DEFAULT_ACTION_CLASSES = (code.CreateList, code.PutInt, code.LoadStr) @@ -84,29 +83,25 @@ assert(clazz in self.DEFAULT_ACTION_CLASSES + \ (code.InsertList, code.AppendList)) + @given(data()) + def test_empty_stack_no_list_op(self, data): + space = interp.Space() + stack = Stack(0) + for i in range(10): + clazz = data.draw(st.bytecode_class(stack)) + assert not (clazz in (code.DelList, code.InsertList, + code.AppendList, code.AddList, + code.AddStr)) class TestInterp(object): - @given(st.bytecode()) - def test_consume_stack(self, args): - bc_obj, stack = args - bytecode, consts = code.Context().transform([bc_obj]) - space = interp.Space() - i = interp.dispatch_once(space, 0, bytecode, consts, stack) - assert i == len(bytecode) - clazz = st.get_byte_code_class(ord(bytecode[0])) - assert stack.size() >= len(clazz._return_on_stack_types) - for i,type in enumerate(clazz._return_on_stack_types): - j = len(clazz._return_on_stack_types) - i - 1 - assert stack.peek(j).is_of_type(type) - @given(lists(st.bytecode(max_stack_size=0), min_size=1)) - def test_execute_bytecode_block(self, codes): - bc_obj_list = [bc for bc,stack in codes] - _, stack = codes[0] + @given(st.basic_block(st.bytecode(), min_size=1)) + def test_execute_bytecode_block(self, bc_obj_list): bytecode, consts = code.Context().transform(bc_obj_list) space = interp.Space() pc = 0 end = len(bytecode) + stack = Stack(0) while pc < end: pc = interp.dispatch_once(space, pc, bytecode, consts, stack) assert pc == len(bytecode) diff --git a/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py b/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py --- a/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py +++ b/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py @@ -52,10 +52,8 @@ return res.returncode, res.out, res.err # cannot have a non empty stack, cannot pass stack to executable! - @given(st.bytecode(max_stack_size=0)) - def test_execute_single_bytecode(self, program): - bc_obj, stack = program - assert stack.size() == 0 + @given(st.bytecode()) + def test_execute_single_bytecode(self, bc_obj): bytecode, consts = code.Context().transform([bc_obj]) result, out, err = self.execute(bytecode, consts) if result != 0: @@ -63,11 +61,8 @@ " stderr:\n%s\nstdout:\n%s\n") % (result, err, out)) # cannot have a non empty stack, cannot pass stack to executable! 
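Editorial note: this test and the zrpy variant just below drive a drawn basic block the same way: hypothesis produces a list of byte-code instances, Context().transform encodes them, and dispatch_once is stepped until the program counter reaches the end of the encoded block. A condensed sketch of that driver loop; the code_strategies import path is spelled out here on the assumption that the tests refer to the same module as st:

    from hypothesis import given
    from rpython.jit.backend.llsupport.tl import code, interp
    from rpython.jit.backend.llsupport.tl.stack import Stack
    from rpython.jit.backend.llsupport.tl.test import code_strategies as st

    @given(st.basic_block(st.bytecode(), min_size=1, average_size=8))
    def run_block(bc_obj_list):
        bytecode, consts = code.Context().transform(bc_obj_list)
        space = interp.Space()
        pc, end, run_stack = 0, len(bytecode), Stack(0)
        while pc < end:
            # each dispatch returns the position of the next instruction
            pc = interp.dispatch_once(space, pc, bytecode, consts, run_stack)
        assert pc == end
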
- @given(lists(st.bytecode(max_stack_size=0), min_size=1, average_size=24)) - def test_execute_bytecodes(self, args): - _, stack = args[0] - assert stack.size() == 0 - bc_objs = [bc for bc, _ in args] + @given(st.basic_block(st.bytecode(), min_size=1, average_size=24)) + def test_execute_basic_block(self, bc_objs): bytecode, consts = code.Context().transform(bc_objs) result, out, err = self.execute(bytecode, consts) if result != 0: From pypy.commits at gmail.com Fri Mar 11 11:18:35 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 11 Mar 2016 08:18:35 -0800 (PST) Subject: [pypy-commit] pypy remove-py-log: Support for unknown names Message-ID: <56e2efdb.8673c20a.01d5.0742@mx.google.com> Author: Armin Rigo Branch: remove-py-log Changeset: r82981:6066499ee520 Date: 2016-03-11 17:17 +0100 http://bitbucket.org/pypy/pypy/changeset/6066499ee520/ Log: Support for unknown names diff --git a/rpython/tool/ansi_print.py b/rpython/tool/ansi_print.py --- a/rpython/tool/ansi_print.py +++ b/rpython/tool/ansi_print.py @@ -12,27 +12,29 @@ wrote_dot = False # global shared state -class Logger(object): +def _make_method(subname, colors): + # + def logger_method(self, text): + global wrote_dot + text = "[%s%s] %s" % (self.name, subname, text) + if isatty(): + col = colors + else: + col = () + if wrote_dot: + text = '\n' + text + ansi_print(text, col) + wrote_dot = False + # + return logger_method + + +class AnsiLogger(object): def __init__(self, name): self.name = name - def _make_method(subname, colors): - # - def logger_method(self, text): - global wrote_dot - text = "[%s%s] %s" % (self.name, subname, text) - if isatty(): - col = colors - else: - col = () - if wrote_dot: - text = '\n' + text - ansi_print(text, col) - wrote_dot = False - # - return logger_method - + # these methods write "[name:method] text" to the terminal, with color codes red = _make_method('', (31,)) bold = _make_method('', (1,)) WARNING = _make_method(':WARNING', (31,)) @@ -41,11 +43,25 @@ Error = _make_method(':Error', (1, 31)) info = _make_method(':info', (35,)) stub = _make_method(':stub', (34,)) + + # directly calling the logger writes "[name] text" with no particular color __call__ = _make_method('', ()) + # calling unknown method names writes "[name:method] text" without color + def __getattr__(self, name): + if name[0].isalpha(): + method = _make_method(':' + name, ()) + setattr(self.__class__, name, method) + return getattr(self, name) + raise AttributeError(name) + def dot(self): + """Output a mandelbrot dot to the terminal.""" global wrote_dot if not wrote_dot: mandelbrot_driver.reset() wrote_dot = True mandelbrot_driver.dot() + + def debug(self, info): + """For messages that are dropped. 
Can be monkeypatched in tests.""" diff --git a/rpython/tool/test/test_ansi_print.py b/rpython/tool/test/test_ansi_print.py --- a/rpython/tool/test/test_ansi_print.py +++ b/rpython/tool/test/test_ansi_print.py @@ -8,6 +8,7 @@ self.tty = tty self.output = [] def __enter__(self, *args): + ansi_print.wrote_dot = False self.monkey.setattr(ansi_print, 'ansi_print', self._print) self.monkey.setattr(ansi_print, 'isatty', self._isatty) self.monkey.setattr(ansi_mandelbrot, 'ansi_print', self._print) @@ -24,25 +25,25 @@ def test_simple(): - log = ansi_print.Logger('test') + log = ansi_print.AnsiLogger('test') with FakeOutput() as output: log('Hello') assert output == [('[test] Hello\n', ())] def test_bold(): - log = ansi_print.Logger('test') + log = ansi_print.AnsiLogger('test') with FakeOutput() as output: log.bold('Hello') assert output == [('[test] Hello\n', (1,))] def test_not_a_tty(): - log = ansi_print.Logger('test') + log = ansi_print.AnsiLogger('test') with FakeOutput(tty=False) as output: log.bold('Hello') assert output == [('[test] Hello\n', ())] def test_dot_1(): - log = ansi_print.Logger('test') + log = ansi_print.AnsiLogger('test') with FakeOutput() as output: log.dot() assert len(output) == 1 @@ -50,7 +51,7 @@ # output[0][1] is some ansi color code from mandelbort_driver def test_dot_mixing_with_regular_lines(): - log = ansi_print.Logger('test') + log = ansi_print.AnsiLogger('test') with FakeOutput() as output: log.dot() log.dot() @@ -63,3 +64,13 @@ assert output[2] == ('\n[test:WARNING] oops\n', (31,)) assert output[3] == ('[test:WARNING] maybe?\n', (31,)) assert len(output[4][0]) == 1 # single character + +def test_unknown_method_names(): + log = ansi_print.AnsiLogger('test') + with FakeOutput() as output: + log.foo('Hello') + log.foo('World') + log.BAR('!') + assert output == [('[test:foo] Hello\n', ()), + ('[test:foo] World\n', ()), + ('[test:BAR] !\n', ())] From pypy.commits at gmail.com Fri Mar 11 11:22:26 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 11 Mar 2016 08:22:26 -0800 (PST) Subject: [pypy-commit] pypy gcstress-hypothesis: directly invoke filter instead of the filter method of a strategy Message-ID: <56e2f0c2.49f9c20a.dd60d.0823@mx.google.com> Author: Richard Plangger Branch: gcstress-hypothesis Changeset: r82982:c9580f93ed9e Date: 2016-03-11 17:21 +0100 http://bitbucket.org/pypy/pypy/changeset/c9580f93ed9e/ Log: directly invoke filter instead of the filter method of a strategy diff --git a/rpython/jit/backend/llsupport/tl/test/code_strategies.py b/rpython/jit/backend/llsupport/tl/test/code_strategies.py --- a/rpython/jit/backend/llsupport/tl/test/code_strategies.py +++ b/rpython/jit/backend/llsupport/tl/test/code_strategies.py @@ -100,7 +100,8 @@ @st.defines_strategy def bytecode_class(stack): # get a byte code class, only allow what is valid for the run_stack - return st.sampled_from(code.BC_CLASSES).filter(lambda clazz: clazz.filter_bytecode(stack)) + clazzes = filter(lambda clazz: clazz.filter_bytecode(stack), code.BC_CLASSES) + return st.sampled_from(clazzes) @composite @@ -109,7 +110,8 @@ run_stack = draw(st.shared(st.just(stack.Stack(0)), 'stack2')) # get a byte code class, only allow what is valid for the run_stack - clazz = draw(st.sampled_from(code.BC_CLASSES).filter(lambda clazz: clazz.filter_bytecode(run_stack))) + clazzes = filter(lambda clazz: clazz.filter_bytecode(run_stack), code.BC_CLASSES) + clazz = draw(st.sampled_from(clazzes)) # create an instance of the chosen class pt = getattr(clazz.__init__, '_param_types', []) From pypy.commits at 
gmail.com Fri Mar 11 11:39:52 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 11 Mar 2016 08:39:52 -0800 (PST) Subject: [pypy-commit] pypy remove-py-log: Trying to change all places that use the logger Message-ID: <56e2f4d8.a3f6c20a.c5d46.101b@mx.google.com> Author: Armin Rigo Branch: remove-py-log Changeset: r82983:eaeeea383e67 Date: 2016-03-11 17:39 +0100 http://bitbucket.org/pypy/pypy/changeset/eaeeea383e67/ Log: Trying to change all places that use the logger diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -3,7 +3,7 @@ import types from collections import defaultdict -from rpython.tool.ansi_print import ansi_log +from rpython.tool.ansi_print import AnsiLogger from rpython.tool.pairtype import pair from rpython.tool.error import (format_blocked_annotation_error, gather_error, source_lines) @@ -15,9 +15,7 @@ from rpython.annotator.bookkeeper import Bookkeeper from rpython.rtyper.normalizecalls import perform_normalizations -import py -log = py.log.Producer("annrpython") -py.log.setconsumer("annrpython", ansi_log) +log = AnsiLogger("annrpython") class RPythonAnnotator(object): diff --git a/rpython/jit/backend/ppc/runner.py b/rpython/jit/backend/ppc/runner.py --- a/rpython/jit/backend/ppc/runner.py +++ b/rpython/jit/backend/ppc/runner.py @@ -1,4 +1,3 @@ -import py from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rtyper.llinterp import LLInterpreter from rpython.rlib import rgc @@ -9,9 +8,6 @@ from rpython.jit.backend.ppc.codebuilder import PPCBuilder from rpython.jit.backend.ppc import register as r -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer('jitbackend') -py.log.setconsumer('jitbackend', ansi_log) class PPC_CPU(AbstractLLCPU): diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py --- a/rpython/jit/backend/x86/runner.py +++ b/rpython/jit/backend/x86/runner.py @@ -10,10 +10,6 @@ import sys -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer('jitbackend') -py.log.setconsumer('jitbackend', ansi_log) - class AbstractX86CPU(AbstractLLCPU): debug = True diff --git a/rpython/jit/codewriter/policy.py b/rpython/jit/codewriter/policy.py --- a/rpython/jit/codewriter/policy.py +++ b/rpython/jit/codewriter/policy.py @@ -1,10 +1,8 @@ from rpython.jit.metainterp import history from rpython.tool.udir import udir +from rpython.tool.ansi_print import AnsiLogger -import py -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer('jitcodewriter') -py.log.setconsumer('jitcodewriter', ansi_log) +log = AnsiLogger('jitcodewriter') class JitPolicy(object): diff --git a/rpython/memory/gctransform/log.py b/rpython/memory/gctransform/log.py --- a/rpython/memory/gctransform/log.py +++ b/rpython/memory/gctransform/log.py @@ -1,4 +1,3 @@ -import py -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer("gctransform") -py.log.setconsumer("gctransform", ansi_log) +from rpython.tool.ansi_print import AnsiLogger + +log = AnsiLogger("gctransform") diff --git a/rpython/memory/test/gc_test_base.py b/rpython/memory/test/gc_test_base.py --- a/rpython/memory/test/gc_test_base.py +++ b/rpython/memory/test/gc_test_base.py @@ -3,6 +3,7 @@ from rpython.memory import gcwrapper from rpython.memory.test import snippet +from rpython.rtyper import llinterp from rpython.rtyper.test.test_llinterp import get_interpreter from rpython.rtyper.lltypesystem import lltype from 
rpython.rtyper.lltypesystem.lloperation import llop @@ -15,11 +16,11 @@ WORD = LONG_BIT // 8 -def stdout_ignore_ll_functions(msg): - strmsg = str(msg) - if "evaluating" in strmsg and "ll_" in strmsg: - return - print >>sys.stdout, strmsg +## def stdout_ignore_ll_functions(msg): +## strmsg = str(msg) +## if "evaluating" in strmsg and "ll_" in strmsg: +## return +## print >>sys.stdout, strmsg class GCTest(object): @@ -31,13 +32,11 @@ WREF_IS_INVALID_BEFORE_DEL_IS_CALLED = False def setup_class(cls): - cls._saved_logstate = py.log._getstate() - py.log.setconsumer("llinterp", py.log.STDOUT) - py.log.setconsumer("llinterp frame", stdout_ignore_ll_functions) - py.log.setconsumer("llinterp operation", None) + # switch on logging of interp to show more info on failing tests + llinterp.log.output_disabled = False def teardown_class(cls): - py.log._setstate(cls._saved_logstate) + llinterp.log.output_disabled = True def interpret(self, func, values, **kwds): interp, graph = get_interpreter(func, values, **kwds) diff --git a/rpython/rlib/clibffi.py b/rpython/rlib/clibffi.py --- a/rpython/rlib/clibffi.py +++ b/rpython/rlib/clibffi.py @@ -22,9 +22,6 @@ import sys import ctypes.util -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer("libffi") -py.log.setconsumer("libffi", ansi_log) # maaaybe isinstance here would be better. Think _MSVC = platform.name == "msvc" diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -14,9 +14,15 @@ r_uint, r_longlong, r_ulonglong, r_longlonglong) from rpython.rtyper.lltypesystem import lltype, llmemory, lloperation, llheap from rpython.rtyper import rclass +from rpython.tool.ansi_print import AnsiLogger -log = py.log.Producer('llinterp') +# by default this logger's output is disabled. +# e.g. tests can then switch on logging to get more help +# for failing tests +log = AnsiLogger('llinterp') +log.output_disabled = True + class LLException(Exception): def __init__(self, *args): @@ -1367,10 +1373,3 @@ class _address_of_thread_local(object): _TYPE = llmemory.Address is_fake_thread_local_addr = True - - -# by default we route all logging messages to nothingness -# e.g. 
tests can then switch on logging to get more help -# for failing tests -from rpython.tool.ansi_print import ansi_log -py.log.setconsumer('llinterp', ansi_log) diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -454,13 +454,9 @@ # logging/warning -import py -from rpython.tool.ansi_print import ansi_log +from rpython.tool.ansi_print import AnsiLogger -log = py.log.Producer("rtyper") -py.log.setconsumer("rtyper", ansi_log) -py.log.setconsumer("rtyper translating", None) -py.log.setconsumer("rtyper debug", None) +log = AnsiLogger("rtyper") def warning(msg): log.WARNING(msg) diff --git a/rpython/rtyper/test/test_llinterp.py b/rpython/rtyper/test/test_llinterp.py --- a/rpython/rtyper/test/test_llinterp.py +++ b/rpython/rtyper/test/test_llinterp.py @@ -2,7 +2,7 @@ import py import sys from rpython.rtyper.lltypesystem.lltype import typeOf, Void, malloc, free -from rpython.rtyper.llinterp import LLInterpreter, LLException +from rpython.rtyper.llinterp import LLInterpreter, LLException, log from rpython.rtyper.rmodel import inputconst from rpython.rtyper.annlowlevel import hlstr, llhelper from rpython.rtyper.exceptiondata import UnknownException @@ -16,13 +16,10 @@ from rpython.rtyper.rtyper import llinterp_backend # switch on logging of interp to show more info on failing tests - def setup_module(mod): - mod.logstate = py.log._getstate() - py.log.setconsumer("llinterp", py.log.STDOUT) - + log.output_disabled = False def teardown_module(mod): - py.log._setstate(mod.logstate) + log.output_disabled = True def gengraph(func, argtypes=[], viewbefore='auto', policy=None, diff --git a/rpython/rtyper/test/test_rtyper.py b/rpython/rtyper/test/test_rtyper.py --- a/rpython/rtyper/test/test_rtyper.py +++ b/rpython/rtyper/test/test_rtyper.py @@ -1,5 +1,3 @@ -import py - from rpython.annotator import model as annmodel, annrpython from rpython.flowspace.model import Constant from rpython.rtyper import rmodel @@ -9,14 +7,6 @@ from rpython.translator.translator import TranslationContext, graphof -def setup_module(mod): - mod.logstate = py.log._getstate() - py.log.setconsumer("rtyper", py.log.STDOUT) - py.log.setconsumer("annrpython", None) - -def teardown_module(mod): - py.log._setstate(mod.logstate) - def test_reprkeys_dont_clash(): stup1 = annmodel.SomeTuple((annmodel.SomeFloat(), annmodel.SomeInteger())) diff --git a/rpython/tool/ansi_print.py b/rpython/tool/ansi_print.py --- a/rpython/tool/ansi_print.py +++ b/rpython/tool/ansi_print.py @@ -16,6 +16,8 @@ # def logger_method(self, text): global wrote_dot + if self.output_disabled: + return text = "[%s%s] %s" % (self.name, subname, text) if isatty(): col = colors @@ -30,6 +32,7 @@ class AnsiLogger(object): + output_disabled = False def __init__(self, name): self.name = name @@ -44,6 +47,13 @@ info = _make_method(':info', (35,)) stub = _make_method(':stub', (34,)) + # some more methods used by sandlib + call = _make_method(':call', (34,)) + result = _make_method(':result', (34,)) + exception = _make_method(':exception', (34,)), + vpath = _make_method(':vpath', (35,)), + timeout = _make_method('', (1, 31)), + # directly calling the logger writes "[name] text" with no particular color __call__ = _make_method('', ()) diff --git a/rpython/tool/error.py b/rpython/tool/error.py --- a/rpython/tool/error.py +++ b/rpython/tool/error.py @@ -8,12 +8,8 @@ from rpython.flowspace.model import Variable from rpython.rlib import jit -from rpython.tool.ansi_print import ansi_log -log = 
py.log.Producer("error") -py.log.setconsumer("error", ansi_log) - SHOW_TRACEBACK = False SHOW_ANNOTATIONS = True SHOW_DEFAULT_LINES_OF_CODE = 0 diff --git a/rpython/tool/test/test_ansi_print.py b/rpython/tool/test/test_ansi_print.py --- a/rpython/tool/test/test_ansi_print.py +++ b/rpython/tool/test/test_ansi_print.py @@ -74,3 +74,11 @@ assert output == [('[test:foo] Hello\n', ()), ('[test:foo] World\n', ()), ('[test:BAR] !\n', ())] + +def test_output_disabled(): + log = ansi_print.AnsiLogger('test') + with FakeOutput() as output: + log('Hello') + log.output_disabled = True + log('World') + assert output == [('[test] Hello\n', ())] diff --git a/rpython/tool/version.py b/rpython/tool/version.py --- a/rpython/tool/version.py +++ b/rpython/tool/version.py @@ -10,9 +10,8 @@ if not err: return - from rpython.tool.ansi_print import ansi_log - log = py.log.Producer("version") - py.log.setconsumer("version", ansi_log) + from rpython.tool.ansi_print import AnsiLogger + log = AnsiLogger("version") log.WARNING('Errors getting %s information: %s' % (repo_type, err)) def get_repo_version_info(hgexe=None, root=rpythonroot): diff --git a/rpython/translator/backendopt/canraise.py b/rpython/translator/backendopt/canraise.py --- a/rpython/translator/backendopt/canraise.py +++ b/rpython/translator/backendopt/canraise.py @@ -1,11 +1,8 @@ -import py - from rpython.rtyper.lltypesystem.lloperation import LL_OPERATIONS -from rpython.tool.ansi_print import ansi_log +from rpython.tool.ansi_print import AnsiLogger from rpython.translator.backendopt import graphanalyze -log = py.log.Producer("canraise") -py.log.setconsumer("canraise", ansi_log) +log = AnsiLogger("canraise") class RaiseAnalyzer(graphanalyze.BoolGraphAnalyzer): diff --git a/rpython/translator/backendopt/support.py b/rpython/translator/backendopt/support.py --- a/rpython/translator/backendopt/support.py +++ b/rpython/translator/backendopt/support.py @@ -1,13 +1,9 @@ -import py - from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.rmodel import inputconst -from rpython.tool.ansi_print import ansi_log +from rpython.tool.ansi_print import AnsiLogger from rpython.translator.simplify import get_graph - -log = py.log.Producer("backendopt") -py.log.setconsumer("backendopt", ansi_log) +log = AnsiLogger("backendopt") def graph_operations(graph): diff --git a/rpython/translator/backendopt/test/test_removenoops.py b/rpython/translator/backendopt/test/test_removenoops.py --- a/rpython/translator/backendopt/test/test_removenoops.py +++ b/rpython/translator/backendopt/test/test_removenoops.py @@ -12,8 +12,6 @@ from rpython.rtyper.llinterp import LLInterpreter from rpython.conftest import option -import py -log = py.log.Producer('test_backendoptimization') def get_graph(fn, signature, all_opts=True): t = TranslationContext() diff --git a/rpython/translator/c/support.py b/rpython/translator/c/support.py --- a/rpython/translator/c/support.py +++ b/rpython/translator/c/support.py @@ -166,7 +166,5 @@ # logging -import py -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer("c") -py.log.setconsumer("c", ansi_log) +from rpython.tool.ansi_print import AnsiLogger +log = AnsiLogger("c") diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -14,9 +14,9 @@ annotated_jit_entrypoints import py -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer("translation") -py.log.setconsumer("translation", ansi_log) +from rpython.tool.ansi_print import 
AnsiLogger + +log = AnsiLogger("translation") def taskdef(deps, title, new_state=None, expected_states=[], @@ -524,7 +524,6 @@ @taskdef([STACKCHECKINSERTION, '?'+BACKENDOPT, RTYPE], "LLInterpreting") def task_llinterpret_lltype(self): from rpython.rtyper.llinterp import LLInterpreter - py.log.setconsumer("llinterp operation", None) translator = self.translator interp = LLInterpreter(translator.rtyper) @@ -534,7 +533,7 @@ self.extra.get('get_llinterp_args', lambda: [])()) - log.llinterpret.event("result -> %s" % v) + log.llinterpret("result -> %s" % v) def proceed(self, goals): if not goals: diff --git a/rpython/translator/goal/timing.py b/rpython/translator/goal/timing.py --- a/rpython/translator/goal/timing.py +++ b/rpython/translator/goal/timing.py @@ -5,9 +5,8 @@ import time import py -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer("Timer") -py.log.setconsumer("Timer", ansi_log) +from rpython.tool.ansi_print import AnsiLogger +log = AnsiLogger("Timer") class Timer(object): def __init__(self, timer=time.time): diff --git a/rpython/translator/goal/translate.py b/rpython/translator/goal/translate.py --- a/rpython/translator/goal/translate.py +++ b/rpython/translator/goal/translate.py @@ -83,9 +83,8 @@ ]) import optparse -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer("translation") -py.log.setconsumer("translation", ansi_log) +from rpython.tool.ansi_print import AnsiLogger +log = AnsiLogger("translation") def load_target(targetspec): log.info("Translating target as defined by %s" % targetspec) diff --git a/rpython/translator/platform/__init__.py b/rpython/translator/platform/__init__.py --- a/rpython/translator/platform/__init__.py +++ b/rpython/translator/platform/__init__.py @@ -5,8 +5,9 @@ from rpython.tool.runsubprocess import run_subprocess as _run_subprocess from rpython.tool.udir import udir from rpython.tool.version import rpythonroot +from rpython.tool.ansi_print import AnsiLogger -log = py.log.Producer("platform") +log = AnsiLogger("platform") class CompilationError(Exception): diff --git a/rpython/translator/sandbox/rsandbox.py b/rpython/translator/sandbox/rsandbox.py --- a/rpython/translator/sandbox/rsandbox.py +++ b/rpython/translator/sandbox/rsandbox.py @@ -17,10 +17,9 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator -from rpython.tool.ansi_print import ansi_log +from rpython.tool.ansi_print import AnsiLogger -log = py.log.Producer("sandbox") -py.log.setconsumer("sandbox", ansi_log) +log = AnsiLogger("sandbox") # a version of os.read() and os.write() that are not mangled diff --git a/rpython/translator/sandbox/sandlib.py b/rpython/translator/sandbox/sandlib.py --- a/rpython/translator/sandbox/sandlib.py +++ b/rpython/translator/sandbox/sandlib.py @@ -15,21 +15,8 @@ def create_log(): """Make and return a log for the sandbox to use, if needed.""" - # These imports are local to avoid importing pypy if we don't need to. 
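Editorial note: every call site touched in this changeset follows the same substitution: the Producer/setconsumer pair collapses into a single AnsiLogger instance, and nested producers such as log.llinterpret.event(...) become plain method calls, which the __getattr__ hook added earlier in the branch resolves on demand. A before/after sketch with an arbitrary module name (the old form is kept as a comment only, since the module-level ansi_log consumer no longer exists on this branch):

    # before:  log = py.log.Producer("mymodule")
    #          py.log.setconsumer("mymodule", ansi_log)
    # after:
    from rpython.tool.ansi_print import AnsiLogger

    log = AnsiLogger("mymodule")
    log.WARNING("disk full")         # "[mymodule:WARNING] disk full", red on a tty
    log.llinterpret("result -> 42")  # unknown names resolved on demand, uncoloured
    log.dot()                        # one mandelbrot progress dot instead of a full line
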
- from rpython.tool.ansi_print import AnsiLog - - class MyAnsiLog(AnsiLog): - KW_TO_COLOR = { - 'call': ((34,), False), - 'result': ((34,), False), - 'exception': ((34,), False), - 'vpath': ((35,), False), - 'timeout': ((1, 31), True), - } - - log = py.log.Producer("sandlib") - py.log.setconsumer("sandlib", MyAnsiLog()) - return log + from rpython.tool.ansi_print import AnsiLogger + return AnsiLogger("sandlib") # Note: we use lib_pypy/marshal.py instead of the built-in marshal # for two reasons. The built-in module could be made to segfault diff --git a/rpython/translator/translator.py b/rpython/translator/translator.py --- a/rpython/translator/translator.py +++ b/rpython/translator/translator.py @@ -10,13 +10,11 @@ from rpython.translator import simplify from rpython.flowspace.model import FunctionGraph, checkgraph, Block from rpython.flowspace.objspace import build_flow -from rpython.tool.ansi_print import ansi_log +from rpython.tool.ansi_print import AnsiLogger from rpython.tool.sourcetools import nice_repr_for_func from rpython.config.translationoption import get_platform -import py -log = py.log.Producer("flowgraph") -py.log.setconsumer("flowgraph", ansi_log) +log = AnsiLogger("flowgraph") class TranslationContext(object): FLOWING_FLAGS = { @@ -50,14 +48,12 @@ graph = self._prebuilt_graphs.pop(func) else: if self.config.translation.verbose: - log.start(nice_repr_for_func(func)) + log(nice_repr_for_func(func)) graph = build_flow(func) simplify.simplify_graph(graph) if self.config.translation.list_comprehension_operations: simplify.detect_list_comprehension(graph) - if self.config.translation.verbose: - log.done(func.__name__) - elif not mute_dot: + if not self.config.translation.verbose and not mute_dot: log.dot() self.graphs.append(graph) # store the graph in our list return graph From pypy.commits at gmail.com Fri Mar 11 11:47:28 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 11 Mar 2016 08:47:28 -0800 (PST) Subject: [pypy-commit] pypy remove-py-log: Fix: the logger is not recursive, so "log.some_name" gives a method, not Message-ID: <56e2f6a0.e853c20a.2952d.1aa4@mx.google.com> Author: Armin Rigo Branch: remove-py-log Changeset: r82984:ea0858104188 Date: 2016-03-11 17:46 +0100 http://bitbucket.org/pypy/pypy/changeset/ea0858104188/ Log: Fix: the logger is not recursive, so "log.some_name" gives a method, not another logger instance diff --git a/rpython/translator/backendopt/merge_if_blocks.py b/rpython/translator/backendopt/merge_if_blocks.py --- a/rpython/translator/backendopt/merge_if_blocks.py +++ b/rpython/translator/backendopt/merge_if_blocks.py @@ -1,7 +1,8 @@ from rpython.flowspace.model import Constant, Variable, mkentrymap -from rpython.translator.backendopt.support import log +from rpython.tool.ansi_print import AnsiLogger -log = log.mergeifblocks +log = AnsiLogger("backendopt") + def is_chain_block(block, first=False): if len(block.operations) == 0: From pypy.commits at gmail.com Fri Mar 11 11:51:54 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 11 Mar 2016 08:51:54 -0800 (PST) Subject: [pypy-commit] pypy remove-py-log: another place Message-ID: <56e2f7aa.e853c20a.2952d.1c43@mx.google.com> Author: Armin Rigo Branch: remove-py-log Changeset: r82985:9114dba413e2 Date: 2016-03-11 16:57 +0000 http://bitbucket.org/pypy/pypy/changeset/9114dba413e2/ Log: another place diff --git a/lib_pypy/ctypes_config_cache/rebuild.py b/lib_pypy/ctypes_config_cache/rebuild.py --- a/lib_pypy/ctypes_config_cache/rebuild.py +++ b/lib_pypy/ctypes_config_cache/rebuild.py @@ -9,9 
+9,8 @@ _dirpath = os.path.dirname(__file__) or os.curdir -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer("ctypes_config_cache") -py.log.setconsumer("ctypes_config_cache", ansi_log) +from rpython.tool.ansi_print import AnsiLogger +log = AnsiLogger("ctypes_config_cache") def rebuild_one(name): From pypy.commits at gmail.com Fri Mar 11 12:29:15 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 11 Mar 2016 09:29:15 -0800 (PST) Subject: [pypy-commit] pypy gcstress-hypothesis: working on a new strategy that cuts down the search space of control flow graphs to deterministic ones Message-ID: <56e3006b.96811c0a.61c5c.186a@mx.google.com> Author: Richard Plangger Branch: gcstress-hypothesis Changeset: r82986:c800d87fa7b4 Date: 2016-03-11 18:28 +0100 http://bitbucket.org/pypy/pypy/changeset/c800d87fa7b4/ Log: working on a new strategy that cuts down the search space of control flow graphs to deterministic ones diff --git a/rpython/jit/backend/llsupport/tl/code.py b/rpython/jit/backend/llsupport/tl/code.py --- a/rpython/jit/backend/llsupport/tl/code.py +++ b/rpython/jit/backend/llsupport/tl/code.py @@ -7,6 +7,10 @@ ctx.append_byte(self.BYTE_CODE) @classmethod + def splits_control_flow(self): + return False + + @classmethod def filter_bytecode(self, stack): """ filter this byte code if the stack does not contain the right values on the stack. @@ -210,9 +214,46 @@ def __init__(self): pass -def op_modifies_list(clazz): - """ NOT_RPYTHON """ - return clazz in (DelList, InsertList) + at requires_stack(INT_TYP) + at leaves_on_stack() +class CondJump(ByteCode): + BYTE_CODE = unique_code() + + COND_EQ = 0 + COND_LT = 1 + COND_GT = 2 + COND_LE = 3 + COND_GE = 4 + COND_ANY = 5 + + @requires_param(COND_TYP, INT_TYP) + def __init__(self, cond, offset): + self.cond = cond + self.offset = offset + + def encode(self, ctx): + ctx.append_byte(self.BYTE_CODE) + ctx.append_byte(self.cond) + ctx.append_int(self.offset) + + def splits_control_flow(self): + return True + + at requires_stack(LIST_TYP) + at leaves_on_stack(LIST_TYP, INT_TYP) +class LenList(ByteCode): + BYTE_CODE = unique_code() + def __init__(self): + pass + + +#@requires_stack(INT_TYP) # TODO VAL_TYP) +#@leaves_on_stack() +#class ReturnFrame(ByteCode): +# BYTE_CODE = unique_code() +# def __init__(self): +# pass +# BC_CLASSES = [] BC_NUM_TO_CLASS = {} @@ -223,47 +264,15 @@ assert clazz.BYTE_CODE not in BC_NUM_TO_CLASS BC_NUM_TO_CLASS[clazz.BYTE_CODE] = clazz -# remove comment one by one! 
+BC_CLASSES.remove(CondJump) -#@requires_stack() -#@leaves_on_stack(INT_TYP) -#class CondJump(ByteCode): -# BYTE_CODE = unique_code() -# -# COND_EQ = 0 -# COND_LT = 1 -# COND_GT = 2 -# COND_LE = 3 -# COND_GE = 4 -# -# @requires_param(COND_TYP) -# def __init__(self, cond): -# self.cond = cond -# -# def encode(self, ctx): -# ctx.append_byte(self.BYTE_CODE) -# ctx.append_byte(self.cond) -# -#@requires_stack() -#@leaves_on_stack() -#class Jump(ByteCode): -# BYTE_CODE = unique_code() -# def __init__(self): -# pass -# +# control flow byte codes +BC_CF_CLASSES = [CondJump] -#@requires_stack(LIST_TYP) -#@leaves_on_stack(LIST_TYP, INT_TYP) -#class LenList(ByteCode): -# BYTE_CODE = unique_code() -# def __init__(self): -# pass -# -# -#@requires_stack(INT_TYP) # TODO VAL_TYP) -#@leaves_on_stack() -#class ReturnFrame(ByteCode): -# BYTE_CODE = unique_code() -# def __init__(self): -# pass -# +class ByteCodeControlFlow(object): + # see the deterministic control flow search startegy in + # test/code_strategies.py for what steps & byte_codes mean + def __init__(self): + self.blocks = [] + self.steps = 0 + self.byte_codes = 0 diff --git a/rpython/jit/backend/llsupport/tl/interp.py b/rpython/jit/backend/llsupport/tl/interp.py --- a/rpython/jit/backend/llsupport/tl/interp.py +++ b/rpython/jit/backend/llsupport/tl/interp.py @@ -13,6 +13,9 @@ def __init__(self, items): self.items = items + def size(self): + return len(self.items) + def concat(self, space, w_lst): assert isinstance(w_lst, W_ListObject) return space.wrap(self.items + w_lst.items) @@ -153,6 +156,22 @@ w_lst = stack.peek(0) del w_lst.items[w_idx.value] # index error, just crash the machine!! + elif opcode == code.LenList.BYTE_CODE: + w_lst = stack.peek(0) + assert isinstance(w_lst, W_ListObject) + stack.append(space.wrap(w_lst.size())) + elif opcode == code.CondJump.BYTE_CODE: + cond = runpack('b', bytecode[i+1:i+2]) + offset = runpack('i', bytecode[i+2:i+6]) + w_int = stack.pop(0) + assert isinstance(w_lst, W_IntObject) + i += 5 + if CondJump.should_jump(cond, w_int.value): + if offset < 0: + pass # TODO jit driver + # the new position is calculated at the end of + # this jump instruction!! + i += offset else: print("opcode %d is not implemented" % opcode) raise NotImplementedError diff --git a/rpython/jit/backend/llsupport/tl/test/code_strategies.py b/rpython/jit/backend/llsupport/tl/test/code_strategies.py --- a/rpython/jit/backend/llsupport/tl/test/code_strategies.py +++ b/rpython/jit/backend/llsupport/tl/test/code_strategies.py @@ -4,6 +4,7 @@ from rpython.jit.backend.llsupport.tl import code, interp, stack from hypothesis.searchstrategy.collections import TupleStrategy, ListStrategy import hypothesis.internal.conjecture.utils as cu +from collections import namedtuple def get_strategy_for(typ): if typ == code.INT_TYP: @@ -107,7 +108,7 @@ @composite def bytecode(draw, max_stack_size=4096): # get a stack that is the same for one test run - run_stack = draw(st.shared(st.just(stack.Stack(0)), 'stack2')) + run_stack = draw(st.shared(st.just(stack.Stack(0)), 'stack')) # get a byte code class, only allow what is valid for the run_stack clazzes = filter(lambda clazz: clazz.filter_bytecode(run_stack), code.BC_CLASSES) @@ -124,3 +125,51 @@ return inst +class DeterministicControlFlowSearchStrategy(SearchStrategy): + """ This is flow graph search space is limited to deterministic + control flow. This means the execution of this program MUST + terminate in at most `max_steps`. 
+ + max/min_steps: one step is one execution in the interpreter loop + max_byte_codes: the amount of bytecodes the final program has + """ + + def __init__(self, stack, min_steps=1, max_steps=2**16, max_byte_codes=5000): + SearchStrategy.__init__(self) + + self.stack = stack + self.max_steps = float(max_steps) + self.min_steps = min_steps + self.max_byte_codes = max_byte_codes + + # self.element_strategy = one_of_strategies(strategies) + + def validate(self): + pass + #self.element_strategy.validate() + + def do_draw(self, data): + bccf = code.ByteCodeControlFlow() + result = [] + while True: + stopping_value = 1 - 1.0 / (1 + self.average_length) + data.start_example() + more = cu.biased_coin(data, stopping_value) + if not more: + data.stop_example() + if len(result) < self.min_size: + continue + else: + break + value = data.draw(self.element_strategy) + data.stop_example() + result.append(value) + return bccf + + at st.defines_strategy +def control_flow_graph(draw, stack=None, blocks): + if stack is None: + # get a stack that is the same for one test run + stack = stack.Stack(0) + return DeterministicControlFlowSearchStrategy(stack) + diff --git a/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py b/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py --- a/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py +++ b/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py @@ -5,6 +5,8 @@ from rpython.jit.backend.llsupport.tl.stack import Stack from rpython.jit.backend.llsupport.tl.test import code_strategies as st +STD_SPACE = interp.Space() + class TestByteCode(object): def test_load_str(self): c = code.Context() @@ -28,7 +30,6 @@ @given(data()) def test_bytecode_class_generation(self, data): - space = interp.Space() stack = Stack(0) for i in range(10): clazz = data.draw(st.bytecode_class(stack)) @@ -36,13 +37,12 @@ @given(data()) def test_bytecode_class_generation_int(self, data): - space = interp.Space() stack = Stack(0) - stack.append(space.wrap(0)) + stack.append(STD_SPACE.wrap(0)) for i in range(10): clazz = data.draw(st.bytecode_class(stack)) assert(clazz in self.DEFAULT_ACTION_CLASSES) - stack.append(space.wrap(0)) + stack.append(STD_SPACE.wrap(0)) for i in range(10): clazz = data.draw(st.bytecode_class(stack)) assert(clazz in self.DEFAULT_ACTION_CLASSES + \ @@ -50,13 +50,12 @@ @given(data()) def test_bytecode_class_generation_str(self, data): - space = interp.Space() stack = Stack(0) - stack.append(space.wrap("hello")) + stack.append(STD_SPACE.wrap("hello")) for i in range(10): clazz = data.draw(st.bytecode_class(stack)) assert(clazz in self.DEFAULT_ACTION_CLASSES) - stack.append(space.wrap("world")) + stack.append(STD_SPACE.wrap("world")) for i in range(10): clazz = data.draw(st.bytecode_class(stack)) assert(clazz in self.DEFAULT_ACTION_CLASSES + \ @@ -64,20 +63,19 @@ @given(data()) def test_bytecode_class_generation_list(self, data): - space = interp.Space() stack = Stack(0) - stack.append(space.wrap([])) - stack.append(space.wrap(0)) + stack.append(STD_SPACE.wrap([])) + stack.append(STD_SPACE.wrap(0)) for i in range(10): clazz = data.draw(st.bytecode_class(stack)) assert(clazz not in (code.InsertList, code.DelList)) - stack.append(space.wrap([space.wrap(1)])) - stack.append(space.wrap(0)) + stack.append(STD_SPACE.wrap([STD_SPACE.wrap(1)])) + stack.append(STD_SPACE.wrap(0)) for i in range(10): clazz = data.draw(st.bytecode_class(stack)) assert(clazz in self.DEFAULT_ACTION_CLASSES + \ (code.DelList, code.AppendList)) - stack.append(space.wrap("haskell")) + 
stack.append(STD_SPACE.wrap("haskell")) for i in range(10): clazz = data.draw(st.bytecode_class(stack)) assert(clazz in self.DEFAULT_ACTION_CLASSES + \ @@ -85,7 +83,6 @@ @given(data()) def test_empty_stack_no_list_op(self, data): - space = interp.Space() stack = Stack(0) for i in range(10): clazz = data.draw(st.bytecode_class(stack)) @@ -93,10 +90,26 @@ code.AppendList, code.AddList, code.AddStr)) + @given(data()) + def test_control_flow_split(self, data): + stack = Stack(0) + cfg = data.draw(st.control_flow_graph(stack)) + assert cfg.steps > 0 + # assert that there is at least one block that ends with a cond. jump + assert any([isinstance(block[-1], CondJump) for block in cfg.blocks]) + class TestInterp(object): @given(st.basic_block(st.bytecode(), min_size=1)) def test_execute_bytecode_block(self, bc_obj_list): + self.execute(bc_obj_list) + + @given(st.control_flow_graph()) + def test_execute_bytecode_block(self, cfg): + bc_obj_list = cfg.linearize() + self.execute(bc_obj_list) + + def execute(self, bc_obj_list): bytecode, consts = code.Context().transform(bc_obj_list) space = interp.Space() pc = 0 From pypy.commits at gmail.com Fri Mar 11 12:31:13 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 11 Mar 2016 09:31:13 -0800 (PST) Subject: [pypy-commit] pypy py3.3-bootstrap-hack: Fix test_app_main Message-ID: <56e300e1.654fc20a.24690.2ec9@mx.google.com> Author: Ronan Lamy Branch: py3.3-bootstrap-hack Changeset: r82987:5705d77f0311 Date: 2016-03-11 17:30 +0000 http://bitbucket.org/pypy/pypy/changeset/5705d77f0311/ Log: Fix test_app_main diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -862,6 +862,7 @@ sys.pypy_find_executable = pypy_find_executable sys.pypy_find_stdlib = pypy_find_stdlib sys.pypy_resolvedirof = pypy_resolvedirof + sys.pypy_initfsencoding = lambda: None sys.cpython_path = sys.path[:] try: From pypy.commits at gmail.com Fri Mar 11 12:31:44 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 11 Mar 2016 09:31:44 -0800 (PST) Subject: [pypy-commit] pypy remove-py-log: ready to merge Message-ID: <56e30100.918e1c0a.36d33.1b1d@mx.google.com> Author: Armin Rigo Branch: remove-py-log Changeset: r82988:58f253c9bba4 Date: 2016-03-11 18:30 +0100 http://bitbucket.org/pypy/pypy/changeset/58f253c9bba4/ Log: ready to merge From pypy.commits at gmail.com Fri Mar 11 12:31:46 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 11 Mar 2016 09:31:46 -0800 (PST) Subject: [pypy-commit] pypy default: hg merge remove-py-log Message-ID: <56e30102.a3f6c20a.c5d46.22f1@mx.google.com> Author: Armin Rigo Branch: Changeset: r82989:9cea4c424341 Date: 2016-03-11 18:31 +0100 http://bitbucket.org/pypy/pypy/changeset/9cea4c424341/ Log: hg merge remove-py-log Remove py.log usage which seems particularly JIT-unfriendly. Replace it with much simpler code. 
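The merge below is mostly mechanical: every two-step py.log setup, a Producer plus a matching setconsumer call, collapses into a single AnsiLogger construction with the same logger name. A minimal sketch of the before/after usage pattern, with names taken from the translate.py hunk further down (the colored "[name:method] text" output is defined by the rewritten rpython/tool/ansi_print.py, so the output comments below are approximate rather than authoritative):

    # old pattern, removed throughout this merge
    import py
    from rpython.tool.ansi_print import ansi_log
    log = py.log.Producer("translation")
    py.log.setconsumer("translation", ansi_log)
    log.info("Translating target ...")   # roughly "[translation:info] Translating target ..."

    # new pattern
    from rpython.tool.ansi_print import AnsiLogger
    log = AnsiLogger("translation")
    log.info("Translating target ...")   # same kind of line, without the py.log machinery

Note that, as the earlier "the logger is not recursive" fix spells out, log.some_name on an AnsiLogger is a plain method that prefixes "[name:some_name]", not a nested logger, which is why call sites such as log.llinterpret.event(...) in driver.py become log.llinterpret(...).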
diff --git a/lib_pypy/ctypes_config_cache/rebuild.py b/lib_pypy/ctypes_config_cache/rebuild.py --- a/lib_pypy/ctypes_config_cache/rebuild.py +++ b/lib_pypy/ctypes_config_cache/rebuild.py @@ -9,9 +9,8 @@ _dirpath = os.path.dirname(__file__) or os.curdir -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer("ctypes_config_cache") -py.log.setconsumer("ctypes_config_cache", ansi_log) +from rpython.tool.ansi_print import AnsiLogger +log = AnsiLogger("ctypes_config_cache") def rebuild_one(name): diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -3,7 +3,7 @@ import types from collections import defaultdict -from rpython.tool.ansi_print import ansi_log +from rpython.tool.ansi_print import AnsiLogger from rpython.tool.pairtype import pair from rpython.tool.error import (format_blocked_annotation_error, gather_error, source_lines) @@ -15,9 +15,7 @@ from rpython.annotator.bookkeeper import Bookkeeper from rpython.rtyper.normalizecalls import perform_normalizations -import py -log = py.log.Producer("annrpython") -py.log.setconsumer("annrpython", ansi_log) +log = AnsiLogger("annrpython") class RPythonAnnotator(object): diff --git a/rpython/jit/backend/ppc/runner.py b/rpython/jit/backend/ppc/runner.py --- a/rpython/jit/backend/ppc/runner.py +++ b/rpython/jit/backend/ppc/runner.py @@ -1,4 +1,3 @@ -import py from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rtyper.llinterp import LLInterpreter from rpython.rlib import rgc @@ -9,9 +8,6 @@ from rpython.jit.backend.ppc.codebuilder import PPCBuilder from rpython.jit.backend.ppc import register as r -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer('jitbackend') -py.log.setconsumer('jitbackend', ansi_log) class PPC_CPU(AbstractLLCPU): diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py --- a/rpython/jit/backend/x86/runner.py +++ b/rpython/jit/backend/x86/runner.py @@ -10,10 +10,6 @@ import sys -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer('jitbackend') -py.log.setconsumer('jitbackend', ansi_log) - class AbstractX86CPU(AbstractLLCPU): debug = True diff --git a/rpython/jit/codewriter/policy.py b/rpython/jit/codewriter/policy.py --- a/rpython/jit/codewriter/policy.py +++ b/rpython/jit/codewriter/policy.py @@ -1,10 +1,8 @@ from rpython.jit.metainterp import history from rpython.tool.udir import udir +from rpython.tool.ansi_print import AnsiLogger -import py -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer('jitcodewriter') -py.log.setconsumer('jitcodewriter', ansi_log) +log = AnsiLogger('jitcodewriter') class JitPolicy(object): diff --git a/rpython/memory/gctransform/log.py b/rpython/memory/gctransform/log.py --- a/rpython/memory/gctransform/log.py +++ b/rpython/memory/gctransform/log.py @@ -1,4 +1,3 @@ -import py -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer("gctransform") -py.log.setconsumer("gctransform", ansi_log) +from rpython.tool.ansi_print import AnsiLogger + +log = AnsiLogger("gctransform") diff --git a/rpython/memory/test/gc_test_base.py b/rpython/memory/test/gc_test_base.py --- a/rpython/memory/test/gc_test_base.py +++ b/rpython/memory/test/gc_test_base.py @@ -3,6 +3,7 @@ from rpython.memory import gcwrapper from rpython.memory.test import snippet +from rpython.rtyper import llinterp from rpython.rtyper.test.test_llinterp import get_interpreter from rpython.rtyper.lltypesystem import lltype 
from rpython.rtyper.lltypesystem.lloperation import llop @@ -15,11 +16,11 @@ WORD = LONG_BIT // 8 -def stdout_ignore_ll_functions(msg): - strmsg = str(msg) - if "evaluating" in strmsg and "ll_" in strmsg: - return - print >>sys.stdout, strmsg +## def stdout_ignore_ll_functions(msg): +## strmsg = str(msg) +## if "evaluating" in strmsg and "ll_" in strmsg: +## return +## print >>sys.stdout, strmsg class GCTest(object): @@ -31,13 +32,11 @@ WREF_IS_INVALID_BEFORE_DEL_IS_CALLED = False def setup_class(cls): - cls._saved_logstate = py.log._getstate() - py.log.setconsumer("llinterp", py.log.STDOUT) - py.log.setconsumer("llinterp frame", stdout_ignore_ll_functions) - py.log.setconsumer("llinterp operation", None) + # switch on logging of interp to show more info on failing tests + llinterp.log.output_disabled = False def teardown_class(cls): - py.log._setstate(cls._saved_logstate) + llinterp.log.output_disabled = True def interpret(self, func, values, **kwds): interp, graph = get_interpreter(func, values, **kwds) diff --git a/rpython/rlib/clibffi.py b/rpython/rlib/clibffi.py --- a/rpython/rlib/clibffi.py +++ b/rpython/rlib/clibffi.py @@ -22,9 +22,6 @@ import sys import ctypes.util -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer("libffi") -py.log.setconsumer("libffi", ansi_log) # maaaybe isinstance here would be better. Think _MSVC = platform.name == "msvc" diff --git a/rpython/rtyper/llinterp.py b/rpython/rtyper/llinterp.py --- a/rpython/rtyper/llinterp.py +++ b/rpython/rtyper/llinterp.py @@ -14,9 +14,15 @@ r_uint, r_longlong, r_ulonglong, r_longlonglong) from rpython.rtyper.lltypesystem import lltype, llmemory, lloperation, llheap from rpython.rtyper import rclass +from rpython.tool.ansi_print import AnsiLogger -log = py.log.Producer('llinterp') +# by default this logger's output is disabled. +# e.g. tests can then switch on logging to get more help +# for failing tests +log = AnsiLogger('llinterp') +log.output_disabled = True + class LLException(Exception): def __init__(self, *args): @@ -1367,10 +1373,3 @@ class _address_of_thread_local(object): _TYPE = llmemory.Address is_fake_thread_local_addr = True - - -# by default we route all logging messages to nothingness -# e.g. 
tests can then switch on logging to get more help -# for failing tests -from rpython.tool.ansi_print import ansi_log -py.log.setconsumer('llinterp', ansi_log) diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -454,13 +454,9 @@ # logging/warning -import py -from rpython.tool.ansi_print import ansi_log +from rpython.tool.ansi_print import AnsiLogger -log = py.log.Producer("rtyper") -py.log.setconsumer("rtyper", ansi_log) -py.log.setconsumer("rtyper translating", None) -py.log.setconsumer("rtyper debug", None) +log = AnsiLogger("rtyper") def warning(msg): log.WARNING(msg) diff --git a/rpython/rtyper/test/test_llinterp.py b/rpython/rtyper/test/test_llinterp.py --- a/rpython/rtyper/test/test_llinterp.py +++ b/rpython/rtyper/test/test_llinterp.py @@ -2,7 +2,7 @@ import py import sys from rpython.rtyper.lltypesystem.lltype import typeOf, Void, malloc, free -from rpython.rtyper.llinterp import LLInterpreter, LLException +from rpython.rtyper.llinterp import LLInterpreter, LLException, log from rpython.rtyper.rmodel import inputconst from rpython.rtyper.annlowlevel import hlstr, llhelper from rpython.rtyper.exceptiondata import UnknownException @@ -16,13 +16,10 @@ from rpython.rtyper.rtyper import llinterp_backend # switch on logging of interp to show more info on failing tests - def setup_module(mod): - mod.logstate = py.log._getstate() - py.log.setconsumer("llinterp", py.log.STDOUT) - + log.output_disabled = False def teardown_module(mod): - py.log._setstate(mod.logstate) + log.output_disabled = True def gengraph(func, argtypes=[], viewbefore='auto', policy=None, diff --git a/rpython/rtyper/test/test_rtyper.py b/rpython/rtyper/test/test_rtyper.py --- a/rpython/rtyper/test/test_rtyper.py +++ b/rpython/rtyper/test/test_rtyper.py @@ -1,5 +1,3 @@ -import py - from rpython.annotator import model as annmodel, annrpython from rpython.flowspace.model import Constant from rpython.rtyper import rmodel @@ -9,14 +7,6 @@ from rpython.translator.translator import TranslationContext, graphof -def setup_module(mod): - mod.logstate = py.log._getstate() - py.log.setconsumer("rtyper", py.log.STDOUT) - py.log.setconsumer("annrpython", None) - -def teardown_module(mod): - py.log._setstate(mod.logstate) - def test_reprkeys_dont_clash(): stup1 = annmodel.SomeTuple((annmodel.SomeFloat(), annmodel.SomeInteger())) diff --git a/rpython/tool/ansi_print.py b/rpython/tool/ansi_print.py --- a/rpython/tool/ansi_print.py +++ b/rpython/tool/ansi_print.py @@ -1,74 +1,77 @@ """ -A color print. +A simple color logger. 
""" import sys from py.io import ansi_print from rpython.tool.ansi_mandelbrot import Driver -class AnsiLog: - wrote_dot = False # XXX sharing state with all instances - KW_TO_COLOR = { - # color supress - 'red': ((31,), True), - 'bold': ((1,), True), - 'WARNING': ((31,), False), - 'event': ((1,), True), - 'ERROR': ((1, 31), False), - 'Error': ((1, 31), False), - 'info': ((35,), False), - 'stub': ((34,), False), - } +isatty = getattr(sys.stderr, 'isatty', lambda: False) +mandelbrot_driver = Driver() +wrote_dot = False # global shared state - def __init__(self, kw_to_color={}, file=None): - self.kw_to_color = self.KW_TO_COLOR.copy() - self.kw_to_color.update(kw_to_color) - self.file = file - self.fancy = True - self.isatty = getattr(sys.stderr, 'isatty', lambda: False) - if self.fancy and self.isatty(): - self.mandelbrot_driver = Driver() + +def _make_method(subname, colors): + # + def logger_method(self, text): + global wrote_dot + if self.output_disabled: + return + text = "[%s%s] %s" % (self.name, subname, text) + if isatty(): + col = colors else: - self.mandelbrot_driver = None + col = () + if wrote_dot: + text = '\n' + text + ansi_print(text, col) + wrote_dot = False + # + return logger_method - def __call__(self, msg): - tty = self.isatty() - flush = False - newline = True - keywords = [] - esc = [] - for kw in msg.keywords: - color, supress = self.kw_to_color.get(kw, (None, False)) - if color: - esc.extend(color) - if not supress: - keywords.append(kw) - if 'start' in keywords: - if tty: - newline = False - flush = True - keywords.remove('start') - elif 'done' in keywords: - if tty: - print >> sys.stderr - return - elif 'dot' in keywords: - if tty: - if self.fancy: - if not AnsiLog.wrote_dot: - self.mandelbrot_driver.reset() - self.mandelbrot_driver.dot() - else: - ansi_print(".", tuple(esc), file=self.file, newline=False, flush=flush) - AnsiLog.wrote_dot = True - return - if AnsiLog.wrote_dot: - AnsiLog.wrote_dot = False - sys.stderr.write("\n") - esc = tuple(esc) - for line in msg.content().splitlines(): - ansi_print("[%s] %s" %(":".join(keywords), line), esc, - file=self.file, newline=newline, flush=flush) -ansi_log = AnsiLog() +class AnsiLogger(object): + output_disabled = False + + def __init__(self, name): + self.name = name + + # these methods write "[name:method] text" to the terminal, with color codes + red = _make_method('', (31,)) + bold = _make_method('', (1,)) + WARNING = _make_method(':WARNING', (31,)) + event = _make_method('', (1,)) + ERROR = _make_method(':ERROR', (1, 31)) + Error = _make_method(':Error', (1, 31)) + info = _make_method(':info', (35,)) + stub = _make_method(':stub', (34,)) + + # some more methods used by sandlib + call = _make_method(':call', (34,)) + result = _make_method(':result', (34,)) + exception = _make_method(':exception', (34,)), + vpath = _make_method(':vpath', (35,)), + timeout = _make_method('', (1, 31)), + + # directly calling the logger writes "[name] text" with no particular color + __call__ = _make_method('', ()) + + # calling unknown method names writes "[name:method] text" without color + def __getattr__(self, name): + if name[0].isalpha(): + method = _make_method(':' + name, ()) + setattr(self.__class__, name, method) + return getattr(self, name) + raise AttributeError(name) + + def dot(self): + """Output a mandelbrot dot to the terminal.""" + global wrote_dot + if not wrote_dot: + mandelbrot_driver.reset() + wrote_dot = True + mandelbrot_driver.dot() + + def debug(self, info): + """For messages that are dropped. 
Can be monkeypatched in tests.""" diff --git a/rpython/tool/error.py b/rpython/tool/error.py --- a/rpython/tool/error.py +++ b/rpython/tool/error.py @@ -8,12 +8,8 @@ from rpython.flowspace.model import Variable from rpython.rlib import jit -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer("error") -py.log.setconsumer("error", ansi_log) - SHOW_TRACEBACK = False SHOW_ANNOTATIONS = True SHOW_DEFAULT_LINES_OF_CODE = 0 diff --git a/rpython/tool/test/test_ansi_print.py b/rpython/tool/test/test_ansi_print.py new file mode 100644 --- /dev/null +++ b/rpython/tool/test/test_ansi_print.py @@ -0,0 +1,84 @@ +from _pytest.monkeypatch import monkeypatch +from rpython.tool import ansi_print, ansi_mandelbrot + + +class FakeOutput(object): + def __init__(self, tty=True): + self.monkey = monkeypatch() + self.tty = tty + self.output = [] + def __enter__(self, *args): + ansi_print.wrote_dot = False + self.monkey.setattr(ansi_print, 'ansi_print', self._print) + self.monkey.setattr(ansi_print, 'isatty', self._isatty) + self.monkey.setattr(ansi_mandelbrot, 'ansi_print', self._print) + return self.output + def __exit__(self, *args): + self.monkey.undo() + + def _print(self, text, colors, newline=True, flush=True): + if newline: + text += '\n' + self.output.append((text, colors)) + def _isatty(self): + return self.tty + + +def test_simple(): + log = ansi_print.AnsiLogger('test') + with FakeOutput() as output: + log('Hello') + assert output == [('[test] Hello\n', ())] + +def test_bold(): + log = ansi_print.AnsiLogger('test') + with FakeOutput() as output: + log.bold('Hello') + assert output == [('[test] Hello\n', (1,))] + +def test_not_a_tty(): + log = ansi_print.AnsiLogger('test') + with FakeOutput(tty=False) as output: + log.bold('Hello') + assert output == [('[test] Hello\n', ())] + +def test_dot_1(): + log = ansi_print.AnsiLogger('test') + with FakeOutput() as output: + log.dot() + assert len(output) == 1 + assert len(output[0][0]) == 1 # single character + # output[0][1] is some ansi color code from mandelbort_driver + +def test_dot_mixing_with_regular_lines(): + log = ansi_print.AnsiLogger('test') + with FakeOutput() as output: + log.dot() + log.dot() + log.WARNING('oops') + log.WARNING('maybe?') + log.dot() + assert len(output) == 5 + assert len(output[0][0]) == 1 # single character + assert len(output[1][0]) == 1 # single character + assert output[2] == ('\n[test:WARNING] oops\n', (31,)) + assert output[3] == ('[test:WARNING] maybe?\n', (31,)) + assert len(output[4][0]) == 1 # single character + +def test_unknown_method_names(): + log = ansi_print.AnsiLogger('test') + with FakeOutput() as output: + log.foo('Hello') + log.foo('World') + log.BAR('!') + assert output == [('[test:foo] Hello\n', ()), + ('[test:foo] World\n', ()), + ('[test:BAR] !\n', ())] + +def test_output_disabled(): + log = ansi_print.AnsiLogger('test') + with FakeOutput() as output: + log('Hello') + log.output_disabled = True + log('World') + assert output == [('[test] Hello\n', ())] diff --git a/rpython/tool/version.py b/rpython/tool/version.py --- a/rpython/tool/version.py +++ b/rpython/tool/version.py @@ -10,9 +10,8 @@ if not err: return - from rpython.tool.ansi_print import ansi_log - log = py.log.Producer("version") - py.log.setconsumer("version", ansi_log) + from rpython.tool.ansi_print import AnsiLogger + log = AnsiLogger("version") log.WARNING('Errors getting %s information: %s' % (repo_type, err)) def get_repo_version_info(hgexe=None, root=rpythonroot): diff --git a/rpython/translator/backendopt/canraise.py 
b/rpython/translator/backendopt/canraise.py --- a/rpython/translator/backendopt/canraise.py +++ b/rpython/translator/backendopt/canraise.py @@ -1,11 +1,8 @@ -import py - from rpython.rtyper.lltypesystem.lloperation import LL_OPERATIONS -from rpython.tool.ansi_print import ansi_log +from rpython.tool.ansi_print import AnsiLogger from rpython.translator.backendopt import graphanalyze -log = py.log.Producer("canraise") -py.log.setconsumer("canraise", ansi_log) +log = AnsiLogger("canraise") class RaiseAnalyzer(graphanalyze.BoolGraphAnalyzer): diff --git a/rpython/translator/backendopt/merge_if_blocks.py b/rpython/translator/backendopt/merge_if_blocks.py --- a/rpython/translator/backendopt/merge_if_blocks.py +++ b/rpython/translator/backendopt/merge_if_blocks.py @@ -1,7 +1,8 @@ from rpython.flowspace.model import Constant, Variable, mkentrymap -from rpython.translator.backendopt.support import log +from rpython.tool.ansi_print import AnsiLogger -log = log.mergeifblocks +log = AnsiLogger("backendopt") + def is_chain_block(block, first=False): if len(block.operations) == 0: diff --git a/rpython/translator/backendopt/support.py b/rpython/translator/backendopt/support.py --- a/rpython/translator/backendopt/support.py +++ b/rpython/translator/backendopt/support.py @@ -1,13 +1,9 @@ -import py - from rpython.rtyper.lltypesystem import lltype from rpython.rtyper.rmodel import inputconst -from rpython.tool.ansi_print import ansi_log +from rpython.tool.ansi_print import AnsiLogger from rpython.translator.simplify import get_graph - -log = py.log.Producer("backendopt") -py.log.setconsumer("backendopt", ansi_log) +log = AnsiLogger("backendopt") def graph_operations(graph): diff --git a/rpython/translator/backendopt/test/test_removenoops.py b/rpython/translator/backendopt/test/test_removenoops.py --- a/rpython/translator/backendopt/test/test_removenoops.py +++ b/rpython/translator/backendopt/test/test_removenoops.py @@ -12,8 +12,6 @@ from rpython.rtyper.llinterp import LLInterpreter from rpython.conftest import option -import py -log = py.log.Producer('test_backendoptimization') def get_graph(fn, signature, all_opts=True): t = TranslationContext() diff --git a/rpython/translator/c/support.py b/rpython/translator/c/support.py --- a/rpython/translator/c/support.py +++ b/rpython/translator/c/support.py @@ -166,7 +166,5 @@ # logging -import py -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer("c") -py.log.setconsumer("c", ansi_log) +from rpython.tool.ansi_print import AnsiLogger +log = AnsiLogger("c") diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -14,9 +14,9 @@ annotated_jit_entrypoints import py -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer("translation") -py.log.setconsumer("translation", ansi_log) +from rpython.tool.ansi_print import AnsiLogger + +log = AnsiLogger("translation") def taskdef(deps, title, new_state=None, expected_states=[], @@ -524,7 +524,6 @@ @taskdef([STACKCHECKINSERTION, '?'+BACKENDOPT, RTYPE], "LLInterpreting") def task_llinterpret_lltype(self): from rpython.rtyper.llinterp import LLInterpreter - py.log.setconsumer("llinterp operation", None) translator = self.translator interp = LLInterpreter(translator.rtyper) @@ -534,7 +533,7 @@ self.extra.get('get_llinterp_args', lambda: [])()) - log.llinterpret.event("result -> %s" % v) + log.llinterpret("result -> %s" % v) def proceed(self, goals): if not goals: diff --git a/rpython/translator/goal/timing.py 
b/rpython/translator/goal/timing.py --- a/rpython/translator/goal/timing.py +++ b/rpython/translator/goal/timing.py @@ -5,9 +5,8 @@ import time import py -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer("Timer") -py.log.setconsumer("Timer", ansi_log) +from rpython.tool.ansi_print import AnsiLogger +log = AnsiLogger("Timer") class Timer(object): def __init__(self, timer=time.time): diff --git a/rpython/translator/goal/translate.py b/rpython/translator/goal/translate.py --- a/rpython/translator/goal/translate.py +++ b/rpython/translator/goal/translate.py @@ -83,9 +83,8 @@ ]) import optparse -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer("translation") -py.log.setconsumer("translation", ansi_log) +from rpython.tool.ansi_print import AnsiLogger +log = AnsiLogger("translation") def load_target(targetspec): log.info("Translating target as defined by %s" % targetspec) diff --git a/rpython/translator/platform/__init__.py b/rpython/translator/platform/__init__.py --- a/rpython/translator/platform/__init__.py +++ b/rpython/translator/platform/__init__.py @@ -5,8 +5,9 @@ from rpython.tool.runsubprocess import run_subprocess as _run_subprocess from rpython.tool.udir import udir from rpython.tool.version import rpythonroot +from rpython.tool.ansi_print import AnsiLogger -log = py.log.Producer("platform") +log = AnsiLogger("platform") class CompilationError(Exception): diff --git a/rpython/translator/sandbox/rsandbox.py b/rpython/translator/sandbox/rsandbox.py --- a/rpython/translator/sandbox/rsandbox.py +++ b/rpython/translator/sandbox/rsandbox.py @@ -17,10 +17,9 @@ from rpython.rtyper.lltypesystem import lltype, rffi from rpython.rtyper.llannotation import lltype_to_annotation from rpython.rtyper.annlowlevel import MixLevelHelperAnnotator -from rpython.tool.ansi_print import ansi_log +from rpython.tool.ansi_print import AnsiLogger -log = py.log.Producer("sandbox") -py.log.setconsumer("sandbox", ansi_log) +log = AnsiLogger("sandbox") # a version of os.read() and os.write() that are not mangled diff --git a/rpython/translator/sandbox/sandlib.py b/rpython/translator/sandbox/sandlib.py --- a/rpython/translator/sandbox/sandlib.py +++ b/rpython/translator/sandbox/sandlib.py @@ -15,21 +15,8 @@ def create_log(): """Make and return a log for the sandbox to use, if needed.""" - # These imports are local to avoid importing pypy if we don't need to. - from rpython.tool.ansi_print import AnsiLog - - class MyAnsiLog(AnsiLog): - KW_TO_COLOR = { - 'call': ((34,), False), - 'result': ((34,), False), - 'exception': ((34,), False), - 'vpath': ((35,), False), - 'timeout': ((1, 31), True), - } - - log = py.log.Producer("sandlib") - py.log.setconsumer("sandlib", MyAnsiLog()) - return log + from rpython.tool.ansi_print import AnsiLogger + return AnsiLogger("sandlib") # Note: we use lib_pypy/marshal.py instead of the built-in marshal # for two reasons. 
The built-in module could be made to segfault diff --git a/rpython/translator/translator.py b/rpython/translator/translator.py --- a/rpython/translator/translator.py +++ b/rpython/translator/translator.py @@ -10,13 +10,11 @@ from rpython.translator import simplify from rpython.flowspace.model import FunctionGraph, checkgraph, Block from rpython.flowspace.objspace import build_flow -from rpython.tool.ansi_print import ansi_log +from rpython.tool.ansi_print import AnsiLogger from rpython.tool.sourcetools import nice_repr_for_func from rpython.config.translationoption import get_platform -import py -log = py.log.Producer("flowgraph") -py.log.setconsumer("flowgraph", ansi_log) +log = AnsiLogger("flowgraph") class TranslationContext(object): FLOWING_FLAGS = { @@ -50,14 +48,12 @@ graph = self._prebuilt_graphs.pop(func) else: if self.config.translation.verbose: - log.start(nice_repr_for_func(func)) + log(nice_repr_for_func(func)) graph = build_flow(func) simplify.simplify_graph(graph) if self.config.translation.list_comprehension_operations: simplify.detect_list_comprehension(graph) - if self.config.translation.verbose: - log.done(func.__name__) - elif not mute_dot: + if not self.config.translation.verbose and not mute_dot: log.dot() self.graphs.append(graph) # store the graph in our list return graph From pypy.commits at gmail.com Sat Mar 12 04:33:22 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sat, 12 Mar 2016 01:33:22 -0800 (PST) Subject: [pypy-commit] pypy default: we don't support python 2.5 any more Message-ID: <56e3e262.4412c30a.6f1b7.0c42@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82990:075aea373fa9 Date: 2016-03-12 10:11 +0100 http://bitbucket.org/pypy/pypy/changeset/075aea373fa9/ Log: we don't support python 2.5 any more diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -1,19 +1,7 @@ import py import random -try: - from itertools import product -except ImportError: - # Python 2.5, this is taken from the CPython docs, but simplified. 
- def product(*args): - # product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy - # product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111 - pools = map(tuple, args) - result = [[]] - for pool in pools: - result = [x+[y] for x in result for y in pool] - for prod in result: - yield tuple(prod) +from itertools import product from rpython.flowspace.model import FunctionGraph, Block, Link, c_last_exception from rpython.flowspace.model import SpaceOperation, Variable, Constant From pypy.commits at gmail.com Sat Mar 12 04:33:24 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sat, 12 Mar 2016 01:33:24 -0800 (PST) Subject: [pypy-commit] pypy default: re-set debug to True to make sure that subsequent asserts work Message-ID: <56e3e264.85b01c0a.439d9.fffff32a@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r82991:c7b41222937f Date: 2016-03-12 10:32 +0100 http://bitbucket.org/pypy/pypy/changeset/c7b41222937f/ Log: re-set debug to True to make sure that subsequent asserts work diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -9,6 +9,11 @@ from pypy.conftest import pypydir from lib_pypy._pypy_interact import irc_header +try: + import __pypy__ +except ImportError: + __pypy__ = None + banner = sys.version.splitlines()[0] app_main = os.path.join(os.path.realpath(os.path.dirname(__file__)), os.pardir, 'app_main.py') @@ -106,6 +111,8 @@ sys.argv[:] = saved_sys_argv sys.stdout = saved_sys_stdout sys.stderr = saved_sys_stderr + if __pypy__: + __pypy__.set_debug(True) def test_all_combinations_I_can_think_of(self): self.check([], {}, sys_argv=[''], run_stdin=True) @@ -601,9 +608,7 @@ def run_with_status_code(self, cmdline, senddata='', expect_prompt=False, expect_banner=False, python_flags='', env=None): if os.name == 'nt': - try: - import __pypy__ - except: + if __pypy__ is None: py.test.skip('app_main cannot run on non-pypy for windows') cmdline = '%s %s "%s" %s' % (sys.executable, python_flags, app_main, cmdline) From pypy.commits at gmail.com Sat Mar 12 07:22:21 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 12 Mar 2016 04:22:21 -0800 (PST) Subject: [pypy-commit] pypy default: fix for windows Message-ID: <56e409fd.02931c0a.3514c.2a8d@mx.google.com> Author: mattip Branch: Changeset: r82992:e5ee9597f8cc Date: 2016-03-12 14:21 +0200 http://bitbucket.org/pypy/pypy/changeset/e5ee9597f8cc/ Log: fix for windows diff --git a/testrunner/runner.py b/testrunner/runner.py --- a/testrunner/runner.py +++ b/testrunner/runner.py @@ -240,8 +240,8 @@ s = 'setting' if os.environ.get('MAKEFLAGS'): s = 'overriding' - out.write("%s MAKEFLAGS to '-j1'\n" % s) - os.environ['MAKEFLAGS'] = '-j1' + out.write("%s MAKEFLAGS to ' ' (space)\n" % s) + os.environ['MAKEFLAGS'] = ' ' failure = False for testname in testdirs: From pypy.commits at gmail.com Sat Mar 12 11:37:28 2016 From: pypy.commits at gmail.com (fijal) Date: Sat, 12 Mar 2016 08:37:28 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: hack differently Message-ID: <56e445c8.c711c30a.d2f7e.ffff8eb0@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82993:065b7dae64ce Date: 2016-03-12 18:36 +0200 http://bitbucket.org/pypy/pypy/changeset/065b7dae64ce/ Log: hack differently diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -27,52 +27,32 @@ pass class 
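One closing note on the jit-leaner-frontend changesets above: the packed jitcode/pc value stored on each Snapshot is a plain 16/16 bit split, so the encoding is easy to check in isolation. A self-contained sketch (the two helpers are copied from the opencoder.py hunk above; the example values 4 and 3 come from test_opencoder.py, and 2**16 - 1 is the "no frame" marker used by create_empty_top_snapshot; only the assembled snippet itself is illustrative):

    def combine_uint(index1, index2):
        # both halves must fit in 16 bits
        assert 0 <= index1 < 65536
        assert 0 <= index2 < 65536
        return index1 << 16 | index2

    def unpack_uint(packed):
        return (packed >> 16) & 0xffff, packed & 0xffff

    packed = combine_uint(4, 3)             # jitcode index 4, pc 3
    assert unpack_uint(packed) == (4, 3)
    empty = combine_uint(2**16 - 1, 0)      # empty top snapshot marker
    assert unpack_uint(empty) == (2**16 - 1, 0)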
SnapshotIterator(object): - def __init__(self, main_iter, pos, end_pos): - self.trace = main_iter.trace + def __init__(self, main_iter, snapshot): self.main_iter = main_iter - self.end = end_pos - self.start = pos - self.pos = pos - self.save_pos = -1 + # reverse the snapshots and store the vable, vref lists + assert isinstance(snapshot, TopSnapshot) + self.vable_array = snapshot.vable_array + self.vref_array = snapshot.vref_array + self.size = len(self.vable_array) + len(self.vref_array) + 2 + jc_index, pc = unpack_uint(snapshot.packed_jitcode_pc) + self.framestack = [] + if jc_index == 2**16-1: + return + while snapshot: + self.framestack.append(snapshot) + self.size += len(snapshot.box_array) + 2 + snapshot = snapshot.prev + self.framestack.reverse() - def length(self): - return self.end - self.start + def get(self, index): + return self.main_iter._untag(index) - def done(self): - return self.pos >= self.end + def unpack_jitcode_pc(self, snapshot): + return unpack_uint(snapshot.packed_jitcode_pc) - def _next(self): - res = rffi.cast(lltype.Signed, self.trace._ops[self.pos]) - self.pos += 1 - return res - - def next(self): - r = self.main_iter._untag(self._next()) - assert r - return r - - def read_boxes(self, size): - return [self.next() for i in range(size)] - - def get_size_jitcode_pc(self): - if self.save_pos >= 0: - self.pos = self.save_pos - self.save_pos = -1 - size = self._next() - if size < 0: - self.save_pos = self.pos + 1 - self.pos = ((-size - 1) << 15) | (self._next()) - assert self.pos >= 0 - size = self._next() - assert size >= 0 - return size, self._next(), self._next() - - def get_list_of_boxes(self): - size = self._next() - l = [] - for i in range(size): - l.append(self.next()) - return l + def unpack_array(self, arr): + # NOT_RPYTHON + return [self.get(i) for i in arr] class TraceIterator(BaseTrace): def __init__(self, trace, start, end, force_inputargs=None): @@ -127,14 +107,8 @@ else: assert False - def skip_resume_data(self): - pos = self.pos - self.pos += self._next() - return pos - - def get_snapshot_iter(self, pos): - end = rffi.cast(lltype.Signed, self.trace._ops[pos]) + pos - return SnapshotIterator(self, pos + 1, end) + def get_snapshot_iter(self, index): + return SnapshotIterator(self, self.trace._snapshots[index]) def next(self): opnum = self._next() @@ -147,7 +121,7 @@ args.append(self._untag(self._next())) if opwithdescr[opnum]: descr_index = self._next() - if descr_index == -1: + if descr_index == -1 or rop.is_guard(opnum): descr = None else: descr = self.trace._descrs[descr_index] @@ -156,7 +130,7 @@ res = ResOperation(opnum, args, -1, descr=descr) if rop.is_guard(opnum): assert isinstance(res, GuardResOp) - res.rd_resume_position = self.skip_resume_data() + res.rd_resume_position = descr_index self._cache[self._count] = res self._count += 1 return res @@ -174,6 +148,30 @@ iter._count = self.count return iter +def combine_uint(index1, index2): + assert 0 <= index1 < 65536 + assert 0 <= index2 < 65536 + return index1 << 16 | index2 # it's ok to return signed here, + # we need only 32bit, but 64 is ok for now + +def unpack_uint(packed): + return (packed >> 16) & 0xffff, packed & 0xffff + +class Snapshot(object): + _attrs_ = ('packed_jitcode_pc', 'box_array', 'prev') + + prev = None + + def __init__(self, packed_jitcode_pc, box_array): + self.packed_jitcode_pc = packed_jitcode_pc + self.box_array = box_array + +class TopSnapshot(Snapshot): + def __init__(self, packed_jitcode_pc, box_array, vable_array, vref_array): + Snapshot.__init__(self, 
packed_jitcode_pc, box_array) + self.vable_array = vable_array + self.vref_array = vref_array + class Trace(BaseTrace): def __init__(self, inputargs): self._ops = [rffi.cast(rffi.SHORT, -15)] * 30000 @@ -191,6 +189,7 @@ self._bigints_dict = {} self._floats = [] self._floats_dict = {} + self._snapshots = [] for i, inparg in enumerate(inputargs): assert isinstance(inparg, AbstractInputArg) inparg.position = -i - 1 @@ -301,32 +300,12 @@ self._count += 1 return pos - def _record_raw(self, opnum, tagged_args, tagged_descr=-1): - NOT_USED - operations = self._ops - pos = self._count - operations.append(opnum) - expected_arity = oparity[opnum] - if expected_arity == -1: - operations.append(len(tagged_args)) - else: - assert len(argboxes) == expected_arity - operations.extend(tagged_args) - if tagged_descr != -1: - operations.append(tagged_descr) - self._count += 1 - return pos - def _encode_descr(self, descr): # XXX provide a global cache for prebuilt descrs so we don't # have to repeat them here self._descrs.append(descr) return len(self._descrs) - 1 -# def record_forwarding(self, op, newtag): -# index = op._pos -# self._ops[index] = -newtag - 1 - def record_snapshot_link(self, pos): self._sharings += 1 lower = pos & 0x7fff @@ -340,40 +319,35 @@ assert opnum >= 0 return ResOperation(opnum, argboxes, pos, descr) - def record_op_tag(self, opnum, tagged_args, descr=None): - NOT_USED - return tag(TAGBOX, self._record_raw(opnum, tagged_args, descr)) + def _list_of_boxes(self, boxes): + return [rffi.cast(rffi.SHORT, self._encode(box)) for box in boxes] - def record_snapshot(self, jitcode, pc, active_boxes): - self._total_snapshots += 1 - pos = self._pos - self.append(len(active_boxes)) # unnecessary, can be read from - self.append(jitcode.index) - self.append(pc) - for box in active_boxes: - self.append(self._encode(box)) # not tagged, as it must be boxes - return pos + def create_top_snapshot(self, jitcode, pc, boxes, vable_boxes, vref_boxes): + array = self._list_of_boxes(boxes) + vable_array = self._list_of_boxes(vable_boxes) + vref_array = self._list_of_boxes(vref_boxes) + s = TopSnapshot(combine_uint(jitcode.index, pc), array, vable_array, + vref_array) + assert rffi.cast(lltype.Signed, self._ops[self._pos - 1]) == -1 + # guards have no descr + self._snapshots.append(s) + self._ops[self._pos - 1] = rffi.cast(rffi.SHORT, len(self._snapshots) - 1) + return s - def record_list_of_boxes(self, boxes): - self.append(len(boxes)) - for box in boxes: - self.append(self._encode(box)) + def create_empty_top_snapshot(self, vable_boxes, vref_boxes): + vable_array = self._list_of_boxes(vable_boxes) + vref_array = self._list_of_boxes(vref_boxes) + s = TopSnapshot(combine_uint(2**16 - 1, 0), [], vable_array, + vref_array) + assert rffi.cast(lltype.Signed, self._ops[self._pos - 1]) == -1 + # guards have no descr + self._snapshots.append(s) + self._ops[self._pos - 1] = rffi.cast(rffi.SHORT, len(self._snapshots) - 1) + return s - def get_patchable_position(self): - p = self._pos - self.append(-1) - return p - - def patch_position_to_current(self, p): - prev = self._ops[p] - assert rffi.cast(lltype.Signed, prev) == -1 - self._snapshot_lgt += self._pos - p - self._ops[p] = rffi.cast(rffi.SHORT, self._pos - p) - - def check_snapshot_jitcode_pc(self, jitcode, pc, resumedata_pos): - # XXX expensive? 
- assert self._ops[resumedata_pos + 1] == rffi.cast(rffi.SHORT, jitcode.index) - assert self._ops[resumedata_pos + 2] == rffi.cast(rffi.SHORT, pc) + def create_snapshot(self, jitcode, pc, boxes): + array = self._list_of_boxes(boxes) + return Snapshot(combine_uint(jitcode.index, pc), array) def get_iter(self): return TraceIterator(self, 0, self._pos) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -70,7 +70,7 @@ self.copy_constants(self.registers_f, jitcode.constants_f, ConstFloat) self._result_argcode = 'v' # for resume.py operation - self.parent_resumedata_position = -1 + self.parent_snapshot = None # counter for unrolling inlined loops self.unroll_iterations = 1 diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -20,36 +20,6 @@ # because it needs to support optimize.py which encodes virtuals with # arbitrary cycles and also to compress the information -class Snapshot(object): - __slots__ = ('prev', 'boxes') - - def __init__(self, prev, boxes): - self.prev = prev - self.boxes = boxes - -class TopSnapshot(Snapshot): - __slots__ = ('vable_boxes',) - - def __init__(self, prev, boxes, vable_boxes): - Snapshot.__init__(self, prev, boxes) - self.vable_boxes = vable_boxes - -def combine_uint(index1, index2): - assert 0 <= index1 < 65536 - assert 0 <= index2 < 65536 - return index1 << 16 | index2 # it's ok to return signed here, - # we need only 32bit, but 64 is ok for now - -def unpack_uint(packed): - return (packed >> 16) & 0xffff, packed & 0xffff - -class FrameInfo(object): - __slots__ = ('prev', 'packed_jitcode_pc') - - def __init__(self, prev, jitcode_index, pc): - self.prev = prev - self.packed_jitcode_pc = combine_uint(jitcode_index, pc) - class VectorInfo(object): """ prev: the previous VectorInfo or None @@ -112,21 +82,19 @@ self.variable, self.location) -def _ensure_parent_resumedata(framestack, n, t): +def _ensure_parent_resumedata(framestack, n, t, snapshot): if n == 0: return - _ensure_parent_resumedata(framestack, n - 1, t) target = framestack[n] back = framestack[n - 1] - if target.parent_resumedata_position != -1: - if not we_are_translated(): - t.check_snapshot_jitcode_pc(back.jitcode, back.pc, - target.parent_resumedata_position) - t.record_snapshot_link(target.parent_resumedata_position) + if target.parent_snapshot: + snapshot.prev = target.parent_snapshot return - pos = t.record_snapshot(back.jitcode, back.pc, - back.get_list_of_active_boxes(True)) - target.parent_resumedata_position = pos + s = t.create_snapshot(back.jitcode, back.pc, + back.get_list_of_active_boxes(True)) + snapshot.prev = s + _ensure_parent_resumedata(framestack, n - 1, t, s) + target.parent_snapshot = s def capture_resumedata(framestack, virtualizable_boxes, virtualref_boxes, t): n = len(framestack) - 1 @@ -137,15 +105,15 @@ else: virtualizable_boxes = [] virtualref_boxes = virtualref_boxes[:] - pos = t.get_patchable_position() - t.record_list_of_boxes(virtualizable_boxes) - t.record_list_of_boxes(virtualref_boxes) if n >= 0: top = framestack[n] - _ensure_parent_resumedata(framestack, n, t) - t.record_snapshot(top.jitcode, top.pc, - top.get_list_of_active_boxes(False)) - t.patch_position_to_current(pos) + snapshot = t.create_top_snapshot(top.jitcode, top.pc, + top.get_list_of_active_boxes(False), virtualizable_boxes, + virtualref_boxes) + _ensure_parent_resumedata(framestack, n, 
t,snapshot) + else: + snapshot = t.create_empty_top_snapshot( + virtualizable_boxes, virtualref_boxes) return result PENDINGFIELDSTRUCT = lltype.Struct('PendingField', @@ -198,12 +166,14 @@ class NumberingState(object): def __init__(self, size): self.liveboxes = {} - self.current = [] + self.current = [0] * size + self._pos = 0 self.n = 0 self.v = 0 def append(self, item): - self.current.append(item) + self.current[self._pos] = item + self._pos += 1 class ResumeDataLoopMemo(object): @@ -256,14 +226,14 @@ # env numbering - def _number_boxes(self, iter, length, optimizer, state): + def _number_boxes(self, iter, arr, optimizer, state): """ Number boxes from one snapshot """ n = state.n v = state.v liveboxes = state.liveboxes - for i in range(length): - box = iter.next() + for item in arr: + box = iter.get(item) box = optimizer.get_box_replacement(box) if isinstance(box, Const): @@ -291,24 +261,25 @@ def number(self, optimizer, position, trace): snapshot_iter = trace.get_snapshot_iter(position) - state = NumberingState(snapshot_iter.length()) + state = NumberingState(snapshot_iter.size) - virtualizable_length = snapshot_iter._next() + arr = snapshot_iter.vable_array - state.append(rffi.cast(rffi.SHORT, virtualizable_length)) - self._number_boxes(snapshot_iter, virtualizable_length, optimizer, state) + state.append(rffi.cast(rffi.SHORT, len(arr))) + self._number_boxes(snapshot_iter, arr, optimizer, state) - n = snapshot_iter._next() + arr = snapshot_iter.vref_array + n = len(arr) assert not (n & 1) state.append(rffi.cast(rffi.SHORT, n >> 1)) - self._number_boxes(snapshot_iter, n, optimizer, state) + self._number_boxes(snapshot_iter, arr, optimizer, state) - while not snapshot_iter.done(): - size, jitcode_index, pc = snapshot_iter.get_size_jitcode_pc() + for snapshot in snapshot_iter.framestack: + jitcode_index, pc = snapshot_iter.unpack_jitcode_pc(snapshot) state.append(rffi.cast(rffi.SHORT, jitcode_index)) state.append(rffi.cast(rffi.SHORT, pc)) - self._number_boxes(snapshot_iter, size, optimizer, state) + self._number_boxes(snapshot_iter, snapshot.box_array, optimizer, state) numb = resumecode.create_numbering(state.current) return numb, state.liveboxes, state.v @@ -454,7 +425,7 @@ # make sure that nobody attached resume data to this guard yet assert not storage.rd_numb resume_position = self.guard_op.rd_resume_position - assert resume_position > 0 + assert resume_position >= 0 # count stack depth numb, liveboxes_from_env, v = self.memo.number(optimizer, resume_position, self.optimizer.trace) diff --git a/rpython/jit/metainterp/test/strategies.py b/rpython/jit/metainterp/test/strategies.py --- a/rpython/jit/metainterp/test/strategies.py +++ b/rpython/jit/metainterp/test/strategies.py @@ -19,7 +19,7 @@ self.index = index class Frame(object): - parent_resumedata_position = -1 + parent_snapshot = None def __init__(self, jitcode, pc, boxes): self.jitcode = jitcode diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -14,7 +14,7 @@ self.index = index class FakeFrame(object): - parent_resumedata_position = -1 + parent_snapshot = None def __init__(self, pc, jitcode, boxes): self.pc = pc @@ -27,14 +27,17 @@ def unpack_snapshot(t, op, pos): op.framestack = [] si = t.get_snapshot_iter(op.rd_resume_position) - virtualizables = si.get_list_of_boxes() - vref_boxes = si.get_list_of_boxes() + virtualizables = si.get_virtualizables() + vref_boxes = 
si.get_vref_boxes() while not si.done(): size, jitcode, pc = si.get_size_jitcode_pc() + if jitcode == 2**16 - 1: + break boxes = [] for i in range(size): boxes.append(si.next()) op.framestack.append(FakeFrame(JitCode(jitcode), pc, boxes)) + op.framestack.reverse() op.virtualizables = virtualizables op.vref_boxes = vref_boxes @@ -99,27 +102,27 @@ (i0, i1, i2), l, iter = self.unpack(t) pos = l[0].rd_resume_position snapshot_iter = iter.get_snapshot_iter(pos) - assert snapshot_iter.get_list_of_boxes() == [] - assert snapshot_iter.get_list_of_boxes() == [] + assert snapshot_iter.get_virtualizables() == [] + assert snapshot_iter.get_vref_boxes() == [] + size, jc_index, pc = snapshot_iter.get_size_jitcode_pc() + assert size == 2 + assert jc_index == 4 + assert pc == 3 + assert [snapshot_iter.next() for i in range(2)] == [i2, i2] size, jc_index, pc = snapshot_iter.get_size_jitcode_pc() assert size == 2 assert jc_index == 2 assert pc == 1 assert [snapshot_iter.next() for i in range(2)] == [i0, i1] + pos = l[1].rd_resume_position + snapshot_iter = iter.get_snapshot_iter(pos) + assert snapshot_iter.get_virtualizables() == [] + assert snapshot_iter.get_vref_boxes() == [] size, jc_index, pc = snapshot_iter.get_size_jitcode_pc() assert size == 2 assert jc_index == 4 assert pc == 3 assert [snapshot_iter.next() for i in range(2)] == [i2, i2] - pos = l[1].rd_resume_position - snapshot_iter = iter.get_snapshot_iter(pos) - assert snapshot_iter.get_list_of_boxes() == [] - assert snapshot_iter.get_list_of_boxes() == [] - size, jc_index, pc = snapshot_iter.get_size_jitcode_pc() - assert size == 2 - assert jc_index == 2 - assert pc == 1 - assert [snapshot_iter.next() for i in range(2)] == [i0, i1] @given(lists_of_operations()) def test_random_snapshot(self, lst): From pypy.commits at gmail.com Sat Mar 12 11:44:44 2016 From: pypy.commits at gmail.com (fijal) Date: Sat, 12 Mar 2016 08:44:44 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: fix tests Message-ID: <56e4477c.e6ebc20a.b86f.ffff911f@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82994:57e6db0d79ab Date: 2016-03-12 18:42 +0200 http://bitbucket.org/pypy/pypy/changeset/57e6db0d79ab/ Log: fix tests diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -27,17 +27,12 @@ def unpack_snapshot(t, op, pos): op.framestack = [] si = t.get_snapshot_iter(op.rd_resume_position) - virtualizables = si.get_virtualizables() - vref_boxes = si.get_vref_boxes() - while not si.done(): - size, jitcode, pc = si.get_size_jitcode_pc() - if jitcode == 2**16 - 1: - break - boxes = [] - for i in range(size): - boxes.append(si.next()) + virtualizables = si.unpack_array(si.vable_array) + vref_boxes = si.unpack_array(si.vref_array) + for snapshot in si.framestack: + jitcode, pc = si.unpack_jitcode_pc(snapshot) + boxes = si.unpack_array(snapshot.box_array) op.framestack.append(FakeFrame(JitCode(jitcode), pc, boxes)) - op.framestack.reverse() op.virtualizables = virtualizables op.vref_boxes = vref_boxes @@ -102,27 +97,26 @@ (i0, i1, i2), l, iter = self.unpack(t) pos = l[0].rd_resume_position snapshot_iter = iter.get_snapshot_iter(pos) - assert snapshot_iter.get_virtualizables() == [] - assert snapshot_iter.get_vref_boxes() == [] - size, jc_index, pc = snapshot_iter.get_size_jitcode_pc() - assert size == 2 + assert snapshot_iter.vable_array == [] + assert snapshot_iter.vref_array == [] + framestack 
= snapshot_iter.framestack + jc_index, pc = snapshot_iter.unpack_jitcode_pc(framestack[1]) assert jc_index == 4 assert pc == 3 - assert [snapshot_iter.next() for i in range(2)] == [i2, i2] - size, jc_index, pc = snapshot_iter.get_size_jitcode_pc() - assert size == 2 + assert snapshot_iter.unpack_array(framestack[1].box_array) == [i2, i2] + jc_index, pc = snapshot_iter.unpack_jitcode_pc(framestack[0]) assert jc_index == 2 assert pc == 1 - assert [snapshot_iter.next() for i in range(2)] == [i0, i1] + assert snapshot_iter.unpack_array(framestack[0].box_array) == [i0, i1] pos = l[1].rd_resume_position snapshot_iter = iter.get_snapshot_iter(pos) - assert snapshot_iter.get_virtualizables() == [] - assert snapshot_iter.get_vref_boxes() == [] - size, jc_index, pc = snapshot_iter.get_size_jitcode_pc() - assert size == 2 + framestack = snapshot_iter.framestack + assert snapshot_iter.vable_array == [] + assert snapshot_iter.vref_array == [] + jc_index, pc = snapshot_iter.unpack_jitcode_pc(framestack[1]) assert jc_index == 4 assert pc == 3 - assert [snapshot_iter.next() for i in range(2)] == [i2, i2] + assert snapshot_iter.unpack_array(framestack[1].box_array) == [i2, i2] @given(lists_of_operations()) def test_random_snapshot(self, lst): From pypy.commits at gmail.com Sat Mar 12 11:44:46 2016 From: pypy.commits at gmail.com (fijal) Date: Sat, 12 Mar 2016 08:44:46 -0800 (PST) Subject: [pypy-commit] pypy jit-leaner-frontend: fix rpython Message-ID: <56e4477e.0e2e1c0a.151a.79b8@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r82995:17c5928daa79 Date: 2016-03-12 18:43 +0200 http://bitbucket.org/pypy/pypy/changeset/17c5928daa79/ Log: fix rpython diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -119,6 +119,7 @@ args = [] for i in range(argnum): args.append(self._untag(self._next())) + descr_index = -1 if opwithdescr[opnum]: descr_index = self._next() if descr_index == -1 or rop.is_guard(opnum): From pypy.commits at gmail.com Sat Mar 12 12:38:12 2016 From: pypy.commits at gmail.com (stefanor) Date: Sat, 12 Mar 2016 09:38:12 -0800 (PST) Subject: [pypy-commit] pypy default: Support unlink() in the sandbox VFS Message-ID: <56e45404.83561c0a.ade56.ffff88d5@mx.google.com> Author: Stefano Rivera Branch: Changeset: r82996:a80669896291 Date: 2016-03-12 09:37 -0800 http://bitbucket.org/pypy/pypy/changeset/a80669896291/ Log: Support unlink() in the sandbox VFS Now that pyc files aren't disableable, we try to unlink() one during startup. EPERM is handled correctly, but runtime errors are unexpected. So, let's just reject all unlink()s. 
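
Illustrative sketch (names and path are invented, not part of the changeset): inside the sandboxed process this policy shows up as an ordinary OSError carrying EPERM, the case the log above says is already handled, so a caller can treat it the same way as a read-only filesystem:

    import errno, os

    def try_unlink(path):
        # Under the sandbox VFS the controlling process answers every
        # unlink() with EPERM, so "cannot remove" is an expected outcome.
        try:
            os.unlink(path)
            return True
        except OSError as e:
            if e.errno == errno.EPERM:
                return False   # rejected by the sandbox policy
            raise
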
diff --git a/rpython/translator/sandbox/sandlib.py b/rpython/translator/sandbox/sandlib.py --- a/rpython/translator/sandbox/sandlib.py +++ b/rpython/translator/sandbox/sandlib.py @@ -527,6 +527,9 @@ node = self.get_node(vpathname) return node.keys() + def do_ll_os__ll_os_unlink(self, vpathname): + raise OSError(errno.EPERM, "write access denied") + def do_ll_os__ll_os_getuid(self): return UID do_ll_os__ll_os_geteuid = do_ll_os__ll_os_getuid From pypy.commits at gmail.com Sat Mar 12 13:42:10 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 12 Mar 2016 10:42:10 -0800 (PST) Subject: [pypy-commit] pypy default: simplify ifdefs, add arm case Message-ID: <56e46302.02f0c20a.e0411.ffffab01@mx.google.com> Author: mattip Branch: Changeset: r82997:94c2361efe8b Date: 2016-03-12 20:29 +0200 http://bitbucket.org/pypy/pypy/changeset/94c2361efe8b/ Log: simplify ifdefs, add arm case diff --git a/rpython/rlib/rvmprof/src/vmprof_config.h b/rpython/rlib/rvmprof/src/vmprof_config.h --- a/rpython/rlib/rvmprof/src/vmprof_config.h +++ b/rpython/rlib/rvmprof/src/vmprof_config.h @@ -1,6 +1,15 @@ #define HAVE_SYS_UCONTEXT_H -#if defined(__FreeBSD__) || defined(__APPLE__) +#if defined(__FreeBSD__) #define PC_FROM_UCONTEXT uc_mcontext.mc_rip +#elif defined( __APPLE__) + #if ((ULONG_MAX) == (UINT_MAX)) + #define PC_FROM_UCONTEXT uc_mcontext->__ss.__eip + #else + #define PC_FROM_UCONTEXT uc_mcontext->__ss.__rip + #endif +#elif defined(__arm__) +#define PC_FROM_UCONTEXT uc_mcontext.arm_ip #else +/* linux, gnuc */ #define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_RIP] #endif diff --git a/rpython/rlib/rvmprof/src/vmprof_getpc.h b/rpython/rlib/rvmprof/src/vmprof_getpc.h --- a/rpython/rlib/rvmprof/src/vmprof_getpc.h +++ b/rpython/rlib/rvmprof/src/vmprof_getpc.h @@ -112,13 +112,8 @@ // PC_FROM_UCONTEXT in config.h. The only thing we need to do here, // then, is to do the magic call-unrolling for systems that support it. -#if defined(__linux) && defined(__i386) && defined(__GNUC__) -intptr_t GetPC(ucontext_t *signal_ucontext) { - return signal_ucontext->uc_mcontext.gregs[REG_EIP]; -} - -// Special case #2: Windows, which has to do something totally different. -#elif defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__) || defined(__MINGW32__) +// Special case Windows, which has to do something totally different. +#if defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__) || defined(__MINGW32__) // If this is ever implemented, probably the way to do it is to have // profiler.cc use a high-precision timer via timeSetEvent: // http://msdn2.microsoft.com/en-us/library/ms712713.aspx @@ -141,18 +136,10 @@ // Normal cases. If this doesn't compile, it's probably because // PC_FROM_UCONTEXT is the empty string. You need to figure out // the right value for your system, and add it to the list in -// configure.ac (or set it manually in your config.h). 
+// vmrpof_config.h #else intptr_t GetPC(ucontext_t *signal_ucontext) { -#ifdef __APPLE__ -#if ((ULONG_MAX) == (UINT_MAX)) - return (signal_ucontext->uc_mcontext->__ss.__eip); -#else - return (signal_ucontext->uc_mcontext->__ss.__rip); -#endif -#else return signal_ucontext->PC_FROM_UCONTEXT; // defined in config.h -#endif } #endif From pypy.commits at gmail.com Sat Mar 12 14:25:12 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 12 Mar 2016 11:25:12 -0800 (PST) Subject: [pypy-commit] pypy default: Test and fix: if we use create_link_pypy() on *non-nursery* young Message-ID: <56e46d18.e5ecc20a.f9fb4.ffffb8ce@mx.google.com> Author: Armin Rigo Branch: Changeset: r82998:82f5992c242d Date: 2016-03-12 20:24 +0100 http://bitbucket.org/pypy/pypy/changeset/82f5992c242d/ Log: Test and fix: if we use create_link_pypy() on *non-nursery* young objects, crash diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1654,15 +1654,15 @@ else: self.nursery_objects_shadows.clear() # + # visit the P and O lists from rawrefcount, if enabled. + if self.rrc_enabled: + self.rrc_minor_collection_free() + # # Walk the list of young raw-malloced objects, and either free # them or make them old. if self.young_rawmalloced_objects: self.free_young_rawmalloced_objects() # - # visit the P and O lists from rawrefcount, if enabled. - if self.rrc_enabled: - self.rrc_minor_collection_free() - # # All live nursery objects are out of the nursery or pinned inside # the nursery. Create nursery barriers to protect the pinned objects, # fill the rest of the nursery with zeros and reset the current nursery diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py --- a/rpython/memory/gc/test/test_rawrefcount.py +++ b/rpython/memory/gc/test/test_rawrefcount.py @@ -29,7 +29,8 @@ assert count2 - count1 == expected_trigger def _rawrefcount_pair(self, intval, is_light=False, is_pyobj=False, - create_old=False, create_immortal=False): + create_old=False, create_immortal=False, + force_external=False): if is_light: rc = REFCNT_FROM_PYPY_LIGHT else: @@ -40,7 +41,13 @@ if create_immortal: p1 = lltype.malloc(S, immortal=True) else: - p1 = self.malloc(S) + saved = self.gc.nonlarge_max + try: + if force_external: + self.gc.nonlarge_max = 1 + p1 = self.malloc(S) + finally: + self.gc.nonlarge_max = saved p1.x = intval if create_immortal: self.consider_constant(p1) @@ -220,9 +227,10 @@ def test_pypy_nonlight_dies_quickly_old(self): self.test_pypy_nonlight_dies_quickly(old=True) - def test_pyobject_pypy_link_dies_on_minor_collection(self): + @py.test.mark.parametrize('external', [False, True]) + def test_pyobject_pypy_link_dies_on_minor_collection(self, external): p1, p1ref, r1, r1addr, check_alive = ( - self._rawrefcount_pair(42, is_pyobj=True)) + self._rawrefcount_pair(42, is_pyobj=True, force_external=external)) check_alive(0) r1.ob_refcnt += 1 # the pyobject is kept alive self._collect(major=False) @@ -231,9 +239,12 @@ self.gc.check_no_more_rawrefcount_state() lltype.free(r1, flavor='raw') - def test_pyobject_dies(self, old=False): + @py.test.mark.parametrize('old,external', [ + (False, False), (True, False), (False, True)]) + def test_pyobject_dies(self, old, external): p1, p1ref, r1, r1addr, check_alive = ( - self._rawrefcount_pair(42, is_pyobj=True, create_old=old)) + self._rawrefcount_pair(42, is_pyobj=True, create_old=old, + force_external=external)) check_alive(0) if old: 
self._collect(major=False) @@ -247,9 +258,12 @@ self.gc.check_no_more_rawrefcount_state() lltype.free(r1, flavor='raw') - def test_pyobject_survives_from_obj(self, old=False): + @py.test.mark.parametrize('old,external', [ + (False, False), (True, False), (False, True)]) + def test_pyobject_survives_from_obj(self, old, external): p1, p1ref, r1, r1addr, check_alive = ( - self._rawrefcount_pair(42, is_pyobj=True, create_old=old)) + self._rawrefcount_pair(42, is_pyobj=True, create_old=old, + force_external=external)) check_alive(0) self.stackroots.append(p1) self._collect(major=False) @@ -269,11 +283,6 @@ self.gc.check_no_more_rawrefcount_state() lltype.free(r1, flavor='raw') - def test_pyobject_dies_old(self): - self.test_pyobject_dies(old=True) - def test_pyobject_survives_from_obj_old(self): - self.test_pyobject_survives_from_obj(old=True) - def test_pyobject_attached_to_prebuilt_obj(self): p1, p1ref, r1, r1addr, check_alive = ( self._rawrefcount_pair(42, create_immortal=True)) From pypy.commits at gmail.com Sat Mar 12 17:25:20 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sat, 12 Mar 2016 14:25:20 -0800 (PST) Subject: [pypy-commit] pypy guard-compatible: a new decorator elidable_compatible, with a docstring how I want things to work Message-ID: <56e49750.85371c0a.e566.ffffe5ee@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83000:1fb0c5a8a04e Date: 2016-03-12 22:38 +0100 http://bitbucket.org/pypy/pypy/changeset/1fb0c5a8a04e/ Log: a new decorator elidable_compatible, with a docstring how I want things to work diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -93,6 +93,8 @@ * force_virtualizable - a performance hint to force the virtualizable early (useful e.g. for python generators that are going to be read later anyway) + * promote_compatible - an internal hint used in the implementation of + elidable_compatible """ return x @@ -126,6 +128,42 @@ def promote_string(x): return hint(x, promote_string=True) +def elidable_compatible(): + """ func must be a function of at least one argument. That first argument + must be pointer-like (XXX for now?) The behaviour of @elidable_compatible + is as follows: + + it turns a call to func like this: + res = func(a1, *args) + + into something that behaves similar to this: + promote(a1) + res = func(a1, *args) + + However, the promote of a1 is not implemented with a guard_value. Instead, + it uses guard_compatible, which is less strict and causes less failures. + More precisely, executing the guard_compatible(x) will only fail if + func(x, *args) != func(a1, *args) + + In this, a1 must not be None. + + This works particularly well if func maps many different values a1 to a + single value res. If func is an injection, there is no reason to not simply + use a regular promote. + + XXX what happens if the *args are not constant? 
+ XXX we need a better name + """ + def decorate(func): + elidable(func) + def wrapped_func(x, *args): + assert x is not None + x = hint(x, promote_compatible=True) + return func(x, *args) + return wrapped_func + return decorate + + def dont_look_inside(func): """ Make sure the JIT does not trace inside decorated function (it becomes a call instead) diff --git a/rpython/rlib/test/test_jit.py b/rpython/rlib/test/test_jit.py --- a/rpython/rlib/test/test_jit.py +++ b/rpython/rlib/test/test_jit.py @@ -5,7 +5,7 @@ from rpython.rlib.jit import (hint, we_are_jitted, JitDriver, elidable_promote, JitHintError, oopspec, isconstant, conditional_call, elidable, unroll_safe, dont_look_inside, - enter_portal_frame, leave_portal_frame) + enter_portal_frame, leave_portal_frame, elidable_compatible) from rpython.rlib.rarithmetic import r_uint from rpython.rtyper.test.tool import BaseRtypingTest from rpython.rtyper.lltypesystem import lltype @@ -143,6 +143,27 @@ res = self.interpret(f, [2]) assert res == 5 + def test_elidable_promote(self): + class A(object): + pass + a1 = A() + a1.x = 1 + a2 = A() + a2.x = 2 + @elidable_compatible() + def g(a): + return a.x + def f(x): + if x == 1: + a = a1 + else: + a = a2 + return g(a) + res = self.interpret(f, [1]) + assert res == 1 + res = self.interpret(f, [4]) + assert res == 2 + def test_elidable_promote_args(self): @elidable_promote(promote_args='0') def g(func, x): From pypy.commits at gmail.com Sat Mar 12 17:25:22 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sat, 12 Mar 2016 14:25:22 -0800 (PST) Subject: [pypy-commit] pypy guard-compatible: support for promote_compatible in the codewriter Message-ID: <56e49752.918e1c0a.36d33.ffffdce2@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83001:889f0fd17c1a Date: 2016-03-12 22:47 +0100 http://bitbucket.org/pypy/pypy/changeset/889f0fd17c1a/ Log: support for promote_compatible in the codewriter diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -591,6 +591,14 @@ if hints.get('force_no_const'): # for tests only assert getkind(op.args[0].concretetype) == 'int' return SpaceOperation('int_same_as', [op.args[0]], op.result) + if hints.get('promote_compatible') and op.args[0].concretetype is not lltype.Void: + kind = getkind(op.args[0].concretetype) + assert kind == "ref" # for now + op0 = SpaceOperation('-live-', [], None) + op1 = SpaceOperation('%s_guard_compatible' % kind, [op.args[0]], None) + # the special return value None forces op.result to be considered + # equal to op.args[0] + return [op0, op1, None] log.WARNING('ignoring hint %r at %r' % (hints, self.graph)) def _rewrite_raw_malloc(self, op, name, args): diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -941,6 +941,23 @@ assert block.operations[1].result is None assert block.exits[0].args == [v1] +def test_guard_compatible(): + S = lltype.GcStruct('S', ('x', lltype.Char)) # some ptr type + v1 = varoftype(lltype.Ptr(S)) + v2 = varoftype(lltype.Ptr(S)) + op = SpaceOperation('hint', + [v1, Constant({'promote_compatible': True}, lltype.Void)], + v2) + oplist = Transformer().rewrite_operation(op) + op0, op1, op2 = oplist + assert op0.opname == '-live-' + assert op0.args == [] + assert op1.opname == 'ref_guard_compatible' + assert op1.args == 
[v1] + assert op1.result is None + assert op2 is None + + def test_jit_merge_point_1(): class FakeJitDriverSD: index = 42 From pypy.commits at gmail.com Sat Mar 12 17:25:18 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sat, 12 Mar 2016 14:25:18 -0800 (PST) Subject: [pypy-commit] pypy guard-compatible: a branch to explore implementing a guard_compatible Message-ID: <56e4974e.e213c20a.6aca7.ffffed23@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r82999:a6612658cd4d Date: 2016-03-12 22:37 +0100 http://bitbucket.org/pypy/pypy/changeset/a6612658cd4d/ Log: a branch to explore implementing a guard_compatible From pypy.commits at gmail.com Sat Mar 12 17:25:25 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sat, 12 Mar 2016 14:25:25 -0800 (PST) Subject: [pypy-commit] pypy guard-compatible: in-progress: tracing support for guard_compatible, also some sketched code for Message-ID: <56e49755.d30e1c0a.3649a.ffffe39e@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83003:818cabf99bbf Date: 2016-03-12 23:02 +0100 http://bitbucket.org/pypy/pypy/changeset/818cabf99bbf/ Log: in-progress: tracing support for guard_compatible, also some sketched code for how failures and checking of the conditions would work. needs proper integration with the backends diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -50,7 +50,7 @@ self.operations = [] for op in operations: opnum = op.getopnum() - if opnum == rop.GUARD_VALUE: + if opnum == rop.GUARD_VALUE or opnum == rop.GUARD_COMPATIBLE: # we don't care about the value 13 here, because we gonna # fish it from the extra slot on frame anyway op.getdescr().make_a_counter_per_value(op, 13) @@ -1271,6 +1271,12 @@ if self.lltrace.invalid: self.fail_guard(descr) + def execute_guard_compatible(self, descr, arg1, arg2): + if arg1 != arg2: + if descr.fake_check_against_list(self.cpu, arg1): + return + self.fail_guard(descr, extra_value=arg1) + def execute_int_add_ovf(self, _, x, y): try: z = ovfcheck(x + y) diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -582,6 +582,10 @@ def bhimpl_str_guard_value(a, i, d): return a + @arguments("r") + def bhimpl_ref_guard_compatible(a): + pass + @arguments("self", "i") def bhimpl_int_push(self, a): self.tmpreg_i = a diff --git a/rpython/jit/metainterp/compatible.py b/rpython/jit/metainterp/compatible.py --- a/rpython/jit/metainterp/compatible.py +++ b/rpython/jit/metainterp/compatible.py @@ -1,5 +1,51 @@ from rpython.jit.metainterp.history import newconst +def do_call(cpu, argboxes, descr): + from rpython.jit.metainterp.history import INT, REF, FLOAT, VOID + from rpython.jit.metainterp.blackhole import NULL + # XXX XXX almost copy from executor.py + rettype = descr.get_result_type() + # count the number of arguments of the different types + count_i = count_r = count_f = 0 + for i in range(1, len(argboxes)): + type = argboxes[i].type + if type == INT: count_i += 1 + elif type == REF: count_r += 1 + elif type == FLOAT: count_f += 1 + # allocate lists for each type that has at least one argument + if count_i: args_i = [0] * count_i + else: args_i = None + if count_r: args_r = [NULL] * count_r + else: args_r = None + if count_f: args_f = [longlong.ZEROF] * count_f + else: args_f = None + # fill in the lists + count_i = count_r = 
count_f = 0 + for i in range(1, len(argboxes)): + box = argboxes[i] + if box.type == INT: + args_i[count_i] = box.getint() + count_i += 1 + elif box.type == REF: + args_r[count_r] = box.getref_base() + count_r += 1 + elif box.type == FLOAT: + args_f[count_f] = box.getfloatstorage() + count_f += 1 + # get the function address as an integer + func = argboxes[0].getint() + # do the call using the correct function from the cpu + if rettype == INT: + return newconst(cpu.bh_call_i(func, args_i, args_r, args_f, descr)) + if rettype == REF: + return newconst(cpu.bh_call_r(func, args_i, args_r, args_f, descr)) + if rettype == FLOAT: + return newconst(cpu.bh_call_f(func, args_i, args_r, args_f, descr)) + if rettype == VOID: + # don't even need to call the void function, result will always match + return None + raise AssertionError("bad rettype") + class CompatibilityCondition(object): """ A collections of conditions that an object needs to fulfil. """ def __init__(self, ptr): @@ -8,3 +54,14 @@ def record_pure_call(self, op, res): self.pure_call_conditions.append((op, res)) + + def check_compat(self, cpu, ref): + for op, correct_res in self.pure_call_conditions: + calldescr = op.getdescr() + # change exactly the first argument + arglist = op.getarglist() + arglist[1] = newconst(ref) + res = do_call(cpu, arglist, calldescr) + if not res.same_constant(correct_res): + return False + return True diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -810,7 +810,7 @@ metainterp.box_names_memo) def make_a_counter_per_value(self, guard_value_op, index): - assert guard_value_op.getopnum() == rop.GUARD_VALUE + assert guard_value_op.getopnum() in (rop.GUARD_VALUE, rop.GUARD_COMPATIBLE) box = guard_value_op.getarg(0) if box.type == history.INT: ty = self.TY_INT @@ -929,6 +929,11 @@ resumedescr = ResumeGuardCopiedExcDescr() else: resumedescr = ResumeGuardExcDescr() + elif opnum == rop.GUARD_COMPATIBLE: + if copied_guard: + import pdb; pdb.set_trace() + else: + resumedescr = GuardCompatibleDescr() else: if copied_guard: resumedescr = ResumeGuardCopiedDescr() @@ -1078,6 +1083,50 @@ metainterp.retrace_needed(new_trace, info) return None +class GuardCompatibleDescr(ResumeGuardDescr): + """ A descr for guard_compatible. All the conditions that a value should + fulfil need to be attached to this descr by optimizeopt. 
""" + + def __init__(self): + # XXX for now - in the end this would be in assembler + self._checked_ptrs = [] + self._compatibility_conditions = None + + def handle_fail(self, deadframe, metainterp_sd, jitdriver_sd): + index = intmask(self.status >> self.ST_SHIFT) + typetag = intmask(self.status & self.ST_TYPE_MASK) + assert typetag == self.TY_REF # for now + refval = metainterp_sd.cpu.get_value_direct(deadframe, 'r', index) + if self.is_compatible(metainterp_sd.cpu, refval): + from rpython.jit.metainterp.blackhole import resume_in_blackhole + # next time it'll pass XXX use new cpu thingie here + self._checked_ptrs.append(history.newconst(refval)) + resume_in_blackhole(metainterp_sd, jitdriver_sd, self, deadframe) + else: + # a real failure + return ResumeGuardDescr.handle_fail(self, deadframe, metainterp_sd, jitdriver_sd) + + def fake_check_against_list(self, cpu, ref): + # XXX should be in assembler + const = history.newconst(ref) + if self._compatibility_conditions: + for i in range(len(self._checked_ptrs)): + if const.same_constant(self._checked_ptrs[i]): + return True + return False + + def is_compatible(self, cpu, ref): + const = history.newconst(ref) + if self._compatibility_conditions: + for i in range(len(self._checked_ptrs)): + if const.same_constant(self._checked_ptrs[i]): + return True + if self._compatibility_conditions.check_compat(cpu, ref): + self._checked_ptrs.append(const) + return True + return False + return True # no conditions, everything works + # ____________________________________________________________ memory_error = MemoryError() diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1200,6 +1200,16 @@ opimpl_float_guard_value = _opimpl_guard_value @arguments("box", "orgpc") + def opimpl_ref_guard_compatible(self, box, orgpc): + if isinstance(box, Const): + return box # no promotion needed, already a Const + else: + promoted_box = executor.constant_from_op(box) + self.metainterp.generate_guard(rop.GUARD_COMPATIBLE, box, [promoted_box], + resumepc=orgpc) + # importantly, there is no replace_box here! 
+ + @arguments("box", "orgpc") def opimpl_guard_class(self, box, orgpc): clsbox = self.cls_of_box(box) if not self.metainterp.heapcache.is_class_known(box): diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1049,6 +1049,7 @@ 'GUARD_NOT_FORCED_2/0d/n', # same as GUARD_NOT_FORCED, but for finish() 'GUARD_NOT_INVALIDATED/0d/n', 'GUARD_FUTURE_CONDITION/0d/n', + 'GUARD_COMPATIBLE/2d/n', # is removable, may be patched by an optimization '_GUARD_LAST', # ----- end of guard operations ----- diff --git a/rpython/jit/metainterp/test/test_compatible.py b/rpython/jit/metainterp/test/test_compatible.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/test/test_compatible.py @@ -0,0 +1,35 @@ +from rpython.jit.metainterp.test.support import LLJitMixin +from rpython.rlib import jit +from rpython.rtyper.lltypesystem import lltype, rffi + + +class TestCompatible(LLJitMixin): + def test_simple(self): + S = lltype.GcStruct('S', ('x', lltype.Signed)) + p1 = lltype.malloc(S) + p1.x = 5 + + p2 = lltype.malloc(S) + p2.x = 5 + + p3 = lltype.malloc(S) + p3.x = 6 + driver = jit.JitDriver(greens = [], reds = ['n', 'x']) + @jit.elidable_compatible() + def g(s): + return s.x + + def f(n, x): + while n > 0: + driver.can_enter_jit(n=n, x=x) + driver.jit_merge_point(n=n, x=x) + n -= g(x) + + def main(): + f(100, p1) + f(100, p2) + f(100, p3) + + self.meta_interp(main, []) + # XXX check number of bridges + From pypy.commits at gmail.com Sat Mar 12 17:25:27 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sat, 12 Mar 2016 14:25:27 -0800 (PST) Subject: [pypy-commit] pypy guard-compatible: improve test a bit Message-ID: <56e49757.06b01c0a.ea1de.ffffe0d4@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83004:d2ba8721ddfc Date: 2016-03-12 23:24 +0100 http://bitbucket.org/pypy/pypy/changeset/d2ba8721ddfc/ Log: improve test a bit diff --git a/rpython/jit/metainterp/optimizeopt/test/test_compatible.py b/rpython/jit/metainterp/optimizeopt/test/test_compatible.py --- a/rpython/jit/metainterp/optimizeopt/test/test_compatible.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_compatible.py @@ -39,18 +39,27 @@ def test_guard_compatible_call_pure(self): call_pure_results = { (ConstInt(123), ConstPtr(self.myptr)): ConstInt(5), + (ConstInt(124), ConstPtr(self.myptr)): ConstInt(7), } ops = """ [p1] guard_compatible(p1, ConstPtr(myptr)) [] i3 = call_pure_i(123, p1, descr=plaincalldescr) escape_n(i3) + i5 = call_pure_i(124, p1, descr=plaincalldescr) + escape_n(i5) jump(ConstPtr(myptr)) """ expected = """ [p1] guard_compatible(p1, ConstPtr(myptr)) [] escape_n(5) + escape_n(7) jump(ConstPtr(myptr)) """ self.optimize_loop(ops, expected, call_pure_results=call_pure_results) + # whitebox-test the guard_compatible descr a bit + descr = self.loop.operations[1].getdescr() + assert descr._compatibility_conditions is not None + assert descr._compatibility_conditions.known_valid.same_constant(ConstPtr(self.myptr)) + assert len(descr._compatibility_conditions.pure_call_conditions) == 2 From pypy.commits at gmail.com Sat Mar 12 17:25:23 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sat, 12 Mar 2016 14:25:23 -0800 (PST) Subject: [pypy-commit] pypy guard-compatible: some optimization support for guard_compatible, including storing which Message-ID: <56e49753.45d61c0a.1ce32.ffffdd9a@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: 
r83002:03482b008a97 Date: 2016-03-12 23:00 +0100 http://bitbucket.org/pypy/pypy/changeset/03482b008a97/ Log: some optimization support for guard_compatible, including storing which elidable functions were applied to its argument diff --git a/rpython/jit/metainterp/compatible.py b/rpython/jit/metainterp/compatible.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/compatible.py @@ -0,0 +1,10 @@ +from rpython.jit.metainterp.history import newconst + +class CompatibilityCondition(object): + """ A collections of conditions that an object needs to fulfil. """ + def __init__(self, ptr): + self.known_valid = ptr + self.pure_call_conditions = [] + + def record_pure_call(self, op, res): + self.pure_call_conditions.append((op, res)) diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py --- a/rpython/jit/metainterp/optimizeopt/info.py +++ b/rpython/jit/metainterp/optimizeopt/info.py @@ -35,7 +35,8 @@ class PtrInfo(AbstractInfo): - _attrs_ = () + _attrs_ = ('_compatibility_conditions', ) + _compatibility_conditions = None def is_nonnull(self): return False @@ -785,7 +786,7 @@ targetbox, CONST_0, offsetbox, lgt, mode) - + class FloatConstInfo(AbstractInfo): def __init__(self, const): self._const = const diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -713,6 +713,12 @@ # if op.getopnum() == rop.GUARD_VALUE: op = self._maybe_replace_guard_value(op, descr) + elif op.getopnum() == rop.GUARD_COMPATIBLE: + # XXX randomly stuff things into the descr + info = self.getptrinfo(op.getarg(0)) + assert isinstance(descr, compile.GuardCompatibleDescr) + if info is not None and info._compatibility_conditions: + descr._compatibility_conditions = info._compatibility_conditions return op def _maybe_replace_guard_value(self, op, descr): diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -4,6 +4,7 @@ from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method from rpython.jit.metainterp.optimizeopt.shortpreamble import PreambleOp from rpython.jit.metainterp.optimize import SpeculativeError +from rpython.jit.metainterp.compatible import CompatibilityCondition class RecentPureOps(object): @@ -130,6 +131,25 @@ return recentops def optimize_CALL_PURE_I(self, op): + # Step 0: check if first argument is subject of guard_compatible + # XXX maybe don't do this with absolutely *all* call_pure functions + # that have a guard_compatible ptr as first arg + if op.numargs() > 1: + arg1 = self.get_box_replacement(op.getarg(1)) + if arg1.type == 'r': + info = self.getptrinfo(arg1) + ccond = info._compatibility_conditions + if info and ccond: + # it's subject to guard_compatible + copied_op = op.copy() + copied_op.setarg(1, ccond.known_valid) + result = self._can_optimize_call_pure(copied_op) + if result is not None: + self.make_constant(op, result) + self.last_emitted_operation = REMOVED + ccond.record_pure_call(copied_op, result) + return + # Step 1: check if all arguments are constant for arg in op.getarglist(): self.optimizer.force_box(arg) @@ -186,6 +206,29 @@ return True return False + def optimize_GUARD_COMPATIBLE(self, op): + arg0 = self.get_box_replacement(op.getarg(0)) + if arg0.is_constant(): + # already subject of guard_value + 
return + assert arg0.type == 'r' + info = self.getptrinfo(arg0) + if info: + if info.is_virtual(): + raise InvalidLoop("guard_compatible of a virtual") + else: + self.make_nonnull(arg0) + info = self.getptrinfo(arg0) + if info._compatibility_conditions: + # seen a previous guard_compatible + # check that it's the same previous constant + assert info._compatibility_conditions.known_valid.same_constant(op.getarg(1)) + return + else: + info._compatibility_conditions = CompatibilityCondition( + op.getarg(1)) + self.emit_operation(op) + def optimize_GUARD_NO_EXCEPTION(self, op): if self.last_emitted_operation is REMOVED: # it was a CALL_PURE that was killed; so we also kill the diff --git a/rpython/jit/metainterp/optimizeopt/test/test_compatible.py b/rpython/jit/metainterp/optimizeopt/test/test_compatible.py new file mode 100644 --- /dev/null +++ b/rpython/jit/metainterp/optimizeopt/test/test_compatible.py @@ -0,0 +1,56 @@ +from rpython.jit.metainterp.optimizeopt.test.test_util import ( + LLtypeMixin) +from rpython.jit.metainterp.optimizeopt.test.test_optimizebasic import ( + BaseTestBasic) +from rpython.jit.metainterp.history import ConstInt, ConstPtr + +class TestCompatible(BaseTestBasic, LLtypeMixin): + + enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap" + + def test_guard_compatible_after_guard_value(self): + ops = """ + [p1] + guard_value(p1, ConstPtr(myptr)) [] + guard_compatible(p1, ConstPtr(myptr)) [] + jump(ConstPtr(myptr)) + """ + expected = """ + [p1] + guard_value(p1, ConstPtr(myptr)) [] + jump(ConstPtr(myptr)) + """ + self.optimize_loop(ops, expected) + + def test_guard_compatible_after_guard_compatible(self): + ops = """ + [p1] + guard_compatible(p1, ConstPtr(myptr)) [] + guard_compatible(p1, ConstPtr(myptr)) [] + jump(ConstPtr(myptr)) + """ + expected = """ + [p1] + guard_compatible(p1, ConstPtr(myptr)) [] + jump(ConstPtr(myptr)) + """ + self.optimize_loop(ops, expected) + + def test_guard_compatible_call_pure(self): + call_pure_results = { + (ConstInt(123), ConstPtr(self.myptr)): ConstInt(5), + } + ops = """ + [p1] + guard_compatible(p1, ConstPtr(myptr)) [] + i3 = call_pure_i(123, p1, descr=plaincalldescr) + escape_n(i3) + jump(ConstPtr(myptr)) + """ + expected = """ + [p1] + guard_compatible(p1, ConstPtr(myptr)) [] + escape_n(5) + jump(ConstPtr(myptr)) + """ + self.optimize_loop(ops, expected, call_pure_results=call_pure_results) From pypy.commits at gmail.com Sat Mar 12 17:42:34 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 12 Mar 2016 14:42:34 -0800 (PST) Subject: [pypy-commit] pypy cpyext-ext: charp2str does not accept const char * Message-ID: <56e49b5a.4d0d1c0a.5331b.ffffde6e@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83005:85120bcdf24e Date: 2016-03-13 00:41 +0200 http://bitbucket.org/pypy/pypy/changeset/85120bcdf24e/ Log: charp2str does not accept const char * diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py --- a/pypy/module/cpyext/methodobject.py +++ b/pypy/module/cpyext/methodobject.py @@ -62,7 +62,7 @@ class W_PyCFunctionObject(W_Root): def __init__(self, space, ml, w_self, w_module=None): self.ml = ml - self.name = rffi.charp2str(self.ml.c_ml_name) + self.name = rffi.charp2str(rffi.cast(rffi.CCHARP,self.ml.c_ml_name)) self.w_self = w_self self.w_module = w_module @@ -108,7 +108,7 @@ def get_doc(self, space): doc = self.ml.c_ml_doc if doc: - return space.wrap(rffi.charp2str(doc)) + return space.wrap(rffi.charp2str(rffi.cast(rffi.CCHARP,doc))) else: return space.w_None @@ -118,7 
+118,7 @@ def __init__(self, space, ml, w_type): self.space = space self.ml = ml - self.name = rffi.charp2str(ml.c_ml_name) + self.name = rffi.charp2str(rffi.cast(rffi.CCHARP, ml.c_ml_name)) self.w_objclass = w_type def __repr__(self): @@ -137,7 +137,7 @@ def __init__(self, space, ml, w_type): self.space = space self.ml = ml - self.name = rffi.charp2str(ml.c_ml_name) + self.name = rffi.charp2str(rffi.cast(rffi.CCHARP, ml.c_ml_name)) self.w_objclass = w_type def __repr__(self): @@ -328,8 +328,8 @@ break if name == "__methods__": method_list_w.append( - space.wrap(rffi.charp2str(method.c_ml_name))) - elif rffi.charp2str(method.c_ml_name) == name: # XXX expensive copy + space.wrap(rffi.charp2str(rffi.cast(rffi.CCHARP, method.c_ml_name)))) + elif rffi.charp2str(rffi.cast(rffi.CCHARP, method.c_ml_name)) == name: # XXX expensive copy return space.wrap(W_PyCFunctionObject(space, method, w_obj)) if name == "__methods__": return space.newlist(method_list_w) diff --git a/pypy/module/cpyext/modsupport.py b/pypy/module/cpyext/modsupport.py --- a/pypy/module/cpyext/modsupport.py +++ b/pypy/module/cpyext/modsupport.py @@ -82,7 +82,7 @@ method = methods[i] if not method.c_ml_name: break - methodname = rffi.charp2str(method.c_ml_name) + methodname = rffi.charp2str(rffi.cast(rffi.CCHARP, method.c_ml_name)) flags = rffi.cast(lltype.Signed, method.c_ml_flags) if w_type is None: From pypy.commits at gmail.com Sun Mar 13 00:51:28 2016 From: pypy.commits at gmail.com (mattip) Date: Sat, 12 Mar 2016 21:51:28 -0800 (PST) Subject: [pypy-commit] pypy default: fix for linux32, macos Message-ID: <56e4ffe0.13821c0a.8d51a.2ab5@mx.google.com> Author: mattip Branch: Changeset: r83006:c4d20dd5b286 Date: 2016-03-13 07:45 +0200 http://bitbucket.org/pypy/pypy/changeset/c4d20dd5b286/ Log: fix for linux32, macos diff --git a/rpython/rlib/rvmprof/src/vmprof_config.h b/rpython/rlib/rvmprof/src/vmprof_config.h --- a/rpython/rlib/rvmprof/src/vmprof_config.h +++ b/rpython/rlib/rvmprof/src/vmprof_config.h @@ -9,6 +9,8 @@ #endif #elif defined(__arm__) #define PC_FROM_UCONTEXT uc_mcontext.arm_ip +#elif defined(__linux) && defined(__i386) && defined(__GNUC__) +#define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_EIP] #else /* linux, gnuc */ #define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_RIP] diff --git a/rpython/rlib/rvmprof/src/vmprof_getpc.h b/rpython/rlib/rvmprof/src/vmprof_getpc.h --- a/rpython/rlib/rvmprof/src/vmprof_getpc.h +++ b/rpython/rlib/rvmprof/src/vmprof_getpc.h @@ -43,9 +43,6 @@ #ifndef BASE_GETPC_H_ #define BASE_GETPC_H_ - -#include "vmprof_config.h" - // On many linux systems, we may need _GNU_SOURCE to get access to // the defined constants that define the register we want to see (eg // REG_EIP). Note this #define must come first! @@ -58,6 +55,8 @@ #define _XOPEN_SOURCE 500 #endif +#include "vmprof_config.h" + #include // for memcmp #if defined(HAVE_SYS_UCONTEXT_H) #include From pypy.commits at gmail.com Sun Mar 13 06:04:00 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 13 Mar 2016 03:04:00 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: Improve the debug checks. Shows an issue with string_alloc() in cpyext Message-ID: <56e53b10.6718c20a.b2f2.7df7@mx.google.com> Author: Armin Rigo Branch: cpyext-ext Changeset: r83007:54e7251ae418 Date: 2016-03-13 11:03 +0100 http://bitbucket.org/pypy/pypy/changeset/54e7251ae418/ Log: Improve the debug checks. 
Shows an issue with string_alloc() in cpyext diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ b/rpython/rlib/rawrefcount.py @@ -27,12 +27,13 @@ """NOT_RPYTHON: set up rawrefcount with the GC. This is only used for tests; it should not be called at all during translation. """ - global _p_list, _o_list, _adr2pypy, _pypy2ob + global _p_list, _o_list, _adr2pypy, _pypy2ob, _ob_set global _d_list, _dealloc_trigger_callback _p_list = [] _o_list = [] _adr2pypy = [None] _pypy2ob = {} + _ob_set = set() _d_list = [] _dealloc_trigger_callback = dealloc_trigger_callback @@ -40,19 +41,23 @@ "NOT_RPYTHON: a link where the PyPy object contains some or all the data" #print 'create_link_pypy\n\t%s\n\t%s' % (p, ob) assert p not in _pypy2ob - #assert not ob.c_ob_pypy_link + assert ob._obj not in _ob_set + assert not ob.c_ob_pypy_link ob.c_ob_pypy_link = _build_pypy_link(p) _pypy2ob[p] = ob _p_list.append(ob) + _ob_set.add(ob._obj) def create_link_pyobj(p, ob): """NOT_RPYTHON: a link where the PyObject contains all the data. from_obj() will not work on this 'p'.""" #print 'create_link_pyobj\n\t%s\n\t%s' % (p, ob) assert p not in _pypy2ob - #assert not ob.c_ob_pypy_link + assert ob._obj not in _ob_set + assert not ob.c_ob_pypy_link ob.c_ob_pypy_link = _build_pypy_link(p) _o_list.append(ob) + _ob_set.add(ob._obj) def from_obj(OB_PTR_TYPE, p): "NOT_RPYTHON" From pypy.commits at gmail.com Sun Mar 13 08:44:52 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sun, 13 Mar 2016 05:44:52 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: a new CPU interface for guard_compatible, a test for it and an llgraph Message-ID: <56e560c4.12871c0a.3fab.ffff9a17@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83008:8969fdd0d8d4 Date: 2016-03-13 13:43 +0100 http://bitbucket.org/pypy/pypy/changeset/8969fdd0d8d4/ Log: a new CPU interface for guard_compatible, a test for it and an llgraph implementation diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -470,6 +470,12 @@ assert deadframe._saved_data is not None return deadframe._saved_data + def grow_guard_compatible_switch(self, descr, ref): + if not hasattr(descr, '_guard_compatible_llgraph_lst'): + descr._guard_compatible_llgraph_lst = [] + descr._guard_compatible_llgraph_lst.append(ref) + + # ------------------------------------------------------------ def calldescrof(self, FUNC, ARGS, RESULT, effect_info): @@ -1273,8 +1279,11 @@ def execute_guard_compatible(self, descr, arg1, arg2): if arg1 != arg2: - if descr.fake_check_against_list(self.cpu, arg1): - return + if hasattr(descr, '_guard_compatible_llgraph_lst'): + lst = descr._guard_compatible_llgraph_lst + for ref in lst: + if ref == arg1: + return self.fail_guard(descr, extra_value=arg1) def execute_int_add_ovf(self, _, x, y): diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -158,6 +158,14 @@ """ pass + def grow_guard_compatible_switch(self, guarddescr, gcref): + """ This method is called to add another case to a guard_compatible. + guard_compatible starts like a guard_value, but can grow to check more + cases. The guard should only fail if the argument is unequal to all the + cases added so far. 
+ """ + raise NotImplementedError + def sizeof(self, S): raise NotImplementedError diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -5,7 +5,7 @@ BasicFinalDescr, JitCellToken, TargetToken, ConstInt, ConstPtr, - ConstFloat, Const) + ConstFloat, Const, newconst) from rpython.jit.metainterp.resoperation import ResOperation, rop, InputArgInt,\ InputArgFloat, opname, InputArgRef from rpython.jit.metainterp.typesystem import deref @@ -190,6 +190,36 @@ res = self.cpu.get_int_value(deadframe, 0) assert res == 10 + def test_extend_guard_compatible(self): + t1_box, T1_box, d1 = self.alloc_instance(self.T) + t2_box, T2_box, d2 = self.alloc_instance(self.T) + t3_box, T3_box, d3 = self.alloc_instance(self.T) + faildescr1 = BasicFailDescr(1) + loop = parse(""" + [p0] + guard_compatible(p0, ConstPtr(t1), descr=faildescr1) [] + finish(p0, descr=fdescr) + """, namespace={'fdescr': BasicFinalDescr(2), + 'faildescr1': faildescr1, + 't1': t1_box._resref}) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + deadframe = self.cpu.execute_token(looptoken, + t1_box._resref) + fail = self.cpu.get_latest_descr(deadframe) + assert fail.identifier == 2 + + deadframe = self.cpu.execute_token(looptoken, + t2_box._resref) + fail = self.cpu.get_latest_descr(deadframe) + assert fail.identifier == 1 + + self.cpu.grow_guard_compatible_switch(faildescr1, t2_box._resref) + deadframe = self.cpu.execute_token(looptoken, + t2_box._resref) + fail = self.cpu.get_latest_descr(deadframe) + assert fail.identifier == 2 + def test_compile_with_holes_in_fail_args(self): targettoken = TargetToken() loop = parse(""" From pypy.commits at gmail.com Sun Mar 13 08:55:46 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sun, 13 Mar 2016 05:55:46 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: use the new CPU interface Message-ID: <56e56352.4577c20a.bd04d.ffffaf80@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83009:d086871396dd Date: 2016-03-13 13:54 +0100 http://bitbucket.org/pypy/pypy/changeset/d086871396dd/ Log: use the new CPU interface diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -1089,7 +1089,6 @@ def __init__(self): # XXX for now - in the end this would be in assembler - self._checked_ptrs = [] self._compatibility_conditions = None def handle_fail(self, deadframe, metainterp_sd, jitdriver_sd): @@ -1099,30 +1098,16 @@ refval = metainterp_sd.cpu.get_value_direct(deadframe, 'r', index) if self.is_compatible(metainterp_sd.cpu, refval): from rpython.jit.metainterp.blackhole import resume_in_blackhole - # next time it'll pass XXX use new cpu thingie here - self._checked_ptrs.append(history.newconst(refval)) resume_in_blackhole(metainterp_sd, jitdriver_sd, self, deadframe) else: # a real failure return ResumeGuardDescr.handle_fail(self, deadframe, metainterp_sd, jitdriver_sd) - def fake_check_against_list(self, cpu, ref): - # XXX should be in assembler - const = history.newconst(ref) - if self._compatibility_conditions: - for i in range(len(self._checked_ptrs)): - if const.same_constant(self._checked_ptrs[i]): - return True - return False - def is_compatible(self, cpu, ref): const = history.newconst(ref) if self._compatibility_conditions: - for i in range(len(self._checked_ptrs)): - if 
const.same_constant(self._checked_ptrs[i]): - return True if self._compatibility_conditions.check_compat(cpu, ref): - self._checked_ptrs.append(const) + cpu.grow_guard_compatible_switch(self, ref) return True return False return True # no conditions, everything works From pypy.commits at gmail.com Sun Mar 13 09:43:33 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sun, 13 Mar 2016 06:43:33 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: a first stab at using elidable_compatible in mapdict.py Message-ID: <56e56e85.6bb8c20a.7652f.ffffb6dd@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83010:907d767133db Date: 2016-03-13 14:42 +0100 http://bitbucket.org/pypy/pypy/changeset/907d767133db/ Log: a first stab at using elidable_compatible in mapdict.py diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -36,11 +36,17 @@ assert isinstance(terminator, Terminator) self.terminator = terminator + @jit.elidable_compatible + def getclass_from_terminator(self): + # objects with different maps can have the same class + return self.terminator.w_cls + def read(self, obj, name, index): attr = self.find_map_attr(name, index) if attr is None: + jit.promote(self) return self.terminator._read_terminator(obj, name, index) - if ( + if ( # XXX in the guard_compatible world the following isconstant may never be true? jit.isconstant(attr.storageindex) and jit.isconstant(obj) and not attr.ever_mutated @@ -56,6 +62,8 @@ def write(self, obj, name, index, w_value): attr = self.find_map_attr(name, index) if attr is None: + # adding an attribute needs to know all attributes, thus promote + jit.promote(self) return self.terminator._write_terminator(obj, name, index, w_value) if not attr.ever_mutated: attr.ever_mutated = True @@ -65,7 +73,7 @@ def delete(self, obj, name, index): pass - @jit.elidable + @jit.elidable_compatible def find_map_attr(self, name, index): if (self.space.config.objspace.std.withmethodcache): return self._find_map_attr_cache(name, index) @@ -476,18 +484,23 @@ def _get_mapdict_map(self): return jit.promote(self.map) + + def _get_mapdict_map_no_promote(self): + return self.map + def _set_mapdict_map(self, map): self.map = map # _____________________________________________ # objspace interface def getdictvalue(self, space, attrname): - return self._get_mapdict_map().read(self, attrname, DICT) + return self._get_mapdict_map_no_promote().read(self, attrname, DICT) def setdictvalue(self, space, attrname, w_value): - return self._get_mapdict_map().write(self, attrname, DICT, w_value) + return self._get_mapdict_map_no_promote().write(self, attrname, DICT, w_value) def deldictvalue(self, space, attrname): + # deleting needs to promote, we need the whole shape new_obj = self._get_mapdict_map().delete(self, attrname, DICT) if new_obj is None: return False @@ -495,7 +508,7 @@ return True def getdict(self, space): - w_dict = self._get_mapdict_map().read(self, "dict", SPECIAL) + w_dict = self._get_mapdict_map_no_promote().read(self, "dict", SPECIAL) if w_dict is not None: assert isinstance(w_dict, W_DictMultiObject) return w_dict @@ -503,7 +516,7 @@ strategy = space.fromcache(MapDictStrategy) storage = strategy.erase(self) w_dict = W_DictObject(space, strategy, storage) - flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) + flag = self._get_mapdict_map_no_promote().write(self, "dict", SPECIAL, w_dict) assert flag return w_dict @@ -519,11 +532,11 @@ # shell that 
continues to delegate to 'self'. if type(w_olddict.get_strategy()) is MapDictStrategy: w_olddict.get_strategy().switch_to_object_strategy(w_olddict) - flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict) + flag = self._get_mapdict_map_no_promote().write(self, "dict", SPECIAL, w_dict) assert flag def getclass(self, space): - return self._get_mapdict_map().terminator.w_cls + return self._get_mapdict_map_no_promote().getclass_from_terminator() def setclass(self, space, w_cls): new_obj = self._get_mapdict_map().set_terminator(self, w_cls.terminator) @@ -538,11 +551,11 @@ def getslotvalue(self, slotindex): index = SLOTS_STARTING_FROM + slotindex - return self._get_mapdict_map().read(self, "slot", index) + return self._get_mapdict_map_no_promote().read(self, "slot", index) def setslotvalue(self, slotindex, w_value): index = SLOTS_STARTING_FROM + slotindex - self._get_mapdict_map().write(self, "slot", index, w_value) + self._get_mapdict_map_no_promote().write(self, "slot", index, w_value) def delslotvalue(self, slotindex): index = SLOTS_STARTING_FROM + slotindex @@ -556,7 +569,7 @@ def getweakref(self): from pypy.module._weakref.interp__weakref import WeakrefLifeline - lifeline = self._get_mapdict_map().read(self, "weakref", SPECIAL) + lifeline = self._get_mapdict_map_no_promote().read(self, "weakref", SPECIAL) if lifeline is None: return None assert isinstance(lifeline, WeakrefLifeline) @@ -566,11 +579,11 @@ def setweakref(self, space, weakreflifeline): from pypy.module._weakref.interp__weakref import WeakrefLifeline assert isinstance(weakreflifeline, WeakrefLifeline) - self._get_mapdict_map().write(self, "weakref", SPECIAL, weakreflifeline) + self._get_mapdict_map_no_promote().write(self, "weakref", SPECIAL, weakreflifeline) setweakref._cannot_really_call_random_things_ = True def delweakref(self): - self._get_mapdict_map().write(self, "weakref", SPECIAL, None) + self._get_mapdict_map_no_promote().write(self, "weakref", SPECIAL, None) delweakref._cannot_really_call_random_things_ = True class ObjectMixin(object): From pypy.commits at gmail.com Sun Mar 13 12:51:43 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 13 Mar 2016 09:51:43 -0700 (PDT) Subject: [pypy-commit] pypy rawrefcount-review: hg merge default Message-ID: <56e59a9f.99e61c0a.ba4f9.ffffe09b@mx.google.com> Author: Ronan Lamy Branch: rawrefcount-review Changeset: r83011:ac298f2c6197 Date: 2016-03-13 16:26 +0000 http://bitbucket.org/pypy/pypy/changeset/ac298f2c6197/ Log: hg merge default diff too long, truncating to 2000 out of 14492 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -74,5 +74,6 @@ ^rpython/doc/_build/.*$ ^compiled ^.git/ +^.hypothesis/ ^release/ ^rpython/_cache$ diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -18,3 +18,4 @@ f3ad1e1e1d6215e20d34bb65ab85ff9188c9f559 release-2.6.1 850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0 5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1 +246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -240,6 +240,7 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner + Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -271,6 +272,7 @@ Aaron Tubbs Ben Darnell Roberto De Ioris + Logan Chien Juan Francisco Cantero Hurtado Ruochen Huang Jeong YunWon diff --git a/lib-python/2.7/xml/etree/ElementTree.py b/lib-python/2.7/xml/etree/ElementTree.py --- a/lib-python/2.7/xml/etree/ElementTree.py +++ b/lib-python/2.7/xml/etree/ElementTree.py @@ -1606,7 
+1606,17 @@ pubid = pubid[1:-1] if hasattr(self.target, "doctype"): self.target.doctype(name, pubid, system[1:-1]) - elif self.doctype is not self._XMLParser__doctype: + elif 1: # XXX PyPy fix, used to be + # elif self.doctype is not self._XMLParser__doctype: + # but that condition is always True on CPython, as far + # as I can tell: self._XMLParser__doctype always + # returns a fresh unbound method object. + # On PyPy, unbound and bound methods have stronger + # unicity guarantees: self._XMLParser__doctype + # can return the same unbound method object, in + # some cases making the test above incorrectly False. + # (My guess would be that the line above is a backport + # from Python 3.) # warn about deprecated call self._XMLParser__doctype(name, pubid, system[1:-1]) self.doctype(name, pubid, system[1:-1]) diff --git a/lib_pypy/ctypes_config_cache/rebuild.py b/lib_pypy/ctypes_config_cache/rebuild.py --- a/lib_pypy/ctypes_config_cache/rebuild.py +++ b/lib_pypy/ctypes_config_cache/rebuild.py @@ -9,9 +9,8 @@ _dirpath = os.path.dirname(__file__) or os.curdir -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer("ctypes_config_cache") -py.log.setconsumer("ctypes_config_cache", ansi_log) +from rpython.tool.ansi_print import AnsiLogger +log = AnsiLogger("ctypes_config_cache") def rebuild_one(name): diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -123,7 +123,7 @@ # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +html_title = 'PyPy documentation' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -210,6 +210,7 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner + Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -241,6 +242,7 @@ Aaron Tubbs Ben Darnell Roberto De Ioris + Logan Chien Juan Francisco Cantero Hurtado Ruochen Huang Jeong YunWon diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -265,7 +265,7 @@ return False def evil(y): - d = {x(): 1} + d = {X(): 1} X.__eq__ = __evil_eq__ d[y] # might trigger a call to __eq__? diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -76,5 +76,4 @@ * add a tag on the pypy/jitviewer repo that corresponds to pypy release * add a tag on the codespeed web site that corresponds to pypy release -* update the version number in {rpython,pypy}/doc/conf.py. * revise versioning at https://readthedocs.org/projects/pypy diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -167,22 +167,13 @@ * `hg` -Embedding PyPy and improving CFFI ---------------------------------- - -PyPy has some basic :doc:`embedding infrastructure `. The idea would be to improve -upon that with cffi hacks that can automatically generate embeddable .so/.dll -library - - Optimising cpyext (CPython C-API compatibility layer) ----------------------------------------------------- A lot of work has gone into PyPy's implementation of CPython's C-API over the last years to let it reach a practical level of compatibility, so that C extensions for CPython work on PyPy without major rewrites. 
However, -there are still many edges and corner cases where it misbehaves, and it has -not received any substantial optimisation so far. +there are still many edges and corner cases where it misbehaves. The objective of this project is to fix bugs in cpyext and to optimise several performance critical parts of it, such as the reference counting diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -1,96 +1,226 @@ -========== -PyPy 5.0.0 -========== +======== +PyPy 5.0 +======== -We have released PyPy 5.0.0, about three months after PyPy 4.0.0. -We encourage all users of PyPy to update to this version. There are -bug fixes and a major upgrade to our c-api layer (cpyext) +We have released PyPy 5.0, about three months after PyPy 4.0.1. +We encourage all users of PyPy to update to this version. Apart from the usual +bug fixes, there is an ongoing effort to improve the warmup time and memory +usage of JIT-related metadata. The exact effects depend vastly on the program +you're running and can range from insignificant to warmup being up to 30% +faster and memory dropping by about 30%. -You can download the PyPy 5.0.0 release here: +We also merged a major upgrade to our C-API layer (cpyext), simplifying the +interaction between c-level objects and PyPy interpreter level objects. As a +result, lxml (prerelease) with its cython compiled component +`passes all tests`_ on PyPy. The new cpyext is also much faster. + +vmprof_ has been a go-to profiler for PyPy on linux for a few releases +and we're happy to announce that thanks to the cooperation with jetbrains, +vmprof now works on Linux, OS X and Windows on both PyPy and CPython. + +You can download the PyPy 5.0 release here: http://pypy.org/download.html We would like to thank our donors for the continued support of the PyPy project. -We would also like to thank our contributors and +We would also like to thank our contributors and encourage new people to join the project. PyPy has many layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation -improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ -with making RPython's JIT even better. +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. CFFI ==== While not applicable only to PyPy, `cffi`_ is arguably our most significant -contribution to the python ecosystem. PyPy 5.0.0 ships with -`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a c program. +contribution to the python ecosystem. PyPy 5.0 ships with +`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a C program. -.. _`PyPy`: http://doc.pypy.org +.. _`PyPy`: http://doc.pypy.org .. _`RPython`: https://rpython.readthedocs.org .. _`cffi`: https://cffi.readthedocs.org .. _`cffi-1.5.2`: http://cffi.readthedocs.org/en/latest/whatsnew.html#v1-5-2 .. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly .. _`help`: http://doc.pypy.org/en/latest/project-ideas.html .. _`numpy`: https://bitbucket.org/pypy/numpy +.. _`passes all tests`: https://bitbucket.org/pypy/compatibility/wiki/lxml +.. _vmprof: http://vmprof.readthedocs.org What is PyPy? ============= PyPy is a very compliant Python interpreter, almost a drop-in replacement for -CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison) +CPython 2.7. 
It's fast (`PyPy and CPython 2.7.x`_ performance comparison) due to its integrated tracing JIT compiler. We also welcome developers of other `dynamic languages`_ to see what RPython can do for them. This release supports **x86** machines on most common operating systems -(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, freebsd), +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the -big- and little-endian variants of **ppc64** running Linux. +big- and little-endian variants of **PPC64** running Linux. -.. _`pypy and cpython 2.7.x`: http://speed.pypy.org +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org .. _`dynamic languages`: http://pypyjs.org Other Highlights (since 4.0.1 released in November 2015) -======================================================= +========================================================= + +* New features: + + * Support embedding PyPy in a C-program via cffi and static callbacks in cffi. + This deprecates the old method of embedding PyPy + + * Refactor vmprof to work cross-operating-system, deprecate using buggy + libunwind on Linux platforms. Vmprof even works on Windows now. + + * Support more of the C-API type slots, like tp_getattro, and fix C-API + macros, functions, and structs such as _PyLong_FromByteArray(), + PyString_GET_SIZE, f_locals in PyFrameObject, Py_NAN, co_filename in + PyCodeObject + + * Use a more stable approach for allocating PyObjects in cpyext. (see + `blog post`_). Once the PyObject corresponding to a PyPy object is created, + it stays around at the same location until the death of the PyPy object. + Done with a little bit of custom GC support. It allows us to kill the + notion of "borrowing" inside cpyext, reduces 4 dictionaries down to 1, and + significantly simplifies the whole approach (which is why it is a new + feature while technically a refactoring) and allows PyPy to support the + populart lxml module (as of the *next* release) with no PyPy specific + patches needed + + * Make the default filesystem encoding ASCII, like CPython + + * Use `hypothesis`_ in test creation, which is great for randomizing tests * Bug Fixes - * + * Backport always using os.urandom for uuid4 from cpython and fix the JIT as well + (issue #2202) - * + * More completely support datetime, optimize timedelta creation - * + * Fix for issue #2185 which caused an inconsistent list of operations to be + generated by the unroller, appeared in a complicated DJango app + + * Fix an elusive issue with stacklets on shadowstack which showed up when + forgetting stacklets without resuming them + + * Fix entrypoint() which now acquires the GIL + + * Fix direct_ffi_call() so failure does not bail out before setting CALL_MAY_FORCE + + * Fix (de)pickling long values by simplifying the implementation + + * Fix RPython rthread so that objects stored as threadlocal do not force minor + GC collection and are kept alive automatically. 
This improves perfomance of + short-running Python callbacks and prevents resetting such object between + calls + + * Support floats as parameters to itertools.isslice() + + * Check for the existence of CODESET, ignoring it should have prevented PyPy + from working on FreeBSD + + * Fix for corner case (likely shown by Krakatau) for consecutive guards with + interdependencies + + * Fix applevel bare class method comparisons which should fix pretty printing + in IPython * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy -* New features: - - * - - * - - * - * Numpy: - * + * Updates to numpy 1.10.2 (incompatibilities and not-implemented features + still exist) - * + * Support dtype=(('O', spec)) union while disallowing record arrays with + mixed object, non-object values + * Remove all traces of micronumpy from cpyext if --withoutmod-micronumpy option used -* Performance improvements and refactorings: + * Support indexing filtering with a boolean ndarray - * + * Support partition() as an app-level function, together with a cffi wrapper + in pypy/numpy, this now provides partial support for partition() - * +* Performance improvements: - * + * Optimize global lookups + + * Improve the memory signature of numbering instances in the JIT. This should + massively decrease the amount of memory consumed by the JIT, which is + significant for most programs. Also compress the numberings using variable- + size encoding + + * Optimize string concatenation + + * Use INT_LSHIFT instead of INT_MUL when possible + + * Improve struct.unpack by casting directly from the underlying buffer. + Unpacking floats and doubles is about 15 times faster, and integer types + about 50% faster (on 64 bit integers). This was then subsequently + improved further in optimizeopt.py. + + * Optimize two-tuple lookups in mapdict, which improves warmup of instance + variable access somewhat + + * Reduce all guards from int_floordiv_ovf if one of the arguments is constant + + * Identify permutations of attributes at instance creation, reducing the + number of bridges created + + * Greatly improve re.sub() performance + + +* Internal refactorings: + + * Refactor and improve exception analysis in the annotator + + * Remove unnecessary special handling of space.wrap(). + + * Support list-resizing setslice operations in RPython + + * Tweak the trace-too-long heuristic for multiple jit drivers + + * Refactor bookkeeping (such a cool word - three double letters) in the + annotater + + * Refactor wrappers for OS functions from rtyper to rlib and simplify them + + * Simplify backend loading instructions to only use four variants + + * Simplify GIL handling in non-jitted code + + * Refactor naming in optimizeopt + + * Change GraphAnalyzer to use a more precise way to recognize external + functions and fix null pointer handling, generally clean up external + function handling + + * Remove pure variants of ``getfield_gc_*`` operations from the JIT by + determining purity while tracing + + * Refactor databasing + + * Simplify bootstrapping in cpyext + + * Refactor rtyper debug code into python.rtyper.debug + + * Seperate structmember.h from Python.h Also enhance creating api functions + to specify which header file they appear in (previously only pypy_decl.h) + + * Fix tokenizer to enforce universal newlines, needed for Python 3 support .. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html +.. 
_`hypothesis`: http://hypothesis.readthedocs.org +.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html Please update, and continue to help us make PyPy better. diff --git a/pypy/doc/whatsnew-5.0.0.rst b/pypy/doc/whatsnew-5.0.0.rst --- a/pypy/doc/whatsnew-5.0.0.rst +++ b/pypy/doc/whatsnew-5.0.0.rst @@ -1,6 +1,6 @@ -======================== -What's new in PyPy 5.0.0 -======================== +====================== +What's new in PyPy 5.0 +====================== .. this is a revision shortly after release-4.0.1 .. startrev: 4b5c840d0da2 @@ -191,3 +191,7 @@ Fix boolean-array indexing in micronumpy +.. branch: numpy_partition +Support ndarray.partition() as an app-level function numpy.core._partition_use, +provided as a cffi wrapper to upstream's implementation in the pypy/numpy repo + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -2,7 +2,12 @@ What's new in PyPy 5.0.+ ========================= -.. this is a revision shortly after release-5.0.0 -.. startrev: 6d13e55b962a +.. this is a revision shortly after release-5.0 +.. startrev: b238b48f9138 +.. branch: s390x-backend +The jit compiler backend implementation for the s390x architecutre. +The backend manages 64-bit values in the literal pool of the assembly instead of loading them as immediates. +It includes a simplification for the operation 'zero_array'. Start and length parameters are bytes instead of size. + diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -13,6 +13,9 @@ from pypy.interpreter.argument import Arguments from rpython.rlib import jit +from rpython.rlib.rarithmetic import LONG_BIT +from rpython.rlib.rbigint import rbigint + funccallunrolling = unrolling_iterable(range(4)) @@ -557,6 +560,26 @@ return space.w_False return space.eq(self.w_function, w_other.w_function) + def is_w(self, space, other): + if not isinstance(other, Method): + return False + return (self.w_instance is other.w_instance and + self.w_function is other.w_function and + self.w_class is other.w_class) + + def immutable_unique_id(self, space): + from pypy.objspace.std.util import IDTAG_METHOD as tag + from pypy.objspace.std.util import IDTAG_SHIFT + if self.w_instance is not None: + id = space.bigint_w(space.id(self.w_instance)) + id = id.lshift(LONG_BIT) + else: + id = rbigint.fromint(0) + id = id.or_(space.bigint_w(space.id(self.w_function))) + id = id.lshift(LONG_BIT).or_(space.bigint_w(space.id(self.w_class))) + id = id.lshift(IDTAG_SHIFT).int_or_(tag) + return space.newlong_from_rbigint(id) + def descr_method_hash(self): space = self.space w_result = space.hash(self.w_function) diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py --- a/pypy/interpreter/mixedmodule.py +++ b/pypy/interpreter/mixedmodule.py @@ -3,7 +3,7 @@ from pypy.interpreter import gateway from pypy.interpreter.error import OperationError from pypy.interpreter.baseobjspace import W_Root -import os, sys +import sys class MixedModule(Module): applevel_name = None @@ -60,7 +60,7 @@ def save_module_content_for_future_reload(self): self.w_initialdict = self.space.call_method(self.w_dict, 'items') - + @classmethod def get_applevel_name(cls): """ NOT_RPYTHON """ if cls.applevel_name is not None: @@ -68,7 +68,6 @@ else: pkgroot = cls.__module__ return pkgroot.split('.')[-1] - get_applevel_name = classmethod(get_applevel_name) def get(self, name): space = 
self.space @@ -103,7 +102,7 @@ # be normal Functions to get the correct binding behaviour func = w_value if (isinstance(func, Function) and - type(func) is not BuiltinFunction): + type(func) is not BuiltinFunction): try: bltin = func._builtinversion_ except AttributeError: @@ -115,7 +114,6 @@ space.setitem(self.w_dict, w_name, w_value) return w_value - def getdict(self, space): if self.lazy: for name in self.loaders: @@ -131,6 +129,7 @@ self.startup_called = False self._frozen = True + @classmethod def buildloaders(cls): """ NOT_RPYTHON """ if not hasattr(cls, 'loaders'): @@ -149,8 +148,6 @@ if '__doc__' not in loaders: loaders['__doc__'] = cls.get__doc__ - buildloaders = classmethod(buildloaders) - def extra_interpdef(self, name, spec): cls = self.__class__ pkgroot = cls.__module__ @@ -159,21 +156,21 @@ w_obj = loader(space) space.setattr(space.wrap(self), space.wrap(name), w_obj) + @classmethod def get__doc__(cls, space): return space.wrap(cls.__doc__) - get__doc__ = classmethod(get__doc__) def getinterpevalloader(pkgroot, spec): """ NOT_RPYTHON """ def ifileloader(space): - d = {'space' : space} + d = {'space':space} # EVIL HACK (but it works, and this is not RPython :-) while 1: try: value = eval(spec, d) except NameError, ex: - name = ex.args[0].split("'")[1] # super-Evil + name = ex.args[0].split("'")[1] # super-Evil if name in d: raise # propagate the NameError try: diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -9,6 +9,11 @@ from pypy.conftest import pypydir from lib_pypy._pypy_interact import irc_header +try: + import __pypy__ +except ImportError: + __pypy__ = None + banner = sys.version.splitlines()[0] app_main = os.path.join(os.path.realpath(os.path.dirname(__file__)), os.pardir, 'app_main.py') @@ -106,6 +111,8 @@ sys.argv[:] = saved_sys_argv sys.stdout = saved_sys_stdout sys.stderr = saved_sys_stderr + if __pypy__: + __pypy__.set_debug(True) def test_all_combinations_I_can_think_of(self): self.check([], {}, sys_argv=[''], run_stdin=True) @@ -601,9 +608,7 @@ def run_with_status_code(self, cmdline, senddata='', expect_prompt=False, expect_banner=False, python_flags='', env=None): if os.name == 'nt': - try: - import __pypy__ - except: + if __pypy__ is None: py.test.skip('app_main cannot run on non-pypy for windows') cmdline = '%s %s "%s" %s' % (sys.executable, python_flags, app_main, cmdline) diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -1,5 +1,4 @@ - -import unittest +import pytest from pypy.interpreter import eval from pypy.interpreter.function import Function, Method, descr_function_get from pypy.interpreter.pycode import PyCode @@ -187,6 +186,7 @@ raises( TypeError, func, 42, {'arg1': 23}) + @pytest.mark.skipif("config.option.runappdirect") def test_kwargs_nondict_mapping(self): class Mapping: def keys(self): @@ -257,6 +257,14 @@ meth = func.__get__(obj, object) assert meth() == obj + def test_none_get_interaction(self): + skip("XXX issue #2083") + assert type(None).__repr__(None) == 'None' + + def test_none_get_interaction_2(self): + f = None.__repr__ + assert f() == 'None' + def test_no_get_builtin(self): assert not hasattr(dir, '__get__') class A(object): @@ -284,6 +292,7 @@ raises(TypeError, len, s, some_unknown_keyword=s) raises(TypeError, len, s, s, some_unknown_keyword=s) + 
@pytest.mark.skipif("config.option.runappdirect") def test_call_error_message(self): try: len() @@ -325,6 +334,7 @@ f = lambda: 42 assert f.func_doc is None + @pytest.mark.skipif("config.option.runappdirect") def test_setstate_called_with_wrong_args(self): f = lambda: 42 # not sure what it should raise, since CPython doesn't have setstate @@ -550,6 +560,37 @@ assert A().m == X() assert X() == A().m + @pytest.mark.skipif("config.option.runappdirect") + def test_method_identity(self): + class A(object): + def m(self): + pass + def n(self): + pass + + class B(A): + pass + + class X(object): + def __eq__(self, other): + return True + + a = A() + a2 = A() + assert a.m is a.m + assert id(a.m) == id(a.m) + assert a.m is not a.n + assert id(a.m) != id(a.n) + assert a.m is not a2.m + assert id(a.m) != id(a2.m) + + assert A.m is A.m + assert id(A.m) == id(A.m) + assert A.m is not A.n + assert id(A.m) != id(A.n) + assert A.m is not B.m + assert id(A.m) != id(B.m) + class TestMethod: def setup_method(self, method): diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py --- a/pypy/module/__builtin__/descriptor.py +++ b/pypy/module/__builtin__/descriptor.py @@ -79,6 +79,7 @@ W_Super.typedef = TypeDef( 'super', __new__ = interp2app(descr_new_super), + __thisclass__ = interp_attrproperty_w("w_starttype", W_Super), __getattribute__ = interp2app(W_Super.getattribute), __get__ = interp2app(W_Super.get), __doc__ = """super(type) -> unbound super object diff --git a/pypy/module/__builtin__/test/test_descriptor.py b/pypy/module/__builtin__/test/test_descriptor.py --- a/pypy/module/__builtin__/test/test_descriptor.py +++ b/pypy/module/__builtin__/test/test_descriptor.py @@ -214,7 +214,7 @@ c = C() assert C.goo(1) == (C, 1) assert c.goo(1) == (C, 1) - + assert c.foo(1) == (c, 1) class D(C): pass @@ -238,6 +238,17 @@ meth = classmethod(1).__get__(1) raises(TypeError, meth) + def test_super_thisclass(self): + class A(object): + pass + + assert super(A, A()).__thisclass__ is A + + class B(A): + pass + + assert super(B, B()).__thisclass__ is B + assert super(A, B()).__thisclass__ is A def test_property_docstring(self): assert property.__doc__.startswith('property') diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -2,7 +2,6 @@ from pypy.module.thread.test.support import GenericTestThread - class AppTestMinimal: spaceconfig = dict(usemodules=['__pypy__']) diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -353,10 +353,11 @@ # ____________________________________________________________ -rffi_fdopen = rffi.llexternal("fdopen", [rffi.INT, rffi.CCHARP], rffi.CCHARP, +FILEP = rffi.COpaquePtr("FILE") +rffi_fdopen = rffi.llexternal("fdopen", [rffi.INT, rffi.CCHARP], FILEP, save_err=rffi.RFFI_SAVE_ERRNO) -rffi_setbuf = rffi.llexternal("setbuf", [rffi.CCHARP, rffi.CCHARP], lltype.Void) -rffi_fclose = rffi.llexternal("fclose", [rffi.CCHARP], rffi.INT) +rffi_setbuf = rffi.llexternal("setbuf", [FILEP, rffi.CCHARP], lltype.Void) +rffi_fclose = rffi.llexternal("fclose", [FILEP], rffi.INT) class CffiFileObj(object): _immutable_ = True @@ -382,4 +383,4 @@ fileobj.cffi_fileobj = CffiFileObj(fd, fileobj.mode) except OSError, e: raise wrap_oserror(space, e) - return fileobj.cffi_fileobj.llf + return rffi.cast(rffi.CCHARP, 
fileobj.cffi_fileobj.llf) diff --git a/pypy/module/_collections/app_defaultdict.py b/pypy/module/_collections/app_defaultdict.py --- a/pypy/module/_collections/app_defaultdict.py +++ b/pypy/module/_collections/app_defaultdict.py @@ -12,6 +12,7 @@ class defaultdict(dict): __slots__ = ['default_factory'] + __module__ = 'collections' def __init__(self, *args, **kwds): if len(args) > 0: diff --git a/pypy/module/_collections/test/test_defaultdict.py b/pypy/module/_collections/test/test_defaultdict.py --- a/pypy/module/_collections/test/test_defaultdict.py +++ b/pypy/module/_collections/test/test_defaultdict.py @@ -14,6 +14,12 @@ d[5].append(44) assert l == [42, 43] and l2 == [44] + def test_module(self): + from _collections import defaultdict + assert repr(defaultdict) in ( + "", # on PyPy + "") # on CPython + def test_keyerror_without_factory(self): from _collections import defaultdict for d1 in [defaultdict(), defaultdict(None)]: diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -389,6 +389,7 @@ def test_writelines(self): import array + import sys fn = self.temptestfile with file(fn, 'w') as f: f.writelines(['abc']) @@ -406,7 +407,10 @@ exc = raises(TypeError, f.writelines, [memoryview('jkl')]) assert str(exc.value) == "writelines() argument must be a sequence of strings" out = open(fn, 'rb').readlines()[0] - assert out[0:5] == 'abcd\x00' + if sys.byteorder == 'big': + assert out[0:7] == 'abc\x00\x00\x00d' + else: + assert out[0:5] == 'abcd\x00' assert out[-3:] == 'ghi' with file(fn, 'wb') as f: diff --git a/pypy/module/_rawffi/callback.py b/pypy/module/_rawffi/callback.py --- a/pypy/module/_rawffi/callback.py +++ b/pypy/module/_rawffi/callback.py @@ -1,17 +1,23 @@ - +import sys from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module._rawffi.interp_rawffi import write_ptr from pypy.module._rawffi.structure import W_Structure from pypy.module._rawffi.interp_rawffi import (W_DataInstance, letter2tp, - unwrap_value, unpack_argshapes, got_libffi_error) + unwrap_value, unpack_argshapes, got_libffi_error, is_narrow_integer_type, + LL_TYPEMAP, NARROW_INTEGER_TYPES) from rpython.rlib.clibffi import USERDATA_P, CallbackFuncPtr, FUNCFLAG_CDECL from rpython.rlib.clibffi import ffi_type_void, LibFFIError from rpython.rlib import rweakref from pypy.module._rawffi.tracker import tracker from pypy.interpreter.error import OperationError from pypy.interpreter import gateway +from rpython.rlib.unroll import unrolling_iterable + +BIGENDIAN = sys.byteorder == 'big' + +unroll_narrow_integer_types = unrolling_iterable(NARROW_INTEGER_TYPES) app = gateway.applevel(''' def tbprint(tb, err): @@ -42,8 +48,17 @@ args_w[i] = space.wrap(rffi.cast(rffi.ULONG, ll_args[i])) w_res = space.call(w_callable, space.newtuple(args_w)) if callback_ptr.result is not None: # don't return void - unwrap_value(space, write_ptr, ll_res, 0, - callback_ptr.result, w_res) + ptr = ll_res + letter = callback_ptr.result + if BIGENDIAN: + # take care of narrow integers! 
+ for int_type in unroll_narrow_integer_types: + if int_type == letter: + T = LL_TYPEMAP[int_type] + n = rffi.sizeof(lltype.Signed) - rffi.sizeof(T) + ptr = rffi.ptradd(ptr, n) + break + unwrap_value(space, write_ptr, ptr, 0, letter, w_res) except OperationError, e: tbprint(space, space.wrap(e.get_traceback()), space.wrap(e.errorstr(space))) diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -1,3 +1,4 @@ +import sys from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -19,6 +20,8 @@ from pypy.module._rawffi.buffer import RawFFIBuffer from pypy.module._rawffi.tracker import tracker +BIGENDIAN = sys.byteorder == 'big' + TYPEMAP = { # XXX A mess with unsigned/signed/normal chars :-/ 'c' : ffi_type_uchar, @@ -331,10 +334,14 @@ if tracker.DO_TRACING: ll_buf = rffi.cast(lltype.Signed, self.ll_buffer) tracker.trace_allocation(ll_buf, self) + self._ll_buffer = self.ll_buffer def getbuffer(self, space): return space.wrap(rffi.cast(lltype.Unsigned, self.ll_buffer)) + def buffer_advance(self, n): + self.ll_buffer = rffi.ptradd(self.ll_buffer, n) + def byptr(self, space): from pypy.module._rawffi.array import ARRAY_OF_PTRS array = ARRAY_OF_PTRS.allocate(space, 1) @@ -342,16 +349,17 @@ return space.wrap(array) def free(self, space): - if not self.ll_buffer: + if not self._ll_buffer: raise segfault_exception(space, "freeing NULL pointer") self._free() def _free(self): if tracker.DO_TRACING: - ll_buf = rffi.cast(lltype.Signed, self.ll_buffer) + ll_buf = rffi.cast(lltype.Signed, self._ll_buffer) tracker.trace_free(ll_buf) - lltype.free(self.ll_buffer, flavor='raw') + lltype.free(self._ll_buffer, flavor='raw') self.ll_buffer = lltype.nullptr(rffi.VOIDP.TO) + self._ll_buffer = self.ll_buffer def buffer_w(self, space, flags): return RawFFIBuffer(self) @@ -432,12 +440,19 @@ space.wrap("cannot directly read value")) wrap_value._annspecialcase_ = 'specialize:arg(1)' +NARROW_INTEGER_TYPES = 'cbhiBIH?' 
+ +def is_narrow_integer_type(letter): + return letter in NARROW_INTEGER_TYPES class W_FuncPtr(W_Root): def __init__(self, space, ptr, argshapes, resshape): self.ptr = ptr self.argshapes = argshapes self.resshape = resshape + self.narrow_integer = False + if resshape is not None: + self.narrow_integer = is_narrow_integer_type(resshape.itemcode.lower()) def getbuffer(self, space): return space.wrap(rffi.cast(lltype.Unsigned, self.ptr.funcsym)) @@ -497,6 +512,10 @@ result = self.resshape.allocate(space, 1, autofree=True) # adjust_return_size() was used here on result.ll_buffer self.ptr.call(args_ll, result.ll_buffer) + if BIGENDIAN and self.narrow_integer: + # we get a 8 byte value in big endian + n = rffi.sizeof(lltype.Signed) - result.shape.size + result.buffer_advance(n) return space.wrap(result) else: self.ptr.call(args_ll, lltype.nullptr(rffi.VOIDP.TO)) diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -18,6 +18,9 @@ from rpython.rlib.rarithmetic import intmask, signedtype, r_uint, \ r_ulonglong from rpython.rtyper.lltypesystem import lltype, rffi +import sys + +IS_BIG_ENDIAN = sys.byteorder == 'big' @@ -114,20 +117,32 @@ size += intmask(fieldsize) bitsizes.append(fieldsize) elif field_type == NEW_BITFIELD: - bitsizes.append((bitsize << 16) + bitoffset) + if IS_BIG_ENDIAN: + off = last_size - bitoffset - bitsize + else: + off = bitoffset + bitsizes.append((bitsize << 16) + off) bitoffset = bitsize size = round_up(size, fieldalignment) pos.append(size) size += fieldsize elif field_type == CONT_BITFIELD: - bitsizes.append((bitsize << 16) + bitoffset) + if IS_BIG_ENDIAN: + off = last_size - bitoffset - bitsize + else: + off = bitoffset + bitsizes.append((bitsize << 16) + off) bitoffset += bitsize # offset is already updated for the NEXT field pos.append(size - fieldsize) elif field_type == EXPAND_BITFIELD: size += fieldsize - last_size / 8 last_size = fieldsize * 8 - bitsizes.append((bitsize << 16) + bitoffset) + if IS_BIG_ENDIAN: + off = last_size - bitoffset - bitsize + else: + off = bitoffset + bitsizes.append((bitsize << 16) + off) bitoffset += bitsize # offset is already updated for the NEXT field pos.append(size - fieldsize) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -704,7 +704,6 @@ def compare(a, b): a1 = _rawffi.Array('i').fromaddress(_rawffi.Array('P').fromaddress(a, 1)[0], 1) a2 = _rawffi.Array('i').fromaddress(_rawffi.Array('P').fromaddress(b, 1)[0], 1) - print "comparing", a1[0], "with", a2[0] if a1[0] not in [1,2,3,4] or a2[0] not in [1,2,3,4]: bogus_args.append((a1[0], a2[0])) if a1[0] > a2[0]: @@ -715,7 +714,7 @@ a2[0] = len(ll_to_sort) a3 = _rawffi.Array('l')(1) a3[0] = struct.calcsize('i') - cb = _rawffi.CallbackPtr(compare, ['P', 'P'], 'i') + cb = _rawffi.CallbackPtr(compare, ['P', 'P'], 'l') a4 = cb.byptr() qsort(a1, a2, a3, a4) res = [ll_to_sort[i] for i in range(len(ll_to_sort))] @@ -896,11 +895,21 @@ b = _rawffi.Array('c').fromaddress(a.buffer, 38) if sys.maxunicode > 65535: # UCS4 build - assert b[0] == 'x' - assert b[1] == '\x00' - assert b[2] == '\x00' - assert b[3] == '\x00' - assert b[4] == 'y' + if sys.byteorder == 'big': + assert b[0] == '\x00' + assert b[1] == '\x00' + assert b[2] == '\x00' + assert b[3] == 'x' + assert b[4] == '\x00' + assert b[5] == '\x00' + assert b[6] == '\x00' + assert b[7] == 
'y' + else: + assert b[0] == 'x' + assert b[1] == '\x00' + assert b[2] == '\x00' + assert b[3] == '\x00' + assert b[4] == 'y' else: # UCS2 build assert b[0] == 'x' diff --git a/pypy/module/_rawffi/test/test_struct.py b/pypy/module/_rawffi/test/test_struct.py --- a/pypy/module/_rawffi/test/test_struct.py +++ b/pypy/module/_rawffi/test/test_struct.py @@ -1,4 +1,4 @@ - +import sys from pypy.module._rawffi.structure import size_alignment_pos from pypy.module._rawffi.interp_rawffi import TYPEMAP, letter2tp @@ -63,4 +63,7 @@ for (name, t, size) in fields]) assert size == 8 assert pos == [0, 0, 0] - assert bitsizes == [0x10000, 0x3e0001, 0x1003f] + if sys.byteorder == 'little': + assert bitsizes == [0x10000, 0x3e0001, 0x1003f] + else: + assert bitsizes == [0x1003f, 0x3e0001, 0x10000] diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -7,6 +7,7 @@ from pypy.interpreter.error import OperationError from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit +from rpython.rlib.rstring import StringBuilder, UnicodeBuilder # ____________________________________________________________ # @@ -36,6 +37,8 @@ if 0 <= start <= end: if isinstance(ctx, rsre_core.BufMatchContext): return space.wrap(ctx._buffer.getslice(start, end, 1, end-start)) + if isinstance(ctx, rsre_core.StrMatchContext): + return space.wrap(ctx._string[start:end]) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr[start:end]) else: @@ -98,8 +101,8 @@ space.wrap("cannot copy this pattern object")) def make_ctx(self, w_string, pos=0, endpos=sys.maxint): - """Make a BufMatchContext or a UnicodeMatchContext for searching - in the given w_string object.""" + """Make a StrMatchContext, BufMatchContext or a UnicodeMatchContext for + searching in the given w_string object.""" space = self.space if pos < 0: pos = 0 @@ -113,6 +116,14 @@ endpos = len(unicodestr) return rsre_core.UnicodeMatchContext(self.code, unicodestr, pos, endpos, self.flags) + elif space.isinstance_w(w_string, space.w_str): + str = space.str_w(w_string) + if pos > len(str): + pos = len(str) + if endpos > len(str): + endpos = len(str) + return rsre_core.StrMatchContext(self.code, str, + pos, endpos, self.flags) else: buf = space.readbuf_w(w_string) size = buf.getlength() @@ -216,6 +227,11 @@ def subx(self, w_ptemplate, w_string, count): space = self.space + # use a (much faster) string/unicode builder if w_ptemplate and + # w_string are both string or both unicode objects, and if w_ptemplate + # is a literal + use_builder = False + filter_as_unicode = filter_as_string = None if space.is_true(space.callable(w_ptemplate)): w_filter = w_ptemplate filter_is_callable = True @@ -223,6 +239,8 @@ if space.isinstance_w(w_ptemplate, space.w_unicode): filter_as_unicode = space.unicode_w(w_ptemplate) literal = u'\\' not in filter_as_unicode + use_builder = ( + space.isinstance_w(w_string, space.w_unicode) and literal) else: try: filter_as_string = space.str_w(w_ptemplate) @@ -232,6 +250,8 @@ literal = False else: literal = '\\' not in filter_as_string + use_builder = ( + space.isinstance_w(w_string, space.w_str) and literal) if literal: w_filter = w_ptemplate filter_is_callable = False @@ -242,19 +262,44 @@ space.wrap(self), w_ptemplate) filter_is_callable = space.is_true(space.callable(w_filter)) # + # XXX this is a bit of a mess, but it improves performance a lot ctx = self.make_ctx(w_string) - sublist_w = [] + sublist_w = strbuilder = 
unicodebuilder = None + if use_builder: + if filter_as_unicode is not None: + unicodebuilder = UnicodeBuilder(ctx.end) + else: + assert filter_as_string is not None + strbuilder = StringBuilder(ctx.end) + else: + sublist_w = [] n = last_pos = 0 while not count or n < count: + sub_jitdriver.jit_merge_point( + self=self, + use_builder=use_builder, + filter_is_callable=filter_is_callable, + filter_type=type(w_filter), + ctx=ctx, + w_filter=w_filter, + strbuilder=strbuilder, + unicodebuilder=unicodebuilder, + filter_as_string=filter_as_string, + filter_as_unicode=filter_as_unicode, + count=count, + w_string=w_string, + n=n, last_pos=last_pos, sublist_w=sublist_w + ) + space = self.space if not searchcontext(space, ctx): break if last_pos < ctx.match_start: - sublist_w.append(slice_w(space, ctx, last_pos, - ctx.match_start, space.w_None)) + _sub_append_slice( + ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, last_pos, ctx.match_start) start = ctx.match_end if start == ctx.match_start: start += 1 - nextctx = ctx.fresh_copy(start) if not (last_pos == ctx.match_start == ctx.match_end and n > 0): # the above ignores empty matches on latest position @@ -262,28 +307,71 @@ w_match = self.getmatch(ctx, True) w_piece = space.call_function(w_filter, w_match) if not space.is_w(w_piece, space.w_None): + assert strbuilder is None and unicodebuilder is None + assert not use_builder sublist_w.append(w_piece) else: - sublist_w.append(w_filter) + if use_builder: + if strbuilder is not None: + assert filter_as_string is not None + strbuilder.append(filter_as_string) + else: + assert unicodebuilder is not None + assert filter_as_unicode is not None + unicodebuilder.append(filter_as_unicode) + else: + sublist_w.append(w_filter) last_pos = ctx.match_end n += 1 elif last_pos >= ctx.end: break # empty match at the end: finished - ctx = nextctx + ctx.reset(start) if last_pos < ctx.end: - sublist_w.append(slice_w(space, ctx, last_pos, ctx.end, - space.w_None)) + _sub_append_slice(ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, last_pos, ctx.end) + if use_builder: + if strbuilder is not None: + return space.wrap(strbuilder.build()), n + else: + assert unicodebuilder is not None + return space.wrap(unicodebuilder.build()), n + else: + if space.isinstance_w(w_string, space.w_unicode): + w_emptystr = space.wrap(u'') + else: + w_emptystr = space.wrap('') + w_item = space.call_method(w_emptystr, 'join', + space.newlist(sublist_w)) + return w_item, n - if space.isinstance_w(w_string, space.w_unicode): - w_emptystr = space.wrap(u'') - else: - w_emptystr = space.wrap('') - w_item = space.call_method(w_emptystr, 'join', - space.newlist(sublist_w)) - return w_item, n +sub_jitdriver = jit.JitDriver( + reds="""count n last_pos + ctx w_filter + strbuilder unicodebuilder + filter_as_string + filter_as_unicode + w_string sublist_w + self""".split(), + greens=["filter_is_callable", "use_builder", "filter_type", "ctx.pattern"]) +def _sub_append_slice(ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, start, end): + if use_builder: + if isinstance(ctx, rsre_core.BufMatchContext): + assert strbuilder is not None + return strbuilder.append(ctx._buffer.getslice(start, end, 1, end-start)) + if isinstance(ctx, rsre_core.StrMatchContext): + assert strbuilder is not None + return strbuilder.append_slice(ctx._string, start, end) + elif isinstance(ctx, rsre_core.UnicodeMatchContext): + assert unicodebuilder is not None + return unicodebuilder.append_slice(ctx._unicodestr, start, end) + assert 0, 
"unreachable" + else: + sublist_w.append(slice_w(space, ctx, start, end, space.w_None)) + @unwrap_spec(flags=int, groups=int, w_groupindex=WrappedDefault(None), w_indexgroup=WrappedDefault(None)) def SRE_Pattern__new__(space, w_subtype, w_pattern, flags, w_code, @@ -482,6 +570,8 @@ ctx = self.ctx if isinstance(ctx, rsre_core.BufMatchContext): return space.wrap(ctx._buffer.as_str()) + elif isinstance(ctx, rsre_core.StrMatchContext): + return space.wrap(ctx._string) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr) else: diff --git a/pypy/module/_vmprof/conftest.py b/pypy/module/_vmprof/conftest.py new file mode 100644 --- /dev/null +++ b/pypy/module/_vmprof/conftest.py @@ -0,0 +1,6 @@ +import py, platform + +def pytest_collect_directory(path, parent): + if platform.machine() == 's390x': + py.test.skip("zarch tests skipped") +pytest_collect_file = pytest_collect_directory diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -60,10 +60,10 @@ Must be smaller than 1.0 """ w_modules = space.sys.get('modules') - if space.contains_w(w_modules, space.wrap('_continuation')): - space.warn(space.wrap("Using _continuation/greenlet/stacklet together " - "with vmprof will crash"), - space.w_RuntimeWarning) + #if space.contains_w(w_modules, space.wrap('_continuation')): + # space.warn(space.wrap("Using _continuation/greenlet/stacklet together " + # "with vmprof will crash"), + # space.w_RuntimeWarning) try: rvmprof.enable(fileno, period) except rvmprof.VMProfError, e: diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -390,7 +390,7 @@ ((dummy::cppyy_test_data*)self)->destroy_arrays(); } else if (idx == s_methods["cppyy_test_data::set_bool"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_bool((bool)((CPPYY_G__value*)args)[0].obj.in); + ((dummy::cppyy_test_data*)self)->set_bool((bool)((CPPYY_G__value*)args)[0].obj.i); } else if (idx == s_methods["cppyy_test_data::set_char"]) { assert(self && nargs == 1); ((dummy::cppyy_test_data*)self)->set_char(((CPPYY_G__value*)args)[0].obj.ch); diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -51,13 +51,19 @@ assert arr.tolist() == [1, 23, 4] def test_buffer(self): + import sys module = self.import_module(name='array') arr = module.array('i', [1,2,3,4]) buf = buffer(arr) exc = raises(TypeError, "buf[1] = '1'") assert str(exc.value) == "buffer is read-only" - # XXX big-endian - assert str(buf) == ('\x01\0\0\0' - '\x02\0\0\0' - '\x03\0\0\0' - '\x04\0\0\0') + if sys.byteorder == 'big': + assert str(buf) == ('\0\0\0\x01' + '\0\0\0\x02' + '\0\0\0\x03' + '\0\0\0\x04') + else: + assert str(buf) == ('\x01\0\0\0' + '\x02\0\0\0' + '\x03\0\0\0' + '\x04\0\0\0') diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -606,7 +606,7 @@ long intval; PyObject *name; - if (!PyArg_ParseTuple(args, "i", &intval)) + if (!PyArg_ParseTuple(args, "l", &intval)) return NULL; IntLike_Type.tp_as_number = &intlike_as_number; diff --git a/pypy/module/cpyext/test/test_unicodeobject.py 
b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -386,11 +386,11 @@ lltype.free(pendian, flavor='raw') test("\x61\x00\x62\x00\x63\x00\x64\x00", -1) - - test("\x61\x00\x62\x00\x63\x00\x64\x00", None) - + if sys.byteorder == 'big': + test("\x00\x61\x00\x62\x00\x63\x00\x64", None) + else: + test("\x61\x00\x62\x00\x63\x00\x64\x00", None) test("\x00\x61\x00\x62\x00\x63\x00\x64", 1) - test("\xFE\xFF\x00\x61\x00\x62\x00\x63\x00\x64", 0, 1) test("\xFF\xFE\x61\x00\x62\x00\x63\x00\x64\x00", 0, -1) @@ -423,7 +423,10 @@ test("\x61\x00\x00\x00\x62\x00\x00\x00", -1) - test("\x61\x00\x00\x00\x62\x00\x00\x00", None) + if sys.byteorder == 'big': + test("\x00\x00\x00\x61\x00\x00\x00\x62", None) + else: + test("\x61\x00\x00\x00\x62\x00\x00\x00", None) test("\x00\x00\x00\x61\x00\x00\x00\x62", 1) diff --git a/pypy/module/marshal/test/test_marshalimpl.py b/pypy/module/marshal/test/test_marshalimpl.py --- a/pypy/module/marshal/test/test_marshalimpl.py +++ b/pypy/module/marshal/test/test_marshalimpl.py @@ -64,14 +64,17 @@ import marshal, struct class FakeM: + # NOTE: marshal is platform independent, running this test must assume + # that self.seen gets values from the endianess of the marshal module. + # (which is little endian!) def __init__(self): self.seen = [] def start(self, code): self.seen.append(code) def put_int(self, value): - self.seen.append(struct.pack("i", value)) + self.seen.append(struct.pack("i4'), ('y', '>f4')]" in repr(a) + else: + assert "[('x', 'i4" + E = '<' if sys.byteorder == 'little' else '>' + b = np.dtype((xyz, [("col1", E+"i4"), ("col2", E+"i4"), ("col3", E+"i4")])) data = [(1, 2,3), (4, 5, 6)] a = np.array(data, dtype=b) x = pickle.loads(pickle.dumps(a)) @@ -423,18 +429,20 @@ assert hash(t5) != hash(t6) def test_pickle(self): + import sys import numpy as np from numpy import array, dtype from cPickle import loads, dumps a = array([1,2,3]) + E = '<' if sys.byteorder == 'little' else '>' if self.ptr_size == 8: - assert a.dtype.__reduce__() == (dtype, ('i8', 0, 1), (3, '<', None, None, None, -1, -1, 0)) + assert a.dtype.__reduce__() == (dtype, ('i8', 0, 1), (3, E, None, None, None, -1, -1, 0)) else: - assert a.dtype.__reduce__() == (dtype, ('i4', 0, 1), (3, '<', None, None, None, -1, -1, 0)) + assert a.dtype.__reduce__() == (dtype, ('i4', 0, 1), (3, E, None, None, None, -1, -1, 0)) assert loads(dumps(a.dtype)) == a.dtype assert np.dtype('bool').__reduce__() == (dtype, ('b1', 0, 1), (3, '|', None, None, None, -1, -1, 0)) assert np.dtype('|V16').__reduce__() == (dtype, ('V16', 0, 1), (3, '|', None, None, None, 16, 1, 0)) - assert np.dtype(('')+'U7' assert dtype([('', 'f8')]).str == "|V8" assert dtype(('f8', 2)).str == "|V16" @@ -968,8 +978,12 @@ def test_isnative(self): from numpy import dtype + import sys assert dtype('i4').isnative == True - assert dtype('>i8').isnative == False + if sys.byteorder == 'big': + assert dtype('i8').isnative == False def test_any_all_nonzero(self): import numpy @@ -1185,6 +1199,7 @@ def test_setstate(self): import numpy as np import sys + E = '<' if sys.byteorder == 'little' else '>' d = np.dtype('f8') d.__setstate__((3, '|', (np.dtype('float64'), (2,)), None, None, 20, 1, 0)) assert d.str == ('<' if sys.byteorder == 'little' else '>') + 'f8' @@ -1201,7 +1216,7 @@ assert d.shape == (2,) assert d.itemsize == 8 assert d.subdtype is not None - assert repr(d) == "dtype(('' + assert str(dt) == "{'names':['f0','f1'], 'formats':['%si4','u1'], 'offsets':[0,4], 
'itemsize':8, 'aligned':True}" % E dt = np.dtype([('f1', 'u1'), ('f0', 'i4')], align=True) - assert str(dt) == "{'names':['f1','f0'], 'formats':['u1',' 2 ** 31 - 1: - assert (u == [1]).all() + if sys.byteorder == 'big': + assert (u == [0x0100000000000000]).all() + else: + assert (u == [1]).all() else: - assert (u == [1, 0]).all() + if sys.byteorder == 'big': + assert (u == [0x01000000, 0]).all() + else: + assert (u == [1, 0]).all() v = fromstring("abcd", dtype="|S2") assert v[0] == "ab" assert v[1] == "cd" @@ -3652,9 +3718,15 @@ k = fromstring(self.float16val, dtype='float16') assert k[0] == dtype('float16').type(5.) dt = array([5], dtype='longfloat').dtype + print(dt.itemsize) if dt.itemsize == 8: - m = fromstring('\x00\x00\x00\x00\x00\x00\x14@', - dtype='float64') + import sys + if sys.byteorder == 'big': + m = fromstring('@\x14\x00\x00\x00\x00\x00\x00', + dtype='float64') + else: + m = fromstring('\x00\x00\x00\x00\x00\x00\x14@', + dtype='float64') elif dt.itemsize == 12: m = fromstring('\x00\x00\x00\x00\x00\x00\x00\xa0\x01@\x00\x00', dtype='float96') @@ -3676,8 +3748,13 @@ def test_tostring(self): from numpy import array - assert array([1, 2, 3], 'i2').tostring() == '\x01\x00\x02\x00\x03\x00' - assert array([1, 2, 3], 'i2')[::2].tostring() == '\x01\x00\x03\x00' + import sys + if sys.byteorder == 'big': + assert array([1, 2, 3], 'i2').tostring() == '\x00\x01\x00\x02\x00\x03' + assert array([1, 2, 3], 'i2')[::2].tostring() == '\x00\x01\x00\x03' + else: + assert array([1, 2, 3], 'i2').tostring() == '\x01\x00\x02\x00\x03\x00' + assert array([1, 2, 3], 'i2')[::2].tostring() == '\x01\x00\x03\x00' assert array([1, 2, 3], 'i2')[::2].tostring() == '\x00\x01\x00\x03' assert array(0, dtype='i2').tostring() == '\x00\x00' @@ -4173,7 +4250,11 @@ v = a.view(('float32', 4)) assert v.dtype == np.dtype('float32') assert v.shape == (10, 4) - assert v[0][-1] == 2.53125 + import sys + if sys.byteorder == 'big': + assert v[0][-2] == 2.53125 + else: + assert v[0][-1] == 2.53125 exc = raises(ValueError, "a.view(('float32', 2))") assert exc.value[0] == 'new type not compatible with array.' 
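The endianness branches added in the tests above all follow the same pattern: the raw bytes of a native-order integer differ between little-endian machines and big-endian ones such as s390x or ppc64be, so the expected byte strings have to be chosen at run time. A minimal standalone sketch of that pattern (not part of the diff, just an illustration):

    import sys
    import struct

    # '=' selects native byte order with standard sizes, so 'i' is always 4 bytes.
    packed = struct.pack('=i', 1)
    if sys.byteorder == 'little':
        assert packed == b'\x01\x00\x00\x00'
    else:  # big-endian
        assert packed == b'\x00\x00\x00\x01'
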
diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -109,6 +109,7 @@ def test_pickle(self): from numpy import dtype, zeros + import sys try: from numpy.core.multiarray import scalar except ImportError: @@ -119,9 +120,11 @@ f = dtype('float64').type(13.37) c = dtype('complex128').type(13 + 37.j) - assert i.__reduce__() == (scalar, (dtype('int32'), '9\x05\x00\x00')) - assert f.__reduce__() == (scalar, (dtype('float64'), '=\n\xd7\xa3p\xbd*@')) - assert c.__reduce__() == (scalar, (dtype('complex128'), '\x00\x00\x00\x00\x00\x00*@\x00\x00\x00\x00\x00\x80B@')) + swap = lambda s: (''.join(reversed(s))) if sys.byteorder == 'big' else s + assert i.__reduce__() == (scalar, (dtype('int32'), swap('9\x05\x00\x00'))) + assert f.__reduce__() == (scalar, (dtype('float64'), swap('=\n\xd7\xa3p\xbd*@'))) + assert c.__reduce__() == (scalar, (dtype('complex128'), swap('\x00\x00\x00\x00\x00\x00*@') + \ + swap('\x00\x00\x00\x00\x00\x80B@'))) assert loads(dumps(i)) == i assert loads(dumps(f)) == f @@ -256,13 +259,20 @@ assert t < 7e-323 t = s.view('complex64') assert type(t) is np.complex64 - assert 0 < t.real < 1 - assert t.imag == 0 + if sys.byteorder == 'big': + assert 0 < t.imag < 1 + assert t.real == 0 + else: + assert 0 < t.real < 1 + assert t.imag == 0 exc = raises(TypeError, s.view, 'string') assert exc.value[0] == "data-type must not be 0-sized" t = s.view('S8') assert type(t) is np.string_ - assert t == '\x0c' + if sys.byteorder == 'big': + assert t == '\x00' * 7 + '\x0c' + else: + assert t == '\x0c' s = np.dtype('string').type('abc1') assert s.view('S4') == 'abc1' if '__pypy__' in sys.builtin_module_names: diff --git a/pypy/module/micronumpy/test/test_selection.py b/pypy/module/micronumpy/test/test_selection.py --- a/pypy/module/micronumpy/test/test_selection.py +++ b/pypy/module/micronumpy/test/test_selection.py @@ -327,10 +327,15 @@ # tests from numpy/core/tests/test_regression.py def test_sort_bigendian(self): from numpy import array, dtype - a = array(range(11), dtype='float64') - c = a.astype(dtype(' Author: Ronan Lamy Branch: rawrefcount-review Changeset: r83012:579b1d5d2803 Date: 2016-03-13 16:50 +0000 http://bitbucket.org/pypy/pypy/changeset/579b1d5d2803/ Log: Add externally malloced gcobjects to the hypothesis test diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py --- a/rpython/memory/gc/test/test_rawrefcount.py +++ b/rpython/memory/gc/test/test_rawrefcount.py @@ -312,8 +312,14 @@ r1.ob_pypy_link = 0 return r1 - def new_gcobj(self, intval): - p1 = self.malloc(S) + def new_gcobj(self, intval, external=False): + saved = self.gc.nonlarge_max + try: + if external: + self.gc.nonlarge_max = 1 + p1 = self.malloc(S) + finally: + self.gc.nonlarge_max = saved p1.x = intval return p1 @@ -390,15 +396,15 @@ self.incref(r) self.rawobjs.append(r) - def add_gcobj(self): - p = self.space.new_gcobj(self.next_id) + def add_gcobj(self, external=False): + p = self.space.new_gcobj(self.next_id, external=external) self.space.stackroots.append(p) self.rootlinks.append(False) self.next_id += 1 return p - def create_gcpartner(self, raw, is_light=False, is_pyobj=False): - p = self.space.new_gcobj(self.next_id) + def create_gcpartner(self, raw, is_light=False, is_pyobj=False, external=False): + p = self.space.new_gcobj(self.next_id, external=external) self.next_id += 1 self.space.create_link(raw, p, is_light=is_light, 
is_pyobj=is_pyobj) @@ -427,7 +433,7 @@ Action('major_collection', ()), ] valid_st.append(sampled_from(global_actions)) - valid_st.append(builds(Action, just('add_gcobj'), tuples())) + valid_st.append(builds(Action, just('add_gcobj'), tuples(booleans()))) if self.rawobjs: valid_st.append(builds(Action, just('incref'), tuples( sampled_from(self.rawobjs)))) @@ -439,7 +445,7 @@ if candidates: st = builds(Action, just('create_gcpartner'), tuples( sampled_from(candidates), - booleans(), booleans())) + booleans(), booleans(), booleans())) valid_st.append(st) candidates = self.get_linkable_gcobjs() if candidates: From pypy.commits at gmail.com Sun Mar 13 12:58:15 2016 From: pypy.commits at gmail.com (jerith) Date: Sun, 13 Mar 2016 09:58:15 -0700 (PDT) Subject: [pypy-commit] pypy default: Hypothesis test for rlib.runicode. Message-ID: <56e59c27.99e61c0a.ba4f9.ffffe292@mx.google.com> Author: Jeremy Thurgood Branch: Changeset: r83013:bb73b1e0ef9f Date: 2016-03-13 18:57 +0200 http://bitbucket.org/pypy/pypy/changeset/bb73b1e0ef9f/ Log: Hypothesis test for rlib.runicode. diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -4,6 +4,8 @@ import sys, random from rpython.rlib import runicode +from hypothesis import given, settings, strategies + def test_unichr(): assert runicode.UNICHR(0xffff) == u'\uffff' @@ -172,6 +174,17 @@ "utf-32 utf-32-be utf-32-le").split(): self.checkdecode(uni, encoding) + # Same as above, but uses Hypothesis to generate non-surrogate unicode + # characters. + @settings(max_examples=10000) + @given(strategies.characters(blacklist_categories=["Cs"])) + def test_random_hypothesis(self, uni): + if sys.version >= "2.7": + self.checkdecode(uni, "utf-7") + for encoding in ("utf-8 utf-16 utf-16-be utf-16-le " + "utf-32 utf-32-be utf-32-le").split(): + self.checkdecode(uni, encoding) + def test_maxunicode(self): uni = unichr(sys.maxunicode) if sys.version >= "2.7": From pypy.commits at gmail.com Sun Mar 13 13:39:49 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sun, 13 Mar 2016 10:39:49 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: emitting guard_value after guard_compatible replaces the guard_compatible Message-ID: <56e5a5e5.a3f6c20a.c5d46.fffff4c2@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83014:4f6cf238e74a Date: 2016-03-13 18:06 +0100 http://bitbucket.org/pypy/pypy/changeset/4f6cf238e74a/ Log: emitting guard_value after guard_compatible replaces the guard_compatible diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -228,6 +228,7 @@ info._compatibility_conditions = CompatibilityCondition( op.getarg(1)) self.emit_operation(op) + info.mark_last_guard(self.optimizer) def optimize_GUARD_NO_EXCEPTION(self, op): if self.last_emitted_operation is REMOVED: diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -416,6 +416,12 @@ r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) raise InvalidLoop('A GUARD_VALUE (%s) was proven to ' 'always fail' % r) + if old_guard_op.getopnum() == rop.GUARD_COMPATIBLE: + if not old_guard_op.getarg(1).same_constant(op.getarg(1)): + r1 = 
self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) + r2 = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(old_guard) + raise InvalidLoop('a GUARD_COMPATIBLE (%s) is inconsistent ' + 'with a GUARD_VALUE (%s)' % (r1, r2)) descr = compile.ResumeGuardDescr() op = old_guard_op.copy_and_change(rop.GUARD_VALUE, args = [old_guard_op.getarg(0), op.getarg(1)], diff --git a/rpython/jit/metainterp/optimizeopt/test/test_compatible.py b/rpython/jit/metainterp/optimizeopt/test/test_compatible.py --- a/rpython/jit/metainterp/optimizeopt/test/test_compatible.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_compatible.py @@ -22,6 +22,14 @@ """ self.optimize_loop(ops, expected) + ops = """ + [p1] + guard_compatible(p1, ConstPtr(myptr)) [] + guard_value(p1, ConstPtr(myptr)) [] + jump(ConstPtr(myptr)) + """ + self.optimize_loop(ops, expected) + def test_guard_compatible_after_guard_compatible(self): ops = """ [p1] From pypy.commits at gmail.com Sun Mar 13 13:39:51 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sun, 13 Mar 2016 10:39:51 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: check that even if there are two guard_compatibles, the conditions end up on Message-ID: <56e5a5e7.c96cc20a.a5021.014d@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83015:f0e5ad960ab7 Date: 2016-03-13 18:16 +0100 http://bitbucket.org/pypy/pypy/changeset/f0e5ad960ab7/ Log: check that even if there are two guard_compatibles, the conditions end up on only one diff --git a/rpython/jit/metainterp/optimizeopt/test/test_compatible.py b/rpython/jit/metainterp/optimizeopt/test/test_compatible.py --- a/rpython/jit/metainterp/optimizeopt/test/test_compatible.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_compatible.py @@ -49,7 +49,7 @@ (ConstInt(123), ConstPtr(self.myptr)): ConstInt(5), (ConstInt(124), ConstPtr(self.myptr)): ConstInt(7), } - ops = """ + ops1 = """ [p1] guard_compatible(p1, ConstPtr(myptr)) [] i3 = call_pure_i(123, p1, descr=plaincalldescr) @@ -58,6 +58,16 @@ escape_n(i5) jump(ConstPtr(myptr)) """ + ops2 = """ + [p1] + guard_compatible(p1, ConstPtr(myptr)) [] + i3 = call_pure_i(123, p1, descr=plaincalldescr) + escape_n(i3) + guard_compatible(p1, ConstPtr(myptr)) [] + i5 = call_pure_i(124, p1, descr=plaincalldescr) + escape_n(i5) + jump(ConstPtr(myptr)) + """ expected = """ [p1] guard_compatible(p1, ConstPtr(myptr)) [] @@ -65,9 +75,10 @@ escape_n(7) jump(ConstPtr(myptr)) """ - self.optimize_loop(ops, expected, call_pure_results=call_pure_results) - # whitebox-test the guard_compatible descr a bit - descr = self.loop.operations[1].getdescr() - assert descr._compatibility_conditions is not None - assert descr._compatibility_conditions.known_valid.same_constant(ConstPtr(self.myptr)) - assert len(descr._compatibility_conditions.pure_call_conditions) == 2 + for ops in [ops1, ops2]: + self.optimize_loop(ops, expected, call_pure_results=call_pure_results) + # whitebox-test the guard_compatible descr a bit + descr = self.loop.operations[1].getdescr() + assert descr._compatibility_conditions is not None + assert descr._compatibility_conditions.known_valid.same_constant(ConstPtr(self.myptr)) + assert len(descr._compatibility_conditions.pure_call_conditions) == 2 From pypy.commits at gmail.com Sun Mar 13 13:39:53 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sun, 13 Mar 2016 10:39:53 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: a test for the interaction of guard_compatible and guard_class Message-ID: 
<56e5a5e9.a3abc20a.7a71e.fffff389@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83016:ac2efcf3d81a Date: 2016-03-13 18:31 +0100 http://bitbucket.org/pypy/pypy/changeset/ac2efcf3d81a/ Log: a test for the interaction of guard_compatible and guard_class diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -607,6 +607,8 @@ self._last_guard_op.getopnum() != rop.GUARD_NOT_FORCED): self._last_guard_op = None # + if opnum == rop.GUARD_COMPATIBLE: # XXX don't share that for now + self._last_guard_op = None if (self._last_guard_op and guard_op.getdescr() is None): self.metainterp_sd.profiler.count_ops(opnum, jitprof.Counters.OPT_GUARDS_SHARED) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_compatible.py b/rpython/jit/metainterp/optimizeopt/test/test_compatible.py --- a/rpython/jit/metainterp/optimizeopt/test/test_compatible.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_compatible.py @@ -8,7 +8,7 @@ enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap" - def test_guard_compatible_after_guard_value(self): + def test_guard_compatible_and_guard_value(self): ops = """ [p1] guard_value(p1, ConstPtr(myptr)) [] @@ -30,6 +30,22 @@ """ self.optimize_loop(ops, expected) + def test_guard_compatible_and_guard_class(self): + ops = """ + [p1] + guard_class(p1, ConstClass(node_vtable)) [] + guard_compatible(p1, ConstPtr(myptr)) [] + guard_class(p1, ConstClass(node_vtable)) [] + jump(ConstPtr(myptr)) + """ + expected = """ + [p1] + guard_class(p1, ConstClass(node_vtable)) [] + guard_compatible(p1, ConstPtr(myptr)) [] + jump(ConstPtr(myptr)) + """ + self.optimize_loop(ops, expected) + def test_guard_compatible_after_guard_compatible(self): ops = """ [p1] From pypy.commits at gmail.com Sun Mar 13 13:39:55 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sun, 13 Mar 2016 10:39:55 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: test interaction with guard_compatible if the elidable function raises (it's Message-ID: <56e5a5eb.2457c20a.5af9a.0032@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83017:27576d755874 Date: 2016-03-13 18:39 +0100 http://bitbucket.org/pypy/pypy/changeset/27576d755874/ Log: test interaction with guard_compatible if the elidable function raises (it's never compatible then) diff --git a/rpython/jit/metainterp/compatible.py b/rpython/jit/metainterp/compatible.py --- a/rpython/jit/metainterp/compatible.py +++ b/rpython/jit/metainterp/compatible.py @@ -56,12 +56,19 @@ self.pure_call_conditions.append((op, res)) def check_compat(self, cpu, ref): + from rpython.rlib.debug import debug_print, debug_start, debug_stop for op, correct_res in self.pure_call_conditions: calldescr = op.getdescr() # change exactly the first argument arglist = op.getarglist() arglist[1] = newconst(ref) - res = do_call(cpu, arglist, calldescr) + try: + res = do_call(cpu, arglist, calldescr) + except Exception: + debug_start("jit-guard-compatible") + debug_print("call to elidable_compatible function raised") + debug_stop("jit-guard-compatible") + return False if not res.same_constant(correct_res): return False return True diff --git a/rpython/jit/metainterp/test/test_compatible.py b/rpython/jit/metainterp/test/test_compatible.py --- a/rpython/jit/metainterp/test/test_compatible.py +++ b/rpython/jit/metainterp/test/test_compatible.py @@ -33,3 +33,36 
@@ self.meta_interp(main, []) # XXX check number of bridges + def test_exception(self): + S = lltype.GcStruct('S', ('x', lltype.Signed)) + p1 = lltype.malloc(S) + p1.x = 5 + + p2 = lltype.malloc(S) + p2.x = 5 + + p3 = lltype.malloc(S) + p3.x = 6 + driver = jit.JitDriver(greens = [], reds = ['n', 'x']) + @jit.elidable_compatible() + def g(s): + if s.x == 6: + raise Exception + return s.x + + def f(n, x): + while n > 0: + driver.can_enter_jit(n=n, x=x) + driver.jit_merge_point(n=n, x=x) + try: + n -= g(x) + except: + n -= 1 + + def main(): + f(100, p1) + f(100, p2) + f(100, p3) + + self.meta_interp(main, []) + # XXX check number of bridges From pypy.commits at gmail.com Sun Mar 13 15:29:28 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 13 Mar 2016 12:29:28 -0700 (PDT) Subject: [pypy-commit] pypy py3.3-bootstrap-hack: Close branch py3.3-bootstrap-hack Message-ID: <56e5bf98.21bac20a.3733e.fffff60f@mx.google.com> Author: Ronan Lamy Branch: py3.3-bootstrap-hack Changeset: r83018:54a2d1b980a4 Date: 2016-03-13 19:29 +0000 http://bitbucket.org/pypy/pypy/changeset/54a2d1b980a4/ Log: Close branch py3.3-bootstrap-hack From pypy.commits at gmail.com Sun Mar 13 15:29:44 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 13 Mar 2016 12:29:44 -0700 (PDT) Subject: [pypy-commit] pypy py3.3: Merged in py3.3-bootstrap-hack (pull request #415) Message-ID: <56e5bfa8.c52f1c0a.ded9c.0ebd@mx.google.com> Author: Ronan Lamy Branch: py3.3 Changeset: r83019:0113d6e6d5ca Date: 2016-03-13 19:29 +0000 http://bitbucket.org/pypy/pypy/changeset/0113d6e6d5ca/ Log: Merged in py3.3-bootstrap-hack (pull request #415) Initialise the filesystem encoding only after imports have been bootstrapped. Creating it requires importing the 'encodings' module from the stdlib, so the stdlib path needs to have been computed first. 
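The constraint described in the log above can be reproduced outside PyPy: determining a filesystem encoding means looking up a codec, and codec lookup pulls in the pure-Python 'encodings' package from the stdlib, which only works once the stdlib directory is on sys.path. A rough sketch, with a placeholder stdlib path:

    import sys
    sys.path.insert(0, '/path/to/stdlib')  # hypothetical location, set up by path bootstrapping
    import codecs
    codecs.lookup('utf-8')                 # internally imports from the 'encodings' package

Accordingly, app_main.py now calls sys.pypy_initfsencoding() only after setup_bootstrap_path() has run, as the diff below shows.
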
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py --- a/pypy/interpreter/app_main.py +++ b/pypy/interpreter/app_main.py @@ -769,6 +769,7 @@ # import os, which is used a bit everywhere in app_main, but only imported # *after* setup_bootstrap_path setup_bootstrap_path(executable) + sys.pypy_initfsencoding() try: cmdline = parse_command_line(argv) except CommandLineError as e: @@ -861,8 +862,9 @@ sys.pypy_find_executable = pypy_find_executable sys.pypy_find_stdlib = pypy_find_stdlib sys.pypy_resolvedirof = pypy_resolvedirof + sys.pypy_initfsencoding = lambda: None sys.cpython_path = sys.path[:] - + try: sys.exit(int(entry_point(sys.argv[0], sys.argv[1:]))) finally: diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py --- a/pypy/module/sys/__init__.py +++ b/pypy/module/sys/__init__.py @@ -40,6 +40,7 @@ 'pypy_find_stdlib' : 'initpath.pypy_find_stdlib', 'pypy_find_executable' : 'initpath.pypy_find_executable', 'pypy_resolvedirof' : 'initpath.pypy_resolvedirof', + 'pypy_initfsencoding' : 'initpath.pypy_initfsencoding', '_getframe' : 'vm._getframe', '_current_frames' : 'currentframes._current_frames', @@ -97,12 +98,7 @@ def startup(self, space): if space.config.translating: - if not we_are_translated(): - # don't get the filesystemencoding at translation time - assert self.filesystemencoding is None - else: - from pypy.module.sys.interp_encoding import _getfilesystemencoding - self.filesystemencoding = _getfilesystemencoding(space) + assert self.filesystemencoding is None if not space.config.translating or we_are_translated(): from pypy.module.sys import version diff --git a/pypy/module/sys/initpath.py b/pypy/module/sys/initpath.py --- a/pypy/module/sys/initpath.py +++ b/pypy/module/sys/initpath.py @@ -12,6 +12,7 @@ from pypy.interpreter.gateway import unwrap_spec from pypy.module.sys.state import get as get_state +from pypy.module.sys.interp_encoding import _getfilesystemencoding PLATFORM = sys.platform _MACOSX = sys.platform == 'darwin' @@ -166,7 +167,9 @@ space.setitem(space.sys.w_dict, space.wrap('base_exec_prefix'), w_prefix) return space.newlist([_w_fsdecode(space, p) for p in path]) +def pypy_initfsencoding(space): + space.sys.filesystemencoding = _getfilesystemencoding(space) + def _w_fsdecode(space, b): return space.fsdecode(space.wrapbytes(b)) - From pypy.commits at gmail.com Sun Mar 13 15:56:32 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 13 Mar 2016 12:56:32 -0700 (PDT) Subject: [pypy-commit] pypy default: Merged in sergem/pypy/where_1_arg (pull request #414) Message-ID: <56e5c5f0.29cec20a.6211c.2a05@mx.google.com> Author: mattip Branch: Changeset: r83021:8542130344ea Date: 2016-03-13 21:56 +0200 http://bitbucket.org/pypy/pypy/changeset/8542130344ea/ Log: Merged in sergem/pypy/where_1_arg (pull request #414) Implemented numpy.where for 1 argument. 
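For reference, the new one-argument form (exercised by the tests in the diff below) behaves like this at application level, assuming a NumPy-compatible installation:

    import numpy as np

    # one argument: returns a tuple with one index array per dimension
    idx, = np.where([1, 0, 1])
    print(idx)              # [0 2]

    rows, cols = np.where([[1, 0, 1],
                           [2, -1, -1]])
    print(rows)             # [0 0 1 1 1]
    print(cols)             # [0 2 0 1 2]
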
diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -71,8 +71,8 @@ """ if space.is_none(w_y): if space.is_none(w_x): - raise OperationError(space.w_NotImplementedError, space.wrap( - "1-arg where unsupported right now")) + arr = convert_to_array(space, w_arr) + return arr.descr_nonzero(space) raise OperationError(space.w_ValueError, space.wrap( "Where should be called with either 1 or 3 arguments")) if space.is_none(w_x): diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -54,8 +54,24 @@ assert (where(False, 1, [1, 2, 3]) == [1, 2, 3]).all() assert (where([1, 2, 3], True, False) == [True, True, True]).all() - #def test_where_1_arg(self): - # xxx + def test_where_1_arg(self): + from numpy import where, array + + result = where([1,0,1]) + + assert isinstance(result, tuple) + assert len(result) == 1 + assert (result[0] == array([0, 2])).all() + + def test_where_1_arg_2d(self): + from numpy import where, array + + result = where([[1,0,1],[2,-1,-1]]) + + assert isinstance(result, tuple) + assert len(result) == 2 + assert (result[0] == array([0, 0, 1, 1, 1])).all() + assert (result[1] == array([0, 2, 0, 1, 2])).all() def test_where_invalidates(self): from numpy import where, ones, zeros, array From pypy.commits at gmail.com Sun Mar 13 15:56:37 2016 From: pypy.commits at gmail.com (Sergey Matyunin) Date: Sun, 13 Mar 2016 12:56:37 -0700 (PDT) Subject: [pypy-commit] pypy where_1_arg: Implemented where for 1 argument. Added tests. Message-ID: <56e5c5f5.c65b1c0a.734a7.2152@mx.google.com> Author: Sergey Matyunin Branch: where_1_arg Changeset: r83020:35aba6438e37 Date: 2016-03-13 19:43 +0100 http://bitbucket.org/pypy/pypy/changeset/35aba6438e37/ Log: Implemented where for 1 argument. Added tests. 
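As the small change to arrayops.py below shows, the single-argument case is handled by delegating to the array's nonzero(), so the two spellings yield the same index tuple; a quick check (again assuming a NumPy-compatible installation):

    import numpy as np

    a = np.array([[1, 0, 1],
                  [2, -1, -1]])
    # where() with one argument is defined in terms of nonzero()
    assert all((w == n).all() for w, n in zip(np.where(a), a.nonzero()))
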
diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py --- a/pypy/module/micronumpy/arrayops.py +++ b/pypy/module/micronumpy/arrayops.py @@ -71,8 +71,8 @@ """ if space.is_none(w_y): if space.is_none(w_x): - raise OperationError(space.w_NotImplementedError, space.wrap( - "1-arg where unsupported right now")) + arr = convert_to_array(space, w_arr) + return arr.descr_nonzero(space) raise OperationError(space.w_ValueError, space.wrap( "Where should be called with either 1 or 3 arguments")) if space.is_none(w_x): diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py --- a/pypy/module/micronumpy/test/test_arrayops.py +++ b/pypy/module/micronumpy/test/test_arrayops.py @@ -54,8 +54,24 @@ assert (where(False, 1, [1, 2, 3]) == [1, 2, 3]).all() assert (where([1, 2, 3], True, False) == [True, True, True]).all() - #def test_where_1_arg(self): - # xxx + def test_where_1_arg(self): + from numpy import where, array + + result = where([1,0,1]) + + assert isinstance(result, tuple) + assert len(result) == 1 + assert (result[0] == array([0, 2])).all() + + def test_where_1_arg_2d(self): + from numpy import where, array + + result = where([[1,0,1],[2,-1,-1]]) + + assert isinstance(result, tuple) + assert len(result) == 2 + assert (result[0] == array([0, 0, 1, 1, 1])).all() + assert (result[1] == array([0, 2, 0, 1, 2])).all() def test_where_invalidates(self): from numpy import where, ones, zeros, array From pypy.commits at gmail.com Sun Mar 13 15:59:50 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 13 Mar 2016 12:59:50 -0700 (PDT) Subject: [pypy-commit] pypy default: document merged branches Message-ID: <56e5c6b6.aa09c20a.16d70.2781@mx.google.com> Author: mattip Branch: Changeset: r83022:6696ef3ab058 Date: 2016-03-13 21:59 +0200 http://bitbucket.org/pypy/pypy/changeset/6696ef3ab058/ Log: document merged branches diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -11,3 +11,10 @@ The backend manages 64-bit values in the literal pool of the assembly instead of loading them as immediates. It includes a simplification for the operation 'zero_array'. Start and length parameters are bytes instead of size. +.. branch: remove-py-log + +Replace py.log with something simpler, which should speed up logging + +.. branch: where_1_arg + +Implemented numpy.where for 1 argument (thanks sergem) From pypy.commits at gmail.com Sun Mar 13 18:45:51 2016 From: pypy.commits at gmail.com (mjacob) Date: Sun, 13 Mar 2016 15:45:51 -0700 (PDT) Subject: [pypy-commit] pypy py3k: hg merge b238b48f9138 Message-ID: <56e5ed9f.c711c30a.d2f7e.5f30@mx.google.com> Author: Manuel Jacob Branch: py3k Changeset: r83023:7fa0c2fa7ef4 Date: 2016-03-13 23:06 +0100 http://bitbucket.org/pypy/pypy/changeset/7fa0c2fa7ef4/ Log: hg merge b238b48f9138 This is the last changeset which was merged into release-5.x. diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -240,6 +240,7 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner + Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -271,6 +272,7 @@ Aaron Tubbs Ben Darnell Roberto De Ioris + Logan Chien Juan Francisco Cantero Hurtado Ruochen Huang Jeong YunWon diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -123,7 +123,7 @@ # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". 
-#html_title = None +html_title = 'PyPy documentation' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -210,6 +210,7 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner + Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -241,6 +242,7 @@ Aaron Tubbs Ben Darnell Roberto De Ioris + Logan Chien Juan Francisco Cantero Hurtado Ruochen Huang Jeong YunWon diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -265,7 +265,7 @@ return False def evil(y): - d = {x(): 1} + d = {X(): 1} X.__eq__ = __evil_eq__ d[y] # might trigger a call to __eq__? diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -1,44 +1,44 @@ -========== -PyPy 5.0.0 -========== +======== +PyPy 5.0 +======== -We have released PyPy 5.0.0, about three months after PyPy 4.0.0. +We have released PyPy 5.0, about three months after PyPy 4.0.1. We encourage all users of PyPy to update to this version. Apart from the usual bug fixes, there is an ongoing effort to improve the warmup time and memory usage of JIT-related metadata. The exact effects depend vastly on the program you're running and can range from insignificant to warmup being up to 30% -faster and memory dropping by about 30%. +faster and memory dropping by about 30%. We also merged a major upgrade to our C-API layer (cpyext), simplifying the interaction between c-level objects and PyPy interpreter level objects. As a -result, lxml with its cython compiled component `passes all tests`_ on PyPy +result, lxml (prerelease) with its cython compiled component +`passes all tests`_ on PyPy. The new cpyext is also much faster. -Users who have gotten used to vmprof_ on Linux, and those on other platforms -who have not yet tried its awesomeness, will be happy to hear that vmprof -now just works on MacOS and Windows too, in both PyPy (built-in support) and -CPython (as an installed module). +vmprof_ has been a go-to profiler for PyPy on linux for a few releases +and we're happy to announce that thanks to the cooperation with jetbrains, +vmprof now works on Linux, OS X and Windows on both PyPy and CPython. -You can download the PyPy 5.0.0 release here: +You can download the PyPy 5.0 release here: http://pypy.org/download.html We would like to thank our donors for the continued support of the PyPy project. -We would also like to thank our contributors and +We would also like to thank our contributors and encourage new people to join the project. PyPy has many layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation -improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ -with making RPython's JIT even better. +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. CFFI ==== While not applicable only to PyPy, `cffi`_ is arguably our most significant -contribution to the python ecosystem. PyPy 5.0.0 ships with -`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a c program. +contribution to the python ecosystem. PyPy 5.0 ships with +`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a C program. -.. _`PyPy`: http://doc.pypy.org +.. _`PyPy`: http://doc.pypy.org .. 
_`RPython`: https://rpython.readthedocs.org .. _`cffi`: https://cffi.readthedocs.org .. _`cffi-1.5.2`: http://cffi.readthedocs.org/en/latest/whatsnew.html#v1-5-2 @@ -52,18 +52,18 @@ ============= PyPy is a very compliant Python interpreter, almost a drop-in replacement for -CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison) +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) due to its integrated tracing JIT compiler. We also welcome developers of other `dynamic languages`_ to see what RPython can do for them. This release supports **x86** machines on most common operating systems -(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, freebsd), +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the -big- and little-endian variants of **ppc64** running Linux. +big- and little-endian variants of **PPC64** running Linux. -.. _`pypy and cpython 2.7.x`: http://speed.pypy.org +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org .. _`dynamic languages`: http://pypyjs.org Other Highlights (since 4.0.1 released in November 2015) @@ -103,7 +103,7 @@ * More completely support datetime, optimize timedelta creation - * Fix for issue 2185 which caused an inconsistent list of operations to be + * Fix for issue #2185 which caused an inconsistent list of operations to be generated by the unroller, appeared in a complicated DJango app * Fix an elusive issue with stacklets on shadowstack which showed up when @@ -128,9 +128,6 @@ * Fix for corner case (likely shown by Krakatau) for consecutive guards with interdependencies - * Fix applevel bare class method comparisons which should fix pretty printing - in IPython - * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy @@ -150,49 +147,60 @@ * Support partition() as an app-level function, together with a cffi wrapper in pypy/numpy, this now provides partial support for partition() -* Performance improvements and refactorings: +* Performance improvements: - * Refactor and improve exception analysis in the annotator - - * Remove unnecessary special handling of space.wrap(). + * Optimize global lookups * Improve the memory signature of numbering instances in the JIT. This should massively decrease the amount of memory consumed by the JIT, which is significant for most programs. Also compress the numberings using variable- size encoding + * Optimize string concatenation + + * Use INT_LSHIFT instead of INT_MUL when possible + + * Improve struct.unpack by casting directly from the underlying buffer. + Unpacking floats and doubles is about 15 times faster, and integer types + about 50% faster (on 64 bit integers). This was then subsequently + improved further in optimizeopt.py. + + * Optimize two-tuple lookups in mapdict, which improves warmup of instance + variable access somewhat + + * Reduce all guards from int_floordiv_ovf if one of the arguments is constant + + * Identify permutations of attributes at instance creation, reducing the + number of bridges created + + * Greatly improve re.sub() performance + + +* Internal refactorings: + + * Refactor and improve exception analysis in the annotator + + * Remove unnecessary special handling of space.wrap(). 
+ * Support list-resizing setslice operations in RPython * Tweak the trace-too-long heuristic for multiple jit drivers * Refactor bookkeeping (such a cool word - three double letters) in the annotater - + * Refactor wrappers for OS functions from rtyper to rlib and simplify them * Simplify backend loading instructions to only use four variants - * Optimize string concatination - * Simplify GIL handling in non-jitted code - * Use INT_LSHIFT instead of INT_MUL when possible - - * Improve struct.unpack by casting directly from the underlying buffer. - Unpacking floats and doubles is about 15 times faster, and integer types - about 50% faster (on 64 bit integers). This was then subsequently - improved further in optimizeopt.py. - * Refactor naming in optimizeopt * Change GraphAnalyzer to use a more precise way to recognize external functions and fix null pointer handling, generally clean up external function handling - * Optimize global lookups - - * Optimize two-tuple lookups in mapdict - * Remove pure variants of ``getfield_gc_*`` operations from the JIT by determining purity while tracing @@ -203,17 +211,10 @@ * Refactor rtyper debug code into python.rtyper.debug * Seperate structmember.h from Python.h Also enhance creating api functions - to specify which header file they appear in (previously only pypy_decl.h) - - * Reduce all guards from int_floordiv_ovf if one of the arguments is constant + to specify which header file they appear in (previously only pypy_decl.h) * Fix tokenizer to enforce universal newlines, needed for Python 3 support - * Identify permutations of attributes at instance creation, reducing the - number of bridges created - - * Greatly improve re.sub() performance - .. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html .. _`hypothesis`: http://hypothesis.readthedocs.org .. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html diff --git a/pypy/doc/whatsnew-5.0.0.rst b/pypy/doc/whatsnew-5.0.0.rst --- a/pypy/doc/whatsnew-5.0.0.rst +++ b/pypy/doc/whatsnew-5.0.0.rst @@ -1,6 +1,6 @@ -======================== -What's new in PyPy 5.0.0 -======================== +====================== +What's new in PyPy 5.0 +====================== .. this is a revision shortly after release-4.0.1 .. startrev: 4b5c840d0da2 diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -2,6 +2,6 @@ What's new in PyPy 5.0.+ ========================= -.. this is a revision shortly after release-5.0.0 -.. startrev: 7bb6381d084c +.. this is a revision shortly after release-5.0 +.. 
startrev: 9c4299dc2d60 diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -60,10 +60,10 @@ Must be smaller than 1.0 """ w_modules = space.sys.get('modules') - if space.contains_w(w_modules, space.wrap('_continuation')): - space.warn(space.wrap("Using _continuation/greenlet/stacklet together " - "with vmprof will crash"), - space.w_RuntimeWarning) + #if space.contains_w(w_modules, space.wrap('_continuation')): + # space.warn(space.wrap("Using _continuation/greenlet/stacklet together " + # "with vmprof will crash"), + # space.w_RuntimeWarning) try: rvmprof.enable(fileno, period) except rvmprof.VMProfError, e: diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -1,7 +1,7 @@ # Edit these appropriately before running this script -maj=4 +maj=5 min=0 -rev=1 +rev=0 # This script will download latest builds from the buildmaster, rename the top # level directory, and repackage ready to be uploaded to bitbucket. It will also # download source, assuming a tag for the release already exists, and repackage them. diff --git a/requirements.txt b/requirements.txt --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,3 @@ # hypothesis is used for test generation on untranslated jit tests hypothesis - +enum>=0.4.6 # is a dependency, but old pip does not pick it up diff --git a/rpython/doc/conf.py b/rpython/doc/conf.py --- a/rpython/doc/conf.py +++ b/rpython/doc/conf.py @@ -59,7 +59,7 @@ # General information about the project. project = u'RPython' -copyright = u'2015, The PyPy Project' +copyright = u'2016, The PyPy Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -121,7 +121,7 @@ # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +html_title = 'RPython Documentation' # A shorter title for the navigation bar. Default is the same as html_title. 
#html_short_title = None diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1148,35 +1148,45 @@ @arguments("cpu", "i", "R", "d", returns="i") def bhimpl_residual_call_r_i(cpu, func, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_i(func, None, args_r, None, calldescr) @arguments("cpu", "i", "R", "d", returns="r") def bhimpl_residual_call_r_r(cpu, func, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_r(func, None, args_r, None, calldescr) @arguments("cpu", "i", "R", "d") def bhimpl_residual_call_r_v(cpu, func, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_v(func, None, args_r, None, calldescr) @arguments("cpu", "i", "I", "R", "d", returns="i") def bhimpl_residual_call_ir_i(cpu, func, args_i, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_i(func, args_i, args_r, None, calldescr) @arguments("cpu", "i", "I", "R", "d", returns="r") def bhimpl_residual_call_ir_r(cpu, func, args_i, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_r(func, args_i, args_r, None, calldescr) @arguments("cpu", "i", "I", "R", "d") def bhimpl_residual_call_ir_v(cpu, func, args_i, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_v(func, args_i, args_r, None, calldescr) @arguments("cpu", "i", "I", "R", "F", "d", returns="i") def bhimpl_residual_call_irf_i(cpu, func, args_i,args_r,args_f,calldescr): + workaround2200.active = True return cpu.bh_call_i(func, args_i, args_r, args_f, calldescr) @arguments("cpu", "i", "I", "R", "F", "d", returns="r") def bhimpl_residual_call_irf_r(cpu, func, args_i,args_r,args_f,calldescr): + workaround2200.active = True return cpu.bh_call_r(func, args_i, args_r, args_f, calldescr) @arguments("cpu", "i", "I", "R", "F", "d", returns="f") def bhimpl_residual_call_irf_f(cpu, func, args_i,args_r,args_f,calldescr): + workaround2200.active = True return cpu.bh_call_f(func, args_i, args_r, args_f, calldescr) @arguments("cpu", "i", "I", "R", "F", "d") def bhimpl_residual_call_irf_v(cpu, func, args_i,args_r,args_f,calldescr): + workaround2200.active = True return cpu.bh_call_v(func, args_i, args_r, args_f, calldescr) # conditional calls - note that they cannot return stuff @@ -1204,44 +1214,54 @@ @arguments("cpu", "j", "R", returns="i") def bhimpl_inline_call_r_i(cpu, jitcode, args_r): + workaround2200.active = True return cpu.bh_call_i(jitcode.get_fnaddr_as_int(), None, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "R", returns="r") def bhimpl_inline_call_r_r(cpu, jitcode, args_r): + workaround2200.active = True return cpu.bh_call_r(jitcode.get_fnaddr_as_int(), None, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "R") def bhimpl_inline_call_r_v(cpu, jitcode, args_r): + workaround2200.active = True return cpu.bh_call_v(jitcode.get_fnaddr_as_int(), None, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "I", "R", returns="i") def bhimpl_inline_call_ir_i(cpu, jitcode, args_i, args_r): + workaround2200.active = True return cpu.bh_call_i(jitcode.get_fnaddr_as_int(), args_i, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "I", "R", returns="r") def bhimpl_inline_call_ir_r(cpu, jitcode, args_i, args_r): + workaround2200.active = True return cpu.bh_call_r(jitcode.get_fnaddr_as_int(), args_i, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "I", "R") def bhimpl_inline_call_ir_v(cpu, jitcode, args_i, args_r): + 
workaround2200.active = True return cpu.bh_call_v(jitcode.get_fnaddr_as_int(), args_i, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "I", "R", "F", returns="i") def bhimpl_inline_call_irf_i(cpu, jitcode, args_i, args_r, args_f): + workaround2200.active = True return cpu.bh_call_i(jitcode.get_fnaddr_as_int(), args_i, args_r, args_f, jitcode.calldescr) @arguments("cpu", "j", "I", "R", "F", returns="r") def bhimpl_inline_call_irf_r(cpu, jitcode, args_i, args_r, args_f): + workaround2200.active = True return cpu.bh_call_r(jitcode.get_fnaddr_as_int(), args_i, args_r, args_f, jitcode.calldescr) @arguments("cpu", "j", "I", "R", "F", returns="f") def bhimpl_inline_call_irf_f(cpu, jitcode, args_i, args_r, args_f): + workaround2200.active = True return cpu.bh_call_f(jitcode.get_fnaddr_as_int(), args_i, args_r, args_f, jitcode.calldescr) @arguments("cpu", "j", "I", "R", "F") def bhimpl_inline_call_irf_v(cpu, jitcode, args_i, args_r, args_f): + workaround2200.active = True return cpu.bh_call_v(jitcode.get_fnaddr_as_int(), args_i, args_r, args_f, jitcode.calldescr) @@ -1528,6 +1548,8 @@ if not self.nextblackholeinterp: self._exit_frame_with_exception(current_exc) return current_exc + finally: + workaround2200.active = False # # pass the frame's return value to the caller caller = self.nextblackholeinterp @@ -1701,3 +1723,10 @@ # _run_forever(firstbh, current_exc) convert_and_run_from_pyjitpl._dont_inline_ = True + +# ____________________________________________________________ + +class WorkaroundIssue2200(object): + pass +workaround2200 = WorkaroundIssue2200() +workaround2200.active = False diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -4377,3 +4377,30 @@ assert res == -1 else: assert res == 4294967295 + + def test_issue2200_recursion(self): + # Reproduces issue #2200. This test contains no recursion, + # but due to an unlikely combination of factors it ends up + # creating an RPython-level recursion, one per loop iteration. + # The recursion is: blackhole interp from the failing guard -> + # does the call to enter() as a normal call -> enter() runs + # can_enter_jit() as if we're interpreted -> we enter the JIT + # again from the start of the loop -> the guard fails again + # during the next iteration -> blackhole interp. All arrows + # in the previous sentence are one or more levels of RPython + # function calls. + driver = JitDriver(greens=[], reds=["i"]) + def enter(i): + driver.can_enter_jit(i=i) + def f(): + set_param(None, 'trace_eagerness', 999999) + i = 0 + while True: + driver.jit_merge_point(i=i) + i += 1 + if i >= 300: + return i + promote(i + 1) # a failing guard + enter(i) + + self.meta_interp(f, []) diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -433,6 +433,14 @@ bound_reached(hash, None, *args) return + # Workaround for issue #2200, maybe temporary. This is not + # a proper fix, but only a hack that should work well enough + # for PyPy's main jitdriver... See test_issue2200_recursion + from rpython.jit.metainterp.blackhole import workaround2200 + if workaround2200.active: + workaround2200.active = False + return + # Here, we have found 'cell'. 
# if cell.flags & (JC_TRACING | JC_TEMPORARY): diff --git a/rpython/jit/tool/traceviewer.py b/rpython/jit/tool/traceviewer.py --- a/rpython/jit/tool/traceviewer.py +++ b/rpython/jit/tool/traceviewer.py @@ -103,9 +103,9 @@ self.last_guard = -1 else: # guards can be out of order nowadays - groups = sorted(groups) - self.first_guard = guard_number(groups[0]) - self.last_guard = guard_number(groups[-1]) + groups = sorted(map(guard_number, groups)) + self.first_guard = groups[0] + self.last_guard = groups[-1] content = property(get_content, set_content) diff --git a/rpython/rlib/rvmprof/src/vmprof_getpc.h b/rpython/rlib/rvmprof/src/vmprof_getpc.h --- a/rpython/rlib/rvmprof/src/vmprof_getpc.h +++ b/rpython/rlib/rvmprof/src/vmprof_getpc.h @@ -54,6 +54,7 @@ // It will cause problems for FreeBSD though!, because it turns off // the needed __BSD_VISIBLE. #ifdef __APPLE__ +#include #define _XOPEN_SOURCE 500 #endif @@ -144,7 +145,11 @@ #else intptr_t GetPC(ucontext_t *signal_ucontext) { #ifdef __APPLE__ +#if ((ULONG_MAX) == (UINT_MAX)) + return (signal_ucontext->uc_mcontext->__ss.__eip); +#else return (signal_ucontext->uc_mcontext->__ss.__rip); +#endif #else return signal_ucontext->PC_FROM_UCONTEXT; // defined in config.h #endif diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -547,6 +547,11 @@ relpypath = localpath.relto(pypkgpath.dirname) assert relpypath, ("%r should be relative to %r" % (localpath, pypkgpath.dirname)) + if len(relpypath.split(os.path.sep)) > 2: + # pypy detail to agregate the c files by directory, + # since the enormous number of files was causing + # memory issues linking on win32 + return os.path.split(relpypath)[0] + '.c' return relpypath.replace('.py', '.c') return None if hasattr(node.obj, 'graph'): diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -37,6 +37,7 @@ # include #endif +RPY_EXPORTED void rpython_startup_code(void) { #ifdef RPY_WITH_GIL diff --git a/rpython/translator/c/test/test_genc.py b/rpython/translator/c/test/test_genc.py --- a/rpython/translator/c/test/test_genc.py +++ b/rpython/translator/c/test/test_genc.py @@ -596,7 +596,7 @@ t.context._graphof(foobar_fn).inhibit_tail_call = True t.source_c() lines = t.driver.cbuilder.c_source_filename.join('..', - 'rpython_translator_c_test_test_genc.c').readlines() + 'rpython_translator_c_test.c').readlines() for i, line in enumerate(lines): if '= pypy_g_foobar_fn' in line: break diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -81,7 +81,7 @@ # # verify that the executable re-export symbols, but not too many if sys.platform.startswith('linux') and not kwds.get('shared', False): - seen_main = False + seen = set() g = os.popen("objdump -T '%s'" % builder.executable_name, 'r') for line in g: if not line.strip(): @@ -91,8 +91,8 @@ name = line.split()[-1] if name.startswith('__'): continue + seen.add(name) if name == 'main': - seen_main = True continue if name == 'pypy_debug_file': # ok to export this one continue @@ -104,7 +104,9 @@ "declaration of this C function or global variable" % (name,)) g.close() - assert seen_main, "did not see 'main' exported" + # list of symbols that we *want* to be exported: + for name in ['main', 
'pypy_debug_file', 'rpython_startup_code']: + assert name in seen, "did not see '%r' exported" % name # return t, builder @@ -123,9 +125,9 @@ # Verify that the generated C files have sane names: gen_c_files = [str(f) for f in cbuilder.extrafiles] - for expfile in ('rpython_rlib_rposix.c', - 'rpython_rtyper_lltypesystem_rstr.c', - 'rpython_translator_c_test_test_standalone.c'): + for expfile in ('rpython_rlib.c', + 'rpython_rtyper_lltypesystem.c', + 'rpython_translator_c_test.c'): assert cbuilder.targetdir.join(expfile) in gen_c_files def test_print(self): diff --git a/rpython/translator/interactive.py b/rpython/translator/interactive.py --- a/rpython/translator/interactive.py +++ b/rpython/translator/interactive.py @@ -32,12 +32,6 @@ self.context.viewcg() def ensure_setup(self, argtypes=None, policy=None): - standalone = argtypes is None - if standalone: - assert argtypes is None - else: - if argtypes is None: - argtypes = [] self.driver.setup(self.entry_point, argtypes, policy, empty_translator=self.context) self.ann_argtypes = argtypes From pypy.commits at gmail.com Sun Mar 13 18:52:48 2016 From: pypy.commits at gmail.com (mjacob) Date: Sun, 13 Mar 2016 15:52:48 -0700 (PDT) Subject: [pypy-commit] buildbot default: Change build master configuration back to run py3k instead of py3.3. Message-ID: <56e5ef40.c711c30a.d2f7e.60a7@mx.google.com> Author: Manuel Jacob Branch: Changeset: r993:c8cf9192307c Date: 2016-03-13 23:52 +0100 http://bitbucket.org/pypy/buildbot/changeset/c8cf9192307c/ Log: Change build master configuration back to run py3k instead of py3.3. diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -302,10 +302,10 @@ NUMPY_WIN, # on allegro_win32, SalsaSalsa ]), - Nightly("nightly-3-00-py3.3", [ + Nightly("nightly-3-00-py3k", [ LINUX64, # on speed-old, uses all cores APPLVLLINUX64, # on speed-old, uses 1 core - ], branch="py3.3", hour=3, minute=0), + ], branch="py3k", hour=3, minute=0), # S390X vm (ibm-research) Nightly("nightly-4-00", [LINUX_S390X], branch='default', hour=0, minute=0), From pypy.commits at gmail.com Sun Mar 13 19:08:48 2016 From: pypy.commits at gmail.com (mjacob) Date: Sun, 13 Mar 2016 16:08:48 -0700 (PDT) Subject: [pypy-commit] pypy py3.3: hg merge py3k Message-ID: <56e5f300.01adc20a.1ebbb.6926@mx.google.com> Author: Manuel Jacob Branch: py3.3 Changeset: r83024:49038903b4c5 Date: 2016-03-14 00:07 +0100 http://bitbucket.org/pypy/pypy/changeset/49038903b4c5/ Log: hg merge py3k diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -240,6 +240,7 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner + Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -271,6 +272,7 @@ Aaron Tubbs Ben Darnell Roberto De Ioris + Logan Chien Juan Francisco Cantero Hurtado Ruochen Huang Jeong YunWon diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -123,7 +123,7 @@ # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +html_title = 'PyPy documentation' # A shorter title for the navigation bar. Default is the same as html_title. 
#html_short_title = None diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -210,6 +210,7 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner + Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -241,6 +242,7 @@ Aaron Tubbs Ben Darnell Roberto De Ioris + Logan Chien Juan Francisco Cantero Hurtado Ruochen Huang Jeong YunWon diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -265,7 +265,7 @@ return False def evil(y): - d = {x(): 1} + d = {X(): 1} X.__eq__ = __evil_eq__ d[y] # might trigger a call to __eq__? diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -1,44 +1,44 @@ -========== -PyPy 5.0.0 -========== +======== +PyPy 5.0 +======== -We have released PyPy 5.0.0, about three months after PyPy 4.0.0. +We have released PyPy 5.0, about three months after PyPy 4.0.1. We encourage all users of PyPy to update to this version. Apart from the usual bug fixes, there is an ongoing effort to improve the warmup time and memory usage of JIT-related metadata. The exact effects depend vastly on the program you're running and can range from insignificant to warmup being up to 30% -faster and memory dropping by about 30%. +faster and memory dropping by about 30%. We also merged a major upgrade to our C-API layer (cpyext), simplifying the interaction between c-level objects and PyPy interpreter level objects. As a -result, lxml with its cython compiled component `passes all tests`_ on PyPy +result, lxml (prerelease) with its cython compiled component +`passes all tests`_ on PyPy. The new cpyext is also much faster. -Users who have gotten used to vmprof_ on Linux, and those on other platforms -who have not yet tried its awesomeness, will be happy to hear that vmprof -now just works on MacOS and Windows too, in both PyPy (built-in support) and -CPython (as an installed module). +vmprof_ has been a go-to profiler for PyPy on linux for a few releases +and we're happy to announce that thanks to the cooperation with jetbrains, +vmprof now works on Linux, OS X and Windows on both PyPy and CPython. -You can download the PyPy 5.0.0 release here: +You can download the PyPy 5.0 release here: http://pypy.org/download.html We would like to thank our donors for the continued support of the PyPy project. -We would also like to thank our contributors and +We would also like to thank our contributors and encourage new people to join the project. PyPy has many layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation -improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ -with making RPython's JIT even better. +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. CFFI ==== While not applicable only to PyPy, `cffi`_ is arguably our most significant -contribution to the python ecosystem. PyPy 5.0.0 ships with -`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a c program. +contribution to the python ecosystem. PyPy 5.0 ships with +`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a C program. -.. _`PyPy`: http://doc.pypy.org +.. _`PyPy`: http://doc.pypy.org .. _`RPython`: https://rpython.readthedocs.org .. _`cffi`: https://cffi.readthedocs.org .. 
_`cffi-1.5.2`: http://cffi.readthedocs.org/en/latest/whatsnew.html#v1-5-2 @@ -52,18 +52,18 @@ ============= PyPy is a very compliant Python interpreter, almost a drop-in replacement for -CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison) +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) due to its integrated tracing JIT compiler. We also welcome developers of other `dynamic languages`_ to see what RPython can do for them. This release supports **x86** machines on most common operating systems -(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, freebsd), +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the -big- and little-endian variants of **ppc64** running Linux. +big- and little-endian variants of **PPC64** running Linux. -.. _`pypy and cpython 2.7.x`: http://speed.pypy.org +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org .. _`dynamic languages`: http://pypyjs.org Other Highlights (since 4.0.1 released in November 2015) @@ -103,7 +103,7 @@ * More completely support datetime, optimize timedelta creation - * Fix for issue 2185 which caused an inconsistent list of operations to be + * Fix for issue #2185 which caused an inconsistent list of operations to be generated by the unroller, appeared in a complicated DJango app * Fix an elusive issue with stacklets on shadowstack which showed up when @@ -128,9 +128,6 @@ * Fix for corner case (likely shown by Krakatau) for consecutive guards with interdependencies - * Fix applevel bare class method comparisons which should fix pretty printing - in IPython - * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy @@ -150,49 +147,60 @@ * Support partition() as an app-level function, together with a cffi wrapper in pypy/numpy, this now provides partial support for partition() -* Performance improvements and refactorings: +* Performance improvements: - * Refactor and improve exception analysis in the annotator - - * Remove unnecessary special handling of space.wrap(). + * Optimize global lookups * Improve the memory signature of numbering instances in the JIT. This should massively decrease the amount of memory consumed by the JIT, which is significant for most programs. Also compress the numberings using variable- size encoding + * Optimize string concatenation + + * Use INT_LSHIFT instead of INT_MUL when possible + + * Improve struct.unpack by casting directly from the underlying buffer. + Unpacking floats and doubles is about 15 times faster, and integer types + about 50% faster (on 64 bit integers). This was then subsequently + improved further in optimizeopt.py. + + * Optimize two-tuple lookups in mapdict, which improves warmup of instance + variable access somewhat + + * Reduce all guards from int_floordiv_ovf if one of the arguments is constant + + * Identify permutations of attributes at instance creation, reducing the + number of bridges created + + * Greatly improve re.sub() performance + + +* Internal refactorings: + + * Refactor and improve exception analysis in the annotator + + * Remove unnecessary special handling of space.wrap(). 
+ * Support list-resizing setslice operations in RPython * Tweak the trace-too-long heuristic for multiple jit drivers * Refactor bookkeeping (such a cool word - three double letters) in the annotater - + * Refactor wrappers for OS functions from rtyper to rlib and simplify them * Simplify backend loading instructions to only use four variants - * Optimize string concatination - * Simplify GIL handling in non-jitted code - * Use INT_LSHIFT instead of INT_MUL when possible - - * Improve struct.unpack by casting directly from the underlying buffer. - Unpacking floats and doubles is about 15 times faster, and integer types - about 50% faster (on 64 bit integers). This was then subsequently - improved further in optimizeopt.py. - * Refactor naming in optimizeopt * Change GraphAnalyzer to use a more precise way to recognize external functions and fix null pointer handling, generally clean up external function handling - * Optimize global lookups - - * Optimize two-tuple lookups in mapdict - * Remove pure variants of ``getfield_gc_*`` operations from the JIT by determining purity while tracing @@ -203,17 +211,10 @@ * Refactor rtyper debug code into python.rtyper.debug * Seperate structmember.h from Python.h Also enhance creating api functions - to specify which header file they appear in (previously only pypy_decl.h) - - * Reduce all guards from int_floordiv_ovf if one of the arguments is constant + to specify which header file they appear in (previously only pypy_decl.h) * Fix tokenizer to enforce universal newlines, needed for Python 3 support - * Identify permutations of attributes at instance creation, reducing the - number of bridges created - - * Greatly improve re.sub() performance - .. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html .. _`hypothesis`: http://hypothesis.readthedocs.org .. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html diff --git a/pypy/doc/whatsnew-5.0.0.rst b/pypy/doc/whatsnew-5.0.0.rst --- a/pypy/doc/whatsnew-5.0.0.rst +++ b/pypy/doc/whatsnew-5.0.0.rst @@ -1,6 +1,6 @@ -======================== -What's new in PyPy 5.0.0 -======================== +====================== +What's new in PyPy 5.0 +====================== .. this is a revision shortly after release-4.0.1 .. startrev: 4b5c840d0da2 diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -2,6 +2,6 @@ What's new in PyPy 5.0.+ ========================= -.. this is a revision shortly after release-5.0.0 -.. startrev: 7bb6381d084c +.. this is a revision shortly after release-5.0 +.. 
startrev: 9c4299dc2d60 diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -113,7 +113,7 @@ if not (space.is_none(self.w_pattern) or space.isinstance_w(self.w_pattern, space.w_unicode)): raise OperationError(space.w_TypeError, space.wrap( - "can't use a string pattern on a bytes-like object")) + "can't use a bytes pattern on a string-like object")) if pos > len(unicodestr): pos = len(unicodestr) if endpos > len(unicodestr): @@ -121,6 +121,10 @@ return rsre_core.UnicodeMatchContext(self.code, unicodestr, pos, endpos, self.flags) elif space.isinstance_w(w_string, space.w_str): + if (not space.is_none(self.w_pattern) and + space.isinstance_w(self.w_pattern, space.w_unicode)): + raise OperationError(space.w_TypeError, space.wrap( + "can't use a string pattern on a bytes-like object")) str = space.str_w(w_string) if pos > len(str): pos = len(str) @@ -133,7 +137,7 @@ if (not space.is_none(self.w_pattern) and space.isinstance_w(self.w_pattern, space.w_unicode)): raise OperationError(space.w_TypeError, space.wrap( - "can't use a bytes pattern on a string-like object")) + "can't use a string pattern on a bytes-like object")) size = buf.getlength() assert size >= 0 if pos > size: diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -60,10 +60,10 @@ Must be smaller than 1.0 """ w_modules = space.sys.get('modules') - if space.contains_w(w_modules, space.wrap('_continuation')): - space.warn(space.wrap("Using _continuation/greenlet/stacklet together " - "with vmprof will crash"), - space.w_RuntimeWarning) + #if space.contains_w(w_modules, space.wrap('_continuation')): + # space.warn(space.wrap("Using _continuation/greenlet/stacklet together " + # "with vmprof will crash"), + # space.w_RuntimeWarning) try: rvmprof.enable(fileno, period) except rvmprof.VMProfError, e: diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -1,7 +1,7 @@ # Edit these appropriately before running this script -maj=4 +maj=5 min=0 -rev=1 +rev=0 # This script will download latest builds from the buildmaster, rename the top # level directory, and repackage ready to be uploaded to bitbucket. It will also # download source, assuming a tag for the release already exists, and repackage them. diff --git a/requirements.txt b/requirements.txt --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,3 @@ # hypothesis is used for test generation on untranslated jit tests hypothesis - +enum>=0.4.6 # is a dependency, but old pip does not pick it up diff --git a/rpython/doc/conf.py b/rpython/doc/conf.py --- a/rpython/doc/conf.py +++ b/rpython/doc/conf.py @@ -59,7 +59,7 @@ # General information about the project. project = u'RPython' -copyright = u'2015, The PyPy Project' +copyright = u'2016, The PyPy Project' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -121,7 +121,7 @@ # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +html_title = 'RPython Documentation' # A shorter title for the navigation bar. Default is the same as html_title. 
#html_short_title = None diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1148,35 +1148,45 @@ @arguments("cpu", "i", "R", "d", returns="i") def bhimpl_residual_call_r_i(cpu, func, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_i(func, None, args_r, None, calldescr) @arguments("cpu", "i", "R", "d", returns="r") def bhimpl_residual_call_r_r(cpu, func, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_r(func, None, args_r, None, calldescr) @arguments("cpu", "i", "R", "d") def bhimpl_residual_call_r_v(cpu, func, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_v(func, None, args_r, None, calldescr) @arguments("cpu", "i", "I", "R", "d", returns="i") def bhimpl_residual_call_ir_i(cpu, func, args_i, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_i(func, args_i, args_r, None, calldescr) @arguments("cpu", "i", "I", "R", "d", returns="r") def bhimpl_residual_call_ir_r(cpu, func, args_i, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_r(func, args_i, args_r, None, calldescr) @arguments("cpu", "i", "I", "R", "d") def bhimpl_residual_call_ir_v(cpu, func, args_i, args_r, calldescr): + workaround2200.active = True return cpu.bh_call_v(func, args_i, args_r, None, calldescr) @arguments("cpu", "i", "I", "R", "F", "d", returns="i") def bhimpl_residual_call_irf_i(cpu, func, args_i,args_r,args_f,calldescr): + workaround2200.active = True return cpu.bh_call_i(func, args_i, args_r, args_f, calldescr) @arguments("cpu", "i", "I", "R", "F", "d", returns="r") def bhimpl_residual_call_irf_r(cpu, func, args_i,args_r,args_f,calldescr): + workaround2200.active = True return cpu.bh_call_r(func, args_i, args_r, args_f, calldescr) @arguments("cpu", "i", "I", "R", "F", "d", returns="f") def bhimpl_residual_call_irf_f(cpu, func, args_i,args_r,args_f,calldescr): + workaround2200.active = True return cpu.bh_call_f(func, args_i, args_r, args_f, calldescr) @arguments("cpu", "i", "I", "R", "F", "d") def bhimpl_residual_call_irf_v(cpu, func, args_i,args_r,args_f,calldescr): + workaround2200.active = True return cpu.bh_call_v(func, args_i, args_r, args_f, calldescr) # conditional calls - note that they cannot return stuff @@ -1204,44 +1214,54 @@ @arguments("cpu", "j", "R", returns="i") def bhimpl_inline_call_r_i(cpu, jitcode, args_r): + workaround2200.active = True return cpu.bh_call_i(jitcode.get_fnaddr_as_int(), None, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "R", returns="r") def bhimpl_inline_call_r_r(cpu, jitcode, args_r): + workaround2200.active = True return cpu.bh_call_r(jitcode.get_fnaddr_as_int(), None, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "R") def bhimpl_inline_call_r_v(cpu, jitcode, args_r): + workaround2200.active = True return cpu.bh_call_v(jitcode.get_fnaddr_as_int(), None, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "I", "R", returns="i") def bhimpl_inline_call_ir_i(cpu, jitcode, args_i, args_r): + workaround2200.active = True return cpu.bh_call_i(jitcode.get_fnaddr_as_int(), args_i, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "I", "R", returns="r") def bhimpl_inline_call_ir_r(cpu, jitcode, args_i, args_r): + workaround2200.active = True return cpu.bh_call_r(jitcode.get_fnaddr_as_int(), args_i, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "I", "R") def bhimpl_inline_call_ir_v(cpu, jitcode, args_i, args_r): + 
workaround2200.active = True return cpu.bh_call_v(jitcode.get_fnaddr_as_int(), args_i, args_r, None, jitcode.calldescr) @arguments("cpu", "j", "I", "R", "F", returns="i") def bhimpl_inline_call_irf_i(cpu, jitcode, args_i, args_r, args_f): + workaround2200.active = True return cpu.bh_call_i(jitcode.get_fnaddr_as_int(), args_i, args_r, args_f, jitcode.calldescr) @arguments("cpu", "j", "I", "R", "F", returns="r") def bhimpl_inline_call_irf_r(cpu, jitcode, args_i, args_r, args_f): + workaround2200.active = True return cpu.bh_call_r(jitcode.get_fnaddr_as_int(), args_i, args_r, args_f, jitcode.calldescr) @arguments("cpu", "j", "I", "R", "F", returns="f") def bhimpl_inline_call_irf_f(cpu, jitcode, args_i, args_r, args_f): + workaround2200.active = True return cpu.bh_call_f(jitcode.get_fnaddr_as_int(), args_i, args_r, args_f, jitcode.calldescr) @arguments("cpu", "j", "I", "R", "F") def bhimpl_inline_call_irf_v(cpu, jitcode, args_i, args_r, args_f): + workaround2200.active = True return cpu.bh_call_v(jitcode.get_fnaddr_as_int(), args_i, args_r, args_f, jitcode.calldescr) @@ -1528,6 +1548,8 @@ if not self.nextblackholeinterp: self._exit_frame_with_exception(current_exc) return current_exc + finally: + workaround2200.active = False # # pass the frame's return value to the caller caller = self.nextblackholeinterp @@ -1701,3 +1723,10 @@ # _run_forever(firstbh, current_exc) convert_and_run_from_pyjitpl._dont_inline_ = True + +# ____________________________________________________________ + +class WorkaroundIssue2200(object): + pass +workaround2200 = WorkaroundIssue2200() +workaround2200.active = False diff --git a/rpython/jit/metainterp/test/test_ajit.py b/rpython/jit/metainterp/test/test_ajit.py --- a/rpython/jit/metainterp/test/test_ajit.py +++ b/rpython/jit/metainterp/test/test_ajit.py @@ -4377,3 +4377,30 @@ assert res == -1 else: assert res == 4294967295 + + def test_issue2200_recursion(self): + # Reproduces issue #2200. This test contains no recursion, + # but due to an unlikely combination of factors it ends up + # creating an RPython-level recursion, one per loop iteration. + # The recursion is: blackhole interp from the failing guard -> + # does the call to enter() as a normal call -> enter() runs + # can_enter_jit() as if we're interpreted -> we enter the JIT + # again from the start of the loop -> the guard fails again + # during the next iteration -> blackhole interp. All arrows + # in the previous sentence are one or more levels of RPython + # function calls. + driver = JitDriver(greens=[], reds=["i"]) + def enter(i): + driver.can_enter_jit(i=i) + def f(): + set_param(None, 'trace_eagerness', 999999) + i = 0 + while True: + driver.jit_merge_point(i=i) + i += 1 + if i >= 300: + return i + promote(i + 1) # a failing guard + enter(i) + + self.meta_interp(f, []) diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -433,6 +433,14 @@ bound_reached(hash, None, *args) return + # Workaround for issue #2200, maybe temporary. This is not + # a proper fix, but only a hack that should work well enough + # for PyPy's main jitdriver... See test_issue2200_recursion + from rpython.jit.metainterp.blackhole import workaround2200 + if workaround2200.active: + workaround2200.active = False + return + # Here, we have found 'cell'. 
# if cell.flags & (JC_TRACING | JC_TEMPORARY): diff --git a/rpython/jit/tool/traceviewer.py b/rpython/jit/tool/traceviewer.py --- a/rpython/jit/tool/traceviewer.py +++ b/rpython/jit/tool/traceviewer.py @@ -103,9 +103,9 @@ self.last_guard = -1 else: # guards can be out of order nowadays - groups = sorted(groups) - self.first_guard = guard_number(groups[0]) - self.last_guard = guard_number(groups[-1]) + groups = sorted(map(guard_number, groups)) + self.first_guard = groups[0] + self.last_guard = groups[-1] content = property(get_content, set_content) diff --git a/rpython/rlib/rvmprof/src/vmprof_getpc.h b/rpython/rlib/rvmprof/src/vmprof_getpc.h --- a/rpython/rlib/rvmprof/src/vmprof_getpc.h +++ b/rpython/rlib/rvmprof/src/vmprof_getpc.h @@ -54,6 +54,7 @@ // It will cause problems for FreeBSD though!, because it turns off // the needed __BSD_VISIBLE. #ifdef __APPLE__ +#include #define _XOPEN_SOURCE 500 #endif @@ -144,7 +145,11 @@ #else intptr_t GetPC(ucontext_t *signal_ucontext) { #ifdef __APPLE__ +#if ((ULONG_MAX) == (UINT_MAX)) + return (signal_ucontext->uc_mcontext->__ss.__eip); +#else return (signal_ucontext->uc_mcontext->__ss.__rip); +#endif #else return signal_ucontext->PC_FROM_UCONTEXT; // defined in config.h #endif diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -547,6 +547,11 @@ relpypath = localpath.relto(pypkgpath.dirname) assert relpypath, ("%r should be relative to %r" % (localpath, pypkgpath.dirname)) + if len(relpypath.split(os.path.sep)) > 2: + # pypy detail to agregate the c files by directory, + # since the enormous number of files was causing + # memory issues linking on win32 + return os.path.split(relpypath)[0] + '.c' return relpypath.replace('.py', '.c') return None if hasattr(node.obj, 'graph'): diff --git a/rpython/translator/c/src/entrypoint.c b/rpython/translator/c/src/entrypoint.c --- a/rpython/translator/c/src/entrypoint.c +++ b/rpython/translator/c/src/entrypoint.c @@ -37,6 +37,7 @@ # include #endif +RPY_EXPORTED void rpython_startup_code(void) { #ifdef RPY_WITH_GIL diff --git a/rpython/translator/c/test/test_genc.py b/rpython/translator/c/test/test_genc.py --- a/rpython/translator/c/test/test_genc.py +++ b/rpython/translator/c/test/test_genc.py @@ -596,7 +596,7 @@ t.context._graphof(foobar_fn).inhibit_tail_call = True t.source_c() lines = t.driver.cbuilder.c_source_filename.join('..', - 'rpython_translator_c_test_test_genc.c').readlines() + 'rpython_translator_c_test.c').readlines() for i, line in enumerate(lines): if '= pypy_g_foobar_fn' in line: break diff --git a/rpython/translator/c/test/test_standalone.py b/rpython/translator/c/test/test_standalone.py --- a/rpython/translator/c/test/test_standalone.py +++ b/rpython/translator/c/test/test_standalone.py @@ -81,7 +81,7 @@ # # verify that the executable re-export symbols, but not too many if sys.platform.startswith('linux') and not kwds.get('shared', False): - seen_main = False + seen = set() g = os.popen("objdump -T '%s'" % builder.executable_name, 'r') for line in g: if not line.strip(): @@ -91,8 +91,8 @@ name = line.split()[-1] if name.startswith('__'): continue + seen.add(name) if name == 'main': - seen_main = True continue if name == 'pypy_debug_file': # ok to export this one continue @@ -104,7 +104,9 @@ "declaration of this C function or global variable" % (name,)) g.close() - assert seen_main, "did not see 'main' exported" + # list of symbols that we *want* to be exported: + for name in ['main', 
'pypy_debug_file', 'rpython_startup_code']: + assert name in seen, "did not see '%r' exported" % name # return t, builder @@ -123,9 +125,9 @@ # Verify that the generated C files have sane names: gen_c_files = [str(f) for f in cbuilder.extrafiles] - for expfile in ('rpython_rlib_rposix.c', - 'rpython_rtyper_lltypesystem_rstr.c', - 'rpython_translator_c_test_test_standalone.c'): + for expfile in ('rpython_rlib.c', + 'rpython_rtyper_lltypesystem.c', + 'rpython_translator_c_test.c'): assert cbuilder.targetdir.join(expfile) in gen_c_files def test_print(self): diff --git a/rpython/translator/interactive.py b/rpython/translator/interactive.py --- a/rpython/translator/interactive.py +++ b/rpython/translator/interactive.py @@ -32,12 +32,6 @@ self.context.viewcg() def ensure_setup(self, argtypes=None, policy=None): - standalone = argtypes is None - if standalone: - assert argtypes is None - else: - if argtypes is None: - argtypes = [] self.driver.setup(self.entry_point, argtypes, policy, empty_translator=self.context) self.ann_argtypes = argtypes From pypy.commits at gmail.com Mon Mar 14 03:53:53 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 14 Mar 2016 00:53:53 -0700 (PDT) Subject: [pypy-commit] pypy default: marked two tests as xfail on s390x. they time out when invoked with test runner, but pass when run directly Message-ID: <56e66e11.d30e1c0a.3649a.ffffc158@mx.google.com> Author: Richard Plangger Branch: Changeset: r83025:4b386bcfee54 Date: 2016-03-14 08:49 +0100 http://bitbucket.org/pypy/pypy/changeset/4b386bcfee54/ Log: marked two tests as xfail on s390x. they time out when invoked with test runner, but pass when run directly diff --git a/pypy/module/thread/test/test_lock.py b/pypy/module/thread/test/test_lock.py --- a/pypy/module/thread/test/test_lock.py +++ b/pypy/module/thread/test/test_lock.py @@ -3,6 +3,7 @@ import sys, os from pypy.module.thread.test.support import GenericTestThread from rpython.translator.c.test.test_genc import compile +import platform class AppTestLock(GenericTestThread): @@ -63,6 +64,8 @@ else: assert self.runappdirect, "missing lock._py3k_acquire()" + @py.test.mark.xfail(platform.machine() == 's390x', + reason='may fail this test under heavy load') def test_ping_pong(self): # The purpose of this test is that doing a large number of ping-pongs # between two threads, using locks, should complete in a reasonable diff --git a/rpython/rlib/test/test_rthread.py b/rpython/rlib/test/test_rthread.py --- a/rpython/rlib/test/test_rthread.py +++ b/rpython/rlib/test/test_rthread.py @@ -5,6 +5,7 @@ from rpython.translator.c.test.test_boehm import AbstractGCTestClass from rpython.rtyper.lltypesystem import lltype, rffi import py +import platform def test_lock(): l = allocate_lock() @@ -92,6 +93,8 @@ res = fn() assert res == 42 + @py.test.mark.xfail(platform.machine() == 's390x', + reason='may fail this test under heavy load') def test_gc_locking(self): import time from rpython.rlib.debug import ll_assert From pypy.commits at gmail.com Mon Mar 14 04:11:52 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 14 Mar 2016 01:11:52 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: maaaybe implement global numbering Message-ID: <56e67248.10921c0a.1cdf3.ffffc72f@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83026:af5c4f0c2cc9 Date: 2016-03-14 10:11 +0200 http://bitbucket.org/pypy/pypy/changeset/af5c4f0c2cc9/ Log: maaaybe implement global numbering diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py 
--- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -472,6 +472,13 @@ # ------------------------------------------------------------ + def setup_descrs(self): + all_descrs = [] + for k, v in self.descrs.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + return all_descrs + def calldescrof(self, FUNC, ARGS, RESULT, effect_info): key = ('call', getkind(RESULT), tuple([getkind(A) for A in ARGS]), diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -21,6 +21,30 @@ self._cache_call = {} self._cache_interiorfield = {} + def setup_descrs(self): + all_descrs = [] + for k, v in self._cache_size.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_field.iteritems(): + for k1, v1 in v.iteritems(): + v1.descr_index = len(all_descrs) + all_descrs.append(v1) + for k, v in self._cache_array.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_arraylen.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_call.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_interiorfield.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + assert len(all_descrs) < 2**15 + return all_descrs + def init_size_descr(self, STRUCT, sizedescr): pass diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -316,6 +316,9 @@ return ll_frame return execute_token + def setup_descrs(self): + return self.gc_ll_descr.setup_descrs() + # ------------------- helpers and descriptions -------------------- @staticmethod diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -75,7 +75,7 @@ #assert not unroll opt = Optimizer(metainterp_sd, jitdriver_sd, optimizations) - return opt.propagate_all_forward(self.trace.get_iter(), + return opt.propagate_all_forward(self.trace.get_iter(metainterp_sd), self.call_pure_results) class BridgeCompileData(CompileData): diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -124,6 +124,7 @@ class AbstractDescr(AbstractValue): __slots__ = () llopaque = True + descr_index = -1 def repr_of_descr(self): return '%r' % (self,) diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py --- a/rpython/jit/metainterp/logger.py +++ b/rpython/jit/metainterp/logger.py @@ -20,7 +20,7 @@ def _unpack_trace(self, trace): ops = [] - i = trace.get_iter() + i = trace.get_iter(self.metainterp_sd) while not i.done(): ops.append(i.next()) return i.inputargs, ops diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -55,8 +55,10 @@ return [self.get(i) for i in arr] class TraceIterator(BaseTrace): - def __init__(self, trace, start, end, force_inputargs=None): + def __init__(self, trace, start, end, force_inputargs=None, + metainterp_sd=None): self.trace = trace + self.metainterp_sd = metainterp_sd self._cache = [None] * trace._count if force_inputargs is not 
None: self.inputargs = [rop.inputarg_from_tp(arg.type) for @@ -122,10 +124,13 @@ descr_index = -1 if opwithdescr[opnum]: descr_index = self._next() - if descr_index == -1 or rop.is_guard(opnum): + if descr_index == 0 or rop.is_guard(opnum): descr = None else: - descr = self.trace._descrs[descr_index] + if descr_index < 0: + descr = self.metainterp_sd.all_descrs[-descr_index-1] + else: + descr = self.trace._descrs[descr_index] else: descr = None res = ResOperation(opnum, args, -1, descr=descr) @@ -143,9 +148,9 @@ self.inputargs = inputargs self.count = count - def get_iter(self): + def get_iter(self, metainterp_sd=None): iter = TraceIterator(self.trace, self.start, self.trace._pos, - self.inputargs) + self.inputargs, metainterp_sd=metainterp_sd) iter._count = self.count return iter @@ -295,15 +300,15 @@ self.append(self._encode(box)) if opwithdescr[opnum]: if descr is None: - self.append(-1) + self.append(0) else: self.append(self._encode_descr(descr)) self._count += 1 return pos def _encode_descr(self, descr): - # XXX provide a global cache for prebuilt descrs so we don't - # have to repeat them here + if descr.descr_index != -1: + return -descr.descr_index-1 self._descrs.append(descr) return len(self._descrs) - 1 @@ -324,18 +329,20 @@ return [rffi.cast(rffi.SHORT, self._encode(box)) for box in boxes] def create_top_snapshot(self, jitcode, pc, boxes, vable_boxes, vref_boxes): + self._total_snapshots += 1 array = self._list_of_boxes(boxes) vable_array = self._list_of_boxes(vable_boxes) vref_array = self._list_of_boxes(vref_boxes) s = TopSnapshot(combine_uint(jitcode.index, pc), array, vable_array, vref_array) - assert rffi.cast(lltype.Signed, self._ops[self._pos - 1]) == -1 + assert rffi.cast(lltype.Signed, self._ops[self._pos - 1]) == 0 # guards have no descr self._snapshots.append(s) self._ops[self._pos - 1] = rffi.cast(rffi.SHORT, len(self._snapshots) - 1) return s def create_empty_top_snapshot(self, vable_boxes, vref_boxes): + self._total_snapshots += 1 vable_array = self._list_of_boxes(vable_boxes) vref_array = self._list_of_boxes(vref_boxes) s = TopSnapshot(combine_uint(2**16 - 1, 0), [], vable_array, @@ -347,11 +354,13 @@ return s def create_snapshot(self, jitcode, pc, boxes): + self._total_snapshots += 1 array = self._list_of_boxes(boxes) return Snapshot(combine_uint(jitcode.index, pc), array) - def get_iter(self): - return TraceIterator(self, 0, self._pos) + def get_iter(self, metainterp_sd=None): + assert metainterp_sd + return TraceIterator(self, 0, self._pos, metainterp_sd=metainterp_sd) def unpack(self): iter = self.get_iter() diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -122,7 +122,8 @@ assert not self.optimizer._newoperations def optimize_preamble(self, trace, runtime_boxes, call_pure_results, memo): - info, newops = self.optimizer.propagate_all_forward(trace.get_iter(), + info, newops = self.optimizer.propagate_all_forward( + trace.get_iter(self.optimizer.metainterp_sd), call_pure_results, flush=False) exported_state = self.export_state(info.jump_op.getarglist(), info.inputargs, @@ -135,7 +136,7 @@ def optimize_peeled_loop(self, trace, celltoken, state, call_pure_results, inline_short_preamble=True): - trace = trace.get_iter() + trace = trace.get_iter(self.optimizer.metainterp_sd) try: label_args = self.import_state(trace.inputargs, state) except VirtualStatesCantMatch: @@ -226,7 +227,7 @@ def 
optimize_bridge(self, trace, runtime_boxes, call_pure_results, inline_short_preamble, box_names_memo): - trace = trace.get_iter() + trace = trace.get_iter(self.optimizer.metainterp_sd) self._check_no_forwarding([trace.inputargs]) info, ops = self.optimizer.propagate_all_forward(trace, call_pure_results, False) diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1804,6 +1804,7 @@ self.cpu.propagate_exception_descr = exc_descr # self.globaldata = MetaInterpGlobalData(self) + self.all_descrs = self.cpu.setup_descrs() def _setup_once(self): """Runtime setup needed by the various components of the JIT.""" From pypy.commits at gmail.com Mon Mar 14 04:14:08 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 14 Mar 2016 01:14:08 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: fix rpython Message-ID: <56e672d0.8b941c0a.c4699.ffffc075@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83027:0599da8cfbe1 Date: 2016-03-14 10:13 +0200 http://bitbucket.org/pypy/pypy/changeset/0599da8cfbe1/ Log: fix rpython diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -122,7 +122,7 @@ return None class AbstractDescr(AbstractValue): - __slots__ = () + __slots__ = ('descr_index',) llopaque = True descr_index = -1 From pypy.commits at gmail.com Mon Mar 14 04:34:37 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 14 Mar 2016 01:34:37 -0700 (PDT) Subject: [pypy-commit] jitviewer default: bump the version Message-ID: <56e6779d.45d61c0a.1ce32.ffffce88@mx.google.com> Author: fijal Branch: Changeset: r279:028757f8af2a Date: 2016-03-14 10:34 +0200 http://bitbucket.org/pypy/jitviewer/changeset/028757f8af2a/ Log: bump the version diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -11,7 +11,7 @@ rest.append(os.path.join(dirname, x)) setup(name='JitViewer', - version='0.1', + version='0.2', description="Viewer for pypy's jit traces", author='Maciej Fijalkowski, Antonio Cuni and the PyPy team', author_email='fijall at gmail.com', From pypy.commits at gmail.com Mon Mar 14 04:50:20 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 14 Mar 2016 01:50:20 -0700 (PDT) Subject: [pypy-commit] buildbot default: removed libpython s390x and applevel. they are both tested in the jit suite Message-ID: <56e67b4c.c74fc20a.f49de.ffffe820@mx.google.com> Author: Richard Plangger Branch: Changeset: r994:18e2394e8147 Date: 2016-03-14 09:48 +0100 http://bitbucket.org/pypy/buildbot/changeset/18e2394e8147/ Log: removed libpython s390x and applevel. 
they are both tested in the jit suite diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -179,12 +179,10 @@ WIN32 = "own-win-x86-32" APPLVLLINUX32 = "pypy-c-app-level-linux-x86-32" APPLVLLINUX64 = "pypy-c-app-level-linux-x86-64" -APPLVLLINUX_S390X = "pypy-c-app-level-linux-s390x" APPLVLWIN32 = "pypy-c-app-level-win-x86-32" LIBPYTHON_LINUX32 = "pypy-c-lib-python-linux-x86-32" LIBPYTHON_LINUX64 = "pypy-c-lib-python-linux-x86-64" -LIBPYTHON_LINUX_S390X = "pypy-c-lib-python-linux-s390x" JITLINUX32 = "pypy-c-jit-linux-x86-32" JITLINUX64 = "pypy-c-jit-linux-x86-64" @@ -310,7 +308,6 @@ # S390X vm (ibm-research) Nightly("nightly-4-00", [LINUX_S390X], branch='default', hour=0, minute=0), Nightly("nightly-4-01", [JITLINUX_S390X], branch='default', hour=2, minute=0), - Nightly("nightly-4-02", [APPLVLLINUX_S390X], branch='default', hour=5, minute=0), # this one has faithfully run every night even though the latest # change to that branch was in January 2013. Re-enable one day. @@ -354,8 +351,6 @@ #JITINDIANA32, LINUX_S390X, - APPLVLLINUX_S390X, - LIBPYTHON_LINUX_S390X, JITLINUX_S390X, ] + ARM.builderNames, properties=[]), @@ -533,19 +528,6 @@ "factory": pypyOwnTestFactory, "category": 's390x', }, - {"name": APPLVLLINUX_S390X, - "slavenames": ["dje"], - "builddir": APPLVLLINUX_S390X, - "factory": pypyTranslatedAppLevelTestFactoryS390X, - "category": "s390x", - }, - {"name": LIBPYTHON_LINUX_S390X, - "slavenames": ["dje"], - "builddir": LIBPYTHON_LINUX_S390X, - "factory": pypyTranslatedLibPythonTestFactory, - "category": "s390x", - }, - ] + ARM.builders, # http://readthedocs.org/docs/buildbot/en/latest/tour.html#debugging-with-manhole From pypy.commits at gmail.com Mon Mar 14 05:04:47 2016 From: pypy.commits at gmail.com (cfbolz) Date: Mon, 14 Mar 2016 02:04:47 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: implement the boring part of guard_compatible Message-ID: <56e67eaf.e213c20a.6aca7.ffffe387@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83028:2567141c9d53 Date: 2016-03-14 10:03 +0100 http://bitbucket.org/pypy/pypy/changeset/2567141c9d53/ Log: implement the boring part of guard_compatible (which is exactly like guard_value :-)) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1725,6 +1725,12 @@ self.guard_success_cc = rx86.Conditions['E'] self.implement_guard(guard_token) + def genop_guard_guard_compatible(self, guard_op, guard_token, locs, ign): + assert guard_op.getarg(0).type == REF # XXX for now? 
+ self.mc.CMP(locs[0], locs[1]) + self.guard_success_cc = rx86.Conditions['E'] + self.implement_guard(guard_token) + def _cmp_guard_class(self, locs): loc_ptr = locs[0] loc_classptr = locs[1] diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -483,6 +483,13 @@ y = self.loc(op.getarg(1)) self.perform_guard(op, [x, y], None) + def consider_guard_compatible(self, op): + x = self.make_sure_var_in_reg(op.getarg(0)) + loc = self.assembler.cpu.all_reg_indexes[x.value] + op.getdescr().make_a_counter_per_value(op, loc) + y = self.loc(op.getarg(1)) + self.perform_guard(op, [x, y], None) + def consider_guard_class(self, op): assert not isinstance(op.getarg(0), Const) x = self.rm.make_sure_var_in_reg(op.getarg(0)) From pypy.commits at gmail.com Mon Mar 14 05:21:00 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 14 Mar 2016 02:21:00 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: In-progress Message-ID: <56e6827c.c74fc20a.f49de.fffff59e@mx.google.com> Author: Armin Rigo Branch: guard-compatible Changeset: r83029:f30a30829848 Date: 2016-03-14 10:20 +0100 http://bitbucket.org/pypy/pypy/changeset/f30a30829848/ Log: In-progress diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -215,10 +215,11 @@ assert fail.identifier == 1 self.cpu.grow_guard_compatible_switch(faildescr1, t2_box._resref) - deadframe = self.cpu.execute_token(looptoken, - t2_box._resref) - fail = self.cpu.get_latest_descr(deadframe) - assert fail.identifier == 2 + for retry in range(2): + deadframe = self.cpu.execute_token(looptoken, + t2_box._resref) + fail = self.cpu.get_latest_descr(deadframe) + assert fail.identifier == 2 def test_compile_with_holes_in_fail_args(self): targettoken = TargetToken() diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -33,7 +33,7 @@ from rpython.jit.backend.x86.vector_ext import VectorAssemblerMixin from rpython.jit.backend.x86.callbuilder import follow_jump from rpython.jit.metainterp.resoperation import rop -from rpython.jit.backend.x86 import support +from rpython.jit.backend.x86 import support, guard_compat from rpython.rlib.debug import debug_print, debug_start, debug_stop from rpython.rlib import rgc from rpython.jit.codewriter.effectinfo import EffectInfo @@ -69,6 +69,7 @@ def setup_once(self): BaseAssembler.setup_once(self) + guard_compat.setup_once(self) if self.cpu.supports_floats: support.ensure_sse2_floats() self._build_float_constants() @@ -1726,10 +1727,15 @@ self.implement_guard(guard_token) def genop_guard_guard_compatible(self, guard_op, guard_token, locs, ign): - assert guard_op.getarg(0).type == REF # XXX for now? 
- self.mc.CMP(locs[0], locs[1]) - self.guard_success_cc = rx86.Conditions['E'] - self.implement_guard(guard_token) + assert guard_op.getarg(0).type == REF # only supported case for now + assert guard_op.getarg(1).type == REF + loc_reg, loc_imm = locs + assert isinstance(loc_reg, RegLoc) + assert isinstance(loc_imm, ImmedLoc) + if IS_X86_32: + XXX + guard_compat.generate_guard_compatible(self, guard_token, + loc_reg, loc_imm.value) def _cmp_guard_class(self, locs): loc_ptr = locs[0] diff --git a/rpython/jit/backend/x86/guard_compat.py b/rpython/jit/backend/x86/guard_compat.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/x86/guard_compat.py @@ -0,0 +1,96 @@ +from rpython.rtyper.lltypesystem import rffi +from rpython.jit.backend.x86.arch import WORD +from rpython.jit.backend.x86 import rx86, codebuf +from rpython.jit.backend.x86.regloc import X86_64_SCRATCH_REG, imm + +# uses the raw structure COMPATINFO, which is informally defined like this: +# it is an array containing all the expected values that should pass the +# guard, terminated with a small_ofs value ( < 128, see in code). + + +def generate_guard_compatible(assembler, guard_token, loc_reg, initial_value): + # fast-path check + mc = assembler.mc + mc.MOV_ri64(X86_64_SCRATCH_REG.value, initial_value) + rel_pos_compatible_imm = mc.get_relative_pos() + mc.CMP_rr(loc_reg.value, X86_64_SCRATCH_REG.value) + mc.J_il8(rx86.Conditions['E'], 0) + je_location = mc.get_relative_pos() + + # fast-path failed, call the slow-path checker + checker = get_or_build_checker(assembler, loc_reg.value) + + # initialize 'compatinfo' with only 'initial_value' in it + compatinfoaddr = assembler.datablockwrapper.malloc_aligned( + 2 * WORD, alignment=WORD) + compatinfo = rffi.cast(rffi.SIGNEDP, compatinfoaddr) + compatinfo[0] = initial_value + + mc.MOV_ri64(X86_64_SCRATCH_REG.value, compatinfoaddr) # patchable + mc.PUSH_r(X86_64_SCRATCH_REG.value) + mc.CALL(imm(checker)) + mc.stack_frame_size_delta(-WORD) + + small_ofs = mc.get_relative_pos() - rel_pos_compatible_imm + assert 0 <= small_ofs <= 127 + compatinfo[1] = small_ofs + + assembler.guard_success_cc = rx86.Conditions['NZ'] + assembler.implement_guard(guard_token) + # + # patch the JE above + offset = mc.get_relative_pos() - je_location + assert 0 < offset <= 127 + mc.overwrite(je_location-1, chr(offset)) + + +def setup_once(assembler): + nb_registers = WORD * 2 + assembler._guard_compat_checkers = [0] * nb_registers + + +def get_or_build_checker(assembler, regnum): + """Returns a piece of assembler that checks if the value is in + some array (there is one such piece per input register 'regnum') + """ + addr = assembler._guard_compat_checkers[regnum] + if addr != 0: + return addr + + mc = codebuf.MachineCodeBlockWrapper() + + mc.MOV_rs(X86_64_SCRATCH_REG.value, WORD) + + pos = mc.get_relative_pos() + mc.CMP_mr((X86_64_SCRATCH_REG.value, 0), regnum) + mc.J_il8(rx86.Conditions['E'], 0) # patched below + je_location = mc.get_relative_pos() + mc.ADD_ri(X86_64_SCRATCH_REG.value, WORD) + mc.CMP_mi((X86_64_SCRATCH_REG.value, 0), 127) + mc.J_il8(rx86.Conditions['NBE'], pos - (mc.get_relative_pos() + 2)) + + # not found! Return the condition code 'Zero' to mean 'not found'. + mc.CMP_rr(regnum, regnum) + mc.RET16_i(WORD) + + mc.force_frame_size(WORD) + + # patch the JE above + offset = mc.get_relative_pos() - je_location + assert 0 < offset <= 127 + mc.overwrite(je_location-1, chr(offset)) + + # found! update the assembler by writing the value at 'small_ofs' + # bytes before our return address. 
This should overwrite the const in + # 'MOV_ri64(r11, const)', first instruction of the guard_compatible. + mc.NEG_r(X86_64_SCRATCH_REG.value) + mc.ADD_rs(X86_64_SCRATCH_REG.value, 0) + mc.MOV_mr((X86_64_SCRATCH_REG.value, -WORD), regnum) + + # the condition codes say 'Not Zero', as a result of the ADD above. + # Return this condition code to mean 'found'. + mc.RET16_i(WORD) + + addr = mc.materialize(assembler.cpu, []) + assembler._guard_compat_checkers[regnum] = addr + return addr diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -483,12 +483,7 @@ y = self.loc(op.getarg(1)) self.perform_guard(op, [x, y], None) - def consider_guard_compatible(self, op): - x = self.make_sure_var_in_reg(op.getarg(0)) - loc = self.assembler.cpu.all_reg_indexes[x.value] - op.getdescr().make_a_counter_per_value(op, loc) - y = self.loc(op.getarg(1)) - self.perform_guard(op, [x, y], None) + consider_guard_compatible = consider_guard_value def consider_guard_class(self, op): assert not isinstance(op.getarg(0), Const) diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py --- a/rpython/jit/backend/x86/runner.py +++ b/rpython/jit/backend/x86/runner.py @@ -122,6 +122,9 @@ l[i].counter = ll_s.i return l + def grow_guard_compatible_switch(self, guarddescr, gcref): + pass#xxx + class CPU386(AbstractX86CPU): backend_name = 'x86' diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -380,6 +380,7 @@ INSN_rr = insn(rex_w, chr(base+1), register(2,8), register(1,1), '\xC0') INSN_br = insn(rex_w, chr(base+1), register(2,8), stack_bp(1)) INSN_rb = insn(rex_w, chr(base+3), register(1,8), stack_bp(2)) + INSN_rs = insn(rex_w, chr(base+3), register(1,8), stack_sp(2)) INSN_rm = insn(rex_w, chr(base+3), register(1,8), mem_reg_plus_const(2)) INSN_rj = insn(rex_w, chr(base+3), register(1,8), abs_(2)) INSN_ji8 = insn(rex_w, '\x83', orbyte(base), abs_(1), immediate(2,'b')) @@ -403,7 +404,7 @@ INSN_bi._always_inline_ = True # try to constant-fold single_byte() return (INSN_ri, INSN_rr, INSN_rb, INSN_bi, INSN_br, INSN_rm, INSN_rj, - INSN_ji8, INSN_mi8) + INSN_ji8, INSN_mi8, INSN_rs) def select_8_or_32_bit_immed(insn_8, insn_32): def INSN(*args): @@ -505,13 +506,13 @@ INC_m = insn(rex_w, '\xFF', orbyte(0), mem_reg_plus_const(1)) INC_j = insn(rex_w, '\xFF', orbyte(0), abs_(1)) - AD1_ri,ADD_rr,ADD_rb,_,_,ADD_rm,ADD_rj,_,_ = common_modes(0) - OR_ri, OR_rr, OR_rb, _,_,OR_rm, OR_rj, _,_ = common_modes(1) - AND_ri,AND_rr,AND_rb,_,_,AND_rm,AND_rj,_,_ = common_modes(4) - SU1_ri,SUB_rr,SUB_rb,_,_,SUB_rm,SUB_rj,SUB_ji8,SUB_mi8 = common_modes(5) - SBB_ri,SBB_rr,SBB_rb,_,_,SBB_rm,SBB_rj,_,_ = common_modes(3) - XOR_ri,XOR_rr,XOR_rb,_,_,XOR_rm,XOR_rj,_,_ = common_modes(6) - CMP_ri,CMP_rr,CMP_rb,CMP_bi,CMP_br,CMP_rm,CMP_rj,_,_ = common_modes(7) + AD1_ri,ADD_rr,ADD_rb,_,_,ADD_rm,ADD_rj,_,_,ADD_rs = common_modes(0) + OR_ri, OR_rr, OR_rb, _,_,OR_rm, OR_rj, _,_,_ = common_modes(1) + AND_ri,AND_rr,AND_rb,_,_,AND_rm,AND_rj,_,_,_ = common_modes(4) + SU1_ri,SUB_rr,SUB_rb,_,_,SUB_rm,SUB_rj,SUB_ji8,SUB_mi8,_ = common_modes(5) + SBB_ri,SBB_rr,SBB_rb,_,_,SBB_rm,SBB_rj,_,_,_ = common_modes(3) + XOR_ri,XOR_rr,XOR_rb,_,_,XOR_rm,XOR_rj,_,_,_ = common_modes(6) + CMP_ri,CMP_rr,CMP_rb,CMP_bi,CMP_br,CMP_rm,CMP_rj,_,_,_ = common_modes(7) def ADD_ri(self, reg, immed): self.AD1_ri(reg, immed) From pypy.commits at gmail.com 
Mon Mar 14 06:14:57 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 14 Mar 2016 03:14:57 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: Finish guard_compatible in the x86 backend (as far as test_runner goes) Message-ID: <56e68f21.e5ecc20a.f9fb4.fffffc81@mx.google.com> Author: Armin Rigo Branch: guard-compatible Changeset: r83030:a4934822ebc2 Date: 2016-03-14 11:14 +0100 http://bitbucket.org/pypy/pypy/changeset/a4934822ebc2/ Log: Finish guard_compatible in the x86 backend (as far as test_runner goes) diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -470,7 +470,7 @@ assert deadframe._saved_data is not None return deadframe._saved_data - def grow_guard_compatible_switch(self, descr, ref): + def grow_guard_compatible_switch(self, looptoken, descr, ref): if not hasattr(descr, '_guard_compatible_llgraph_lst'): descr._guard_compatible_llgraph_lst = [] descr._guard_compatible_llgraph_lst.append(ref) diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -36,6 +36,9 @@ def guard_not_invalidated(self): return self.guard_opnum == rop.GUARD_NOT_INVALIDATED + def guard_compatible(self): + return self.guard_opnum == rop.GUARD_COMPATIBLE + def must_save_exception(self): guard_opnum = self.guard_opnum return (guard_opnum == rop.GUARD_EXCEPTION or diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -158,7 +158,7 @@ """ pass - def grow_guard_compatible_switch(self, guarddescr, gcref): + def grow_guard_compatible_switch(self, looptoken, guarddescr, gcref): """ This method is called to add another case to a guard_compatible. guard_compatible starts like a guard_value, but can grow to check more cases. 
The guard should only fail if the argument is unequal to all the diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -214,7 +214,8 @@ fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 1 - self.cpu.grow_guard_compatible_switch(faildescr1, t2_box._resref) + self.cpu.grow_guard_compatible_switch(looptoken, faildescr1, + t2_box._resref) for retry in range(2): deadframe = self.cpu.execute_token(looptoken, t2_box._resref) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -687,6 +687,8 @@ mc = codebuf.MachineCodeBlockWrapper() mc.writeimm32(relative_target) mc.copy_to_raw_memory(addr) + if tok.guard_compatible(): + guard_compat.patch_guard_compatible(rawstart, tok) else: # GUARD_NOT_INVALIDATED, record an entry in # clt.invalidate_positions of the form: @@ -772,7 +774,8 @@ mc.writeimm32(allocated_depth) mc.copy_to_raw_memory(adr) - def get_asmmemmgr_blocks(self, looptoken): + @staticmethod + def get_asmmemmgr_blocks(looptoken): clt = looptoken.compiled_loop_token if clt.asmmemmgr_blocks is None: clt.asmmemmgr_blocks = [] diff --git a/rpython/jit/backend/x86/guard_compat.py b/rpython/jit/backend/x86/guard_compat.py --- a/rpython/jit/backend/x86/guard_compat.py +++ b/rpython/jit/backend/x86/guard_compat.py @@ -1,11 +1,18 @@ -from rpython.rtyper.lltypesystem import rffi +from rpython.rlib import rgc +from rpython.rlib.objectmodel import we_are_translated +from rpython.rtyper.lltypesystem import lltype, rffi from rpython.jit.backend.x86.arch import WORD from rpython.jit.backend.x86 import rx86, codebuf from rpython.jit.backend.x86.regloc import X86_64_SCRATCH_REG, imm +from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper +from rpython.jit.metainterp.compile import GuardCompatibleDescr +from rpython.jit.metainterp.history import BasicFailDescr + # uses the raw structure COMPATINFO, which is informally defined like this: -# it is an array containing all the expected values that should pass the -# guard, terminated with a small_ofs value ( < 128, see in code). +# it starts with a negative 'small_ofs' value (see in the code) +# then there is an array containing all the expected values that should pass +# the guard, ending in -1. 
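A rough pure-Python model of this layout, and of the linear scan that the generated checker stub performs over it (illustrative only; the real code walks raw memory words and, on a hit, patches the fast-path immediate):

    # COMPATINFO modelled as a list: [small_ofs, value1, value2, ..., -1]
    def compatinfo_contains(compatinfo, value):
        i = 1                        # index 0 holds the negative small_ofs
        while compatinfo[i] != -1:   # the accepted values are terminated by -1
            if compatinfo[i] == value:
                return True          # found: the guard passes
            i += 1
        return False                 # not found: the guard really fails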
def generate_guard_compatible(assembler, guard_token, loc_reg, initial_value): @@ -22,18 +29,19 @@ # initialize 'compatinfo' with only 'initial_value' in it compatinfoaddr = assembler.datablockwrapper.malloc_aligned( - 2 * WORD, alignment=WORD) + 3 * WORD, alignment=WORD) compatinfo = rffi.cast(rffi.SIGNEDP, compatinfoaddr) - compatinfo[0] = initial_value + compatinfo[1] = initial_value + compatinfo[2] = -1 mc.MOV_ri64(X86_64_SCRATCH_REG.value, compatinfoaddr) # patchable + guard_token.pos_compatinfo_offset = mc.get_relative_pos() - WORD mc.PUSH_r(X86_64_SCRATCH_REG.value) mc.CALL(imm(checker)) mc.stack_frame_size_delta(-WORD) - small_ofs = mc.get_relative_pos() - rel_pos_compatible_imm - assert 0 <= small_ofs <= 127 - compatinfo[1] = small_ofs + small_ofs = rel_pos_compatible_imm - mc.get_relative_pos() + compatinfo[0] = small_ofs assembler.guard_success_cc = rx86.Conditions['NZ'] assembler.implement_guard(guard_token) @@ -44,6 +52,54 @@ mc.overwrite(je_location-1, chr(offset)) +def patch_guard_compatible(rawstart, tok): + descr = tok.faildescr + if not we_are_translated() and isinstance(descr, BasicFailDescr): + pass # for tests + else: + assert isinstance(descr, GuardCompatibleDescr) + descr._backend_compatinfo = rawstart + tok.pos_compatinfo_offset + + +def grow_switch(cpu, looptoken, guarddescr, gcref): + from rpython.jit.backend.x86.assembler import Assembler386 + + # XXX is it ok to force gcref to be non-movable? + if not rgc._make_sure_does_not_move(gcref): + raise AssertionError("oops") + new_value = rffi.cast(lltype.Signed, gcref) + + if not we_are_translated() and isinstance(guarddescr, BasicFailDescr): + pass # for tests + else: + assert isinstance(guarddescr, GuardCompatibleDescr) + compatinfop = rffi.cast(rffi.VOIDPP, guarddescr._backend_compatinfo) + compatinfo = rffi.cast(rffi.SIGNEDP, compatinfop[0]) + length = 3 + while compatinfo[length - 1] != -1: + length += 1 + + allblocks = Assembler386.get_asmmemmgr_blocks(looptoken) + datablockwrapper = MachineDataBlockWrapper(cpu.asmmemmgr, allblocks) + newcompatinfoaddr = datablockwrapper.malloc_aligned( + (length + 1) * WORD, alignment=WORD) + datablockwrapper.done() + + newcompatinfo = rffi.cast(rffi.SIGNEDP, newcompatinfoaddr) + newcompatinfo[0] = compatinfo[0] + newcompatinfo[1] = new_value + + for i in range(1, length): + newcompatinfo[i + 1] = compatinfo[i] + + # the old 'compatinfo' is not used any more, but will only be freed + # when the looptoken is freed + compatinfop[0] = rffi.cast(rffi.VOIDP, newcompatinfo) + + # the machine code is not updated here. We leave it to the actual + # guard_compatible to update it if needed. + + def setup_once(assembler): nb_registers = WORD * 2 assembler._guard_compat_checkers = [0] * nb_registers @@ -62,15 +118,15 @@ mc.MOV_rs(X86_64_SCRATCH_REG.value, WORD) pos = mc.get_relative_pos() - mc.CMP_mr((X86_64_SCRATCH_REG.value, 0), regnum) + mc.CMP_mr((X86_64_SCRATCH_REG.value, WORD), regnum) mc.J_il8(rx86.Conditions['E'], 0) # patched below je_location = mc.get_relative_pos() - mc.ADD_ri(X86_64_SCRATCH_REG.value, WORD) - mc.CMP_mi((X86_64_SCRATCH_REG.value, 0), 127) - mc.J_il8(rx86.Conditions['NBE'], pos - (mc.get_relative_pos() + 2)) + mc.CMP_mi((X86_64_SCRATCH_REG.value, WORD), -1) + mc.LEA_rm(X86_64_SCRATCH_REG.value, (X86_64_SCRATCH_REG.value, WORD)) + mc.J_il8(rx86.Conditions['NE'], pos - (mc.get_relative_pos() + 2)) - # not found! Return the condition code 'Zero' to mean 'not found'. - mc.CMP_rr(regnum, regnum) + # not found! 
The condition code is already 'Zero', which we return + # to mean 'not found'. mc.RET16_i(WORD) mc.force_frame_size(WORD) @@ -83,7 +139,9 @@ # found! update the assembler by writing the value at 'small_ofs' # bytes before our return address. This should overwrite the const in # 'MOV_ri64(r11, const)', first instruction of the guard_compatible. - mc.NEG_r(X86_64_SCRATCH_REG.value) + mc.INT3() + mc.MOV_rs(X86_64_SCRATCH_REG.value, WORD) + mc.MOV_rm(X86_64_SCRATCH_REG.value, (X86_64_SCRATCH_REG.value, 0)) mc.ADD_rs(X86_64_SCRATCH_REG.value, 0) mc.MOV_mr((X86_64_SCRATCH_REG.value, -WORD), regnum) diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py --- a/rpython/jit/backend/x86/runner.py +++ b/rpython/jit/backend/x86/runner.py @@ -6,7 +6,7 @@ from rpython.jit.backend.x86.regalloc import gpr_reg_mgr_cls, xmm_reg_mgr_cls from rpython.jit.backend.x86.profagent import ProfileAgent from rpython.jit.backend.llsupport.llmodel import AbstractLLCPU -from rpython.jit.backend.x86 import regloc +from rpython.jit.backend.x86 import regloc, guard_compat import sys @@ -122,8 +122,8 @@ l[i].counter = ll_s.i return l - def grow_guard_compatible_switch(self, guarddescr, gcref): - pass#xxx + def grow_guard_compatible_switch(self, looptoken, guarddescr, gcref): + guard_compat.grow_switch(self, looptoken, guarddescr, gcref) class CPU386(AbstractX86CPU): From pypy.commits at gmail.com Mon Mar 14 06:18:14 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 14 Mar 2016 03:18:14 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: remove breakpoint Message-ID: <56e68fe6.030f1c0a.dc48a.fffff545@mx.google.com> Author: Armin Rigo Branch: guard-compatible Changeset: r83031:754537bcf5be Date: 2016-03-14 11:17 +0100 http://bitbucket.org/pypy/pypy/changeset/754537bcf5be/ Log: remove breakpoint diff --git a/rpython/jit/backend/x86/guard_compat.py b/rpython/jit/backend/x86/guard_compat.py --- a/rpython/jit/backend/x86/guard_compat.py +++ b/rpython/jit/backend/x86/guard_compat.py @@ -139,7 +139,6 @@ # found! update the assembler by writing the value at 'small_ofs' # bytes before our return address. This should overwrite the const in # 'MOV_ri64(r11, const)', first instruction of the guard_compatible. 
- mc.INT3() mc.MOV_rs(X86_64_SCRATCH_REG.value, WORD) mc.MOV_rm(X86_64_SCRATCH_REG.value, (X86_64_SCRATCH_REG.value, 0)) mc.ADD_rs(X86_64_SCRATCH_REG.value, 0) From pypy.commits at gmail.com Mon Mar 14 06:35:52 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 14 Mar 2016 03:35:52 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: Change the interface again to take a CompiledLoopToken Message-ID: <56e69408.d30e1c0a.3649a.0433@mx.google.com> Author: Armin Rigo Branch: guard-compatible Changeset: r83032:c51aa683936e Date: 2016-03-14 11:35 +0100 http://bitbucket.org/pypy/pypy/changeset/c51aa683936e/ Log: Change the interface again to take a CompiledLoopToken diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -470,7 +470,7 @@ assert deadframe._saved_data is not None return deadframe._saved_data - def grow_guard_compatible_switch(self, looptoken, descr, ref): + def grow_guard_compatible_switch(self, compiled_loop_token, descr, ref): if not hasattr(descr, '_guard_compatible_llgraph_lst'): descr._guard_compatible_llgraph_lst = [] descr._guard_compatible_llgraph_lst.append(ref) diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -158,7 +158,8 @@ """ pass - def grow_guard_compatible_switch(self, looptoken, guarddescr, gcref): + def grow_guard_compatible_switch(self, compiled_loop_token, + guarddescr, gcref): """ This method is called to add another case to a guard_compatible. guard_compatible starts like a guard_value, but can grow to check more cases. The guard should only fail if the argument is unequal to all the @@ -308,6 +309,11 @@ debug_print("allocating Loop #", self.number) debug_stop("jit-mem-looptoken-alloc") + def get_asmmemmgr_blocks(self): + if self.asmmemmgr_blocks is None: + self.asmmemmgr_blocks = [] + return self.asmmemmgr_blocks + def compiling_a_bridge(self): self.cpu.tracker.total_compiled_bridges += 1 self.bridges_count += 1 diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -214,8 +214,8 @@ fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 1 - self.cpu.grow_guard_compatible_switch(looptoken, faildescr1, - t2_box._resref) + self.cpu.grow_guard_compatible_switch(looptoken.compiled_loop_token, + faildescr1, t2_box._resref) for retry in range(2): deadframe = self.cpu.execute_token(looptoken, t2_box._resref) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -774,12 +774,9 @@ mc.writeimm32(allocated_depth) mc.copy_to_raw_memory(adr) - @staticmethod - def get_asmmemmgr_blocks(looptoken): + def get_asmmemmgr_blocks(self, looptoken): clt = looptoken.compiled_loop_token - if clt.asmmemmgr_blocks is None: - clt.asmmemmgr_blocks = [] - return clt.asmmemmgr_blocks + return clt.get_asmmemmgr_blocks() def materialize_loop(self, looptoken): self.datablockwrapper.done() # finish using cpu.asmmemmgr diff --git a/rpython/jit/backend/x86/guard_compat.py b/rpython/jit/backend/x86/guard_compat.py --- a/rpython/jit/backend/x86/guard_compat.py +++ b/rpython/jit/backend/x86/guard_compat.py @@ -61,9 +61,7 @@ descr._backend_compatinfo = rawstart + tok.pos_compatinfo_offset -def 
grow_switch(cpu, looptoken, guarddescr, gcref): - from rpython.jit.backend.x86.assembler import Assembler386 - +def grow_switch(cpu, compiled_loop_token, guarddescr, gcref): # XXX is it ok to force gcref to be non-movable? if not rgc._make_sure_does_not_move(gcref): raise AssertionError("oops") @@ -79,7 +77,7 @@ while compatinfo[length - 1] != -1: length += 1 - allblocks = Assembler386.get_asmmemmgr_blocks(looptoken) + allblocks = compiled_loop_token.get_asmmemmgr_blocks() datablockwrapper = MachineDataBlockWrapper(cpu.asmmemmgr, allblocks) newcompatinfoaddr = datablockwrapper.malloc_aligned( (length + 1) * WORD, alignment=WORD) diff --git a/rpython/jit/backend/x86/runner.py b/rpython/jit/backend/x86/runner.py --- a/rpython/jit/backend/x86/runner.py +++ b/rpython/jit/backend/x86/runner.py @@ -122,8 +122,9 @@ l[i].counter = ll_s.i return l - def grow_guard_compatible_switch(self, looptoken, guarddescr, gcref): - guard_compat.grow_switch(self, looptoken, guarddescr, gcref) + def grow_guard_compatible_switch(self, compiled_loop_token, + guarddescr, gcref): + guard_compat.grow_switch(self, compiled_loop_token, guarddescr, gcref) class CPU386(AbstractX86CPU): From pypy.commits at gmail.com Mon Mar 14 06:38:52 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 14 Mar 2016 03:38:52 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: fix test Message-ID: <56e694bc.465ec20a.25769.0842@mx.google.com> Author: Armin Rigo Branch: guard-compatible Changeset: r83033:a4bd899899fc Date: 2016-03-14 11:38 +0100 http://bitbucket.org/pypy/pypy/changeset/a4bd899899fc/ Log: fix test diff --git a/rpython/jit/backend/x86/test/test_assembler.py b/rpython/jit/backend/x86/test/test_assembler.py --- a/rpython/jit/backend/x86/test/test_assembler.py +++ b/rpython/jit/backend/x86/test/test_assembler.py @@ -43,9 +43,14 @@ def do_test(self, callback): from rpython.jit.backend.x86.regalloc import X86FrameManager from rpython.jit.backend.x86.regalloc import X86XMMRegisterManager + class FakeCompiledLoopToken: + asmmemmgr_blocks = None + def get_asmmemmgr_blocks(self): + if self.asmmemmgr_blocks is None: + self.asmmemmgr_blocks = [] + return self.asmmemmgr_blocks class FakeToken: - class compiled_loop_token: - asmmemmgr_blocks = None + compiled_loop_token = FakeCompiledLoopToken() cpu = ACTUAL_CPU(None, None) cpu.setup() if cpu.HAS_CODEMAP: From pypy.commits at gmail.com Mon Mar 14 07:32:01 2016 From: pypy.commits at gmail.com (cfbolz) Date: Mon, 14 Mar 2016 04:32:01 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: remove pdb, remove outdated comment Message-ID: <56e6a131.4577c20a.bd04d.27f3@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83034:4ce22e96aeb4 Date: 2016-03-14 11:36 +0100 http://bitbucket.org/pypy/pypy/changeset/4ce22e96aeb4/ Log: remove pdb, remove outdated comment diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -930,10 +930,8 @@ else: resumedescr = ResumeGuardExcDescr() elif opnum == rop.GUARD_COMPATIBLE: - if copied_guard: - import pdb; pdb.set_trace() - else: - resumedescr = GuardCompatibleDescr() + assert not copied_guard # XXX for now? + resumedescr = GuardCompatibleDescr() else: if copied_guard: resumedescr = ResumeGuardCopiedDescr() @@ -1088,7 +1086,6 @@ fulfil need to be attached to this descr by optimizeopt. 
""" def __init__(self): - # XXX for now - in the end this would be in assembler self._compatibility_conditions = None def handle_fail(self, deadframe, metainterp_sd, jitdriver_sd): From pypy.commits at gmail.com Mon Mar 14 07:32:03 2016 From: pypy.commits at gmail.com (cfbolz) Date: Mon, 14 Mar 2016 04:32:03 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: update to new interface Message-ID: <56e6a133.500f1c0a.aa328.18ca@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83035:5eebc249f313 Date: 2016-03-14 12:24 +0100 http://bitbucket.org/pypy/pypy/changeset/5eebc249f313/ Log: update to new interface diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -1095,6 +1095,8 @@ refval = metainterp_sd.cpu.get_value_direct(deadframe, 'r', index) if self.is_compatible(metainterp_sd.cpu, refval): from rpython.jit.metainterp.blackhole import resume_in_blackhole + metainterp_sd.cpu.grow_guard_compatible_switch( + self, self.rd_loop_token, refval) resume_in_blackhole(metainterp_sd, jitdriver_sd, self, deadframe) else: # a real failure @@ -1104,7 +1106,6 @@ const = history.newconst(ref) if self._compatibility_conditions: if self._compatibility_conditions.check_compat(cpu, ref): - cpu.grow_guard_compatible_switch(self, ref) return True return False return True # no conditions, everything works From pypy.commits at gmail.com Mon Mar 14 07:53:36 2016 From: pypy.commits at gmail.com (cfbolz) Date: Mon, 14 Mar 2016 04:53:36 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: the ptrinfo might not exist at all Message-ID: <56e6a640.034cc20a.ad485.29f6@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83036:6ae43da2bb1b Date: 2016-03-14 12:52 +0100 http://bitbucket.org/pypy/pypy/changeset/6ae43da2bb1b/ Log: the ptrinfo might not exist at all diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -138,17 +138,18 @@ arg1 = self.get_box_replacement(op.getarg(1)) if arg1.type == 'r': info = self.getptrinfo(arg1) - ccond = info._compatibility_conditions - if info and ccond: - # it's subject to guard_compatible - copied_op = op.copy() - copied_op.setarg(1, ccond.known_valid) - result = self._can_optimize_call_pure(copied_op) - if result is not None: - self.make_constant(op, result) - self.last_emitted_operation = REMOVED - ccond.record_pure_call(copied_op, result) - return + if info: + ccond = info._compatibility_conditions + if ccond: + # it's subject to guard_compatible + copied_op = op.copy() + copied_op.setarg(1, ccond.known_valid) + result = self._can_optimize_call_pure(copied_op) + if result is not None: + self.make_constant(op, result) + self.last_emitted_operation = REMOVED + ccond.record_pure_call(copied_op, result) + return # Step 1: check if all arguments are constant for arg in op.getarglist(): From pypy.commits at gmail.com Mon Mar 14 08:25:41 2016 From: pypy.commits at gmail.com (cfbolz) Date: Mon, 14 Mar 2016 05:25:41 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: missing import Message-ID: <56e6adc5.8673c20a.01d5.3093@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83037:f6680e0a34d7 Date: 2016-03-14 13:24 +0100 http://bitbucket.org/pypy/pypy/changeset/f6680e0a34d7/ Log: missing import diff --git 
a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -3,7 +3,7 @@ ResOperation from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method from rpython.jit.metainterp.optimizeopt.shortpreamble import PreambleOp -from rpython.jit.metainterp.optimize import SpeculativeError +from rpython.jit.metainterp.optimize import SpeculativeError, InvalidLoop from rpython.jit.metainterp.compatible import CompatibilityCondition From pypy.commits at gmail.com Mon Mar 14 08:27:26 2016 From: pypy.commits at gmail.com (cfbolz) Date: Mon, 14 Mar 2016 05:27:26 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: fix Message-ID: <56e6ae2e.85371c0a.e566.3677@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83038:66858a12701d Date: 2016-03-14 12:02 +0000 http://bitbucket.org/pypy/pypy/changeset/66858a12701d/ Log: fix diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -36,7 +36,7 @@ assert isinstance(terminator, Terminator) self.terminator = terminator - @jit.elidable_compatible + @jit.elidable_compatible() def getclass_from_terminator(self): # objects with different maps can have the same class return self.terminator.w_cls @@ -73,7 +73,7 @@ def delete(self, obj, name, index): pass - @jit.elidable_compatible + @jit.elidable_compatible() def find_map_attr(self, name, index): if (self.space.config.objspace.std.withmethodcache): return self._find_map_attr_cache(name, index) From pypy.commits at gmail.com Mon Mar 14 08:30:07 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 14 Mar 2016 05:30:07 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: 32-bit support Message-ID: <56e6aecf.890bc30a.12110.40d5@mx.google.com> Author: Armin Rigo Branch: guard-compatible Changeset: r83039:4e72a2558a80 Date: 2016-03-14 13:29 +0100 http://bitbucket.org/pypy/pypy/changeset/4e72a2558a80/ Log: 32-bit support diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -1732,8 +1732,6 @@ loc_reg, loc_imm = locs assert isinstance(loc_reg, RegLoc) assert isinstance(loc_imm, ImmedLoc) - if IS_X86_32: - XXX guard_compat.generate_guard_compatible(self, guard_token, loc_reg, loc_imm.value) diff --git a/rpython/jit/backend/x86/guard_compat.py b/rpython/jit/backend/x86/guard_compat.py --- a/rpython/jit/backend/x86/guard_compat.py +++ b/rpython/jit/backend/x86/guard_compat.py @@ -1,9 +1,9 @@ from rpython.rlib import rgc from rpython.rlib.objectmodel import we_are_translated from rpython.rtyper.lltypesystem import lltype, rffi -from rpython.jit.backend.x86.arch import WORD +from rpython.jit.backend.x86.arch import WORD, IS_X86_32, IS_X86_64 from rpython.jit.backend.x86 import rx86, codebuf -from rpython.jit.backend.x86.regloc import X86_64_SCRATCH_REG, imm +from rpython.jit.backend.x86.regloc import X86_64_SCRATCH_REG, imm, eax, edx from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper from rpython.jit.metainterp.compile import GuardCompatibleDescr from rpython.jit.metainterp.history import BasicFailDescr @@ -18,9 +18,13 @@ def generate_guard_compatible(assembler, guard_token, loc_reg, initial_value): # fast-path check mc = assembler.mc - mc.MOV_ri64(X86_64_SCRATCH_REG.value, initial_value) - rel_pos_compatible_imm = 
mc.get_relative_pos() - mc.CMP_rr(loc_reg.value, X86_64_SCRATCH_REG.value) + if IS_X86_64: + mc.MOV_ri64(X86_64_SCRATCH_REG.value, initial_value) + rel_pos_compatible_imm = mc.get_relative_pos() + mc.CMP_rr(loc_reg.value, X86_64_SCRATCH_REG.value) + elif IS_X86_32: + mc.CMP_ri32(loc_reg.value, initial_value) + rel_pos_compatible_imm = mc.get_relative_pos() mc.J_il8(rx86.Conditions['E'], 0) je_location = mc.get_relative_pos() @@ -34,9 +38,13 @@ compatinfo[1] = initial_value compatinfo[2] = -1 - mc.MOV_ri64(X86_64_SCRATCH_REG.value, compatinfoaddr) # patchable - guard_token.pos_compatinfo_offset = mc.get_relative_pos() - WORD - mc.PUSH_r(X86_64_SCRATCH_REG.value) + if IS_X86_64: + mc.MOV_ri64(X86_64_SCRATCH_REG.value, compatinfoaddr) # patchable + guard_token.pos_compatinfo_offset = mc.get_relative_pos() - WORD + mc.PUSH_r(X86_64_SCRATCH_REG.value) + elif IS_X86_32: + mc.PUSH_i32(compatinfoaddr) # patchable + guard_token.pos_compatinfo_offset = mc.get_relative_pos() - WORD mc.CALL(imm(checker)) mc.stack_frame_size_delta(-WORD) @@ -113,21 +121,36 @@ mc = codebuf.MachineCodeBlockWrapper() - mc.MOV_rs(X86_64_SCRATCH_REG.value, WORD) + if IS_X86_64: + tmp = X86_64_SCRATCH_REG.value + stack_ret = 0 + stack_arg = WORD + elif IS_X86_32: + if regnum != eax.value: + tmp = eax.value + else: + tmp = edx.value + mc.PUSH_r(tmp) + stack_ret = WORD + stack_arg = 2 * WORD + + mc.MOV_rs(tmp, stack_arg) pos = mc.get_relative_pos() - mc.CMP_mr((X86_64_SCRATCH_REG.value, WORD), regnum) + mc.CMP_mr((tmp, WORD), regnum) mc.J_il8(rx86.Conditions['E'], 0) # patched below je_location = mc.get_relative_pos() - mc.CMP_mi((X86_64_SCRATCH_REG.value, WORD), -1) - mc.LEA_rm(X86_64_SCRATCH_REG.value, (X86_64_SCRATCH_REG.value, WORD)) + mc.CMP_mi((tmp, WORD), -1) + mc.LEA_rm(tmp, (tmp, WORD)) mc.J_il8(rx86.Conditions['NE'], pos - (mc.get_relative_pos() + 2)) # not found! The condition code is already 'Zero', which we return # to mean 'not found'. + if IS_X86_32: + mc.POP_r(tmp) mc.RET16_i(WORD) - mc.force_frame_size(WORD) + mc.force_frame_size(8) # one word on X86_64, two words on X86_32 # patch the JE above offset = mc.get_relative_pos() - je_location @@ -137,13 +160,15 @@ # found! update the assembler by writing the value at 'small_ofs' # bytes before our return address. This should overwrite the const in # 'MOV_ri64(r11, const)', first instruction of the guard_compatible. - mc.MOV_rs(X86_64_SCRATCH_REG.value, WORD) - mc.MOV_rm(X86_64_SCRATCH_REG.value, (X86_64_SCRATCH_REG.value, 0)) - mc.ADD_rs(X86_64_SCRATCH_REG.value, 0) - mc.MOV_mr((X86_64_SCRATCH_REG.value, -WORD), regnum) + mc.MOV_rs(tmp, stack_arg) + mc.MOV_rm(tmp, (tmp, 0)) + mc.ADD_rs(tmp, stack_ret) + mc.MOV_mr((tmp, -WORD), regnum) # the condition codes say 'Not Zero', as a result of the ADD above. # Return this condition code to mean 'found'. 
+ if IS_X86_32: + mc.POP_r(tmp) mc.RET16_i(WORD) addr = mc.materialize(assembler.cpu, []) diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -404,7 +404,7 @@ INSN_bi._always_inline_ = True # try to constant-fold single_byte() return (INSN_ri, INSN_rr, INSN_rb, INSN_bi, INSN_br, INSN_rm, INSN_rj, - INSN_ji8, INSN_mi8, INSN_rs) + INSN_ji8, INSN_mi8, INSN_rs, INSN_ri32) def select_8_or_32_bit_immed(insn_8, insn_32): def INSN(*args): @@ -506,13 +506,13 @@ INC_m = insn(rex_w, '\xFF', orbyte(0), mem_reg_plus_const(1)) INC_j = insn(rex_w, '\xFF', orbyte(0), abs_(1)) - AD1_ri,ADD_rr,ADD_rb,_,_,ADD_rm,ADD_rj,_,_,ADD_rs = common_modes(0) - OR_ri, OR_rr, OR_rb, _,_,OR_rm, OR_rj, _,_,_ = common_modes(1) - AND_ri,AND_rr,AND_rb,_,_,AND_rm,AND_rj,_,_,_ = common_modes(4) - SU1_ri,SUB_rr,SUB_rb,_,_,SUB_rm,SUB_rj,SUB_ji8,SUB_mi8,_ = common_modes(5) - SBB_ri,SBB_rr,SBB_rb,_,_,SBB_rm,SBB_rj,_,_,_ = common_modes(3) - XOR_ri,XOR_rr,XOR_rb,_,_,XOR_rm,XOR_rj,_,_,_ = common_modes(6) - CMP_ri,CMP_rr,CMP_rb,CMP_bi,CMP_br,CMP_rm,CMP_rj,_,_,_ = common_modes(7) + AD1_ri,ADD_rr,ADD_rb,_,_,ADD_rm,ADD_rj,_,_,ADD_rs, _ = common_modes(0) + OR_ri, OR_rr, OR_rb, _,_,OR_rm, OR_rj, _,_,_,_ = common_modes(1) + AND_ri,AND_rr,AND_rb,_,_,AND_rm,AND_rj,_,_,_,_ = common_modes(4) + SU1_ri,SUB_rr,SUB_rb,_,_,SUB_rm,SUB_rj,SUB_ji8,SUB_mi8,_,_ = common_modes(5) + SBB_ri,SBB_rr,SBB_rb,_,_,SBB_rm,SBB_rj,_,_,_,_ = common_modes(3) + XOR_ri,XOR_rr,XOR_rb,_,_,XOR_rm,XOR_rj,_,_,_,_ = common_modes(6) + CMP_ri,CMP_rr,CMP_rb,CMP_bi,CMP_br,CMP_rm,CMP_rj,_,_,_,CMP_ri32 = common_modes(7) def ADD_ri(self, reg, immed): self.AD1_ri(reg, immed) @@ -609,6 +609,10 @@ self.PUS1_i32(immed) self.stack_frame_size_delta(+self.WORD) + def PUSH_i32(self, immed): + self.PUS1_i32(immed) + self.stack_frame_size_delta(+self.WORD) + PO1_r = insn(rex_nw, register(1), '\x58') PO1_b = insn(rex_nw, '\x8F', orbyte(0<<3), stack_bp(1)) From pypy.commits at gmail.com Mon Mar 14 08:38:59 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 14 Mar 2016 05:38:59 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: Keepalive. Hard to test... Message-ID: <56e6b0e3.c9161c0a.f42a9.2c3b@mx.google.com> Author: Armin Rigo Branch: guard-compatible Changeset: r83040:db4dc094a002 Date: 2016-03-14 13:38 +0100 http://bitbucket.org/pypy/pypy/changeset/db4dc094a002/ Log: Keepalive. Hard to test... 
diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -295,6 +295,7 @@ class CompiledLoopToken(object): asmmemmgr_blocks = None asmmemmgr_gcroots = 0 + _keepalive_extra = None def __init__(self, cpu, number): cpu.tracker.total_compiled_loops += 1 diff --git a/rpython/jit/backend/x86/guard_compat.py b/rpython/jit/backend/x86/guard_compat.py --- a/rpython/jit/backend/x86/guard_compat.py +++ b/rpython/jit/backend/x86/guard_compat.py @@ -75,6 +75,12 @@ raise AssertionError("oops") new_value = rffi.cast(lltype.Signed, gcref) + # XXX related to the above: for now we keep alive the gcrefs forever + # in the compiled_loop_token + if compiled_loop_token._keepalive_extra is None: + compiled_loop_token._keepalive_extra = [] + compiled_loop_token._keepalive_extra.append(gcref) + if not we_are_translated() and isinstance(guarddescr, BasicFailDescr): pass # for tests else: From pypy.commits at gmail.com Mon Mar 14 08:48:25 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 14 Mar 2016 05:48:25 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: add test for db4dc094a002 Message-ID: <56e6b319.e6bbc20a.265af.41cc@mx.google.com> Author: Armin Rigo Branch: guard-compatible Changeset: r83041:d99d2e0e37bb Date: 2016-03-14 13:47 +0100 http://bitbucket.org/pypy/pypy/changeset/d99d2e0e37bb/ Log: add test for db4dc094a002 diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -191,9 +191,10 @@ assert res == 10 def test_extend_guard_compatible(self): + import weakref, gc + t1_box, T1_box, d1 = self.alloc_instance(self.T) t2_box, T2_box, d2 = self.alloc_instance(self.T) - t3_box, T3_box, d3 = self.alloc_instance(self.T) faildescr1 = BasicFailDescr(1) loop = parse(""" [p0] @@ -222,6 +223,11 @@ fail = self.cpu.get_latest_descr(deadframe) assert fail.identifier == 2 + wr = weakref.ref(t2_box.getref_base()) + del t2_box, T2_box, d2 + gc.collect(); gc.collect() + assert wr() is not None # kept alive by grow_guard_compatible_switch + def test_compile_with_holes_in_fail_args(self): targettoken = TargetToken() loop = parse(""" From pypy.commits at gmail.com Mon Mar 14 11:23:34 2016 From: pypy.commits at gmail.com (cfbolz) Date: Mon, 14 Mar 2016 08:23:34 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: translation fixes Message-ID: <56e6d776.46fac20a.3bb1a.7f7f@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83042:eed34dd847a5 Date: 2016-03-14 15:28 +0000 http://bitbucket.org/pypy/pypy/changeset/eed34dd847a5/ Log: translation fixes diff --git a/rpython/jit/metainterp/compatible.py b/rpython/jit/metainterp/compatible.py --- a/rpython/jit/metainterp/compatible.py +++ b/rpython/jit/metainterp/compatible.py @@ -1,4 +1,5 @@ from rpython.jit.metainterp.history import newconst +from rpython.jit.codewriter import longlong def do_call(cpu, argboxes, descr): from rpython.jit.metainterp.history import INT, REF, FLOAT, VOID diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -419,7 +419,7 @@ if old_guard_op.getopnum() == rop.GUARD_COMPATIBLE: if not old_guard_op.getarg(1).same_constant(op.getarg(1)): r1 = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(op) - r2 = 
self.optimizer.metainterp_sd.logger_ops.repr_of_resop(old_guard) + r2 = self.optimizer.metainterp_sd.logger_ops.repr_of_resop(old_guard_op) raise InvalidLoop('a GUARD_COMPATIBLE (%s) is inconsistent ' 'with a GUARD_VALUE (%s)' % (r1, r2)) descr = compile.ResumeGuardDescr() From pypy.commits at gmail.com Mon Mar 14 12:17:56 2016 From: pypy.commits at gmail.com (cfbolz) Date: Mon, 14 Mar 2016 09:17:56 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: grumble, of course the llgraph tests work anyway Message-ID: <56e6e434.8673c20a.01d5.ffff8dec@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83043:2c3489492545 Date: 2016-03-14 17:17 +0100 http://bitbucket.org/pypy/pypy/changeset/2c3489492545/ Log: grumble, of course the llgraph tests work anyway diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -1096,7 +1096,7 @@ if self.is_compatible(metainterp_sd.cpu, refval): from rpython.jit.metainterp.blackhole import resume_in_blackhole metainterp_sd.cpu.grow_guard_compatible_switch( - self, self.rd_loop_token, refval) + self.rd_loop_token, self, refval) resume_in_blackhole(metainterp_sd, jitdriver_sd, self, deadframe) else: # a real failure From pypy.commits at gmail.com Mon Mar 14 13:26:52 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 14 Mar 2016 10:26:52 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: Add an assert on the type of compiled_loop_token, even if not used here Message-ID: <56e6f45c.2179c20a.fd8e2.ffffab3e@mx.google.com> Author: Armin Rigo Branch: guard-compatible Changeset: r83044:8d87ef9411ad Date: 2016-03-14 18:25 +0100 http://bitbucket.org/pypy/pypy/changeset/8d87ef9411ad/ Log: Add an assert on the type of compiled_loop_token, even if not used here diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -471,6 +471,7 @@ return deadframe._saved_data def grow_guard_compatible_switch(self, compiled_loop_token, descr, ref): + assert isinstance(compiled_loop_token, model.CompiledLoopToken) if not hasattr(descr, '_guard_compatible_llgraph_lst'): descr._guard_compatible_llgraph_lst = [] descr._guard_compatible_llgraph_lst.append(ref) From pypy.commits at gmail.com Mon Mar 14 13:55:28 2016 From: pypy.commits at gmail.com (cfbolz) Date: Mon, 14 Mar 2016 10:55:28 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: make sure that the test fails if grow_guard_compatible_switch does nothing by Message-ID: <56e6fb10.2457c20a.5af9a.ffffc3cc@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83045:25a9cbb2f43b Date: 2016-03-14 18:48 +0100 http://bitbucket.org/pypy/pypy/changeset/25a9cbb2f43b/ Log: make sure that the test fails if grow_guard_compatible_switch does nothing by checking that the elidable function is not called too often diff --git a/rpython/jit/metainterp/test/test_compatible.py b/rpython/jit/metainterp/test/test_compatible.py --- a/rpython/jit/metainterp/test/test_compatible.py +++ b/rpython/jit/metainterp/test/test_compatible.py @@ -15,8 +15,15 @@ p3 = lltype.malloc(S) p3.x = 6 driver = jit.JitDriver(greens = [], reds = ['n', 'x']) + + class A(object): + pass + + c = A() + c.count = 0 @jit.elidable_compatible() def g(s): + c.count += 1 return s.x def f(n, x): @@ -29,8 +36,11 @@ f(100, p1) f(100, p2) f(100, p3) + return c.count - self.meta_interp(main, []) + 
x = self.meta_interp(main, []) + + assert x < 25 # XXX check number of bridges def test_exception(self): From pypy.commits at gmail.com Mon Mar 14 14:43:01 2016 From: pypy.commits at gmail.com (Sergey Matyunin) Date: Mon, 14 Mar 2016 11:43:01 -0700 (PDT) Subject: [pypy-commit] pypy fix_indexing_by_numpy_int: Fixed indexing by numpy.int Message-ID: <56e70635.c96cc20a.a5021.ffffdb69@mx.google.com> Author: Sergey Matyunin Branch: fix_indexing_by_numpy_int Changeset: r83046:889015cbbfed Date: 2016-03-13 22:33 +0100 http://bitbucket.org/pypy/pypy/changeset/889015cbbfed/ Log: Fixed indexing by numpy.int diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -267,6 +267,11 @@ "interpreted as a valid boolean index") elif isinstance(w_idx, boxes.W_GenericBox): w_ret = self.getitem_array_int(space, w_idx) + + if isinstance(w_idx, boxes.W_IntegerBox): + # if w_idx is integer then getitem_array_int must contain a single value and we must return it. + # Get 0-th element of the w_ret. + w_ret = w_ret.implementation.descr_getitem(space, self, space.wrap(0)) else: try: w_ret = self.implementation.descr_getitem(space, self, w_idx) diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3397,6 +3397,22 @@ a.itemset(1, 2, 100) assert a[1, 2] == 100 + def test_index_int64(self): + import numpy as np + res = np.array([10, 20, 30])[np.int64(1)] + + assert isinstance(res, np.int64) + assert not isinstance(res, np.ndarray) + assert res == 20 + + def test_index_int32(self): + import numpy as np + res = np.array([10, 20, 30])[np.int32(0)] + + assert isinstance(res, np.int64) + assert not isinstance(res, np.ndarray) + assert res == 10 + def test_index(self): import numpy as np a = np.array([1], np.uint16) @@ -3408,6 +3424,7 @@ assert exc.value.message == 'only integer arrays with one element ' \ 'can be converted to an index' + def test_int_array_index(self): from numpy import array assert (array([])[[]] == []).all() From pypy.commits at gmail.com Mon Mar 14 14:43:03 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 14 Mar 2016 11:43:03 -0700 (PDT) Subject: [pypy-commit] pypy fix_indexing_by_numpy_int: add passing test Message-ID: <56e70637.4a811c0a.35edc.ffffc1a1@mx.google.com> Author: mattip Branch: fix_indexing_by_numpy_int Changeset: r83047:31bb9ad05cae Date: 2016-03-14 20:38 +0200 http://bitbucket.org/pypy/pypy/changeset/31bb9ad05cae/ Log: add passing test diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3397,22 +3397,21 @@ a.itemset(1, 2, 100) assert a[1, 2] == 100 - def test_index_int64(self): + def test_index_int(self): import numpy as np - res = np.array([10, 20, 30])[np.int64(1)] - + a = np.array([10, 20, 30]) + res = a[np.int64(1)] assert isinstance(res, np.int64) - assert not isinstance(res, np.ndarray) assert res == 20 - - def test_index_int32(self): - import numpy as np - res = np.array([10, 20, 30])[np.int32(0)] - + res = a[np.int32(0)] assert isinstance(res, np.int64) - assert not isinstance(res, np.ndarray) assert res == 10 + b = a.astype(float) + res = b[np.int64(1)] + assert res == 20.0 + assert isinstance(res, np.float64) + def test_index(self): import numpy as np a = 
np.array([1], np.uint16) From pypy.commits at gmail.com Mon Mar 14 14:43:05 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 14 Mar 2016 11:43:05 -0700 (PDT) Subject: [pypy-commit] pypy fix_indexing_by_numpy_int: close branch to be merged Message-ID: <56e70639.4c181c0a.aca25.ffffca63@mx.google.com> Author: mattip Branch: fix_indexing_by_numpy_int Changeset: r83048:897fe49bbe94 Date: 2016-03-14 20:39 +0200 http://bitbucket.org/pypy/pypy/changeset/897fe49bbe94/ Log: close branch to be merged From pypy.commits at gmail.com Mon Mar 14 14:43:06 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 14 Mar 2016 11:43:06 -0700 (PDT) Subject: [pypy-commit] pypy default: merge fix_indexing_by_numpy_int which implements indexing by a scalar returning a scalar Message-ID: <56e7063a.10921c0a.1cdf3.ffffcba5@mx.google.com> Author: mattip Branch: Changeset: r83049:3ba796288d26 Date: 2016-03-14 20:41 +0200 http://bitbucket.org/pypy/pypy/changeset/3ba796288d26/ Log: merge fix_indexing_by_numpy_int which implements indexing by a scalar returning a scalar diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -18,3 +18,8 @@ .. branch: where_1_arg Implemented numpy.where for 1 argument (thanks sergem) + +.. branch: fix_indexing_by_numpy_int + +Implement yet another strange numpy indexing compatibility; indexing by a scalar +returns a scalar diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -267,6 +267,11 @@ "interpreted as a valid boolean index") elif isinstance(w_idx, boxes.W_GenericBox): w_ret = self.getitem_array_int(space, w_idx) + + if isinstance(w_idx, boxes.W_IntegerBox): + # if w_idx is integer then getitem_array_int must contain a single value and we must return it. + # Get 0-th element of the w_ret. 
+ w_ret = w_ret.implementation.descr_getitem(space, self, space.wrap(0)) else: try: w_ret = self.implementation.descr_getitem(space, self, w_idx) diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3437,6 +3437,21 @@ a.itemset(1, 2, 100) assert a[1, 2] == 100 + def test_index_int(self): + import numpy as np + a = np.array([10, 20, 30]) + res = a[np.int64(1)] + assert isinstance(res, np.int64) + assert res == 20 + res = a[np.int32(0)] + assert isinstance(res, np.int64) + assert res == 10 + + b = a.astype(float) + res = b[np.int64(1)] + assert res == 20.0 + assert isinstance(res, np.float64) + def test_index(self): import numpy as np a = np.array([1], np.uint16) @@ -3448,6 +3463,7 @@ assert exc.value.message == 'only integer arrays with one element ' \ 'can be converted to an index' + def test_int_array_index(self): from numpy import array assert (array([])[[]] == []).all() From pypy.commits at gmail.com Mon Mar 14 15:03:03 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 14 Mar 2016 12:03:03 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: in-progress Message-ID: <56e70ae7.29cec20a.6211c.ffffd861@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83050:caa5be25642c Date: 2016-03-14 12:47 +0200 http://bitbucket.org/pypy/pypy/changeset/caa5be25642c/ Log: in-progress diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -7,7 +7,7 @@ from rpython.conftest import option from rpython.jit.metainterp.resoperation import ResOperation, rop,\ - AbstractValue, oparity + AbstractValue, oparity, AbstractResOp, IntOp, RefOp, FloatOp from rpython.jit.codewriter import heaptracker, longlong import weakref @@ -640,6 +640,21 @@ # ____________________________________________________________ +class FrontendOp(AbstractResOp): + type = 'v' + _attrs_ = ('position',) + + def __init__(self, pos): + self.position = pos + +class IntFrontendOp(IntOp, FrontendOp): + _attrs_ = ('position', '_resint') + +class FloatFrontendOp(FloatOp, FrontendOp): + _attrs_ = ('position', '_resfloat') + +class RefFrontendOp(RefOp, FrontendOp): + _attrs_ = ('position', '_resref') class History(object): ends_with_jump = False @@ -657,6 +672,7 @@ self.trace = Trace(inpargs) self.inputargs = inpargs if self._cache: + xxx # hack to record the ops *after* we know our inputargs for op in self._cache: newop = self.trace.record_op(op.getopnum(), op.getarglist(), @@ -679,24 +695,25 @@ @specialize.argtype(3) def record(self, opnum, argboxes, value, descr=None): if self.trace is None: + xxx op = ResOperation(opnum, argboxes, -1, descr) self._cache.append(op) else: - op = self.trace.record_op(opnum, argboxes, descr) + pos = self.trace._record_op(opnum, argboxes, descr) if value is None: - assert op.type == 'v' + op = FrontendOp(pos) elif isinstance(value, bool): - assert op.type == 'i' + op = IntFrontendOp(pos) op.setint(int(value)) elif lltype.typeOf(value) == lltype.Signed: - assert op.type == 'i' + op = IntFrontendOp(pos) op.setint(value) elif lltype.typeOf(value) is longlong.FLOATSTORAGE: - assert op.type == 'f' + op = FloatFrontendOp(pos) op.setfloatstorage(value) else: + op = RefFrontendOp(pos) assert lltype.typeOf(value) == llmemory.GCREF - assert op.type == 'r' op.setref_base(value) return op diff --git a/rpython/jit/metainterp/opencoder.py 
b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -9,7 +9,7 @@ from rpython.jit.metainterp.history import ConstInt, Const, ConstFloat, ConstPtr from rpython.jit.metainterp.resoperation import AbstractResOp, AbstractInputArg,\ - ResOperation, oparity, rop, opwithdescr, GuardResOp + ResOperation, oparity, rop, opwithdescr, GuardResOp, IntOp, FloatOp, RefOp from rpython.rlib.rarithmetic import intmask from rpython.rlib.objectmodel import we_are_translated from rpython.rtyper.lltypesystem import rffi, lltype, llmemory @@ -347,7 +347,7 @@ vref_array = self._list_of_boxes(vref_boxes) s = TopSnapshot(combine_uint(2**16 - 1, 0), [], vable_array, vref_array) - assert rffi.cast(lltype.Signed, self._ops[self._pos - 1]) == -1 + assert rffi.cast(lltype.Signed, self._ops[self._pos - 1]) == 0 # guards have no descr self._snapshots.append(s) self._ops[self._pos - 1] = rffi.cast(rffi.SHORT, len(self._snapshots) - 1) From pypy.commits at gmail.com Mon Mar 14 15:03:05 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 14 Mar 2016 12:03:05 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: try a much simpler implementation for resop in the frontend (ideally dying completely) Message-ID: <56e70ae9.c13fc20a.eb7cf.ffffd682@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83051:18ea5495a9de Date: 2016-03-14 21:02 +0200 http://bitbucket.org/pypy/pypy/changeset/18ea5495a9de/ Log: try a much simpler implementation for resop in the frontend (ideally dying completely) diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -672,12 +672,13 @@ self.trace = Trace(inpargs) self.inputargs = inpargs if self._cache: - xxx # hack to record the ops *after* we know our inputargs for op in self._cache: newop = self.trace.record_op(op.getopnum(), op.getarglist(), op.getdescr()) op.position = newop.position + if op.type != 'v': + newop.copy_value_from(op) self._cache = None def length(self): @@ -692,29 +693,38 @@ def any_operation(self): return self.trace._count > 0 + @specialize.argtype(2) + def set_op_value(self, op, value): + if value is None: + return + elif isinstance(value, bool): + op.setint(int(value)) + elif lltype.typeOf(value) == lltype.Signed: + op.setint(value) + elif lltype.typeOf(value) is longlong.FLOATSTORAGE: + op.setfloatstorage(value) + else: + assert lltype.typeOf(value) == llmemory.GCREF + op.setref_base(value) + @specialize.argtype(3) def record(self, opnum, argboxes, value, descr=None): if self.trace is None: - xxx op = ResOperation(opnum, argboxes, -1, descr) self._cache.append(op) else: pos = self.trace._record_op(opnum, argboxes, descr) - if value is None: - op = FrontendOp(pos) - elif isinstance(value, bool): - op = IntFrontendOp(pos) - op.setint(int(value)) - elif lltype.typeOf(value) == lltype.Signed: - op = IntFrontendOp(pos) - op.setint(value) - elif lltype.typeOf(value) is longlong.FLOATSTORAGE: - op = FloatFrontendOp(pos) - op.setfloatstorage(value) - else: - op = RefFrontendOp(pos) - assert lltype.typeOf(value) == llmemory.GCREF - op.setref_base(value) + if value is None: + op = FrontendOp(pos) + elif isinstance(value, bool): + op = IntFrontendOp(pos) + elif lltype.typeOf(value) == lltype.Signed: + op = IntFrontendOp(pos) + elif lltype.typeOf(value) is longlong.FLOATSTORAGE: + op = FloatFrontendOp(pos) + else: + op = RefFrontendOp(pos) + self.set_op_value(op, value) return op def 
record_nospec(self, opnum, argboxes, descr=None): @@ -772,13 +782,14 @@ enter_count = 0 aborted_count = 0 - def __init__(self): + def __init__(self, metainterp_sd): self.loops = [] self.locations = [] self.aborted_keys = [] self.invalidated_token_numbers = set() # <- not RPython self.jitcell_token_wrefs = [] self.jitcell_dicts = [] # <- not RPython + self.metainterp_sd = metainterp_sd def clear(self): del self.loops[:] @@ -834,7 +845,7 @@ def check_history(self, expected=None, **check): insns = {} - t = self.history.trace.get_iter() + t = self.history.trace.get_iter(self.metainterp_sd) while not t.done(): op = t.next() opname = op.getopname() diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1527,7 +1527,8 @@ op = self.metainterp.execute_and_record_varargs(opnum, argboxes, descr=descr) if pure and not self.metainterp.last_exc_value and op: - op = self.metainterp.record_result_of_call_pure(op, patch_pos) + op = self.metainterp.record_result_of_call_pure(op, argboxes, descr, + patch_pos) exc = exc and not isinstance(op, Const) if exc: if op is not None: @@ -1620,7 +1621,7 @@ vablebox = None if assembler_call: vablebox, resbox = self.metainterp.direct_assembler_call( - self.metainterp._last_op, assembler_call_jd, cut_pos) + self.metainterp._last_op, allboxes, descr, assembler_call_jd, cut_pos) if resbox and resbox.type != 'v': self.make_result_of_lastop(resbox) self.metainterp.vable_after_residual_call(funcbox) @@ -2063,12 +2064,11 @@ lltype.nullptr(llmemory.GCREF.TO)) else: guard_op = self.history.record(opnum, moreargs, None) - assert isinstance(guard_op, GuardResOp) self.capture_resumedata(resumepc) # ^^^ records extra to history self.staticdata.profiler.count_ops(opnum, Counters.GUARDS) # count - self.attach_debug_info(guard_op) + #self.attach_debug_info(guard_op) return guard_op def capture_resumedata(self, resumepc=-1): @@ -2997,14 +2997,12 @@ debug_stop("jit-abort-longest-function") return max_jdsd, max_key - def record_result_of_call_pure(self, op, patch_pos): + def record_result_of_call_pure(self, op, argboxes, descr, patch_pos): """ Patch a CALL into a CALL_PURE. """ - opnum = op.getopnum() - assert opnum in [rop.CALL_R, rop.CALL_N, rop.CALL_I, rop.CALL_F] resbox_as_const = executor.constant_from_op(op) - for i in range(op.numargs()): - if not isinstance(op.getarg(i), Const): + for argbox in argboxes: + if not isinstance(argbox, Const): break else: # all-constants: remove the CALL operation now and propagate a @@ -3013,28 +3011,26 @@ return resbox_as_const # not all constants (so far): turn CALL into CALL_PURE, which might # be either removed later by optimizeopt or turned back into CALL. - arg_consts = [executor.constant_from_op(a) for a in op.getarglist()] + arg_consts = [executor.constant_from_op(a) for a in argboxes] self.call_pure_results[arg_consts] = resbox_as_const - opnum = OpHelpers.call_pure_for_descr(op.getdescr()) + opnum = OpHelpers.call_pure_for_descr(descr) self.history.cut(patch_pos) - newop = self.history.record_nospec(opnum, op.getarglist(), op.getdescr()) + newop = self.history.record_nospec(opnum, argboxes, descr) newop.copy_value_from(op) return newop - def direct_assembler_call(self, op, targetjitdriver_sd, cut_pos): + def direct_assembler_call(self, op, arglist, descr, targetjitdriver_sd, cut_pos): """ Generate a direct call to assembler for portal entry point, patching the CALL_MAY_FORCE that occurred just now. 
""" self.history.cut(cut_pos) - assert rop.is_call_may_force(op.getopnum()) num_green_args = targetjitdriver_sd.num_green_args - arglist = op.getarglist() greenargs = arglist[1:num_green_args+1] args = arglist[num_green_args+1:] assert len(args) == targetjitdriver_sd.num_red_args warmrunnerstate = targetjitdriver_sd.warmstate token = warmrunnerstate.get_assembler_token(greenargs) - opnum = OpHelpers.call_assembler_for_descr(op.getdescr()) + opnum = OpHelpers.call_assembler_for_descr(descr) oldop = op op = self.history.record_nospec(opnum, args, descr=token) if opnum == rop.CALL_ASSEMBLER_N: diff --git a/rpython/jit/metainterp/test/support.py b/rpython/jit/metainterp/test/support.py --- a/rpython/jit/metainterp/test/support.py +++ b/rpython/jit/metainterp/test/support.py @@ -73,7 +73,7 @@ portal_runner_ptr = "???" vec = False - stats = history.Stats() + stats = history.Stats(None) cpu = CPUClass(rtyper, stats, None, False) cw = codewriter.CodeWriter(cpu, [FakeJitDriverSD()]) cw.debug = True @@ -99,6 +99,7 @@ testself.finish_setup_for_interp_operations() # cw.make_jitcodes(verbose=True) + return stats def _run_with_blackhole(testself, args): from rpython.jit.metainterp.blackhole import BlackholeInterpBuilder @@ -125,11 +126,13 @@ blackholeinterp.run() return blackholeinterp._final_result_anytype() -def _run_with_pyjitpl(testself, args): +def _run_with_pyjitpl(testself, args, stats): cw = testself.cw opt = history.Options(listops=True) metainterp_sd = pyjitpl.MetaInterpStaticData(cw.cpu, opt) + stats.metainterp_sd = metainterp_sd metainterp_sd.finish_setup(cw) + [jitdriver_sd] = metainterp_sd.jitdrivers_sd metainterp = pyjitpl.MetaInterp(metainterp_sd, jitdriver_sd) testself.metainterp = metainterp @@ -258,11 +261,11 @@ def interp_operations(self, f, args, **kwds): # get the JitCodes for the function f - _get_jitcodes(self, self.CPUClass, f, args, **kwds) + stats = _get_jitcodes(self, self.CPUClass, f, args, **kwds) # try to run it with blackhole.py result1 = _run_with_blackhole(self, args) # try to run it with pyjitpl.py - result2 = _run_with_pyjitpl(self, args) + result2 = _run_with_pyjitpl(self, args, stats) assert result1 == result2 or isnan(result1) and isnan(result2) # try to run it by running the code compiled just before df, result3 = _run_with_machine_code(self, args) diff --git a/rpython/jit/metainterp/test/test_compile.py b/rpython/jit/metainterp/test/test_compile.py --- a/rpython/jit/metainterp/test/test_compile.py +++ b/rpython/jit/metainterp/test/test_compile.py @@ -59,7 +59,7 @@ logger_ops = FakeLogger() config = get_combined_translation_config(translating=True) - stats = Stats() + stats = Stats(None) profiler = jitprof.EmptyProfiler() warmrunnerdesc = None def log(self, msg, event_kind=None): diff --git a/rpython/jit/metainterp/test/test_resume.py b/rpython/jit/metainterp/test/test_resume.py --- a/rpython/jit/metainterp/test/test_resume.py +++ b/rpython/jit/metainterp/test/test_resume.py @@ -8,9 +8,9 @@ tagged_list_eq, AbstractVirtualInfo, TAGCONST, NULLREF,\ ResumeDataDirectReader, TAGINT, REF, VirtualInfo, VStructInfo,\ VArrayInfoNotClear, VStrPlainInfo, VStrConcatInfo, VStrSliceInfo,\ - VUniPlainInfo, VUniConcatInfo, VUniSliceInfo, Snapshot, FrameInfo,\ + VUniPlainInfo, VUniConcatInfo, VUniSliceInfo,\ capture_resumedata, ResumeDataLoopMemo, UNASSIGNEDVIRTUAL, INT,\ - annlowlevel, PENDINGFIELDSP, unpack_uint, TAG_CONST_OFFSET, TopSnapshot + annlowlevel, PENDINGFIELDSP, TAG_CONST_OFFSET from rpython.jit.metainterp.resumecode import unpack_numbering,\ create_numbering, 
NULL_NUMBER from rpython.jit.metainterp.opencoder import Trace diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py --- a/rpython/jit/metainterp/warmspot.py +++ b/rpython/jit/metainterp/warmspot.py @@ -464,7 +464,7 @@ if no_stats: stats = history.NoStats() else: - stats = history.Stats() + stats = history.Stats(None) self.stats = stats if translate_support_code: self.annhelper = MixLevelHelperAnnotator(self.translator.rtyper) @@ -483,6 +483,7 @@ self.opt, ProfilerClass=ProfilerClass, warmrunnerdesc=self) + self.stats.metainterp_sd = self.metainterp_sd def make_virtualizable_infos(self): vinfos = {} From pypy.commits at gmail.com Mon Mar 14 15:09:55 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 14 Mar 2016 12:09:55 -0700 (PDT) Subject: [pypy-commit] pypy default: comma caused AnsiLogger attributes to be tuples, not functions Message-ID: <56e70c83.080a1c0a.174bc.ffffd746@mx.google.com> Author: mattip Branch: Changeset: r83052:b3e2d1a59d0f Date: 2016-03-14 21:09 +0200 http://bitbucket.org/pypy/pypy/changeset/b3e2d1a59d0f/ Log: comma caused AnsiLogger attributes to be tuples, not functions diff --git a/rpython/tool/ansi_print.py b/rpython/tool/ansi_print.py --- a/rpython/tool/ansi_print.py +++ b/rpython/tool/ansi_print.py @@ -50,9 +50,9 @@ # some more methods used by sandlib call = _make_method(':call', (34,)) result = _make_method(':result', (34,)) - exception = _make_method(':exception', (34,)), - vpath = _make_method(':vpath', (35,)), - timeout = _make_method('', (1, 31)), + exception = _make_method(':exception', (34,)) + vpath = _make_method(':vpath', (35,)) + timeout = _make_method('', (1, 31)) # directly calling the logger writes "[name] text" with no particular color __call__ = _make_method('', ()) From pypy.commits at gmail.com Mon Mar 14 15:14:01 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 14 Mar 2016 12:14:01 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: one more fix Message-ID: <56e70d79.890bc30a.12110.ffffe4dd@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83053:967e19f9c345 Date: 2016-03-14 21:13 +0200 http://bitbucket.org/pypy/pypy/changeset/967e19f9c345/ Log: one more fix diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1595,11 +1595,14 @@ self.metainterp.vable_and_vrefs_before_residual_call() tp = descr.get_normalized_result_type() resbox = NOT_HANDLED + opnum = -1 if effectinfo.oopspecindex == effectinfo.OS_LIBFFI_CALL: + opnum = rop.call_may_force_for_descr(descr) resbox = self.metainterp.direct_libffi_call(allboxes, descr, tp) if resbox is NOT_HANDLED: if effectinfo.is_call_release_gil(): + opnum = rop.call_release_gil_for_descr(descr) resbox = self.metainterp.direct_call_release_gil(allboxes, descr, tp) elif tp == 'i': @@ -1617,7 +1620,10 @@ resbox = None else: assert False - self.metainterp.vrefs_after_residual_call(self.metainterp._last_op, cut_pos) + if opnum == -1: + opnum = rop.call_may_force_for_descr(descr) + self.metainterp.vrefs_after_residual_call(self.metainterp._last_op, + opnum, allboxes, descr, cut_pos) vablebox = None if assembler_call: vablebox, resbox = self.metainterp.direct_assembler_call( @@ -2776,7 +2782,7 @@ force_token], None, descr=vinfo.vable_token_descr) - def vrefs_after_residual_call(self, op, cut_pos): + def vrefs_after_residual_call(self, op, opnum, arglist, descr, cut_pos): vrefinfo = self.staticdata.virtualref_info for i in range(0, 
len(self.virtualref_boxes), 2): vrefbox = self.virtualref_boxes[i+1] @@ -2786,7 +2792,7 @@ # during this CALL_MAY_FORCE. Mark this fact by # generating a VIRTUAL_REF_FINISH on it and replacing # it by ConstPtr(NULL). - self.stop_tracking_virtualref(i, op, cut_pos) + self.stop_tracking_virtualref(i, op, opnum, arglist, descr, cut_pos) def vable_after_residual_call(self, funcbox): vinfo = self.jitdriver_sd.virtualizable_info @@ -2810,16 +2816,14 @@ # have the eventual exception raised (this is normally done # after the call to vable_after_residual_call()). - def stop_tracking_virtualref(self, i, op, cut_pos): + def stop_tracking_virtualref(self, i, op, opnum, arglist, descr, cut_pos): virtualbox = self.virtualref_boxes[i] vrefbox = self.virtualref_boxes[i+1] # record VIRTUAL_REF_FINISH just before the current CALL_MAY_FORCE self.history.cut(cut_pos) # pop the CALL - assert rop.is_call_may_force(op.getopnum()) self.history.record_nospec(rop.VIRTUAL_REF_FINISH, [vrefbox, virtualbox], None) - newop = self.history.record_nospec(op.getopnum(), op.getarglist(), - op.getdescr()) + newop = self.history.record_nospec(opnum, arglist, descr) op.position = newop.position # mark by replacing it with ConstPtr(NULL) self.virtualref_boxes[i+1] = self.cpu.ts.CONST_NULL From pypy.commits at gmail.com Mon Mar 14 15:15:57 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 14 Mar 2016 12:15:57 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: add a missing helper Message-ID: <56e70ded.04371c0a.9ea5f.ffffd906@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83054:552f750cc26b Date: 2016-03-14 21:15 +0200 http://bitbucket.org/pypy/pypy/changeset/552f750cc26b/ Log: add a missing helper diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1294,6 +1294,19 @@ return rop.CALL_MAY_FORCE_N @staticmethod + def call_release_gil_for_descr(descr): + tp = descr.get_normalized_result_type() + if tp == 'i': + return rop.CALL_RELEASE_GIL_I + # no such thing + #elif tp == 'r': + # return rop.CALL_RELEASE_GIL_R + elif tp == 'f': + return rop.CALL_RELEASE_GIL_F + assert tp == 'v' + return rop.CALL_RELEASE_GIL_N + + @staticmethod def call_assembler_for_descr(descr): tp = descr.get_normalized_result_type() if tp == 'i': From pypy.commits at gmail.com Mon Mar 14 15:42:23 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 14 Mar 2016 12:42:23 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: merge default into branch Message-ID: <56e7141f.49f9c20a.dd60d.ffffd4b9@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83055:ef1022e1d513 Date: 2016-03-14 21:41 +0200 http://bitbucket.org/pypy/pypy/changeset/ef1022e1d513/ Log: merge default into branch diff too long, truncating to 2000 out of 13184 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -74,5 +74,6 @@ ^rpython/doc/_build/.*$ ^compiled ^.git/ +^.hypothesis/ ^release/ ^rpython/_cache$ diff --git a/lib_pypy/ctypes_config_cache/rebuild.py b/lib_pypy/ctypes_config_cache/rebuild.py --- a/lib_pypy/ctypes_config_cache/rebuild.py +++ b/lib_pypy/ctypes_config_cache/rebuild.py @@ -9,9 +9,8 @@ _dirpath = os.path.dirname(__file__) or os.curdir -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer("ctypes_config_cache") -py.log.setconsumer("ctypes_config_cache", ansi_log) +from rpython.tool.ansi_print import AnsiLogger +log = AnsiLogger("ctypes_config_cache") def 
rebuild_one(name): diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -76,5 +76,4 @@ * add a tag on the pypy/jitviewer repo that corresponds to pypy release * add a tag on the codespeed web site that corresponds to pypy release -* update the version number in {rpython,pypy}/doc/conf.py. * revise versioning at https://readthedocs.org/projects/pypy diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -167,22 +167,13 @@ * `hg` -Embedding PyPy and improving CFFI ---------------------------------- - -PyPy has some basic :doc:`embedding infrastructure `. The idea would be to improve -upon that with cffi hacks that can automatically generate embeddable .so/.dll -library - - Optimising cpyext (CPython C-API compatibility layer) ----------------------------------------------------- A lot of work has gone into PyPy's implementation of CPython's C-API over the last years to let it reach a practical level of compatibility, so that C extensions for CPython work on PyPy without major rewrites. However, -there are still many edges and corner cases where it misbehaves, and it has -not received any substantial optimisation so far. +there are still many edges and corner cases where it misbehaves. The objective of this project is to fix bugs in cpyext and to optimise several performance critical parts of it, such as the reference counting diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -5,3 +5,21 @@ .. this is a revision shortly after release-5.0 .. startrev: b238b48f9138 +.. branch: s390x-backend + +The jit compiler backend implementation for the s390x architecutre. +The backend manages 64-bit values in the literal pool of the assembly instead of loading them as immediates. +It includes a simplification for the operation 'zero_array'. Start and length parameters are bytes instead of size. + +.. branch: remove-py-log + +Replace py.log with something simpler, which should speed up logging + +.. branch: where_1_arg + +Implemented numpy.where for 1 argument (thanks sergem) + +.. 
branch: fix_indexing_by_numpy_int + +Implement yet another strange numpy indexing compatibility; indexing by a scalar +returns a scalar diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -9,6 +9,11 @@ from pypy.conftest import pypydir from lib_pypy._pypy_interact import irc_header +try: + import __pypy__ +except ImportError: + __pypy__ = None + banner = sys.version.splitlines()[0] app_main = os.path.join(os.path.realpath(os.path.dirname(__file__)), os.pardir, 'app_main.py') @@ -106,6 +111,8 @@ sys.argv[:] = saved_sys_argv sys.stdout = saved_sys_stdout sys.stderr = saved_sys_stderr + if __pypy__: + __pypy__.set_debug(True) def test_all_combinations_I_can_think_of(self): self.check([], {}, sys_argv=[''], run_stdin=True) @@ -601,9 +608,7 @@ def run_with_status_code(self, cmdline, senddata='', expect_prompt=False, expect_banner=False, python_flags='', env=None): if os.name == 'nt': - try: - import __pypy__ - except: + if __pypy__ is None: py.test.skip('app_main cannot run on non-pypy for windows') cmdline = '%s %s "%s" %s' % (sys.executable, python_flags, app_main, cmdline) diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -2,7 +2,6 @@ from pypy.module.thread.test.support import GenericTestThread - class AppTestMinimal: spaceconfig = dict(usemodules=['__pypy__']) diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -353,10 +353,11 @@ # ____________________________________________________________ -rffi_fdopen = rffi.llexternal("fdopen", [rffi.INT, rffi.CCHARP], rffi.CCHARP, +FILEP = rffi.COpaquePtr("FILE") +rffi_fdopen = rffi.llexternal("fdopen", [rffi.INT, rffi.CCHARP], FILEP, save_err=rffi.RFFI_SAVE_ERRNO) -rffi_setbuf = rffi.llexternal("setbuf", [rffi.CCHARP, rffi.CCHARP], lltype.Void) -rffi_fclose = rffi.llexternal("fclose", [rffi.CCHARP], rffi.INT) +rffi_setbuf = rffi.llexternal("setbuf", [FILEP, rffi.CCHARP], lltype.Void) +rffi_fclose = rffi.llexternal("fclose", [FILEP], rffi.INT) class CffiFileObj(object): _immutable_ = True @@ -382,4 +383,4 @@ fileobj.cffi_fileobj = CffiFileObj(fd, fileobj.mode) except OSError, e: raise wrap_oserror(space, e) - return fileobj.cffi_fileobj.llf + return rffi.cast(rffi.CCHARP, fileobj.cffi_fileobj.llf) diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -389,6 +389,7 @@ def test_writelines(self): import array + import sys fn = self.temptestfile with file(fn, 'w') as f: f.writelines(['abc']) @@ -406,7 +407,10 @@ exc = raises(TypeError, f.writelines, [memoryview('jkl')]) assert str(exc.value) == "writelines() argument must be a sequence of strings" out = open(fn, 'rb').readlines()[0] - assert out[0:5] == 'abcd\x00' + if sys.byteorder == 'big': + assert out[0:7] == 'abc\x00\x00\x00d' + else: + assert out[0:5] == 'abcd\x00' assert out[-3:] == 'ghi' with file(fn, 'wb') as f: diff --git a/pypy/module/_rawffi/callback.py b/pypy/module/_rawffi/callback.py --- a/pypy/module/_rawffi/callback.py +++ b/pypy/module/_rawffi/callback.py @@ -1,17 +1,23 @@ - +import sys from pypy.interpreter.gateway import interp2app, unwrap_spec 
from pypy.interpreter.typedef import TypeDef, GetSetProperty from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module._rawffi.interp_rawffi import write_ptr from pypy.module._rawffi.structure import W_Structure from pypy.module._rawffi.interp_rawffi import (W_DataInstance, letter2tp, - unwrap_value, unpack_argshapes, got_libffi_error) + unwrap_value, unpack_argshapes, got_libffi_error, is_narrow_integer_type, + LL_TYPEMAP, NARROW_INTEGER_TYPES) from rpython.rlib.clibffi import USERDATA_P, CallbackFuncPtr, FUNCFLAG_CDECL from rpython.rlib.clibffi import ffi_type_void, LibFFIError from rpython.rlib import rweakref from pypy.module._rawffi.tracker import tracker from pypy.interpreter.error import OperationError from pypy.interpreter import gateway +from rpython.rlib.unroll import unrolling_iterable + +BIGENDIAN = sys.byteorder == 'big' + +unroll_narrow_integer_types = unrolling_iterable(NARROW_INTEGER_TYPES) app = gateway.applevel(''' def tbprint(tb, err): @@ -42,8 +48,17 @@ args_w[i] = space.wrap(rffi.cast(rffi.ULONG, ll_args[i])) w_res = space.call(w_callable, space.newtuple(args_w)) if callback_ptr.result is not None: # don't return void - unwrap_value(space, write_ptr, ll_res, 0, - callback_ptr.result, w_res) + ptr = ll_res + letter = callback_ptr.result + if BIGENDIAN: + # take care of narrow integers! + for int_type in unroll_narrow_integer_types: + if int_type == letter: + T = LL_TYPEMAP[int_type] + n = rffi.sizeof(lltype.Signed) - rffi.sizeof(T) + ptr = rffi.ptradd(ptr, n) + break + unwrap_value(space, write_ptr, ptr, 0, letter, w_res) except OperationError, e: tbprint(space, space.wrap(e.get_traceback()), space.wrap(e.errorstr(space))) diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -1,3 +1,4 @@ +import sys from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -19,6 +20,8 @@ from pypy.module._rawffi.buffer import RawFFIBuffer from pypy.module._rawffi.tracker import tracker +BIGENDIAN = sys.byteorder == 'big' + TYPEMAP = { # XXX A mess with unsigned/signed/normal chars :-/ 'c' : ffi_type_uchar, @@ -331,10 +334,14 @@ if tracker.DO_TRACING: ll_buf = rffi.cast(lltype.Signed, self.ll_buffer) tracker.trace_allocation(ll_buf, self) + self._ll_buffer = self.ll_buffer def getbuffer(self, space): return space.wrap(rffi.cast(lltype.Unsigned, self.ll_buffer)) + def buffer_advance(self, n): + self.ll_buffer = rffi.ptradd(self.ll_buffer, n) + def byptr(self, space): from pypy.module._rawffi.array import ARRAY_OF_PTRS array = ARRAY_OF_PTRS.allocate(space, 1) @@ -342,16 +349,17 @@ return space.wrap(array) def free(self, space): - if not self.ll_buffer: + if not self._ll_buffer: raise segfault_exception(space, "freeing NULL pointer") self._free() def _free(self): if tracker.DO_TRACING: - ll_buf = rffi.cast(lltype.Signed, self.ll_buffer) + ll_buf = rffi.cast(lltype.Signed, self._ll_buffer) tracker.trace_free(ll_buf) - lltype.free(self.ll_buffer, flavor='raw') + lltype.free(self._ll_buffer, flavor='raw') self.ll_buffer = lltype.nullptr(rffi.VOIDP.TO) + self._ll_buffer = self.ll_buffer def buffer_w(self, space, flags): return RawFFIBuffer(self) @@ -432,12 +440,19 @@ space.wrap("cannot directly read value")) wrap_value._annspecialcase_ = 'specialize:arg(1)' +NARROW_INTEGER_TYPES = 'cbhiBIH?' 
+ +def is_narrow_integer_type(letter): + return letter in NARROW_INTEGER_TYPES class W_FuncPtr(W_Root): def __init__(self, space, ptr, argshapes, resshape): self.ptr = ptr self.argshapes = argshapes self.resshape = resshape + self.narrow_integer = False + if resshape is not None: + self.narrow_integer = is_narrow_integer_type(resshape.itemcode.lower()) def getbuffer(self, space): return space.wrap(rffi.cast(lltype.Unsigned, self.ptr.funcsym)) @@ -497,6 +512,10 @@ result = self.resshape.allocate(space, 1, autofree=True) # adjust_return_size() was used here on result.ll_buffer self.ptr.call(args_ll, result.ll_buffer) + if BIGENDIAN and self.narrow_integer: + # we get a 8 byte value in big endian + n = rffi.sizeof(lltype.Signed) - result.shape.size + result.buffer_advance(n) return space.wrap(result) else: self.ptr.call(args_ll, lltype.nullptr(rffi.VOIDP.TO)) diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -18,6 +18,9 @@ from rpython.rlib.rarithmetic import intmask, signedtype, r_uint, \ r_ulonglong from rpython.rtyper.lltypesystem import lltype, rffi +import sys + +IS_BIG_ENDIAN = sys.byteorder == 'big' @@ -114,20 +117,32 @@ size += intmask(fieldsize) bitsizes.append(fieldsize) elif field_type == NEW_BITFIELD: - bitsizes.append((bitsize << 16) + bitoffset) + if IS_BIG_ENDIAN: + off = last_size - bitoffset - bitsize + else: + off = bitoffset + bitsizes.append((bitsize << 16) + off) bitoffset = bitsize size = round_up(size, fieldalignment) pos.append(size) size += fieldsize elif field_type == CONT_BITFIELD: - bitsizes.append((bitsize << 16) + bitoffset) + if IS_BIG_ENDIAN: + off = last_size - bitoffset - bitsize + else: + off = bitoffset + bitsizes.append((bitsize << 16) + off) bitoffset += bitsize # offset is already updated for the NEXT field pos.append(size - fieldsize) elif field_type == EXPAND_BITFIELD: size += fieldsize - last_size / 8 last_size = fieldsize * 8 - bitsizes.append((bitsize << 16) + bitoffset) + if IS_BIG_ENDIAN: + off = last_size - bitoffset - bitsize + else: + off = bitoffset + bitsizes.append((bitsize << 16) + off) bitoffset += bitsize # offset is already updated for the NEXT field pos.append(size - fieldsize) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -704,7 +704,6 @@ def compare(a, b): a1 = _rawffi.Array('i').fromaddress(_rawffi.Array('P').fromaddress(a, 1)[0], 1) a2 = _rawffi.Array('i').fromaddress(_rawffi.Array('P').fromaddress(b, 1)[0], 1) - print "comparing", a1[0], "with", a2[0] if a1[0] not in [1,2,3,4] or a2[0] not in [1,2,3,4]: bogus_args.append((a1[0], a2[0])) if a1[0] > a2[0]: @@ -715,7 +714,7 @@ a2[0] = len(ll_to_sort) a3 = _rawffi.Array('l')(1) a3[0] = struct.calcsize('i') - cb = _rawffi.CallbackPtr(compare, ['P', 'P'], 'i') + cb = _rawffi.CallbackPtr(compare, ['P', 'P'], 'l') a4 = cb.byptr() qsort(a1, a2, a3, a4) res = [ll_to_sort[i] for i in range(len(ll_to_sort))] @@ -896,11 +895,21 @@ b = _rawffi.Array('c').fromaddress(a.buffer, 38) if sys.maxunicode > 65535: # UCS4 build - assert b[0] == 'x' - assert b[1] == '\x00' - assert b[2] == '\x00' - assert b[3] == '\x00' - assert b[4] == 'y' + if sys.byteorder == 'big': + assert b[0] == '\x00' + assert b[1] == '\x00' + assert b[2] == '\x00' + assert b[3] == 'x' + assert b[4] == '\x00' + assert b[5] == '\x00' + assert b[6] == '\x00' + assert b[7] == 
'y' + else: + assert b[0] == 'x' + assert b[1] == '\x00' + assert b[2] == '\x00' + assert b[3] == '\x00' + assert b[4] == 'y' else: # UCS2 build assert b[0] == 'x' diff --git a/pypy/module/_rawffi/test/test_struct.py b/pypy/module/_rawffi/test/test_struct.py --- a/pypy/module/_rawffi/test/test_struct.py +++ b/pypy/module/_rawffi/test/test_struct.py @@ -1,4 +1,4 @@ - +import sys from pypy.module._rawffi.structure import size_alignment_pos from pypy.module._rawffi.interp_rawffi import TYPEMAP, letter2tp @@ -63,4 +63,7 @@ for (name, t, size) in fields]) assert size == 8 assert pos == [0, 0, 0] - assert bitsizes == [0x10000, 0x3e0001, 0x1003f] + if sys.byteorder == 'little': + assert bitsizes == [0x10000, 0x3e0001, 0x1003f] + else: + assert bitsizes == [0x1003f, 0x3e0001, 0x10000] diff --git a/pypy/module/_vmprof/conftest.py b/pypy/module/_vmprof/conftest.py new file mode 100644 --- /dev/null +++ b/pypy/module/_vmprof/conftest.py @@ -0,0 +1,6 @@ +import py, platform + +def pytest_collect_directory(path, parent): + if platform.machine() == 's390x': + py.test.skip("zarch tests skipped") +pytest_collect_file = pytest_collect_directory diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -390,7 +390,7 @@ ((dummy::cppyy_test_data*)self)->destroy_arrays(); } else if (idx == s_methods["cppyy_test_data::set_bool"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_bool((bool)((CPPYY_G__value*)args)[0].obj.in); + ((dummy::cppyy_test_data*)self)->set_bool((bool)((CPPYY_G__value*)args)[0].obj.i); } else if (idx == s_methods["cppyy_test_data::set_char"]) { assert(self && nargs == 1); ((dummy::cppyy_test_data*)self)->set_char(((CPPYY_G__value*)args)[0].obj.ch); diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -51,13 +51,19 @@ assert arr.tolist() == [1, 23, 4] def test_buffer(self): + import sys module = self.import_module(name='array') arr = module.array('i', [1,2,3,4]) buf = buffer(arr) exc = raises(TypeError, "buf[1] = '1'") assert str(exc.value) == "buffer is read-only" - # XXX big-endian - assert str(buf) == ('\x01\0\0\0' - '\x02\0\0\0' - '\x03\0\0\0' - '\x04\0\0\0') + if sys.byteorder == 'big': + assert str(buf) == ('\0\0\0\x01' + '\0\0\0\x02' + '\0\0\0\x03' + '\0\0\0\x04') + else: + assert str(buf) == ('\x01\0\0\0' + '\x02\0\0\0' + '\x03\0\0\0' + '\x04\0\0\0') diff --git a/pypy/module/cpyext/test/test_methodobject.py b/pypy/module/cpyext/test/test_methodobject.py --- a/pypy/module/cpyext/test/test_methodobject.py +++ b/pypy/module/cpyext/test/test_methodobject.py @@ -103,7 +103,7 @@ c_func = ApiFunction([PyObject, PyObject], PyObject, func) func.api_func = c_func ml = lltype.malloc(PyMethodDef, flavor='raw', zero=True) - namebuf = rffi.str2charp('func') + namebuf = rffi.cast(rffi.CONST_CCHARP, rffi.str2charp('func')) ml.c_ml_name = namebuf ml.c_ml_meth = rffi.cast(PyCFunction_typedef, c_func.get_llhelper(space)) diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -729,7 +729,7 @@ int intval; PyObject *name; - if (!PyArg_ParseTuple(args, "i", &intval)) + if (!PyArg_ParseTuple(args, "l", &intval)) return NULL; IntLike_Type.tp_flags |= 
Py_TPFLAGS_DEFAULT; diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -398,11 +398,11 @@ lltype.free(pendian, flavor='raw') test("\x61\x00\x62\x00\x63\x00\x64\x00", -1) - - test("\x61\x00\x62\x00\x63\x00\x64\x00", None) - + if sys.byteorder == 'big': + test("\x00\x61\x00\x62\x00\x63\x00\x64", None) + else: + test("\x61\x00\x62\x00\x63\x00\x64\x00", None) test("\x00\x61\x00\x62\x00\x63\x00\x64", 1) - test("\xFE\xFF\x00\x61\x00\x62\x00\x63\x00\x64", 0, 1) test("\xFF\xFE\x61\x00\x62\x00\x63\x00\x64\x00", 0, -1) @@ -435,7 +435,10 @@ test("\x61\x00\x00\x00\x62\x00\x00\x00", -1) - test("\x61\x00\x00\x00\x62\x00\x00\x00", None) + if sys.byteorder == 'big': + test("\x00\x00\x00\x61\x00\x00\x00\x62", None) + else: + test("\x61\x00\x00\x00\x62\x00\x00\x00", None) test("\x00\x00\x00\x61\x00\x00\x00\x62", 1) diff --git a/pypy/module/marshal/test/test_marshalimpl.py b/pypy/module/marshal/test/test_marshalimpl.py --- a/pypy/module/marshal/test/test_marshalimpl.py +++ b/pypy/module/marshal/test/test_marshalimpl.py @@ -64,14 +64,17 @@ import marshal, struct class FakeM: + # NOTE: marshal is platform independent, running this test must assume + # that self.seen gets values from the endianess of the marshal module. + # (which is little endian!) def __init__(self): self.seen = [] def start(self, code): self.seen.append(code) def put_int(self, value): - self.seen.append(struct.pack("i", value)) + self.seen.append(struct.pack("i4'), ('y', '>f4')]" in repr(a) + else: + assert "[('x', 'i4" + E = '<' if sys.byteorder == 'little' else '>' + b = np.dtype((xyz, [("col1", E+"i4"), ("col2", E+"i4"), ("col3", E+"i4")])) data = [(1, 2,3), (4, 5, 6)] a = np.array(data, dtype=b) x = pickle.loads(pickle.dumps(a)) @@ -423,18 +429,20 @@ assert hash(t5) != hash(t6) def test_pickle(self): + import sys import numpy as np from numpy import array, dtype from cPickle import loads, dumps a = array([1,2,3]) + E = '<' if sys.byteorder == 'little' else '>' if self.ptr_size == 8: - assert a.dtype.__reduce__() == (dtype, ('i8', 0, 1), (3, '<', None, None, None, -1, -1, 0)) + assert a.dtype.__reduce__() == (dtype, ('i8', 0, 1), (3, E, None, None, None, -1, -1, 0)) else: - assert a.dtype.__reduce__() == (dtype, ('i4', 0, 1), (3, '<', None, None, None, -1, -1, 0)) + assert a.dtype.__reduce__() == (dtype, ('i4', 0, 1), (3, E, None, None, None, -1, -1, 0)) assert loads(dumps(a.dtype)) == a.dtype assert np.dtype('bool').__reduce__() == (dtype, ('b1', 0, 1), (3, '|', None, None, None, -1, -1, 0)) assert np.dtype('|V16').__reduce__() == (dtype, ('V16', 0, 1), (3, '|', None, None, None, 16, 1, 0)) - assert np.dtype(('')+'U7' assert dtype([('', 'f8')]).str == "|V8" assert dtype(('f8', 2)).str == "|V16" @@ -968,8 +978,12 @@ def test_isnative(self): from numpy import dtype + import sys assert dtype('i4').isnative == True - assert dtype('>i8').isnative == False + if sys.byteorder == 'big': + assert dtype('i8').isnative == False def test_any_all_nonzero(self): import numpy @@ -1185,6 +1199,7 @@ def test_setstate(self): import numpy as np import sys + E = '<' if sys.byteorder == 'little' else '>' d = np.dtype('f8') d.__setstate__((3, '|', (np.dtype('float64'), (2,)), None, None, 20, 1, 0)) assert d.str == ('<' if sys.byteorder == 'little' else '>') + 'f8' @@ -1201,7 +1216,7 @@ assert d.shape == (2,) assert d.itemsize == 8 assert d.subdtype is not None - assert repr(d) == "dtype(('' + assert 
str(dt) == "{'names':['f0','f1'], 'formats':['%si4','u1'], 'offsets':[0,4], 'itemsize':8, 'aligned':True}" % E dt = np.dtype([('f1', 'u1'), ('f0', 'i4')], align=True) - assert str(dt) == "{'names':['f1','f0'], 'formats':['u1',' 2 ** 31 - 1: - assert (u == [1]).all() + if sys.byteorder == 'big': + assert (u == [0x0100000000000000]).all() + else: + assert (u == [1]).all() else: - assert (u == [1, 0]).all() + if sys.byteorder == 'big': + assert (u == [0x01000000, 0]).all() + else: + assert (u == [1, 0]).all() v = fromstring("abcd", dtype="|S2") assert v[0] == "ab" assert v[1] == "cd" @@ -3668,9 +3734,15 @@ k = fromstring(self.float16val, dtype='float16') assert k[0] == dtype('float16').type(5.) dt = array([5], dtype='longfloat').dtype + print(dt.itemsize) if dt.itemsize == 8: - m = fromstring('\x00\x00\x00\x00\x00\x00\x14@', - dtype='float64') + import sys + if sys.byteorder == 'big': + m = fromstring('@\x14\x00\x00\x00\x00\x00\x00', + dtype='float64') + else: + m = fromstring('\x00\x00\x00\x00\x00\x00\x14@', + dtype='float64') elif dt.itemsize == 12: m = fromstring('\x00\x00\x00\x00\x00\x00\x00\xa0\x01@\x00\x00', dtype='float96') @@ -3692,8 +3764,13 @@ def test_tostring(self): from numpy import array - assert array([1, 2, 3], 'i2').tostring() == '\x01\x00\x02\x00\x03\x00' - assert array([1, 2, 3], 'i2')[::2].tostring() == '\x01\x00\x03\x00' + import sys + if sys.byteorder == 'big': + assert array([1, 2, 3], 'i2').tostring() == '\x00\x01\x00\x02\x00\x03' + assert array([1, 2, 3], 'i2')[::2].tostring() == '\x00\x01\x00\x03' + else: + assert array([1, 2, 3], 'i2').tostring() == '\x01\x00\x02\x00\x03\x00' + assert array([1, 2, 3], 'i2')[::2].tostring() == '\x01\x00\x03\x00' assert array([1, 2, 3], 'i2')[::2].tostring() == '\x00\x01\x00\x03' assert array(0, dtype='i2').tostring() == '\x00\x00' @@ -4189,7 +4266,11 @@ v = a.view(('float32', 4)) assert v.dtype == np.dtype('float32') assert v.shape == (10, 4) - assert v[0][-1] == 2.53125 + import sys + if sys.byteorder == 'big': + assert v[0][-2] == 2.53125 + else: + assert v[0][-1] == 2.53125 exc = raises(ValueError, "a.view(('float32', 2))") assert exc.value[0] == 'new type not compatible with array.' 
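The micronumpy test changes above all follow one pattern: compute the expected byte-order character from sys.byteorder instead of hard-coding the little-endian form, so the same assertions hold on big-endian hosts such as s390x. A condensed illustration of the idiom (a standalone sketch, not copied from any single test; it assumes a numpy of this era where tostring() is available):

    import sys
    import numpy as np

    # '<' on little-endian hosts (x86, ARM), '>' on big-endian ones (s390x)
    E = '<' if sys.byteorder == 'little' else '>'

    a = np.array([1, 2, 3], dtype='i4')
    assert a.dtype.str == E + 'i4'      # the native byte order shows up in .str
    if sys.byteorder == 'little':
        assert a[:1].tostring() == '\x01\x00\x00\x00'
    else:
        assert a[:1].tostring() == '\x00\x00\x00\x01'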
diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -109,6 +109,7 @@ def test_pickle(self): from numpy import dtype, zeros + import sys try: from numpy.core.multiarray import scalar except ImportError: @@ -119,9 +120,11 @@ f = dtype('float64').type(13.37) c = dtype('complex128').type(13 + 37.j) - assert i.__reduce__() == (scalar, (dtype('int32'), '9\x05\x00\x00')) - assert f.__reduce__() == (scalar, (dtype('float64'), '=\n\xd7\xa3p\xbd*@')) - assert c.__reduce__() == (scalar, (dtype('complex128'), '\x00\x00\x00\x00\x00\x00*@\x00\x00\x00\x00\x00\x80B@')) + swap = lambda s: (''.join(reversed(s))) if sys.byteorder == 'big' else s + assert i.__reduce__() == (scalar, (dtype('int32'), swap('9\x05\x00\x00'))) + assert f.__reduce__() == (scalar, (dtype('float64'), swap('=\n\xd7\xa3p\xbd*@'))) + assert c.__reduce__() == (scalar, (dtype('complex128'), swap('\x00\x00\x00\x00\x00\x00*@') + \ + swap('\x00\x00\x00\x00\x00\x80B@'))) assert loads(dumps(i)) == i assert loads(dumps(f)) == f @@ -256,13 +259,20 @@ assert t < 7e-323 t = s.view('complex64') assert type(t) is np.complex64 - assert 0 < t.real < 1 - assert t.imag == 0 + if sys.byteorder == 'big': + assert 0 < t.imag < 1 + assert t.real == 0 + else: + assert 0 < t.real < 1 + assert t.imag == 0 exc = raises(TypeError, s.view, 'string') assert exc.value[0] == "data-type must not be 0-sized" t = s.view('S8') assert type(t) is np.string_ - assert t == '\x0c' + if sys.byteorder == 'big': + assert t == '\x00' * 7 + '\x0c' + else: + assert t == '\x0c' s = np.dtype('string').type('abc1') assert s.view('S4') == 'abc1' if '__pypy__' in sys.builtin_module_names: diff --git a/pypy/module/micronumpy/test/test_selection.py b/pypy/module/micronumpy/test/test_selection.py --- a/pypy/module/micronumpy/test/test_selection.py +++ b/pypy/module/micronumpy/test/test_selection.py @@ -327,10 +327,15 @@ # tests from numpy/core/tests/test_regression.py def test_sort_bigendian(self): from numpy import array, dtype - a = array(range(11), dtype='float64') - c = a.astype(dtype('' D.__module__ = 'mod' mod = new.module('mod') mod.D = D @@ -510,7 +511,7 @@ tp9 Rp10 (I3 - S'<' + S'{E}' p11 NNNI-1 I-1 @@ -520,7 +521,7 @@ S'\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@' p13 tp14 - b.'''.replace(' ','') + b.'''.replace(' ','').format(E=E) for ss,sn in zip(s.split('\n')[1:],s_from_numpy.split('\n')[1:]): if len(ss)>10: # ignore binary data, it will be checked later diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py --- a/pypy/module/pypyjit/test_pypy_c/test_buffers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -34,7 +34,7 @@ i = 0 while i < n: i += 1 - struct.unpack('i', a) # ID: unpack + struct.unpack('') + else: + bit = ord('<') assert loop.match(""" guard_class(p1, #, descr=...) p4 = getfield_gc_r(p1, descr=) @@ -109,7 +113,7 @@ i9 = getfield_gc_i(p4, descr=) i10 = getfield_gc_i(p6, descr=) i12 = int_eq(i10, 61) - i14 = int_eq(i10, 60) + i14 = int_eq(i10, %d) i15 = int_or(i12, i14) f16 = raw_load_f(i9, i5, descr=) guard_true(i15, descr=...) @@ -142,7 +146,7 @@ setfield_gc(p34, i30, descr=) }}} jump(..., descr=...) 
- """) + """ % (bit,)) def test_reduce_logical_and(self): def main(): diff --git a/pypy/module/pypyjit/test_pypy_c/test_struct.py b/pypy/module/pypyjit/test_pypy_c/test_struct.py --- a/pypy/module/pypyjit/test_pypy_c/test_struct.py +++ b/pypy/module/pypyjit/test_pypy_c/test_struct.py @@ -19,8 +19,8 @@ import struct i = 1 while i < n: - buf = struct.pack("i", i) # ID: pack - x = struct.unpack("i", buf)[0] # ID: unpack + buf = struct.pack(" len(value): @@ -629,13 +632,16 @@ def descr_endswith(self, space, w_suffix, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end) if space.isinstance_w(w_suffix, space.w_tuple): - for w_suffix in space.fixedview(w_suffix): - if self._endswith(space, value, w_suffix, start, end): - return space.w_True - return space.w_False + return self._endswith_tuple(space, value, w_suffix, start, end) return space.newbool(self._endswith(space, value, w_suffix, start, end)) + def _endswith_tuple(self, space, value, w_suffix, start, end): + for w_suffix in space.fixedview(w_suffix): + if self._endswith(space, value, w_suffix, start, end): + return space.w_True + return space.w_False + def _endswith(self, space, value, w_prefix, start, end): prefix = self._op_val(space, w_prefix) if start > len(value): @@ -795,5 +801,3 @@ def _get_buffer(space, w_obj): return space.buffer_w(w_obj, space.BUF_SIMPLE) - - diff --git a/pypy/testrunner_cfg.py b/pypy/testrunner_cfg.py --- a/pypy/testrunner_cfg.py +++ b/pypy/testrunner_cfg.py @@ -5,6 +5,7 @@ 'translator/c', 'rlib', 'memory/test', 'jit/metainterp', 'jit/backend/arm', 'jit/backend/x86', + 'jit/backend/zarch', ] def collect_one_testdir(testdirs, reldir, tests): diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -9,7 +9,7 @@ # download source, assuming a tag for the release already exists, and repackage them. # The script should be run in an empty directory, i.e. 
/tmp/release_xxx -for plat in linux linux64 linux-armhf-raspbian linux-armhf-raring linux-armel osx64 freebsd64 +for plat in linux linux64 linux-armhf-raspbian linux-armhf-raring linux-armel osx64 do wget http://buildbot.pypy.org/nightly/$branchname/pypy-c-jit-latest-$plat.tar.bz2 tar -xf pypy-c-jit-latest-$plat.tar.bz2 @@ -29,15 +29,16 @@ # Do this after creating a tag, note the untarred directory is pypy-pypy- # so make sure there is not another one wget https://bitbucket.org/pypy/pypy/get/$tagname.tar.bz2 -tar -xf release-$maj.$min.$rev.tar.bz2 +tar -xf $tagname.tar.bz2 mv pypy-pypy-* pypy-$maj.$min.$rev-src tar --owner=root --group=root --numeric-owner -cvjf pypy-$maj.$min.$rev-src.tar.bz2 pypy-$maj.$min.$rev-src zip -r pypy-$maj.$min.$rev-src.zip pypy-$maj.$min.$rev-src rm -rf pypy-$maj.$min.$rev-src -# Print out the md5, sha1 +# Print out the md5, sha1, sha256 md5sum *.bz2 *.zip sha1sum *.bz2 *.zip +sha256sum *.bz2 *.zip # Now upload all the bz2 and zip diff --git a/requirements.txt b/requirements.txt --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,4 @@ # hypothesis is used for test generation on untranslated jit tests hypothesis enum>=0.4.6 # is a dependency, but old pip does not pick it up +enum34>=1.1.2 diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -3,7 +3,7 @@ import types from collections import defaultdict -from rpython.tool.ansi_print import ansi_log +from rpython.tool.ansi_print import AnsiLogger from rpython.tool.pairtype import pair from rpython.tool.error import (format_blocked_annotation_error, gather_error, source_lines) @@ -15,9 +15,7 @@ from rpython.annotator.bookkeeper import Bookkeeper from rpython.rtyper.normalizecalls import perform_normalizations -import py -log = py.log.Producer("annrpython") -py.log.setconsumer("annrpython", ansi_log) +log = AnsiLogger("annrpython") class RPythonAnnotator(object): diff --git a/rpython/doc/index.rst b/rpython/doc/index.rst --- a/rpython/doc/index.rst +++ b/rpython/doc/index.rst @@ -37,6 +37,7 @@ arm logging + s390x Writing your own interpreter in RPython diff --git a/rpython/doc/s390x.rst b/rpython/doc/s390x.rst new file mode 100644 --- /dev/null +++ b/rpython/doc/s390x.rst @@ -0,0 +1,20 @@ +.. _s390x: + +S390X JIT Backend +================= + +Our JIT implements the 64 bit version of the IBM Mainframe called s390x. +Note that this architecture is big endian. + +The following facilities need to be installed to operate +correctly (all of the machines used for development these where installed): + +* General-Instructions-Extension +* Long-Displacement +* Binary Floating Point (IEEE) + +Translating +----------- + +Ensure that libffi is installed (version should do > 3.0.+). +CPython should be version 2.7.+. 
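A note on the endianness handling that recurs throughout this changeset: the new s390x target described above is big endian, while the reference byte strings hard-coded in tests (for example the micronumpy pickle test earlier in this changeset, with its swap lambda) are written down little-endian, so a big-endian machine must reverse them before comparing against the natively packed representation. The snippet below is only a minimal standalone sketch of that rule, using nothing but the standard library; the helper name native_bytes_from_le is invented for the illustration and appears nowhere in the patches.

    import struct
    import sys

    def native_bytes_from_le(le_bytes):
        # Reference values are written little-endian; a big-endian
        # machine (such as s390x) has to reverse the byte string before
        # comparing it with the natively packed representation.
        if sys.byteorder == 'big':
            return le_bytes[::-1]
        return le_bytes

    # 1337 packed in the machine's native byte order ...
    native = struct.pack('=i', 1337)
    # ... always equals the little-endian reference once swapped as needed.
    assert native == native_bytes_from_le(b'9\x05\x00\x00')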
diff --git a/rpython/jit/backend/arm/locations.py b/rpython/jit/backend/arm/locations.py --- a/rpython/jit/backend/arm/locations.py +++ b/rpython/jit/backend/arm/locations.py @@ -1,7 +1,6 @@ from rpython.jit.metainterp.history import INT, FLOAT from rpython.jit.backend.arm.arch import WORD, DOUBLE_WORD, JITFRAME_FIXED_SIZE - class AssemblerLocation(object): _immutable_ = True type = INT diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -1143,35 +1143,42 @@ def emit_op_zero_array(self, op, arglocs, regalloc, fcond): from rpython.jit.backend.llsupport.descr import unpack_arraydescr assert len(arglocs) == 0 - length_box = op.getarg(2) - if isinstance(length_box, ConstInt) and length_box.getint() == 0: + size_box = op.getarg(2) + if isinstance(size_box, ConstInt) and size_box.getint() == 0: return fcond # nothing to do itemsize, baseofs, _ = unpack_arraydescr(op.getdescr()) args = op.getarglist() + # + # ZERO_ARRAY(base_loc, start, size, 1, 1) + # 'start' and 'size' are both expressed in bytes, + # and the two scaling arguments should always be ConstInt(1) on ARM. + assert args[3].getint() == 1 + assert args[4].getint() == 1 + # base_loc = regalloc.rm.make_sure_var_in_reg(args[0], args) - sibox = args[1] - if isinstance(sibox, ConstInt): - startindex_loc = None - startindex = sibox.getint() - assert startindex >= 0 + startbyte_box = args[1] + if isinstance(startbyte_box, ConstInt): + startbyte_loc = None + startbyte = startbyte_box.getint() + assert startbyte >= 0 else: - startindex_loc = regalloc.rm.make_sure_var_in_reg(sibox, args) - startindex = -1 + startbyte_loc = regalloc.rm.make_sure_var_in_reg(startbyte_box, + args) + startbyte = -1 - # base_loc and startindex_loc are in two regs here (or they are - # immediates). Compute the dstaddr_loc, which is the raw + # base_loc and startbyte_loc are in two regs here (or startbyte_loc + # is an immediate). Compute the dstaddr_loc, which is the raw # address that we will pass as first argument to memset(). # It can be in the same register as either one, but not in # args[2], because we're still needing the latter. dstaddr_box = TempVar() dstaddr_loc = regalloc.rm.force_allocate_reg(dstaddr_box, [args[2]]) - if startindex >= 0: # a constant - ofs = baseofs + startindex * itemsize + if startbyte >= 0: # a constant + ofs = baseofs + startbyte reg = base_loc.value else: - self.mc.gen_load_int(r.ip.value, itemsize) - self.mc.MLA(dstaddr_loc.value, r.ip.value, - startindex_loc.value, base_loc.value) + self.mc.ADD_rr(dstaddr_loc.value, + base_loc.value, startbyte_loc.value) ofs = baseofs reg = dstaddr_loc.value if check_imm_arg(ofs): @@ -1180,20 +1187,27 @@ self.mc.gen_load_int(r.ip.value, ofs) self.mc.ADD_rr(dstaddr_loc.value, reg, r.ip.value) - if (isinstance(length_box, ConstInt) and - length_box.getint() <= 14 and # same limit as GCC - itemsize in (4, 2, 1)): + # We use STRB, STRH or STR based on whether we know the array + # item size is a multiple of 1, 2 or 4. + if itemsize & 1: itemsize = 1 + elif itemsize & 2: itemsize = 2 + else: itemsize = 4 + limit = itemsize + next_group = -1 + if itemsize < 4 and startbyte >= 0: + # we optimize STRB/STRH into STR, but this needs care: + # it only works if startindex_loc is a constant, otherwise + # we'd be doing unaligned accesses. 
+ next_group = (-startbyte) & 3 + limit = 4 + + if (isinstance(size_box, ConstInt) and + size_box.getint() <= 14 * limit): # same limit as GCC # Inline a series of STR operations, starting at 'dstaddr_loc'. - next_group = -1 - if itemsize < 4 and startindex >= 0: - # we optimize STRB/STRH into STR, but this needs care: - # it only works if startindex_loc is a constant, otherwise - # we'd be doing unaligned accesses. - next_group = (-startindex * itemsize) & 3 # self.mc.gen_load_int(r.ip.value, 0) i = 0 - total_size = length_box.getint() * itemsize + total_size = size_box.getint() while i < total_size: sz = itemsize if i == next_group: @@ -1209,29 +1223,18 @@ i += sz else: - if isinstance(length_box, ConstInt): - length_loc = imm(length_box.getint() * itemsize) + if isinstance(size_box, ConstInt): + size_loc = imm(size_box.getint()) else: - # load length_loc in a register different than dstaddr_loc - length_loc = regalloc.rm.make_sure_var_in_reg(length_box, - [dstaddr_box]) - if itemsize > 1: - # we need a register that is different from dstaddr_loc, - # but which can be identical to length_loc (as usual, - # only if the length_box is not used by future operations) - bytes_box = TempVar() - bytes_loc = regalloc.rm.force_allocate_reg(bytes_box, - [dstaddr_box]) - self.mc.gen_load_int(r.ip.value, itemsize) - self.mc.MUL(bytes_loc.value, r.ip.value, length_loc.value) - length_box = bytes_box - length_loc = bytes_loc + # load size_loc in a register different than dstaddr_loc + size_loc = regalloc.rm.make_sure_var_in_reg(size_box, + [dstaddr_box]) # # call memset() regalloc.before_call() self.simple_call_no_collect(imm(self.memset_addr), - [dstaddr_loc, imm(0), length_loc]) - regalloc.rm.possibly_free_var(length_box) + [dstaddr_loc, imm(0), size_loc]) + regalloc.rm.possibly_free_var(size_box) regalloc.rm.possibly_free_var(dstaddr_box) return fcond diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -16,6 +16,7 @@ MODEL_X86_64_SSE4 = 'x86-64-sse4' MODEL_ARM = 'arm' MODEL_PPC_64 = 'ppc-64' +MODEL_S390_64 = 's390x' # don't use '_' in the model strings; they are replaced by '-' @@ -27,6 +28,7 @@ MODEL_ARM: ['__arm__', '__thumb__','_M_ARM_EP'], MODEL_X86: ['i386', '__i386', '__i386__', '__i686__','_M_IX86'], MODEL_PPC_64: ['__powerpc64__'], + MODEL_S390_64:['__s390x__'], } for k, v in mapping.iteritems(): for macro in v: @@ -67,6 +69,7 @@ 'armv7l': MODEL_ARM, 'armv6l': MODEL_ARM, 'arm': MODEL_ARM, # freebsd + 's390x': MODEL_S390_64 }.get(mach) if result is None: @@ -88,7 +91,6 @@ if feature.detect_x32_mode(): raise ProcessorAutodetectError( 'JITting in x32 mode is not implemented') - # if result.startswith('arm'): from rpython.jit.backend.arm.detect import detect_float @@ -122,6 +124,8 @@ return "rpython.jit.backend.arm.runner", "CPU_ARM" elif backend_name == MODEL_PPC_64: return "rpython.jit.backend.ppc.runner", "PPC_CPU" + elif backend_name == MODEL_S390_64: + return "rpython.jit.backend.zarch.runner", "CPU_S390_64" else: raise ProcessorAutodetectError, ( "we have no JIT backend for this cpu: '%s'" % backend_name) @@ -142,6 +146,7 @@ MODEL_X86_64_SSE4: ['floats', 'singlefloats'], MODEL_ARM: ['floats', 'singlefloats', 'longlong'], MODEL_PPC_64: [], # we don't even have PPC directory, so no + MODEL_S390_64: ['floats'], }[backend_name] if __name__ == '__main__': diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py 
+++ b/rpython/jit/backend/llsupport/gc.py @@ -164,13 +164,11 @@ array_index = moving_obj_tracker.get_array_index(v) size, offset, _ = unpack_arraydescr(moving_obj_tracker.ptr_array_descr) - scale = size + array_index = array_index * size + offset args = [moving_obj_tracker.const_ptr_gcref_array, ConstInt(array_index), - ConstInt(scale), - ConstInt(offset), ConstInt(size)] - load_op = ResOperation(rop.GC_LOAD_INDEXED_R, args) + load_op = ResOperation(rop.GC_LOAD_R, args) newops.append(load_op) op.setarg(arg_i, load_op) # diff --git a/rpython/jit/backend/llsupport/jump.py b/rpython/jit/backend/llsupport/jump.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/llsupport/jump.py @@ -0,0 +1,107 @@ +def remap_frame_layout(assembler, src_locations, dst_locations, tmpreg): + pending_dests = len(dst_locations) + srccount = {} # maps dst_locations to how many times the same + # location appears in src_locations + for dst in dst_locations: + key = dst.as_key() + assert key not in srccount, "duplicate value in dst_locations!" + srccount[key] = 0 + for i in range(len(dst_locations)): + src = src_locations[i] + if src.is_imm(): + continue + key = src.as_key() + if key in srccount: + if key == dst_locations[i].as_key(): + # ignore a move "x = x" + # setting any "large enough" negative value is ok, but + # be careful of overflows, don't use -sys.maxint + srccount[key] = -len(dst_locations) - 1 + pending_dests -= 1 + else: + srccount[key] += 1 + + while pending_dests > 0: + progress = False + for i in range(len(dst_locations)): + dst = dst_locations[i] + key = dst.as_key() + if srccount[key] == 0: + srccount[key] = -1 # means "it's done" + pending_dests -= 1 + src = src_locations[i] + if not src.is_imm(): + key = src.as_key() + if key in srccount: + srccount[key] -= 1 + _move(assembler, src, dst, tmpreg) + progress = True + if not progress: + # we are left with only pure disjoint cycles + sources = {} # maps dst_locations to src_locations + for i in range(len(dst_locations)): + src = src_locations[i] + dst = dst_locations[i] + sources[dst.as_key()] = src + # + for i in range(len(dst_locations)): + dst = dst_locations[i] + originalkey = dst.as_key() + if srccount[originalkey] >= 0: + assembler.regalloc_push(dst, 0) + while True: + key = dst.as_key() + assert srccount[key] == 1 + # ^^^ because we are in a simple cycle + srccount[key] = -1 + pending_dests -= 1 + src = sources[key] + if src.as_key() == originalkey: + break + _move(assembler, src, dst, tmpreg) + dst = src + assembler.regalloc_pop(dst, 0) + assert pending_dests == 0 + +def _move(assembler, src, dst, tmpreg): + # some assembler cannot handle memory to memory moves without + # a tmp register, thus prepare src according to the ISA capabilities + src = assembler.regalloc_prepare_move(src, dst, tmpreg) + assembler.regalloc_mov(src, dst) + +def remap_frame_layout_mixed(assembler, + src_locations1, dst_locations1, tmpreg1, + src_locations2, dst_locations2, tmpreg2, WORD): + # find and push the fp stack locations from src_locations2 that + # are going to be overwritten by dst_locations1 + extrapushes = [] + dst_keys = {} + for loc in dst_locations1: + dst_keys[loc.as_key()] = None + src_locations2red = [] + dst_locations2red = [] + for i in range(len(src_locations2)): + loc = src_locations2[i] + dstloc = dst_locations2[i] + if loc.is_stack(): + key = loc.as_key() + if (key in dst_keys or (loc.width > WORD and + (key + 1) in dst_keys)): + assembler.regalloc_push(loc, len(extrapushes)) + extrapushes.append(dstloc) + continue + 
src_locations2red.append(loc) + dst_locations2red.append(dstloc) + src_locations2 = src_locations2red + dst_locations2 = dst_locations2red + # + # remap the integer and pointer registers and stack locations + remap_frame_layout(assembler, src_locations1, dst_locations1, tmpreg1) + # + # remap the fp registers and stack locations + remap_frame_layout(assembler, src_locations2, dst_locations2, tmpreg2) + # + # finally, pop the extra fp stack locations + while len(extrapushes) > 0: + loc = extrapushes.pop() + assembler.regalloc_pop(loc, len(extrapushes)) diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -203,38 +203,47 @@ def transform_to_gc_load(self, op): NOT_SIGNED = 0 CINT_ZERO = ConstInt(0) + opnum = op.getopnum() + #if opnum == rop.CALL_MALLOC_NURSERY_VARSIZE: + # v_length = op.getarg(2) + # scale = op.getarg(1).getint() + # if scale not in self.cpu.load_supported_factors: + # scale, offset, v_length = \ + # self._emit_mul_if_factor_offset_not_supported(v_length, scale, 0) + # op.setarg(1, ConstInt(scale)) + # op.setarg(2, v_length) if op.is_getarrayitem() or \ - op.getopnum() in (rop.GETARRAYITEM_RAW_I, - rop.GETARRAYITEM_RAW_F): + opnum in (rop.GETARRAYITEM_RAW_I, + rop.GETARRAYITEM_RAW_F): self.handle_getarrayitem(op) - elif op.getopnum() in (rop.SETARRAYITEM_GC, rop.SETARRAYITEM_RAW): + elif opnum in (rop.SETARRAYITEM_GC, rop.SETARRAYITEM_RAW): self.handle_setarrayitem(op) - elif op.getopnum() == rop.RAW_STORE: + elif opnum == rop.RAW_STORE: itemsize, ofs, _ = unpack_arraydescr(op.getdescr()) ptr_box = op.getarg(0) index_box = op.getarg(1) value_box = op.getarg(2) self.emit_gc_store_or_indexed(op, ptr_box, index_box, value_box, itemsize, 1, ofs) - elif op.getopnum() in (rop.RAW_LOAD_I, rop.RAW_LOAD_F): + elif opnum in (rop.RAW_LOAD_I, rop.RAW_LOAD_F): itemsize, ofs, sign = unpack_arraydescr(op.getdescr()) ptr_box = op.getarg(0) index_box = op.getarg(1) self.emit_gc_load_or_indexed(op, ptr_box, index_box, itemsize, 1, ofs, sign) - elif op.getopnum() in (rop.GETINTERIORFIELD_GC_I, rop.GETINTERIORFIELD_GC_R, - rop.GETINTERIORFIELD_GC_F): + elif opnum in (rop.GETINTERIORFIELD_GC_I, rop.GETINTERIORFIELD_GC_R, + rop.GETINTERIORFIELD_GC_F): ofs, itemsize, fieldsize, sign = unpack_interiorfielddescr(op.getdescr()) ptr_box = op.getarg(0) index_box = op.getarg(1) self.emit_gc_load_or_indexed(op, ptr_box, index_box, fieldsize, itemsize, ofs, sign) - elif op.getopnum() in (rop.SETINTERIORFIELD_RAW, rop.SETINTERIORFIELD_GC): + elif opnum in (rop.SETINTERIORFIELD_RAW, rop.SETINTERIORFIELD_GC): ofs, itemsize, fieldsize, sign = unpack_interiorfielddescr(op.getdescr()) ptr_box = op.getarg(0) index_box = op.getarg(1) value_box = op.getarg(2) self.emit_gc_store_or_indexed(op, ptr_box, index_box, value_box, fieldsize, itemsize, ofs) - elif op.getopnum() in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R, - rop.GETFIELD_RAW_I, rop.GETFIELD_RAW_F, rop.GETFIELD_RAW_R): + elif opnum in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R, + rop.GETFIELD_RAW_I, rop.GETFIELD_RAW_F, rop.GETFIELD_RAW_R): ofs, itemsize, sign = unpack_fielddescr(op.getdescr()) ptr_box = op.getarg(0) if op.getopnum() in (rop.GETFIELD_GC_F, rop.GETFIELD_GC_I, rop.GETFIELD_GC_R): @@ -249,45 +258,45 @@ self.emit_op(op) return True self.emit_gc_load_or_indexed(op, ptr_box, ConstInt(0), itemsize, 1, ofs, sign) - elif op.getopnum() in (rop.SETFIELD_GC, rop.SETFIELD_RAW): + elif 
opnum in (rop.SETFIELD_GC, rop.SETFIELD_RAW): ofs, itemsize, sign = unpack_fielddescr(op.getdescr()) ptr_box = op.getarg(0) value_box = op.getarg(1) self.emit_gc_store_or_indexed(op, ptr_box, ConstInt(0), value_box, itemsize, 1, ofs) - elif op.getopnum() == rop.ARRAYLEN_GC: + elif opnum == rop.ARRAYLEN_GC: descr = op.getdescr() assert isinstance(descr, ArrayDescr) ofs = descr.lendescr.offset self.emit_gc_load_or_indexed(op, op.getarg(0), ConstInt(0), WORD, 1, ofs, NOT_SIGNED) - elif op.getopnum() == rop.STRLEN: + elif opnum == rop.STRLEN: basesize, itemsize, ofs_length = get_array_token(rstr.STR, self.cpu.translate_support_code) self.emit_gc_load_or_indexed(op, op.getarg(0), ConstInt(0), WORD, 1, ofs_length, NOT_SIGNED) - elif op.getopnum() == rop.UNICODELEN: + elif opnum == rop.UNICODELEN: basesize, itemsize, ofs_length = get_array_token(rstr.UNICODE, self.cpu.translate_support_code) self.emit_gc_load_or_indexed(op, op.getarg(0), ConstInt(0), WORD, 1, ofs_length, NOT_SIGNED) - elif op.getopnum() == rop.STRGETITEM: + elif opnum == rop.STRGETITEM: basesize, itemsize, ofs_length = get_array_token(rstr.STR, self.cpu.translate_support_code) assert itemsize == 1 self.emit_gc_load_or_indexed(op, op.getarg(0), op.getarg(1), itemsize, itemsize, basesize, NOT_SIGNED) - elif op.getopnum() == rop.UNICODEGETITEM: + elif opnum == rop.UNICODEGETITEM: basesize, itemsize, ofs_length = get_array_token(rstr.UNICODE, self.cpu.translate_support_code) From pypy.commits at gmail.com Mon Mar 14 16:13:35 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 14 Mar 2016 13:13:35 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: refactor PyStringObject to use PyObject_VARHEAD not PyObject_HEAD, improve failing test Message-ID: <56e71b6f.c96cc20a.a5021.fffffc36@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83056:731fbe0846a6 Date: 2016-03-14 22:12 +0200 http://bitbucket.org/pypy/pypy/changeset/731fbe0846a6/ Log: refactor PyStringObject to use PyObject_VARHEAD not PyObject_HEAD, improve failing test diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -27,7 +27,7 @@ ## Solution ## -------- ## -## PyStringObject contains two additional members: the size and a pointer to a +## PyStringObject contains two additional members: the ob_size and a pointer to a ## char buffer; it may be NULL. ## ## - A string allocated by pypy will be converted into a PyStringObject with a @@ -36,7 +36,7 @@ ## ## - A string allocated with PyString_FromStringAndSize(NULL, size) will ## allocate a PyStringObject structure, and a buffer with the specified -## size, but the reference won't be stored in the global map; there is no +## size+1, but the reference won't be stored in the global map; there is no ## corresponding object in pypy. When from_ref() or Py_INCREF() is called, ## the pypy string is created, and added to the global map of tracked ## objects. The buffer is then supposed to be immutable. 
@@ -54,7 +54,7 @@ PyStringObject = lltype.Ptr(PyStringObjectStruct) PyStringObjectFields = PyObjectFields + \ (("ob_shash", rffi.LONG), ("ob_sstate", rffi.INT), - ("buffer", rffi.CCHARP), ("size", Py_ssize_t)) + ("buffer", rffi.CCHARP), ("ob_size", Py_ssize_t)) cpython_struct("PyStringObject", PyStringObjectFields, PyStringObjectStruct) @bootstrap_function @@ -62,7 +62,7 @@ "Type description of PyStringObject" make_typedescr(space.w_str.layout.typedef, basestruct=PyStringObject.TO, - alloc = string_alloc, + #alloc = string_alloc, attach=string_attach, dealloc=string_dealloc, realize=string_realize) @@ -77,6 +77,7 @@ from string_attach. This is used as the tp_alloc function for PyStringObject ''' + xxxx # TODO remove from pypy.module.cpyext.typeobjectdefs import PyTypeObjectPtr pytype = as_pyobj(space, w_type) pytype = rffi.cast(PyTypeObjectPtr, pytype) @@ -90,7 +91,7 @@ if length > 0: py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, length+1, flavor='raw', zero=True) - py_str.c_size = length + py_str.c_ob_size = length py_str.c_ob_sstate = rffi.cast(rffi.INT, 0) # SSTATE_NOT_INTERNED s = rffi.charpsize2str(py_str.c_buffer, length+1) w_obj = space.wrap(s) @@ -110,7 +111,7 @@ py_str = rffi.cast(PyStringObject, py_obj) buflen = length + 1 - py_str.c_size = length + py_str.c_ob_size = length py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw', zero=True) py_str.c_ob_sstate = rffi.cast(rffi.INT, 0) # SSTATE_NOT_INTERNED @@ -122,7 +123,7 @@ buffer must not be modified. """ py_str = rffi.cast(PyStringObject, py_obj) - py_str.c_size = len(space.str_w(w_obj)) + py_str.c_ob_size = len(space.str_w(w_obj)) py_str.c_buffer = lltype.nullptr(rffi.CCHARP.TO) py_str.c_ob_shash = space.hash_w(w_obj) py_str.c_ob_sstate = rffi.cast(rffi.INT, 1) # SSTATE_INTERNED_MORTAL @@ -133,7 +134,7 @@ be modified after this call. """ py_str = rffi.cast(PyStringObject, py_obj) - s = rffi.charpsize2str(py_str.c_buffer, py_str.c_size) + s = rffi.charpsize2str(py_str.c_buffer, py_str.c_ob_size) w_obj = space.wrap(s) py_str.c_ob_shash = space.hash_w(w_obj) py_str.c_ob_sstate = rffi.cast(rffi.INT, 1) # SSTATE_INTERNED_MORTAL @@ -193,12 +194,12 @@ ref_str.c_buffer = rffi.str2charp(s) buffer[0] = ref_str.c_buffer if length: - length[0] = ref_str.c_size + length[0] = ref_str.c_ob_size else: i = 0 while ref_str.c_buffer[i] != '\0': i += 1 - if i != ref_str.c_size: + if i != ref_str.c_ob_size: raise OperationError(space.w_TypeError, space.wrap( "expected string without null bytes")) return 0 @@ -207,7 +208,7 @@ def PyString_Size(space, ref): if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: ref = rffi.cast(PyStringObject, ref) - return ref.c_size + return ref.c_ob_size else: w_obj = from_ref(space, ref) return space.len_w(w_obj) @@ -236,7 +237,7 @@ ref[0] = lltype.nullptr(PyObject.TO) raise to_cp = newsize - oldsize = py_str.c_size + oldsize = py_str.c_ob_size if oldsize < newsize: to_cp = oldsize for i in range(to_cp): diff --git a/pypy/module/cpyext/include/stringobject.h b/pypy/module/cpyext/include/stringobject.h --- a/pypy/module/cpyext/include/stringobject.h +++ b/pypy/module/cpyext/include/stringobject.h @@ -38,15 +38,15 @@ typedef struct { - PyObject_HEAD + PyObject_VAR_HEAD long ob_shash; int ob_sstate; - char * buffer; - Py_ssize_t size; + char * buffer; /* change the name from cpython so all non-api c access is thwarted */ - /* Invariants: - * ob_sval contains space for 'ob_size+1' elements. - * ob_sval[ob_size] == 0. 
+ /* Invariants + * (not relevant in PyPy, all stringobjects are backed by a pypy object) + * buffer contains space for 'ob_size+1' elements. + * buffer[ob_size] == 0. * ob_shash is the hash of the string or -1 if not computed yet. * ob_sstate != 0 iff the string object is in stringobject.c's * 'interned' dictionary; in this case the two references diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -91,13 +91,15 @@ def test_string_tp_alloc(self): module = self.import_extension('foo', [ - ("getstring", "METH_NOARGS", + ("tpalloc", "METH_NOARGS", """ PyObject *base; PyTypeObject * type; PyStringObject *obj; char * p_str; base = PyString_FromString("test"); + if (((PyStringObject*)base)->buffer == NULL) + return PyLong_FromLong(-2); type = base->ob_type; if (type->tp_itemsize != 1) return PyLong_FromLong(type->tp_itemsize); @@ -109,7 +111,7 @@ return (PyObject*)obj; """), ]) - s = module.getstring() + s = module.tpalloc() assert s == 'works\x00\x00\x00\x00\x00' def test_AsString(self): @@ -262,14 +264,14 @@ ar[0] = rffi.cast(PyObject, py_str) api._PyString_Resize(ar, 3) py_str = rffi.cast(PyStringObject, ar[0]) - assert py_str.c_size == 3 + assert py_str.c_ob_size == 3 assert py_str.c_buffer[1] == 'b' assert py_str.c_buffer[3] == '\x00' # the same for growing ar[0] = rffi.cast(PyObject, py_str) api._PyString_Resize(ar, 10) py_str = rffi.cast(PyStringObject, ar[0]) - assert py_str.c_size == 10 + assert py_str.c_ob_size == 10 assert py_str.c_buffer[1] == 'b' assert py_str.c_buffer[10] == '\x00' Py_DecRef(space, ar[0]) From pypy.commits at gmail.com Mon Mar 14 17:23:36 2016 From: pypy.commits at gmail.com (amauryfa) Date: Mon, 14 Mar 2016 14:23:36 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: Fix PyString_AsString() to accept a Unicode object, it is encoded using the default encoding. Message-ID: <56e72bd8.552f1c0a.e1428.fffff69c@mx.google.com> Author: Amaury Forgeot d'Arc Branch: cpyext-ext Changeset: r83057:931af853eaab Date: 2016-03-14 22:20 +0100 http://bitbucket.org/pypy/pypy/changeset/931af853eaab/ Log: Fix PyString_AsString() to accept a Unicode object, it is encoded using the default encoding. Also fix _PyUnicode_AsDefaultEncodedString() to return a referrence borrowed on the unicode. 
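In plain Python 2 terms, what this changeset makes PyString_AsString() accept is roughly the following: byte strings pass through unchanged, unicode objects get encoded with the interpreter's default encoding, and anything else raises TypeError. The sketch below models only those semantics, not the cpyext implementation itself; it leaves out the caching of the encoded result on the PyUnicodeObject (the defenc field) and the borrowed-reference detail, it assumes the usual ASCII default encoding, and the helper name as_default_encoded is invented for the illustration.

    import sys

    def as_default_encoded(obj, errors='strict'):
        # Byte strings are returned as-is.
        if isinstance(obj, str):
            return obj
        # Unicode objects are encoded with the interpreter's default
        # encoding, honouring the given error handler.
        if isinstance(obj, unicode):
            return obj.encode(sys.getdefaultencoding(), errors)
        raise TypeError("expected string or Unicode object, %s found"
                        % (type(obj).__name__,))

    assert as_default_encoded('abc') == 'abc'
    assert as_default_encoded(u'test') == 'test'
    assert as_default_encoded(u'caf\xe9', 'replace') == 'caf?'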
diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, cpython_struct, bootstrap_function, build_type_checkers, @@ -171,8 +171,14 @@ if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: pass # typecheck returned "ok" without forcing 'ref' at all elif not PyString_Check(space, ref): # otherwise, use the alternate way - raise OperationError(space.w_TypeError, space.wrap( - "PyString_AsString only support strings")) + from pypy.module.cpyext.unicodeobject import ( + PyUnicode_Check, _PyUnicode_AsDefaultEncodedString) + if PyUnicode_Check(space, ref): + ref = _PyUnicode_AsDefaultEncodedString(space, ref, None) + else: + raise oefmt(space.w_TypeError, + "expected string or Unicode object, %T found", + from_ref(space, ref)) ref_str = rffi.cast(PyStringObject, ref) if not ref_str.c_buffer: # copy string buffer @@ -184,8 +190,14 @@ @cpython_api([PyObject, rffi.CCHARPP, rffi.CArrayPtr(Py_ssize_t)], rffi.INT_real, error=-1) def PyString_AsStringAndSize(space, ref, buffer, length): if not PyString_Check(space, ref): - raise OperationError(space.w_TypeError, space.wrap( - "PyString_AsStringAndSize only support strings")) + from pypy.module.cpyext.unicodeobject import ( + PyUnicode_Check, _PyUnicode_AsDefaultEncodedString) + if PyUnicode_Check(space, ref): + ref = _PyUnicode_AsDefaultEncodedString(space, ref, None) + else: + raise oefmt(space.w_TypeError, + "expected string or Unicode object, %T found", + from_ref(space, ref)) ref_str = rffi.cast(PyStringObject, ref) if not ref_str.c_buffer: # copy string buffer diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -172,6 +172,44 @@ ]) module.getstring() + def test_py_string_as_string_Unicode(self): + module = self.import_extension('foo', [ + ("getstring_unicode", "METH_NOARGS", + """ + Py_UNICODE chars[] = {'t', 'e', 's', 't'}; + PyObject* u1 = PyUnicode_FromUnicode(chars, 4); + char *buf; + buf = PyString_AsString(u1); + if (buf == NULL) + return NULL; + if (buf[3] != 't') { + PyErr_SetString(PyExc_AssertionError, "Bad conversion"); + return NULL; + } + Py_DECREF(u1); + Py_INCREF(Py_None); + return Py_None; + """), + ("getstringandsize_unicode", "METH_NOARGS", + """ + Py_UNICODE chars[] = {'t', 'e', 's', 't'}; + PyObject* u1 = PyUnicode_FromUnicode(chars, 4); + char *buf; + Py_ssize_t len; + if (PyString_AsStringAndSize(u1, &buf, &len) < 0) + return NULL; + if (len != 4) { + PyErr_SetString(PyExc_AssertionError, "Bad Length"); + return NULL; + } + Py_DECREF(u1); + Py_INCREF(Py_None); + return Py_None; + """), + ]) + module.getstring_unicode() + module.getstringandsize_unicode() + def test_format_v(self): module = self.import_extension('foo', [ ("test_string_format_v", "METH_VARARGS", diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -89,6 +89,22 @@ res = module.test_hash(u"xyz") assert res == hash(u'xyz') + def test_default_encoded_string(self): + module = self.import_extension('foo', [ + 
("test_default_encoded_string", "METH_O", + ''' + PyObject* result = _PyUnicode_AsDefaultEncodedString(args, "replace"); + Py_INCREF(result); + return result; + ''' + ), + ]) + res = module.test_default_encoded_string(u"xyz") + assert isinstance(res, str) + assert res == 'xyz' + res = module.test_default_encoded_string(u"caf\xe9") + assert isinstance(res, str) + assert res == 'caf?' class TestUnicode(BaseApiTest): def test_unicodeobject(self, space, api): diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -375,8 +375,15 @@ return PyUnicode_FromUnicode(space, wchar_p, length) @cpython_api([PyObject, CONST_STRING], PyObject) -def _PyUnicode_AsDefaultEncodedString(space, w_unicode, errors): - return PyUnicode_AsEncodedString(space, w_unicode, lltype.nullptr(rffi.CCHARP.TO), errors) +def _PyUnicode_AsDefaultEncodedString(space, ref, errors): + # Returns a borrowed reference. + py_uni = rffi.cast(PyUnicodeObject, ref) + if not py_uni.c_defenc: + py_uni.c_defenc = make_ref( + space, PyUnicode_AsEncodedString( + space, ref, + lltype.nullptr(rffi.CCHARP.TO), errors)) + return py_uni.c_defenc @cpython_api([CONST_STRING, Py_ssize_t, CONST_STRING, CONST_STRING], PyObject) def PyUnicode_Decode(space, s, size, encoding, errors): From pypy.commits at gmail.com Mon Mar 14 18:16:05 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 14 Mar 2016 15:16:05 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: fix test since PyStringObject buffer is read only, fix segfault if buffer is NULL Message-ID: <56e73825.c9161c0a.f42a9.03cd@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83058:232d73c2ea06 Date: 2016-03-15 00:14 +0200 http://bitbucket.org/pypy/pypy/changeset/232d73c2ea06/ Log: fix test since PyStringObject buffer is read only, fix segfault if buffer is NULL diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -134,6 +134,9 @@ be modified after this call. 
""" py_str = rffi.cast(PyStringObject, py_obj) + if not py_str.c_buffer: + py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, py_str.c_ob_size + 1, + flavor='raw', zero=True) s = rffi.charpsize2str(py_str.c_buffer, py_str.c_ob_size) w_obj = space.wrap(s) py_str.c_ob_shash = space.hash_w(w_obj) diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -98,21 +98,22 @@ PyStringObject *obj; char * p_str; base = PyString_FromString("test"); - if (((PyStringObject*)base)->buffer == NULL) - return PyLong_FromLong(-2); + if (PyString_GET_SIZE(base) != 4) + return PyLong_FromLong(-PyString_GET_SIZE(base)); type = base->ob_type; if (type->tp_itemsize != 1) return PyLong_FromLong(type->tp_itemsize); obj = (PyStringObject*)type->tp_alloc(type, 10); - if (PyString_GET_SIZE(obj) == 0) - return PyLong_FromLong(-1); - memcpy(PyString_AS_STRING(obj), "works", 6); + if (PyString_GET_SIZE(obj) != 10) + return PyLong_FromLong(PyString_GET_SIZE(obj)); + /* cannot work, there is only RO access + memcpy(PyString_AS_STRING(obj), "works", 6); */ Py_INCREF(obj); return (PyObject*)obj; """), ]) s = module.tpalloc() - assert s == 'works\x00\x00\x00\x00\x00' + assert s == '\x00' * 10 def test_AsString(self): module = self.import_extension('foo', [ From pypy.commits at gmail.com Mon Mar 14 18:16:07 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 14 Mar 2016 15:16:07 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: remove leftover cruft Message-ID: <56e73827.918e1c0a.36d33.086e@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83059:6b2f326b088c Date: 2016-03-15 00:15 +0200 http://bitbucket.org/pypy/pypy/changeset/6b2f326b088c/ Log: remove leftover cruft diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -62,44 +62,12 @@ "Type description of PyStringObject" make_typedescr(space.w_str.layout.typedef, basestruct=PyStringObject.TO, - #alloc = string_alloc, attach=string_attach, dealloc=string_dealloc, realize=string_realize) PyString_Check, PyString_CheckExact = build_type_checkers("String", "w_str") -def string_alloc(space, w_type, length): - ''' - Yet another way to allocate a PyObject, this time a - PyStringObject. The first bit is copied from - BaseCpyTypedescr.allocate, the bit after length>0 - from string_attach. 
This is used as the tp_alloc function - for PyStringObject - ''' - xxxx # TODO remove - from pypy.module.cpyext.typeobjectdefs import PyTypeObjectPtr - pytype = as_pyobj(space, w_type) - pytype = rffi.cast(PyTypeObjectPtr, pytype) - assert pytype - size = pytype.c_tp_basicsize - buf = lltype.malloc(rffi.VOIDP.TO, size, - flavor='raw', zero=True) - py_str = rffi.cast(PyStringObject, buf) - py_str.c_ob_refcnt = 1 - py_str.c_ob_type = pytype - if length > 0: - py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, length+1, - flavor='raw', zero=True) - py_str.c_ob_size = length - py_str.c_ob_sstate = rffi.cast(rffi.INT, 0) # SSTATE_NOT_INTERNED - s = rffi.charpsize2str(py_str.c_buffer, length+1) - w_obj = space.wrap(s) - py_str.c_ob_shash = space.hash_w(w_obj) - py_str.c_ob_sstate = rffi.cast(rffi.INT, 1) # SSTATE_INTERNED_MORTAL - track_reference(space, rffi.cast(PyObject, py_str), w_obj) - return rffi.cast(PyObject, py_str) - def new_empty_str(space, length): """ Allocate a PyStringObject and its buffer, but without a corresponding From pypy.commits at gmail.com Mon Mar 14 18:58:47 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 14 Mar 2016 15:58:47 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: fix translation Message-ID: <56e74227.654fc20a.24690.2578@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83060:fbe7b2887db2 Date: 2016-03-15 00:58 +0200 http://bitbucket.org/pypy/pypy/changeset/fbe7b2887db2/ Log: fix translation diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -145,7 +145,7 @@ from pypy.module.cpyext.unicodeobject import ( PyUnicode_Check, _PyUnicode_AsDefaultEncodedString) if PyUnicode_Check(space, ref): - ref = _PyUnicode_AsDefaultEncodedString(space, ref, None) + ref = _PyUnicode_AsDefaultEncodedString(space, ref, rffi.str2charp("")) else: raise oefmt(space.w_TypeError, "expected string or Unicode object, %T found", @@ -164,7 +164,7 @@ from pypy.module.cpyext.unicodeobject import ( PyUnicode_Check, _PyUnicode_AsDefaultEncodedString) if PyUnicode_Check(space, ref): - ref = _PyUnicode_AsDefaultEncodedString(space, ref, None) + ref = _PyUnicode_AsDefaultEncodedString(space, ref, rffi.str2charp("")) else: raise oefmt(space.w_TypeError, "expected string or Unicode object, %T found", From pypy.commits at gmail.com Tue Mar 15 02:10:58 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 14 Mar 2016 23:10:58 -0700 (PDT) Subject: [pypy-commit] pypy default: fix for 32 bit Message-ID: <56e7a772.918e1c0a.36d33.5be0@mx.google.com> Author: mattip Branch: Changeset: r83061:173add34cdd2 Date: 2016-03-15 08:10 +0200 http://bitbucket.org/pypy/pypy/changeset/173add34cdd2/ Log: fix for 32 bit diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -3439,7 +3439,7 @@ def test_index_int(self): import numpy as np - a = np.array([10, 20, 30]) + a = np.array([10, 20, 30], dtype='int64') res = a[np.int64(1)] assert isinstance(res, np.int64) assert res == 20 From pypy.commits at gmail.com Tue Mar 15 02:59:41 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 14 Mar 2016 23:59:41 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: a different approach - let's try not to intern constants Message-ID: <56e7b2dd.c52f1c0a.ded9c.5c09@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: 
r83062:106e5d6db9cb Date: 2016-03-15 08:58 +0200 http://bitbucket.org/pypy/pypy/changeset/106e5d6db9cb/ Log: a different approach - let's try not to intern constants diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -249,37 +249,36 @@ SMALL_INT_START <= box.getint() < SMALL_INT_STOP): return tag(TAGINT, box.getint()) elif isinstance(box, ConstInt): - self._consts_bigint += 1 - if not isinstance(box.getint(), int): - # symbolics, for tests, don't worry about caching - v = len(self._bigints) << 1 - self._bigints.append(box.getint()) - else: - v = self._bigints_dict.get(box.getint(), -1) - if v == -1: - v = len(self._bigints) << 1 - self._bigints_dict[box.getint()] = v - self._bigints.append(box.getint()) + #self._consts_bigint += 1 + #if not isinstance(box.getint(), int): + # # symbolics, for tests, don't worry about caching + # v = len(self._bigints) << 1 + # self._bigints.append(box.getint()) + #else: + # v = self._bigints_dict.get(box.getint(), -1) + # if v == -1: + v = len(self._bigints) << 1 + # self._bigints_dict[box.getint()] = v + self._bigints.append(box.getint()) return tag(TAGCONSTOTHER, v) elif isinstance(box, ConstFloat): self._consts_float += 1 - v = self._floats_dict.get(box.getfloat(), -1) - if v == -1: - v = (len(self._floats) << 1) | 1 - self._floats_dict[box.getfloat()] = v - self._floats.append(box.getfloat()) + #v = self._floats_dict.get(box.getfloat(), -1) + #if v == -1: + v = (len(self._floats) << 1) | 1 + #self._floats_dict[box.getfloat()] = v + self._floats.append(box.getfloat()) return tag(TAGCONSTOTHER, v) else: self._consts_ptr += 1 assert isinstance(box, ConstPtr) if not box.getref_base(): return tag(TAGCONSTPTR, 0) - addr = box.getref_base() - v = self._refs_dict.get(addr, -1) - if v == -1: - v = len(self._refs) - self._refs_dict[addr] = v - self._refs.append(box.getref_base()) + #v = self._refs_dict.get(addr, -1) + #if v == -1: + # v = len(self._refs) + # self._refs_dict[addr] = v + self._refs.append(box.getref_base()) return tag(TAGCONSTPTR, v) elif isinstance(box, AbstractResOp): return tag(TAGBOX, box.get_position()) diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -13,6 +13,9 @@ def __init__(self, index): self.index = index +class metainterp_sd(object): + pass + class FakeFrame(object): parent_snapshot = None @@ -38,7 +41,7 @@ class TestOpencoder(object): def unpack(self, t): - iter = t.get_iter() + iter = t.get_iter(metainterp_sd) l = [] while not iter.done(): op = iter.next() @@ -142,7 +145,7 @@ def test_packing(self, i): t = Trace([]) t.record_snapshot_link(i) - iter = t.get_iter() + iter = t.get_iter(metainterp_sd) assert (((-iter._next() - 1) << 15) | (iter._next())) == i def test_cut_trace_from(self): From pypy.commits at gmail.com Tue Mar 15 03:16:09 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 15 Mar 2016 00:16:09 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: oops Message-ID: <56e7b6b9.2457c20a.5af9a.ffff80d8@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83063:0779fefa5ca6 Date: 2016-03-15 09:15 +0200 http://bitbucket.org/pypy/pypy/changeset/0779fefa5ca6/ Log: oops diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ 
b/rpython/jit/metainterp/opencoder.py @@ -276,7 +276,7 @@ return tag(TAGCONSTPTR, 0) #v = self._refs_dict.get(addr, -1) #if v == -1: - # v = len(self._refs) + v = len(self._refs) # self._refs_dict[addr] = v self._refs.append(box.getref_base()) return tag(TAGCONSTPTR, v) From pypy.commits at gmail.com Tue Mar 15 04:15:59 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 15 Mar 2016 01:15:59 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: backpedal + be slightly more conscious Message-ID: <56e7c4bf.99e61c0a.ba4f9.ffff8745@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83064:345cab5f0d0b Date: 2016-03-15 10:15 +0200 http://bitbucket.org/pypy/pypy/changeset/345cab5f0d0b/ Log: backpedal + be slightly more conscious diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -249,36 +249,37 @@ SMALL_INT_START <= box.getint() < SMALL_INT_STOP): return tag(TAGINT, box.getint()) elif isinstance(box, ConstInt): - #self._consts_bigint += 1 - #if not isinstance(box.getint(), int): - # # symbolics, for tests, don't worry about caching - # v = len(self._bigints) << 1 - # self._bigints.append(box.getint()) - #else: - # v = self._bigints_dict.get(box.getint(), -1) - # if v == -1: - v = len(self._bigints) << 1 - # self._bigints_dict[box.getint()] = v - self._bigints.append(box.getint()) + self._consts_bigint += 1 + if not isinstance(box.getint(), int): + # symbolics, for tests, don't worry about caching + v = len(self._bigints) << 1 + self._bigints.append(box.getint()) + else: + v = self._bigints_dict.get(box.getint(), -1) + if v == -1: + v = len(self._bigints) << 1 + self._bigints_dict[box.getint()] = v + self._bigints.append(box.getint()) return tag(TAGCONSTOTHER, v) elif isinstance(box, ConstFloat): self._consts_float += 1 - #v = self._floats_dict.get(box.getfloat(), -1) - #if v == -1: - v = (len(self._floats) << 1) | 1 - #self._floats_dict[box.getfloat()] = v - self._floats.append(box.getfloat()) + v = self._floats_dict.get(box.getfloat(), -1) + if v == -1: + v = (len(self._floats) << 1) | 1 + self._floats_dict[box.getfloat()] = v + self._floats.append(box.getfloat()) return tag(TAGCONSTOTHER, v) else: self._consts_ptr += 1 assert isinstance(box, ConstPtr) if not box.getref_base(): return tag(TAGCONSTPTR, 0) - #v = self._refs_dict.get(addr, -1) - #if v == -1: - v = len(self._refs) - # self._refs_dict[addr] = v - self._refs.append(box.getref_base()) + addr = box.getref_base() + v = self._refs_dict.get(addr, -1) + if v == -1: + v = len(self._refs) + self._refs_dict[addr] = v + self._refs.append(box.getref_base()) return tag(TAGCONSTPTR, v) elif isinstance(box, AbstractResOp): return tag(TAGBOX, box.get_position()) @@ -325,7 +326,10 @@ return ResOperation(opnum, argboxes, pos, descr) def _list_of_boxes(self, boxes): - return [rffi.cast(rffi.SHORT, self._encode(box)) for box in boxes] + array = [rffi.cast(rffi.SHORT, 0)] * len(boxes) + for i in range(boxes): + array[i] = self._encode(boxes[i]) + return array def create_top_snapshot(self, jitcode, pc, boxes, vable_boxes, vref_boxes): self._total_snapshots += 1 From pypy.commits at gmail.com Tue Mar 15 05:09:39 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 15 Mar 2016 02:09:39 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: Add more flexibility with (compilation-time) tweakable parameters Message-ID: <56e7d153.e6bbc20a.265af.ffffa5a6@mx.google.com> Author: Armin Rigo Branch: 
guard-compatible Changeset: r83066:0cbabc844652 Date: 2016-03-15 10:08 +0100 http://bitbucket.org/pypy/pypy/changeset/0cbabc844652/ Log: Add more flexibility with (compilation-time) tweakable parameters diff --git a/rpython/jit/backend/test/runner_test.py b/rpython/jit/backend/test/runner_test.py --- a/rpython/jit/backend/test/runner_test.py +++ b/rpython/jit/backend/test/runner_test.py @@ -217,7 +217,7 @@ self.cpu.grow_guard_compatible_switch(looptoken.compiled_loop_token, faildescr1, t2_box._resref) - for retry in range(2): + for retry in range(5): deadframe = self.cpu.execute_token(looptoken, t2_box._resref) fail = self.cpu.get_latest_descr(deadframe) diff --git a/rpython/jit/backend/x86/guard_compat.py b/rpython/jit/backend/x86/guard_compat.py --- a/rpython/jit/backend/x86/guard_compat.py +++ b/rpython/jit/backend/x86/guard_compat.py @@ -15,6 +15,22 @@ # the guard, ending in -1. +# --tweakable parameters (you get the effect closest to before we had +# guard-compat by setting GROW_POSITION to 1 and UPDATE_ASM to 0)-- + +# where grow_switch puts the new value: +# 0 = at the beginning of the list +# 1 = at position N-1, just before the initial value which stays last +# 2 = at the end +GROW_POSITION = 2 + +# when guard_compatible's slow path is called and finds a value, when +# should we update the machine code to make this value the fast-path? +# 0 = never +# another value = after about this many calls to the slow-path +UPDATE_ASM = 1291 + + def generate_guard_compatible(assembler, guard_token, loc_reg, initial_value): # fast-path check mc = assembler.mc @@ -49,9 +65,10 @@ mc.stack_frame_size_delta(-WORD) small_ofs = rel_pos_compatible_imm - mc.get_relative_pos() - compatinfo[0] = small_ofs + assert -128 <= small_ofs < 128 + compatinfo[0] = small_ofs & 0xFF - assembler.guard_success_cc = rx86.Conditions['NZ'] + assembler.guard_success_cc = rx86.Conditions['Z'] assembler.implement_guard(guard_token) # # patch the JE above @@ -99,10 +116,22 @@ newcompatinfo = rffi.cast(rffi.SIGNEDP, newcompatinfoaddr) newcompatinfo[0] = compatinfo[0] - newcompatinfo[1] = new_value - for i in range(1, length): - newcompatinfo[i + 1] = compatinfo[i] + if GROW_POSITION == 0: + newcompatinfo[1] = new_value + for i in range(1, length): + newcompatinfo[i + 1] = compatinfo[i] + elif GROW_POSITION == 1: + for i in range(1, length - 2): + newcompatinfo[i] = compatinfo[i] + newcompatinfo[length - 2] = new_value + newcompatinfo[length - 1] = compatinfo[length - 2] + newcompatinfo[length] = -1 # == compatinfo[length - 1] + else: + for i in range(1, length - 1): + newcompatinfo[i] = compatinfo[i] + newcompatinfo[length - 1] = new_value + newcompatinfo[length] = -1 # == compatinfo[length - 1] # the old 'compatinfo' is not used any more, but will only be freed # when the looptoken is freed @@ -117,6 +146,36 @@ assembler._guard_compat_checkers = [0] * nb_registers +def _build_inner_loop(mc, regnum, tmp, immediate_return): + pos = mc.get_relative_pos() + mc.CMP_mr((tmp, WORD), regnum) + mc.J_il8(rx86.Conditions['E'], 0) # patched below + je_location = mc.get_relative_pos() + mc.CMP_mi((tmp, WORD), -1) + mc.LEA_rm(tmp, (tmp, WORD)) + mc.J_il8(rx86.Conditions['NE'], pos - (mc.get_relative_pos() + 2)) + # + # not found! Return the condition code 'Not Zero' to mean 'not found'. + mc.OR_rr(tmp, tmp) + # + # if 'immediate_return', patch the JE above to jump here. When we + # follow that path, we get condition code 'Zero', which means 'found'. 
+ if immediate_return: + offset = mc.get_relative_pos() - je_location + assert 0 < offset <= 127 + mc.overwrite(je_location-1, chr(offset)) + # + if IS_X86_32: + mc.POP_r(tmp) + mc.RET16_i(WORD) + mc.force_frame_size(8) # one word on X86_64, two words on X86_32 + # + # if not 'immediate_return', patch the JE above to jump here. + if not immediate_return: + offset = mc.get_relative_pos() - je_location + assert 0 < offset <= 127 + mc.overwrite(je_location-1, chr(offset)) + def get_or_build_checker(assembler, regnum): """Returns a piece of assembler that checks if the value is in some array (there is one such piece per input register 'regnum') @@ -142,40 +201,43 @@ mc.MOV_rs(tmp, stack_arg) - pos = mc.get_relative_pos() - mc.CMP_mr((tmp, WORD), regnum) - mc.J_il8(rx86.Conditions['E'], 0) # patched below - je_location = mc.get_relative_pos() - mc.CMP_mi((tmp, WORD), -1) - mc.LEA_rm(tmp, (tmp, WORD)) - mc.J_il8(rx86.Conditions['NE'], pos - (mc.get_relative_pos() + 2)) + if UPDATE_ASM > 0: + CONST_TO_ADD = int((1 << 24) / (UPDATE_ASM + 0.3)) + if CONST_TO_ADD >= (1 << 23): + CONST_TO_ADD = (1 << 23) - 1 + if CONST_TO_ADD < 1: + CONST_TO_ADD = 1 + CONST_TO_ADD <<= 8 + # + mc.ADD32_mi32((tmp, 0), CONST_TO_ADD) + mc.J_il8(rx86.Conditions['C'], 0) # patched below + jc_location = mc.get_relative_pos() + else: + jc_location = -1 - # not found! The condition code is already 'Zero', which we return - # to mean 'not found'. - if IS_X86_32: - mc.POP_r(tmp) - mc.RET16_i(WORD) + _build_inner_loop(mc, regnum, tmp, immediate_return=True) - mc.force_frame_size(8) # one word on X86_64, two words on X86_32 - - # patch the JE above - offset = mc.get_relative_pos() - je_location - assert 0 < offset <= 127 - mc.overwrite(je_location-1, chr(offset)) - - # found! update the assembler by writing the value at 'small_ofs' - # bytes before our return address. This should overwrite the const in - # 'MOV_ri64(r11, const)', first instruction of the guard_compatible. - mc.MOV_rs(tmp, stack_arg) - mc.MOV_rm(tmp, (tmp, 0)) - mc.ADD_rs(tmp, stack_ret) - mc.MOV_mr((tmp, -WORD), regnum) - - # the condition codes say 'Not Zero', as a result of the ADD above. - # Return this condition code to mean 'found'. - if IS_X86_32: - mc.POP_r(tmp) - mc.RET16_i(WORD) + if jc_location != -1: + # patch the JC above + offset = mc.get_relative_pos() - jc_location + assert 0 < offset <= 127 + mc.overwrite(jc_location-1, chr(offset)) + # + _build_inner_loop(mc, regnum, tmp, immediate_return=False) + # + # found! update the assembler by writing the value at 'small_ofs' + # bytes before our return address. This should overwrite the const in + # 'MOV_ri64(r11, const)', first instruction of the guard_compatible. + mc.MOV_rs(tmp, stack_arg) + mc.MOVSX8_rm(tmp, (tmp, 0)) + mc.ADD_rs(tmp, stack_ret) + mc.MOV_mr((tmp, -WORD), regnum) + # + # Return condition code 'Zero' to mean 'found'. 
+ mc.CMP_rr(regnum, regnum) + if IS_X86_32: + mc.POP_r(tmp) + mc.RET16_i(WORD) addr = mc.materialize(assembler.cpu, []) assembler._guard_compat_checkers[regnum] = addr diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -514,6 +514,8 @@ XOR_ri,XOR_rr,XOR_rb,_,_,XOR_rm,XOR_rj,_,_,_,_ = common_modes(6) CMP_ri,CMP_rr,CMP_rb,CMP_bi,CMP_br,CMP_rm,CMP_rj,_,_,_,CMP_ri32 = common_modes(7) + ADD32_mi32 = insn(rex_nw, '\x81', mem_reg_plus_const(1), immediate(2)) + def ADD_ri(self, reg, immed): self.AD1_ri(reg, immed) if reg == R.esp: diff --git a/rpython/jit/backend/x86/test/test_runner.py b/rpython/jit/backend/x86/test/test_runner.py --- a/rpython/jit/backend/x86/test/test_runner.py +++ b/rpython/jit/backend/x86/test/test_runner.py @@ -13,6 +13,7 @@ from rpython.jit.backend.test.runner_test import LLtypeBackendTest from rpython.jit.tool.oparser import parse import ctypes +from hypothesis import strategies, given CPU = getcpuclass() @@ -556,6 +557,52 @@ assert self.cpu.get_int_value(deadframe, 2) == 42 assert self.cpu.get_int_value(deadframe, 3) == 42 + @given(strategies.integers(min_value=0, max_value=2), + strategies.integers(min_value=0), + strategies.lists(strategies.integers())) + def test_guard_compatible_extra(self, grow_position, update_asm, lst): + from rpython.jit.backend.x86 import guard_compat + saved = guard_compat.GROW_POSITION, guard_compat.UPDATE_ASM + try: + guard_compat.GROW_POSITION = grow_position + guard_compat.UPDATE_ASM = update_asm + + t1_box, T1_box, d1 = self.alloc_instance(self.T) + faildescr1 = BasicFailDescr(1) + loop = parse(""" + [p0] + guard_compatible(p0, ConstPtr(t1), descr=faildescr1) [] + finish(p0, descr=fdescr) + """, namespace={'fdescr': BasicFinalDescr(2), + 'faildescr1': faildescr1, + 't1': t1_box._resref}) + looptoken = JitCellToken() + self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken) + + def run(box): + deadframe = self.cpu.execute_token(looptoken, + box._resref) + fail = self.cpu.get_latest_descr(deadframe) + return fail.identifier + + choices = {0: t1_box} + + for operation in lst: + if operation >= 0 or (-operation) in choices: + if operation in choices: + assert run(choices[operation]) == 2 + else: + t2_box, T2_box, d2 = self.alloc_instance(self.T) + assert run(t2_box) == 1 + else: + t2_box, T2_box, d2 = self.alloc_instance(self.T) + self.cpu.grow_guard_compatible_switch( + looptoken.compiled_loop_token, + faildescr1, t2_box._resref) + choices[-operation] = t2_box + finally: + guard_compat.GROW_POSITION, guard_compat.UPDATE_ASM = saved + class TestDebuggingAssembler(object): def setup_method(self, meth): From pypy.commits at gmail.com Tue Mar 15 05:14:12 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 15 Mar 2016 02:14:12 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: remove two more promotes Message-ID: <56e7d264.e213c20a.6aca7.ffffa81b@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83067:1b4c7fac81be Date: 2016-03-15 10:11 +0100 http://bitbucket.org/pypy/pypy/changeset/1b4c7fac81be/ Log: remove two more promotes diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -41,11 +41,14 @@ # objects with different maps can have the same class return self.terminator.w_cls + @jit.elidable_compatible() + def _get_terminator(self): + return self.terminator + def read(self, obj, name, index): attr = 
self.find_map_attr(name, index) if attr is None: - jit.promote(self) - return self.terminator._read_terminator(obj, name, index) + return self._get_terminator()._read_terminator(obj, name, index) if ( # XXX in the guard_compatible world the following isconstant may never be true? jit.isconstant(attr.storageindex) and jit.isconstant(obj) and @@ -62,9 +65,7 @@ def write(self, obj, name, index, w_value): attr = self.find_map_attr(name, index) if attr is None: - # adding an attribute needs to know all attributes, thus promote - jit.promote(self) - return self.terminator._write_terminator(obj, name, index, w_value) + return self._get_terminator()._write_terminator(obj, name, index, w_value) if not attr.ever_mutated: attr.ever_mutated = True obj._mapdict_write_storage(attr.storageindex, w_value) From pypy.commits at gmail.com Tue Mar 15 09:29:17 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 15 Mar 2016 06:29:17 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: try to not have position on resop at all Message-ID: <56e80e2d.a151c20a.b61d7.0f75@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83069:5413d954e8a2 Date: 2016-03-15 15:28 +0200 http://bitbucket.org/pypy/pypy/changeset/5413d954e8a2/ Log: try to not have position on resop at all diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -7,7 +7,8 @@ from rpython.conftest import option from rpython.jit.metainterp.resoperation import ResOperation, rop,\ - AbstractValue, oparity, AbstractResOp, IntOp, RefOp, FloatOp + AbstractValue, oparity, AbstractResOp, IntOp, RefOp, FloatOp,\ + opclasses from rpython.jit.codewriter import heaptracker, longlong import weakref @@ -647,15 +648,27 @@ def __init__(self, pos): self.position = pos + def get_position(self): + return self.position + class IntFrontendOp(IntOp, FrontendOp): _attrs_ = ('position', '_resint') + def copy_value_from(self, other): + self._resint = other.getint() + class FloatFrontendOp(FloatOp, FrontendOp): _attrs_ = ('position', '_resfloat') + def copy_value_from(self, other): + self._resfloat = other.getfloatstorage() + class RefFrontendOp(RefOp, FrontendOp): _attrs_ = ('position', '_resref') + def copy_value_from(self, other): + self._resref = other.getref_base() + class History(object): ends_with_jump = False trace = None @@ -673,12 +686,9 @@ self.inputargs = inpargs if self._cache: # hack to record the ops *after* we know our inputargs - for op in self._cache: - newop = self.trace.record_op(op.getopnum(), op.getarglist(), - op.getdescr()) - op.position = newop.position - if op.type != 'v': - newop.copy_value_from(op) + for (opnum, argboxes, op, descr) in self._cache: + pos = self.trace.record_op(opnum, argboxes, descr) + op.position = pos self._cache = None def length(self): @@ -710,29 +720,39 @@ @specialize.argtype(3) def record(self, opnum, argboxes, value, descr=None): if self.trace is None: - op = ResOperation(opnum, argboxes, -1, descr) - self._cache.append(op) + pos = -1 else: - pos = self.trace._record_op(opnum, argboxes, descr) - if value is None: - op = FrontendOp(pos) - elif isinstance(value, bool): - op = IntFrontendOp(pos) - elif lltype.typeOf(value) == lltype.Signed: - op = IntFrontendOp(pos) - elif lltype.typeOf(value) is longlong.FLOATSTORAGE: - op = FloatFrontendOp(pos) - else: - op = RefFrontendOp(pos) + pos = self.trace.record_op(opnum, argboxes, descr) + if value is None: + op = FrontendOp(pos) + elif 
isinstance(value, bool): + op = IntFrontendOp(pos) + elif lltype.typeOf(value) == lltype.Signed: + op = IntFrontendOp(pos) + elif lltype.typeOf(value) is longlong.FLOATSTORAGE: + op = FloatFrontendOp(pos) + else: + op = RefFrontendOp(pos) + if self.trace is None: + self._cache.append((opnum, argboxes, op, descr)) self.set_op_value(op, value) return op def record_nospec(self, opnum, argboxes, descr=None): - return self.trace.record_op(opnum, argboxes, descr) + tp = opclasses[opnum].type + pos = self.trace.record_op(opnum, argboxes, descr) + if tp == 'v': + return FrontendOp(pos) + elif tp == 'i': + return IntFrontendOp(pos) + elif tp == 'f': + return FloatFrontendOp(pos) + assert tp == 'r' + return RefFrontendOp(pos) def record_default_val(self, opnum, argboxes, descr=None): assert rop.is_same_as(opnum) - op = self.trace.record_op(opnum, argboxes, descr) + op = self.record_nospec(opnum, argboxes, descr) op.copy_value_from(argboxes[0]) return op diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -133,7 +133,7 @@ descr = self.trace._descrs[descr_index] else: descr = None - res = ResOperation(opnum, args, -1, descr=descr) + res = ResOperation(opnum, args, descr=descr) if rop.is_guard(opnum): assert isinstance(res, GuardResOp) res.rd_resume_position = descr_index @@ -288,7 +288,7 @@ else: assert False, "unreachable code" - def _record_op(self, opnum, argboxes, descr=None): + def record_op(self, opnum, argboxes, descr=None): pos = self._count self.append(opnum) expected_arity = oparity[opnum] @@ -319,12 +319,6 @@ self.append(-upper-1) self.append(lower) - def record_op(self, opnum, argboxes, descr=None): - # return an ResOperation instance, ideally die in hell - pos = self._record_op(opnum, argboxes, descr) - assert opnum >= 0 - return ResOperation(opnum, argboxes, pos, descr) - def _list_of_boxes(self, boxes): array = [rffi.cast(rffi.SHORT, 0)] * len(boxes) for i in range(len(boxes)): diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -206,7 +206,7 @@ def pure_from_args(self, opnum, args, op, descr=None): newop = ResOperation(opnum, [self.get_box_replacement(arg) for arg in args], - -1, descr=descr) + descr=descr) newop.set_forwarded(op) self.pure(opnum, newop) diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py --- a/rpython/jit/metainterp/optimizeopt/rewrite.py +++ b/rpython/jit/metainterp/optimizeopt/rewrite.py @@ -56,13 +56,13 @@ arg0 = op.getarg(0) arg1 = op.getarg(1) if oldopnum != -1: - top = ResOperation(oldopnum, [arg0, arg1], -1) + top = ResOperation(oldopnum, [arg0, arg1]) if self.try_boolinvers(op, top): return True oldopnum = op.boolreflex # FIXME: add INT_ADD, INT_MUL if oldopnum != -1: - top = ResOperation(oldopnum, [arg1, arg0], -1) + top = ResOperation(oldopnum, [arg1, arg0]) oldop = self.get_pure_result(top) if oldop is not None: self.optimizer.make_equal_to(op, oldop) @@ -72,7 +72,7 @@ return False oldopnum = opclasses[op.boolreflex].boolinverse if oldopnum != -1: - top = ResOperation(oldopnum, [arg1, arg0], -1) + top = ResOperation(oldopnum, [arg1, arg0]) if self.try_boolinvers(op, top): return True diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ 
b/rpython/jit/metainterp/pyjitpl.py @@ -2540,9 +2540,7 @@ i = len(self.history._cache) op1 = self.history.record(rop.SAVE_EXC_CLASS, [], exc_class) op2 = self.history.record(rop.SAVE_EXCEPTION, [], exception) - assert op1 is self.history._cache[i] - assert op2 is self.history._cache[i + 1] - self.history._cache = [op1, op2] + self.history._cache[:i] + self.history._cache = self.history._cache[i:] + self.history._cache[:i] self.history.record(rop.RESTORE_EXCEPTION, [op1, op2], None) self.history.set_inputargs(inputargs) if exception_obj: diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -81,12 +81,10 @@ return False -def ResOperation(opnum, args, position=-1, descr=None): +def ResOperation(opnum, args, descr=None): cls = opclasses[opnum] op = cls() op.initarglist(args) - assert isinstance(position, int) - op.position = position if descr is not None: assert isinstance(op, ResOpWithDescr) if opnum == rop.FINISH: @@ -331,7 +329,7 @@ descr = self.getdescr() if descr is DONT_CHANGE: descr = None - newop = ResOperation(opnum, args, -1, descr) + newop = ResOperation(opnum, args, descr) if self.type != 'v': newop.copy_value_from(self) return newop diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -1,6 +1,6 @@ from rpython.jit.metainterp.opencoder import Trace, untag, TAGINT, TAGBOX -from rpython.jit.metainterp.resoperation import rop, InputArgInt +from rpython.jit.metainterp.resoperation import rop, InputArgInt, AbstractResOp from rpython.jit.metainterp.history import ConstInt from rpython.jit.metainterp.optimizeopt.optimizer import Optimizer from rpython.jit.metainterp import resume @@ -16,6 +16,13 @@ class metainterp_sd(object): pass +class FakeOp(AbstractResOp): + def __init__(self, pos): + self.pos = pos + + def get_position(self): + return self.pos + class FakeFrame(object): parent_snapshot = None @@ -53,7 +60,7 @@ def test_simple_iterator(self): i0, i1 = InputArgInt(), InputArgInt() t = Trace([i0, i1]) - add = t.record_op(rop.INT_ADD, [i0, i1]) + add = FakeOp(t.record_op(rop.INT_ADD, [i0, i1])) t.record_op(rop.INT_ADD, [add, ConstInt(1)]) (i0, i1), l, _ = self.unpack(t) assert len(l) == 2 @@ -67,7 +74,7 @@ def test_rd_snapshot(self): i0, i1 = InputArgInt(), InputArgInt() t = Trace([i0, i1]) - add = t.record_op(rop.INT_ADD, [i0, i1]) + add = FakeOp(t.record_op(rop.INT_ADD, [i0, i1])) t.record_op(rop.GUARD_FALSE, [add]) # now we write rd_snapshot and friends frame0 = FakeFrame(1, JitCode(2), [i0, i1]) @@ -126,12 +133,12 @@ inputargs, ops = lst t = Trace(inputargs) for op in ops: - newop = t.record_op(op.getopnum(), op.getarglist()) + newop = FakeOp(t.record_op(op.getopnum(), op.getarglist())) newop.orig_op = op if newop.is_guard(): resume.capture_resumedata(op.framestack, None, [], t) - op.position = newop.position + op.position = newop.get_position() inpargs, l, iter = self.unpack(t) loop1 = TreeLoop("loop1") loop1.inputargs = inputargs From pypy.commits at gmail.com Tue Mar 15 09:51:18 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 15 Mar 2016 06:51:18 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: avoid some operations Message-ID: <56e81356.906b1c0a.37ca4.fffffcd7@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83070:cadb6f1c1225 Date: 
2016-03-15 14:49 +0100 http://bitbucket.org/pypy/pypy/changeset/cadb6f1c1225/ Log: avoid some operations diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -129,6 +129,10 @@ def length(self): raise NotImplementedError("abstract base class") + @jit.guard_compatible() + def _length_larger_than(self, n): + return self.length() > n + def get_terminator(self): return self.terminator @@ -659,7 +663,7 @@ self.map = map def _has_storage_list(self): - return self.map.length() > n + return self.map._length_larger_than(n) def _mapdict_get_storage_list(self): erased = getattr(self, valnmin1) From pypy.commits at gmail.com Tue Mar 15 10:03:42 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 15 Mar 2016 07:03:42 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: nonsense Message-ID: <56e8163e.03dd1c0a.9456.ffffff4b@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83071:24d702fa2df4 Date: 2016-03-15 15:02 +0100 http://bitbucket.org/pypy/pypy/changeset/24d702fa2df4/ Log: nonsense diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -129,7 +129,7 @@ def length(self): raise NotImplementedError("abstract base class") - @jit.guard_compatible() + @jit.elidable_compatible() def _length_larger_than(self, n): return self.length() > n From pypy.commits at gmail.com Tue Mar 15 11:49:22 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 15 Mar 2016 08:49:22 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: * Remove unicode_alloc. Message-ID: <56e82f02.a3abc20a.7a71e.378a@mx.google.com> Author: Armin Rigo Branch: cpyext-ext Changeset: r83072:251f4973a108 Date: 2016-03-15 16:48 +0100 http://bitbucket.org/pypy/pypy/changeset/251f4973a108/ Log: * Remove unicode_alloc. * Ensure the order of the fields is correct (might be unnecessary, but feels safer). 
diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -53,8 +53,8 @@ PyStringObjectStruct = lltype.ForwardReference() PyStringObject = lltype.Ptr(PyStringObjectStruct) PyStringObjectFields = PyObjectFields + \ - (("ob_shash", rffi.LONG), ("ob_sstate", rffi.INT), - ("buffer", rffi.CCHARP), ("ob_size", Py_ssize_t)) + (("ob_size", Py_ssize_t), ("ob_shash", rffi.LONG), + ("ob_sstate", rffi.INT), ("buffer", rffi.CCHARP)) cpython_struct("PyStringObject", PyStringObjectFields, PyStringObjectStruct) @bootstrap_function diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -30,7 +30,6 @@ def init_unicodeobject(space): make_typedescr(space.w_unicode.layout.typedef, basestruct=PyUnicodeObject.TO, - alloc = unicode_alloc, attach=unicode_attach, dealloc=unicode_dealloc, realize=unicode_realize) @@ -48,6 +47,7 @@ ''' see comments with string_alloc in stringobject.py ''' + XXX from pypy.module.cpyext.typeobjectdefs import PyTypeObjectPtr pytype = as_pyobj(space, w_type) pytype = rffi.cast(PyTypeObjectPtr, pytype) From pypy.commits at gmail.com Tue Mar 15 11:54:17 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 15 Mar 2016 08:54:17 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: Undo one of the effects of hacking at PyString_Type.tp_itemsize, which Message-ID: <56e83029.c65b1c0a.734a7.3993@mx.google.com> Author: Armin Rigo Branch: cpyext-ext Changeset: r83073:7b7796d0cff0 Date: 2016-03-15 16:53 +0100 http://bitbucket.org/pypy/pypy/changeset/7b7796d0cff0/ Log: Undo one of the effects of hacking at PyString_Type.tp_itemsize, which is that all PyStringObjects have the extra size allocated (but don't use it) diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -46,7 +46,7 @@ size = pytype.c_tp_basicsize else: size = rffi.sizeof(self.basestruct) - if itemcount: + if itemcount and w_type is not space.w_str: size += itemcount * pytype.c_tp_itemsize assert size >= rffi.sizeof(PyObject.TO) buf = lltype.malloc(rffi.VOIDP.TO, size, From pypy.commits at gmail.com Tue Mar 15 12:46:17 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 15 Mar 2016 09:46:17 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: try to get rid of values too Message-ID: <56e83c59.838d1c0a.6474b.3ed8@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83074:b62416027897 Date: 2016-03-15 15:43 +0200 http://bitbucket.org/pypy/pypy/changeset/b62416027897/ Log: try to get rid of values too diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py --- a/rpython/jit/metainterp/logger.py +++ b/rpython/jit/metainterp/logger.py @@ -187,7 +187,10 @@ s = s.replace(',', '.') # we use comma for argument splitting s2 = '' for box in args[1:]: - s2 += ', %d' % box.getint() + if isinstance(box, ConstInt): + s2 += ', %d' % box.getint() + else: + s2 += ', box' return "jit_debug('%s'%s)" % (s, s2) if ops_offset is None: offset = -1 diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -329,10 +329,7 @@ descr = self.getdescr() if descr is DONT_CHANGE: descr = None - newop = ResOperation(opnum, args, descr) - if self.type != 'v': - 
newop.copy_value_from(self) - return newop + return ResOperation(opnum, args, descr) def repr(self, memo, graytext=False): # RPython-friendly version @@ -1720,14 +1717,6 @@ baseclass = PlainResOp mixins = [arity2mixin.get(arity, N_aryOp)] - if result_type == 'i': - mixins.append(IntOp) - elif result_type == 'f': - mixins.append(FloatOp) - elif result_type == 'r': - mixins.append(RefOp) - else: - assert result_type == 'n' if name in _cast_ops: if "INT_SIGNEXT" in name: mixins.append(SignExtOp) @@ -1736,7 +1725,11 @@ cls_name = '%s_OP' % name bases = (get_base_class(tuple(mixins), baseclass),) dic = {'opnum': opnum} - return type(cls_name, bases, dic) + res = type(cls_name, bases, dic) + if result_type == 'n': + result_type = 'v' # why? + res.type = result_type + return res setup(__name__ == '__main__') # print out the table when run directly del _oplist diff --git a/rpython/jit/tool/oparser.py b/rpython/jit/tool/oparser.py --- a/rpython/jit/tool/oparser.py +++ b/rpython/jit/tool/oparser.py @@ -252,7 +252,7 @@ return opnum, args, descr, fail_args def create_op(self, opnum, args, res, descr, fail_args): - res = ResOperation(opnum, args, -1, descr) + res = ResOperation(opnum, args, descr) if fail_args is not None: res.setfailargs(fail_args) if self._postproces: From pypy.commits at gmail.com Tue Mar 15 12:46:19 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 15 Mar 2016 09:46:19 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: another micro-optimization Message-ID: <56e83c5b.83301c0a.fdf39.414d@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83075:5f3c4d1e2f03 Date: 2016-03-15 18:45 +0200 http://bitbucket.org/pypy/pypy/changeset/5f3c4d1e2f03/ Log: another micro-optimization diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -325,9 +325,12 @@ array[i] = self._encode(boxes[i]) return array - def create_top_snapshot(self, jitcode, pc, boxes, vable_boxes, vref_boxes): + def new_array(self, lgt): + return [rffi.cast(rffi.SHORT, 0)] * lgt + + def create_top_snapshot(self, jitcode, pc, frame, flag, vable_boxes, vref_boxes): self._total_snapshots += 1 - array = self._list_of_boxes(boxes) + array = frame.get_list_of_active_boxes(flag, self.new_array, self._encode) vable_array = self._list_of_boxes(vable_boxes) vref_array = self._list_of_boxes(vref_boxes) s = TopSnapshot(combine_uint(jitcode.index, pc), array, vable_array, @@ -350,9 +353,9 @@ self._ops[self._pos - 1] = rffi.cast(rffi.SHORT, len(self._snapshots) - 1) return s - def create_snapshot(self, jitcode, pc, boxes): + def create_snapshot(self, jitcode, pc, frame, flag): self._total_snapshots += 1 - array = self._list_of_boxes(boxes) + array = frame.get_list_of_active_boxes(flag, self.new_array, self._encode) return Snapshot(combine_uint(jitcode.index, pc), array) def get_iter(self, metainterp_sd=None): diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -127,7 +127,7 @@ def get_current_position_info(self): return self.jitcode.get_live_vars_info(self.pc) - def get_list_of_active_boxes(self, in_a_call): + def get_list_of_active_boxes(self, in_a_call, new_array, encode): if in_a_call: # If we are not the topmost frame, self._result_argcode contains # the type of the result of the call instruction in the bytecode. 
@@ -146,18 +146,18 @@ start_f = start_r + info.get_register_count_r() total = start_f + info.get_register_count_f() # allocate a list of the correct size - env = [None] * total + env = new_array(total) make_sure_not_resized(env) # fill it now for i in range(info.get_register_count_i()): index = info.get_register_index_i(i) - env[start_i + i] = self.registers_i[index] + env[start_i + i] = encode(self.registers_i[index]) for i in range(info.get_register_count_r()): index = info.get_register_index_r(i) - env[start_r + i] = self.registers_r[index] + env[start_r + i] = encode(self.registers_r[index]) for i in range(info.get_register_count_f()): index = info.get_register_index_f(i) - env[start_f + i] = self.registers_f[index] + env[start_f + i] = encode(self.registers_f[index]) return env def replace_active_box_in_frame(self, oldbox, newbox): diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -90,8 +90,7 @@ if target.parent_snapshot: snapshot.prev = target.parent_snapshot return - s = t.create_snapshot(back.jitcode, back.pc, - back.get_list_of_active_boxes(True)) + s = t.create_snapshot(back.jitcode, back.pc, back, True) snapshot.prev = s _ensure_parent_resumedata(framestack, n - 1, t, s) target.parent_snapshot = s @@ -108,7 +107,7 @@ if n >= 0: top = framestack[n] snapshot = t.create_top_snapshot(top.jitcode, top.pc, - top.get_list_of_active_boxes(False), virtualizable_boxes, + top, False, virtualizable_boxes, virtualref_boxes) _ensure_parent_resumedata(framestack, n, t,snapshot) else: From pypy.commits at gmail.com Tue Mar 15 13:07:31 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 15 Mar 2016 10:07:31 -0700 (PDT) Subject: [pypy-commit] cffi default: Another attempt at improving the shutdown issues w.r.t. @def_extern Message-ID: <56e84153.4c181c0a.aca25.5439@mx.google.com> Author: Armin Rigo Branch: Changeset: r2647:ab9941c073b3 Date: 2016-03-15 18:07 +0100 http://bitbucket.org/cffi/cffi/changeset/ab9941c073b3/ Log: Another attempt at improving the shutdown issues w.r.t. @def_extern diff --git a/c/call_python.c b/c/call_python.c --- a/c/call_python.c +++ b/c/call_python.c @@ -1,25 +1,52 @@ static PyObject *_get_interpstate_dict(void) { - /* hack around to return a dict that is subinterpreter-local */ + /* Hack around to return a dict that is subinterpreter-local. + Does not return a new reference. Returns NULL in case of + error, but without setting any exception. (If called late + during shutdown, we *can't* set an exception!) + */ + static PyObject *attr_name = NULL; + PyThreadState *tstate; + PyObject *d, *builtins; int err; - PyObject *m, *modules = PyThreadState_GET()->interp->modules; - if (modules == NULL) { - PyErr_SetString(FFIError, "subinterpreter already gone?"); + tstate = PyThreadState_GET(); + if (tstate == NULL) { + /* no thread state! 
*/ return NULL; } - m = PyDict_GetItemString(modules, "_cffi_backend._extern_py"); - if (m == NULL) { - m = PyModule_New("_cffi_backend._extern_py"); - if (m == NULL) - return NULL; - err = PyDict_SetItemString(modules, "_cffi_backend._extern_py", m); - Py_DECREF(m); /* sys.modules keeps one reference to m */ + + builtins = tstate->interp->builtins; + if (builtins == NULL) { + /* subinterpreter was cleared already, or is being cleared right now, + to a point that is too much for us to continue */ + return NULL; + } + + /* from there on, we know the (sub-)interpreter is still valid */ + + if (attr_name == NULL) { + attr_name = PyString_InternFromString("__cffi_backend_extern_py"); + if (attr_name == NULL) + goto error; + } + + d = PyDict_GetItem(builtins, attr_name); + if (d == NULL) { + d = PyDict_New(); + if (d == NULL) + goto error; + err = PyDict_SetItem(builtins, attr_name, d); + Py_DECREF(d); /* if successful, there is one ref left in builtins */ if (err < 0) - return NULL; + goto error; } - return PyModule_GetDict(m); + return d; + + error: + PyErr_Clear(); /* typically a MemoryError */ + return NULL; } static PyObject *_ffi_def_extern_decorator(PyObject *outer_args, PyObject *fn) @@ -77,7 +104,7 @@ interpstate_dict = _get_interpstate_dict(); if (interpstate_dict == NULL) { Py_DECREF(infotuple); - return NULL; + return PyErr_NoMemory(); } externpy = (struct _cffi_externpy_s *)g->address; @@ -119,7 +146,7 @@ interpstate_dict = _get_interpstate_dict(); if (interpstate_dict == NULL) - goto error; + return 4; /* oops, shutdown issue? */ interpstate_key = PyLong_FromVoidPtr((void *)externpy); if (interpstate_key == NULL) @@ -219,8 +246,9 @@ if (err) { static const char *msg[] = { "no code was attached to it yet with @ffi.def_extern()", - "got internal exception (out of memory / shutdown issue)", + "got internal exception (out of memory?)", "@ffi.def_extern() was not called in the current subinterpreter", + "got internal exception (shutdown issue?)", }; fprintf(stderr, "extern \"Python\": function %s() called, " "but %s. Returning 0.\n", externpy->name, msg[err-1]); From pypy.commits at gmail.com Tue Mar 15 13:16:57 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 15 Mar 2016 10:16:57 -0700 (PDT) Subject: [pypy-commit] cffi default: Document issue #247 Message-ID: <56e84389.0f941c0a.b8543.514c@mx.google.com> Author: Armin Rigo Branch: Changeset: r2648:9fa50c74716e Date: 2016-03-15 18:16 +0100 http://bitbucket.org/cffi/cffi/changeset/9fa50c74716e/ Log: Document issue #247 diff --git a/doc/source/cdef.rst b/doc/source/cdef.rst --- a/doc/source/cdef.rst +++ b/doc/source/cdef.rst @@ -109,6 +109,14 @@ install_requires=["cffi>=1.0.0"], ) +* Note that some bundler tools that try to find all modules used by a + project, like PyInstaller, will miss ``_cffi_backend`` in the + out-of-line mode because your program contains no explicit ``import + cffi`` or ``import _cffi_backend``. You need to add + ``_cffi_backend`` explicitly (as a "hidden import" in PyInstaller, + but it can also be done more generally by adding the line ``import + _cffi_backend`` in your main program). + Note that CFFI actually contains two different ``FFI`` classes. The page `Using the ffi/lib objects`_ describes the common functionality. It is what you get in the ``from package._foo import ffi`` lines above. 
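For readers following the two cffi changesets above: the code being hardened is the support for extern "Python" functions declared with @ffi.def_extern(). The sketch below is illustrative only (the module name _example and the callback name my_callback are placeholders, not part of the changesets); it shows the out-of-line API pattern whose error message the call_python.c patch extends with a shutdown-specific case.

    # build script (out-of-line API mode)
    from cffi import FFI

    ffibuilder = FFI()
    ffibuilder.cdef('extern "Python" int my_callback(int);')
    ffibuilder.set_source("_example", "")
    ffibuilder.compile()

    # later, in application code:
    #
    #     from _example import ffi, lib
    #
    #     @ffi.def_extern()
    #     def my_callback(x):
    #         return x + 1
    #
    # If C code reaches my_callback() before @ffi.def_extern() has attached
    # a Python function, or after the (sub-)interpreter has been torn down,
    # cffi falls back to printing the 'extern "Python": function ... called,
    # but ...' message listed in the patch and returns 0.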
From pypy.commits at gmail.com Tue Mar 15 16:12:08 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 15 Mar 2016 13:12:08 -0700 (PDT) Subject: [pypy-commit] pypy bootstrap-clarity: Create space._is_runtime attribute. Message-ID: <56e86c98.06b01c0a.8c183.24c2@mx.google.com> Author: Ronan Lamy Branch: bootstrap-clarity Changeset: r83076:006b04e6249a Date: 2016-03-15 19:06 +0000 http://bitbucket.org/pypy/pypy/changeset/006b04e6249a/ Log: Create space._is_runtime attribute. This is meant to allow cleanly separating translation-time objspace configuration from run-time interpreter initialisation. The attribute is initially False and must be set to True before running space.startup() or exectuting any annotator-visible RPython code. diff --git a/pypy/bin/pyinteractive.py b/pypy/bin/pyinteractive.py --- a/pypy/bin/pyinteractive.py +++ b/pypy/bin/pyinteractive.py @@ -42,7 +42,7 @@ StrOption("warn", "warning control (arg is action:message:category:module:lineno)", default=None, cmdline="-W"), - + ]) pypy_init = gateway.applevel(''' @@ -118,7 +118,7 @@ # set warning control options (if any) warn_arg = interactiveconfig.warn if warn_arg is not None: - space.appexec([space.wrap(warn_arg)], """(arg): + space.appexec([space.wrap(warn_arg)], """(arg): import sys sys.warnoptions.append(arg)""") @@ -167,6 +167,7 @@ try: def do_start(): + space._is_runtime = True space.startup() pypy_init(space, space.wrap(not interactiveconfig.no_site_import)) if main.run_toplevel(space, do_start, @@ -200,6 +201,6 @@ if __name__ == '__main__': if hasattr(sys, 'setrecursionlimit'): - # for running "python -i pyinteractive.py -Si -- py.py -Si" + # for running "python -i pyinteractive.py -Si -- py.py -Si" sys.setrecursionlimit(3000) sys.exit(main_(sys.argv)) diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -292,7 +292,7 @@ self.hack_for_cffi_modules(driver) return self.get_entry_point(config) - + def hack_for_cffi_modules(self, driver): # HACKHACKHACK # ugly hack to modify target goal from compile_* to build_cffi_imports @@ -319,7 +319,7 @@ while not basedir.join('include').exists(): _basedir = basedir.dirpath() if _basedir == basedir: - raise ValueError('interpreter %s not inside pypy repo', + raise ValueError('interpreter %s not inside pypy repo', str(exename)) basedir = _basedir modules = self.config.objspace.usemodules.getpaths() @@ -350,6 +350,7 @@ app = gateway.applevel(open(filename).read(), 'app_main.py', 'app_main') app.hidden_applevel = False w_dict = app.getwdict(space) + space._is_runtime = True entry_point, _ = create_entry_point(space, w_dict) return entry_point, None, PyPyAnnotatorPolicy() diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -374,6 +374,7 @@ def __init__(self, config=None): "NOT_RPYTHON: Basic initialization of objects." + self._is_runtime = False self.fromcache = InternalSpaceCache(self).getorbuild self.threadlocals = ThreadLocals() # set recursion limit @@ -391,7 +392,7 @@ self.check_signal_action = None # changed by the signal module self.user_del_action = UserDelAction(self) self._code_of_sys_exc_info = None - + # can be overridden to a subclass self.initialize() @@ -643,21 +644,14 @@ # you should not see frames while you translate # so we make sure that the threadlocals never *have* an # ExecutionContext during translation. 
- if not we_are_translated(): - if self.config.translating: - assert self.threadlocals.get_ec() is None, ( - "threadlocals got an ExecutionContext during translation!") - try: - return self._ec_during_translation - except AttributeError: - ec = self.createexecutioncontext() - self._ec_during_translation = ec - return ec - else: - ec = self.threadlocals.get_ec() - if ec is None: - self.threadlocals.enter_thread(self) - ec = self.threadlocals.get_ec() + if not self._is_runtime: + assert self.threadlocals.get_ec() is None, ( + "threadlocals got an ExecutionContext during translation!") + try: + return self._ec_during_translation + except AttributeError: + ec = self.createexecutioncontext() + self._ec_during_translation = ec return ec else: # translated case follows. self.threadlocals is either from @@ -693,13 +687,11 @@ """Return an interp-level Lock object if threads are enabled, and a dummy object if they are not.""" from rpython.rlib import rthread + # There is no threading at objspace configuration time + if not self._is_runtime: + raise CannotHaveLock() if not self.config.objspace.usemodules.thread: return rthread.dummy_lock - # hack: we can't have prebuilt locks if we're translating. - # In this special situation we should just not lock at all - # (translation is not multithreaded anyway). - if not we_are_translated() and self.config.translating: - raise CannotHaveLock() try: return rthread.allocate_lock() except rthread.error: diff --git a/pypy/interpreter/test/test_targetpypy.py b/pypy/interpreter/test/test_targetpypy.py --- a/pypy/interpreter/test/test_targetpypy.py +++ b/pypy/interpreter/test/test_targetpypy.py @@ -9,6 +9,7 @@ entry_point(['pypy-c' , '-S', '-c', 'print 3']) def test_execute_source(space): + space._is_runtime = True _, d = create_entry_point(space, None) execute_source = d['pypy_execute_source'] lls = rffi.str2charp("import sys; sys.modules['xyz'] = 3") diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py --- a/pypy/module/_codecs/interp_codecs.py +++ b/pypy/module/_codecs/interp_codecs.py @@ -130,8 +130,7 @@ Looks up a codec tuple in the Python codec registry and returns a tuple of functions. 
""" - assert not (space.config.translating and not we_are_translated()), \ - "lookup_codec() should not be called during translation" + assert space._is_runtime state = space.fromcache(CodecState) normalized_encoding = encoding.replace(" ", "-").lower() w_result = state.get_codec_from_cache(normalized_encoding) diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py --- a/pypy/module/sys/__init__.py +++ b/pypy/module/sys/__init__.py @@ -103,7 +103,7 @@ } def startup(self, space): - if space.config.translating and not we_are_translated(): + if not space._is_runtime: # don't get the filesystemencoding at translation time assert self.filesystemencoding is None diff --git a/pypy/tool/pytest/objspace.py b/pypy/tool/pytest/objspace.py --- a/pypy/tool/pytest/objspace.py +++ b/pypy/tool/pytest/objspace.py @@ -29,6 +29,7 @@ if config is None: config = make_config(option) space = make_objspace(config) + space._is_runtime = True space.startup() # Initialize all builtin modules space.setitem(space.builtin.w_dict, space.wrap('AssertionError'), appsupport.build_pytest_assertion(space)) From pypy.commits at gmail.com Tue Mar 15 16:12:10 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 15 Mar 2016 13:12:10 -0700 (PDT) Subject: [pypy-commit] pypy bootstrap-clarity: Make module _cleanup_ explicit Message-ID: <56e86c9a.99e61c0a.ba4f9.ffff9d01@mx.google.com> Author: Ronan Lamy Branch: bootstrap-clarity Changeset: r83077:11e9cab4e26f Date: 2016-03-15 20:10 +0000 http://bitbucket.org/pypy/pypy/changeset/11e9cab4e26f/ Log: Make module _cleanup_ explicit diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -633,6 +633,8 @@ self.getbuiltinmodule('__builtin__') for mod in self.builtin_modules.values(): mod.setup_after_space_initialization() + for mod in self.builtin_modules.values(): + mod.cleanup() def initialize(self): """NOT_RPYTHON: Abstract method that should put some minimal diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py --- a/pypy/interpreter/mixedmodule.py +++ b/pypy/interpreter/mixedmodule.py @@ -123,7 +123,7 @@ self.save_module_content_for_future_reload() return self.w_dict - def _cleanup_(self): + def cleanup(self): self.getdict(self.space) self.w_initialdict = None self.startup_called = False diff --git a/pypy/interpreter/module.py b/pypy/interpreter/module.py --- a/pypy/interpreter/module.py +++ b/pypy/interpreter/module.py @@ -29,7 +29,7 @@ space.w_None) self.startup_called = False - def _cleanup_(self): + def cleanup(self): """Called by the annotator on prebuilt Module instances. 
We don't have many such modules, but for the ones that show up, remove their __file__ rather than translate it diff --git a/pypy/interpreter/test/test_appinterp.py b/pypy/interpreter/test/test_appinterp.py --- a/pypy/interpreter/test/test_appinterp.py +++ b/pypy/interpreter/test/test_appinterp.py @@ -3,30 +3,30 @@ from pypy.interpreter.gateway import appdef, ApplevelClass, applevel_temp from pypy.interpreter.error import OperationError -def test_execwith_novars(space): - val = space.appexec([], """ - (): - return 42 - """) +def test_execwith_novars(space): + val = space.appexec([], """ + (): + return 42 + """) assert space.eq_w(val, space.wrap(42)) -def test_execwith_withvars(space): +def test_execwith_withvars(space): val = space.appexec([space.wrap(7)], """ - (x): - y = 6 * x - return y - """) + (x): + y = 6 * x + return y + """) assert space.eq_w(val, space.wrap(42)) -def test_execwith_compile_error(space): +def test_execwith_compile_error(space): excinfo = py.test.raises(OperationError, space.appexec, [], """ - (): - y y + (): + y y """) - assert str(excinfo.value.errorstr(space)).find('y y') != -1 + assert str(excinfo.value.errorstr(space)).find('y y') != -1 def test_simple_applevel(space): - app = appdef("""app(x,y): + app = appdef("""app(x,y): return x + y """) assert app.func_name == 'app' @@ -34,15 +34,15 @@ assert space.eq_w(w_result, space.wrap(42)) def test_applevel_with_one_default(space): - app = appdef("""app(x,y=1): + app = appdef("""app(x,y=1): return x + y """) assert app.func_name == 'app' - w_result = app(space, space.wrap(41)) + w_result = app(space, space.wrap(41)) assert space.eq_w(w_result, space.wrap(42)) def test_applevel_with_two_defaults(space): - app = appdef("""app(x=1,y=2): + app = appdef("""app(x=1,y=2): return x + y """) w_result = app(space, space.wrap(41), space.wrap(1)) @@ -56,19 +56,19 @@ def test_applevel_noargs(space): - app = appdef("""app(): - return 42 + app = appdef("""app(): + return 42 """) assert app.func_name == 'app' - w_result = app(space) + w_result = app(space) assert space.eq_w(w_result, space.wrap(42)) -def somefunc(arg2=42): - return arg2 +def somefunc(arg2=42): + return arg2 -def test_app2interp_somefunc(space): - app = appdef(somefunc) - w_result = app(space) +def test_app2interp_somefunc(space): + app = appdef(somefunc) + w_result = app(space) assert space.eq_w(w_result, space.wrap(42)) def test_applevel_functions(space, applevel_temp = applevel_temp): @@ -85,45 +85,45 @@ def test_applevel_class(space, applevel_temp = applevel_temp): app = applevel_temp(''' class C(object): - clsattr = 42 - def __init__(self, x=13): - self.attr = x + clsattr = 42 + def __init__(self, x=13): + self.attr = x ''') C = app.interphook('C') - c = C(space, space.wrap(17)) + c = C(space, space.wrap(17)) w_attr = space.getattr(c, space.wrap('clsattr')) assert space.eq_w(w_attr, space.wrap(42)) w_clsattr = space.getattr(c, space.wrap('attr')) assert space.eq_w(w_clsattr, space.wrap(17)) -def app_test_something_at_app_level(): +def app_test_something_at_app_level(): x = 2 assert x/2 == 1 -class AppTestMethods: - def test_some_app_test_method(self): +class AppTestMethods: + def test_some_app_test_method(self): assert 2 == 2 -class TestMixedModule: - def test_accesses(self): +class TestMixedModule: + def test_accesses(self): space = self.space - import demomixedmod + import demomixedmod w_module = demomixedmod.Module(space, space.wrap('mixedmodule')) space.appexec([w_module], """ - (module): - assert module.value is None + (module): + assert module.value is None 
assert module.__doc__ == 'mixedmodule doc' - assert module.somefunc is module.somefunc - result = module.somefunc() - assert result == True + assert module.somefunc is module.somefunc + result = module.somefunc() + assert result == True - assert module.someappfunc is module.someappfunc - appresult = module.someappfunc(41) - assert appresult == 42 + assert module.someappfunc is module.someappfunc + appresult = module.someappfunc(41) + assert appresult == 42 assert module.__dict__ is module.__dict__ - for name in ('somefunc', 'someappfunc', '__doc__', '__name__'): + for name in ('somefunc', 'someappfunc', '__doc__', '__name__'): assert name in module.__dict__ """) assert space.is_true(w_module.call('somefunc')) @@ -172,8 +172,7 @@ # Uncomment this line for a workaround # space.getattr(w_ssl, space.wrap('SSLError')) - w_socket._cleanup_() + w_socket.cleanup() assert w_socket.startup_called == False - w_ssl._cleanup_() # w_ssl.appleveldefs['SSLError'] imports _socket + w_ssl.cleanup() # w_ssl.appleveldefs['SSLError'] imports _socket assert w_socket.startup_called == False - From pypy.commits at gmail.com Wed Mar 16 04:44:34 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 16 Mar 2016 01:44:34 -0700 (PDT) Subject: [pypy-commit] pypy default: Document issue (from pypy-dev) Message-ID: <56e91cf2.d4b61c0a.59947.53e0@mx.google.com> Author: Armin Rigo Branch: Changeset: r83078:eda9fd6a0601 Date: 2016-03-16 09:43 +0100 http://bitbucket.org/pypy/pypy/changeset/eda9fd6a0601/ Log: Document issue (from pypy-dev) diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -1059,6 +1059,14 @@ of JIT running like JIT loops compiled, aborts etc. An instance of this class will be available as policy.jithookiface. """ + # WARNING: You should make a single prebuilt instance of a subclass + # of this class. You can, before translation, initialize some + # attributes on this instance, and then read or change these + # attributes inside the methods of the subclass. But this prebuilt + # instance *must not* be seen during the normal annotation/rtyping + # of the program! A line like ``pypy_hooks.foo = ...`` must not + # appear inside your interpreter's RPython code. + def on_abort(self, reason, jitdriver, greenkey, greenkey_repr, logops, operations): """ A hook called each time a loop is aborted with jitdriver and greenkey where it started, reason is a string why it got aborted From pypy.commits at gmail.com Wed Mar 16 05:48:04 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 16 Mar 2016 02:48:04 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <56e92bd4.a151c20a.a8c9.ffffc34e@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r722:3a1c644cbf2c Date: 2016-03-16 10:47 +0100 http://bitbucket.org/pypy/pypy.org/changeset/3a1c644cbf2c/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $63079 of $105000 (60.1%) + $63312 of $105000 (60.3%)
diff --git a/don4.html b/don4.html --- a/don4.html +++ b/don4.html @@ -17,7 +17,7 @@ 2nd call: - $30524 of $80000 (38.2%) + $30534 of $80000 (38.2%)
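To make the WARNING added to rpython/rlib/jit.py in the "Document issue (from pypy-dev)" changeset above concrete, here is a small illustrative sketch of the intended usage. The names MyJitHooks, my_hooks and report_aborts are invented for the example and are not part of the changeset; the point is that the single prebuilt hook instance is configured before translation and is never assigned to from the interpreter's RPython code.

    from rpython.rlib.jit import JitHookInterface

    class MyJitHooks(JitHookInterface):
        def on_abort(self, reason, jitdriver, greenkey, greenkey_repr,
                     logops, operations):
            # reading attributes that were filled in before translation is fine
            if self.report_aborts:
                pass  # e.g. count or log the abort

    my_hooks = MyJitHooks()           # the single prebuilt instance
    my_hooks.report_aborts = True     # set up before translation only;
                                      # 'my_hooks.foo = ...' must never appear
                                      # inside RPython-level interpreter code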
  • From pypy.commits at gmail.com Wed Mar 16 06:16:50 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 16 Mar 2016 03:16:50 -0700 (PDT) Subject: [pypy-commit] pypy default: Fix gcc warnings Message-ID: <56e93292.05de1c0a.4b2c3.728a@mx.google.com> Author: Armin Rigo Branch: Changeset: r83079:47ef86de15e5 Date: 2016-03-16 11:16 +0100 http://bitbucket.org/pypy/pypy/changeset/47ef86de15e5/ Log: Fix gcc warnings diff --git a/rpython/rlib/rvmprof/src/vmprof_common.h b/rpython/rlib/rvmprof/src/vmprof_common.h --- a/rpython/rlib/rvmprof/src/vmprof_common.h +++ b/rpython/rlib/rvmprof/src/vmprof_common.h @@ -24,7 +24,7 @@ char padding[sizeof(long) - 1]; char marker; long count, depth; - void *stack[]; + intptr_t stack[]; } prof_stacktrace_s; diff --git a/rpython/rlib/rvmprof/src/vmprof_main_win32.h b/rpython/rlib/rvmprof/src/vmprof_main_win32.h --- a/rpython/rlib/rvmprof/src/vmprof_main_win32.h +++ b/rpython/rlib/rvmprof/src/vmprof_main_win32.h @@ -101,7 +101,7 @@ depth = get_stack_trace(p->vmprof_tl_stack, stack->stack, MAX_STACK_DEPTH-2, ctx.Eip); stack->depth = depth; - stack->stack[depth++] = (void*)p->thread_ident; + stack->stack[depth++] = p->thread_ident; stack->count = 1; stack->marker = MARKER_STACKTRACE; ResumeThread(hThread); From pypy.commits at gmail.com Wed Mar 16 06:43:53 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 16 Mar 2016 03:43:53 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: XXX comment Message-ID: <56e938e9.a151c20a.a8c9.ffffdbd4@mx.google.com> Author: Armin Rigo Branch: jit-leaner-frontend Changeset: r83080:92976799896e Date: 2016-03-16 11:43 +0100 http://bitbucket.org/pypy/pypy/changeset/92976799896e/ Log: XXX comment diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -266,6 +266,8 @@ v = self._floats_dict.get(box.getfloat(), -1) if v == -1: v = (len(self._floats) << 1) | 1 + # XXX the next line is bogus, can't use a float as + # dict key. Must convert it first to a longlong self._floats_dict[box.getfloat()] = v self._floats.append(box.getfloat()) return tag(TAGCONSTOTHER, v) From pypy.commits at gmail.com Wed Mar 16 09:06:57 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 16 Mar 2016 06:06:57 -0700 (PDT) Subject: [pypy-commit] pypy default: fix link (thanks Edd) Message-ID: <56e95a71.88c8c20a.db21a.1c61@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r83081:1fd8429ed560 Date: 2016-03-16 13:06 +0000 http://bitbucket.org/pypy/pypy/changeset/1fd8429ed560/ Log: fix link (thanks Edd) diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -80,7 +80,7 @@ .. _How to *not* write Virtual Machines for Dynamic Languages: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dyla2007/dyla.pdf .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf -.. _Automatic JIT Compiler Generation with Runtime Partial Evaluation: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf +.. _Automatic JIT Compiler Generation with Runtime Partial Evaluation: http://stups.hhu.de/mediawiki/images/b/b9/Master_bolz.pdf .. 
_`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/DynamicLanguages_abstracts.html#AACM-DLS07 .. _EU Reports: index-report.html .. _Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution: http://sabi.net/nriley/pubs/dls6-riley.pdf From pypy.commits at gmail.com Wed Mar 16 10:41:05 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 16 Mar 2016 07:41:05 -0700 (PDT) Subject: [pypy-commit] pypy default: this function with the somewhat scary comment is actually no longer used Message-ID: <56e97081.654fc20a.991cb.44a8@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r83082:e68330b4c7ee Date: 2016-03-16 15:40 +0100 http://bitbucket.org/pypy/pypy/changeset/e68330b4c7ee/ Log: this function with the somewhat scary comment is actually no longer used diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -144,14 +144,6 @@ cache[name, index] = attr return attr - @jit.elidable - def _get_cache_attr(self, name, index): - key = name, index - # this method is not actually elidable, but it's fine anyway - if self.cache_attrs is not None: - return self.cache_attrs.get(key, None) - return None - def add_attr(self, obj, name, index, w_value): self._reorder_and_add(obj, name, index, w_value) if not jit.we_are_jitted(): From pypy.commits at gmail.com Wed Mar 16 13:00:20 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 16 Mar 2016 10:00:20 -0700 (PDT) Subject: [pypy-commit] buildbot default: added a new buildbot to test if this is a machine config problem Message-ID: <56e99124.0357c20a.c858a.7c06@mx.google.com> Author: Richard Plangger Branch: Changeset: r995:5b2d6b27aacf Date: 2016-03-16 17:59 +0100 http://bitbucket.org/pypy/buildbot/changeset/5b2d6b27aacf/ Log: added a new buildbot to test if this is a machine config problem diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -187,6 +187,7 @@ JITLINUX32 = "pypy-c-jit-linux-x86-32" JITLINUX64 = "pypy-c-jit-linux-x86-64" JITLINUX_S390X = 'pypy-c-jit-linux-s390x' +JITLINUX_S390X_2 = 'pypy-c-jit-linux-s390x-2' JITMACOSX64 = "pypy-c-jit-macosx-x86-64" #JITMACOSX64_2 = "pypy-c-jit-macosx-x86-64-2" JITWIN32 = "pypy-c-jit-win-x86-32" @@ -308,6 +309,7 @@ # S390X vm (ibm-research) Nightly("nightly-4-00", [LINUX_S390X], branch='default', hour=0, minute=0), Nightly("nightly-4-01", [JITLINUX_S390X], branch='default', hour=2, minute=0), + Nightly("nightly-4-02", [JITLINUX_S390X_2], branch='default', hour=2, minute=0), # this one has faithfully run every night even though the latest # change to that branch was in January 2013. Re-enable one day. 
@@ -352,6 +354,7 @@ LINUX_S390X, JITLINUX_S390X, + JITLINUX_S390X_2, ] + ARM.builderNames, properties=[]), ] + ARM.schedulers, @@ -422,13 +425,6 @@ 'category': 'linux64', #"locks": [TannitCPU.access('counting')], }, - {'name': JITLINUX_S390X, - 'slavenames': ["dje"], - 'builddir': JITLINUX_S390X, - 'factory': pypyJITTranslatedTestFactoryS390X, - 'category': 'linux-s390x', - #"locks": [TannitCPU.access('counting')], - }, {"name": JITBENCH, "slavenames": ["tannit32"], "builddir": JITBENCH, @@ -528,6 +524,18 @@ "factory": pypyOwnTestFactory, "category": 's390x', }, + {'name': JITLINUX_S390X, + 'slavenames': ["dje"], + 'builddir': JITLINUX_S390X, + 'factory': pypyJITTranslatedTestFactoryS390X, + 'category': 'linux-s390x', + }, + {'name': JITLINUX_S390X_2, + 'slavenames': ['s390x-slave'], + 'builddir': JITLINUX_S390X_2, + 'factory': pypyJITTranslatedTestFactoryS390X, + 'category': 'linux-s390x', + }, ] + ARM.builders, # http://readthedocs.org/docs/buildbot/en/latest/tour.html#debugging-with-manhole From pypy.commits at gmail.com Wed Mar 16 13:13:35 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 16 Mar 2016 10:13:35 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: update the values Message-ID: <56e9943f.e6bbc20a.a5878.ffff80f0@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r723:94a5ad8113da Date: 2016-03-16 18:13 +0100 http://bitbucket.org/pypy/pypy.org/changeset/94a5ad8113da/ Log: update the values diff --git a/don1.html b/don1.html --- a/don1.html +++ b/don1.html @@ -9,13 +9,13 @@ - $63312 of $105000 (60.3%) + $63421 of $105000 (60.4%)
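Regarding the mapdict changeset above that drops _get_cache_attr (the helper whose own comment admitted it was "not actually elidable"): as a reminder of the contract involved, below is a minimal illustrative sketch of @jit.elidable, with a made-up class for the example. An elidable function must behave as a pure function of its arguments, so the JIT may constant-fold repeated calls with constant arguments; a lookup in a mutable cache dict does not satisfy that in general, which is what the removed comment was pointing out.

    from rpython.rlib import jit

    class Name(object):
        def __init__(self, chars):
            self.chars = chars      # treated as immutable after construction

    @jit.elidable
    def first_char(name):
        # depends only on its argument, so for a constant 'name' the JIT
        # may compute this once and reuse the result
        return name.chars[0]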
  • From pypy.commits at gmail.com Wed Mar 16 13:25:15 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 16 Mar 2016 10:25:15 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: fix properly (amaury) Message-ID: <56e996fb.8b941c0a.9fda4.ffffb9be@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83083:68a119b4877f Date: 2016-03-16 19:23 +0200 http://bitbucket.org/pypy/pypy/changeset/68a119b4877f/ Log: fix properly (amaury) diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -145,7 +145,7 @@ from pypy.module.cpyext.unicodeobject import ( PyUnicode_Check, _PyUnicode_AsDefaultEncodedString) if PyUnicode_Check(space, ref): - ref = _PyUnicode_AsDefaultEncodedString(space, ref, rffi.str2charp("")) + ref = _PyUnicode_AsDefaultEncodedString(space, ref, lltype.nullptr(rffi.CCHARP.TO)) else: raise oefmt(space.w_TypeError, "expected string or Unicode object, %T found", @@ -164,7 +164,7 @@ from pypy.module.cpyext.unicodeobject import ( PyUnicode_Check, _PyUnicode_AsDefaultEncodedString) if PyUnicode_Check(space, ref): - ref = _PyUnicode_AsDefaultEncodedString(space, ref, rffi.str2charp("")) + ref = _PyUnicode_AsDefaultEncodedString(space, ref, lltype.nullptr(rffi.CCHARP.TO)) else: raise oefmt(space.w_TypeError, "expected string or Unicode object, %T found", From pypy.commits at gmail.com Wed Mar 16 13:29:32 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 16 Mar 2016 10:29:32 -0700 (PDT) Subject: [pypy-commit] cffi default: Python 3 compat Message-ID: <56e997fc.857ac20a.dc7f4.ffff8397@mx.google.com> Author: Armin Rigo Branch: Changeset: r2649:c38d1fce0c24 Date: 2016-03-16 18:28 +0100 http://bitbucket.org/cffi/cffi/changeset/c38d1fce0c24/ Log: Python 3 compat diff --git a/c/call_python.c b/c/call_python.c --- a/c/call_python.c +++ b/c/call_python.c @@ -27,7 +27,7 @@ /* from there on, we know the (sub-)interpreter is still valid */ if (attr_name == NULL) { - attr_name = PyString_InternFromString("__cffi_backend_extern_py"); + attr_name = PyText_InternFromString("__cffi_backend_extern_py"); if (attr_name == NULL) goto error; } diff --git a/testing/embedding/test_basic.py b/testing/embedding/test_basic.py --- a/testing/embedding/test_basic.py +++ b/testing/embedding/test_basic.py @@ -142,7 +142,7 @@ env_extra[envname] = libpath for key, value in sorted(env_extra.items()): if os.environ.get(key) != value: - print '* setting env var %r to %r' % (key, value) + print('* setting env var %r to %r' % (key, value)) os.environ[key] = value def execute(self, name): From pypy.commits at gmail.com Wed Mar 16 14:24:04 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 16 Mar 2016 11:24:04 -0700 (PDT) Subject: [pypy-commit] pypy default: descriptors must fit on one line for appdef() Message-ID: <56e9a4c4.6774c20a.14549.ffff9cad@mx.google.com> Author: mattip Branch: Changeset: r83084:aaec8eb09c6a Date: 2016-03-16 20:23 +0200 http://bitbucket.org/pypy/pypy/changeset/aaec8eb09c6a/ Log: descriptors must fit on one line for appdef() diff --git a/pypy/module/thread/test/test_lock.py b/pypy/module/thread/test/test_lock.py --- a/pypy/module/thread/test/test_lock.py +++ b/pypy/module/thread/test/test_lock.py @@ -3,7 +3,7 @@ import sys, os from pypy.module.thread.test.support import GenericTestThread from rpython.translator.c.test.test_genc import compile -import platform +from platform import machine class AppTestLock(GenericTestThread): @@ -64,8 +64,7 @@ else: assert self.runappdirect, 
"missing lock._py3k_acquire()" - @py.test.mark.xfail(platform.machine() == 's390x', - reason='may fail this test under heavy load') + @py.test.mark.xfail(machine()=='s390x', reason='may fail under heavy load') def test_ping_pong(self): # The purpose of this test is that doing a large number of ping-pongs # between two threads, using locks, should complete in a reasonable From pypy.commits at gmail.com Wed Mar 16 15:27:22 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 16 Mar 2016 12:27:22 -0700 (PDT) Subject: [pypy-commit] pypy default: Fix the test, and then fix the code that the test really should complain about Message-ID: <56e9b39a.aa09c20a.60c4.ffffb577@mx.google.com> Author: Armin Rigo Branch: Changeset: r83085:d0fea664c105 Date: 2016-03-16 20:26 +0100 http://bitbucket.org/pypy/pypy/changeset/d0fea664c105/ Log: Fix the test, and then fix the code that the test really should complain about diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py --- a/pypy/module/_vmprof/test/test__vmprof.py +++ b/pypy/module/_vmprof/test/test__vmprof.py @@ -72,9 +72,9 @@ def test_enable_ovf(self): import _vmprof - raises(_vmprof.VMProfError, _vmprof.enable, 999, 0) - raises(_vmprof.VMProfError, _vmprof.enable, 999, -2.5) - raises(_vmprof.VMProfError, _vmprof.enable, 999, 1e300) - raises(_vmprof.VMProfError, _vmprof.enable, 999, 1e300 * 1e300) + raises(_vmprof.VMProfError, _vmprof.enable, 2, 0) + raises(_vmprof.VMProfError, _vmprof.enable, 2, -2.5) + raises(_vmprof.VMProfError, _vmprof.enable, 2, 1e300) + raises(_vmprof.VMProfError, _vmprof.enable, 2, 1e300 * 1e300) NaN = (1e300*1e300) / (1e300*1e300) - raises(_vmprof.VMProfError, _vmprof.enable, 999, NaN) + raises(_vmprof.VMProfError, _vmprof.enable, 2, NaN) diff --git a/rpython/rlib/rvmprof/src/vmprof_common.h b/rpython/rlib/rvmprof/src/vmprof_common.h --- a/rpython/rlib/rvmprof/src/vmprof_common.h +++ b/rpython/rlib/rvmprof/src/vmprof_common.h @@ -31,7 +31,7 @@ RPY_EXTERN char *vmprof_init(int fd, double interval, char *interp_name) { - if (interval < 1e-6 || interval >= 1.0) + if (!(interval >= 1e-6 && interval < 1.0)) /* also if it is NaN */ return "bad value for 'interval'"; prepare_interval_usec = (int)(interval * 1000000.0); From pypy.commits at gmail.com Wed Mar 16 15:39:03 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 16 Mar 2016 12:39:03 -0700 (PDT) Subject: [pypy-commit] pypy gcstress-hypothesis: generating control flow using basic blocks, need conditions and loops Message-ID: <56e9b657.02931c0a.ef1a0.ffffe7d8@mx.google.com> Author: Richard Plangger Branch: gcstress-hypothesis Changeset: r83086:65a4e92ce40f Date: 2016-03-15 11:31 +0100 http://bitbucket.org/pypy/pypy/changeset/65a4e92ce40f/ Log: generating control flow using basic blocks, need conditions and loops diff --git a/rpython/jit/backend/llsupport/tl/code.py b/rpython/jit/backend/llsupport/tl/code.py --- a/rpython/jit/backend/llsupport/tl/code.py +++ b/rpython/jit/backend/llsupport/tl/code.py @@ -92,6 +92,12 @@ code.append(struct.pack(typ, nmr)) return ''.join(code) + def transform_blocks(self, blocks): + for block in blocks: + for code_obj in block.opcodes: + code_obj.encode(self) + return self.to_string(), self.consts + def transform(self, code_objs): for code_obj in code_objs: code_obj.encode(self) @@ -239,6 +245,14 @@ def splits_control_flow(self): return True + @staticmethod + def should_jump(cond, value): + # TODO + if value == 0 and cond == 0: + return True + return False + + @requires_stack(LIST_TYP) 
@leaves_on_stack(LIST_TYP, INT_TYP) class LenList(ByteCode): @@ -269,6 +283,15 @@ # control flow byte codes BC_CF_CLASSES = [CondJump] +class ByteCodeBlock(object): + def __init__(self, stack): + self.init_stack = stack.copy() + self.exit_stack = None + self.opcodes = [] + + def interp_steps(self): + return len(self.opcodes) + class ByteCodeControlFlow(object): # see the deterministic control flow search startegy in # test/code_strategies.py for what steps & byte_codes mean @@ -276,3 +299,35 @@ self.blocks = [] self.steps = 0 self.byte_codes = 0 + + def interp_steps(self): + """ how many steps does the interpreter perform to + reach the end of the current control flow? + """ + return self.steps + + def linearize(self): + from rpython.jit.backend.llsupport.tl import code + ctx = code.Context() + bytecode, consts = ctx.transform_blocks(self.blocks) + return bytecode, consts + + def generate_block(self, data, last_block, strat): + if last_block: + stack = last_block.init_stack + else: + from rpython.jit.backend.llsupport.tl.stack import Stack + stack = Stack(0) + + bcb = ByteCodeBlock(stack) + opcodes = data.draw(strat.draw_from(stack, self)) + if not opcodes: + return None + bcb.exit_stack = stack.copy() + bcb.opcodes = opcodes + self.steps += bcb.interp_steps() + self.byte_codes += len(opcodes) + self.blocks.append(bcb) + return bcb + + diff --git a/rpython/jit/backend/llsupport/tl/interp.py b/rpython/jit/backend/llsupport/tl/interp.py --- a/rpython/jit/backend/llsupport/tl/interp.py +++ b/rpython/jit/backend/llsupport/tl/interp.py @@ -161,17 +161,19 @@ assert isinstance(w_lst, W_ListObject) stack.append(space.wrap(w_lst.size())) elif opcode == code.CondJump.BYTE_CODE: + assert i >= 0 cond = runpack('b', bytecode[i+1:i+2]) offset = runpack('i', bytecode[i+2:i+6]) - w_int = stack.pop(0) - assert isinstance(w_lst, W_IntObject) + w_int = stack.pop() + assert isinstance(w_int, W_IntObject) i += 5 - if CondJump.should_jump(cond, w_int.value): + if code.CondJump.should_jump(cond, w_int.value): if offset < 0: pass # TODO jit driver # the new position is calculated at the end of # this jump instruction!! 
i += offset + assert i >= 0 else: print("opcode %d is not implemented" % opcode) raise NotImplementedError diff --git a/rpython/jit/backend/llsupport/tl/test/code_strategies.py b/rpython/jit/backend/llsupport/tl/test/code_strategies.py --- a/rpython/jit/backend/llsupport/tl/test/code_strategies.py +++ b/rpython/jit/backend/llsupport/tl/test/code_strategies.py @@ -2,7 +2,9 @@ from hypothesis.control import assume from hypothesis.strategies import composite from rpython.jit.backend.llsupport.tl import code, interp, stack +from rpython.jit.backend.llsupport.tl.stack import Stack from hypothesis.searchstrategy.collections import TupleStrategy, ListStrategy +from hypothesis.searchstrategy.strategies import SearchStrategy, one_of_strategies import hypothesis.internal.conjecture.utils as cu from collections import namedtuple @@ -33,12 +35,12 @@ def runtime_stack(min_size=0, average_size=5, max_size=4096, types=code.all_types): if max_size == 0: - return st.just(stack.Stack(0)) + return st.just(Stack(0)) stack_entries = st.lists(stack_entry(all_types), min_size=min_size, average_size=average_size, max_size=max_size) return stack_entries.map(lambda elems: \ - stack.Stack.from_items(STD_SPACE, elems)) + Stack.from_items(STD_SPACE, elems)) def get_byte_code_class(num): return code.BC_NUM_TO_CLASS[num] @@ -63,7 +65,6 @@ data.draw(self.element_strategy) for _ in range(self.min_size) ] - stopping_value = 1 - 1.0 / (1 + self.average_length) result = [] while True: @@ -94,9 +95,12 @@ @st.defines_strategy def basic_block(strategy, min_size=1, average_size=8, max_size=128): + assert max_size >= 1 + if average_size < max_size: + average_size = max_size//2 return BasicBlockStrategy([strategy], min_size=min_size, average_length=average_size, - max_size=max_size) + max_size=int(max_size)) @st.defines_strategy def bytecode_class(stack): @@ -106,9 +110,10 @@ @composite -def bytecode(draw, max_stack_size=4096): +def bytecode(draw, run_stack=None): # get a stack that is the same for one test run - run_stack = draw(st.shared(st.just(stack.Stack(0)), 'stack')) + if run_stack is None: + run_stack = draw(st.shared(st.just(Stack(0)), 'stack')) # get a byte code class, only allow what is valid for the run_stack clazzes = filter(lambda clazz: clazz.filter_bytecode(run_stack), code.BC_CLASSES) @@ -134,42 +139,52 @@ max_byte_codes: the amount of bytecodes the final program has """ - def __init__(self, stack, min_steps=1, max_steps=2**16, max_byte_codes=5000): + def __init__(self, stack, min_steps=1, max_steps=2**16, max_byte_codes=4000): SearchStrategy.__init__(self) self.stack = stack self.max_steps = float(max_steps) self.min_steps = min_steps + self.average_steps = (self.max_steps - self.min_steps) / 2.0 self.max_byte_codes = max_byte_codes - # self.element_strategy = one_of_strategies(strategies) - def validate(self): pass #self.element_strategy.validate() + def draw_from(self, stack, bccf): + left = int(self.max_steps - bccf.interp_steps()) + if left <= 0: + return st.just(None) + if left > 32: + left = 32 + # either draw a normal basic block + strats = [basic_block(bytecode(stack), max_size=left)] + # or draw a loop + #strats.append(deterministic_loop(bytecode(stack))) + # or draw a conditional + #strats.append(conditional(bytecode(stack))) + return one_of_strategies(strats) + def do_draw(self, data): bccf = code.ByteCodeControlFlow() - result = [] + last_block = None + stopping_value = 1 - 1.0 / (1 + self.average_steps) while True: - stopping_value = 1 - 1.0 / (1 + self.average_length) data.start_example() + block = 
bccf.generate_block(data, last_block, self) + data.stop_example() + if block is None: + break # enough is enough! more = cu.biased_coin(data, stopping_value) if not more: - data.stop_example() - if len(result) < self.min_size: - continue - else: - break - value = data.draw(self.element_strategy) - data.stop_example() - result.append(value) + break return bccf @st.defines_strategy -def control_flow_graph(draw, stack=None, blocks): +def control_flow_graph(stack=None): if stack is None: # get a stack that is the same for one test run - stack = stack.Stack(0) + stack = Stack(0) return DeterministicControlFlowSearchStrategy(stack) diff --git a/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py b/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py --- a/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py +++ b/rpython/jit/backend/llsupport/tl/test/test_tl_interp.py @@ -101,16 +101,18 @@ class TestInterp(object): @given(st.basic_block(st.bytecode(), min_size=1)) - def test_execute_bytecode_block(self, bc_obj_list): - self.execute(bc_obj_list) + def test_execute_block(self, bc_obj_list): + bytecode, consts = code.Context().transform(bc_obj_list) + self.execute(bytecode, consts) @given(st.control_flow_graph()) - def test_execute_bytecode_block(self, cfg): - bc_obj_list = cfg.linearize() - self.execute(bc_obj_list) + @settings(perform_health_check=False, min_satisfying_examples=1000) + def test_execute_cfg(self, cfg): + print("execute_cfg: cfg with steps:", cfg.interp_steps()) + bytecode, consts = cfg.linearize() + self.execute(bytecode, consts) - def execute(self, bc_obj_list): - bytecode, consts = code.Context().transform(bc_obj_list) + def execute(self, bytecode, consts): space = interp.Space() pc = 0 end = len(bytecode) diff --git a/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py b/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py --- a/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py +++ b/rpython/jit/backend/llsupport/tl/test/zrpy_gc_hypo_test.py @@ -1,5 +1,5 @@ import py -from hypothesis import given +from hypothesis import given, settings from hypothesis.strategies import lists from rpython.tool.udir import udir from rpython.jit.metainterp.optimize import SpeculativeError @@ -68,3 +68,13 @@ if result != 0: raise Exception(("could not run program. returned %d" " stderr:\n%s\nstdout:\n%s\n") % (result, err, out)) + + @given(st.control_flow_graph()) + @settings(perform_health_check=False, min_satisfying_examples=1000) + def test_execute_cfg(self, cfg): + print "execute_cfg: cfg with steps:", cfg.interp_steps() + bytecode, consts = cfg.linearize() + result, out, err = self.execute(bytecode, consts) + if result != 0: + raise Exception(("could not run program. 
returned %d" + " stderr:\n%s\nstdout:\n%s\n") % (result, err, out)) From pypy.commits at gmail.com Wed Mar 16 15:39:07 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 16 Mar 2016 12:39:07 -0700 (PDT) Subject: [pypy-commit] pypy fix-longevity: catchup with default Message-ID: <56e9b65b.4a811c0a.1703e.ffffecaa@mx.google.com> Author: Richard Plangger Branch: fix-longevity Changeset: r83087:f64a2c834041 Date: 2016-03-15 11:52 +0100 http://bitbucket.org/pypy/pypy/changeset/f64a2c834041/ Log: catchup with default diff too long, truncating to 2000 out of 17154 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -74,5 +74,6 @@ ^rpython/doc/_build/.*$ ^compiled ^.git/ +^.hypothesis/ ^release/ ^rpython/_cache$ diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -18,3 +18,4 @@ f3ad1e1e1d6215e20d34bb65ab85ff9188c9f559 release-2.6.1 850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0 5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1 +246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -240,6 +240,7 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner + Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -271,6 +272,7 @@ Aaron Tubbs Ben Darnell Roberto De Ioris + Logan Chien Juan Francisco Cantero Hurtado Ruochen Huang Jeong YunWon diff --git a/lib-python/2.7/xml/etree/ElementTree.py b/lib-python/2.7/xml/etree/ElementTree.py --- a/lib-python/2.7/xml/etree/ElementTree.py +++ b/lib-python/2.7/xml/etree/ElementTree.py @@ -1606,7 +1606,17 @@ pubid = pubid[1:-1] if hasattr(self.target, "doctype"): self.target.doctype(name, pubid, system[1:-1]) - elif self.doctype is not self._XMLParser__doctype: + elif 1: # XXX PyPy fix, used to be + # elif self.doctype is not self._XMLParser__doctype: + # but that condition is always True on CPython, as far + # as I can tell: self._XMLParser__doctype always + # returns a fresh unbound method object. + # On PyPy, unbound and bound methods have stronger + # unicity guarantees: self._XMLParser__doctype + # can return the same unbound method object, in + # some cases making the test above incorrectly False. + # (My guess would be that the line above is a backport + # from Python 3.) # warn about deprecated call self._XMLParser__doctype(name, pubid, system[1:-1]) self.doctype(name, pubid, system[1:-1]) diff --git a/lib_pypy/ctypes_config_cache/rebuild.py b/lib_pypy/ctypes_config_cache/rebuild.py --- a/lib_pypy/ctypes_config_cache/rebuild.py +++ b/lib_pypy/ctypes_config_cache/rebuild.py @@ -9,9 +9,8 @@ _dirpath = os.path.dirname(__file__) or os.curdir -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer("ctypes_config_cache") -py.log.setconsumer("ctypes_config_cache", ansi_log) +from rpython.tool.ansi_print import AnsiLogger +log = AnsiLogger("ctypes_config_cache") def rebuild_one(name): diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -123,7 +123,7 @@ # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +html_title = 'PyPy documentation' # A shorter title for the navigation bar. Default is the same as html_title. 
#html_short_title = None diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -11,29 +11,29 @@ Amaury Forgeot d'Arc Antonio Cuni Samuele Pedroni + Matti Picus Alex Gaynor Brian Kearns - Matti Picus Philip Jenvey Michael Hudson + Ronan Lamy David Schneider + Manuel Jacob Holger Krekel Christian Tismer Hakan Ardo - Manuel Jacob - Ronan Lamy Benjamin Peterson + Richard Plangger Anders Chrigstrom Eric van Riet Paap Wim Lavrijsen - Richard Plangger Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen + Remi Meier Lukas Diekmann Sven Hager Anders Lehmann - Remi Meier Aurelien Campeas Niklaus Haldimann Camillo Bruni @@ -42,8 +42,8 @@ Romain Guillebert Leonardo Santagada Seo Sanghyeon + Ronny Pfannschmidt Justin Peel - Ronny Pfannschmidt David Edelsohn Anders Hammarquist Jakub Gustak @@ -65,6 +65,7 @@ Tyler Wade Michael Foord Stephan Diehl + Vincent Legoll Stefan Schwarzer Valentino Volonghi Tomek Meka @@ -75,9 +76,9 @@ Jean-Paul Calderone Timo Paulssen Squeaky + Marius Gedminas Alexandre Fayolle Simon Burton - Marius Gedminas Martin Matusiak Konstantin Lopuhin Wenzhu Man @@ -86,16 +87,20 @@ Ivan Sichmann Freitas Greg Price Dario Bertini + Stefano Rivera Mark Pearse Simon Cross Andreas Stührk - Stefano Rivera + Edd Barrett Jean-Philippe St. Pierre Guido van Rossum Pavel Vinogradov + Jeremy Thurgood Paweł Piotr Przeradowski + Spenser Bauman Paul deGrandis Ilya Osadchiy + marky1991 Tobias Oberstein Adrian Kuhn Boris Feigin @@ -104,14 +109,12 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Edd Barrett + Tobias Pape Wanja Saatkamp Gerald Klix Mike Blume - Tobias Pape Oscar Nierstrasz Stefan H. Muller - Jeremy Thurgood Rami Chowdhury Eugene Oden Henry Mason @@ -123,6 +126,8 @@ Lukas Renggli Guenter Jantzen Ned Batchelder + Tim Felgentreff + Anton Gulenko Amit Regmi Ben Young Nicolas Chauvat @@ -132,12 +137,12 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira - Tim Felgentreff + Yichao Yu Rocco Moretti Gintautas Miliauskas Michael Twomey Lucian Branescu Mihaila - Yichao Yu + Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -161,33 +166,33 @@ Stanislaw Halik Mikael Schönenberg Berkin Ilbeyi - Elmo M?ntynen + Elmo Mäntynen + Faye Zhao Jonathan David Riehl Anders Qvist Corbin Simpson Chirag Jadwani Beatrice During Alex Perry - Vincent Legoll + Vaibhav Sood Alan McIntyre - Spenser Bauman + William Leslie Alexander Sedov Attila Gobi + Jasper.Schulz Christopher Pope - Devin Jeanpierre - Vaibhav Sood Christian Tismer Marc Abramowitz Dan Stromberg Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan + Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang - Anton Gulenko Gabriel Lukas Vacek Andrew Dalke @@ -195,6 +200,7 @@ Jakub Stasiak Nathan Taylor Vladimir Kryachko + Omer Katz Jacek Generowicz Alejandro J. Cura Jacob Oscarson @@ -204,11 +210,13 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner + Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann Philipp Rustemeuer Henrik Vendelbo + Richard Lancaster Dan Buch Miguel de Val Borro Artur Lisiecki @@ -220,20 +228,21 @@ Tomo Cocoa Kim Jin Su Toni Mattis + Amber Brown Lucas Stadler Julian Berman Markus Holtermann roberto at goyle Yury V. 
Zaytsev Anna Katrina Dominguez - William Leslie Bobby Impollonia - Faye Zhao timo at eistee.fritz.box Andrew Thompson Yusei Tahara + Aaron Tubbs Ben Darnell Roberto De Ioris + Logan Chien Juan Francisco Cantero Hurtado Ruochen Huang Jeong YunWon @@ -243,6 +252,7 @@ Christopher Armstrong Michael Hudson-Doyle Anders Sigfridsson + Nikolay Zinov Yasir Suhail Jason Michalski rafalgalczynski at gmail.com @@ -252,6 +262,7 @@ Gustavo Niemeyer Stephan Busemann Rafał Gałczyński + Matt Bogosian Christian Muirhead Berker Peksag James Lan @@ -286,9 +297,9 @@ Stefan Marr jiaaro Mads Kiilerich - Richard Lancaster opassembler.py Antony Lee + Jason Madden Yaroslav Fedevych Jim Hunziker Markus Unterwaditzer @@ -297,6 +308,7 @@ squeaky Zearin soareschen + Jonas Pfannschmidt Kurt Griffiths Mike Bayer Matthew Miller @@ -311,4 +323,3 @@ Julien Phalip Roman Podoliaka Dan Loewenherz - diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -265,7 +265,7 @@ return False def evil(y): - d = {x(): 1} + d = {X(): 1} X.__eq__ = __evil_eq__ d[y] # might trigger a call to __eq__? diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -76,5 +76,4 @@ * add a tag on the pypy/jitviewer repo that corresponds to pypy release * add a tag on the codespeed web site that corresponds to pypy release -* update the version number in {rpython,pypy}/doc/conf.py. * revise versioning at https://readthedocs.org/projects/pypy diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-5.0.0.rst release-4.0.1.rst release-4.0.0.rst release-2.6.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. toctree:: whatsnew-head.rst + whatsnew-5.0.0.rst whatsnew-4.0.1.rst whatsnew-4.0.0.rst whatsnew-2.6.1.rst diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -167,22 +167,13 @@ * `hg` -Embedding PyPy and improving CFFI ---------------------------------- - -PyPy has some basic :doc:`embedding infrastructure `. The idea would be to improve -upon that with cffi hacks that can automatically generate embeddable .so/.dll -library - - Optimising cpyext (CPython C-API compatibility layer) ----------------------------------------------------- A lot of work has gone into PyPy's implementation of CPython's C-API over the last years to let it reach a practical level of compatibility, so that C extensions for CPython work on PyPy without major rewrites. However, -there are still many edges and corner cases where it misbehaves, and it has -not received any substantial optimisation so far. +there are still many edges and corner cases where it misbehaves. The objective of this project is to fix bugs in cpyext and to optimise several performance critical parts of it, such as the reference counting diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-5.0.0.rst @@ -0,0 +1,230 @@ +======== +PyPy 5.0 +======== + +We have released PyPy 5.0, about three months after PyPy 4.0.1. +We encourage all users of PyPy to update to this version. 
Apart from the usual +bug fixes, there is an ongoing effort to improve the warmup time and memory +usage of JIT-related metadata. The exact effects depend vastly on the program +you're running and can range from insignificant to warmup being up to 30% +faster and memory dropping by about 30%. + +We also merged a major upgrade to our C-API layer (cpyext), simplifying the +interaction between c-level objects and PyPy interpreter level objects. As a +result, lxml (prerelease) with its cython compiled component +`passes all tests`_ on PyPy. The new cpyext is also much faster. + +vmprof_ has been a go-to profiler for PyPy on linux for a few releases +and we're happy to announce that thanks to the cooperation with jetbrains, +vmprof now works on Linux, OS X and Windows on both PyPy and CPython. + +You can download the PyPy 5.0 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + +CFFI +==== + +While not applicable only to PyPy, `cffi`_ is arguably our most significant +contribution to the python ecosystem. PyPy 5.0 ships with +`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a C program. + +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`cffi`: https://cffi.readthedocs.org +.. _`cffi-1.5.2`: http://cffi.readthedocs.org/en/latest/whatsnew.html#v1-5-2 +.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html +.. _`numpy`: https://bitbucket.org/pypy/numpy +.. _`passes all tests`: https://bitbucket.org/pypy/compatibility/wiki/lxml +.. _vmprof: http://vmprof.readthedocs.org + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. + +This release supports **x86** machines on most common operating systems +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), +newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the +big- and little-endian variants of **PPC64** running Linux. + +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Other Highlights (since 4.0.1 released in November 2015) +========================================================= + +* New features: + + * Support embedding PyPy in a C-program via cffi and static callbacks in cffi. + This deprecates the old method of embedding PyPy + + * Refactor vmprof to work cross-operating-system, deprecate using buggy + libunwind on Linux platforms. Vmprof even works on Windows now. + + * Support more of the C-API type slots, like tp_getattro, and fix C-API + macros, functions, and structs such as _PyLong_FromByteArray(), + PyString_GET_SIZE, f_locals in PyFrameObject, Py_NAN, co_filename in + PyCodeObject + + * Use a more stable approach for allocating PyObjects in cpyext. (see + `blog post`_). 
Once the PyObject corresponding to a PyPy object is created, + it stays around at the same location until the death of the PyPy object. + Done with a little bit of custom GC support. It allows us to kill the + notion of "borrowing" inside cpyext, reduces 4 dictionaries down to 1, and + significantly simplifies the whole approach (which is why it is a new + feature while technically a refactoring) and allows PyPy to support the + populart lxml module (as of the *next* release) with no PyPy specific + patches needed + + * Make the default filesystem encoding ASCII, like CPython + + * Use `hypothesis`_ in test creation, which is great for randomizing tests + +* Bug Fixes + + * Backport always using os.urandom for uuid4 from cpython and fix the JIT as well + (issue #2202) + + * More completely support datetime, optimize timedelta creation + + * Fix for issue #2185 which caused an inconsistent list of operations to be + generated by the unroller, appeared in a complicated DJango app + + * Fix an elusive issue with stacklets on shadowstack which showed up when + forgetting stacklets without resuming them + + * Fix entrypoint() which now acquires the GIL + + * Fix direct_ffi_call() so failure does not bail out before setting CALL_MAY_FORCE + + * Fix (de)pickling long values by simplifying the implementation + + * Fix RPython rthread so that objects stored as threadlocal do not force minor + GC collection and are kept alive automatically. This improves perfomance of + short-running Python callbacks and prevents resetting such object between + calls + + * Support floats as parameters to itertools.isslice() + + * Check for the existence of CODESET, ignoring it should have prevented PyPy + from working on FreeBSD + + * Fix for corner case (likely shown by Krakatau) for consecutive guards with + interdependencies + + * Fix applevel bare class method comparisons which should fix pretty printing + in IPython + + * Issues reported with our previous release were resolved_ after reports from users on + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy + +* Numpy: + + * Updates to numpy 1.10.2 (incompatibilities and not-implemented features + still exist) + + * Support dtype=(('O', spec)) union while disallowing record arrays with + mixed object, non-object values + + * Remove all traces of micronumpy from cpyext if --withoutmod-micronumpy option used + + * Support indexing filtering with a boolean ndarray + + * Support partition() as an app-level function, together with a cffi wrapper + in pypy/numpy, this now provides partial support for partition() + +* Performance improvements: + + * Optimize global lookups + + * Improve the memory signature of numbering instances in the JIT. This should + massively decrease the amount of memory consumed by the JIT, which is + significant for most programs. Also compress the numberings using variable- + size encoding + + * Optimize string concatenation + + * Use INT_LSHIFT instead of INT_MUL when possible + + * Improve struct.unpack by casting directly from the underlying buffer. + Unpacking floats and doubles is about 15 times faster, and integer types + about 50% faster (on 64 bit integers). This was then subsequently + improved further in optimizeopt.py. 
+ + * Optimize two-tuple lookups in mapdict, which improves warmup of instance + variable access somewhat + + * Reduce all guards from int_floordiv_ovf if one of the arguments is constant + + * Identify permutations of attributes at instance creation, reducing the + number of bridges created + + * Greatly improve re.sub() performance + + +* Internal refactorings: + + * Refactor and improve exception analysis in the annotator + + * Remove unnecessary special handling of space.wrap(). + + * Support list-resizing setslice operations in RPython + + * Tweak the trace-too-long heuristic for multiple jit drivers + + * Refactor bookkeeping (such a cool word - three double letters) in the + annotater + + * Refactor wrappers for OS functions from rtyper to rlib and simplify them + + * Simplify backend loading instructions to only use four variants + + * Simplify GIL handling in non-jitted code + + * Refactor naming in optimizeopt + + * Change GraphAnalyzer to use a more precise way to recognize external + functions and fix null pointer handling, generally clean up external + function handling + + * Remove pure variants of ``getfield_gc_*`` operations from the JIT by + determining purity while tracing + + * Refactor databasing + + * Simplify bootstrapping in cpyext + + * Refactor rtyper debug code into python.rtyper.debug + + * Seperate structmember.h from Python.h Also enhance creating api functions + to specify which header file they appear in (previously only pypy_decl.h) + + * Fix tokenizer to enforce universal newlines, needed for Python 3 support + +.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html +.. _`hypothesis`: http://hypothesis.readthedocs.org +.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html + +Please update, and continue to help us make PyPy better. + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-5.0.0.rst b/pypy/doc/whatsnew-5.0.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/whatsnew-5.0.0.rst @@ -0,0 +1,197 @@ +====================== +What's new in PyPy 5.0 +====================== + +.. this is a revision shortly after release-4.0.1 +.. startrev: 4b5c840d0da2 + +Fixed ``_PyLong_FromByteArray()``, which was buggy. + +Fixed a crash with stacklets (or greenlets) on non-Linux machines +which showed up if you forget stacklets without resuming them. + +.. branch: numpy-1.10 + +Fix tests to run cleanly with -A and start to fix micronumpy for upstream numpy +which is now 1.10.2 + +.. branch: osx-flat-namespace + +Fix the cpyext tests on OSX by linking with -flat_namespace + +.. branch: anntype + +Refactor and improve exception analysis in the annotator. + +.. branch: posita/2193-datetime-timedelta-integrals + +Fix issue #2193. ``isinstance(..., int)`` => ``isinstance(..., numbers.Integral)`` +to allow for alternate ``int``-like implementations (e.g., ``future.types.newint``) + +.. branch: faster-rstruct + +Improve the performace of struct.unpack, which now directly reads inside the +string buffer and directly casts the bytes to the appropriate type, when +allowed. Unpacking of floats and doubles is about 15 times faster now, while +for integer types it's up to ~50% faster for 64bit integers. + +.. branch: wrap-specialisation + +Remove unnecessary special handling of space.wrap(). + +.. branch: compress-numbering + +Improve the memory signature of numbering instances in the JIT. This should massively +decrease the amount of memory consumed by the JIT, which is significant for most programs. + +.. 
branch: fix-trace-too-long-heuristic + +Improve the heuristic when disable trace-too-long + +.. branch: fix-setslice-can-resize + +Make rlist's ll_listsetslice() able to resize the target list to help +simplify objspace/std/listobject.py. Was issue #2196. + +.. branch: anntype2 + +A somewhat random bunch of changes and fixes following up on branch 'anntype'. Highlights: + +- Implement @doubledispatch decorator and use it for intersection() and difference(). + +- Turn isinstance into a SpaceOperation + +- Create a few direct tests of the fundamental annotation invariant in test_model.py + +- Remove bookkeeper attribute from DictDef and ListDef. + +.. branch: cffi-static-callback + +.. branch: vecopt-absvalue + +- Enhancement. Removed vector fields from AbstractValue. + +.. branch: memop-simplify2 + +Simplification. Backends implement too many loading instructions, only having a slightly different interface. +Four new operations (gc_load/gc_load_indexed, gc_store/gc_store_indexed) replace all the +commonly known loading operations + +.. branch: more-rposix + +Move wrappers for OS functions from `rpython/rtyper` to `rpython/rlib` and +turn them into regular RPython functions. Most RPython-compatible `os.*` +functions are now directly accessible as `rpython.rposix.*`. + +.. branch: always-enable-gil + +Simplify a bit the GIL handling in non-jitted code. Fixes issue #2205. + +.. branch: flowspace-cleanups + +Trivial cleanups in flowspace.operation : fix comment & duplicated method + +.. branch: test-AF_NETLINK + +Add a test for pre-existing AF_NETLINK support. Was part of issue #1942. + +.. branch: small-cleanups-misc + +Trivial misc cleanups: typo, whitespace, obsolete comments + +.. branch: cpyext-slotdefs +.. branch: fix-missing-canraise +.. branch: whatsnew + +.. branch: fix-2211 + +Fix the cryptic exception message when attempting to use extended slicing +in rpython. Was issue #2211. + +.. branch: ec-keepalive + +Optimize the case where, in a new C-created thread, we keep invoking +short-running Python callbacks. (CFFI on CPython has a hack to achieve +the same result.) This can also be seen as a bug fix: previously, +thread-local objects would be reset between two such calls. + +.. branch: globals-quasiimmut + +Optimize global lookups. + +.. branch: cffi-static-callback-embedding + +Updated to CFFI 1.5, which supports a new way to do embedding. +Deprecates http://pypy.readthedocs.org/en/latest/embedding.html. + +.. branch: fix-cpython-ssl-tests-2.7 + +Fix SSL tests by importing cpython's patch + + +.. branch: remove-getfield-pure + +Remove pure variants of ``getfield_gc_*`` operations from the JIT. Relevant +optimizations instead consult the field descriptor to determine the purity of +the operation. Additionally, pure ``getfield`` operations are now handled +entirely by `rpython/jit/metainterp/optimizeopt/heap.py` rather than +`rpython/jit/metainterp/optimizeopt/pure.py`, which can result in better codegen +for traces containing a large number of pure getfield operations. + +.. branch: exctrans + +Try to ensure that no new functions get annotated during the 'source_c' phase. +Refactor sandboxing to operate at a higher level. + +.. branch: cpyext-bootstrap + +.. branch: vmprof-newstack + +Refactor vmprof to work cross-operating-system. + +.. branch: seperate-strucmember_h + +Seperate structmember.h from Python.h Also enhance creating api functions +to specify which header file they appear in (previously only pypy_decl.h) + +.. 
branch: llimpl + +Refactor register_external(), remove running_on_llinterp mechanism and +apply sandbox transform on externals at the end of annotation. + +.. branch: cffi-embedding-win32 + +.. branch: windows-vmprof-support + +vmprof should work on Windows. + + +.. branch: reorder-map-attributes + +When creating instances and adding attributes in several different orders +depending on some condition, the JIT would create too much code. This is now +fixed. + +.. branch: cpyext-gc-support-2 + +Improve CPython C API support, which means lxml now runs unmodified +(after removing pypy hacks, pending pull request) + +.. branch: look-inside-tuple-hash + +Look inside tuple hash, improving mdp benchmark + +.. branch: vlen-resume + +Compress resume data, saving 10-20% of memory consumed by the JIT + +.. branch: issue-2248 + +.. branch: ndarray-setitem-filtered + +Fix boolean-array indexing in micronumpy + +.. branch: numpy_partition +Support ndarray.partition() as an app-level function numpy.core._partition_use, +provided as a cffi wrapper to upstream's implementation in the pypy/numpy repo + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,173 +1,25 @@ ========================= -What's new in PyPy 4.1.+ +What's new in PyPy 5.0.+ ========================= -.. this is a revision shortly after release-4.0.1 -.. startrev: 4b5c840d0da2 +.. this is a revision shortly after release-5.0 +.. startrev: b238b48f9138 -Fixed ``_PyLong_FromByteArray()``, which was buggy. +.. branch: s390x-backend -Fixed a crash with stacklets (or greenlets) on non-Linux machines -which showed up if you forget stacklets without resuming them. +The jit compiler backend implementation for the s390x architecutre. +The backend manages 64-bit values in the literal pool of the assembly instead of loading them as immediates. +It includes a simplification for the operation 'zero_array'. Start and length parameters are bytes instead of size. -.. branch: numpy-1.10 +.. branch: remove-py-log -Fix tests to run cleanly with -A and start to fix micronumpy for upstream numpy -which is now 1.10.2 +Replace py.log with something simpler, which should speed up logging -.. branch: osx-flat-namespace +.. branch: where_1_arg -Fix the cpyext tests on OSX by linking with -flat_namespace +Implemented numpy.where for 1 argument (thanks sergem) -.. branch: anntype +.. branch: fix_indexing_by_numpy_int -Refactor and improve exception analysis in the annotator. - -.. branch: posita/2193-datetime-timedelta-integrals - -Fix issue #2193. ``isinstance(..., int)`` => ``isinstance(..., numbers.Integral)`` -to allow for alternate ``int``-like implementations (e.g., ``future.types.newint``) - -.. branch: faster-rstruct - -Improve the performace of struct.unpack, which now directly reads inside the -string buffer and directly casts the bytes to the appropriate type, when -allowed. Unpacking of floats and doubles is about 15 times faster now, while -for integer types it's up to ~50% faster for 64bit integers. - -.. branch: wrap-specialisation - -Remove unnecessary special handling of space.wrap(). - -.. branch: compress-numbering - -Improve the memory signature of numbering instances in the JIT. This should massively -decrease the amount of memory consumed by the JIT, which is significant for most programs. - -.. branch: fix-trace-too-long-heuristic - -Improve the heuristic when disable trace-too-long - -.. 
branch: fix-setslice-can-resize - -Make rlist's ll_listsetslice() able to resize the target list to help -simplify objspace/std/listobject.py. Was issue #2196. - -.. branch: anntype2 - -A somewhat random bunch of changes and fixes following up on branch 'anntype'. Highlights: - -- Implement @doubledispatch decorator and use it for intersection() and difference(). - -- Turn isinstance into a SpaceOperation - -- Create a few direct tests of the fundamental annotation invariant in test_model.py - -- Remove bookkeeper attribute from DictDef and ListDef. - -.. branch: cffi-static-callback - -.. branch: vecopt-absvalue - -- Enhancement. Removed vector fields from AbstractValue. - -.. branch: memop-simplify2 - -Simplification. Backends implement too many loading instructions, only having a slightly different interface. -Four new operations (gc_load/gc_load_indexed, gc_store/gc_store_indexed) replace all the -commonly known loading operations - -.. branch: more-rposix - -Move wrappers for OS functions from `rpython/rtyper` to `rpython/rlib` and -turn them into regular RPython functions. Most RPython-compatible `os.*` -functions are now directly accessible as `rpython.rposix.*`. - -.. branch: always-enable-gil - -Simplify a bit the GIL handling in non-jitted code. Fixes issue #2205. - -.. branch: flowspace-cleanups - -Trivial cleanups in flowspace.operation : fix comment & duplicated method - -.. branch: test-AF_NETLINK - -Add a test for pre-existing AF_NETLINK support. Was part of issue #1942. - -.. branch: small-cleanups-misc - -Trivial misc cleanups: typo, whitespace, obsolete comments - -.. branch: cpyext-slotdefs -.. branch: fix-missing-canraise -.. branch: whatsnew - -.. branch: fix-2211 - -Fix the cryptic exception message when attempting to use extended slicing -in rpython. Was issue #2211. - -.. branch: ec-keepalive - -Optimize the case where, in a new C-created thread, we keep invoking -short-running Python callbacks. (CFFI on CPython has a hack to achieve -the same result.) This can also be seen as a bug fix: previously, -thread-local objects would be reset between two such calls. - -.. branch: globals-quasiimmut - -Optimize global lookups. - -.. branch: cffi-static-callback-embedding - -Updated to CFFI 1.5, which supports a new way to do embedding. -Deprecates http://pypy.readthedocs.org/en/latest/embedding.html. - -.. branch: fix-cpython-ssl-tests-2.7 - -Fix SSL tests by importing cpython's patch - - -.. branch: remove-getfield-pure - -Remove pure variants of ``getfield_gc_*`` operations from the JIT. Relevant -optimizations instead consult the field descriptor to determine the purity of -the operation. Additionally, pure ``getfield`` operations are now handled -entirely by `rpython/jit/metainterp/optimizeopt/heap.py` rather than -`rpython/jit/metainterp/optimizeopt/pure.py`, which can result in better codegen -for traces containing a large number of pure getfield operations. - -.. branch: exctrans - -Try to ensure that no new functions get annotated during the 'source_c' phase. -Refactor sandboxing to operate at a higher level. - -.. branch: cpyext-bootstrap - -.. branch: vmprof-newstack - -Refactor vmprof to work cross-operating-system. - -.. branch: seperate-strucmember_h - -Seperate structmember.h from Python.h Also enhance creating api functions -to specify which header file they appear in (previously only pypy_decl.h) - -.. branch: llimpl - -Refactor register_external(), remove running_on_llinterp mechanism and -apply sandbox transform on externals at the end of annotation. - -.. 
branch: cffi-embedding-win32 - -.. branch: windows-vmprof-support - -vmprof should work on Windows. - - -.. branch: reorder-map-attributes - -When creating instances and adding attributes in several different orders -depending on some condition, the JIT would create too much code. This is now -fixed. \ No newline at end of file +Implement yet another strange numpy indexing compatibility; indexing by a scalar +returns a scalar diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -13,6 +13,9 @@ from pypy.interpreter.argument import Arguments from rpython.rlib import jit +from rpython.rlib.rarithmetic import LONG_BIT +from rpython.rlib.rbigint import rbigint + funccallunrolling = unrolling_iterable(range(4)) @@ -557,6 +560,26 @@ return space.w_False return space.eq(self.w_function, w_other.w_function) + def is_w(self, space, other): + if not isinstance(other, Method): + return False + return (self.w_instance is other.w_instance and + self.w_function is other.w_function and + self.w_class is other.w_class) + + def immutable_unique_id(self, space): + from pypy.objspace.std.util import IDTAG_METHOD as tag + from pypy.objspace.std.util import IDTAG_SHIFT + if self.w_instance is not None: + id = space.bigint_w(space.id(self.w_instance)) + id = id.lshift(LONG_BIT) + else: + id = rbigint.fromint(0) + id = id.or_(space.bigint_w(space.id(self.w_function))) + id = id.lshift(LONG_BIT).or_(space.bigint_w(space.id(self.w_class))) + id = id.lshift(IDTAG_SHIFT).int_or_(tag) + return space.newlong_from_rbigint(id) + def descr_method_hash(self): space = self.space w_result = space.hash(self.w_function) diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py --- a/pypy/interpreter/mixedmodule.py +++ b/pypy/interpreter/mixedmodule.py @@ -3,7 +3,7 @@ from pypy.interpreter import gateway from pypy.interpreter.error import OperationError from pypy.interpreter.baseobjspace import W_Root -import os, sys +import sys class MixedModule(Module): applevel_name = None @@ -60,7 +60,7 @@ def save_module_content_for_future_reload(self): self.w_initialdict = self.space.call_method(self.w_dict, 'items') - + @classmethod def get_applevel_name(cls): """ NOT_RPYTHON """ if cls.applevel_name is not None: @@ -68,7 +68,6 @@ else: pkgroot = cls.__module__ return pkgroot.split('.')[-1] - get_applevel_name = classmethod(get_applevel_name) def get(self, name): space = self.space @@ -103,7 +102,7 @@ # be normal Functions to get the correct binding behaviour func = w_value if (isinstance(func, Function) and - type(func) is not BuiltinFunction): + type(func) is not BuiltinFunction): try: bltin = func._builtinversion_ except AttributeError: @@ -115,7 +114,6 @@ space.setitem(self.w_dict, w_name, w_value) return w_value - def getdict(self, space): if self.lazy: for name in self.loaders: @@ -131,6 +129,7 @@ self.startup_called = False self._frozen = True + @classmethod def buildloaders(cls): """ NOT_RPYTHON """ if not hasattr(cls, 'loaders'): @@ -149,8 +148,6 @@ if '__doc__' not in loaders: loaders['__doc__'] = cls.get__doc__ - buildloaders = classmethod(buildloaders) - def extra_interpdef(self, name, spec): cls = self.__class__ pkgroot = cls.__module__ @@ -159,21 +156,21 @@ w_obj = loader(space) space.setattr(space.wrap(self), space.wrap(name), w_obj) + @classmethod def get__doc__(cls, space): return space.wrap(cls.__doc__) - get__doc__ = classmethod(get__doc__) def getinterpevalloader(pkgroot, spec): """ NOT_RPYTHON """ def 
ifileloader(space): - d = {'space' : space} + d = {'space':space} # EVIL HACK (but it works, and this is not RPython :-) while 1: try: value = eval(spec, d) except NameError, ex: - name = ex.args[0].split("'")[1] # super-Evil + name = ex.args[0].split("'")[1] # super-Evil if name in d: raise # propagate the NameError try: diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -9,6 +9,11 @@ from pypy.conftest import pypydir from lib_pypy._pypy_interact import irc_header +try: + import __pypy__ +except ImportError: + __pypy__ = None + banner = sys.version.splitlines()[0] app_main = os.path.join(os.path.realpath(os.path.dirname(__file__)), os.pardir, 'app_main.py') @@ -106,6 +111,8 @@ sys.argv[:] = saved_sys_argv sys.stdout = saved_sys_stdout sys.stderr = saved_sys_stderr + if __pypy__: + __pypy__.set_debug(True) def test_all_combinations_I_can_think_of(self): self.check([], {}, sys_argv=[''], run_stdin=True) @@ -133,7 +140,7 @@ self.check(['-S', '-tO', '--info'], {}, output_contains='translation') self.check(['-S', '-tO', '--version'], {}, output_contains='Python') self.check(['-S', '-tOV'], {}, output_contains='Python') - self.check(['--jit', 'foobar', '-S'], {}, sys_argv=[''], + self.check(['--jit', 'off', '-S'], {}, sys_argv=[''], run_stdin=True, no_site=1) self.check(['-c', 'pass'], {}, sys_argv=['-c'], run_command='pass') self.check(['-cpass'], {}, sys_argv=['-c'], run_command='pass') @@ -601,9 +608,7 @@ def run_with_status_code(self, cmdline, senddata='', expect_prompt=False, expect_banner=False, python_flags='', env=None): if os.name == 'nt': - try: - import __pypy__ - except: + if __pypy__ is None: py.test.skip('app_main cannot run on non-pypy for windows') cmdline = '%s %s "%s" %s' % (sys.executable, python_flags, app_main, cmdline) diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -1,5 +1,4 @@ - -import unittest +import pytest from pypy.interpreter import eval from pypy.interpreter.function import Function, Method, descr_function_get from pypy.interpreter.pycode import PyCode @@ -187,6 +186,7 @@ raises( TypeError, func, 42, {'arg1': 23}) + @pytest.mark.skipif("config.option.runappdirect") def test_kwargs_nondict_mapping(self): class Mapping: def keys(self): @@ -257,6 +257,14 @@ meth = func.__get__(obj, object) assert meth() == obj + def test_none_get_interaction(self): + skip("XXX issue #2083") + assert type(None).__repr__(None) == 'None' + + def test_none_get_interaction_2(self): + f = None.__repr__ + assert f() == 'None' + def test_no_get_builtin(self): assert not hasattr(dir, '__get__') class A(object): @@ -284,6 +292,7 @@ raises(TypeError, len, s, some_unknown_keyword=s) raises(TypeError, len, s, s, some_unknown_keyword=s) + @pytest.mark.skipif("config.option.runappdirect") def test_call_error_message(self): try: len() @@ -325,6 +334,7 @@ f = lambda: 42 assert f.func_doc is None + @pytest.mark.skipif("config.option.runappdirect") def test_setstate_called_with_wrong_args(self): f = lambda: 42 # not sure what it should raise, since CPython doesn't have setstate @@ -550,6 +560,37 @@ assert A().m == X() assert X() == A().m + @pytest.mark.skipif("config.option.runappdirect") + def test_method_identity(self): + class A(object): + def m(self): + pass + def n(self): + pass + + class B(A): + pass + + class X(object): + def 
__eq__(self, other): + return True + + a = A() + a2 = A() + assert a.m is a.m + assert id(a.m) == id(a.m) + assert a.m is not a.n + assert id(a.m) != id(a.n) + assert a.m is not a2.m + assert id(a.m) != id(a2.m) + + assert A.m is A.m + assert id(A.m) == id(A.m) + assert A.m is not A.n + assert id(A.m) != id(A.n) + assert A.m is not B.m + assert id(A.m) != id(B.m) + class TestMethod: def setup_method(self, method): diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py --- a/pypy/module/__builtin__/descriptor.py +++ b/pypy/module/__builtin__/descriptor.py @@ -79,6 +79,7 @@ W_Super.typedef = TypeDef( 'super', __new__ = interp2app(descr_new_super), + __thisclass__ = interp_attrproperty_w("w_starttype", W_Super), __getattribute__ = interp2app(W_Super.getattribute), __get__ = interp2app(W_Super.get), __doc__ = """super(type) -> unbound super object diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -452,7 +452,6 @@ assert a + 1 == 2 assert a + 1.1 == 2 - def test_binaryop_calls_coerce_always(self): l = [] class A: @@ -1076,6 +1075,16 @@ assert (D() > A()) == 'D:A.gt' assert (D() >= A()) == 'D:A.ge' + def test_override___int__(self): + class F(float): + def __int__(self): + return 666 + f = F(-12.3) + assert int(f) == 666 + # on cpython, this calls float_trunc() in floatobject.c + # which ends up calling PyFloat_AS_DOUBLE((PyFloatObject*) f) + assert float.__int__(f) == -12 + class AppTestOldStyleClassBytesDict(object): def setup_class(cls): diff --git a/pypy/module/__builtin__/test/test_descriptor.py b/pypy/module/__builtin__/test/test_descriptor.py --- a/pypy/module/__builtin__/test/test_descriptor.py +++ b/pypy/module/__builtin__/test/test_descriptor.py @@ -214,7 +214,7 @@ c = C() assert C.goo(1) == (C, 1) assert c.goo(1) == (C, 1) - + assert c.foo(1) == (c, 1) class D(C): pass @@ -238,6 +238,17 @@ meth = classmethod(1).__get__(1) raises(TypeError, meth) + def test_super_thisclass(self): + class A(object): + pass + + assert super(A, A()).__thisclass__ is A + + class B(A): + pass + + assert super(B, B()).__thisclass__ is B + assert super(A, B()).__thisclass__ is A def test_property_docstring(self): assert property.__doc__.startswith('property') diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -2,7 +2,6 @@ from pypy.module.thread.test.support import GenericTestThread - class AppTestMinimal: spaceconfig = dict(usemodules=['__pypy__']) diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -353,10 +353,11 @@ # ____________________________________________________________ -rffi_fdopen = rffi.llexternal("fdopen", [rffi.INT, rffi.CCHARP], rffi.CCHARP, +FILEP = rffi.COpaquePtr("FILE") +rffi_fdopen = rffi.llexternal("fdopen", [rffi.INT, rffi.CCHARP], FILEP, save_err=rffi.RFFI_SAVE_ERRNO) -rffi_setbuf = rffi.llexternal("setbuf", [rffi.CCHARP, rffi.CCHARP], lltype.Void) -rffi_fclose = rffi.llexternal("fclose", [rffi.CCHARP], rffi.INT) +rffi_setbuf = rffi.llexternal("setbuf", [FILEP, rffi.CCHARP], lltype.Void) +rffi_fclose = rffi.llexternal("fclose", [FILEP], rffi.INT) class CffiFileObj(object): _immutable_ = True @@ -382,4 +383,4 @@ 
fileobj.cffi_fileobj = CffiFileObj(fd, fileobj.mode) except OSError, e: raise wrap_oserror(space, e) - return fileobj.cffi_fileobj.llf + return rffi.cast(rffi.CCHARP, fileobj.cffi_fileobj.llf) diff --git a/pypy/module/_collections/app_defaultdict.py b/pypy/module/_collections/app_defaultdict.py --- a/pypy/module/_collections/app_defaultdict.py +++ b/pypy/module/_collections/app_defaultdict.py @@ -12,6 +12,7 @@ class defaultdict(dict): __slots__ = ['default_factory'] + __module__ = 'collections' def __init__(self, *args, **kwds): if len(args) > 0: diff --git a/pypy/module/_collections/test/test_defaultdict.py b/pypy/module/_collections/test/test_defaultdict.py --- a/pypy/module/_collections/test/test_defaultdict.py +++ b/pypy/module/_collections/test/test_defaultdict.py @@ -14,6 +14,12 @@ d[5].append(44) assert l == [42, 43] and l2 == [44] + def test_module(self): + from _collections import defaultdict + assert repr(defaultdict) in ( + "", # on PyPy + "") # on CPython + def test_keyerror_without_factory(self): from _collections import defaultdict for d1 in [defaultdict(), defaultdict(None)]: diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -389,6 +389,7 @@ def test_writelines(self): import array + import sys fn = self.temptestfile with file(fn, 'w') as f: f.writelines(['abc']) @@ -406,7 +407,10 @@ exc = raises(TypeError, f.writelines, [memoryview('jkl')]) assert str(exc.value) == "writelines() argument must be a sequence of strings" out = open(fn, 'rb').readlines()[0] - assert out[0:5] == 'abcd\x00' + if sys.byteorder == 'big': + assert out[0:7] == 'abc\x00\x00\x00d' + else: + assert out[0:5] == 'abcd\x00' assert out[-3:] == 'ghi' with file(fn, 'wb') as f: diff --git a/pypy/module/_rawffi/callback.py b/pypy/module/_rawffi/callback.py --- a/pypy/module/_rawffi/callback.py +++ b/pypy/module/_rawffi/callback.py @@ -1,17 +1,23 @@ - +import sys from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module._rawffi.interp_rawffi import write_ptr from pypy.module._rawffi.structure import W_Structure from pypy.module._rawffi.interp_rawffi import (W_DataInstance, letter2tp, - unwrap_value, unpack_argshapes, got_libffi_error) + unwrap_value, unpack_argshapes, got_libffi_error, is_narrow_integer_type, + LL_TYPEMAP, NARROW_INTEGER_TYPES) from rpython.rlib.clibffi import USERDATA_P, CallbackFuncPtr, FUNCFLAG_CDECL from rpython.rlib.clibffi import ffi_type_void, LibFFIError from rpython.rlib import rweakref from pypy.module._rawffi.tracker import tracker from pypy.interpreter.error import OperationError from pypy.interpreter import gateway +from rpython.rlib.unroll import unrolling_iterable + +BIGENDIAN = sys.byteorder == 'big' + +unroll_narrow_integer_types = unrolling_iterable(NARROW_INTEGER_TYPES) app = gateway.applevel(''' def tbprint(tb, err): @@ -42,8 +48,17 @@ args_w[i] = space.wrap(rffi.cast(rffi.ULONG, ll_args[i])) w_res = space.call(w_callable, space.newtuple(args_w)) if callback_ptr.result is not None: # don't return void - unwrap_value(space, write_ptr, ll_res, 0, - callback_ptr.result, w_res) + ptr = ll_res + letter = callback_ptr.result + if BIGENDIAN: + # take care of narrow integers! 
+ for int_type in unroll_narrow_integer_types: + if int_type == letter: + T = LL_TYPEMAP[int_type] + n = rffi.sizeof(lltype.Signed) - rffi.sizeof(T) + ptr = rffi.ptradd(ptr, n) + break + unwrap_value(space, write_ptr, ptr, 0, letter, w_res) except OperationError, e: tbprint(space, space.wrap(e.get_traceback()), space.wrap(e.errorstr(space))) diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -1,3 +1,4 @@ +import sys from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -19,6 +20,8 @@ from pypy.module._rawffi.buffer import RawFFIBuffer from pypy.module._rawffi.tracker import tracker +BIGENDIAN = sys.byteorder == 'big' + TYPEMAP = { # XXX A mess with unsigned/signed/normal chars :-/ 'c' : ffi_type_uchar, @@ -331,10 +334,14 @@ if tracker.DO_TRACING: ll_buf = rffi.cast(lltype.Signed, self.ll_buffer) tracker.trace_allocation(ll_buf, self) + self._ll_buffer = self.ll_buffer def getbuffer(self, space): return space.wrap(rffi.cast(lltype.Unsigned, self.ll_buffer)) + def buffer_advance(self, n): + self.ll_buffer = rffi.ptradd(self.ll_buffer, n) + def byptr(self, space): from pypy.module._rawffi.array import ARRAY_OF_PTRS array = ARRAY_OF_PTRS.allocate(space, 1) @@ -342,16 +349,17 @@ return space.wrap(array) def free(self, space): - if not self.ll_buffer: + if not self._ll_buffer: raise segfault_exception(space, "freeing NULL pointer") self._free() def _free(self): if tracker.DO_TRACING: - ll_buf = rffi.cast(lltype.Signed, self.ll_buffer) + ll_buf = rffi.cast(lltype.Signed, self._ll_buffer) tracker.trace_free(ll_buf) - lltype.free(self.ll_buffer, flavor='raw') + lltype.free(self._ll_buffer, flavor='raw') self.ll_buffer = lltype.nullptr(rffi.VOIDP.TO) + self._ll_buffer = self.ll_buffer def buffer_w(self, space, flags): return RawFFIBuffer(self) @@ -432,12 +440,19 @@ space.wrap("cannot directly read value")) wrap_value._annspecialcase_ = 'specialize:arg(1)' +NARROW_INTEGER_TYPES = 'cbhiBIH?' 
+ +def is_narrow_integer_type(letter): + return letter in NARROW_INTEGER_TYPES class W_FuncPtr(W_Root): def __init__(self, space, ptr, argshapes, resshape): self.ptr = ptr self.argshapes = argshapes self.resshape = resshape + self.narrow_integer = False + if resshape is not None: + self.narrow_integer = is_narrow_integer_type(resshape.itemcode.lower()) def getbuffer(self, space): return space.wrap(rffi.cast(lltype.Unsigned, self.ptr.funcsym)) @@ -497,6 +512,10 @@ result = self.resshape.allocate(space, 1, autofree=True) # adjust_return_size() was used here on result.ll_buffer self.ptr.call(args_ll, result.ll_buffer) + if BIGENDIAN and self.narrow_integer: + # we get a 8 byte value in big endian + n = rffi.sizeof(lltype.Signed) - result.shape.size + result.buffer_advance(n) return space.wrap(result) else: self.ptr.call(args_ll, lltype.nullptr(rffi.VOIDP.TO)) diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -18,6 +18,9 @@ from rpython.rlib.rarithmetic import intmask, signedtype, r_uint, \ r_ulonglong from rpython.rtyper.lltypesystem import lltype, rffi +import sys + +IS_BIG_ENDIAN = sys.byteorder == 'big' @@ -114,20 +117,32 @@ size += intmask(fieldsize) bitsizes.append(fieldsize) elif field_type == NEW_BITFIELD: - bitsizes.append((bitsize << 16) + bitoffset) + if IS_BIG_ENDIAN: + off = last_size - bitoffset - bitsize + else: + off = bitoffset + bitsizes.append((bitsize << 16) + off) bitoffset = bitsize size = round_up(size, fieldalignment) pos.append(size) size += fieldsize elif field_type == CONT_BITFIELD: - bitsizes.append((bitsize << 16) + bitoffset) + if IS_BIG_ENDIAN: + off = last_size - bitoffset - bitsize + else: + off = bitoffset + bitsizes.append((bitsize << 16) + off) bitoffset += bitsize # offset is already updated for the NEXT field pos.append(size - fieldsize) elif field_type == EXPAND_BITFIELD: size += fieldsize - last_size / 8 last_size = fieldsize * 8 - bitsizes.append((bitsize << 16) + bitoffset) + if IS_BIG_ENDIAN: + off = last_size - bitoffset - bitsize + else: + off = bitoffset + bitsizes.append((bitsize << 16) + off) bitoffset += bitsize # offset is already updated for the NEXT field pos.append(size - fieldsize) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -704,7 +704,6 @@ def compare(a, b): a1 = _rawffi.Array('i').fromaddress(_rawffi.Array('P').fromaddress(a, 1)[0], 1) a2 = _rawffi.Array('i').fromaddress(_rawffi.Array('P').fromaddress(b, 1)[0], 1) - print "comparing", a1[0], "with", a2[0] if a1[0] not in [1,2,3,4] or a2[0] not in [1,2,3,4]: bogus_args.append((a1[0], a2[0])) if a1[0] > a2[0]: @@ -715,7 +714,7 @@ a2[0] = len(ll_to_sort) a3 = _rawffi.Array('l')(1) a3[0] = struct.calcsize('i') - cb = _rawffi.CallbackPtr(compare, ['P', 'P'], 'i') + cb = _rawffi.CallbackPtr(compare, ['P', 'P'], 'l') a4 = cb.byptr() qsort(a1, a2, a3, a4) res = [ll_to_sort[i] for i in range(len(ll_to_sort))] @@ -896,11 +895,21 @@ b = _rawffi.Array('c').fromaddress(a.buffer, 38) if sys.maxunicode > 65535: # UCS4 build - assert b[0] == 'x' - assert b[1] == '\x00' - assert b[2] == '\x00' - assert b[3] == '\x00' - assert b[4] == 'y' + if sys.byteorder == 'big': + assert b[0] == '\x00' + assert b[1] == '\x00' + assert b[2] == '\x00' + assert b[3] == 'x' + assert b[4] == '\x00' + assert b[5] == '\x00' + assert b[6] == '\x00' + assert b[7] == 
'y' + else: + assert b[0] == 'x' + assert b[1] == '\x00' + assert b[2] == '\x00' + assert b[3] == '\x00' + assert b[4] == 'y' else: # UCS2 build assert b[0] == 'x' diff --git a/pypy/module/_rawffi/test/test_struct.py b/pypy/module/_rawffi/test/test_struct.py --- a/pypy/module/_rawffi/test/test_struct.py +++ b/pypy/module/_rawffi/test/test_struct.py @@ -1,4 +1,4 @@ - +import sys from pypy.module._rawffi.structure import size_alignment_pos from pypy.module._rawffi.interp_rawffi import TYPEMAP, letter2tp @@ -63,4 +63,7 @@ for (name, t, size) in fields]) assert size == 8 assert pos == [0, 0, 0] - assert bitsizes == [0x10000, 0x3e0001, 0x1003f] + if sys.byteorder == 'little': + assert bitsizes == [0x10000, 0x3e0001, 0x1003f] + else: + assert bitsizes == [0x1003f, 0x3e0001, 0x10000] diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -7,6 +7,7 @@ from pypy.interpreter.error import OperationError from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit +from rpython.rlib.rstring import StringBuilder, UnicodeBuilder # ____________________________________________________________ # @@ -36,6 +37,8 @@ if 0 <= start <= end: if isinstance(ctx, rsre_core.BufMatchContext): return space.wrap(ctx._buffer.getslice(start, end, 1, end-start)) + if isinstance(ctx, rsre_core.StrMatchContext): + return space.wrap(ctx._string[start:end]) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr[start:end]) else: @@ -98,8 +101,8 @@ space.wrap("cannot copy this pattern object")) def make_ctx(self, w_string, pos=0, endpos=sys.maxint): - """Make a BufMatchContext or a UnicodeMatchContext for searching - in the given w_string object.""" + """Make a StrMatchContext, BufMatchContext or a UnicodeMatchContext for + searching in the given w_string object.""" space = self.space if pos < 0: pos = 0 @@ -113,6 +116,14 @@ endpos = len(unicodestr) return rsre_core.UnicodeMatchContext(self.code, unicodestr, pos, endpos, self.flags) + elif space.isinstance_w(w_string, space.w_str): + str = space.str_w(w_string) + if pos > len(str): + pos = len(str) + if endpos > len(str): + endpos = len(str) + return rsre_core.StrMatchContext(self.code, str, + pos, endpos, self.flags) else: buf = space.readbuf_w(w_string) size = buf.getlength() @@ -216,6 +227,11 @@ def subx(self, w_ptemplate, w_string, count): space = self.space + # use a (much faster) string/unicode builder if w_ptemplate and + # w_string are both string or both unicode objects, and if w_ptemplate + # is a literal + use_builder = False + filter_as_unicode = filter_as_string = None if space.is_true(space.callable(w_ptemplate)): w_filter = w_ptemplate filter_is_callable = True @@ -223,6 +239,8 @@ if space.isinstance_w(w_ptemplate, space.w_unicode): filter_as_unicode = space.unicode_w(w_ptemplate) literal = u'\\' not in filter_as_unicode + use_builder = ( + space.isinstance_w(w_string, space.w_unicode) and literal) else: try: filter_as_string = space.str_w(w_ptemplate) @@ -232,6 +250,8 @@ literal = False else: literal = '\\' not in filter_as_string + use_builder = ( + space.isinstance_w(w_string, space.w_str) and literal) if literal: w_filter = w_ptemplate filter_is_callable = False @@ -242,19 +262,44 @@ space.wrap(self), w_ptemplate) filter_is_callable = space.is_true(space.callable(w_filter)) # + # XXX this is a bit of a mess, but it improves performance a lot ctx = self.make_ctx(w_string) - sublist_w = [] + sublist_w = strbuilder = 
unicodebuilder = None + if use_builder: + if filter_as_unicode is not None: + unicodebuilder = UnicodeBuilder(ctx.end) + else: + assert filter_as_string is not None + strbuilder = StringBuilder(ctx.end) + else: + sublist_w = [] n = last_pos = 0 while not count or n < count: + sub_jitdriver.jit_merge_point( + self=self, + use_builder=use_builder, + filter_is_callable=filter_is_callable, + filter_type=type(w_filter), + ctx=ctx, + w_filter=w_filter, + strbuilder=strbuilder, + unicodebuilder=unicodebuilder, + filter_as_string=filter_as_string, + filter_as_unicode=filter_as_unicode, + count=count, + w_string=w_string, + n=n, last_pos=last_pos, sublist_w=sublist_w + ) + space = self.space if not searchcontext(space, ctx): break if last_pos < ctx.match_start: - sublist_w.append(slice_w(space, ctx, last_pos, - ctx.match_start, space.w_None)) + _sub_append_slice( + ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, last_pos, ctx.match_start) start = ctx.match_end if start == ctx.match_start: start += 1 - nextctx = ctx.fresh_copy(start) if not (last_pos == ctx.match_start == ctx.match_end and n > 0): # the above ignores empty matches on latest position @@ -262,28 +307,71 @@ w_match = self.getmatch(ctx, True) w_piece = space.call_function(w_filter, w_match) if not space.is_w(w_piece, space.w_None): + assert strbuilder is None and unicodebuilder is None + assert not use_builder sublist_w.append(w_piece) else: - sublist_w.append(w_filter) + if use_builder: + if strbuilder is not None: + assert filter_as_string is not None + strbuilder.append(filter_as_string) + else: + assert unicodebuilder is not None + assert filter_as_unicode is not None + unicodebuilder.append(filter_as_unicode) + else: + sublist_w.append(w_filter) last_pos = ctx.match_end n += 1 elif last_pos >= ctx.end: break # empty match at the end: finished - ctx = nextctx + ctx.reset(start) if last_pos < ctx.end: - sublist_w.append(slice_w(space, ctx, last_pos, ctx.end, - space.w_None)) + _sub_append_slice(ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, last_pos, ctx.end) + if use_builder: + if strbuilder is not None: + return space.wrap(strbuilder.build()), n + else: + assert unicodebuilder is not None + return space.wrap(unicodebuilder.build()), n + else: + if space.isinstance_w(w_string, space.w_unicode): + w_emptystr = space.wrap(u'') + else: + w_emptystr = space.wrap('') + w_item = space.call_method(w_emptystr, 'join', + space.newlist(sublist_w)) + return w_item, n - if space.isinstance_w(w_string, space.w_unicode): - w_emptystr = space.wrap(u'') - else: - w_emptystr = space.wrap('') - w_item = space.call_method(w_emptystr, 'join', - space.newlist(sublist_w)) - return w_item, n +sub_jitdriver = jit.JitDriver( + reds="""count n last_pos + ctx w_filter + strbuilder unicodebuilder + filter_as_string + filter_as_unicode + w_string sublist_w + self""".split(), + greens=["filter_is_callable", "use_builder", "filter_type", "ctx.pattern"]) +def _sub_append_slice(ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, start, end): + if use_builder: + if isinstance(ctx, rsre_core.BufMatchContext): + assert strbuilder is not None + return strbuilder.append(ctx._buffer.getslice(start, end, 1, end-start)) + if isinstance(ctx, rsre_core.StrMatchContext): + assert strbuilder is not None + return strbuilder.append_slice(ctx._string, start, end) + elif isinstance(ctx, rsre_core.UnicodeMatchContext): + assert unicodebuilder is not None + return unicodebuilder.append_slice(ctx._unicodestr, start, end) + assert 0, 
"unreachable" + else: + sublist_w.append(slice_w(space, ctx, start, end, space.w_None)) + @unwrap_spec(flags=int, groups=int, w_groupindex=WrappedDefault(None), w_indexgroup=WrappedDefault(None)) def SRE_Pattern__new__(space, w_subtype, w_pattern, flags, w_code, @@ -482,6 +570,8 @@ ctx = self.ctx if isinstance(ctx, rsre_core.BufMatchContext): return space.wrap(ctx._buffer.as_str()) + elif isinstance(ctx, rsre_core.StrMatchContext): + return space.wrap(ctx._string) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr) else: diff --git a/pypy/module/_vmprof/conftest.py b/pypy/module/_vmprof/conftest.py new file mode 100644 --- /dev/null +++ b/pypy/module/_vmprof/conftest.py @@ -0,0 +1,6 @@ +import py, platform + +def pytest_collect_directory(path, parent): + if platform.machine() == 's390x': + py.test.skip("zarch tests skipped") +pytest_collect_file = pytest_collect_directory diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -60,10 +60,10 @@ Must be smaller than 1.0 """ w_modules = space.sys.get('modules') - if space.contains_w(w_modules, space.wrap('_continuation')): - space.warn(space.wrap("Using _continuation/greenlet/stacklet together " - "with vmprof will crash"), - space.w_RuntimeWarning) + #if space.contains_w(w_modules, space.wrap('_continuation')): + # space.warn(space.wrap("Using _continuation/greenlet/stacklet together " + # "with vmprof will crash"), + # space.w_RuntimeWarning) try: rvmprof.enable(fileno, period) except rvmprof.VMProfError, e: diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py --- a/pypy/module/_vmprof/test/test__vmprof.py +++ b/pypy/module/_vmprof/test/test__vmprof.py @@ -5,14 +5,15 @@ class AppTestVMProf(object): def setup_class(cls): cls.space = gettestobjspace(usemodules=['_vmprof', 'struct']) - cls.tmpfile = udir.join('test__vmprof.1').open('wb') - cls.w_tmpfileno = cls.space.wrap(cls.tmpfile.fileno()) - cls.w_tmpfilename = cls.space.wrap(cls.tmpfile.name) - cls.tmpfile2 = udir.join('test__vmprof.2').open('wb') - cls.w_tmpfileno2 = cls.space.wrap(cls.tmpfile2.fileno()) - cls.w_tmpfilename2 = cls.space.wrap(cls.tmpfile2.name) + cls.w_tmpfilename = cls.space.wrap(str(udir.join('test__vmprof.1'))) + cls.w_tmpfilename2 = cls.space.wrap(str(udir.join('test__vmprof.2'))) def test_import_vmprof(self): + tmpfile = open(self.tmpfilename, 'wb') + tmpfileno = tmpfile.fileno() + tmpfile2 = open(self.tmpfilename2, 'wb') + tmpfileno2 = tmpfile2.fileno() + import struct, sys WORD = struct.calcsize('l') @@ -45,7 +46,7 @@ return count import _vmprof - _vmprof.enable(self.tmpfileno, 0.01) + _vmprof.enable(tmpfileno, 0.01) _vmprof.disable() s = open(self.tmpfilename, 'rb').read() no_of_codes = count(s) @@ -56,7 +57,7 @@ pass """ in d - _vmprof.enable(self.tmpfileno2, 0.01) + _vmprof.enable(tmpfileno2, 0.01) exec """def foo2(): pass diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -390,7 +390,7 @@ ((dummy::cppyy_test_data*)self)->destroy_arrays(); } else if (idx == s_methods["cppyy_test_data::set_bool"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_bool((bool)((CPPYY_G__value*)args)[0].obj.in); + ((dummy::cppyy_test_data*)self)->set_bool((bool)((CPPYY_G__value*)args)[0].obj.i); } else if (idx == 
s_methods["cppyy_test_data::set_char"]) { assert(self && nargs == 1); From pypy.commits at gmail.com Wed Mar 16 15:39:08 2016 From: pypy.commits at gmail.com (plan_rich) Date: Wed, 16 Mar 2016 12:39:08 -0700 (PDT) Subject: [pypy-commit] pypy default: fixed test, must be little endian timestamp, otherwise it will not load pyc, but py Message-ID: <56e9b65c.c65b1c0a.734a7.64dc@mx.google.com> Author: Richard Plangger Branch: Changeset: r83088:ba879e2e3236 Date: 2016-03-16 20:37 +0100 http://bitbucket.org/pypy/pypy/changeset/ba879e2e3236/ Log: fixed test, must be little endian timestamp, otherwise it will not load pyc, but py diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -109,7 +109,7 @@ import marshal, stat, struct, os, imp code = py.code.Source(p.join("x.py").read()).compile() s3 = marshal.dumps(code) - s2 = struct.pack("i", os.stat(str(p.join("x.py")))[stat.ST_MTIME]) + s2 = struct.pack(" Author: Richard Plangger Branch: s390x-z196 Changeset: r83089:30a33af3efa2 Date: 2016-03-16 21:03 +0100 http://bitbucket.org/pypy/pypy/changeset/30a33af3efa2/ Log: changed chip model to a 2 year older version diff --git a/rpython/jit/backend/zarch/test/test_auto_encoding.py b/rpython/jit/backend/zarch/test/test_auto_encoding.py --- a/rpython/jit/backend/zarch/test/test_auto_encoding.py +++ b/rpython/jit/backend/zarch/test/test_auto_encoding.py @@ -204,7 +204,7 @@ g.write('%s\n' % op) oplist.append(op) g.write('\t.string "%s"\n' % END_TAG) - proc = subprocess.Popen(['as', '-m64', '-mzarch', '-march=zEC12', + proc = subprocess.Popen(['as', '-m64', '-mzarch', '-march=z196', inputname, '-o', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE) diff --git a/rpython/translator/platform/linux.py b/rpython/translator/platform/linux.py --- a/rpython/translator/platform/linux.py +++ b/rpython/translator/platform/linux.py @@ -23,7 +23,7 @@ if platform.machine() == 's390x': # force the right target arch for s390x - cflags = ('-march=zEC12','-m64','-mzarch') + cflags + cflags = ('-march=z196','-m64','-mzarch') + cflags def _args_for_shared(self, args): return ['-shared'] + args From pypy.commits at gmail.com Wed Mar 16 16:03:52 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 16 Mar 2016 13:03:52 -0700 (PDT) Subject: [pypy-commit] pypy default: help Popen find executable on win32 Message-ID: <56e9bc28.aa09c20a.60c4.ffffc213@mx.google.com> Author: Matti Picus Branch: Changeset: r83090:4aaefd95f635 Date: 2014-04-14 00:24 +0300 http://bitbucket.org/pypy/pypy/changeset/4aaefd95f635/ Log: help Popen find executable on win32 diff --git a/rpython/tool/runsubprocess.py b/rpython/tool/runsubprocess.py --- a/rpython/tool/runsubprocess.py +++ b/rpython/tool/runsubprocess.py @@ -20,6 +20,8 @@ def _run(executable, args, env, cwd): # note that this function can be *overridden* below # in some cases! 
+ if sys.platform == 'win32': + executable = executable.replace('/','\\') if isinstance(args, str): args = str(executable) + ' ' + args shell = True From pypy.commits at gmail.com Wed Mar 16 16:07:56 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 16 Mar 2016 13:07:56 -0700 (PDT) Subject: [pypy-commit] pypy default: Issue #2259 test and fix Message-ID: <56e9bd1c.2968c20a.84ddc.ffffbf41@mx.google.com> Author: Armin Rigo Branch: Changeset: r83091:53be1372d9ce Date: 2016-03-16 21:07 +0100 http://bitbucket.org/pypy/pypy/changeset/53be1372d9ce/ Log: Issue #2259 test and fix diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -417,7 +417,10 @@ self.wait_for_thread_shutdown() w_exitfunc = self.sys.getdictvalue(self, 'exitfunc') if w_exitfunc is not None: - self.call_function(w_exitfunc) + try: + self.call_function(w_exitfunc) + except OperationError as e: + e.write_unraisable(self, 'sys.exitfunc == ', w_exitfunc) from pypy.interpreter.module import Module for w_mod in self.builtin_modules.values(): if isinstance(w_mod, Module) and w_mod.startup_called: diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -416,3 +416,14 @@ i -= 1 assert i >= 0 gc.collect() + + def test_exitfunc_catches_exceptions(self): + from pypy.tool.pytest.objspace import maketestobjspace + space = maketestobjspace() + space.appexec([], """(): + import sys + sys.exitfunc = lambda: this_is_an_unknown_name + """) + space.finish() + # assert that we reach this point without getting interrupted + # by the OperationError(NameError) From pypy.commits at gmail.com Wed Mar 16 16:16:28 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 16 Mar 2016 13:16:28 -0700 (PDT) Subject: [pypy-commit] pypy py3.3: minimize diff to default to make test pass Message-ID: <56e9bf1c.0357c20a.c858a.ffffc782@mx.google.com> Author: mattip Branch: py3.3 Changeset: r83092:f93e216f53f3 Date: 2016-03-16 22:15 +0200 http://bitbucket.org/pypy/pypy/changeset/f93e216f53f3/ Log: minimize diff to default to make test pass diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -57,10 +57,10 @@ interrupted = [] print('--- start ---') _thread.start_new_thread(subthread, ()) - for j in range(100): + for j in range(30): if len(done): break print('.') - time.sleep(0) + time.sleep(0.25) print('main thread loop done') assert len(done) == 1 assert len(interrupted) == 1 From pypy.commits at gmail.com Wed Mar 16 17:05:25 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 16 Mar 2016 14:05:25 -0700 (PDT) Subject: [pypy-commit] pypy bootstrap-clarity: Make appexec() work without executing bytecode in the middle of translation Message-ID: <56e9ca95.c13fc20a.54b2.ffffd1de@mx.google.com> Author: Ronan Lamy Branch: bootstrap-clarity Changeset: r83093:c6664c23be1a Date: 2016-03-16 21:04 +0000 http://bitbucket.org/pypy/pypy/changeset/c6664c23be1a/ Log: Make appexec() work without executing bytecode in the middle of translation diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -1734,16 +1734,25 @@ class AppExecCache(SpaceCache): def build(cache, source): - """ NOT_RPYTHON """ space 
= cache.space - # XXX will change once we have our own compiler - import py - source = source.lstrip() - assert source.startswith('('), "incorrect header in:\n%s" % (source,) - source = py.code.Source("def anonymous%s\n" % source) - w_glob = space.newdict(module=True) - space.exec_(str(source), w_glob, w_glob) - return space.getitem(w_glob, space.wrap('anonymous')) + return appexec_compile(space, source) + +def appexec_compile(space, source): + """ NOT_RPYTHON """ + import py + from pypy.interpreter.function import Function + source = source.lstrip() + assert source.startswith('('), "incorrect header in:\n%s" % (source,) + source = py.code.Source("def anonymous%s\n" % source) + source = str(source) + compiler = space.createcompiler() + w_decl = compiler.compile(source, '?', mode='exec', flags=0) + w_glob = space.newdict(module=True) + space.setitem(w_glob, space.wrap('__builtins__'), + space.wrap(space.builtin)) + func_code = w_decl.co_consts_w[0] # hack + fn = Function(space, func_code, w_glob, 0) + return fn # Table describing the regular part of the interface of object spaces, From pypy.commits at gmail.com Thu Mar 17 01:01:48 2016 From: pypy.commits at gmail.com (stefanor) Date: Wed, 16 Mar 2016 22:01:48 -0700 (PDT) Subject: [pypy-commit] pypy default: Recognise armv8l machine type Message-ID: <56ea3a3c.6507c20a.ad77a.254c@mx.google.com> Author: Stefano Rivera Branch: Changeset: r83095:0206c67c661f Date: 2016-03-16 22:00 -0700 http://bitbucket.org/pypy/pypy/changeset/0206c67c661f/ Log: Recognise armv8l machine type This is a 32bit personality on an ARMv8 (64bit) CPU diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -66,6 +66,7 @@ 'x86_64': MODEL_X86, 'amd64': MODEL_X86, # freebsd 'AMD64': MODEL_X86, # win64 + 'armv8l': MODEL_ARM, # 32-bit ARMv8 'armv7l': MODEL_ARM, 'armv6l': MODEL_ARM, 'arm': MODEL_ARM, # freebsd From pypy.commits at gmail.com Thu Mar 17 05:52:43 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Mar 2016 02:52:43 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: A subbranch of jit-leaner-frontend Message-ID: <56ea7e6b.a185c20a.7621f.ffff8598@mx.google.com> Author: Armin Rigo Branch: heapcache-refactor Changeset: r83096:2f0eb13e605f Date: 2016-03-17 10:49 +0100 http://bitbucket.org/pypy/pypy/changeset/2f0eb13e605f/ Log: A subbranch of jit-leaner-frontend From pypy.commits at gmail.com Thu Mar 17 05:52:45 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Mar 2016 02:52:45 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: kill outdated comments Message-ID: <56ea7e6d.a2afc20a.a24d3.ffff88e2@mx.google.com> Author: Armin Rigo Branch: heapcache-refactor Changeset: r83097:b1a3ac45b246 Date: 2016-03-17 10:51 +0100 http://bitbucket.org/pypy/pypy/changeset/b1a3ac45b246/ Log: kill outdated comments diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -86,31 +86,13 @@ self.reset() def reset(self): - # maps boxes to values + # maps boxes to HeapCacheValue self.values = {} - # store the boxes that contain newly allocated objects, this maps the - # boxes to a bool, the bool indicates whether or not the object has - # escaped the trace or not (True means the box never escaped, False - # means it did escape), its presences in the mapping shows that it was - # allocated inside the trace - #if trace_branch: - 
#self.new_boxes = {} - # pass - #else: - #for box in self.new_boxes: - # self.new_boxes[box] = False - # pass - #if reset_virtuals: - # self.likely_virtuals = {} # only for jit.isvirtual() - # Tracks which boxes should be marked as escaped when the key box - # escapes. - #self.dependencies = {} - # heap cache # maps descrs to CacheEntry self.heap_cache = {} # heap array cache - # maps descrs to {index: {from_value: to_value}} dicts + # maps descrs to {index: CacheEntry} dicts self.heap_array_cache = {} def reset_keep_likely_virtuals(self): From pypy.commits at gmail.com Thu Mar 17 06:10:03 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Mar 2016 03:10:03 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: Starting to port the flags to RefFrontendOp: likely_virtual first Message-ID: <56ea827b.465ec20a.90fc6.ffff8c55@mx.google.com> Author: Armin Rigo Branch: heapcache-refactor Changeset: r83098:ea3280600306 Date: 2016-03-17 11:09 +0100 http://bitbucket.org/pypy/pypy/changeset/ea3280600306/ Log: Starting to port the flags to RefFrontendOp: likely_virtual first diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -1,10 +1,33 @@ -from rpython.jit.metainterp.history import ConstInt +from rpython.jit.metainterp.history import ConstInt, RefFrontendOp from rpython.jit.metainterp.resoperation import rop, OpHelpers +from rpython.rlib.rarithmetic import r_uint32, r_uint +from rpython.rlib.objectmodel import always_inline + + +# RefFrontendOp._heapc_flags: +HF_LIKELY_VIRTUAL = 0x01 + + at always_inline +def add_flags(ref_frontend_op, flags): + f = r_uint(ref_frontend_op._heapc_flags) + f |= r_uint(flags) + ref_frontend_op._heapc_flags = r_uint32(f) + + at always_inline +def remove_flags(ref_frontend_op, flags): + f = r_uint(ref_frontend_op._heapc_flags) + f &= r_uint(~flags) + ref_frontend_op._heapc_flags = r_uint32(f) + + at always_inline +def test_flags(ref_frontend_op, flags): + f = r_uint(ref_frontend_op._heapc_flags) + return bool(f & flags) + class HeapCacheValue(object): def __init__(self, box): self.box = box - self.likely_virtual = False self.reset_keep_likely_virtual() def reset_keep_likely_virtual(self): @@ -84,6 +107,7 @@ class HeapCache(object): def __init__(self): self.reset() + self.version = r_uint32(1) def reset(self): # maps boxes to HeapCacheValue @@ -158,14 +182,13 @@ self._escape_box(box) def _escape_box(self, box): + if isinstance(box, RefFrontendOp): + remove_flags(box, HF_LIKELY_VIRTUAL) + # value = self.getvalue(box, create=False) if not value: return - self._escape(value) - - def _escape(self, value): value.is_unescaped = False - value.likely_virtual = False deps = value.dependencies value.dependencies = None if deps is not None: @@ -301,15 +324,14 @@ return False def is_likely_virtual(self, box): - value = self.getvalue(box, create=False) - if value: - return value.likely_virtual - return False + return (isinstance(box, RefFrontendOp) and + test_flags(box, HF_LIKELY_VIRTUAL)) def new(self, box): + assert isinstance(box, RefFrontendOp) + add_flags(box, HF_LIKELY_VIRTUAL) value = self.getvalue(box) value.is_unescaped = True - value.likely_virtual = True value.seen_allocation = True def new_array(self, box, lengthbox): diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -2,7 +2,7 @@ from rpython.rtyper.lltypesystem import lltype, 
llmemory, rffi from rpython.rlib.objectmodel import we_are_translated, Symbolic from rpython.rlib.objectmodel import compute_unique_id, specialize -from rpython.rlib.rarithmetic import r_int64, is_valid_int +from rpython.rlib.rarithmetic import r_uint32, r_int64, is_valid_int from rpython.conftest import option @@ -664,7 +664,10 @@ self._resfloat = other.getfloatstorage() class RefFrontendOp(RefOp, FrontendOp): - _attrs_ = ('position', '_resref') + _attrs_ = ('position', '_resref', '_heapc_flags', '_heapc_version') + + _heapc_flags = r_uint32(0) + _heapc_version = r_uint32(0) def copy_value_from(self, other): self._resref = other.getref_base() diff --git a/rpython/jit/metainterp/test/test_heapcache.py b/rpython/jit/metainterp/test/test_heapcache.py --- a/rpython/jit/metainterp/test/test_heapcache.py +++ b/rpython/jit/metainterp/test/test_heapcache.py @@ -1,6 +1,7 @@ from rpython.jit.metainterp.heapcache import HeapCache from rpython.jit.metainterp.resoperation import rop, InputArgInt from rpython.jit.metainterp.history import ConstInt, BasicFailDescr +from rpython.jit.metainterp.history import RefFrontendOp box1 = "box1" box2 = "box2" @@ -624,6 +625,7 @@ def test_is_likely_virtual(self): h = HeapCache() + box1 = RefFrontendOp(1) h.new(box1) assert h.is_unescaped(box1) assert h.is_likely_virtual(box1) From pypy.commits at gmail.com Thu Mar 17 06:37:32 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Mar 2016 03:37:32 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: test_known_class_box. Implement the double versioning check. Message-ID: <56ea88ec.02f0c20a.c3d6b.ffff9707@mx.google.com> Author: Armin Rigo Branch: heapcache-refactor Changeset: r83099:d8bbee232b14 Date: 2016-03-17 11:36 +0100 http://bitbucket.org/pypy/pypy/changeset/d8bbee232b14/ Log: test_known_class_box. Implement the double versioning check. diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -6,6 +6,7 @@ # RefFrontendOp._heapc_flags: HF_LIKELY_VIRTUAL = 0x01 +HF_KNOWN_CLASS = 0x02 @always_inline def add_flags(ref_frontend_op, flags): @@ -22,7 +23,7 @@ @always_inline def test_flags(ref_frontend_op, flags): f = r_uint(ref_frontend_op._heapc_flags) - return bool(f & flags) + return bool(f & r_uint(flags)) class HeapCacheValue(object): @@ -31,7 +32,6 @@ self.reset_keep_likely_virtual() def reset_keep_likely_virtual(self): - self.known_class = False self.known_nullity = False # did we see the allocation during tracing? self.seen_allocation = False @@ -106,10 +106,27 @@ class HeapCache(object): def __init__(self): + # Works with flags stored on RefFrontendOp._heapc_flags. + # There are two ways to do a global resetting of these flags: + # reset() and reset_keep_likely_virtual(). The basic idea is + # to use a version number in each RefFrontendOp, and in order + # to reset the flags globally, we increment the global version + # number in this class. Then when we read '_heapc_flags' we + # also check if the associated '_heapc_version' is up-to-date + # or not. More precisely, we have two global version numbers + # here: 'head_version' and 'likely_virtual_version'. Normally + # we use 'head_version'. For is_likely_virtual() though, we + # use the other, older version number. + self.head_version = r_uint(0) + self.likely_virtual_version = r_uint(0) self.reset() - self.version = r_uint32(1) def reset(self): + # Global reset of all flags. 
Update both version numbers so + # that any access to '_heapc_flags' will be marked as outdated. + self.head_version += 1 + self.likely_virtual_version = self.head_version + # # maps boxes to HeapCacheValue self.values = {} # heap cache @@ -120,11 +137,33 @@ self.heap_array_cache = {} def reset_keep_likely_virtuals(self): + # Update only 'head_version', but 'likely_virtual_version' remains + # at its older value. + self.head_version += 1 + # for value in self.values.itervalues(): value.reset_keep_likely_virtual() self.heap_cache = {} self.heap_array_cache = {} + @always_inline + def test_head_version(self, ref_frontend_op): + return r_uint(ref_frontend_op._heapc_version) == self.head_version + + @always_inline + def test_likely_virtual_version(self, ref_frontend_op): + return (r_uint(ref_frontend_op._heapc_version) == + self.likely_virtual_version) + + def update_version(self, ref_frontend_op): + if not self.test_head_version(ref_frontend_op): + f = 0 + if (self.test_likely_virtual_version(ref_frontend_op) and + test_flags(ref_frontend_op, HF_LIKELY_VIRTUAL)): + f |= HF_LIKELY_VIRTUAL + ref_frontend_op._heapc_flags = r_uint32(f) + ref_frontend_op._heapc_version = r_uint32(self.head_version) + def getvalue(self, box, create=True): value = self.values.get(box, None) if not value and create: @@ -291,13 +330,14 @@ self.reset_keep_likely_virtuals() def is_class_known(self, box): - value = self.getvalue(box, create=False) - if value: - return value.known_class - return False + return (isinstance(box, RefFrontendOp) and + self.test_head_version(box) and + test_flags(box, HF_KNOWN_CLASS)) def class_now_known(self, box): - self.getvalue(box).known_class = True + assert isinstance(box, RefFrontendOp) + self.update_version(box) + add_flags(box, HF_KNOWN_CLASS) def is_nullity_known(self, box): value = self.getvalue(box, create=False) @@ -325,10 +365,12 @@ def is_likely_virtual(self, box): return (isinstance(box, RefFrontendOp) and - test_flags(box, HF_LIKELY_VIRTUAL)) + self.test_likely_virtual_version(box) and + test_flags(box, HF_LIKELY_VIRTUAL)) def new(self, box): assert isinstance(box, RefFrontendOp) + self.update_version(box) add_flags(box, HF_LIKELY_VIRTUAL) value = self.getvalue(box) value.is_unescaped = True diff --git a/rpython/jit/metainterp/test/test_heapcache.py b/rpython/jit/metainterp/test/test_heapcache.py --- a/rpython/jit/metainterp/test/test_heapcache.py +++ b/rpython/jit/metainterp/test/test_heapcache.py @@ -60,15 +60,17 @@ class TestHeapCache(object): def test_known_class_box(self): h = HeapCache() - assert not h.is_class_known(1) - assert not h.is_class_known(2) - h.class_now_known(1) - assert h.is_class_known(1) - assert not h.is_class_known(2) + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + assert not h.is_class_known(box1) + assert not h.is_class_known(box2) + h.class_now_known(box1) + assert h.is_class_known(box1) + assert not h.is_class_known(box2) h.reset() - assert not h.is_class_known(1) - assert not h.is_class_known(2) + assert not h.is_class_known(box1) + assert not h.is_class_known(box2) def test_known_nullity(self): h = HeapCache() From pypy.commits at gmail.com Thu Mar 17 06:44:33 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Mar 2016 03:44:33 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: test_known_nullity Message-ID: <56ea8a91.85371c0a.e566.50bc@mx.google.com> Author: Armin Rigo Branch: heapcache-refactor Changeset: r83100:669fec3af2a8 Date: 2016-03-17 11:38 +0100 http://bitbucket.org/pypy/pypy/changeset/669fec3af2a8/ Log: 
test_known_nullity diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -7,6 +7,7 @@ # RefFrontendOp._heapc_flags: HF_LIKELY_VIRTUAL = 0x01 HF_KNOWN_CLASS = 0x02 +HF_KNOWN_NULLITY = 0x04 @always_inline def add_flags(ref_frontend_op, flags): @@ -32,7 +33,6 @@ self.reset_keep_likely_virtual() def reset_keep_likely_virtual(self): - self.known_nullity = False # did we see the allocation during tracing? self.seen_allocation = False self.is_unescaped = False @@ -340,13 +340,14 @@ add_flags(box, HF_KNOWN_CLASS) def is_nullity_known(self, box): - value = self.getvalue(box, create=False) - if value: - return value.known_nullity - return False + return (isinstance(box, RefFrontendOp) and + self.test_head_version(box) and + test_flags(box, HF_KNOWN_NULLITY)) def nullity_now_known(self, box): - self.getvalue(box).known_nullity = True + assert isinstance(box, RefFrontendOp) + self.update_version(box) + add_flags(box, HF_KNOWN_NULLITY) def is_nonstandard_virtualizable(self, box): value = self.getvalue(box, create=False) diff --git a/rpython/jit/metainterp/test/test_heapcache.py b/rpython/jit/metainterp/test/test_heapcache.py --- a/rpython/jit/metainterp/test/test_heapcache.py +++ b/rpython/jit/metainterp/test/test_heapcache.py @@ -74,15 +74,17 @@ def test_known_nullity(self): h = HeapCache() - assert not h.is_nullity_known(1) - assert not h.is_nullity_known(2) - h.nullity_now_known(1) - assert h.is_nullity_known(1) - assert not h.is_nullity_known(2) + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + assert not h.is_nullity_known(box1) + assert not h.is_nullity_known(box2) + h.nullity_now_known(box1) + assert h.is_nullity_known(box1) + assert not h.is_nullity_known(box2) h.reset() - assert not h.is_nullity_known(1) - assert not h.is_nullity_known(2) + assert not h.is_nullity_known(box1) + assert not h.is_nullity_known(box2) def test_nonstandard_virtualizable(self): From pypy.commits at gmail.com Thu Mar 17 06:44:34 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Mar 2016 03:44:34 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: Test and fix for the interaction between the two families of flags Message-ID: <56ea8a92.82561c0a.7eb19.ffffcfdc@mx.google.com> Author: Armin Rigo Branch: heapcache-refactor Changeset: r83101:f99d4404477b Date: 2016-03-17 11:43 +0100 http://bitbucket.org/pypy/pypy/changeset/f99d4404477b/ Log: Test and fix for the interaction between the two families of flags diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -152,7 +152,7 @@ @always_inline def test_likely_virtual_version(self, ref_frontend_op): - return (r_uint(ref_frontend_op._heapc_version) == + return (r_uint(ref_frontend_op._heapc_version) >= self.likely_virtual_version) def update_version(self, ref_frontend_op): diff --git a/rpython/jit/metainterp/test/test_heapcache.py b/rpython/jit/metainterp/test/test_heapcache.py --- a/rpython/jit/metainterp/test/test_heapcache.py +++ b/rpython/jit/metainterp/test/test_heapcache.py @@ -639,3 +639,29 @@ h._escape_box(box1) assert not h.is_unescaped(box1) assert not h.is_likely_virtual(box1) + + def test_is_likely_virtual_2(self): + h = HeapCache() + box1 = RefFrontendOp(1) + h.new(box1) + assert h.is_unescaped(box1) + assert h.is_likely_virtual(box1) + h.reset_keep_likely_virtuals() + assert not h.is_unescaped(box1) + assert 
h.is_likely_virtual(box1) + h.reset() # reset everything + assert not h.is_unescaped(box1) + assert not h.is_likely_virtual(box1) + + def test_is_likely_virtual_3(self): + h = HeapCache() + box1 = RefFrontendOp(1) + h.new(box1) + assert h.is_unescaped(box1) + assert h.is_likely_virtual(box1) + h.reset_keep_likely_virtuals() + assert not h.is_unescaped(box1) + assert h.is_likely_virtual(box1) + h.class_now_known(box1) # interaction of the two families of flags + assert not h.is_unescaped(box1) + assert h.is_likely_virtual(box1) From pypy.commits at gmail.com Thu Mar 17 07:07:50 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Mar 2016 04:07:50 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: in-progress Message-ID: <56ea9006.02931c0a.ef1a0.ffffd5e0@mx.google.com> Author: Armin Rigo Branch: heapcache-refactor Changeset: r83102:13c3037d3ce3 Date: 2016-03-17 12:07 +0100 http://bitbucket.org/pypy/pypy/changeset/13c3037d3ce3/ Log: in-progress diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -5,9 +5,11 @@ # RefFrontendOp._heapc_flags: -HF_LIKELY_VIRTUAL = 0x01 -HF_KNOWN_CLASS = 0x02 -HF_KNOWN_NULLITY = 0x04 +HF_LIKELY_VIRTUAL = 0x01 +HF_KNOWN_CLASS = 0x02 +HF_KNOWN_NULLITY = 0x04 +HF_SEEN_ALLOCATION = 0x08 # did we see the allocation during tracing? +HF_NONSTD_VABLE = 0x10 @always_inline def add_flags(ref_frontend_op, flags): @@ -33,10 +35,7 @@ self.reset_keep_likely_virtual() def reset_keep_likely_virtual(self): - # did we see the allocation during tracing? - self.seen_allocation = False self.is_unescaped = False - self.nonstandard_virtualizable = False self.length = None self.dependencies = None @@ -45,12 +44,13 @@ class CacheEntry(object): - def __init__(self): + def __init__(self, heapcache): # both are {from_value: to_value} dicts # the first is for boxes where we did not see the allocation, the # second for anything else. the reason that distinction makes sense is # because if we saw the allocation, we know it cannot alias with # anything else where we saw the allocation. 
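# A simplified, standalone illustration of the aliasing rule described in the
# comment above (toy class and string stand-ins for boxes, not the real
# CacheEntry): a box whose allocation was traced cannot be the same object as
# any other traced allocation, so a write through it leaves the
# seen-allocation cache intact; a write through a box of unknown origin has
# to drop both caches.

class ToyCacheEntry(object):
    def __init__(self):
        self.cache_anything = {}          # boxes of unknown origin
        self.cache_seen_allocation = {}   # boxes allocated inside the trace

    def do_write_with_aliasing(self, box, fieldbox, seen_alloc):
        # the written-to object may alias any box of unknown origin
        self.cache_anything.clear()
        if not seen_alloc:
            # ...and, if its own origin is unknown, any traced allocation too
            self.cache_seen_allocation.clear()
        d = self.cache_seen_allocation if seen_alloc else self.cache_anything
        d[box] = fieldbox

    def read(self, box, seen_alloc):
        d = self.cache_seen_allocation if seen_alloc else self.cache_anything
        return d.get(box, None)

entry = ToyCacheEntry()
entry.do_write_with_aliasing('p1', 'f1', seen_alloc=True)
entry.do_write_with_aliasing('p2', 'f2', seen_alloc=True)
assert entry.read('p1', seen_alloc=True) == 'f1'   # survived the second write
entry.do_write_with_aliasing('p3', 'f3', seen_alloc=False)
assert entry.read('p1', seen_alloc=True) is None   # unknown-origin write flushed it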
+ self.heapcache = heapcache self.cache_anything = {} self.cache_seen_allocation = {} @@ -59,21 +59,26 @@ self.cache_seen_allocation.clear() self.cache_anything.clear() - def _getdict(self, value): - if value.seen_allocation: + def _seen_alloc(self, ref_box): + assert isinstance(ref_box, RefFrontendOp) + return self.heapcache._check_flag(ref_box, HF_SEEN_ALLOCATION) + + def _getdict(self, seen_alloc): + if seen_alloc: return self.cache_seen_allocation else: return self.cache_anything - def do_write_with_aliasing(self, value, fieldvalue): - self._clear_cache_on_write(value.seen_allocation) - self._getdict(value)[value] = fieldvalue + def do_write_with_aliasing(self, ref_box, fieldbox): + seen_alloc = self._seen_alloc(ref_box) + self._clear_cache_on_write(seen_alloc) + self._getdict(seen_alloc)[ref_box] = fieldbox - def read(self, value): - return self._getdict(value).get(value, None) + def read(self, ref_box): + return self._getdict(self._seen_alloc(ref_box)).get(ref_box, None) - def read_now_known(self, value, fieldvalue): - self._getdict(value)[value] = fieldvalue + def read_now_known(self, ref_box, fieldbox): + self._getdict(self._seen_alloc(ref_box))[ref_box] = fieldbox def invalidate_unescaped(self): self._invalidate_unescaped(self.cache_anything) @@ -86,22 +91,16 @@ class FieldUpdater(object): - def __init__(self, heapcache, value, cache, fieldvalue): - self.heapcache = heapcache - self.value = value + def __init__(self, ref_box, cache, fieldbox): + self.ref_box = ref_box self.cache = cache - if fieldvalue is not None: - self.currfieldbox = fieldvalue.box - else: - self.currfieldbox = None + self.currfieldbox = fieldbox # <= read directly from pyjitpl.py def getfield_now_known(self, fieldbox): - fieldvalue = self.heapcache.getvalue(fieldbox) - self.cache.read_now_known(self.value, fieldvalue) + self.cache.read_now_known(self.ref_box, fieldbox) def setfield(self, fieldbox): - fieldvalue = self.heapcache.getvalue(fieldbox) - self.cache.do_write_with_aliasing(self.value, fieldvalue) + self.cache.do_write_with_aliasing(self.ref_box, fieldbox) class HeapCache(object): @@ -329,34 +328,33 @@ return self.reset_keep_likely_virtuals() - def is_class_known(self, box): + def _check_flag(self, box, flag): return (isinstance(box, RefFrontendOp) and self.test_head_version(box) and - test_flags(box, HF_KNOWN_CLASS)) + test_flags(box, flag)) + + def _set_flag(self, box, flag): + assert isinstance(box, RefFrontendOp) + self.update_version(box) + add_flags(box, flag) + + def is_class_known(self, box): + return self._check_flag(box, HF_KNOWN_CLASS) def class_now_known(self, box): - assert isinstance(box, RefFrontendOp) - self.update_version(box) - add_flags(box, HF_KNOWN_CLASS) + self._set_flag(box, HF_KNOWN_CLASS) def is_nullity_known(self, box): - return (isinstance(box, RefFrontendOp) and - self.test_head_version(box) and - test_flags(box, HF_KNOWN_NULLITY)) + return self._check_flag(box, HF_KNOWN_NULLITY) def nullity_now_known(self, box): - assert isinstance(box, RefFrontendOp) - self.update_version(box) - add_flags(box, HF_KNOWN_NULLITY) + self._set_flag(box, HF_KNOWN_NULLITY) def is_nonstandard_virtualizable(self, box): - value = self.getvalue(box, create=False) - if value: - return value.nonstandard_virtualizable - return False + return self._check_flag(box, HF_NONSTD_VABLE) def nonstandard_virtualizables_now_known(self, box): - self.getvalue(box).nonstandard_virtualizable = True + self._set_flag(box, HF_NONSTD_VABLE) def is_unescaped(self, box): value = self.getvalue(box, create=False) @@ -372,34 
+370,29 @@ def new(self, box): assert isinstance(box, RefFrontendOp) self.update_version(box) - add_flags(box, HF_LIKELY_VIRTUAL) + add_flags(box, HF_LIKELY_VIRTUAL | HF_SEEN_ALLOCATION) value = self.getvalue(box) value.is_unescaped = True - value.seen_allocation = True def new_array(self, box, lengthbox): self.new(box) self.arraylen_now_known(box, lengthbox) def getfield(self, box, descr): - value = self.getvalue(box, create=False) - if value: - cache = self.heap_cache.get(descr, None) - if cache: - tovalue = cache.read(value) - if tovalue: - return tovalue.box + cache = self.heap_cache.get(descr, None) + if cache: + return cache.read(box) return None def get_field_updater(self, box, descr): - value = self.getvalue(box) + assert isinstance(box, RefFrontendOp) cache = self.heap_cache.get(descr, None) if cache is None: - cache = self.heap_cache[descr] = CacheEntry() - fieldvalue = None + cache = self.heap_cache[descr] = CacheEntry(self) + fieldbox = None else: - fieldvalue = cache.read(value) - return FieldUpdater(self, value, cache, fieldvalue) + fieldbox = cache.read(box) + return FieldUpdater(box, cache, fieldbox) def getfield_now_known(self, box, descr, fieldbox): upd = self.get_field_updater(box, descr) @@ -432,7 +425,7 @@ cache = self.heap_array_cache.setdefault(descr, {}) indexcache = cache.get(index, None) if indexcache is None: - cache[index] = indexcache = CacheEntry() + cache[index] = indexcache = CacheEntry(self) return indexcache diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -651,6 +651,9 @@ def get_position(self): return self.position + def __repr__(self): + return '%s(%s)' % (self.__class__.__name__, self.position) + class IntFrontendOp(IntOp, FrontendOp): _attrs_ = ('position', '_resint') diff --git a/rpython/jit/metainterp/test/test_heapcache.py b/rpython/jit/metainterp/test/test_heapcache.py --- a/rpython/jit/metainterp/test/test_heapcache.py +++ b/rpython/jit/metainterp/test/test_heapcache.py @@ -3,11 +3,6 @@ from rpython.jit.metainterp.history import ConstInt, BasicFailDescr from rpython.jit.metainterp.history import RefFrontendOp -box1 = "box1" -box2 = "box2" -box3 = "box3" -box4 = "box4" -box5 = "box5" lengthbox1 = object() lengthbox2 = object() lengthbox3 = object() @@ -89,18 +84,23 @@ def test_nonstandard_virtualizable(self): h = HeapCache() - assert not h.is_nonstandard_virtualizable(1) - assert not h.is_nonstandard_virtualizable(2) - h.nonstandard_virtualizables_now_known(1) - assert h.is_nonstandard_virtualizable(1) - assert not h.is_nonstandard_virtualizable(2) + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + assert not h.is_nonstandard_virtualizable(box1) + assert not h.is_nonstandard_virtualizable(box2) + h.nonstandard_virtualizables_now_known(box1) + assert h.is_nonstandard_virtualizable(box1) + assert not h.is_nonstandard_virtualizable(box2) h.reset() - assert not h.is_nonstandard_virtualizable(1) - assert not h.is_nonstandard_virtualizable(2) + assert not h.is_nonstandard_virtualizable(box1) + assert not h.is_nonstandard_virtualizable(box2) def test_heapcache_fields(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) assert h.getfield(box1, descr1) is None assert h.getfield(box1, descr2) is None h.setfield(box1, box2, descr1) From pypy.commits at gmail.com Thu Mar 17 07:16:34 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Mar 2016 04:16:34 -0700 (PDT) Subject: [pypy-commit] 
pypy heapcache-refactor: progress on arrays Message-ID: <56ea9212.c85b1c0a.5461b.ffffd67e@mx.google.com> Author: Armin Rigo Branch: heapcache-refactor Changeset: r83103:be27b70f8af9 Date: 2016-03-17 12:15 +0100 http://bitbucket.org/pypy/pypy/changeset/be27b70f8af9/ Log: progress on arrays diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -9,7 +9,8 @@ HF_KNOWN_CLASS = 0x02 HF_KNOWN_NULLITY = 0x04 HF_SEEN_ALLOCATION = 0x08 # did we see the allocation during tracing? -HF_NONSTD_VABLE = 0x10 +HF_IS_UNESCAPED = 0x10 +HF_NONSTD_VABLE = 0x20 @always_inline def add_flags(ref_frontend_op, flags): @@ -35,7 +36,6 @@ self.reset_keep_likely_virtual() def reset_keep_likely_virtual(self): - self.is_unescaped = False self.length = None self.dependencies = None @@ -221,12 +221,11 @@ def _escape_box(self, box): if isinstance(box, RefFrontendOp): - remove_flags(box, HF_LIKELY_VIRTUAL) + remove_flags(box, HF_LIKELY_VIRTUAL | HF_IS_UNESCAPED) # value = self.getvalue(box, create=False) if not value: return - value.is_unescaped = False deps = value.dependencies value.dependencies = None if deps is not None: @@ -357,12 +356,10 @@ self._set_flag(box, HF_NONSTD_VABLE) def is_unescaped(self, box): - value = self.getvalue(box, create=False) - if value: - return value.is_unescaped - return False + return self._check_flag(box, HF_IS_UNESCAPED) def is_likely_virtual(self, box): + # note: this is different from _check_flag() return (isinstance(box, RefFrontendOp) and self.test_likely_virtual_version(box) and test_flags(box, HF_LIKELY_VIRTUAL)) @@ -370,9 +367,7 @@ def new(self, box): assert isinstance(box, RefFrontendOp) self.update_version(box) - add_flags(box, HF_LIKELY_VIRTUAL | HF_SEEN_ALLOCATION) - value = self.getvalue(box) - value.is_unescaped = True + add_flags(box, HF_LIKELY_VIRTUAL | HF_SEEN_ALLOCATION | HF_IS_UNESCAPED) def new_array(self, box, lengthbox): self.new(box) @@ -405,17 +400,12 @@ def getarrayitem(self, box, indexbox, descr): if not isinstance(indexbox, ConstInt): return None - value = self.getvalue(box, create=False) - if value is None: - return None index = indexbox.getint() cache = self.heap_array_cache.get(descr, None) if cache: indexcache = cache.get(index, None) if indexcache is not None: - resvalue = indexcache.read(value) - if resvalue: - return resvalue.box + return indexcache.read(box) return None def _get_or_make_array_cache_entry(self, indexbox, descr): @@ -431,10 +421,9 @@ def getarrayitem_now_known(self, box, indexbox, fieldbox, descr): value = self.getvalue(box) - fieldvalue = self.getvalue(fieldbox) indexcache = self._get_or_make_array_cache_entry(indexbox, descr) if indexcache: - indexcache.read_now_known(value, fieldvalue) + indexcache.read_now_known(box, fieldbox) def setarrayitem(self, box, indexbox, fieldbox, descr): if not isinstance(indexbox, ConstInt): @@ -442,11 +431,9 @@ if cache is not None: cache.clear() return - value = self.getvalue(box) - fieldvalue = self.getvalue(fieldbox) indexcache = self._get_or_make_array_cache_entry(indexbox, descr) if indexcache: - indexcache.do_write_with_aliasing(value, fieldvalue) + indexcache.do_write_with_aliasing(box, fieldbox) def arraylen(self, box): value = self.getvalue(box, create=False) diff --git a/rpython/jit/metainterp/test/test_heapcache.py b/rpython/jit/metainterp/test/test_heapcache.py --- a/rpython/jit/metainterp/test/test_heapcache.py +++ b/rpython/jit/metainterp/test/test_heapcache.py @@ -124,6 +124,10 
@@ def test_heapcache_read_fields_multiple(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) h.getfield_now_known(box1, descr1, box2) h.getfield_now_known(box3, descr1, box4) assert h.getfield(box1, descr1) is box2 @@ -139,6 +143,10 @@ def test_heapcache_write_fields_multiple(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) h.setfield(box1, box2, descr1) assert h.getfield(box1, descr1) is box2 h.setfield(box3, box4, descr1) @@ -146,6 +154,10 @@ assert h.getfield(box1, descr1) is None # box1 and box3 can alias h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) h.new(box1) h.setfield(box1, box2, descr1) assert h.getfield(box1, descr1) is box2 @@ -154,6 +166,10 @@ assert h.getfield(box1, descr1) is None # box1 and box3 can alias h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) h.new(box1) h.new(box3) h.setfield(box1, box2, descr1) @@ -167,6 +183,10 @@ def test_heapcache_arrays(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) assert h.getarrayitem(box1, index1, descr1) is None assert h.getarrayitem(box1, index1, descr2) is None assert h.getarrayitem(box1, index2, descr1) is None @@ -209,6 +229,10 @@ def test_heapcache_array_nonconst_index(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) h.setarrayitem(box1, index1, box2, descr1) h.setarrayitem(box1, index2, box4, descr1) assert h.getarrayitem(box1, index1, descr1) is box2 @@ -219,6 +243,10 @@ def test_heapcache_read_fields_multiple_array(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) h.getarrayitem_now_known(box1, index1, box2, descr1) h.getarrayitem_now_known(box3, index1, box4, descr1) assert h.getarrayitem(box1, index1, descr1) is box2 @@ -234,6 +262,10 @@ def test_heapcache_write_fields_multiple_array(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) h.setarrayitem(box1, index1, box2, descr1) assert h.getarrayitem(box1, index1, descr1) is box2 h.setarrayitem(box3, index1, box4, descr1) @@ -241,6 +273,10 @@ assert h.getarrayitem(box1, index1, descr1) is None # box1 and box3 can alias h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) h.new(box1) h.setarrayitem(box1, index1, box2, descr1) assert h.getarrayitem(box1, index1, descr1) is box2 @@ -249,6 +285,10 @@ assert h.getarrayitem(box1, index1, descr1) is None # box1 and box3 can alias h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) h.new(box1) h.new(box3) h.setarrayitem(box1, index1, box2, descr1) @@ -262,6 +302,8 @@ def test_length_cache(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) h.new_array(box1, lengthbox1) assert h.arraylen(box1) is lengthbox1 From pypy.commits at gmail.com Thu Mar 17 07:26:29 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Mar 2016 04:26:29 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: length cache Message-ID: <56ea9465.906b1c0a.37ca4.540c@mx.google.com> Author: Armin Rigo 
Branch: heapcache-refactor Changeset: r83104:6e9ac1ae89af Date: 2016-03-17 12:25 +0100 http://bitbucket.org/pypy/pypy/changeset/6e9ac1ae89af/ Log: length cache diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -30,19 +30,6 @@ return bool(f & r_uint(flags)) -class HeapCacheValue(object): - def __init__(self, box): - self.box = box - self.reset_keep_likely_virtual() - - def reset_keep_likely_virtual(self): - self.length = None - self.dependencies = None - - def __repr__(self): - return 'HeapCacheValue(%s)' % (self.box, ) - - class CacheEntry(object): def __init__(self, heapcache): # both are {from_value: to_value} dicts @@ -162,6 +149,7 @@ f |= HF_LIKELY_VIRTUAL ref_frontend_op._heapc_flags = r_uint32(f) ref_frontend_op._heapc_version = r_uint32(self.head_version) + ref_frontend_op._heapc_deps = None def getvalue(self, box, create=True): value = self.values.get(box, None) @@ -327,6 +315,14 @@ return self.reset_keep_likely_virtuals() + def _get_deps(self, box): + if not isinstance(box, RefFrontendOp): + return None + self.update_version(box) + if box._heapc_deps is None: + box._heapc_deps = [None] + return box._heapc_deps + def _check_flag(self, box, flag): return (isinstance(box, RefFrontendOp) and self.test_head_version(box) and @@ -420,7 +416,6 @@ def getarrayitem_now_known(self, box, indexbox, fieldbox, descr): - value = self.getvalue(box) indexcache = self._get_or_make_array_cache_entry(indexbox, descr) if indexcache: indexcache.read_now_known(box, fieldbox) @@ -436,14 +431,19 @@ indexcache.do_write_with_aliasing(box, fieldbox) def arraylen(self, box): - value = self.getvalue(box, create=False) - if value and value.length: - return value.length.box + if (isinstance(box, RefFrontendOp) and + self.test_head_version(box) and + box._heapc_deps is not None): + return box._heapc_deps[0] return None def arraylen_now_known(self, box, lengthbox): - value = self.getvalue(box) - value.length = self.getvalue(lengthbox) + # we store in '_heapc_deps' a list of boxes: the *first* box is + # the known length or None, and the remaining boxes are the + # regular dependencies. 
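# A standalone sketch of the '_heapc_deps' convention described in the comment
# above (toy names, not the real RefFrontendOp/HeapCache): slot 0 of the list
# holds the cached length box (or None), everything after it is an ordinary
# dependency; the real code additionally checks a version number before
# trusting the list.

class ToyRefBox(object):
    _deps = None        # becomes [length_or_None, dep, dep, ...] lazily

def _get_deps(box):
    if box._deps is None:
        box._deps = [None]
    return box._deps

def arraylen_now_known(box, lengthbox):
    _get_deps(box)[0] = lengthbox

def arraylen(box):
    if box._deps is None:
        return None
    return box._deps[0]

def add_dependency(box, dep):
    _get_deps(box).append(dep)

b = ToyRefBox()
assert arraylen(b) is None
arraylen_now_known(b, 'len_box')
add_dependency(b, 'other_box')
assert arraylen(b) == 'len_box'
assert b._deps[1:] == ['other_box']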
+ deps = self._get_deps(box) + assert deps is not None + deps[0] = lengthbox def replace_box(self, oldbox, newbox): value = self.getvalue(oldbox, create=False) diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -667,10 +667,12 @@ self._resfloat = other.getfloatstorage() class RefFrontendOp(RefOp, FrontendOp): - _attrs_ = ('position', '_resref', '_heapc_flags', '_heapc_version') + _attrs_ = ('position', '_resref', + '_heapc_flags', '_heapc_version', '_heapc_deps') _heapc_flags = r_uint32(0) _heapc_version = r_uint32(0) + _heapc_deps = None def copy_value_from(self, other): self._resref = other.getref_base() From pypy.commits at gmail.com Thu Mar 17 07:32:11 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Mar 2016 04:32:11 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: next test Message-ID: <56ea95bb.82561c0a.7eb19.ffffe302@mx.google.com> Author: Armin Rigo Branch: heapcache-refactor Changeset: r83105:da988ce66739 Date: 2016-03-17 12:29 +0100 http://bitbucket.org/pypy/pypy/changeset/da988ce66739/ Log: next test diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -32,7 +32,7 @@ class CacheEntry(object): def __init__(self, heapcache): - # both are {from_value: to_value} dicts + # both are {from_ref_box: to_field_box} dicts # the first is for boxes where we did not see the allocation, the # second for anything else. the reason that distinction makes sense is # because if we saw the allocation, we know it cannot alias with @@ -72,9 +72,9 @@ self._invalidate_unescaped(self.cache_seen_allocation) def _invalidate_unescaped(self, d): - for value in d.keys(): - if not value.is_unescaped: - del d[value] + for ref_box in d.keys(): + if not self.heapcache._check_flag(ref_box, HF_IS_UNESCAPED): + del d[ref_box] class FieldUpdater(object): diff --git a/rpython/jit/metainterp/test/test_heapcache.py b/rpython/jit/metainterp/test/test_heapcache.py --- a/rpython/jit/metainterp/test/test_heapcache.py +++ b/rpython/jit/metainterp/test/test_heapcache.py @@ -314,6 +314,9 @@ def test_invalidate_cache(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box4 = RefFrontendOp(4) h.setfield(box1, box2, descr1) h.setarrayitem(box1, index1, box2, descr1) h.setarrayitem(box1, index2, box4, descr1) From pypy.commits at gmail.com Thu Mar 17 07:42:37 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 17 Mar 2016 04:42:37 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: merged default Message-ID: <56ea982d.e853c20a.24b0f.ffffb305@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83106:ffd85c0f09ca Date: 2016-03-17 10:48 +0100 http://bitbucket.org/pypy/pypy/changeset/ffd85c0f09ca/ Log: merged default diff too long, truncating to 2000 out of 17105 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -74,5 +74,6 @@ ^rpython/doc/_build/.*$ ^compiled ^.git/ +^.hypothesis/ ^release/ ^rpython/_cache$ diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -18,3 +18,4 @@ f3ad1e1e1d6215e20d34bb65ab85ff9188c9f559 release-2.6.1 850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0 5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1 +246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0 diff --git a/LICENSE b/LICENSE --- a/LICENSE +++ b/LICENSE @@ -240,6 +240,7 @@ Kristjan Valur Jonsson 
David Lievens Neil Blakey-Milner + Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann @@ -271,6 +272,7 @@ Aaron Tubbs Ben Darnell Roberto De Ioris + Logan Chien Juan Francisco Cantero Hurtado Ruochen Huang Jeong YunWon diff --git a/lib-python/2.7/xml/etree/ElementTree.py b/lib-python/2.7/xml/etree/ElementTree.py --- a/lib-python/2.7/xml/etree/ElementTree.py +++ b/lib-python/2.7/xml/etree/ElementTree.py @@ -1606,7 +1606,17 @@ pubid = pubid[1:-1] if hasattr(self.target, "doctype"): self.target.doctype(name, pubid, system[1:-1]) - elif self.doctype is not self._XMLParser__doctype: + elif 1: # XXX PyPy fix, used to be + # elif self.doctype is not self._XMLParser__doctype: + # but that condition is always True on CPython, as far + # as I can tell: self._XMLParser__doctype always + # returns a fresh unbound method object. + # On PyPy, unbound and bound methods have stronger + # unicity guarantees: self._XMLParser__doctype + # can return the same unbound method object, in + # some cases making the test above incorrectly False. + # (My guess would be that the line above is a backport + # from Python 3.) # warn about deprecated call self._XMLParser__doctype(name, pubid, system[1:-1]) self.doctype(name, pubid, system[1:-1]) diff --git a/lib_pypy/ctypes_config_cache/rebuild.py b/lib_pypy/ctypes_config_cache/rebuild.py --- a/lib_pypy/ctypes_config_cache/rebuild.py +++ b/lib_pypy/ctypes_config_cache/rebuild.py @@ -9,9 +9,8 @@ _dirpath = os.path.dirname(__file__) or os.curdir -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer("ctypes_config_cache") -py.log.setconsumer("ctypes_config_cache", ansi_log) +from rpython.tool.ansi_print import AnsiLogger +log = AnsiLogger("ctypes_config_cache") def rebuild_one(name): diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py --- a/pypy/doc/conf.py +++ b/pypy/doc/conf.py @@ -123,7 +123,7 @@ # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +html_title = 'PyPy documentation' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst --- a/pypy/doc/contributor.rst +++ b/pypy/doc/contributor.rst @@ -11,29 +11,29 @@ Amaury Forgeot d'Arc Antonio Cuni Samuele Pedroni + Matti Picus Alex Gaynor Brian Kearns - Matti Picus Philip Jenvey Michael Hudson + Ronan Lamy David Schneider + Manuel Jacob Holger Krekel Christian Tismer Hakan Ardo - Manuel Jacob - Ronan Lamy Benjamin Peterson + Richard Plangger Anders Chrigstrom Eric van Riet Paap Wim Lavrijsen - Richard Plangger Richard Emslie Alexander Schremmer Dan Villiom Podlaski Christiansen + Remi Meier Lukas Diekmann Sven Hager Anders Lehmann - Remi Meier Aurelien Campeas Niklaus Haldimann Camillo Bruni @@ -42,8 +42,8 @@ Romain Guillebert Leonardo Santagada Seo Sanghyeon + Ronny Pfannschmidt Justin Peel - Ronny Pfannschmidt David Edelsohn Anders Hammarquist Jakub Gustak @@ -65,6 +65,7 @@ Tyler Wade Michael Foord Stephan Diehl + Vincent Legoll Stefan Schwarzer Valentino Volonghi Tomek Meka @@ -75,9 +76,9 @@ Jean-Paul Calderone Timo Paulssen Squeaky + Marius Gedminas Alexandre Fayolle Simon Burton - Marius Gedminas Martin Matusiak Konstantin Lopuhin Wenzhu Man @@ -86,16 +87,20 @@ Ivan Sichmann Freitas Greg Price Dario Bertini + Stefano Rivera Mark Pearse Simon Cross Andreas Stührk - Stefano Rivera + Edd Barrett Jean-Philippe St. 
Pierre Guido van Rossum Pavel Vinogradov + Jeremy Thurgood Paweł Piotr Przeradowski + Spenser Bauman Paul deGrandis Ilya Osadchiy + marky1991 Tobias Oberstein Adrian Kuhn Boris Feigin @@ -104,14 +109,12 @@ Georg Brandl Bert Freudenberg Stian Andreassen - Edd Barrett + Tobias Pape Wanja Saatkamp Gerald Klix Mike Blume - Tobias Pape Oscar Nierstrasz Stefan H. Muller - Jeremy Thurgood Rami Chowdhury Eugene Oden Henry Mason @@ -123,6 +126,8 @@ Lukas Renggli Guenter Jantzen Ned Batchelder + Tim Felgentreff + Anton Gulenko Amit Regmi Ben Young Nicolas Chauvat @@ -132,12 +137,12 @@ Nicholas Riley Jason Chu Igor Trindade Oliveira - Tim Felgentreff + Yichao Yu Rocco Moretti Gintautas Miliauskas Michael Twomey Lucian Branescu Mihaila - Yichao Yu + Devin Jeanpierre Gabriel Lavoie Olivier Dormond Jared Grubb @@ -161,33 +166,33 @@ Stanislaw Halik Mikael Schönenberg Berkin Ilbeyi - Elmo M?ntynen + Elmo Mäntynen + Faye Zhao Jonathan David Riehl Anders Qvist Corbin Simpson Chirag Jadwani Beatrice During Alex Perry - Vincent Legoll + Vaibhav Sood Alan McIntyre - Spenser Bauman + William Leslie Alexander Sedov Attila Gobi + Jasper.Schulz Christopher Pope - Devin Jeanpierre - Vaibhav Sood Christian Tismer Marc Abramowitz Dan Stromberg Arjun Naik Valentina Mukhamedzhanova Stefano Parmesan + Mark Young Alexis Daboville Jens-Uwe Mager Carl Meyer Karl Ramm Pieter Zieschang - Anton Gulenko Gabriel Lukas Vacek Andrew Dalke @@ -195,6 +200,7 @@ Jakub Stasiak Nathan Taylor Vladimir Kryachko + Omer Katz Jacek Generowicz Alejandro J. Cura Jacob Oscarson @@ -204,11 +210,13 @@ Kristjan Valur Jonsson David Lievens Neil Blakey-Milner + Sergey Matyunin Lutz Paelike Lucio Torre Lars Wassermann Philipp Rustemeuer Henrik Vendelbo + Richard Lancaster Dan Buch Miguel de Val Borro Artur Lisiecki @@ -220,20 +228,21 @@ Tomo Cocoa Kim Jin Su Toni Mattis + Amber Brown Lucas Stadler Julian Berman Markus Holtermann roberto at goyle Yury V. Zaytsev Anna Katrina Dominguez - William Leslie Bobby Impollonia - Faye Zhao timo at eistee.fritz.box Andrew Thompson Yusei Tahara + Aaron Tubbs Ben Darnell Roberto De Ioris + Logan Chien Juan Francisco Cantero Hurtado Ruochen Huang Jeong YunWon @@ -243,6 +252,7 @@ Christopher Armstrong Michael Hudson-Doyle Anders Sigfridsson + Nikolay Zinov Yasir Suhail Jason Michalski rafalgalczynski at gmail.com @@ -252,6 +262,7 @@ Gustavo Niemeyer Stephan Busemann Rafał Gałczyński + Matt Bogosian Christian Muirhead Berker Peksag James Lan @@ -286,9 +297,9 @@ Stefan Marr jiaaro Mads Kiilerich - Richard Lancaster opassembler.py Antony Lee + Jason Madden Yaroslav Fedevych Jim Hunziker Markus Unterwaditzer @@ -297,6 +308,7 @@ squeaky Zearin soareschen + Jonas Pfannschmidt Kurt Griffiths Mike Bayer Matthew Miller @@ -311,4 +323,3 @@ Julien Phalip Roman Podoliaka Dan Loewenherz - diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst --- a/pypy/doc/cpython_differences.rst +++ b/pypy/doc/cpython_differences.rst @@ -265,7 +265,7 @@ return False def evil(y): - d = {x(): 1} + d = {X(): 1} X.__eq__ = __evil_eq__ d[y] # might trigger a call to __eq__? diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -80,7 +80,7 @@ .. _How to *not* write Virtual Machines for Dynamic Languages: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dyla2007/dyla.pdf .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf .. 
_`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf -.. _Automatic JIT Compiler Generation with Runtime Partial Evaluation: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf +.. _Automatic JIT Compiler Generation with Runtime Partial Evaluation: http://stups.hhu.de/mediawiki/images/b/b9/Master_bolz.pdf .. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/DynamicLanguages_abstracts.html#AACM-DLS07 .. _EU Reports: index-report.html .. _Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution: http://sabi.net/nriley/pubs/dls6-riley.pdf diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -76,5 +76,4 @@ * add a tag on the pypy/jitviewer repo that corresponds to pypy release * add a tag on the codespeed web site that corresponds to pypy release -* update the version number in {rpython,pypy}/doc/conf.py. * revise versioning at https://readthedocs.org/projects/pypy diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-5.0.0.rst release-4.0.1.rst release-4.0.0.rst release-2.6.1.rst diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst --- a/pypy/doc/index-of-whatsnew.rst +++ b/pypy/doc/index-of-whatsnew.rst @@ -7,6 +7,7 @@ .. toctree:: whatsnew-head.rst + whatsnew-5.0.0.rst whatsnew-4.0.1.rst whatsnew-4.0.0.rst whatsnew-2.6.1.rst diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -167,22 +167,13 @@ * `hg` -Embedding PyPy and improving CFFI ---------------------------------- - -PyPy has some basic :doc:`embedding infrastructure `. The idea would be to improve -upon that with cffi hacks that can automatically generate embeddable .so/.dll -library - - Optimising cpyext (CPython C-API compatibility layer) ----------------------------------------------------- A lot of work has gone into PyPy's implementation of CPython's C-API over the last years to let it reach a practical level of compatibility, so that C extensions for CPython work on PyPy without major rewrites. However, -there are still many edges and corner cases where it misbehaves, and it has -not received any substantial optimisation so far. +there are still many edges and corner cases where it misbehaves. The objective of this project is to fix bugs in cpyext and to optimise several performance critical parts of it, such as the reference counting diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-5.0.0.rst @@ -0,0 +1,230 @@ +======== +PyPy 5.0 +======== + +We have released PyPy 5.0, about three months after PyPy 4.0.1. +We encourage all users of PyPy to update to this version. Apart from the usual +bug fixes, there is an ongoing effort to improve the warmup time and memory +usage of JIT-related metadata. The exact effects depend vastly on the program +you're running and can range from insignificant to warmup being up to 30% +faster and memory dropping by about 30%. 
+ +We also merged a major upgrade to our C-API layer (cpyext), simplifying the +interaction between c-level objects and PyPy interpreter level objects. As a +result, lxml (prerelease) with its cython compiled component +`passes all tests`_ on PyPy. The new cpyext is also much faster. + +vmprof_ has been a go-to profiler for PyPy on linux for a few releases +and we're happy to announce that thanks to the cooperation with jetbrains, +vmprof now works on Linux, OS X and Windows on both PyPy and CPython. + +You can download the PyPy 5.0 release here: + + http://pypy.org/download.html + +We would like to thank our donors for the continued support of the PyPy +project. + +We would also like to thank our contributors and +encourage new people to join the project. PyPy has many +layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation +improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ +with making RPython's JIT even better. + +CFFI +==== + +While not applicable only to PyPy, `cffi`_ is arguably our most significant +contribution to the python ecosystem. PyPy 5.0 ships with +`cffi-1.5.2`_ which now allows embedding PyPy (or cpython) in a C program. + +.. _`PyPy`: http://doc.pypy.org +.. _`RPython`: https://rpython.readthedocs.org +.. _`cffi`: https://cffi.readthedocs.org +.. _`cffi-1.5.2`: http://cffi.readthedocs.org/en/latest/whatsnew.html#v1-5-2 +.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly +.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html +.. _`numpy`: https://bitbucket.org/pypy/numpy +.. _`passes all tests`: https://bitbucket.org/pypy/compatibility/wiki/lxml +.. _vmprof: http://vmprof.readthedocs.org + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. + +This release supports **x86** machines on most common operating systems +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), +newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the +big- and little-endian variants of **PPC64** running Linux. + +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Other Highlights (since 4.0.1 released in November 2015) +========================================================= + +* New features: + + * Support embedding PyPy in a C-program via cffi and static callbacks in cffi. + This deprecates the old method of embedding PyPy + + * Refactor vmprof to work cross-operating-system, deprecate using buggy + libunwind on Linux platforms. Vmprof even works on Windows now. + + * Support more of the C-API type slots, like tp_getattro, and fix C-API + macros, functions, and structs such as _PyLong_FromByteArray(), + PyString_GET_SIZE, f_locals in PyFrameObject, Py_NAN, co_filename in + PyCodeObject + + * Use a more stable approach for allocating PyObjects in cpyext. (see + `blog post`_). Once the PyObject corresponding to a PyPy object is created, + it stays around at the same location until the death of the PyPy object. + Done with a little bit of custom GC support. 
It allows us to kill the + notion of "borrowing" inside cpyext, reduces 4 dictionaries down to 1, and + significantly simplifies the whole approach (which is why it is a new + feature while technically a refactoring) and allows PyPy to support the + populart lxml module (as of the *next* release) with no PyPy specific + patches needed + + * Make the default filesystem encoding ASCII, like CPython + + * Use `hypothesis`_ in test creation, which is great for randomizing tests + +* Bug Fixes + + * Backport always using os.urandom for uuid4 from cpython and fix the JIT as well + (issue #2202) + + * More completely support datetime, optimize timedelta creation + + * Fix for issue #2185 which caused an inconsistent list of operations to be + generated by the unroller, appeared in a complicated DJango app + + * Fix an elusive issue with stacklets on shadowstack which showed up when + forgetting stacklets without resuming them + + * Fix entrypoint() which now acquires the GIL + + * Fix direct_ffi_call() so failure does not bail out before setting CALL_MAY_FORCE + + * Fix (de)pickling long values by simplifying the implementation + + * Fix RPython rthread so that objects stored as threadlocal do not force minor + GC collection and are kept alive automatically. This improves perfomance of + short-running Python callbacks and prevents resetting such object between + calls + + * Support floats as parameters to itertools.isslice() + + * Check for the existence of CODESET, ignoring it should have prevented PyPy + from working on FreeBSD + + * Fix for corner case (likely shown by Krakatau) for consecutive guards with + interdependencies + + * Fix applevel bare class method comparisons which should fix pretty printing + in IPython + + * Issues reported with our previous release were resolved_ after reports from users on + our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at + #pypy + +* Numpy: + + * Updates to numpy 1.10.2 (incompatibilities and not-implemented features + still exist) + + * Support dtype=(('O', spec)) union while disallowing record arrays with + mixed object, non-object values + + * Remove all traces of micronumpy from cpyext if --withoutmod-micronumpy option used + + * Support indexing filtering with a boolean ndarray + + * Support partition() as an app-level function, together with a cffi wrapper + in pypy/numpy, this now provides partial support for partition() + +* Performance improvements: + + * Optimize global lookups + + * Improve the memory signature of numbering instances in the JIT. This should + massively decrease the amount of memory consumed by the JIT, which is + significant for most programs. Also compress the numberings using variable- + size encoding + + * Optimize string concatenation + + * Use INT_LSHIFT instead of INT_MUL when possible + + * Improve struct.unpack by casting directly from the underlying buffer. + Unpacking floats and doubles is about 15 times faster, and integer types + about 50% faster (on 64 bit integers). This was then subsequently + improved further in optimizeopt.py. 
+ + * Optimize two-tuple lookups in mapdict, which improves warmup of instance + variable access somewhat + + * Reduce all guards from int_floordiv_ovf if one of the arguments is constant + + * Identify permutations of attributes at instance creation, reducing the + number of bridges created + + * Greatly improve re.sub() performance + + +* Internal refactorings: + + * Refactor and improve exception analysis in the annotator + + * Remove unnecessary special handling of space.wrap(). + + * Support list-resizing setslice operations in RPython + + * Tweak the trace-too-long heuristic for multiple jit drivers + + * Refactor bookkeeping (such a cool word - three double letters) in the + annotater + + * Refactor wrappers for OS functions from rtyper to rlib and simplify them + + * Simplify backend loading instructions to only use four variants + + * Simplify GIL handling in non-jitted code + + * Refactor naming in optimizeopt + + * Change GraphAnalyzer to use a more precise way to recognize external + functions and fix null pointer handling, generally clean up external + function handling + + * Remove pure variants of ``getfield_gc_*`` operations from the JIT by + determining purity while tracing + + * Refactor databasing + + * Simplify bootstrapping in cpyext + + * Refactor rtyper debug code into python.rtyper.debug + + * Seperate structmember.h from Python.h Also enhance creating api functions + to specify which header file they appear in (previously only pypy_decl.h) + + * Fix tokenizer to enforce universal newlines, needed for Python 3 support + +.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html +.. _`hypothesis`: http://hypothesis.readthedocs.org +.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html + +Please update, and continue to help us make PyPy better. + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-5.0.0.rst copy from pypy/doc/whatsnew-head.rst copy to pypy/doc/whatsnew-5.0.0.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-5.0.0.rst @@ -1,6 +1,6 @@ -========================= -What's new in PyPy 4.1.+ -========================= +====================== +What's new in PyPy 5.0 +====================== .. this is a revision shortly after release-4.0.1 .. startrev: 4b5c840d0da2 @@ -183,4 +183,15 @@ .. branch: vlen-resume -Compress resume data, saving 10-20% of memory consumed by the JIT \ No newline at end of file +Compress resume data, saving 10-20% of memory consumed by the JIT + +.. branch: issue-2248 + +.. branch: ndarray-setitem-filtered + +Fix boolean-array indexing in micronumpy + +.. branch: numpy_partition +Support ndarray.partition() as an app-level function numpy.core._partition_use, +provided as a cffi wrapper to upstream's implementation in the pypy/numpy repo + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -1,186 +1,25 @@ ========================= -What's new in PyPy 4.1.+ +What's new in PyPy 5.0.+ ========================= -.. this is a revision shortly after release-4.0.1 -.. startrev: 4b5c840d0da2 +.. this is a revision shortly after release-5.0 +.. startrev: b238b48f9138 -Fixed ``_PyLong_FromByteArray()``, which was buggy. +.. branch: s390x-backend -Fixed a crash with stacklets (or greenlets) on non-Linux machines -which showed up if you forget stacklets without resuming them. +The jit compiler backend implementation for the s390x architecutre. 
+The backend manages 64-bit values in the literal pool of the assembly instead of loading them as immediates. +It includes a simplification for the operation 'zero_array'. Start and length parameters are bytes instead of size. -.. branch: numpy-1.10 +.. branch: remove-py-log -Fix tests to run cleanly with -A and start to fix micronumpy for upstream numpy -which is now 1.10.2 +Replace py.log with something simpler, which should speed up logging -.. branch: osx-flat-namespace +.. branch: where_1_arg -Fix the cpyext tests on OSX by linking with -flat_namespace +Implemented numpy.where for 1 argument (thanks sergem) -.. branch: anntype +.. branch: fix_indexing_by_numpy_int -Refactor and improve exception analysis in the annotator. - -.. branch: posita/2193-datetime-timedelta-integrals - -Fix issue #2193. ``isinstance(..., int)`` => ``isinstance(..., numbers.Integral)`` -to allow for alternate ``int``-like implementations (e.g., ``future.types.newint``) - -.. branch: faster-rstruct - -Improve the performace of struct.unpack, which now directly reads inside the -string buffer and directly casts the bytes to the appropriate type, when -allowed. Unpacking of floats and doubles is about 15 times faster now, while -for integer types it's up to ~50% faster for 64bit integers. - -.. branch: wrap-specialisation - -Remove unnecessary special handling of space.wrap(). - -.. branch: compress-numbering - -Improve the memory signature of numbering instances in the JIT. This should massively -decrease the amount of memory consumed by the JIT, which is significant for most programs. - -.. branch: fix-trace-too-long-heuristic - -Improve the heuristic when disable trace-too-long - -.. branch: fix-setslice-can-resize - -Make rlist's ll_listsetslice() able to resize the target list to help -simplify objspace/std/listobject.py. Was issue #2196. - -.. branch: anntype2 - -A somewhat random bunch of changes and fixes following up on branch 'anntype'. Highlights: - -- Implement @doubledispatch decorator and use it for intersection() and difference(). - -- Turn isinstance into a SpaceOperation - -- Create a few direct tests of the fundamental annotation invariant in test_model.py - -- Remove bookkeeper attribute from DictDef and ListDef. - -.. branch: cffi-static-callback - -.. branch: vecopt-absvalue - -- Enhancement. Removed vector fields from AbstractValue. - -.. branch: memop-simplify2 - -Simplification. Backends implement too many loading instructions, only having a slightly different interface. -Four new operations (gc_load/gc_load_indexed, gc_store/gc_store_indexed) replace all the -commonly known loading operations - -.. branch: more-rposix - -Move wrappers for OS functions from `rpython/rtyper` to `rpython/rlib` and -turn them into regular RPython functions. Most RPython-compatible `os.*` -functions are now directly accessible as `rpython.rposix.*`. - -.. branch: always-enable-gil - -Simplify a bit the GIL handling in non-jitted code. Fixes issue #2205. - -.. branch: flowspace-cleanups - -Trivial cleanups in flowspace.operation : fix comment & duplicated method - -.. branch: test-AF_NETLINK - -Add a test for pre-existing AF_NETLINK support. Was part of issue #1942. - -.. branch: small-cleanups-misc - -Trivial misc cleanups: typo, whitespace, obsolete comments - -.. branch: cpyext-slotdefs -.. branch: fix-missing-canraise -.. branch: whatsnew - -.. branch: fix-2211 - -Fix the cryptic exception message when attempting to use extended slicing -in rpython. Was issue #2211. - -.. 
branch: ec-keepalive - -Optimize the case where, in a new C-created thread, we keep invoking -short-running Python callbacks. (CFFI on CPython has a hack to achieve -the same result.) This can also be seen as a bug fix: previously, -thread-local objects would be reset between two such calls. - -.. branch: globals-quasiimmut - -Optimize global lookups. - -.. branch: cffi-static-callback-embedding - -Updated to CFFI 1.5, which supports a new way to do embedding. -Deprecates http://pypy.readthedocs.org/en/latest/embedding.html. - -.. branch: fix-cpython-ssl-tests-2.7 - -Fix SSL tests by importing cpython's patch - - -.. branch: remove-getfield-pure - -Remove pure variants of ``getfield_gc_*`` operations from the JIT. Relevant -optimizations instead consult the field descriptor to determine the purity of -the operation. Additionally, pure ``getfield`` operations are now handled -entirely by `rpython/jit/metainterp/optimizeopt/heap.py` rather than -`rpython/jit/metainterp/optimizeopt/pure.py`, which can result in better codegen -for traces containing a large number of pure getfield operations. - -.. branch: exctrans - -Try to ensure that no new functions get annotated during the 'source_c' phase. -Refactor sandboxing to operate at a higher level. - -.. branch: cpyext-bootstrap - -.. branch: vmprof-newstack - -Refactor vmprof to work cross-operating-system. - -.. branch: seperate-strucmember_h - -Seperate structmember.h from Python.h Also enhance creating api functions -to specify which header file they appear in (previously only pypy_decl.h) - -.. branch: llimpl - -Refactor register_external(), remove running_on_llinterp mechanism and -apply sandbox transform on externals at the end of annotation. - -.. branch: cffi-embedding-win32 - -.. branch: windows-vmprof-support - -vmprof should work on Windows. - - -.. branch: reorder-map-attributes - -When creating instances and adding attributes in several different orders -depending on some condition, the JIT would create too much code. This is now -fixed. - -.. branch: cpyext-gc-support-2 - -Improve CPython C API support, which means lxml now runs unmodified -(after removing pypy hacks, pending pull request) - -.. branch: look-inside-tuple-hash - -Look inside tuple hash, improving mdp benchmark - -.. 
branch: vlen-resume - -Compress resume data, saving 10-20% of memory consumed by the JIT \ No newline at end of file +Implement yet another strange numpy indexing compatibility; indexing by a scalar +returns a scalar diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py --- a/pypy/interpreter/function.py +++ b/pypy/interpreter/function.py @@ -13,6 +13,9 @@ from pypy.interpreter.argument import Arguments from rpython.rlib import jit +from rpython.rlib.rarithmetic import LONG_BIT +from rpython.rlib.rbigint import rbigint + funccallunrolling = unrolling_iterable(range(4)) @@ -557,6 +560,26 @@ return space.w_False return space.eq(self.w_function, w_other.w_function) + def is_w(self, space, other): + if not isinstance(other, Method): + return False + return (self.w_instance is other.w_instance and + self.w_function is other.w_function and + self.w_class is other.w_class) + + def immutable_unique_id(self, space): + from pypy.objspace.std.util import IDTAG_METHOD as tag + from pypy.objspace.std.util import IDTAG_SHIFT + if self.w_instance is not None: + id = space.bigint_w(space.id(self.w_instance)) + id = id.lshift(LONG_BIT) + else: + id = rbigint.fromint(0) + id = id.or_(space.bigint_w(space.id(self.w_function))) + id = id.lshift(LONG_BIT).or_(space.bigint_w(space.id(self.w_class))) + id = id.lshift(IDTAG_SHIFT).int_or_(tag) + return space.newlong_from_rbigint(id) + def descr_method_hash(self): space = self.space w_result = space.hash(self.w_function) diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py --- a/pypy/interpreter/mixedmodule.py +++ b/pypy/interpreter/mixedmodule.py @@ -3,7 +3,7 @@ from pypy.interpreter import gateway from pypy.interpreter.error import OperationError from pypy.interpreter.baseobjspace import W_Root -import os, sys +import sys class MixedModule(Module): applevel_name = None @@ -60,7 +60,7 @@ def save_module_content_for_future_reload(self): self.w_initialdict = self.space.call_method(self.w_dict, 'items') - + @classmethod def get_applevel_name(cls): """ NOT_RPYTHON """ if cls.applevel_name is not None: @@ -68,7 +68,6 @@ else: pkgroot = cls.__module__ return pkgroot.split('.')[-1] - get_applevel_name = classmethod(get_applevel_name) def get(self, name): space = self.space @@ -103,7 +102,7 @@ # be normal Functions to get the correct binding behaviour func = w_value if (isinstance(func, Function) and - type(func) is not BuiltinFunction): + type(func) is not BuiltinFunction): try: bltin = func._builtinversion_ except AttributeError: @@ -115,7 +114,6 @@ space.setitem(self.w_dict, w_name, w_value) return w_value - def getdict(self, space): if self.lazy: for name in self.loaders: @@ -131,6 +129,7 @@ self.startup_called = False self._frozen = True + @classmethod def buildloaders(cls): """ NOT_RPYTHON """ if not hasattr(cls, 'loaders'): @@ -149,8 +148,6 @@ if '__doc__' not in loaders: loaders['__doc__'] = cls.get__doc__ - buildloaders = classmethod(buildloaders) - def extra_interpdef(self, name, spec): cls = self.__class__ pkgroot = cls.__module__ @@ -159,21 +156,21 @@ w_obj = loader(space) space.setattr(space.wrap(self), space.wrap(name), w_obj) + @classmethod def get__doc__(cls, space): return space.wrap(cls.__doc__) - get__doc__ = classmethod(get__doc__) def getinterpevalloader(pkgroot, spec): """ NOT_RPYTHON """ def ifileloader(space): - d = {'space' : space} + d = {'space':space} # EVIL HACK (but it works, and this is not RPython :-) while 1: try: value = eval(spec, d) except NameError, ex: - name = ex.args[0].split("'")[1] # 
super-Evil + name = ex.args[0].split("'")[1] # super-Evil if name in d: raise # propagate the NameError try: diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py --- a/pypy/interpreter/test/test_app_main.py +++ b/pypy/interpreter/test/test_app_main.py @@ -9,6 +9,11 @@ from pypy.conftest import pypydir from lib_pypy._pypy_interact import irc_header +try: + import __pypy__ +except ImportError: + __pypy__ = None + banner = sys.version.splitlines()[0] app_main = os.path.join(os.path.realpath(os.path.dirname(__file__)), os.pardir, 'app_main.py') @@ -106,6 +111,8 @@ sys.argv[:] = saved_sys_argv sys.stdout = saved_sys_stdout sys.stderr = saved_sys_stderr + if __pypy__: + __pypy__.set_debug(True) def test_all_combinations_I_can_think_of(self): self.check([], {}, sys_argv=[''], run_stdin=True) @@ -133,7 +140,7 @@ self.check(['-S', '-tO', '--info'], {}, output_contains='translation') self.check(['-S', '-tO', '--version'], {}, output_contains='Python') self.check(['-S', '-tOV'], {}, output_contains='Python') - self.check(['--jit', 'foobar', '-S'], {}, sys_argv=[''], + self.check(['--jit', 'off', '-S'], {}, sys_argv=[''], run_stdin=True, no_site=1) self.check(['-c', 'pass'], {}, sys_argv=['-c'], run_command='pass') self.check(['-cpass'], {}, sys_argv=['-c'], run_command='pass') @@ -601,9 +608,7 @@ def run_with_status_code(self, cmdline, senddata='', expect_prompt=False, expect_banner=False, python_flags='', env=None): if os.name == 'nt': - try: - import __pypy__ - except: + if __pypy__ is None: py.test.skip('app_main cannot run on non-pypy for windows') cmdline = '%s %s "%s" %s' % (sys.executable, python_flags, app_main, cmdline) diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py --- a/pypy/interpreter/test/test_function.py +++ b/pypy/interpreter/test/test_function.py @@ -1,5 +1,4 @@ - -import unittest +import pytest from pypy.interpreter import eval from pypy.interpreter.function import Function, Method, descr_function_get from pypy.interpreter.pycode import PyCode @@ -187,6 +186,7 @@ raises( TypeError, func, 42, {'arg1': 23}) + @pytest.mark.skipif("config.option.runappdirect") def test_kwargs_nondict_mapping(self): class Mapping: def keys(self): @@ -257,6 +257,14 @@ meth = func.__get__(obj, object) assert meth() == obj + def test_none_get_interaction(self): + skip("XXX issue #2083") + assert type(None).__repr__(None) == 'None' + + def test_none_get_interaction_2(self): + f = None.__repr__ + assert f() == 'None' + def test_no_get_builtin(self): assert not hasattr(dir, '__get__') class A(object): @@ -284,6 +292,7 @@ raises(TypeError, len, s, some_unknown_keyword=s) raises(TypeError, len, s, s, some_unknown_keyword=s) + @pytest.mark.skipif("config.option.runappdirect") def test_call_error_message(self): try: len() @@ -325,6 +334,7 @@ f = lambda: 42 assert f.func_doc is None + @pytest.mark.skipif("config.option.runappdirect") def test_setstate_called_with_wrong_args(self): f = lambda: 42 # not sure what it should raise, since CPython doesn't have setstate @@ -550,6 +560,37 @@ assert A().m == X() assert X() == A().m + @pytest.mark.skipif("config.option.runappdirect") + def test_method_identity(self): + class A(object): + def m(self): + pass + def n(self): + pass + + class B(A): + pass + + class X(object): + def __eq__(self, other): + return True + + a = A() + a2 = A() + assert a.m is a.m + assert id(a.m) == id(a.m) + assert a.m is not a.n + assert id(a.m) != id(a.n) + assert a.m is not a2.m + assert id(a.m) != id(a2.m) + + 
assert A.m is A.m + assert id(A.m) == id(A.m) + assert A.m is not A.n + assert id(A.m) != id(A.n) + assert A.m is not B.m + assert id(A.m) != id(B.m) + class TestMethod: def setup_method(self, method): diff --git a/pypy/module/__builtin__/descriptor.py b/pypy/module/__builtin__/descriptor.py --- a/pypy/module/__builtin__/descriptor.py +++ b/pypy/module/__builtin__/descriptor.py @@ -79,6 +79,7 @@ W_Super.typedef = TypeDef( 'super', __new__ = interp2app(descr_new_super), + __thisclass__ = interp_attrproperty_w("w_starttype", W_Super), __getattribute__ = interp2app(W_Super.getattribute), __get__ = interp2app(W_Super.get), __doc__ = """super(type) -> unbound super object diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py --- a/pypy/module/__builtin__/test/test_classobj.py +++ b/pypy/module/__builtin__/test/test_classobj.py @@ -452,7 +452,6 @@ assert a + 1 == 2 assert a + 1.1 == 2 - def test_binaryop_calls_coerce_always(self): l = [] class A: @@ -1076,6 +1075,16 @@ assert (D() > A()) == 'D:A.gt' assert (D() >= A()) == 'D:A.ge' + def test_override___int__(self): + class F(float): + def __int__(self): + return 666 + f = F(-12.3) + assert int(f) == 666 + # on cpython, this calls float_trunc() in floatobject.c + # which ends up calling PyFloat_AS_DOUBLE((PyFloatObject*) f) + assert float.__int__(f) == -12 + class AppTestOldStyleClassBytesDict(object): def setup_class(cls): diff --git a/pypy/module/__builtin__/test/test_descriptor.py b/pypy/module/__builtin__/test/test_descriptor.py --- a/pypy/module/__builtin__/test/test_descriptor.py +++ b/pypy/module/__builtin__/test/test_descriptor.py @@ -214,7 +214,7 @@ c = C() assert C.goo(1) == (C, 1) assert c.goo(1) == (C, 1) - + assert c.foo(1) == (c, 1) class D(C): pass @@ -238,6 +238,17 @@ meth = classmethod(1).__get__(1) raises(TypeError, meth) + def test_super_thisclass(self): + class A(object): + pass + + assert super(A, A()).__thisclass__ is A + + class B(A): + pass + + assert super(B, B()).__thisclass__ is B + assert super(A, B()).__thisclass__ is A def test_property_docstring(self): assert property.__doc__.startswith('property') diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -2,7 +2,6 @@ from pypy.module.thread.test.support import GenericTestThread - class AppTestMinimal: spaceconfig = dict(usemodules=['__pypy__']) diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -353,10 +353,11 @@ # ____________________________________________________________ -rffi_fdopen = rffi.llexternal("fdopen", [rffi.INT, rffi.CCHARP], rffi.CCHARP, +FILEP = rffi.COpaquePtr("FILE") +rffi_fdopen = rffi.llexternal("fdopen", [rffi.INT, rffi.CCHARP], FILEP, save_err=rffi.RFFI_SAVE_ERRNO) -rffi_setbuf = rffi.llexternal("setbuf", [rffi.CCHARP, rffi.CCHARP], lltype.Void) -rffi_fclose = rffi.llexternal("fclose", [rffi.CCHARP], rffi.INT) +rffi_setbuf = rffi.llexternal("setbuf", [FILEP, rffi.CCHARP], lltype.Void) +rffi_fclose = rffi.llexternal("fclose", [FILEP], rffi.INT) class CffiFileObj(object): _immutable_ = True @@ -382,4 +383,4 @@ fileobj.cffi_fileobj = CffiFileObj(fd, fileobj.mode) except OSError, e: raise wrap_oserror(space, e) - return fileobj.cffi_fileobj.llf + return rffi.cast(rffi.CCHARP, fileobj.cffi_fileobj.llf) diff --git 
a/pypy/module/_collections/app_defaultdict.py b/pypy/module/_collections/app_defaultdict.py --- a/pypy/module/_collections/app_defaultdict.py +++ b/pypy/module/_collections/app_defaultdict.py @@ -12,6 +12,7 @@ class defaultdict(dict): __slots__ = ['default_factory'] + __module__ = 'collections' def __init__(self, *args, **kwds): if len(args) > 0: diff --git a/pypy/module/_collections/test/test_defaultdict.py b/pypy/module/_collections/test/test_defaultdict.py --- a/pypy/module/_collections/test/test_defaultdict.py +++ b/pypy/module/_collections/test/test_defaultdict.py @@ -14,6 +14,12 @@ d[5].append(44) assert l == [42, 43] and l2 == [44] + def test_module(self): + from _collections import defaultdict + assert repr(defaultdict) in ( + "", # on PyPy + "") # on CPython + def test_keyerror_without_factory(self): from _collections import defaultdict for d1 in [defaultdict(), defaultdict(None)]: diff --git a/pypy/module/_file/test/test_file_extra.py b/pypy/module/_file/test/test_file_extra.py --- a/pypy/module/_file/test/test_file_extra.py +++ b/pypy/module/_file/test/test_file_extra.py @@ -389,6 +389,7 @@ def test_writelines(self): import array + import sys fn = self.temptestfile with file(fn, 'w') as f: f.writelines(['abc']) @@ -406,7 +407,10 @@ exc = raises(TypeError, f.writelines, [memoryview('jkl')]) assert str(exc.value) == "writelines() argument must be a sequence of strings" out = open(fn, 'rb').readlines()[0] - assert out[0:5] == 'abcd\x00' + if sys.byteorder == 'big': + assert out[0:7] == 'abc\x00\x00\x00d' + else: + assert out[0:5] == 'abcd\x00' assert out[-3:] == 'ghi' with file(fn, 'wb') as f: diff --git a/pypy/module/_rawffi/callback.py b/pypy/module/_rawffi/callback.py --- a/pypy/module/_rawffi/callback.py +++ b/pypy/module/_rawffi/callback.py @@ -1,17 +1,23 @@ - +import sys from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module._rawffi.interp_rawffi import write_ptr from pypy.module._rawffi.structure import W_Structure from pypy.module._rawffi.interp_rawffi import (W_DataInstance, letter2tp, - unwrap_value, unpack_argshapes, got_libffi_error) + unwrap_value, unpack_argshapes, got_libffi_error, is_narrow_integer_type, + LL_TYPEMAP, NARROW_INTEGER_TYPES) from rpython.rlib.clibffi import USERDATA_P, CallbackFuncPtr, FUNCFLAG_CDECL from rpython.rlib.clibffi import ffi_type_void, LibFFIError from rpython.rlib import rweakref from pypy.module._rawffi.tracker import tracker from pypy.interpreter.error import OperationError from pypy.interpreter import gateway +from rpython.rlib.unroll import unrolling_iterable + +BIGENDIAN = sys.byteorder == 'big' + +unroll_narrow_integer_types = unrolling_iterable(NARROW_INTEGER_TYPES) app = gateway.applevel(''' def tbprint(tb, err): @@ -42,8 +48,17 @@ args_w[i] = space.wrap(rffi.cast(rffi.ULONG, ll_args[i])) w_res = space.call(w_callable, space.newtuple(args_w)) if callback_ptr.result is not None: # don't return void - unwrap_value(space, write_ptr, ll_res, 0, - callback_ptr.result, w_res) + ptr = ll_res + letter = callback_ptr.result + if BIGENDIAN: + # take care of narrow integers! 
+ for int_type in unroll_narrow_integer_types: + if int_type == letter: + T = LL_TYPEMAP[int_type] + n = rffi.sizeof(lltype.Signed) - rffi.sizeof(T) + ptr = rffi.ptradd(ptr, n) + break + unwrap_value(space, write_ptr, ptr, 0, letter, w_res) except OperationError, e: tbprint(space, space.wrap(e.get_traceback()), space.wrap(e.errorstr(space))) diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -1,3 +1,4 @@ +import sys from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -19,6 +20,8 @@ from pypy.module._rawffi.buffer import RawFFIBuffer from pypy.module._rawffi.tracker import tracker +BIGENDIAN = sys.byteorder == 'big' + TYPEMAP = { # XXX A mess with unsigned/signed/normal chars :-/ 'c' : ffi_type_uchar, @@ -331,10 +334,14 @@ if tracker.DO_TRACING: ll_buf = rffi.cast(lltype.Signed, self.ll_buffer) tracker.trace_allocation(ll_buf, self) + self._ll_buffer = self.ll_buffer def getbuffer(self, space): return space.wrap(rffi.cast(lltype.Unsigned, self.ll_buffer)) + def buffer_advance(self, n): + self.ll_buffer = rffi.ptradd(self.ll_buffer, n) + def byptr(self, space): from pypy.module._rawffi.array import ARRAY_OF_PTRS array = ARRAY_OF_PTRS.allocate(space, 1) @@ -342,16 +349,17 @@ return space.wrap(array) def free(self, space): - if not self.ll_buffer: + if not self._ll_buffer: raise segfault_exception(space, "freeing NULL pointer") self._free() def _free(self): if tracker.DO_TRACING: - ll_buf = rffi.cast(lltype.Signed, self.ll_buffer) + ll_buf = rffi.cast(lltype.Signed, self._ll_buffer) tracker.trace_free(ll_buf) - lltype.free(self.ll_buffer, flavor='raw') + lltype.free(self._ll_buffer, flavor='raw') self.ll_buffer = lltype.nullptr(rffi.VOIDP.TO) + self._ll_buffer = self.ll_buffer def buffer_w(self, space, flags): return RawFFIBuffer(self) @@ -432,12 +440,19 @@ space.wrap("cannot directly read value")) wrap_value._annspecialcase_ = 'specialize:arg(1)' +NARROW_INTEGER_TYPES = 'cbhiBIH?' 
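# A self-contained sketch (plain CPython, not part of interp_rawffi.py) of why
# the BIGENDIAN code in this patch advances the result pointer for narrow
# integer types: libffi hands back an 8-byte return slot, and on a big-endian
# machine the meaningful bytes of a narrow integer are the *last* ones, at
# offset sizeof(Signed) - sizeof(T).
import struct
slot = struct.pack('>q', 0x1234)           # 8-byte return slot as a big-endian machine fills it
offset = 8 - struct.calcsize('h')          # sizeof(Signed) - sizeof(short), assuming a 64-bit word
value, = struct.unpack_from('>h', slot, offset)
assert value == 0x1234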
+ +def is_narrow_integer_type(letter): + return letter in NARROW_INTEGER_TYPES class W_FuncPtr(W_Root): def __init__(self, space, ptr, argshapes, resshape): self.ptr = ptr self.argshapes = argshapes self.resshape = resshape + self.narrow_integer = False + if resshape is not None: + self.narrow_integer = is_narrow_integer_type(resshape.itemcode.lower()) def getbuffer(self, space): return space.wrap(rffi.cast(lltype.Unsigned, self.ptr.funcsym)) @@ -497,6 +512,10 @@ result = self.resshape.allocate(space, 1, autofree=True) # adjust_return_size() was used here on result.ll_buffer self.ptr.call(args_ll, result.ll_buffer) + if BIGENDIAN and self.narrow_integer: + # we get a 8 byte value in big endian + n = rffi.sizeof(lltype.Signed) - result.shape.size + result.buffer_advance(n) return space.wrap(result) else: self.ptr.call(args_ll, lltype.nullptr(rffi.VOIDP.TO)) diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -18,6 +18,9 @@ from rpython.rlib.rarithmetic import intmask, signedtype, r_uint, \ r_ulonglong from rpython.rtyper.lltypesystem import lltype, rffi +import sys + +IS_BIG_ENDIAN = sys.byteorder == 'big' @@ -114,20 +117,32 @@ size += intmask(fieldsize) bitsizes.append(fieldsize) elif field_type == NEW_BITFIELD: - bitsizes.append((bitsize << 16) + bitoffset) + if IS_BIG_ENDIAN: + off = last_size - bitoffset - bitsize + else: + off = bitoffset + bitsizes.append((bitsize << 16) + off) bitoffset = bitsize size = round_up(size, fieldalignment) pos.append(size) size += fieldsize elif field_type == CONT_BITFIELD: - bitsizes.append((bitsize << 16) + bitoffset) + if IS_BIG_ENDIAN: + off = last_size - bitoffset - bitsize + else: + off = bitoffset + bitsizes.append((bitsize << 16) + off) bitoffset += bitsize # offset is already updated for the NEXT field pos.append(size - fieldsize) elif field_type == EXPAND_BITFIELD: size += fieldsize - last_size / 8 last_size = fieldsize * 8 - bitsizes.append((bitsize << 16) + bitoffset) + if IS_BIG_ENDIAN: + off = last_size - bitoffset - bitsize + else: + off = bitoffset + bitsizes.append((bitsize << 16) + off) bitoffset += bitsize # offset is already updated for the NEXT field pos.append(size - fieldsize) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -704,7 +704,6 @@ def compare(a, b): a1 = _rawffi.Array('i').fromaddress(_rawffi.Array('P').fromaddress(a, 1)[0], 1) a2 = _rawffi.Array('i').fromaddress(_rawffi.Array('P').fromaddress(b, 1)[0], 1) - print "comparing", a1[0], "with", a2[0] if a1[0] not in [1,2,3,4] or a2[0] not in [1,2,3,4]: bogus_args.append((a1[0], a2[0])) if a1[0] > a2[0]: @@ -715,7 +714,7 @@ a2[0] = len(ll_to_sort) a3 = _rawffi.Array('l')(1) a3[0] = struct.calcsize('i') - cb = _rawffi.CallbackPtr(compare, ['P', 'P'], 'i') + cb = _rawffi.CallbackPtr(compare, ['P', 'P'], 'l') a4 = cb.byptr() qsort(a1, a2, a3, a4) res = [ll_to_sort[i] for i in range(len(ll_to_sort))] @@ -896,11 +895,21 @@ b = _rawffi.Array('c').fromaddress(a.buffer, 38) if sys.maxunicode > 65535: # UCS4 build - assert b[0] == 'x' - assert b[1] == '\x00' - assert b[2] == '\x00' - assert b[3] == '\x00' - assert b[4] == 'y' + if sys.byteorder == 'big': + assert b[0] == '\x00' + assert b[1] == '\x00' + assert b[2] == '\x00' + assert b[3] == 'x' + assert b[4] == '\x00' + assert b[5] == '\x00' + assert b[6] == '\x00' + assert b[7] == 
'y' + else: + assert b[0] == 'x' + assert b[1] == '\x00' + assert b[2] == '\x00' + assert b[3] == '\x00' + assert b[4] == 'y' else: # UCS2 build assert b[0] == 'x' diff --git a/pypy/module/_rawffi/test/test_struct.py b/pypy/module/_rawffi/test/test_struct.py --- a/pypy/module/_rawffi/test/test_struct.py +++ b/pypy/module/_rawffi/test/test_struct.py @@ -1,4 +1,4 @@ - +import sys from pypy.module._rawffi.structure import size_alignment_pos from pypy.module._rawffi.interp_rawffi import TYPEMAP, letter2tp @@ -63,4 +63,7 @@ for (name, t, size) in fields]) assert size == 8 assert pos == [0, 0, 0] - assert bitsizes == [0x10000, 0x3e0001, 0x1003f] + if sys.byteorder == 'little': + assert bitsizes == [0x10000, 0x3e0001, 0x1003f] + else: + assert bitsizes == [0x1003f, 0x3e0001, 0x10000] diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py --- a/pypy/module/_sre/interp_sre.py +++ b/pypy/module/_sre/interp_sre.py @@ -7,6 +7,7 @@ from pypy.interpreter.error import OperationError from rpython.rlib.rarithmetic import intmask from rpython.rlib import jit +from rpython.rlib.rstring import StringBuilder, UnicodeBuilder # ____________________________________________________________ # @@ -36,6 +37,8 @@ if 0 <= start <= end: if isinstance(ctx, rsre_core.BufMatchContext): return space.wrap(ctx._buffer.getslice(start, end, 1, end-start)) + if isinstance(ctx, rsre_core.StrMatchContext): + return space.wrap(ctx._string[start:end]) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr[start:end]) else: @@ -98,8 +101,8 @@ space.wrap("cannot copy this pattern object")) def make_ctx(self, w_string, pos=0, endpos=sys.maxint): - """Make a BufMatchContext or a UnicodeMatchContext for searching - in the given w_string object.""" + """Make a StrMatchContext, BufMatchContext or a UnicodeMatchContext for + searching in the given w_string object.""" space = self.space if pos < 0: pos = 0 @@ -113,6 +116,14 @@ endpos = len(unicodestr) return rsre_core.UnicodeMatchContext(self.code, unicodestr, pos, endpos, self.flags) + elif space.isinstance_w(w_string, space.w_str): + str = space.str_w(w_string) + if pos > len(str): + pos = len(str) + if endpos > len(str): + endpos = len(str) + return rsre_core.StrMatchContext(self.code, str, + pos, endpos, self.flags) else: buf = space.readbuf_w(w_string) size = buf.getlength() @@ -216,6 +227,11 @@ def subx(self, w_ptemplate, w_string, count): space = self.space + # use a (much faster) string/unicode builder if w_ptemplate and + # w_string are both string or both unicode objects, and if w_ptemplate + # is a literal + use_builder = False + filter_as_unicode = filter_as_string = None if space.is_true(space.callable(w_ptemplate)): w_filter = w_ptemplate filter_is_callable = True @@ -223,6 +239,8 @@ if space.isinstance_w(w_ptemplate, space.w_unicode): filter_as_unicode = space.unicode_w(w_ptemplate) literal = u'\\' not in filter_as_unicode + use_builder = ( + space.isinstance_w(w_string, space.w_unicode) and literal) else: try: filter_as_string = space.str_w(w_ptemplate) @@ -232,6 +250,8 @@ literal = False else: literal = '\\' not in filter_as_string + use_builder = ( + space.isinstance_w(w_string, space.w_str) and literal) if literal: w_filter = w_ptemplate filter_is_callable = False @@ -242,19 +262,44 @@ space.wrap(self), w_ptemplate) filter_is_callable = space.is_true(space.callable(w_filter)) # + # XXX this is a bit of a mess, but it improves performance a lot ctx = self.make_ctx(w_string) - sublist_w = [] + sublist_w = strbuilder = 
unicodebuilder = None + if use_builder: + if filter_as_unicode is not None: + unicodebuilder = UnicodeBuilder(ctx.end) + else: + assert filter_as_string is not None + strbuilder = StringBuilder(ctx.end) + else: + sublist_w = [] n = last_pos = 0 while not count or n < count: + sub_jitdriver.jit_merge_point( + self=self, + use_builder=use_builder, + filter_is_callable=filter_is_callable, + filter_type=type(w_filter), + ctx=ctx, + w_filter=w_filter, + strbuilder=strbuilder, + unicodebuilder=unicodebuilder, + filter_as_string=filter_as_string, + filter_as_unicode=filter_as_unicode, + count=count, + w_string=w_string, + n=n, last_pos=last_pos, sublist_w=sublist_w + ) + space = self.space if not searchcontext(space, ctx): break if last_pos < ctx.match_start: - sublist_w.append(slice_w(space, ctx, last_pos, - ctx.match_start, space.w_None)) + _sub_append_slice( + ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, last_pos, ctx.match_start) start = ctx.match_end if start == ctx.match_start: start += 1 - nextctx = ctx.fresh_copy(start) if not (last_pos == ctx.match_start == ctx.match_end and n > 0): # the above ignores empty matches on latest position @@ -262,28 +307,71 @@ w_match = self.getmatch(ctx, True) w_piece = space.call_function(w_filter, w_match) if not space.is_w(w_piece, space.w_None): + assert strbuilder is None and unicodebuilder is None + assert not use_builder sublist_w.append(w_piece) else: - sublist_w.append(w_filter) + if use_builder: + if strbuilder is not None: + assert filter_as_string is not None + strbuilder.append(filter_as_string) + else: + assert unicodebuilder is not None + assert filter_as_unicode is not None + unicodebuilder.append(filter_as_unicode) + else: + sublist_w.append(w_filter) last_pos = ctx.match_end n += 1 elif last_pos >= ctx.end: break # empty match at the end: finished - ctx = nextctx + ctx.reset(start) if last_pos < ctx.end: - sublist_w.append(slice_w(space, ctx, last_pos, ctx.end, - space.w_None)) + _sub_append_slice(ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, last_pos, ctx.end) + if use_builder: + if strbuilder is not None: + return space.wrap(strbuilder.build()), n + else: + assert unicodebuilder is not None + return space.wrap(unicodebuilder.build()), n + else: + if space.isinstance_w(w_string, space.w_unicode): + w_emptystr = space.wrap(u'') + else: + w_emptystr = space.wrap('') + w_item = space.call_method(w_emptystr, 'join', + space.newlist(sublist_w)) + return w_item, n - if space.isinstance_w(w_string, space.w_unicode): - w_emptystr = space.wrap(u'') - else: - w_emptystr = space.wrap('') - w_item = space.call_method(w_emptystr, 'join', - space.newlist(sublist_w)) - return w_item, n +sub_jitdriver = jit.JitDriver( + reds="""count n last_pos + ctx w_filter + strbuilder unicodebuilder + filter_as_string + filter_as_unicode + w_string sublist_w + self""".split(), + greens=["filter_is_callable", "use_builder", "filter_type", "ctx.pattern"]) +def _sub_append_slice(ctx, space, use_builder, sublist_w, + strbuilder, unicodebuilder, start, end): + if use_builder: + if isinstance(ctx, rsre_core.BufMatchContext): + assert strbuilder is not None + return strbuilder.append(ctx._buffer.getslice(start, end, 1, end-start)) + if isinstance(ctx, rsre_core.StrMatchContext): + assert strbuilder is not None + return strbuilder.append_slice(ctx._string, start, end) + elif isinstance(ctx, rsre_core.UnicodeMatchContext): + assert unicodebuilder is not None + return unicodebuilder.append_slice(ctx._unicodestr, start, end) + assert 0, 
"unreachable" + else: + sublist_w.append(slice_w(space, ctx, start, end, space.w_None)) + @unwrap_spec(flags=int, groups=int, w_groupindex=WrappedDefault(None), w_indexgroup=WrappedDefault(None)) def SRE_Pattern__new__(space, w_subtype, w_pattern, flags, w_code, @@ -482,6 +570,8 @@ ctx = self.ctx if isinstance(ctx, rsre_core.BufMatchContext): return space.wrap(ctx._buffer.as_str()) + elif isinstance(ctx, rsre_core.StrMatchContext): + return space.wrap(ctx._string) elif isinstance(ctx, rsre_core.UnicodeMatchContext): return space.wrap(ctx._unicodestr) else: diff --git a/pypy/module/_vmprof/conftest.py b/pypy/module/_vmprof/conftest.py new file mode 100644 --- /dev/null +++ b/pypy/module/_vmprof/conftest.py @@ -0,0 +1,6 @@ +import py, platform + +def pytest_collect_directory(path, parent): + if platform.machine() == 's390x': + py.test.skip("zarch tests skipped") +pytest_collect_file = pytest_collect_directory diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py --- a/pypy/module/_vmprof/interp_vmprof.py +++ b/pypy/module/_vmprof/interp_vmprof.py @@ -60,10 +60,10 @@ Must be smaller than 1.0 """ w_modules = space.sys.get('modules') - if space.contains_w(w_modules, space.wrap('_continuation')): - space.warn(space.wrap("Using _continuation/greenlet/stacklet together " - "with vmprof will crash"), - space.w_RuntimeWarning) + #if space.contains_w(w_modules, space.wrap('_continuation')): + # space.warn(space.wrap("Using _continuation/greenlet/stacklet together " + # "with vmprof will crash"), + # space.w_RuntimeWarning) try: rvmprof.enable(fileno, period) except rvmprof.VMProfError, e: diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py --- a/pypy/module/_vmprof/test/test__vmprof.py +++ b/pypy/module/_vmprof/test/test__vmprof.py @@ -5,14 +5,15 @@ class AppTestVMProf(object): def setup_class(cls): cls.space = gettestobjspace(usemodules=['_vmprof', 'struct']) - cls.tmpfile = udir.join('test__vmprof.1').open('wb') - cls.w_tmpfileno = cls.space.wrap(cls.tmpfile.fileno()) - cls.w_tmpfilename = cls.space.wrap(cls.tmpfile.name) - cls.tmpfile2 = udir.join('test__vmprof.2').open('wb') - cls.w_tmpfileno2 = cls.space.wrap(cls.tmpfile2.fileno()) - cls.w_tmpfilename2 = cls.space.wrap(cls.tmpfile2.name) + cls.w_tmpfilename = cls.space.wrap(str(udir.join('test__vmprof.1'))) + cls.w_tmpfilename2 = cls.space.wrap(str(udir.join('test__vmprof.2'))) def test_import_vmprof(self): + tmpfile = open(self.tmpfilename, 'wb') + tmpfileno = tmpfile.fileno() + tmpfile2 = open(self.tmpfilename2, 'wb') + tmpfileno2 = tmpfile2.fileno() + import struct, sys WORD = struct.calcsize('l') @@ -45,7 +46,7 @@ return count import _vmprof - _vmprof.enable(self.tmpfileno, 0.01) + _vmprof.enable(tmpfileno, 0.01) _vmprof.disable() s = open(self.tmpfilename, 'rb').read() no_of_codes = count(s) @@ -56,7 +57,7 @@ pass """ in d - _vmprof.enable(self.tmpfileno2, 0.01) + _vmprof.enable(tmpfileno2, 0.01) exec """def foo2(): pass @@ -71,9 +72,9 @@ def test_enable_ovf(self): import _vmprof - raises(_vmprof.VMProfError, _vmprof.enable, 999, 0) - raises(_vmprof.VMProfError, _vmprof.enable, 999, -2.5) - raises(_vmprof.VMProfError, _vmprof.enable, 999, 1e300) - raises(_vmprof.VMProfError, _vmprof.enable, 999, 1e300 * 1e300) + raises(_vmprof.VMProfError, _vmprof.enable, 2, 0) + raises(_vmprof.VMProfError, _vmprof.enable, 2, -2.5) + raises(_vmprof.VMProfError, _vmprof.enable, 2, 1e300) + raises(_vmprof.VMProfError, _vmprof.enable, 2, 1e300 * 1e300) NaN = (1e300*1e300) / 
(1e300*1e300) - raises(_vmprof.VMProfError, _vmprof.enable, 999, NaN) + raises(_vmprof.VMProfError, _vmprof.enable, 2, NaN) diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -390,7 +390,7 @@ ((dummy::cppyy_test_data*)self)->destroy_arrays(); } else if (idx == s_methods["cppyy_test_data::set_bool"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_bool((bool)((CPPYY_G__value*)args)[0].obj.in); + ((dummy::cppyy_test_data*)self)->set_bool((bool)((CPPYY_G__value*)args)[0].obj.i); } else if (idx == s_methods["cppyy_test_data::set_char"]) { assert(self && nargs == 1); ((dummy::cppyy_test_data*)self)->set_char(((CPPYY_G__value*)args)[0].obj.ch); diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,8 +29,8 @@ #define PY_VERSION "2.7.10" /* PyPy version as a string */ -#define PYPY_VERSION "4.1.0-alpha0" -#define PYPY_VERSION_NUM 0x04010000 +#define PYPY_VERSION "5.1.0-alpha0" +#define PYPY_VERSION_NUM 0x05010000 /* Defined to mean a PyPy where cpyext holds more regular references to PyObjects, e.g. staying alive as long as the internal PyPy object diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -51,13 +51,19 @@ assert arr.tolist() == [1, 23, 4] def test_buffer(self): + import sys module = self.import_module(name='array') arr = module.array('i', [1,2,3,4]) buf = buffer(arr) exc = raises(TypeError, "buf[1] = '1'") assert str(exc.value) == "buffer is read-only" - # XXX big-endian - assert str(buf) == ('\x01\0\0\0' - '\x02\0\0\0' - '\x03\0\0\0' - '\x04\0\0\0') + if sys.byteorder == 'big': + assert str(buf) == ('\0\0\0\x01' + '\0\0\0\x02' + '\0\0\0\x03' + '\0\0\0\x04') + else: + assert str(buf) == ('\x01\0\0\0' + '\x02\0\0\0' + '\x03\0\0\0' + '\x04\0\0\0') diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -606,7 +606,7 @@ long intval; PyObject *name; - if (!PyArg_ParseTuple(args, "i", &intval)) + if (!PyArg_ParseTuple(args, "l", &intval)) return NULL; IntLike_Type.tp_as_number = &intlike_as_number; diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -386,11 +386,11 @@ lltype.free(pendian, flavor='raw') test("\x61\x00\x62\x00\x63\x00\x64\x00", -1) - - test("\x61\x00\x62\x00\x63\x00\x64\x00", None) - + if sys.byteorder == 'big': + test("\x00\x61\x00\x62\x00\x63\x00\x64", None) + else: + test("\x61\x00\x62\x00\x63\x00\x64\x00", None) test("\x00\x61\x00\x62\x00\x63\x00\x64", 1) - test("\xFE\xFF\x00\x61\x00\x62\x00\x63\x00\x64", 0, 1) test("\xFF\xFE\x61\x00\x62\x00\x63\x00\x64\x00", 0, -1) @@ -423,7 +423,10 @@ test("\x61\x00\x00\x00\x62\x00\x00\x00", -1) - test("\x61\x00\x00\x00\x62\x00\x00\x00", None) + if sys.byteorder == 'big': + test("\x00\x00\x00\x61\x00\x00\x00\x62", None) + else: + test("\x61\x00\x00\x00\x62\x00\x00\x00", None) test("\x00\x00\x00\x61\x00\x00\x00\x62", 1) diff --git a/pypy/module/imp/test/test_import.py 
b/pypy/module/imp/test/test_import.py --- a/pypy/module/imp/test/test_import.py +++ b/pypy/module/imp/test/test_import.py @@ -109,7 +109,7 @@ import marshal, stat, struct, os, imp code = py.code.Source(p.join("x.py").read()).compile() s3 = marshal.dumps(code) - s2 = struct.pack("i", os.stat(str(p.join("x.py")))[stat.ST_MTIME]) + s2 = struct.pack(" Author: Richard Plangger Branch: new-jit-log Changeset: r83107:3c69b7ad777e Date: 2016-03-17 12:41 +0100 http://bitbucket.org/pypy/pypy/changeset/3c69b7ad777e/ Log: revived this branch, writes all resoperations into the log (to be independant from the pypy code base) diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -1,13 +1,17 @@ from rpython.rlib.rvmprof.rvmprof import cintf +from rpython.jit.metainterp import resoperation as resoperations +import struct class VMProfJitLogger(object): - MARK_BLOCK_ASM = 0x10 + MARK_TRACED = 0x10 + MARK_ASM = 0x11 - MARK_INPUT_ARGS = 0x11 - MARK_RESOP = 0x12 + MARK_INPUT_ARGS = 0x12 + MARK_RESOP = 0x13 - MARK_RESOP_META = 0x13 + MARK_RESOP_META = 0x14 + MARK_RESOP = 0x15 def __init__(self): self.cintf = cintf.setup() @@ -16,31 +20,31 @@ self.cintf.jitlog_try_init_using_env() if self.cintf.jitlog_filter(0x0): return - self.cintf.jitlog_write_marker(MARK_RESOP_META); - count = len(resoperation.opname) - self.cintf.jitlog_write_int(count) - for opnum, opname in resoperation.opname.items(): - self.cintf.write_marker(opnum) - self.cintf.write_string(opname) + count = len(resoperations.opname) + mark = VMProfJitLogger.MARK_RESOP_META + for opnum, opname in resoperations.opname.items(): + line = struct.pack(">h", opnum) + opname.lower() + self.write_marked(mark, line) + + def teardown(self): + self.cintf.jitlog_teardown() + + def write_marked(self, mark, line): + self.cintf.jitlog_write_marked(mark, line, len(line)) def log_trace(self, tag, args, ops, faildescr=None, ops_offset={}): if self.cintf.jitlog_filter(tag): return assert isinstance(tag, int) - self.cintf.jitlog_write_marker(tag); # input args - self.cintf.jitlog_write_marker(MARK_INPUT_ARGS); str_args = [arg.repr_short(arg._repr_memo) for arg in args] - self.cintf.jitlog_write_string(','.join(str_args)) + self.write_marked(self.MARK_INPUT_ARGS, ','.join(str_args)) - self.cintf.jitlog_write_int(len(ops)) for i,op in enumerate(ops): - self.cintf.jitlog_write_marker(MARK_RESOP) - self.cintf.jitlog_write_marker(op.getopnum()) str_args = [arg.repr_short(arg._repr_memo) for arg in op.getarglist()] descr = op.getdescr() if descr: str_args += ['descr='+descr] - self.cintf.jitlog_write_string(','.join(str_args)) + self.write_marked(self.MARK_RESOP, ','.join(args)) diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py --- a/rpython/rlib/rvmprof/cintf.py +++ b/rpython/rlib/rvmprof/cintf.py @@ -56,12 +56,18 @@ compilation_info=eci, _nowrapper=True) - jitlog_init = rffi.llexternal("jitlog_init", [rffi.INT, rffi.CHARP], - rffi.CHARP, compilation_info=eci, - save_err=rffi.RFFI_SAVE_ERRNO) - jitlog_init = rffi.llexternal("jitlog_write_marker", [rffi.INT, rffi.CHARP], - rffi.CHARP, compilation_info=eci, - save_err=rffi.RFFI_SAVE_ERRNO) + # jit log functions + jitlog_init = rffi.llexternal("jitlog_init", [rffi.INT, rffi.CCHARP], + rffi.CCHARP, compilation_info=eci) + jitlog_try_init_using_env = rffi.llexternal("jitlog_try_init_using_env", + [], lltype.Void, compilation_info=eci) + jitlog_write_marked = rffi.llexternal("jitlog_write_marked", + 
[rffi.INT, rffi.CCHARP, rffi.INT], + lltype.Void, compilation_info=eci) + jitlog_filter = rffi.llexternal("jitlog_filter", [rffi.INT], rffi.INT, + compilation_info=eci) + jitlog_teardown = rffi.llexternal("jitlog_teardown", [], lltype.Void, + compilation_info=eci) return CInterface(locals()) diff --git a/rpython/rlib/rvmprof/src/jitlog_main.h b/rpython/rlib/rvmprof/src/jitlog_main.h --- a/rpython/rlib/rvmprof/src/jitlog_main.h +++ b/rpython/rlib/rvmprof/src/jitlog_main.h @@ -29,14 +29,13 @@ } if (!colon) { /* JITLOG=+filename (or just 'filename') --- profiling version */ - debug_profile = 1; - pypy_setup_profiling(); + //pypy_setup_profiling(); } else { /* JITLOG=prefix:filename --- conditional logging */ int n = colon - filename; jitlog_prefix = malloc(n + 1); memcpy(jitlog_prefix, filename, n); - debug_prefix[n] = '\0'; + //debug_prefix[n] = '\0'; filename = colon + 1; } escape = strstr(filename, "%d"); @@ -55,7 +54,7 @@ if (strcmp(filename, "-") != 0) { // mode is 775 mode_t mode = S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH; - jitlog_fd = open(filename, O_WRONLY | O_CREATE, mode); + jitlog_fd = open(filename, O_WRONLY | O_CREAT, mode); } if (escape) { @@ -70,7 +69,7 @@ } } if (!jitlog_fd) { - jitlog_fd = stderr; + jitlog_fd = 2; // TODO //if (isatty(2)) // { @@ -84,7 +83,7 @@ } RPY_EXTERN -char *jitlog_init(int fd, char * prefix) +char *jitlog_init(int fd, const char * prefix) { jitlog_fd = fd; jitlog_prefix = strdup(prefix); @@ -92,15 +91,32 @@ } RPY_EXTERN -void jitlog_close(int close_fd) +void jitlog_teardown() { + jitlog_ready = 0; if (jitlog_fd == -1) { return; } - if (close_fd) { - close(jitlog_fd); + // close the jitlog file descriptor + close(jitlog_fd); + jitlog_fd = -1; + // free the prefix + if (jitlog_prefix != NULL) { + free(jitlog_prefix); } - jitlog_fd = -1; - free(jitlog_prefix); } +RPY_EXTERN +void jitlog_write_marked(int tag, char * text, int length) +{ + if (!jitlog_ready) { return; } + + char header[5]; + header[0] = tag; + header[1] = (length >> 24) & 0xff; + header[2] = (length >> 16) & 0xff; + header[3] = (length >> 8) & 0xff; + header[4] = length & 0xff; + write(jitlog_fd, (const char*)&header, 5); + write(jitlog_fd, text, length); +} diff --git a/rpython/rlib/rvmprof/src/rvmprof.h b/rpython/rlib/rvmprof/src/rvmprof.h --- a/rpython/rlib/rvmprof/src/rvmprof.h +++ b/rpython/rlib/rvmprof/src/rvmprof.h @@ -9,6 +9,8 @@ RPY_EXTERN long vmprof_stack_pop(void*); RPY_EXTERN void vmprof_stack_free(void*); -RPY_EXTERN char * jitlog_init(int, char*); +RPY_EXTERN char * jitlog_init(int, const char*); RPY_EXTERN void jitlog_try_init_using_env(void); -RPY_EXTERN int jitlog_filter(int tag); +RPY_EXTERN int jitlog_filter(int); +RPY_EXTERN void jitlog_write_marked(int, char*, int); +RPY_EXTERN void jitlog_teardown(); From pypy.commits at gmail.com Thu Mar 17 10:16:12 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 17 Mar 2016 07:16:12 -0700 (PDT) Subject: [pypy-commit] pypy default: add a -live- to greenfield op since it can call nonstandard virtualizable, how to reproduce a crash with a test Message-ID: <56eabc2c.41e11c0a.dba59.20cd@mx.google.com> Author: fijal Branch: Changeset: r83108:75ef4a7f1830 Date: 2016-03-17 16:15 +0200 http://bitbucket.org/pypy/pypy/changeset/75ef4a7f1830/ Log: add a -live- to greenfield op since it can call nonstandard virtualizable, how to reproduce a crash with a test diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ 
-784,11 +784,13 @@ return [] # check for _immutable_fields_ hints immut = v_inst.concretetype.TO._immutable_field(c_fieldname.value) + need_live = False if immut: if (self.callcontrol is not None and self.callcontrol.could_be_green_field(v_inst.concretetype.TO, c_fieldname.value)): pure = '_greenfield' + need_live = True else: pure = '_pure' else: @@ -815,10 +817,12 @@ descr1 = self.cpu.fielddescrof( v_inst.concretetype.TO, quasiimmut.get_mutate_field_name(c_fieldname.value)) - op1 = [SpaceOperation('-live-', [], None), + return [SpaceOperation('-live-', [], None), SpaceOperation('record_quasiimmut_field', [v_inst, descr, descr1], None), op1] + if need_live: + return [SpaceOperation('-live-', [], None), op1] return op1 def rewrite_op_setfield(self, op, override_type=None): diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -1012,7 +1012,8 @@ v1 = varoftype(lltype.Ptr(S)) v2 = varoftype(lltype.Char) op = SpaceOperation('getfield', [v1, Constant('x', lltype.Void)], v2) - op1 = Transformer(FakeCPU(), FakeCC()).rewrite_operation(op) + op0, op1 = Transformer(FakeCPU(), FakeCC()).rewrite_operation(op) + assert op0.opname == '-live-' assert op1.opname == 'getfield_gc_i_greenfield' assert op1.args == [v1, ('fielddescr', S, 'x')] assert op1.result == v2 From pypy.commits at gmail.com Thu Mar 17 10:32:25 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Mar 2016 07:32:25 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: replace_box(), limited version (enough to support almost all cases that Message-ID: <56eabff9.0357c20a.c858a.fffff709@mx.google.com> Author: Armin Rigo Branch: heapcache-refactor Changeset: r83109:4ed062f45f76 Date: 2016-03-17 15:31 +0100 http://bitbucket.org/pypy/pypy/changeset/4ed062f45f76/ Log: replace_box(), limited version (enough to support almost all cases that pyjitpl calls it with) diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -1,5 +1,8 @@ -from rpython.jit.metainterp.history import ConstInt, RefFrontendOp +from rpython.jit.metainterp.history import Const, ConstInt +from rpython.jit.metainterp.history import FrontendOp, RefFrontendOp +from rpython.jit.metainterp.history import FO_REPLACED_WITH_CONST from rpython.jit.metainterp.resoperation import rop, OpHelpers +from rpython.jit.metainterp.executor import constant_from_op from rpython.rlib.rarithmetic import r_uint32, r_uint from rpython.rlib.objectmodel import always_inline @@ -12,23 +15,32 @@ HF_IS_UNESCAPED = 0x10 HF_NONSTD_VABLE = 0x20 +_HF_VERSION_INC = 0x40 # must be last +_HF_VERSION_MAX = r_uint(2 ** 32 - _HF_VERSION_INC) + @always_inline def add_flags(ref_frontend_op, flags): - f = r_uint(ref_frontend_op._heapc_flags) + f = ref_frontend_op._get_heapc_flags() f |= r_uint(flags) - ref_frontend_op._heapc_flags = r_uint32(f) + ref_frontend_op._set_heapc_flags(f) @always_inline def remove_flags(ref_frontend_op, flags): - f = r_uint(ref_frontend_op._heapc_flags) + f = ref_frontend_op._get_heapc_flags() f &= r_uint(~flags) - ref_frontend_op._heapc_flags = r_uint32(f) + ref_frontend_op._set_heapc_flags(f) @always_inline def test_flags(ref_frontend_op, flags): - f = r_uint(ref_frontend_op._heapc_flags) + f = ref_frontend_op._get_heapc_flags() return bool(f & r_uint(flags)) +def maybe_replace_with_const(box): + if 
box.is_replaced_with_const(): + return constant_from_op(box) + else: + return box + class CacheEntry(object): def __init__(self, heapcache): @@ -62,7 +74,12 @@ self._getdict(seen_alloc)[ref_box] = fieldbox def read(self, ref_box): - return self._getdict(self._seen_alloc(ref_box)).get(ref_box, None) + dict = self._getdict(self._seen_alloc(ref_box)) + try: + res_box = dict[ref_box] + except KeyError: + return None + return maybe_replace_with_const(res_box) def read_now_known(self, ref_box, fieldbox): self._getdict(self._seen_alloc(ref_box))[ref_box] = fieldbox @@ -98,7 +115,7 @@ # to use a version number in each RefFrontendOp, and in order # to reset the flags globally, we increment the global version # number in this class. Then when we read '_heapc_flags' we - # also check if the associated '_heapc_version' is up-to-date + # also check if the associated version number is up-to-date # or not. More precisely, we have two global version numbers # here: 'head_version' and 'likely_virtual_version'. Normally # we use 'head_version'. For is_likely_virtual() though, we @@ -110,7 +127,8 @@ def reset(self): # Global reset of all flags. Update both version numbers so # that any access to '_heapc_flags' will be marked as outdated. - self.head_version += 1 + assert self.head_version < _HF_VERSION_MAX + self.head_version += _HF_VERSION_INC self.likely_virtual_version = self.head_version # # maps boxes to HeapCacheValue @@ -125,7 +143,8 @@ def reset_keep_likely_virtuals(self): # Update only 'head_version', but 'likely_virtual_version' remains # at its older value. - self.head_version += 1 + assert self.head_version < _HF_VERSION_MAX + self.head_version += _HF_VERSION_INC # for value in self.values.itervalues(): value.reset_keep_likely_virtual() @@ -134,32 +153,21 @@ @always_inline def test_head_version(self, ref_frontend_op): - return r_uint(ref_frontend_op._heapc_version) == self.head_version + return ref_frontend_op._get_heapc_flags() >= self.head_version @always_inline def test_likely_virtual_version(self, ref_frontend_op): - return (r_uint(ref_frontend_op._heapc_version) >= - self.likely_virtual_version) + return ref_frontend_op._get_heapc_flags() >= self.likely_virtual_version def update_version(self, ref_frontend_op): if not self.test_head_version(ref_frontend_op): - f = 0 + f = self.head_version if (self.test_likely_virtual_version(ref_frontend_op) and test_flags(ref_frontend_op, HF_LIKELY_VIRTUAL)): f |= HF_LIKELY_VIRTUAL - ref_frontend_op._heapc_flags = r_uint32(f) - ref_frontend_op._heapc_version = r_uint32(self.head_version) + ref_frontend_op._set_heapc_flags(f) ref_frontend_op._heapc_deps = None - def getvalue(self, box, create=True): - value = self.values.get(box, None) - if not value and create: - value = self.values[box] = HeapCacheValue(box) - return value - - def getvalues(self, boxes): - return [self.getvalue(box) for box in boxes] - def invalidate_caches(self, opnum, descr, argboxes): self.mark_escaped(opnum, descr, argboxes) self.clear_caches(opnum, descr, argboxes) @@ -434,7 +442,9 @@ if (isinstance(box, RefFrontendOp) and self.test_head_version(box) and box._heapc_deps is not None): - return box._heapc_deps[0] + res_box = box._heapc_deps[0] + if res_box is not None: + return maybe_replace_with_const(res_box) return None def arraylen_now_known(self, box, lengthbox): @@ -446,8 +456,7 @@ deps[0] = lengthbox def replace_box(self, oldbox, newbox): - value = self.getvalue(oldbox, create=False) - if value is None: - return - value.box = newbox - self.values[newbox] = value + # here, only 
for replacing a box with a const + if isinstance(oldbox, FrontendOp) and isinstance(newbox, Const): + assert newbox.same_constant(constant_from_op(oldbox)) + oldbox.set_replaced_with_const() diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -2,7 +2,8 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rlib.objectmodel import we_are_translated, Symbolic from rpython.rlib.objectmodel import compute_unique_id, specialize -from rpython.rlib.rarithmetic import r_uint32, r_int64, is_valid_int +from rpython.rlib.rarithmetic import r_int64, is_valid_int +from rpython.rlib.rarithmetic import LONG_BIT, intmask, r_uint from rpython.conftest import option @@ -641,42 +642,66 @@ # ____________________________________________________________ +FO_POSITION_MASK = r_uint(0x7FFFFFFF) +FO_REPLACED_WITH_CONST = r_uint(0x80000000) + + class FrontendOp(AbstractResOp): type = 'v' - _attrs_ = ('position',) + _attrs_ = ('position_and_flags',) def __init__(self, pos): - self.position = pos + assert pos >= 0 + self.position_and_flags = r_uint(pos) def get_position(self): - return self.position + return intmask(self.position_and_flags & FO_POSITION_MASK) + + def is_replaced_with_const(self): + return bool(self.position_and_flags & FO_REPLACED_WITH_CONST) + + def set_replaced_with_const(self): + self.position_and_flags |= FO_REPLACED_WITH_CONST def __repr__(self): return '%s(%s)' % (self.__class__.__name__, self.position) class IntFrontendOp(IntOp, FrontendOp): - _attrs_ = ('position', '_resint') + _attrs_ = ('position_and_flags', '_resint') def copy_value_from(self, other): self._resint = other.getint() class FloatFrontendOp(FloatOp, FrontendOp): - _attrs_ = ('position', '_resfloat') + _attrs_ = ('position_and_flags', '_resfloat') def copy_value_from(self, other): self._resfloat = other.getfloatstorage() class RefFrontendOp(RefOp, FrontendOp): - _attrs_ = ('position', '_resref', - '_heapc_flags', '_heapc_version', '_heapc_deps') - - _heapc_flags = r_uint32(0) - _heapc_version = r_uint32(0) + _attrs_ = ('position_and_flags', '_resref', '_heapc_deps') + if LONG_BIT == 32: + _attrs_ += ('_heapc_flags',) # on 64 bit, this gets stored into the + _heapc_flags = r_uint(0) # high 32 bits of 'position_and_flags' _heapc_deps = None def copy_value_from(self, other): self._resref = other.getref_base() + if LONG_BIT == 32: + def _get_heapc_flags(self): + return self._heapc_flags + def _set_heapc_flags(self, value): + self._heapc_flags = value + else: + def _get_heapc_flags(self): + return self.position_and_flags >> 32 + def _set_heapc_flags(self, value): + self.position_and_flags = ( + (self.position_and_flags & 0xFFFFFFFF) | + (value << 32)) + + class History(object): ends_with_jump = False trace = None diff --git a/rpython/jit/metainterp/test/test_heapcache.py b/rpython/jit/metainterp/test/test_heapcache.py --- a/rpython/jit/metainterp/test/test_heapcache.py +++ b/rpython/jit/metainterp/test/test_heapcache.py @@ -1,11 +1,9 @@ +import py from rpython.jit.metainterp.heapcache import HeapCache from rpython.jit.metainterp.resoperation import rop, InputArgInt -from rpython.jit.metainterp.history import ConstInt, BasicFailDescr -from rpython.jit.metainterp.history import RefFrontendOp +from rpython.jit.metainterp.history import ConstInt, ConstPtr, BasicFailDescr +from rpython.jit.metainterp.history import IntFrontendOp, RefFrontendOp -lengthbox1 = object() -lengthbox2 = object() -lengthbox3 = 
object() descr1 = object() descr2 = object() descr3 = object() @@ -304,6 +302,8 @@ h = HeapCache() box1 = RefFrontendOp(1) box2 = RefFrontendOp(2) + lengthbox1 = IntFrontendOp(11) + lengthbox2 = IntFrontendOp(12) h.new_array(box1, lengthbox1) assert h.arraylen(box1) is lengthbox1 @@ -348,8 +348,13 @@ assert h.getarrayitem(box1, index1, descr1) is None assert h.getarrayitem(box1, index2, descr1) is None - def test_replace_box(self): + def test_replace_box_with_box(self): + py.test.skip("replacing a box with another box: not supported any more") h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) h.setfield(box1, box2, descr1) h.setfield(box1, box3, descr2) h.setfield(box2, box3, descr3) @@ -360,16 +365,22 @@ h.setfield(box4, box3, descr1) assert h.getfield(box4, descr1) is box3 + def test_replace_box_with_const(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + c_box3 = ConstPtr(ConstPtr.value) h.setfield(box1, box2, descr1) h.setfield(box1, box3, descr2) h.setfield(box2, box3, descr3) - h.replace_box(box3, box4) + h.replace_box(box3, c_box3) assert h.getfield(box1, descr1) is box2 - assert h.getfield(box1, descr2) is box4 - assert h.getfield(box2, descr3) is box4 + assert c_box3.same_constant(h.getfield(box1, descr2)) + assert c_box3.same_constant(h.getfield(box2, descr3)) def test_replace_box_twice(self): + py.test.skip("replacing a box with another box: not supported any more") h = HeapCache() h.setfield(box1, box2, descr1) h.setfield(box1, box3, descr2) @@ -393,6 +404,7 @@ assert h.getfield(box2, descr3) is box5 def test_replace_box_array(self): + py.test.skip("replacing a box with another box: not supported any more") h = HeapCache() h.setarrayitem(box1, index1, box2, descr1) h.setarrayitem(box1, index1, box3, descr2) @@ -412,6 +424,7 @@ assert h.arraylen(box4) is lengthbox2 def test_replace_box_array_twice(self): + py.test.skip("replacing a box with another box: not supported any more") h = HeapCache() h.setarrayitem(box1, index1, box2, descr1) h.setarrayitem(box1, index1, box3, descr2) @@ -432,6 +445,25 @@ h.replace_box(lengthbox2, lengthbox3) assert h.arraylen(box4) is lengthbox3 + def test_replace_box_with_const_in_array(self): + h = HeapCache() + box1 = RefFrontendOp(1) + lengthbox2 = IntFrontendOp(2) + lengthbox2.setint(10) + h.arraylen_now_known(box1, lengthbox2) + assert h.arraylen(box1) is lengthbox2 + c10 = ConstInt(10) + h.replace_box(lengthbox2, c10) + assert c10.same_constant(h.arraylen(box1)) + + box2 = IntFrontendOp(2) + box2.setint(12) + h.setarrayitem(box1, index2, box2, descr1) + assert h.getarrayitem(box1, index2, descr1) is box2 + c12 = ConstInt(12) + h.replace_box(box2, c12) + assert c12.same_constant(h.getarrayitem(box1, index2, descr1)) + def test_ll_arraycopy(self): h = HeapCache() h.new_array(box1, lengthbox1) From pypy.commits at gmail.com Thu Mar 17 10:44:45 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Mar 2016 07:44:45 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: ll_arraycopy Message-ID: <56eac2dd.45d61c0a.1c085.2dac@mx.google.com> Author: Armin Rigo Branch: heapcache-refactor Changeset: r83110:f71e1a7f5bbb Date: 2016-03-17 15:43 +0100 http://bitbucket.org/pypy/pypy/changeset/f71e1a7f5bbb/ Log: ll_arraycopy diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -131,8 +131,6 @@ self.head_version 
+= _HF_VERSION_INC self.likely_virtual_version = self.head_version # - # maps boxes to HeapCacheValue - self.values = {} # heap cache # maps descrs to CacheEntry self.heap_cache = {} @@ -145,9 +143,6 @@ # at its older value. assert self.head_version < _HF_VERSION_MAX self.head_version += _HF_VERSION_INC - # - for value in self.values.itervalues(): - value.reset_keep_likely_virtual() self.heap_cache = {} self.heap_array_cache = {} @@ -218,15 +213,15 @@ def _escape_box(self, box): if isinstance(box, RefFrontendOp): remove_flags(box, HF_LIKELY_VIRTUAL | HF_IS_UNESCAPED) - # - value = self.getvalue(box, create=False) - if not value: - return - deps = value.dependencies - value.dependencies = None - if deps is not None: - for dep in deps: - self._escape(dep) + deps = self._get_deps(box) + if deps is not None and len(deps) > 1: + # 'deps[0]' is abused to store the array length, keep it + if deps[0] is None: + box._heapc_deps = None + else: + box._heapc_deps = [deps[0]] + for i in range(1, len(deps)): + self._escape(deps[i]) def clear_caches(self, opnum, descr, argboxes): if (opnum == rop.SETFIELD_GC or @@ -279,7 +274,8 @@ self.reset_keep_likely_virtuals() def _clear_caches_arraycopy(self, opnum, desrc, argboxes, effectinfo): - seen_allocation_of_target = self.getvalue(argboxes[2]).seen_allocation + seen_allocation_of_target = self._check_flag( + argboxes[2], HF_SEEN_ALLOCATION) if ( isinstance(argboxes[3], ConstInt) and isinstance(argboxes[4], ConstInt) and diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -664,7 +664,7 @@ self.position_and_flags |= FO_REPLACED_WITH_CONST def __repr__(self): - return '%s(%s)' % (self.__class__.__name__, self.position) + return '%s(0x%x)' % (self.__class__.__name__, self.position_and_flags) class IntFrontendOp(IntOp, FrontendOp): _attrs_ = ('position_and_flags', '_resint') diff --git a/rpython/jit/metainterp/test/test_heapcache.py b/rpython/jit/metainterp/test/test_heapcache.py --- a/rpython/jit/metainterp/test/test_heapcache.py +++ b/rpython/jit/metainterp/test/test_heapcache.py @@ -466,6 +466,13 @@ def test_ll_arraycopy(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) + box5 = RefFrontendOp(5) + lengthbox1 = IntFrontendOp(11) + lengthbox2 = IntFrontendOp(12) h.new_array(box1, lengthbox1) h.setarrayitem(box1, index1, box2, descr1) h.new_array(box2, lengthbox1) @@ -494,6 +501,10 @@ def test_ll_arraycopy_differing_descrs(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + lengthbox2 = IntFrontendOp(12) h.setarrayitem(box1, index1, box2, descr2) assert h.getarrayitem(box1, index1, descr2) is box2 h.new_array(box2, lengthbox2) @@ -506,6 +517,9 @@ def test_ll_arraycopy_differing_descrs_nonconst_index(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) h.setarrayitem(box1, index1, box2, descr2) assert h.getarrayitem(box1, index1, descr2) is box2 h.invalidate_caches( @@ -517,6 +531,9 @@ def test_ll_arraycopy_result_propogated(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) h.setarrayitem(box1, index1, box2, descr1) h.invalidate_caches( rop.CALL_N, @@ -527,6 +544,11 @@ def test_ll_arraycopy_dest_new(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) + 
lengthbox1 = IntFrontendOp(11) h.new_array(box1, lengthbox1) h.setarrayitem(box3, index1, box4, descr1) h.invalidate_caches( @@ -537,6 +559,10 @@ def test_ll_arraycopy_doesnt_escape_arrays(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + lengthbox1 = IntFrontendOp(11) + lengthbox2 = IntFrontendOp(12) h.new_array(box1, lengthbox1) h.new_array(box2, lengthbox2) h.invalidate_caches( From pypy.commits at gmail.com Thu Mar 17 10:54:20 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Mar 2016 07:54:20 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: escape Message-ID: <56eac51c.6672c20a.220e2.ffffff60@mx.google.com> Author: Armin Rigo Branch: heapcache-refactor Changeset: r83111:1d11223aec92 Date: 2016-03-17 15:53 +0100 http://bitbucket.org/pypy/pypy/changeset/1d11223aec92/ Log: escape diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -90,7 +90,7 @@ def _invalidate_unescaped(self, d): for ref_box in d.keys(): - if not self.heapcache._check_flag(ref_box, HF_IS_UNESCAPED): + if not self.heapcache.is_unescaped(ref_box): del d[ref_box] @@ -168,15 +168,11 @@ self.clear_caches(opnum, descr, argboxes) def _escape_from_write(self, box, fieldbox): - value = self.getvalue(box, create=False) - fieldvalue = self.getvalue(fieldbox, create=False) - if (value is not None and value.is_unescaped and - fieldvalue is not None and fieldvalue.is_unescaped): - if value.dependencies is None: - value.dependencies = [] - value.dependencies.append(fieldvalue) - elif fieldvalue is not None: - self._escape(fieldvalue) + if self.is_unescaped(box) and self.is_unescaped(fieldbox): + deps = self._get_deps(box) + deps.append(fieldbox) + elif fieldbox is not None: + self._escape_box(fieldbox) def mark_escaped(self, opnum, descr, argboxes): if opnum == rop.SETFIELD_GC: @@ -213,15 +209,18 @@ def _escape_box(self, box): if isinstance(box, RefFrontendOp): remove_flags(box, HF_LIKELY_VIRTUAL | HF_IS_UNESCAPED) - deps = self._get_deps(box) - if deps is not None and len(deps) > 1: - # 'deps[0]' is abused to store the array length, keep it - if deps[0] is None: + deps = box._heapc_deps + if deps is not None: + if not self.test_head_version(box): box._heapc_deps = None else: - box._heapc_deps = [deps[0]] - for i in range(1, len(deps)): - self._escape(deps[i]) + # 'deps[0]' is abused to store the array length, keep it + if deps[0] is None: + box._heapc_deps = None + else: + box._heapc_deps = [deps[0]] + for i in range(1, len(deps)): + self._escape_box(deps[i]) def clear_caches(self, opnum, descr, argboxes): if (opnum == rop.SETFIELD_GC or diff --git a/rpython/jit/metainterp/test/test_heapcache.py b/rpython/jit/metainterp/test/test_heapcache.py --- a/rpython/jit/metainterp/test/test_heapcache.py +++ b/rpython/jit/metainterp/test/test_heapcache.py @@ -582,6 +582,8 @@ def test_unescaped(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) assert not h.is_unescaped(box1) h.new(box2) assert h.is_unescaped(box2) @@ -592,6 +594,9 @@ def test_unescaped_testing(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) h.new(box1) h.new(box2) assert h.is_unescaped(box1) @@ -610,6 +615,8 @@ def test_ops_dont_escape(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) h.new(box1) h.new(box2) assert h.is_unescaped(box1) @@ -623,6 +630,9 @@ def test_circular_virtuals(self): h = HeapCache() + box1 
= RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) h.new(box1) h.new(box2) h.invalidate_caches(rop.SETFIELD_GC, None, [box1, box2]) @@ -631,6 +641,10 @@ def test_unescaped_array(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + lengthbox1 = IntFrontendOp(11) + lengthbox2 = IntFrontendOp(12) h.new_array(box1, lengthbox1) assert h.is_unescaped(box1) h.invalidate_caches(rop.SETARRAYITEM_GC, None, [box1, index1, box2]) @@ -654,6 +668,8 @@ def test_call_doesnt_invalidate_unescaped_boxes(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) h.new(box1) assert h.is_unescaped(box1) h.setfield(box1, box2, descr1) @@ -665,6 +681,9 @@ def test_call_doesnt_invalidate_unescaped_array_boxes(self): h = HeapCache() + box1 = RefFrontendOp(1) + box3 = RefFrontendOp(3) + lengthbox1 = IntFrontendOp(11) h.new_array(box1, lengthbox1) assert h.is_unescaped(box1) h.setarrayitem(box1, index1, box3, descr1) From pypy.commits at gmail.com Thu Mar 17 10:55:14 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 17 Mar 2016 07:55:14 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: test_heapcache passes again Message-ID: <56eac552.6bb8c20a.5dea5.02fa@mx.google.com> Author: Armin Rigo Branch: heapcache-refactor Changeset: r83112:bdc3859075cd Date: 2016-03-17 15:54 +0100 http://bitbucket.org/pypy/pypy/changeset/bdc3859075cd/ Log: test_heapcache passes again diff --git a/rpython/jit/metainterp/test/test_heapcache.py b/rpython/jit/metainterp/test/test_heapcache.py --- a/rpython/jit/metainterp/test/test_heapcache.py +++ b/rpython/jit/metainterp/test/test_heapcache.py @@ -695,6 +695,8 @@ def test_bug_missing_ignored_operations(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) h.new(box1) h.new(box2) h.setfield(box1, box2, descr1) @@ -717,6 +719,8 @@ # calling some residual code that changes the values on box3: then # the content of box2 is still cached at the old value. 
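A side note on the mechanism the heapcache-refactor changesets above share: the lazy flag reset described in the heapcache.py comments stores the flag bits and a version number in the same per-box word, so bumping the global version invalidates every box's flags at once. A plain-Python sketch of that idea follows, using simplified stand-in names and constants rather than the real RPython ones:

VERSION_INC = 0x40          # flag bits live below this, like _HF_VERSION_INC

class Box(object):
    flags = 0               # stands in for the per-RefFrontendOp flags word

class Cache(object):
    def __init__(self):
        self.head_version = 0
    def set_flag(self, box, flag):
        box.flags = self.head_version | flag
    def test_flag(self, box, flag):
        # boxes written before the last reset report no flags at all
        return box.flags >= self.head_version and bool(box.flags & flag)
    def reset(self):
        self.head_version += VERSION_INC    # O(1) global invalidation

cache, box = Cache(), Box()
cache.set_flag(box, 0x01)
assert cache.test_flag(box, 0x01)
cache.reset()
assert not cache.test_flag(box, 0x01)       # stale without touching the box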
h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) h.new(box1) h.new(box2) h.setfield(box1, box2, descr1) @@ -729,6 +733,8 @@ def test_bug_heap_cache_is_cleared_but_not_is_unescaped_2(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) h.new(box1) h.new(box2) h.setfield(box1, box2, descr1) From pypy.commits at gmail.com Thu Mar 17 12:37:34 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 17 Mar 2016 09:37:34 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: add a -live- to greenfield op since it can call nonstandard virtualizable, how to reproduce a crash with a test Message-ID: <56eadd4e.04371c0a.66f1b.558f@mx.google.com> Author: fijal Branch: release-5.x Changeset: r83113:07e163f8e2ff Date: 2016-03-17 16:15 +0200 http://bitbucket.org/pypy/pypy/changeset/07e163f8e2ff/ Log: add a -live- to greenfield op since it can call nonstandard virtualizable, how to reproduce a crash with a test diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -784,11 +784,13 @@ return [] # check for _immutable_fields_ hints immut = v_inst.concretetype.TO._immutable_field(c_fieldname.value) + need_live = False if immut: if (self.callcontrol is not None and self.callcontrol.could_be_green_field(v_inst.concretetype.TO, c_fieldname.value)): pure = '_greenfield' + need_live = True else: pure = '_pure' else: @@ -815,10 +817,12 @@ descr1 = self.cpu.fielddescrof( v_inst.concretetype.TO, quasiimmut.get_mutate_field_name(c_fieldname.value)) - op1 = [SpaceOperation('-live-', [], None), + return [SpaceOperation('-live-', [], None), SpaceOperation('record_quasiimmut_field', [v_inst, descr, descr1], None), op1] + if need_live: + return [SpaceOperation('-live-', [], None), op1] return op1 def rewrite_op_setfield(self, op, override_type=None): diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -1024,7 +1024,8 @@ v1 = varoftype(lltype.Ptr(S)) v2 = varoftype(lltype.Char) op = SpaceOperation('getfield', [v1, Constant('x', lltype.Void)], v2) - op1 = Transformer(FakeCPU(), FakeCC()).rewrite_operation(op) + op0, op1 = Transformer(FakeCPU(), FakeCC()).rewrite_operation(op) + assert op0.opname == '-live-' assert op1.opname == 'getfield_gc_i_greenfield' assert op1.args == [v1, ('fielddescr', S, 'x')] assert op1.result == v2 From pypy.commits at gmail.com Thu Mar 17 12:41:40 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 17 Mar 2016 09:41:40 -0700 (PDT) Subject: [pypy-commit] pypy py3.3-bootstrap-again: Do not import importlib during objspace creation. Message-ID: <56eade44.c13fc20a.54b2.239d@mx.google.com> Author: Ronan Lamy Branch: py3.3-bootstrap-again Changeset: r83114:90e05aa7339c Date: 2016-03-17 16:40 +0000 http://bitbucket.org/pypy/pypy/changeset/90e05aa7339c/ Log: Do not import importlib during objspace creation. This freezed importlib into a pre-built constant which prevents importing the rest of the package at run-time. 
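The failure mode described in this log message is essentially a snapshot taken while the interpreter is being built going stale. A loose analogy in ordinary Python, with made-up stand-in names and no PyPy or importlib internals involved:

import types

def build_time_setup():
    pkg = types.ModuleType('importlib_like')
    pkg.bootstrap = True
    # bake in the package as it exists right now ("pre-built constant")
    return dict(pkg.__dict__), pkg

frozen_snapshot, live_pkg = build_time_setup()

# at run time the package gains the rest of its contents...
live_pkg.machinery = True

# ...which anything relying on the baked-in snapshot can never see
assert 'machinery' in live_pkg.__dict__
assert 'machinery' not in frozen_snapshot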
diff --git a/pypy/module/imp/__init__.py b/pypy/module/imp/__init__.py --- a/pypy/module/imp/__init__.py +++ b/pypy/module/imp/__init__.py @@ -51,11 +51,3 @@ add_fork_hook('before', interp_imp.acquire_lock) add_fork_hook('parent', interp_imp.release_lock) add_fork_hook('child', interp_imp.reinit_lock) - - def setup_after_space_initialization(self): - # Install importlib as __import__ - self.space.appexec([], '''(): - import importlib._bootstrap, sys, _imp - sys.path_importer_cache.clear() - importlib._bootstrap._install(sys, _imp) - ''') From pypy.commits at gmail.com Thu Mar 17 14:29:38 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 17 Mar 2016 11:29:38 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: writing core dump just after each resoperation, added test to check that Message-ID: <56eaf792.05de1c0a.4b2c3.fffffa04@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83115:24a3cc49b4ee Date: 2016-03-17 19:28 +0100 http://bitbucket.org/pypy/pypy/changeset/24a3cc49b4ee/ Log: writing core dump just after each resoperation, added test to check that diff --git a/rpython/jit/backend/llsupport/asmmemmgr.py b/rpython/jit/backend/llsupport/asmmemmgr.py --- a/rpython/jit/backend/llsupport/asmmemmgr.py +++ b/rpython/jit/backend/llsupport/asmmemmgr.py @@ -226,6 +226,7 @@ self.init_block_builder() else: self._become_a_plain_block_builder() + self.rawstart = 0 def init_block_builder(self): self._cursubblock = lltype.nullptr(self.SUBBLOCK) @@ -248,6 +249,9 @@ self._cursubblock.data[index] = char self._cursubindex = index + 1 + def absolute_addr(self): + return self.rawstart + def overwrite(self, index, char): assert 0 <= index < self.get_relative_pos() block = self._cursubblock @@ -283,6 +287,16 @@ targetindex -= self.SUBBLOCK_SIZE assert not block + def copy_core_dump(self, addr, offset=0): + HEX = '0123456789ABCDEF' + dump = [] + src = rffi.cast(rffi.CCHARP, addr + offset) + for p in range(self.get_relative_pos()): + o = ord(src[p]) + dump.append(HEX[o >> 4]) + dump.append(HEX[o & 15]) + return ''.join(dump) + def _dump(self, addr, logname, backend=None): debug_start(logname) if have_debug_prints(): @@ -296,17 +310,11 @@ else: debug_print('SYS_EXECUTABLE', '??') # - HEX = '0123456789ABCDEF' - dump = [] - src = rffi.cast(rffi.CCHARP, addr) - for p in range(self.get_relative_pos()): - o = ord(src[p]) - dump.append(HEX[o >> 4]) - dump.append(HEX[o & 15]) + dump = self.copy_core_dump(addr) debug_print('CODE_DUMP', '@%x' % addr, '+0 ', # backwards compatibility - ''.join(dump)) + dump) # debug_stop(logname) @@ -318,6 +326,7 @@ allblocks.append(malloced) rawstart = malloced[0] rawstart = (rawstart + align - 1) & (-align) + self.rawstart = rawstart self.copy_to_raw_memory(rawstart) if self.gcroot_markers is not None: assert gcrootmap is not None diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -533,7 +533,7 @@ looptoken._ll_function_addr = rawstart if logger: logger.log_trace(logger.MARK_ASM, inputargs, operations, - ops_offset=ops_offset) + ops_offset=ops_offset, self.mc) self.fixup_target_tokens(rawstart) self.teardown() @@ -587,7 +587,7 @@ frame_depth = max(self.current_clt.frame_info.jfi_frame_depth, frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) if logger: - logger.log_trace(logger.MARK_ASM, inputargs, operations, + logger.log_trace(logger.MARK_TRACE_ASM, inputargs, operations, faildescr=faildescr, ops_offset=ops_offset) 
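Every record that write_marked() emits in this changeset is framed as one tag byte followed by a 32-bit little-endian length and then the payload (see the jitlog_write_marked() hunk in jitlog_main.h below). A minimal stand-alone reader for that framing, shown only as an illustration of the format and not part of the patch:

import io
import struct

def read_marked(f):
    # <1 byte tag> <little-endian int32 length> <length bytes of payload>
    header = f.read(5)
    if len(header) < 5:
        return None                       # end of stream
    tag, length = struct.unpack('<Bi', header)
    return tag, f.read(length)

# round-trip one record written with the same packing (0x15 is just some tag)
buf = io.BytesIO(struct.pack('<Bi', 0x15, 3) + b'abc')
assert read_marked(buf) == (0x15, b'abc')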
self.fixup_target_tokens(rawstart) self.update_frame_depth(frame_depth) diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -2,16 +2,35 @@ from rpython.jit.metainterp import resoperation as resoperations import struct -class VMProfJitLogger(object): +class JitLogMarshall(object): + def encode(self, op): + str_args = [arg.repr_short(arg._repr_memo) for arg in op.getarglist()] + descr = op.getdescr() + line = struct.pack('h", opnum) + opname.lower() + line = struct.pack("> 24) & 0xff; - header[2] = (length >> 16) & 0xff; - header[3] = (length >> 8) & 0xff; - header[4] = length & 0xff; + // little endian 32 bit singed int + header[1] = length & 0xff; + header[2] = (length >> 8) & 0xff; + header[3] = (length >> 16) & 0xff; + header[4] = (length >> 24) & 0xff; write(jitlog_fd, (const char*)&header, 5); write(jitlog_fd, text, length); } From pypy.commits at gmail.com Thu Mar 17 21:13:44 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 17 Mar 2016 18:13:44 -0700 (PDT) Subject: [pypy-commit] pypy py3.3-bootstrap-again: Close branch py3.3-bootstrap-again Message-ID: <56eb5648.a3abc20a.2ab9f.ffffb936@mx.google.com> Author: Ronan Lamy Branch: py3.3-bootstrap-again Changeset: r83116:8d273b40ca8d Date: 2016-03-18 01:13 +0000 http://bitbucket.org/pypy/pypy/changeset/8d273b40ca8d/ Log: Close branch py3.3-bootstrap-again From pypy.commits at gmail.com Thu Mar 17 21:13:49 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 17 Mar 2016 18:13:49 -0700 (PDT) Subject: [pypy-commit] pypy py3.3: Merged in py3.3-bootstrap-again (pull request #417) Message-ID: <56eb564d.418f1c0a.cb040.ffffed17@mx.google.com> Author: Ronan Lamy Branch: py3.3 Changeset: r83117:f52e4b4b7338 Date: 2016-03-18 01:13 +0000 http://bitbucket.org/pypy/pypy/changeset/f52e4b4b7338/ Log: Merged in py3.3-bootstrap-again (pull request #417) Do not import importlib during objspace creation diff --git a/pypy/module/imp/__init__.py b/pypy/module/imp/__init__.py --- a/pypy/module/imp/__init__.py +++ b/pypy/module/imp/__init__.py @@ -51,11 +51,3 @@ add_fork_hook('before', interp_imp.acquire_lock) add_fork_hook('parent', interp_imp.release_lock) add_fork_hook('child', interp_imp.reinit_lock) - - def setup_after_space_initialization(self): - # Install importlib as __import__ - self.space.appexec([], '''(): - import importlib._bootstrap, sys, _imp - sys.path_importer_cache.clear() - importlib._bootstrap._install(sys, _imp) - ''') From pypy.commits at gmail.com Fri Mar 18 03:19:18 2016 From: pypy.commits at gmail.com (plan_rich) Date: Fri, 18 Mar 2016 00:19:18 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: added jitlog to the existing call sites of the old logger Message-ID: <56ebabf6.838d1c0a.2a34f.33b8@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83118:df3f2f49084a Date: 2016-03-18 08:18 +0100 http://bitbucket.org/pypy/pypy/changeset/df3f2f49084a/ Log: added jitlog to the existing call sites of the old logger diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -532,7 +532,7 @@ looptoken._x86_ops_offset = ops_offset looptoken._ll_function_addr = rawstart if logger: - logger.log_trace(logger.MARK_ASM, inputargs, operations, + logger.log_trace(logger.MARK_TRACE_ASM, inputargs, operations, ops_offset=ops_offset, self.mc) self.fixup_target_tokens(rawstart) diff --git 
a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -480,22 +480,28 @@ def do_compile_loop(jd_id, unique_id, metainterp_sd, inputargs, operations, looptoken, log=True, name='', memo=None): + mark = VMProfJitLogger.MARK_TRACE_OPT + metainterp_sd.jitlog.log_trace(mark, inputargs, operations) + # TODO remove old metainterp_sd.logger_ops.log_loop(inputargs, operations, -2, 'compiling', None, name, memo) return metainterp_sd.cpu.compile_loop(inputargs, operations, looptoken, jd_id=jd_id, unique_id=unique_id, log=log, name=name, - logger=metainterp_sd.logger_ops) + logger=metainterp_sd.jitlog) def do_compile_bridge(metainterp_sd, faildescr, inputargs, operations, original_loop_token, log=True, memo=None): + mark = VMProfJitLogger.MARK_TRACE_OPT + metainterp_sd.jitlog.log_trace(mark, inputargs, operations, faildescr=faildescr) + # TODO remove old metainterp_sd.logger_ops.log_bridge(inputargs, operations, "compiling", memo=memo) assert isinstance(faildescr, AbstractFailDescr) return metainterp_sd.cpu.compile_bridge(faildescr, inputargs, operations, original_loop_token, log=log, - logger=metainterp_sd.logger_ops) + logger=metainterp_sd.jitlog) def forget_optimization_info(lst, reset_values=False): for item in lst: diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -19,18 +19,24 @@ class VMProfJitLogger(JitLogMarshall): - MARK_TRACED = 0x10 - MARK_INPUT_ARGS = 0x12 + MARK_INPUT_ARGS = 0x10 + MARK_RESOP_META = 0x11 + MARK_RESOP = 0x12 + MARK_RESOP_DESCR = 0x13 + MARK_ASM_ADDR = 0x14 + MARK_ASM = 0x15 - MARK_RESOP_META = 0x13 - MARK_RESOP = 0x14 - MARK_RESOP_DESCR = 0x15 - MARK_ASM_ADDR = 0x16 - MARK_ASM = 0x17 + # which type of trace is logged after this + # the trace as it is recorded by the tracer + MARK_TRACE = 0x16 + # the trace that has passed the optimizer + MARK_TRACE_OPT = 0x17 + # the trace assembled to machine code (after rewritten) + MARK_TRACE_ASM = 0x18 - # the ones as parameter to log_trace - MARK_TRACE_ASM = 0x18 + # the machine code was patched (e.g. guard) + MARK_ASM_PATCH = 0x19 def __init__(self): self.cintf = cintf.setup() @@ -70,8 +76,11 @@ # assembler address (to not duplicate it in write_code_dump) if mc is not None: - lendian_addr = struct.pack(' as two unsigend longs + lendian_addrs = struct.pack(' Author: Armin Rigo Branch: release-5.x Changeset: r83119:fd7f20eabad9 Date: 2016-03-12 20:24 +0100 http://bitbucket.org/pypy/pypy/changeset/fd7f20eabad9/ Log: Test and fix: if we use create_link_pypy() on *non-nursery* young objects, crash diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -1654,15 +1654,15 @@ else: self.nursery_objects_shadows.clear() # + # visit the P and O lists from rawrefcount, if enabled. + if self.rrc_enabled: + self.rrc_minor_collection_free() + # # Walk the list of young raw-malloced objects, and either free # them or make them old. if self.young_rawmalloced_objects: self.free_young_rawmalloced_objects() # - # visit the P and O lists from rawrefcount, if enabled. - if self.rrc_enabled: - self.rrc_minor_collection_free() - # # All live nursery objects are out of the nursery or pinned inside # the nursery. 
Create nursery barriers to protect the pinned objects, # fill the rest of the nursery with zeros and reset the current nursery diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py --- a/rpython/memory/gc/test/test_rawrefcount.py +++ b/rpython/memory/gc/test/test_rawrefcount.py @@ -29,7 +29,8 @@ assert count2 - count1 == expected_trigger def _rawrefcount_pair(self, intval, is_light=False, is_pyobj=False, - create_old=False, create_immortal=False): + create_old=False, create_immortal=False, + force_external=False): if is_light: rc = REFCNT_FROM_PYPY_LIGHT else: @@ -40,7 +41,13 @@ if create_immortal: p1 = lltype.malloc(S, immortal=True) else: - p1 = self.malloc(S) + saved = self.gc.nonlarge_max + try: + if force_external: + self.gc.nonlarge_max = 1 + p1 = self.malloc(S) + finally: + self.gc.nonlarge_max = saved p1.x = intval if create_immortal: self.consider_constant(p1) @@ -220,9 +227,10 @@ def test_pypy_nonlight_dies_quickly_old(self): self.test_pypy_nonlight_dies_quickly(old=True) - def test_pyobject_pypy_link_dies_on_minor_collection(self): + @py.test.mark.parametrize('external', [False, True]) + def test_pyobject_pypy_link_dies_on_minor_collection(self, external): p1, p1ref, r1, r1addr, check_alive = ( - self._rawrefcount_pair(42, is_pyobj=True)) + self._rawrefcount_pair(42, is_pyobj=True, force_external=external)) check_alive(0) r1.ob_refcnt += 1 # the pyobject is kept alive self._collect(major=False) @@ -231,9 +239,12 @@ self.gc.check_no_more_rawrefcount_state() lltype.free(r1, flavor='raw') - def test_pyobject_dies(self, old=False): + @py.test.mark.parametrize('old,external', [ + (False, False), (True, False), (False, True)]) + def test_pyobject_dies(self, old, external): p1, p1ref, r1, r1addr, check_alive = ( - self._rawrefcount_pair(42, is_pyobj=True, create_old=old)) + self._rawrefcount_pair(42, is_pyobj=True, create_old=old, + force_external=external)) check_alive(0) if old: self._collect(major=False) @@ -247,9 +258,12 @@ self.gc.check_no_more_rawrefcount_state() lltype.free(r1, flavor='raw') - def test_pyobject_survives_from_obj(self, old=False): + @py.test.mark.parametrize('old,external', [ + (False, False), (True, False), (False, True)]) + def test_pyobject_survives_from_obj(self, old, external): p1, p1ref, r1, r1addr, check_alive = ( - self._rawrefcount_pair(42, is_pyobj=True, create_old=old)) + self._rawrefcount_pair(42, is_pyobj=True, create_old=old, + force_external=external)) check_alive(0) self.stackroots.append(p1) self._collect(major=False) @@ -269,11 +283,6 @@ self.gc.check_no_more_rawrefcount_state() lltype.free(r1, flavor='raw') - def test_pyobject_dies_old(self): - self.test_pyobject_dies(old=True) - def test_pyobject_survives_from_obj_old(self): - self.test_pyobject_survives_from_obj(old=True) - def test_pyobject_attached_to_prebuilt_obj(self): p1, p1ref, r1, r1addr, check_alive = ( self._rawrefcount_pair(42, create_immortal=True)) From pypy.commits at gmail.com Fri Mar 18 05:02:44 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 18 Mar 2016 02:02:44 -0700 (PDT) Subject: [pypy-commit] pypy default: Update a bit this doc page Message-ID: <56ebc434.e6ebc20a.bf07d.2707@mx.google.com> Author: Armin Rigo Branch: Changeset: r83120:ee486ea3e2c5 Date: 2016-03-18 10:02 +0100 http://bitbucket.org/pypy/pypy/changeset/ee486ea3e2c5/ Log: Update a bit this doc page diff --git a/pypy/doc/config/translation.gc.txt b/pypy/doc/config/translation.gc.txt --- a/pypy/doc/config/translation.gc.txt +++ 
b/pypy/doc/config/translation.gc.txt @@ -1,24 +1,26 @@ Choose the Garbage Collector used by the translated program. -The good performing collectors are "hybrid" and "minimark". -The default is "minimark". +The recommended default is "incminimark". - "ref": reference counting. Takes very long to translate and the result is - slow. + slow. Used only for tests. Don't use it for real RPython programs. - - "marksweep": naive mark & sweep. + - "none": no GC. Leaks everything. Don't use it for real RPython + programs: the rate of leaking is immense. - "semispace": a copying semi-space GC. - "generation": a generational GC using the semi-space GC for the older generation. - - "boehm": use the Boehm conservative GC. - - "hybrid": a hybrid collector of "generation" together with a mark-n-sweep old space - - "markcompact": a slow, but memory-efficient collector, - influenced e.g. by Smalltalk systems. + - "boehm": use the Boehm conservative GC. - "minimark": a generational mark-n-sweep collector with good performance. Includes page marking for large arrays. + + - "incminimark": like minimark, but adds incremental major + collections. Seems to come with no performance drawback over + "minimark", so it is the default. A few recent features of PyPy + (like cpyext) are only working with this GC. From pypy.commits at gmail.com Fri Mar 18 05:33:37 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 18 Mar 2016 02:33:37 -0700 (PDT) Subject: [pypy-commit] pypy default: clarify error message: there may or may not have been errors building Message-ID: <56ebcb71.10921c0a.ad54b.62d0@mx.google.com> Author: Armin Rigo Branch: Changeset: r83121:a8a022a88c98 Date: 2016-03-18 10:33 +0100 http://bitbucket.org/pypy/pypy/changeset/a8a022a88c98/ Log: clarify error message: there may or may not have been errors building the cffi modules diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -327,7 +327,7 @@ # XXX possibly adapt options using modules failures = create_cffi_import_libraries(exename, options, basedir) # if failures, they were already printed - print >> sys.stderr, str(exename),'successfully built, but errors while building the above modules will be ignored' + print >> sys.stderr, str(exename),'successfully built (errors, if any, while building the above modules are ignored)' driver.task_build_cffi_imports = types.MethodType(task_build_cffi_imports, driver) driver.tasks['build_cffi_imports'] = driver.task_build_cffi_imports, [compile_goal] driver.default_goal = 'build_cffi_imports' From pypy.commits at gmail.com Fri Mar 18 05:41:23 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 18 Mar 2016 02:41:23 -0700 (PDT) Subject: [pypy-commit] pypy default: rawrefcount fix: pyobjs waiting on the dead list of the GC should not Message-ID: <56ebcd43.e213c20a.62e64.348d@mx.google.com> Author: Armin Rigo Branch: Changeset: r83122:0173cdbbbacc Date: 2016-03-18 09:46 +0000 http://bitbucket.org/pypy/pypy/changeset/0173cdbbbacc/ Log: rawrefcount fix: pyobjs waiting on the dead list of the GC should not have refcnt == 0. See comment. diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -833,14 +833,14 @@ modulename = py.path.local(eci.libraries[-1]) def dealloc_trigger(): - from pypy.module.cpyext.pyobject import _Py_Dealloc + from pypy.module.cpyext.pyobject import decref print 'dealloc_trigger...' 
while True: ob = rawrefcount.next_dead(PyObject) if not ob: break print ob - _Py_Dealloc(space, ob) + decref(space, ob) print 'dealloc_trigger DONE' return "RETRY" rawrefcount.init(dealloc_trigger) diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py --- a/pypy/module/cpyext/state.py +++ b/pypy/module/cpyext/state.py @@ -147,10 +147,10 @@ """ def perform(self, executioncontext, frame): - from pypy.module.cpyext.pyobject import PyObject, _Py_Dealloc + from pypy.module.cpyext.pyobject import PyObject, decref while True: py_obj = rawrefcount.next_dead(PyObject) if not py_obj: break - _Py_Dealloc(self.space, py_obj) + decref(self.space, py_obj) diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -2929,10 +2929,19 @@ ll_assert(rc < int(REFCNT_FROM_PYPY_LIGHT * 0.99), "refcount underflow from REFCNT_FROM_PYPY_LIGHT?") rc -= REFCNT_FROM_PYPY - self._pyobj(pyobject).ob_refcnt = rc self._pyobj(pyobject).ob_pypy_link = 0 if rc == 0: self.rrc_dealloc_pending.append(pyobject) + # an object with refcnt == 0 cannot stay around waiting + # for its deallocator to be called. Some code (lxml) + # expects that tp_dealloc is called immediately when + # the refcnt drops to 0. If it isn't, we get some + # uncleared raw pointer that can still be used to access + # the object; but (PyObject *)raw_pointer is then bogus + # because after a Py_INCREF()/Py_DECREF() on it, its + # tp_dealloc is also called! + rc = 1 + self._pyobj(pyobject).ob_refcnt = rc _rrc_free._always_inline_ = True def rrc_major_collection_trace(self): diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py --- a/rpython/memory/gc/test/test_rawrefcount.py +++ b/rpython/memory/gc/test/test_rawrefcount.py @@ -174,7 +174,7 @@ p1 = check_alive(0) self._collect(major=True, expected_trigger=1) py.test.raises(RuntimeError, "p1.x") # dead - assert r1.ob_refcnt == 0 + assert r1.ob_refcnt == 1 # in the pending list assert r1.ob_pypy_link == 0 assert self.gc.rawrefcount_next_dead() == r1addr assert self.gc.rawrefcount_next_dead() == llmemory.NULL @@ -197,7 +197,7 @@ assert p1.x == 42 self._collect(major=True, expected_trigger=1) py.test.raises(RuntimeError, "p1.x") # dead - assert r1.ob_refcnt == 0 + assert r1.ob_refcnt == 1 assert r1.ob_pypy_link == 0 assert self.gc.rawrefcount_next_dead() == r1addr self.gc.check_no_more_rawrefcount_state() @@ -214,7 +214,7 @@ else: self._collect(major=False, expected_trigger=1) py.test.raises(RuntimeError, "p1.x") # dead - assert r1.ob_refcnt == 0 + assert r1.ob_refcnt == 1 assert r1.ob_pypy_link == 0 assert self.gc.rawrefcount_next_dead() == r1addr self.gc.check_no_more_rawrefcount_state() @@ -252,7 +252,7 @@ self._collect(major=True, expected_trigger=1) else: self._collect(major=False, expected_trigger=1) - assert r1.ob_refcnt == 0 # refcnt dropped to 0 + assert r1.ob_refcnt == 1 # refcnt 1, in the pending list assert r1.ob_pypy_link == 0 # detached assert self.gc.rawrefcount_next_dead() == r1addr self.gc.check_no_more_rawrefcount_state() @@ -277,7 +277,7 @@ assert self.trigger == [] self._collect(major=True, expected_trigger=1) py.test.raises(RuntimeError, "p1.x") # dead - assert r1.ob_refcnt == 0 + assert r1.ob_refcnt == 1 assert r1.ob_pypy_link == 0 assert self.gc.rawrefcount_next_dead() == r1addr self.gc.check_no_more_rawrefcount_state() diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ 
b/rpython/rlib/rawrefcount.py @@ -136,6 +136,7 @@ ob.c_ob_refcnt -= REFCNT_FROM_PYPY ob.c_ob_pypy_link = 0 if ob.c_ob_refcnt == 0: + ob.c_ob_refcnt = 1 _d_list.append(ob) return None diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py --- a/rpython/rlib/test/test_rawrefcount.py +++ b/rpython/rlib/test/test_rawrefcount.py @@ -116,7 +116,7 @@ assert rawrefcount.next_dead(PyObject) == lltype.nullptr(PyObjectS) assert rawrefcount._o_list == [] assert wr_p() is None - assert ob.c_ob_refcnt == 0 + assert ob.c_ob_refcnt == 1 # from the pending list assert ob.c_ob_pypy_link == 0 lltype.free(ob, flavor='raw') @@ -173,7 +173,7 @@ assert rawrefcount._d_list == [ob] assert rawrefcount._p_list == [] assert wr_p() is None - assert ob.c_ob_refcnt == 0 + assert ob.c_ob_refcnt == 1 # from _d_list assert ob.c_ob_pypy_link == 0 lltype.free(ob, flavor='raw') From pypy.commits at gmail.com Fri Mar 18 05:41:25 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 18 Mar 2016 02:41:25 -0700 (PDT) Subject: [pypy-commit] pypy default: merge heads Message-ID: <56ebcd45.e6bbc20a.a5878.3005@mx.google.com> Author: Armin Rigo Branch: Changeset: r83123:b53d34f0c42d Date: 2016-03-18 09:46 +0000 http://bitbucket.org/pypy/pypy/changeset/b53d34f0c42d/ Log: merge heads diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -833,14 +833,14 @@ modulename = py.path.local(eci.libraries[-1]) def dealloc_trigger(): - from pypy.module.cpyext.pyobject import _Py_Dealloc + from pypy.module.cpyext.pyobject import decref print 'dealloc_trigger...' while True: ob = rawrefcount.next_dead(PyObject) if not ob: break print ob - _Py_Dealloc(space, ob) + decref(space, ob) print 'dealloc_trigger DONE' return "RETRY" rawrefcount.init(dealloc_trigger) diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py --- a/pypy/module/cpyext/state.py +++ b/pypy/module/cpyext/state.py @@ -147,10 +147,10 @@ """ def perform(self, executioncontext, frame): - from pypy.module.cpyext.pyobject import PyObject, _Py_Dealloc + from pypy.module.cpyext.pyobject import PyObject, decref while True: py_obj = rawrefcount.next_dead(PyObject) if not py_obj: break - _Py_Dealloc(self.space, py_obj) + decref(self.space, py_obj) diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -2929,10 +2929,19 @@ ll_assert(rc < int(REFCNT_FROM_PYPY_LIGHT * 0.99), "refcount underflow from REFCNT_FROM_PYPY_LIGHT?") rc -= REFCNT_FROM_PYPY - self._pyobj(pyobject).ob_refcnt = rc self._pyobj(pyobject).ob_pypy_link = 0 if rc == 0: self.rrc_dealloc_pending.append(pyobject) + # an object with refcnt == 0 cannot stay around waiting + # for its deallocator to be called. Some code (lxml) + # expects that tp_dealloc is called immediately when + # the refcnt drops to 0. If it isn't, we get some + # uncleared raw pointer that can still be used to access + # the object; but (PyObject *)raw_pointer is then bogus + # because after a Py_INCREF()/Py_DECREF() on it, its + # tp_dealloc is also called! 
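The bookkeeping this comment motivates, mirrored on ob.c_ob_refcnt in the rlib/rawrefcount.py hunks of the same fix, amounts to the following plain-Python sketch (a toy constant and a dict stand in for the real REFCNT_FROM_PYPY and the PyObject header):

REFCNT_FROM_PYPY = 1 << 30        # stand-in value only
dealloc_pending = []

def pypy_side_died(ob):
    ob['refcnt'] -= REFCNT_FROM_PYPY
    ob['link'] = 0
    if ob['refcnt'] == 0:
        # park the object at refcnt 1, not 0, while it waits for its real
        # tp_dealloc: a stray Py_INCREF()/Py_DECREF() pair on a leftover
        # raw pointer then cannot drop it to 0 and deallocate it twice
        ob['refcnt'] = 1
        dealloc_pending.append(ob)

ob = {'refcnt': REFCNT_FROM_PYPY, 'link': 1}
pypy_side_died(ob)
assert ob['refcnt'] == 1 and dealloc_pending == [ob]
ob['refcnt'] += 1; ob['refcnt'] -= 1      # stray borrow on the C side, harmless
assert ob['refcnt'] == 1                  # still waiting for the real dealloc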
+ rc = 1 + self._pyobj(pyobject).ob_refcnt = rc _rrc_free._always_inline_ = True def rrc_major_collection_trace(self): diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py --- a/rpython/memory/gc/test/test_rawrefcount.py +++ b/rpython/memory/gc/test/test_rawrefcount.py @@ -174,7 +174,7 @@ p1 = check_alive(0) self._collect(major=True, expected_trigger=1) py.test.raises(RuntimeError, "p1.x") # dead - assert r1.ob_refcnt == 0 + assert r1.ob_refcnt == 1 # in the pending list assert r1.ob_pypy_link == 0 assert self.gc.rawrefcount_next_dead() == r1addr assert self.gc.rawrefcount_next_dead() == llmemory.NULL @@ -197,7 +197,7 @@ assert p1.x == 42 self._collect(major=True, expected_trigger=1) py.test.raises(RuntimeError, "p1.x") # dead - assert r1.ob_refcnt == 0 + assert r1.ob_refcnt == 1 assert r1.ob_pypy_link == 0 assert self.gc.rawrefcount_next_dead() == r1addr self.gc.check_no_more_rawrefcount_state() @@ -214,7 +214,7 @@ else: self._collect(major=False, expected_trigger=1) py.test.raises(RuntimeError, "p1.x") # dead - assert r1.ob_refcnt == 0 + assert r1.ob_refcnt == 1 assert r1.ob_pypy_link == 0 assert self.gc.rawrefcount_next_dead() == r1addr self.gc.check_no_more_rawrefcount_state() @@ -252,7 +252,7 @@ self._collect(major=True, expected_trigger=1) else: self._collect(major=False, expected_trigger=1) - assert r1.ob_refcnt == 0 # refcnt dropped to 0 + assert r1.ob_refcnt == 1 # refcnt 1, in the pending list assert r1.ob_pypy_link == 0 # detached assert self.gc.rawrefcount_next_dead() == r1addr self.gc.check_no_more_rawrefcount_state() @@ -277,7 +277,7 @@ assert self.trigger == [] self._collect(major=True, expected_trigger=1) py.test.raises(RuntimeError, "p1.x") # dead - assert r1.ob_refcnt == 0 + assert r1.ob_refcnt == 1 assert r1.ob_pypy_link == 0 assert self.gc.rawrefcount_next_dead() == r1addr self.gc.check_no_more_rawrefcount_state() diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ b/rpython/rlib/rawrefcount.py @@ -136,6 +136,7 @@ ob.c_ob_refcnt -= REFCNT_FROM_PYPY ob.c_ob_pypy_link = 0 if ob.c_ob_refcnt == 0: + ob.c_ob_refcnt = 1 _d_list.append(ob) return None diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py --- a/rpython/rlib/test/test_rawrefcount.py +++ b/rpython/rlib/test/test_rawrefcount.py @@ -116,7 +116,7 @@ assert rawrefcount.next_dead(PyObject) == lltype.nullptr(PyObjectS) assert rawrefcount._o_list == [] assert wr_p() is None - assert ob.c_ob_refcnt == 0 + assert ob.c_ob_refcnt == 1 # from the pending list assert ob.c_ob_pypy_link == 0 lltype.free(ob, flavor='raw') @@ -173,7 +173,7 @@ assert rawrefcount._d_list == [ob] assert rawrefcount._p_list == [] assert wr_p() is None - assert ob.c_ob_refcnt == 0 + assert ob.c_ob_refcnt == 1 # from _d_list assert ob.c_ob_pypy_link == 0 lltype.free(ob, flavor='raw') From pypy.commits at gmail.com Fri Mar 18 05:48:09 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 18 Mar 2016 02:48:09 -0700 (PDT) Subject: [pypy-commit] pypy default: add comment Message-ID: <56ebced9.41e11c0a.dba59.67aa@mx.google.com> Author: Armin Rigo Branch: Changeset: r83124:263d68316ae2 Date: 2016-03-18 10:46 +0100 http://bitbucket.org/pypy/pypy/changeset/263d68316ae2/ Log: add comment diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ b/rpython/rlib/rawrefcount.py @@ -72,6 +72,12 @@ return p def next_dead(OB_PTR_TYPE): + """NOT_RPYTHON. 
When the GC runs, it finds some pyobjs to be dead + but cannot immediately dispose of them (it doesn't know how to call + e.g. tp_dealloc(), and anyway calling it immediately would cause all + sorts of bugs). So instead, it stores them in an internal list, + initially with refcnt == 1. This pops the next item off this list. + """ if len(_d_list) == 0: return lltype.nullptr(OB_PTR_TYPE.TO) ob = _d_list.pop() From pypy.commits at gmail.com Fri Mar 18 05:48:11 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 18 Mar 2016 02:48:11 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: rawrefcount fix: pyobjs waiting on the dead list of the GC should not Message-ID: <56ebcedb.a185c20a.7621f.3665@mx.google.com> Author: Armin Rigo Branch: release-5.x Changeset: r83125:a09a60a9c381 Date: 2016-03-18 09:46 +0000 http://bitbucket.org/pypy/pypy/changeset/a09a60a9c381/ Log: rawrefcount fix: pyobjs waiting on the dead list of the GC should not have refcnt == 0. See comment. diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -833,14 +833,14 @@ modulename = py.path.local(eci.libraries[-1]) def dealloc_trigger(): - from pypy.module.cpyext.pyobject import _Py_Dealloc + from pypy.module.cpyext.pyobject import decref print 'dealloc_trigger...' while True: ob = rawrefcount.next_dead(PyObject) if not ob: break print ob - _Py_Dealloc(space, ob) + decref(space, ob) print 'dealloc_trigger DONE' return "RETRY" rawrefcount.init(dealloc_trigger) diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py --- a/pypy/module/cpyext/state.py +++ b/pypy/module/cpyext/state.py @@ -147,10 +147,10 @@ """ def perform(self, executioncontext, frame): - from pypy.module.cpyext.pyobject import PyObject, _Py_Dealloc + from pypy.module.cpyext.pyobject import PyObject, decref while True: py_obj = rawrefcount.next_dead(PyObject) if not py_obj: break - _Py_Dealloc(self.space, py_obj) + decref(self.space, py_obj) diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -2929,10 +2929,19 @@ ll_assert(rc < int(REFCNT_FROM_PYPY_LIGHT * 0.99), "refcount underflow from REFCNT_FROM_PYPY_LIGHT?") rc -= REFCNT_FROM_PYPY - self._pyobj(pyobject).ob_refcnt = rc self._pyobj(pyobject).ob_pypy_link = 0 if rc == 0: self.rrc_dealloc_pending.append(pyobject) + # an object with refcnt == 0 cannot stay around waiting + # for its deallocator to be called. Some code (lxml) + # expects that tp_dealloc is called immediately when + # the refcnt drops to 0. If it isn't, we get some + # uncleared raw pointer that can still be used to access + # the object; but (PyObject *)raw_pointer is then bogus + # because after a Py_INCREF()/Py_DECREF() on it, its + # tp_dealloc is also called! 
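On the consumer side of the same fix, dealloc_trigger() and the Perform() method above now drain the list with decref() rather than _Py_Dealloc(), since every entry is parked at refcnt 1; the loop boils down to this sketch, where the fake_* helpers are stand-ins for the real next_dead()/decref():

def drain_dead_list(next_dead, decref):
    while True:
        ob = next_dead()
        if not ob:
            break
        decref(ob)                 # 1 -> 0, which runs the real deallocator

pending = [{'refcnt': 1, 'name': 'ob1'}, {'refcnt': 1, 'name': 'ob2'}]
freed = []

def fake_next_dead():
    return pending.pop() if pending else None

def fake_decref(ob):
    ob['refcnt'] -= 1
    if ob['refcnt'] == 0:
        freed.append(ob['name'])

drain_dead_list(fake_next_dead, fake_decref)
assert sorted(freed) == ['ob1', 'ob2']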
+ rc = 1 + self._pyobj(pyobject).ob_refcnt = rc _rrc_free._always_inline_ = True def rrc_major_collection_trace(self): diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py --- a/rpython/memory/gc/test/test_rawrefcount.py +++ b/rpython/memory/gc/test/test_rawrefcount.py @@ -174,7 +174,7 @@ p1 = check_alive(0) self._collect(major=True, expected_trigger=1) py.test.raises(RuntimeError, "p1.x") # dead - assert r1.ob_refcnt == 0 + assert r1.ob_refcnt == 1 # in the pending list assert r1.ob_pypy_link == 0 assert self.gc.rawrefcount_next_dead() == r1addr assert self.gc.rawrefcount_next_dead() == llmemory.NULL @@ -197,7 +197,7 @@ assert p1.x == 42 self._collect(major=True, expected_trigger=1) py.test.raises(RuntimeError, "p1.x") # dead - assert r1.ob_refcnt == 0 + assert r1.ob_refcnt == 1 assert r1.ob_pypy_link == 0 assert self.gc.rawrefcount_next_dead() == r1addr self.gc.check_no_more_rawrefcount_state() @@ -214,7 +214,7 @@ else: self._collect(major=False, expected_trigger=1) py.test.raises(RuntimeError, "p1.x") # dead - assert r1.ob_refcnt == 0 + assert r1.ob_refcnt == 1 assert r1.ob_pypy_link == 0 assert self.gc.rawrefcount_next_dead() == r1addr self.gc.check_no_more_rawrefcount_state() @@ -252,7 +252,7 @@ self._collect(major=True, expected_trigger=1) else: self._collect(major=False, expected_trigger=1) - assert r1.ob_refcnt == 0 # refcnt dropped to 0 + assert r1.ob_refcnt == 1 # refcnt 1, in the pending list assert r1.ob_pypy_link == 0 # detached assert self.gc.rawrefcount_next_dead() == r1addr self.gc.check_no_more_rawrefcount_state() @@ -277,7 +277,7 @@ assert self.trigger == [] self._collect(major=True, expected_trigger=1) py.test.raises(RuntimeError, "p1.x") # dead - assert r1.ob_refcnt == 0 + assert r1.ob_refcnt == 1 assert r1.ob_pypy_link == 0 assert self.gc.rawrefcount_next_dead() == r1addr self.gc.check_no_more_rawrefcount_state() diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ b/rpython/rlib/rawrefcount.py @@ -136,6 +136,7 @@ ob.c_ob_refcnt -= REFCNT_FROM_PYPY ob.c_ob_pypy_link = 0 if ob.c_ob_refcnt == 0: + ob.c_ob_refcnt = 1 _d_list.append(ob) return None diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py --- a/rpython/rlib/test/test_rawrefcount.py +++ b/rpython/rlib/test/test_rawrefcount.py @@ -116,7 +116,7 @@ assert rawrefcount.next_dead(PyObject) == lltype.nullptr(PyObjectS) assert rawrefcount._o_list == [] assert wr_p() is None - assert ob.c_ob_refcnt == 0 + assert ob.c_ob_refcnt == 1 # from the pending list assert ob.c_ob_pypy_link == 0 lltype.free(ob, flavor='raw') @@ -173,7 +173,7 @@ assert rawrefcount._d_list == [ob] assert rawrefcount._p_list == [] assert wr_p() is None - assert ob.c_ob_refcnt == 0 + assert ob.c_ob_refcnt == 1 # from _d_list assert ob.c_ob_pypy_link == 0 lltype.free(ob, flavor='raw') From pypy.commits at gmail.com Fri Mar 18 06:38:44 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 18 Mar 2016 03:38:44 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: update version to 5.0.1 Message-ID: <56ebdab4.465ec20a.90fc6.44da@mx.google.com> Author: mattip Branch: release-5.x Changeset: r83126:8212933130f5 Date: 2016-03-18 12:33 +0200 http://bitbucket.org/pypy/pypy/changeset/8212933130f5/ Log: update version to 5.0.1 diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,8 
+29,8 @@ #define PY_VERSION "2.7.10" /* PyPy version as a string */ -#define PYPY_VERSION "5.0.0" -#define PYPY_VERSION_NUM 0x05000000 +#define PYPY_VERSION "5.0.1" +#define PYPY_VERSION_NUM 0x05000100 /* Defined to mean a PyPy where cpyext holds more regular references to PyObjects, e.g. staying alive as long as the internal PyPy object diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (5, 0, 0, "final", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (5, 0, 1, "final", 0) #XXX # sync patchlevel.h import pypy From pypy.commits at gmail.com Fri Mar 18 06:38:46 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 18 Mar 2016 03:38:46 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: merge heads Message-ID: <56ebdab6.55031c0a.6d9d3.ffffe3d8@mx.google.com> Author: mattip Branch: release-5.x Changeset: r83127:b4e30894d356 Date: 2016-03-18 12:37 +0200 http://bitbucket.org/pypy/pypy/changeset/b4e30894d356/ Log: merge heads diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h --- a/pypy/module/cpyext/include/patchlevel.h +++ b/pypy/module/cpyext/include/patchlevel.h @@ -29,8 +29,8 @@ #define PY_VERSION "2.7.10" /* PyPy version as a string */ -#define PYPY_VERSION "5.0.0" -#define PYPY_VERSION_NUM 0x05000000 +#define PYPY_VERSION "5.0.1" +#define PYPY_VERSION_NUM 0x05000100 /* Defined to mean a PyPy where cpyext holds more regular references to PyObjects, e.g. staying alive as long as the internal PyPy object diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py --- a/pypy/module/sys/version.py +++ b/pypy/module/sys/version.py @@ -10,7 +10,7 @@ #XXX # sync CPYTHON_VERSION with patchlevel.h, package.py CPYTHON_API_VERSION = 1013 #XXX # sync with include/modsupport.h -PYPY_VERSION = (5, 0, 0, "final", 0) #XXX # sync patchlevel.h +PYPY_VERSION = (5, 0, 1, "final", 0) #XXX # sync patchlevel.h import pypy From pypy.commits at gmail.com Fri Mar 18 07:52:56 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 18 Mar 2016 04:52:56 -0700 (PDT) Subject: [pypy-commit] pypy default: start release 5.0.1 Message-ID: <56ebec18.4d0d1c0a.1489.ffff921b@mx.google.com> Author: mattip Branch: Changeset: r83128:41c6d761f311 Date: 2016-03-18 13:51 +0200 http://bitbucket.org/pypy/pypy/changeset/41c6d761f311/ Log: start release 5.0.1 diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-5.0.1.rst release-5.0.0.rst release-4.0.1.rst release-4.0.0.rst diff --git a/pypy/doc/release-5.0.1.rst b/pypy/doc/release-5.0.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-5.0.1.rst @@ -0,0 +1,35 @@ +========== +PyPy 5.0.1 +========== + +We have released a bugfix for PyPy 5.0, after reports that the newly released +`lxml 3.6.0`_, which now supports PyPy 5.0 +, can `crash on large files`_. +Thanks to those who reported the crash. Please update, downloads are available +at pypy.org/download.html + +.. _`lxml 3.6.0`: https://pypi.python.org/pypi/lxml/3.6.0 +.. _`crash on large files`: https://bitbucket.org/pypy/pypy/issues/2260 +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. 
It's fast (`PyPy and CPython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. + +This release supports **x86** machines on most common operating systems +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), +newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the +big- and little-endian variants of **PPC64** running Linux. + +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Please update, and continue to help us make PyPy better. + +Cheers + +The PyPy Team + From pypy.commits at gmail.com Fri Mar 18 08:50:27 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 18 Mar 2016 05:50:27 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: tweak Message-ID: <56ebf993.85371c0a.e566.326e@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83129:a59464aa579d Date: 2016-03-18 14:49 +0200 http://bitbucket.org/pypy/pypy/changeset/a59464aa579d/ Log: tweak diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -2,7 +2,7 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, cpython_struct, bootstrap_function, build_type_checkers, - PyObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL) + PyObjectFields, PyVarObjectFields, Py_ssize_t, CONST_STRING, CANNOT_FAIL) from pypy.module.cpyext.pyerrors import PyErr_BadArgument from pypy.module.cpyext.pyobject import ( PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, @@ -52,9 +52,8 @@ PyStringObjectStruct = lltype.ForwardReference() PyStringObject = lltype.Ptr(PyStringObjectStruct) -PyStringObjectFields = PyObjectFields + \ - (("ob_size", Py_ssize_t), ("ob_shash", rffi.LONG), - ("ob_sstate", rffi.INT), ("buffer", rffi.CCHARP)) +PyStringObjectFields = PyVarObjectFields + \ + (("ob_shash", rffi.LONG), ("ob_sstate", rffi.INT), ("buffer", rffi.CCHARP)) cpython_struct("PyStringObject", PyStringObjectFields, PyStringObjectStruct) @bootstrap_function diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -297,7 +297,7 @@ @cpython_api([PyObject], PyObject) def PyObject_SelfIter(space, ref): - """Undocumented function, this is wat CPython does.""" + """Undocumented function, this is what CPython does.""" Py_IncRef(space, ref) return ref From pypy.commits at gmail.com Fri Mar 18 08:51:41 2016 From: pypy.commits at gmail.com (mattip) Date: Fri, 18 Mar 2016 05:51:41 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: add test that fails since ByteArray_Type's tp_as_buffer is NULL (line 1355 in getargs.c) Message-ID: <56ebf9dd.e6ebc20a.bf07d.7ca0@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83130:16f119c9be67 Date: 2016-03-18 14:28 +0200 http://bitbucket.org/pypy/pypy/changeset/16f119c9be67/ Log: add test that fails since ByteArray_Type's tp_as_buffer is NULL (line 1355 in getargs.c) diff --git a/pypy/module/cpyext/test/test_getargs.py b/pypy/module/cpyext/test/test_getargs.py --- a/pypy/module/cpyext/test/test_getargs.py +++ b/pypy/module/cpyext/test/test_getargs.py @@ -122,6 +122,7 @@ PyBuffer_Release(&buf); return result; ''') + assert 'foo\0bar\0baz' == pybuffer(bytearray('foo\0bar\0baz')) assert 'foo\0bar\0baz' == pybuffer('foo\0bar\0baz') From pypy.commits at gmail.com Fri Mar 18 
12:53:46 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 18 Mar 2016 09:53:46 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: Allow storing negative numbers in .position_and_flags Message-ID: <56ec329a.4a811c0a.35ca4.0eb7@mx.google.com> Author: Armin Rigo Branch: heapcache-refactor Changeset: r83132:1994d682f8b3 Date: 2016-03-18 17:52 +0100 http://bitbucket.org/pypy/pypy/changeset/1994d682f8b3/ Log: Allow storing negative numbers in .position_and_flags diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -1,6 +1,5 @@ from rpython.jit.metainterp.history import Const, ConstInt from rpython.jit.metainterp.history import FrontendOp, RefFrontendOp -from rpython.jit.metainterp.history import FO_REPLACED_WITH_CONST from rpython.jit.metainterp.resoperation import rop, OpHelpers from rpython.jit.metainterp.executor import constant_from_op from rpython.rlib.rarithmetic import r_uint32, r_uint diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -642,8 +642,8 @@ # ____________________________________________________________ -FO_POSITION_MASK = r_uint(0x7FFFFFFF) -FO_REPLACED_WITH_CONST = r_uint(0x80000000) +FO_REPLACED_WITH_CONST = r_uint(1) +FO_POSITION_SHIFT = 1 class FrontendOp(AbstractResOp): @@ -651,11 +651,10 @@ _attrs_ = ('position_and_flags',) def __init__(self, pos): - assert pos >= 0 - self.position_and_flags = r_uint(pos) + self.position_and_flags = r_uint(pos << FO_POSITION_SHIFT) def get_position(self): - return intmask(self.position_and_flags & FO_POSITION_MASK) + return intmask(r_uint32(self.position_and_flags)) >> FO_POSITION_SHIFT def is_replaced_with_const(self): return bool(self.position_and_flags & FO_REPLACED_WITH_CONST) From pypy.commits at gmail.com Fri Mar 18 12:53:44 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 18 Mar 2016 09:53:44 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: comment Message-ID: <56ec3298.45d61c0a.43f97.0bf0@mx.google.com> Author: Armin Rigo Branch: heapcache-refactor Changeset: r83131:be279330887e Date: 2016-03-18 17:23 +0100 http://bitbucket.org/pypy/pypy/changeset/be279330887e/ Log: comment diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -155,6 +155,9 @@ return ref_frontend_op._get_heapc_flags() >= self.likely_virtual_version def update_version(self, ref_frontend_op): + """Ensure the version of 'ref_frontend_op' is current. If not, + it will update 'ref_frontend_op' (removing most flags currently set). 
+ """ if not self.test_head_version(ref_frontend_op): f = self.head_version if (self.test_likely_virtual_version(ref_frontend_op) and From pypy.commits at gmail.com Fri Mar 18 12:57:52 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 18 Mar 2016 09:57:52 -0700 (PDT) Subject: [pypy-commit] pypy py3k: hg merge py3.3 Message-ID: <56ec3390.c711c30a.da056.ffffda73@mx.google.com> Author: Ronan Lamy Branch: py3k Changeset: r83133:5ce0c542f4d8 Date: 2016-03-18 16:57 +0000 http://bitbucket.org/pypy/pypy/changeset/5ce0c542f4d8/ Log: hg merge py3.3 diff --git a/pypy/module/imp/__init__.py b/pypy/module/imp/__init__.py --- a/pypy/module/imp/__init__.py +++ b/pypy/module/imp/__init__.py @@ -51,11 +51,3 @@ add_fork_hook('before', interp_imp.acquire_lock) add_fork_hook('parent', interp_imp.release_lock) add_fork_hook('child', interp_imp.reinit_lock) - - def setup_after_space_initialization(self): - # Install importlib as __import__ - self.space.appexec([], '''(): - import importlib._bootstrap, sys, _imp - sys.path_importer_cache.clear() - importlib._bootstrap._install(sys, _imp) - ''') From pypy.commits at gmail.com Fri Mar 18 13:01:00 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 18 Mar 2016 10:01:00 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: Test and fix Message-ID: <56ec344c.8673c20a.63013.ffffd27f@mx.google.com> Author: Armin Rigo Branch: heapcache-refactor Changeset: r83134:5f85929ae2ee Date: 2016-03-18 18:00 +0100 http://bitbucket.org/pypy/pypy/changeset/5f85929ae2ee/ Log: Test and fix diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -651,10 +651,15 @@ _attrs_ = ('position_and_flags',) def __init__(self, pos): - self.position_and_flags = r_uint(pos << FO_POSITION_SHIFT) + # p is the 32-bit position shifted left by one (might be negative, + # but casted to the 32-bit UINT type) + p = rffi.cast(rffi.UINT, pos << FO_POSITION_SHIFT) + self.position_and_flags = r_uint(p) # zero-extended to a full word def get_position(self): - return intmask(r_uint32(self.position_and_flags)) >> FO_POSITION_SHIFT + # p is the signed 32-bit position, from self.position_and_flags + p = rffi.cast(rffi.INT, self.position_and_flags) + return intmask(p) >> FO_POSITION_SHIFT def is_replaced_with_const(self): return bool(self.position_and_flags & FO_REPLACED_WITH_CONST) diff --git a/rpython/jit/metainterp/test/test_history.py b/rpython/jit/metainterp/test/test_history.py --- a/rpython/jit/metainterp/test/test_history.py +++ b/rpython/jit/metainterp/test/test_history.py @@ -62,6 +62,12 @@ assert c5.nonnull() assert c6.nonnull() +def test_frontendop(): + f = FrontendOp(42) + assert f.get_position() == 42 + f = FrontendOp(-56) + assert f.get_position() == -56 + class TestZTranslated(StandaloneTests): def test_ztranslated_same_constant_float(self): def fn(args): From pypy.commits at gmail.com Fri Mar 18 13:03:48 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 18 Mar 2016 10:03:48 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: a comment about attribute reordering Message-ID: <56ec34f4.a2f2c20a.75382.ffffcfee@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83136:c716cfc2bc42 Date: 2016-03-15 18:00 +0100 http://bitbucket.org/pypy/pypy/changeset/c716cfc2bc42/ Log: a comment about attribute reordering diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ 
b/pypy/objspace/std/mapdict.py @@ -229,6 +229,9 @@ jit.isconstant(name) and jit.isconstant(index)) def _reorder_and_add(self, obj, name, index, w_value): + # XXX it might be worth it to change reordering such that instances of + # different classes end up with the same order of attributes. + # the idea is as follows: the subtrees of any map are ordered by # insertion. the invariant is that subtrees that are inserted later # must not contain the name of the attribute of any earlier inserted From pypy.commits at gmail.com Fri Mar 18 13:03:50 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 18 Mar 2016 10:03:50 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: in progress: support for quasi-immutables as the second argument of Message-ID: <56ec34f6.2968c20a.84ddc.ffffd1b6@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83137:c572a1cedff3 Date: 2016-03-16 13:43 +0100 http://bitbucket.org/pypy/pypy/changeset/c572a1cedff3/ Log: in progress: support for quasi-immutables as the second argument of @elidable_compatible functions. This is needed for supporting the pattern elidable_func(x, x.version) where x.version is a quasi- immutable field. diff --git a/rpython/jit/metainterp/compatible.py b/rpython/jit/metainterp/compatible.py --- a/rpython/jit/metainterp/compatible.py +++ b/rpython/jit/metainterp/compatible.py @@ -1,5 +1,6 @@ from rpython.jit.metainterp.history import newconst from rpython.jit.codewriter import longlong +from rpython.jit.metainterp.resoperation import rop def do_call(cpu, argboxes, descr): from rpython.jit.metainterp.history import INT, REF, FLOAT, VOID @@ -51,25 +52,122 @@ """ A collections of conditions that an object needs to fulfil. """ def __init__(self, ptr): self.known_valid = ptr - self.pure_call_conditions = [] + self.conditions = [] + self.last_quasi_immut_field_op = None - def record_pure_call(self, op, res): - self.pure_call_conditions.append((op, res)) + def record_condition(self, cond, res, optimizer): + cond.activate(res, optimizer) + self.conditions.append(cond) - def check_compat(self, cpu, ref): + def register_quasi_immut_field(self, op): + self.last_quasi_immut_field_op = op + + def check_compat(self, cpu, ref, loop_token): + for cond in self.conditions: + if not cond.check(cpu, ref): + return False + # need to tell all conditions, in case a quasi-immut needs to be registered + for cond in self.conditions: + cond.activate_secondary(ref, loop_token) + return True + + def prepare_const_arg_call(self, op): + from rpython.jit.metainterp.quasiimmut import QuasiImmutDescr + copied_op = op.copy() + copied_op.setarg(1, self.known_valid) + if op.numargs() == 2: + return copied_op, PureCallCondition(op) + arg2 = copied_op.getarg(2) + # really simple-minded pattern matching + # the order of things is like this: + # GUARD_COMPATIBLE(x) + # QUASIIMMUT_FIELD(x) + # y = GETFIELD_GC(x, f) + # z = CALL_PURE(x, y, ...) 
+ # we want to discover this (and so far precisely this) situation and + # make it possible for the GUARD_COMPATIBLE to still remove the call, + # even though the second argument is not constant + if arg2.getopnum() != rop.GETFIELD_GC_R: + return None, None + if not self.last_quasi_immut_field_op: + return None, None + qmutdescr = self.last_quasi_immut_field_op.getdescr() + assert isinstance(qmutdescr, QuasiImmutDescr) + fielddescr = qmutdescr.fielddescr # XXX + same_arg = self.last_quasi_immut_field_op.getarg(0) is arg2.getarg(0) + if arg2.getdescr() is not fielddescr or not same_arg: + return None, None + if not qmutdescr.is_still_valid_for(self.known_valid): + return None, None + copied_op.setarg(2, qmutdescr.constantfieldbox) + self.last_quasi_immut_field_op = None + return copied_op, QuasiimmutGetfieldAndPureCallCondition(op, qmutdescr) + +class Condition(object): + def check(self, cpu, ref): + raise NotImplementedError + + def activate(self, ref, optimizer): + self.res = ref + + def activate_secondary(self, ref, loop_token): + pass + + +class PureCallCondition(Condition): + def __init__(self, op): + self.op = op + + def check(self, cpu, ref): from rpython.rlib.debug import debug_print, debug_start, debug_stop - for op, correct_res in self.pure_call_conditions: - calldescr = op.getdescr() - # change exactly the first argument - arglist = op.getarglist() - arglist[1] = newconst(ref) - try: - res = do_call(cpu, arglist, calldescr) - except Exception: - debug_start("jit-guard-compatible") - debug_print("call to elidable_compatible function raised") - debug_stop("jit-guard-compatible") - return False - if not res.same_constant(correct_res): - return False + calldescr = self.op.getdescr() + # change exactly the first argument + arglist = self.op.getarglist() + arglist[1] = newconst(ref) + try: + res = do_call(cpu, arglist, calldescr) + except Exception: + debug_start("jit-guard-compatible") + debug_print("call to elidable_compatible function raised") + debug_stop("jit-guard-compatible") + return False + if not res.same_constant(self.res): + return False return True + + +class QuasiimmutGetfieldAndPureCallCondition(PureCallCondition): + def __init__(self, op, qmutdescr): + self.op = op + self.qmutdescr = qmutdescr + + def activate(self, ref, optimizer): + # record the quasi-immutable + optimizer.record_quasi_immutable_dep(self.qmutdescr.qmut) + Condition.activate(self, ref, optimizer) + + def activate_secondary(self, ref, loop_token): + from rpython.jit.metainterp.quasiimmut import get_current_qmut_instance + # need to register the loop for invalidation as well! 
+ qmut = get_current_qmut_instance(loop_token.cpu, ref, + self.qmutdescr.mutatefielddescr) + qmut.register_loop_token(loop_token.loop_token_wref) + + def check(self, cpu, ref): + from rpython.rlib.debug import debug_print, debug_start, debug_stop + calldescr = self.op.getdescr() + # change exactly the first argument + arglist = self.op.getarglist() + arglist[1] = newconst(ref) + arglist[2] = self.qmutdescr._get_fieldvalue(ref) + try: + res = do_call(cpu, arglist, calldescr) + except Exception: + debug_start("jit-guard-compatible") + debug_print("call to elidable_compatible function raised") + debug_stop("jit-guard-compatible") + return False + if not res.same_constant(self.res): + return False + return True + diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -1086,6 +1086,7 @@ fulfil need to be attached to this descr by optimizeopt. """ def __init__(self): + # XXX think about what is being kept alive here self._compatibility_conditions = None def handle_fail(self, deadframe, metainterp_sd, jitdriver_sd): @@ -1094,18 +1095,21 @@ assert typetag == self.TY_REF # for now refval = metainterp_sd.cpu.get_value_direct(deadframe, 'r', index) if self.is_compatible(metainterp_sd.cpu, refval): + print "~~~~~~~~~~~~~~~~~~~ compatible! growing switch", self from rpython.jit.metainterp.blackhole import resume_in_blackhole metainterp_sd.cpu.grow_guard_compatible_switch( self.rd_loop_token, self, refval) resume_in_blackhole(metainterp_sd, jitdriver_sd, self, deadframe) else: + print "~~~~~~~~~~~~~~~~~~~ not compatible!", self # a real failure return ResumeGuardDescr.handle_fail(self, deadframe, metainterp_sd, jitdriver_sd) def is_compatible(self, cpu, ref): const = history.newconst(ref) if self._compatibility_conditions: - if self._compatibility_conditions.check_compat(cpu, ref): + if self._compatibility_conditions.check_compat( + cpu, ref, self.rd_loop_token): return True return False return True # no conditions, everything works diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -635,7 +635,18 @@ # registered. structvalue = self.ensure_ptr_info_arg0(op) if not structvalue.is_constant(): - self._remove_guard_not_invalidated = True + ccond = structvalue._compatibility_conditions + if ccond: + # the object is subject to a guard_compatible. We cannot remove + # the getfield_gc on the object, since it's not constant. 
+ # However, if the quasi-immutable field is passed to a pure + # function call, we can treat it as constant then + ccond.register_quasi_immut_field(op) + # don't remove the guard_not_invalidated, the guard_compatible + # needs it + self._remove_guard_not_invalidated = False + else: + self._remove_guard_not_invalidated = True return # not a constant at all; ignore QUASIIMMUT_FIELD # from rpython.jit.metainterp.quasiimmut import QuasiImmutDescr @@ -648,9 +659,7 @@ self.get_box_replacement(op.getarg(0))): raise InvalidLoop('quasi immutable field changed during tracing') # record as an out-of-line guard - if self.optimizer.quasi_immutable_deps is None: - self.optimizer.quasi_immutable_deps = {} - self.optimizer.quasi_immutable_deps[qmutdescr.qmut] = None + self.optimizer.record_quasi_immutable_dep(qmutdescr.qmut) self._remove_guard_not_invalidated = False def optimize_GUARD_NOT_INVALIDATED(self, op): diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -268,6 +268,11 @@ self.set_optimizations(optimizations) self.setup() + def record_quasi_immutable_dep(self, qmut): + if self.quasi_immutable_deps is None: + self.quasi_immutable_deps = {} + self.quasi_immutable_deps[qmut] = None + def init_inparg_dict_from(self, lst): self.inparg_dict = {} for box in lst: @@ -457,10 +462,13 @@ if arg0.is_constant(): return info.ConstPtrInfo(arg0) opinfo = arg0.get_forwarded() + ccond = None if isinstance(opinfo, info.AbstractVirtualPtrInfo): return opinfo elif opinfo is not None: last_guard_pos = opinfo.get_last_guard_pos() + if isinstance(opinfo, info.PtrInfo): + ccond = opinfo._compatibility_conditions else: last_guard_pos = -1 assert opinfo is None or opinfo.__class__ is info.NonNullPtrInfo @@ -473,6 +481,7 @@ else: opinfo = info.StructPtrInfo(parent_descr) opinfo.init_fields(parent_descr, descr.get_index()) + opinfo._compatibility_conditions = ccond elif (op.is_getarrayitem() or op.getopnum() == rop.SETARRAYITEM_GC or op.getopnum() == rop.ARRAYLEN_GC): opinfo = info.ArrayPtrInfo(op.getdescr()) diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -142,14 +142,14 @@ ccond = info._compatibility_conditions if ccond: # it's subject to guard_compatible - copied_op = op.copy() - copied_op.setarg(1, ccond.known_valid) - result = self._can_optimize_call_pure(copied_op) - if result is not None: - self.make_constant(op, result) - self.last_emitted_operation = REMOVED - ccond.record_pure_call(copied_op, result) - return + copied_op, cond = ccond.prepare_const_arg_call(op) + if copied_op: + result = self._can_optimize_call_pure(copied_op) + if result is not None: + self.make_constant(op, result) + self.last_emitted_operation = REMOVED + ccond.record_condition(cond, result, self.optimizer) + return # Step 1: check if all arguments are constant for arg in op.getarglist(): diff --git a/rpython/jit/metainterp/quasiimmut.py b/rpython/jit/metainterp/quasiimmut.py --- a/rpython/jit/metainterp/quasiimmut.py +++ b/rpython/jit/metainterp/quasiimmut.py @@ -129,6 +129,9 @@ def get_current_constant_fieldvalue(self): struct = self.struct + return self._get_fieldvalue(struct) + + def _get_fieldvalue(self, struct): fielddescr = self.fielddescr if self.fielddescr.is_pointer_field(): return 
ConstPtr(self.cpu.bh_getfield_gc_r(struct, fielddescr)) diff --git a/rpython/jit/metainterp/test/test_compatible.py b/rpython/jit/metainterp/test/test_compatible.py --- a/rpython/jit/metainterp/test/test_compatible.py +++ b/rpython/jit/metainterp/test/test_compatible.py @@ -76,3 +76,79 @@ self.meta_interp(main, []) # XXX check number of bridges + + + def test_quasi_immutable(self): + from rpython.rlib.objectmodel import we_are_translated + class C(object): + _immutable_fields_ = ['version?'] + + class Version(object): + def __init__(self, cls): + self.cls = cls + p1 = C() + p1.version = Version(p1) + p1.x = 1 + p2 = C() + p2.version = Version(p2) + p2.x = 1 + p3 = C() + p3.version = Version(p3) + p3.x = 3 + + driver = jit.JitDriver(greens = [], reds = ['n', 'x']) + + class Counter(object): + pass + + c = Counter() + c.count = 0 + @jit.elidable_compatible() + def g(cls, v): + if we_are_translated(): + c.count += 1 + return cls.x + + def f(n, x): + res = 0 + while n > 0: + driver.can_enter_jit(n=n, x=x) + driver.jit_merge_point(n=n, x=x) + x = jit.hint(x, promote_compatible=True) + res = g(x, x.version) + n -= res + return res + + def main(x): + res = f(100, p1) + assert res == 1 + res = f(100, p2) + assert res == 1 + res = f(100, p3) + assert res == 3 + # invalidate p1 or p2 + if x: + p1.x = 2 + p1.version = Version(p1) + res = f(100, p1) + assert res == 2 + p1.x = 1 + p1.version = Version(p1) + else: + p2.x = 2 + p2.version = Version(p2) + res = f(100, p2) + assert res == 2 + p2.x = 1 + p2.version = Version(p2) + return c.count + main(True) + main(False) + + x = self.meta_interp(main, [True]) + assert x < 30 + + x = self.meta_interp(main, [False]) + assert x < 30 + # XXX check number of bridges + From pypy.commits at gmail.com Fri Mar 18 13:03:46 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 18 Mar 2016 10:03:46 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: much more lenient compatibility checking, in particular objects with the same Message-ID: <56ec34f2.2968c20a.84ddc.ffffd1ae@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83135:73f2b4d50419 Date: 2016-03-15 17:58 +0100 http://bitbucket.org/pypy/pypy/changeset/73f2b4d50419/ Log: much more lenient compatibility checking, in particular objects with the same layout and different classes can use the same trace. this disables immutable attributes on user-defined classes for now diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -46,29 +46,27 @@ return self.terminator def read(self, obj, name, index): - attr = self.find_map_attr(name, index) - if attr is None: + storageindex = self.find_map_storageindex(name, index) + if storageindex == -1: return self._get_terminator()._read_terminator(obj, name, index) - if ( # XXX in the guard_compatible world the following isconstant may never be true? - jit.isconstant(attr.storageindex) and - jit.isconstant(obj) and - not attr.ever_mutated - ): - return self._pure_mapdict_read_storage(obj, attr.storageindex) - else: - return obj._mapdict_read_storage(attr.storageindex) + #if ( # XXX in the guard_compatible world the following isconstant may never be true? 
+ # jit.isconstant(attr.storageindex) and + # jit.isconstant(obj) and + # not attr.ever_mutated + #): + # return self._pure_mapdict_read_storage(obj, attr.storageindex) + #else: + return obj._mapdict_read_storage(storageindex) @jit.elidable def _pure_mapdict_read_storage(self, obj, storageindex): return obj._mapdict_read_storage(storageindex) def write(self, obj, name, index, w_value): - attr = self.find_map_attr(name, index) - if attr is None: + storageindex = self.find_map_storageindex(name, index) + if storageindex == -1: return self._get_terminator()._write_terminator(obj, name, index, w_value) - if not attr.ever_mutated: - attr.ever_mutated = True - obj._mapdict_write_storage(attr.storageindex, w_value) + obj._mapdict_write_storage(storageindex, w_value) return True def delete(self, obj, name, index): @@ -80,6 +78,13 @@ return self._find_map_attr_cache(name, index) return self._find_map_attr(name, index) + @jit.elidable_compatible() + def find_map_storageindex(self, name, index): + attr = self.find_map_attr(name, index) + if attr is None: + return -1 + return attr.storageindex + @jit.dont_look_inside def _find_map_attr_cache(self, name, index): space = self.space @@ -389,7 +394,7 @@ self.storageindex = back.length() self.back = back self._size_estimate = self.length() * NUM_DIGITS_POW2 - self.ever_mutated = False + #self.ever_mutated = False # XXX XXX XXX immutability is disabled for now self.order = len(back.cache_attrs) if back.cache_attrs else 0 def _copy_attr(self, obj, new_obj): @@ -399,8 +404,8 @@ def delete(self, obj, name, index): if index == self.index and name == self.name: # ok, attribute is deleted - if not self.ever_mutated: - self.ever_mutated = True + #if not self.ever_mutated: + # self.ever_mutated = True return self.back.copy(obj) new_obj = self.back.delete(obj, name, index) if new_obj is not None: From pypy.commits at gmail.com Fri Mar 18 13:03:55 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 18 Mar 2016 10:03:55 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: simply the interpreter caches a lot to use the version on the map. Message-ID: <56ec34fb.8d571c0a.29616.1049@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83140:bae02262ef63 Date: 2016-03-18 14:44 +0100 http://bitbucket.org/pypy/pypy/changeset/bae02262ef63/ Log: simply the interpreter caches a lot to use the version on the map. Makes the fast path significantly faster. 
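In other words, the fast path becomes a single identity check: a cache entry remembers the Version object of the map it was filled for, and it stays valid exactly as long as the looked-up object's map still carries that same Version (changeset 76268c65c9d5 below gives every map such a version and renews it whenever the class is mutated). A condensed sketch of that check, with the counter and cache-filling bookkeeping of the real diff left out:

    class Version(object):
        pass   # opaque token: a fresh one is attached to a map when its class changes

    class CacheEntry(object):
        mapversion = None        # Version seen when this entry was filled
        storageindex = 0

        def is_valid_for_map(self, map):
            # one 'is' comparison instead of a weakref dereference plus a
            # separate check of the type's version_tag
            return map is not None and self.mapversion is map.version

    INVALID_CACHE_ENTRY = CacheEntry()
    INVALID_CACHE_ENTRY.mapversion = Version()   # matches no real map's version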
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -310,7 +310,7 @@ _immutable_fields_ = ['w_cls', 'version?'] def __init__(self, space, w_cls): - if w_cls._version_tag is None: + if w_cls is None or w_cls._version_tag is None: self.version = None else: self.version = Version() @@ -961,7 +961,7 @@ # Magic caching class CacheEntry(object): - version_tag = None + mapversion = None storageindex = 0 w_method = None # for callmethod success_counter = 0 @@ -973,35 +973,33 @@ @jit.dont_look_inside def is_valid_for_map(self, map): - # note that 'map' can be None here - mymap = self.map_wref() - if mymap is not None and mymap is map: - version_tag = map.terminator.w_cls.version_tag() - if version_tag is self.version_tag: - # everything matches, it's incredibly fast - if map.space.config.objspace.std.withmethodcachecounter: - self.success_counter += 1 - return True + # since map.version can be None, self.mapversion must never be + # thus the INVALID_CACHE_ENTRY has a fake but non-None Version() + if map is not None and self.mapversion is map.version: + # everything matches, it's incredibly fast + if map.space.config.objspace.std.withmethodcachecounter: + self.success_counter += 1 + return True return False _invalid_cache_entry_map = objectmodel.instantiate(AbstractAttribute) _invalid_cache_entry_map.terminator = None INVALID_CACHE_ENTRY = CacheEntry() -INVALID_CACHE_ENTRY.map_wref = weakref.ref(_invalid_cache_entry_map) - # different from any real map ^^^ +INVALID_CACHE_ENTRY.mapversion = Version() +# different from any real map's version ^^^ def init_mapdict_cache(pycode): num_entries = len(pycode.co_names_w) pycode._mapdict_caches = [INVALID_CACHE_ENTRY] * num_entries @jit.dont_look_inside -def _fill_cache(pycode, nameindex, map, version_tag, storageindex, w_method=None): +def _fill_cache(pycode, nameindex, mapversion, storageindex, w_method=None): + assert isinstance(mapversion, Version) entry = pycode._mapdict_caches[nameindex] if entry is INVALID_CACHE_ENTRY: entry = CacheEntry() pycode._mapdict_caches[nameindex] = entry - entry.map_wref = weakref.ref(map) - entry.version_tag = version_tag + entry.mapversion = mapversion entry.storageindex = storageindex entry.w_method = w_method if pycode.space.config.objspace.std.withmethodcachecounter: @@ -1026,8 +1024,10 @@ w_descr = w_type.getattribute_if_not_from_object() if w_descr is not None: return space._handle_getattribute(w_descr, w_obj, w_name) - version_tag = w_type.version_tag() - if version_tag is not None: + mapversion = map.version + if mapversion is not None: + version_tag = w_type.version_tag() + assert version_tag is not None name = space.str_w(w_name) # We need to care for obscure cases in which the w_descr is # a MutableCell, which may change without changing the version_tag @@ -1059,7 +1059,7 @@ if attr is not None: # Note that if map.terminator is a DevolvedDictTerminator, # map.find_map_attr will always return None if index==DICT. 
- _fill_cache(pycode, nameindex, map, version_tag, attr.storageindex) + _fill_cache(pycode, nameindex, mapversion, attr.storageindex) return w_obj._mapdict_read_storage(attr.storageindex) if space.config.objspace.std.withmethodcachecounter: INVALID_CACHE_ENTRY.failure_counter += 1 @@ -1094,7 +1094,7 @@ name, version_tag) if w_method is None or isinstance(w_method, MutableCell): return - _fill_cache(pycode, nameindex, map, version_tag, -1, w_method) + _fill_cache(pycode, nameindex, map.version, -1, w_method) # XXX fix me: if a function contains a loop with both LOAD_ATTR and # XXX LOOKUP_METHOD on the same attribute name, it keeps trashing and From pypy.commits at gmail.com Fri Mar 18 13:03:56 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 18 Mar 2016 10:03:56 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: remove dead code Message-ID: <56ec34fc.10921c0a.55175.1236@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83141:84e7d731787b Date: 2016-03-18 14:48 +0100 http://bitbucket.org/pypy/pypy/changeset/84e7d731787b/ Log: remove dead code diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -982,8 +982,6 @@ return True return False -_invalid_cache_entry_map = objectmodel.instantiate(AbstractAttribute) -_invalid_cache_entry_map.terminator = None INVALID_CACHE_ENTRY = CacheEntry() INVALID_CACHE_ENTRY.mapversion = Version() # different from any real map's version ^^^ From pypy.commits at gmail.com Fri Mar 18 13:03:51 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 18 Mar 2016 10:03:51 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: skip some tests that were broken by the disabling of the immutable stuff Message-ID: <56ec34f7.c13fc20a.54b2.ffffd2eb@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83138:718cd5a1e7ed Date: 2016-03-18 11:49 +0100 http://bitbucket.org/pypy/pypy/changeset/718cd5a1e7ed/ Log: skip some tests that were broken by the disabling of the immutable stuff diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -1,3 +1,4 @@ +import pytest from pypy.objspace.std.test.test_dictmultiobject import FakeSpace, W_DictObject from pypy.objspace.std.mapdict import * @@ -255,6 +256,7 @@ def test_attr_immutability(monkeypatch): + pytest.skip("disabled for now") cls = Class() obj = cls.instantiate() obj.setdictvalue(space, "a", 10) @@ -292,6 +294,7 @@ assert obj2.map is obj.map def test_attr_immutability_delete(): + pytest.skip("disabled for now") cls = Class() obj = cls.instantiate() obj.setdictvalue(space, "a", 10) From pypy.commits at gmail.com Fri Mar 18 13:03:58 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 18 Mar 2016 10:03:58 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: start using elidable_compatible functions on the map that get stuff from the Message-ID: <56ec34fe.41e11c0a.d2dd2.10dd@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83142:fa123e6f37d8 Date: 2016-03-18 17:26 +0100 http://bitbucket.org/pypy/pypy/changeset/fa123e6f37d8/ Log: start using elidable_compatible functions on the map that get stuff from the type. The goal is to never have to promote the type in most common code paths. 
somewhat experimental diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -227,7 +227,7 @@ "make instances really small but slow without the JIT", default=False, requires=[("objspace.std.getattributeshortcut", True), - ("objspace.std.withtypeversion", True), + ("objspace.std.withmethodcache", True), ]), BoolOption("withrangelist", diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -174,6 +174,8 @@ # hooks that the mapdict implementations needs: def _get_mapdict_map(self): return None + def _get_mapdict_map_no_promote(self): + return None def _set_mapdict_map(self, map): raise NotImplementedError def _mapdict_read_storage(self, index): diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -2,6 +2,7 @@ from rpython.rlib import jit, objectmodel, debug, rerased from rpython.rlib.rarithmetic import intmask, r_uint +from rpython.rlib.jit import we_are_jitted from pypy.interpreter.baseobjspace import W_Root from pypy.objspace.std.dictmultiobject import ( @@ -305,6 +306,36 @@ def __repr__(self): return "<%s>" % (self.__class__.__name__,) + # ____________________________________________________________ + # a few things that also interact with the type + # the important idea is: don't read self.terminator.w_cls outside of an + # elidable_compatible function + + @jit.elidable_compatible(quasi_immut_field_name_for_second_arg="version") + def _type_safe_to_do_getattr(self, version): + # it's safe if the version is not None and the type does not define its + # own __getattribute__ + if version is None: + return False + w_type = self.terminator.w_cls + return w_type.has_object_getattribute() + + def _type_lookup(self, name): + if not self._type_safe_to_do_getattr(): + return self.getclass_from_terminator().lookup(name) + w_descr = self._type_lookup_pure(name) + if isinstance(w_descr, MutableCell): + w_descr = w_descr.unwrap_cell(self.space) + return w_descr + + @jit.elidable_compatible(quasi_immut_field_name_for_second_arg="version") + def _type_lookup_pure(self, version, name): + assert version is not None + w_type = self.terminator.w_cls + w_res = w_type._pure_lookup_where_with_method_cache( + name, w_cls.version_tag()) + return w_res + class Terminator(AbstractAttribute): _immutable_fields_ = ['w_cls', 'version?'] @@ -1097,3 +1128,16 @@ # XXX fix me: if a function contains a loop with both LOAD_ATTR and # XXX LOOKUP_METHOD on the same attribute name, it keeps trashing and # XXX rebuilding the cache + + +# ____________________________________________________________ +# various functions that replace objspace implementations + +def mapdict_lookup(space, w_obj, name): + if we_are_jitted(): + map = w_obj._get_mapdict_map_no_promote() + if map is not None: + return map._type_lookup(name) + w_type = space.type(w_obj) + return w_type.lookup(name) + diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -324,6 +324,9 @@ return w_obj.getclass(self) def lookup(self, w_obj, name): + if self.config.objspace.std.withmapdict: + from pypy.objspace.std.mapdict import mapdict_lookup + return mapdict_lookup(self, w_obj, name) w_type = self.type(w_obj) return w_type.lookup(name) lookup._annspecialcase_ = 'specialize:lookup' diff --git 
a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -128,7 +128,7 @@ def promote_string(x): return hint(x, promote_string=True) -def elidable_compatible(): +def elidable_compatible(quasi_immut_field_name_for_second_arg=None): """ func must be a function of at least one argument. That first argument must be pointer-like (XXX for now?) The behaviour of @elidable_compatible is as follows: @@ -151,14 +151,16 @@ single value res. If func is an injection, there is no reason to not simply use a regular promote. - XXX what happens if the *args are not constant? XXX we need a better name + XXX document quasi_immut_field_name_for_second_arg """ def decorate(func): elidable(func) def wrapped_func(x, *args): assert x is not None x = hint(x, promote_compatible=True) + if quasi_immut_field_name_for_second_arg is not None: + return func(x, getattr(x, quasi_immut_field_name_for_second_arg), *args) return func(x, *args) return wrapped_func return decorate From pypy.commits at gmail.com Fri Mar 18 13:03:53 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 18 Mar 2016 10:03:53 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: make every map of a class have a version that is updated when the class is changed Message-ID: <56ec34f9.6614c20a.b0622.ffffde34@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83139:76268c65c9d5 Date: 2016-03-18 13:57 +0100 http://bitbucket.org/pypy/pypy/changeset/76268c65c9d5/ Log: make every map of a class have a version that is updated when the class is changed diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -26,8 +26,11 @@ # we want to propagate knowledge that the result cannot be negative +class Version(object): + pass + class AbstractAttribute(object): - _immutable_fields_ = ['terminator'] + _immutable_fields_ = ['terminator', 'version?'] cache_attrs = None _size_estimate = 0 @@ -35,6 +38,12 @@ self.space = space assert isinstance(terminator, Terminator) self.terminator = terminator + # the maps have their own versions, if the terminator version is not + # None + if terminator.version is not None: + self.version = Version() + else: + self.version = None @jit.elidable_compatible() def getclass_from_terminator(self): @@ -159,6 +168,9 @@ attr = cache.get((name, index), None) if attr is None: attr = PlainAttribute(name, index, self) + if self.terminator.all_children is None: + self.terminator.all_children = [] + self.terminator.all_children.append(attr) cache[name, index] = attr return attr @@ -295,11 +307,28 @@ class Terminator(AbstractAttribute): - _immutable_fields_ = ['w_cls'] + _immutable_fields_ = ['w_cls', 'version?'] def __init__(self, space, w_cls): + if w_cls._version_tag is None: + self.version = None + else: + self.version = Version() AbstractAttribute.__init__(self, space, self) self.w_cls = w_cls + self.all_children = None + + def mutated_w_cls_version(self, version): + if version is None: + self.version = None + else: + self.version = Version() + if self.all_children is not None: + for map in self.all_children: + if version is None: + map.version = None + else: + map.version = Version() def _read_terminator(self, obj, name, index): return None @@ -335,6 +364,10 @@ Terminator.__init__(self, space, w_cls) self.devolved_dict_terminator = DevolvedDictTerminator(space, w_cls) + def mutated_w_cls_version(self, version): + self.devolved_dict_terminator.mutated_w_cls_version(version) + 
Terminator.mutated_w_cls_version(self, version) + def materialize_r_dict(self, space, obj, dict_w): result = Object() result.space = space diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py --- a/pypy/objspace/std/test/test_mapdict.py +++ b/pypy/objspace/std/test/test_mapdict.py @@ -17,6 +17,7 @@ class Class(object): def __init__(self, hasdict=True): self.hasdict = True + self._version_tag = None if hasdict: self.terminator = DictTerminator(space, self) else: @@ -34,7 +35,7 @@ hasdict = False def test_plain_attribute(): - w_cls = "class" + w_cls = Class() aa = PlainAttribute("b", DICT, PlainAttribute("a", DICT, Terminator(space, w_cls))) @@ -62,14 +63,14 @@ assert aa.get_terminator() is aa.back.back def test_huge_chain(): - current = Terminator(space, "cls") + current = Terminator(space, Class()) for i in range(20000): current = PlainAttribute(str(i), DICT, current) assert current.find_map_attr("0", DICT).storageindex == 0 def test_search(): - aa = PlainAttribute("b", DICT, PlainAttribute("a", DICT, Terminator(None, None))) + aa = PlainAttribute("b", DICT, PlainAttribute("a", DICT, Terminator(None, Class()))) assert aa.search(DICT) is aa assert aa.search(SLOTS_STARTING_FROM) is None assert aa.search(SPECIAL) is None @@ -1209,6 +1210,7 @@ got = x.a assert got == 'd' + class AppTestGlobalCaching(AppTestWithMapDict): spaceconfig = {"objspace.std.withmethodcachecounter": True, "objspace.std.withmapdict": True} diff --git a/pypy/objspace/std/test/test_versionedtype.py b/pypy/objspace/std/test/test_versionedtype.py --- a/pypy/objspace/std/test/test_versionedtype.py +++ b/pypy/objspace/std/test/test_versionedtype.py @@ -260,6 +260,68 @@ assert space.float_w(cell.w_value) == 2.2 +class TestVersionedTypeMapDict(test_typeobject.TestTypeObject): + spaceconfig = {"objspace.std.withtypeversion": True, + "objspace.std.withmapdict": True} + + def get_three_classes_and_instances(self): + space = self.space + w_types = space.appexec([], """(): + class A(object): + def f(self): pass + class B(A): + pass + class X: + pass + class Y(object): + pass + class C(Y, X): + pass + a = A() + a.x = 1 + b = B() + b.x = 1 + c = C() + c.x = 1 + c.y = 2 + return A, B, C, a, b, c + """) + return space.unpackiterable(w_types) + + def test_update_map_version_too(self): + space = self.space + w_A, w_B, w_C, a, b, c = self.get_three_classes_and_instances() + def get_versions(cls, *maps): + result = [cls.version_tag(), cls.terminator.version, + cls.terminator.devolved_dict_terminator.version] + result += [m.version for m in maps] + return result + def all_different(v1s, v2s): + for v1, v2 in zip(v1s, v2s): + assert v1 is not v2 + aversions = get_versions(w_A, a.map) + bversions = get_versions(w_B, b.map) + + assert w_C.version_tag() is None + assert w_C.terminator.version is None + assert c.map.version is None + # all versions are different + assert len(set(aversions)) == len(aversions) + assert len(set(bversions)) == len(bversions) + + space.setattr(w_B, space.wrap("a"), space.wrap(1)) + assert get_versions(w_A, a.map) == aversions + all_different(get_versions(w_B, b.map), bversions) + bversions = get_versions(w_B, b.map) + + space.setattr(w_A, space.wrap("f"), space.wrap(5)) + all_different(get_versions(w_A, a.map), aversions) + all_different(get_versions(w_B, b.map), bversions) + + space.delattr(w_A, space.wrap("f")) + all_different(get_versions(w_A, a.map), aversions) + all_different(get_versions(w_B, b.map), bversions) + class 
AppTestVersionedType(test_typeobject.AppTestTypeObject): spaceconfig = {"objspace.std.withtypeversion": True} diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py --- a/pypy/objspace/std/typeobject.py +++ b/pypy/objspace/std/typeobject.py @@ -217,7 +217,7 @@ if (space.config.objspace.std.withtypeversion and w_self._version_tag is not None): - w_self._version_tag = VersionTag() + w_self._set_version_tag(VersionTag()) subclasses_w = w_self.get_subclasses() for w_subclass in subclasses_w: @@ -234,6 +234,11 @@ def _pure_version_tag(w_self): return w_self._version_tag + def _set_version_tag(self, version_tag): + self._version_tag = version_tag + if self.space.config.objspace.std.withmapdict: + self.terminator.mutated_w_cls_version(version_tag) + def getattribute_if_not_from_object(w_self): """ this method returns the applevel __getattribute__ if that is not the one from object, in which case it returns None """ @@ -837,10 +842,10 @@ w_type.version_tag() is not None and not is_mro_purely_of_types(w_type.mro_w)): # Disable method cache if the hierarchy isn't pure. - w_type._version_tag = None + w_type._set_version_tag(None) for w_subclass in w_type.get_subclasses(): if isinstance(w_subclass, W_TypeObject): - w_subclass._version_tag = None + w_subclass._set_version_tag(None) def descr__base(space, w_type): w_type = _check(space, w_type) From pypy.commits at gmail.com Fri Mar 18 13:03:59 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 18 Mar 2016 10:03:59 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: fix getattr (both in the objspace and in the callmethod version) to not read Message-ID: <56ec34ff.86351c0a.dbdc2.0f5f@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83143:befa6a197166 Date: 2016-03-18 18:02 +0100 http://bitbucket.org/pypy/pypy/changeset/befa6a197166/ Log: fix getattr (both in the objspace and in the callmethod version) to not read (and thus promote) the type when we have a mapdict object and we are jitted. diff --git a/pypy/objspace/std/callmethod.py b/pypy/objspace/std/callmethod.py --- a/pypy/objspace/std/callmethod.py +++ b/pypy/objspace/std/callmethod.py @@ -41,10 +41,19 @@ w_name = f.getname_w(nameindex) w_value = None - w_type = space.type(w_obj) - if w_type.has_object_getattribute(): + safe = False + if space.config.objspace.std.withmapdict and jit.we_are_jitted(): + # compute safeness without reading the type + map = w_obj._get_mapdict_map_no_promote() + if map is not None and map._type_safe_to_do_getattr(): + safe = True + else: + w_type = space.type(w_obj) + safe = w_type.has_object_getattribute() + + if safe: name = space.str_w(w_name) - w_descr = w_type.lookup(name) + w_descr = space.lookup(w_obj, name) if w_descr is None: # this handles directly the common case # module.function(args..) @@ -62,6 +71,7 @@ if (space.config.objspace.std.withmapdict and not jit.we_are_jitted()): # let mapdict cache stuff + w_type = space.type(w_obj) LOOKUP_METHOD_mapdict_fill_cache_method( space, f.getcode(), name, nameindex, w_obj, w_type) return @@ -113,8 +123,17 @@ """An optimized version of space.call_method() based on the same principle as above. 
""" - w_type = space.type(w_obj) - if w_type.has_object_getattribute(): + safe = False + if space.config.objspace.std.withmapdict: + # compute safeness without reading the type + map = w_obj._get_mapdict_map_no_promote() + if map is not None and map._type_safe_to_do_getattr(): + import pdb; pdb.set_trace() + safe = True + else: + w_type = space.type(w_obj) + safe = w_type.has_object_getattribute() + if safe: w_descr = space.lookup(w_obj, methname) typ = type(w_descr) if typ is function.Function or typ is function.FunctionWithFixedCode: diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -333,7 +333,7 @@ assert version is not None w_type = self.terminator.w_cls w_res = w_type._pure_lookup_where_with_method_cache( - name, w_cls.version_tag()) + name, w_type.version_tag())[1] return w_res diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -530,15 +530,23 @@ return self._DescrOperation_getattr(w_obj, w_name) # an optional shortcut for performance - w_type = self.type(w_obj) - w_descr = w_type.getattribute_if_not_from_object() - if w_descr is not None: - return self._handle_getattribute(w_descr, w_obj, w_name) + safe = False + if self.config.objspace.std.withmapdict and jit.we_are_jitted(): + # compute safeness without reading the type + map = w_obj._get_mapdict_map_no_promote() + if map is not None and map._type_safe_to_do_getattr(): + safe = True + + if not safe: + w_type = self.type(w_obj) + w_descr = w_type.getattribute_if_not_from_object() + if w_descr is not None: + return self._handle_getattribute(w_descr, w_obj, w_name) # fast path: XXX this is duplicating most of the logic # from the default __getattribute__ and the getattr() method... name = self.str_w(w_name) - w_descr = w_type.lookup(name) + w_descr = self.lookup(w_obj, name) e = None if w_descr is not None: w_get = None @@ -554,6 +562,7 @@ if w_get is not None: # __get__ is allowed to raise an AttributeError to trigger # use of __getattr__. 
+ w_type = self.type(w_obj) try: return self.get_and_call_function(w_get, w_descr, w_obj, w_type) diff --git a/pypy/objspace/std/test/test_callmethod.py b/pypy/objspace/std/test/test_callmethod.py --- a/pypy/objspace/std/test/test_callmethod.py +++ b/pypy/objspace/std/test/test_callmethod.py @@ -111,6 +111,9 @@ class AppTestCallMethodWithGetattributeShortcut(AppTestCallMethod): spaceconfig = {"objspace.std.getattributeshortcut": True} +class AppTestCallMethodWithGetattributeShortcutAndMapdict(AppTestCallMethod): + spaceconfig = {"objspace.std.getattributeshortcut": True, + "objspace.std.withmapdict": True} class TestCallMethod: def test_space_call_method(self): @@ -132,3 +135,7 @@ callmethod.LOOKUP_METHOD) assert (self.space.FrameClass.CALL_METHOD.im_func == callmethod.CALL_METHOD) + +class TestCallMethodMapDict(TestCallMethod): + spaceconfig = {"objspace.std.getattributeshortcut": True, + "objspace.std.withmapdict": True} From pypy.commits at gmail.com Fri Mar 18 13:06:30 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 18 Mar 2016 10:06:30 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: remove forgotten pdb Message-ID: <56ec3596.99e61c0a.71f76.0e70@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83144:8e40d7d6198c Date: 2016-03-18 18:05 +0100 http://bitbucket.org/pypy/pypy/changeset/8e40d7d6198c/ Log: remove forgotten pdb diff --git a/pypy/objspace/std/callmethod.py b/pypy/objspace/std/callmethod.py --- a/pypy/objspace/std/callmethod.py +++ b/pypy/objspace/std/callmethod.py @@ -128,7 +128,6 @@ # compute safeness without reading the type map = w_obj._get_mapdict_map_no_promote() if map is not None and map._type_safe_to_do_getattr(): - import pdb; pdb.set_trace() safe = True else: w_type = space.type(w_obj) From pypy.commits at gmail.com Fri Mar 18 13:22:48 2016 From: pypy.commits at gmail.com (cfbolz) Date: Fri, 18 Mar 2016 10:22:48 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: fix version _immutable_fields_ declaration Message-ID: <56ec3968.8b941c0a.37c90.1962@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83145:59f62a44d4cf Date: 2016-03-18 18:22 +0100 http://bitbucket.org/pypy/pypy/changeset/59f62a44d4cf/ Log: fix version _immutable_fields_ declaration diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -338,7 +338,7 @@ class Terminator(AbstractAttribute): - _immutable_fields_ = ['w_cls', 'version?'] + _immutable_fields_ = ['w_cls'] def __init__(self, space, w_cls): if w_cls is None or w_cls._version_tag is None: From pypy.commits at gmail.com Fri Mar 18 13:45:37 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 18 Mar 2016 10:45:37 -0700 (PDT) Subject: [pypy-commit] pypy py3k: Reapply commit 0ad8f5229df1 Message-ID: <56ec3ec1.e853c20a.24b0f.ffffe64d@mx.google.com> Author: Ronan Lamy Branch: py3k Changeset: r83146:9e1a6d590094 Date: 2016-03-18 17:44 +0000 http://bitbucket.org/pypy/pypy/changeset/9e1a6d590094/ Log: Reapply commit 0ad8f5229df1 diff --git a/lib-python/3/site.py b/lib-python/3/site.py --- a/lib-python/3/site.py +++ b/lib-python/3/site.py @@ -70,6 +70,8 @@ import re import builtins +is_pypy = '__pypy__' in sys.builtin_module_names + # Prefixes for site-packages; add additional prefixes like /usr/local here PREFIXES = [sys.prefix, sys.exec_prefix] # Enable per user site-packages directory @@ -302,6 +304,10 @@ if sys.platform in ('os2emx', 'riscos'): 
             sitepackages.append(os.path.join(prefix, "Lib", "site-packages"))
+        elif is_pypy:
+            from distutils.sysconfig import get_python_lib
+            sitepackages.append(get_python_lib(standard_lib=False,
+                                               prefix=prefix))
         elif os.sep == '/':
             sitepackages.append(os.path.join(prefix, "lib",
                                         "python" + sys.version[:3],
@@ -440,21 +446,27 @@
 def setcopyright():
     """Set 'copyright' and 'credits' in builtins"""
+    licenseargs = None
+    if is_pypy:
+        credits = "PyPy is maintained by the PyPy developers: http://pypy.org/"
+        license = "See https://bitbucket.org/pypy/pypy/src/default/LICENSE"
+        licenseargs = (license,)
+    elif sys.platform[:4] == 'java':
+        credits = ("Jython is maintained by the Jython developers "
+                   "(www.jython.org).")
+    else:
+        credits = """\
+    Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
+    for supporting Python development. See www.python.org for more information."""
+    if licenseargs is None:
+        here = os.path.dirname(os.__file__)
+        license = "See http://www.python.org/download/releases/%.5s/license/" % sys.version,
+        licenseargs = (license, ["LICENSE.txt", "LICENSE"],
+                       [os.path.join(here, os.pardir), here, os.curdir])
+
     builtins.copyright = _Printer("copyright", sys.copyright)
-    if sys.platform[:4] == 'java':
-        builtins.credits = _Printer(
-            "credits",
-            "Jython is maintained by the Jython developers (www.jython.org).")
-    else:
-        builtins.credits = _Printer("credits", """\
-    Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
-    for supporting Python development. See www.python.org for more information.""")
-    here = os.path.dirname(os.__file__)
-    builtins.license = _Printer(
-        "license",
-        "See http://www.python.org/download/releases/%.5s/license/" % sys.version,
-        ["LICENSE.txt", "LICENSE"],
-        [os.path.join(here, os.pardir), here, os.curdir])
+    builtins.credits = _Printer("credits", credits)
+    builtins.license = _Printer("license", *licenseargs)
 
 class _Helper(object):

From pypy.commits at gmail.com Fri Mar 18 13:51:51 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 18 Mar 2016 10:51:51 -0700 (PDT)
Subject: [pypy-commit] pypy default: Add some "add_memory_pressure=True" at all places that malloc something
Message-ID: <56ec4037.6507c20a.ad77a.ffffe057@mx.google.com>

Author: Armin Rigo
Branch: 
Changeset: r83147:9137853fd0ec
Date: 2016-03-18 17:52 +0000
http://bitbucket.org/pypy/pypy/changeset/9137853fd0ec/

Log: Add some "add_memory_pressure=True" at all places that malloc something that is likely to be a PyObject or attached to a PyObject, and thus relying on our GC to know when it must be freed. This change helps a lot on some examples.
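(Aside, not part of the changeset: a minimal sketch of the pattern this commit applies. The helpers "new_raw_buffer" and "free_raw_buffer" are made-up names for illustration; the only thing the diff below actually adds is the "add_memory_pressure=True" keyword of lltype.malloc, which tells the GC that a raw, GC-invisible allocation is tied to a GC-managed object so it can account for that memory when deciding when to collect.)

    # Illustrative sketch only, assuming a hypothetical raw buffer owned by a
    # PyObject wrapper; not code from this commit.
    from rpython.rtyper.lltypesystem import lltype, rffi

    def new_raw_buffer(length):
        # Raw memory is invisible to the GC, so report its existence as
        # extra memory pressure to make collections happen soon enough.
        return lltype.malloc(rffi.CCHARP.TO, length + 1,
                             flavor='raw', zero=True,
                             add_memory_pressure=True)

    def free_raw_buffer(buf):
        # Raw allocations are never collected automatically; free them
        # explicitly when the owning object goes away.
        lltype.free(buf, flavor='raw')
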
diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
--- a/pypy/module/cpyext/bytesobject.py
+++ b/pypy/module/cpyext/bytesobject.py
@@ -80,7 +80,8 @@
     buflen = length + 1
     py_str.c_size = length
     py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, buflen,
-                                    flavor='raw', zero=True)
+                                    flavor='raw', zero=True,
+                                    add_memory_pressure=True)
     return py_str
 
 def string_attach(space, py_obj, w_obj):
diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
--- a/pypy/module/cpyext/object.py
+++ b/pypy/module/cpyext/object.py
@@ -17,7 +17,8 @@
 @cpython_api([Py_ssize_t], rffi.VOIDP)
 def PyObject_MALLOC(space, size):
     return lltype.malloc(rffi.VOIDP.TO, size,
-                         flavor='raw', zero=True)
+                         flavor='raw', zero=True,
+                         add_memory_pressure=True)
 
 @cpython_api([rffi.VOIDP], lltype.Void)
 def PyObject_FREE(space, ptr):
diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py
--- a/pypy/module/cpyext/pyobject.py
+++ b/pypy/module/cpyext/pyobject.py
@@ -50,7 +50,8 @@
         size += itemcount * pytype.c_tp_itemsize
     assert size >= rffi.sizeof(PyObject.TO)
     buf = lltype.malloc(rffi.VOIDP.TO, size,
-                        flavor='raw', zero=True)
+                        flavor='raw', zero=True,
+                        add_memory_pressure=True)
     pyobj = rffi.cast(PyObject, buf)
     pyobj.c_ob_refcnt = 1
     pyobj.c_ob_type = pytype
diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py
--- a/pypy/module/cpyext/tupleobject.py
+++ b/pypy/module/cpyext/tupleobject.py
@@ -59,7 +59,8 @@
     py_tup = rffi.cast(PyTupleObject, py_obj)
     py_tup.c_ob_item = lltype.malloc(ObjectItems, length,
-                                     flavor='raw', zero=True)
+                                     flavor='raw', zero=True,
+                                     add_memory_pressure=True)
     py_tup.c_ob_size = length
     return py_tup
 
@@ -70,7 +71,8 @@
     """
     items_w = space.fixedview(w_obj)
     l = len(items_w)
-    p = lltype.malloc(ObjectItems, l, flavor='raw')
+    p = lltype.malloc(ObjectItems, l, flavor='raw',
+                      add_memory_pressure=True)
     i = 0
     try:
         while i < l:
@@ -177,7 +179,8 @@
     ref = rffi.cast(PyTupleObject, ref)
     oldsize = ref.c_ob_size
     oldp = ref.c_ob_item
-    newp = lltype.malloc(ObjectItems, newsize, zero=True, flavor='raw')
+    newp = lltype.malloc(ObjectItems, newsize, zero=True, flavor='raw',
+                         add_memory_pressure=True)
     try:
         if oldsize < newsize:
             to_cp = oldsize
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -421,7 +421,8 @@
     Py_DecRef(space, w_metatype)
     heaptype = lltype.malloc(PyHeapTypeObject.TO,
-                             flavor='raw', zero=True)
+                             flavor='raw', zero=True,
+                             add_memory_pressure=True)
     pto = heaptype.c_ht_type
     pto.c_ob_refcnt = 1
     pto.c_ob_type = metatype
diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py
--- a/pypy/module/cpyext/unicodeobject.py
+++ b/pypy/module/cpyext/unicodeobject.py
@@ -55,7 +55,8 @@
     buflen = length + 1
     py_uni.c_size = length
     py_uni.c_buffer = lltype.malloc(rffi.CWCHARP.TO, buflen,
-                                    flavor='raw', zero=True)
+                                    flavor='raw', zero=True,
+                                    add_memory_pressure=True)
     return py_uni
 
 def unicode_attach(space, py_obj, w_obj):

From pypy.commits at gmail.com Fri Mar 18 13:52:42 2016
From: pypy.commits at gmail.com (arigo)
Date: Fri, 18 Mar 2016 10:52:42 -0700 (PDT)
Subject: [pypy-commit] pypy release-5.x: Add some "add_memory_pressure=True" at all places that malloc something
Message-ID: <56ec406a.02f0c20a.c3d6b.ffffdf41@mx.google.com>

Author: Armin Rigo
Branch: release-5.x
Changeset: r83148:bbd45126bc69
Date: 2016-03-18 17:52 +0000
http://bitbucket.org/pypy/pypy/changeset/bbd45126bc69/

Log: Add some "add_memory_pressure=True" at all places that malloc something that is likely to be a PyObject or attached to a PyObject, and thus relying on our GC to know when it must be freed. This change helps a lot on some examples.

diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
--- a/pypy/module/cpyext/bytesobject.py
+++ b/pypy/module/cpyext/bytesobject.py
@@ -80,7 +80,8 @@
     buflen = length + 1
     py_str.c_size = length
     py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, buflen,
-                                    flavor='raw', zero=True)
+                                    flavor='raw', zero=True,
+                                    add_memory_pressure=True)
     return py_str
 
 def string_attach(space, py_obj, w_obj):
diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
--- a/pypy/module/cpyext/object.py
+++ b/pypy/module/cpyext/object.py
@@ -17,7 +17,8 @@
 @cpython_api([Py_ssize_t], rffi.VOIDP)
 def PyObject_MALLOC(space, size):
     return lltype.malloc(rffi.VOIDP.TO, size,
-                         flavor='raw', zero=True)
+                         flavor='raw', zero=True,
+                         add_memory_pressure=True)
 
 @cpython_api([rffi.VOIDP], lltype.Void)
 def PyObject_FREE(space, ptr):
diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py
--- a/pypy/module/cpyext/pyobject.py
+++ b/pypy/module/cpyext/pyobject.py
@@ -50,7 +50,8 @@
         size += itemcount * pytype.c_tp_itemsize
     assert size >= rffi.sizeof(PyObject.TO)
     buf = lltype.malloc(rffi.VOIDP.TO, size,
-                        flavor='raw', zero=True)
+                        flavor='raw', zero=True,
+                        add_memory_pressure=True)
     pyobj = rffi.cast(PyObject, buf)
     pyobj.c_ob_refcnt = 1
     pyobj.c_ob_type = pytype
diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py
--- a/pypy/module/cpyext/tupleobject.py
+++ b/pypy/module/cpyext/tupleobject.py
@@ -59,7 +59,8 @@
     py_tup = rffi.cast(PyTupleObject, py_obj)
     py_tup.c_ob_item = lltype.malloc(ObjectItems, length,
-                                     flavor='raw', zero=True)
+                                     flavor='raw', zero=True,
+                                     add_memory_pressure=True)
     py_tup.c_ob_size = length
     return py_tup
 
@@ -70,7 +71,8 @@
     """
     items_w = space.fixedview(w_obj)
     l = len(items_w)
-    p = lltype.malloc(ObjectItems, l, flavor='raw')
+    p = lltype.malloc(ObjectItems, l, flavor='raw',
+                      add_memory_pressure=True)
     i = 0
     try:
         while i < l:
@@ -177,7 +179,8 @@
     ref = rffi.cast(PyTupleObject, ref)
     oldsize = ref.c_ob_size
     oldp = ref.c_ob_item
-    newp = lltype.malloc(ObjectItems, newsize, zero=True, flavor='raw')
+    newp = lltype.malloc(ObjectItems, newsize, zero=True, flavor='raw',
+                         add_memory_pressure=True)
     try:
         if oldsize < newsize:
             to_cp = oldsize
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -421,7 +421,8 @@
     Py_DecRef(space, w_metatype)
     heaptype = lltype.malloc(PyHeapTypeObject.TO,
-                             flavor='raw', zero=True)
+                             flavor='raw', zero=True,
+                             add_memory_pressure=True)
     pto = heaptype.c_ht_type
     pto.c_ob_refcnt = 1
     pto.c_ob_type = metatype
diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py
--- a/pypy/module/cpyext/unicodeobject.py
+++ b/pypy/module/cpyext/unicodeobject.py
@@ -55,7 +55,8 @@
     buflen = length + 1
     py_uni.c_size = length
     py_uni.c_buffer = lltype.malloc(rffi.CWCHARP.TO, buflen,
-                                    flavor='raw', zero=True)
+                                    flavor='raw', zero=True,
+                                    add_memory_pressure=True)
     return py_uni
 
 def unicode_attach(space, py_obj, w_obj):

From pypy.commits at gmail.com Fri Mar 18 16:45:35 2016
From: pypy.commits at gmail.com (fijal)
Date: Fri, 18 Mar 2016 13:45:35 -0700 (PDT)
Subject: [pypy-commit] pypy heapcache-refactor: whack enough to pass test_opencoder (without a random test so far)
Message-ID: <56ec68ef.c65b1c0a.394c7.602e@mx.google.com> Author: fijal Branch: heapcache-refactor Changeset: r83149:896f8045f41d Date: 2016-03-18 22:40 +0200 http://bitbucket.org/pypy/pypy/changeset/896f8045f41d/ Log: whack enough to pass test_opencoder (without a random test so far) diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -75,54 +75,6 @@ ) #compute_unique_id(box)) -class XxxAbstractValue(object): - __slots__ = () - - def getint(self): - raise NotImplementedError - - def getfloatstorage(self): - raise NotImplementedError - - def getfloat(self): - return longlong.getrealfloat(self.getfloatstorage()) - - def getref_base(self): - raise NotImplementedError - - def getref(self, TYPE): - raise NotImplementedError - getref._annspecialcase_ = 'specialize:arg(1)' - - def constbox(self): - raise NotImplementedError - - def getaddr(self): - "Only for raw addresses (BoxInt & ConstInt), not for GC addresses" - raise NotImplementedError - - def sort_key(self): - raise NotImplementedError - - def nonnull(self): - raise NotImplementedError - - def repr_rpython(self): - return '%s' % self - - def _get_str(self): - raise NotImplementedError - - def same_box(self, other): - return self is other - - def same_shape(self, other): - # only structured containers can compare their shape (vector box) - return True - - def getaccum(self): - return None - class AbstractDescr(AbstractValue): __slots__ = ('descr_index',) llopaque = True @@ -657,6 +609,10 @@ def get_position(self): return intmask(self.position_and_flags & FO_POSITION_MASK) + def set_position(self, new_pos): + flags = self.position_and_flags & (~FO_POSITION_MASK) + self.position_and_flags = flags | r_uint(new_pos) + def is_replaced_with_const(self): return bool(self.position_and_flags & FO_REPLACED_WITH_CONST) diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -10,7 +10,7 @@ from rpython.jit.metainterp.history import ConstInt, Const, ConstFloat, ConstPtr from rpython.jit.metainterp.resoperation import AbstractResOp, AbstractInputArg,\ ResOperation, oparity, rop, opwithdescr, GuardResOp, IntOp, FloatOp, RefOp -from rpython.rlib.rarithmetic import intmask +from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rlib.objectmodel import we_are_translated from rpython.rtyper.lltypesystem import rffi, lltype, llmemory from rpython.jit.metainterp.typesystem import llhelper @@ -63,24 +63,19 @@ if force_inputargs is not None: self.inputargs = [rop.inputarg_from_tp(arg.type) for arg in force_inputargs] - self._inputargs = [None] * len(trace.inputargs) for i, arg in enumerate(force_inputargs): - if arg.get_position() >= 0: - self._cache[arg.get_position()] = self.inputargs[i] - else: - self._inputargs[-arg.get_position()-1] = self.inputargs[i] + self._cache[arg.get_position()] = self.inputargs[i] else: self.inputargs = [rop.inputarg_from_tp(arg.type) for arg in self.trace.inputargs] - self._inputargs = self.inputargs[:] + for i, arg in enumerate(self.inputargs): + self._cache[i] = arg self.start = start self.pos = start - self._count = 0 + self._count = start self.end = end def _get(self, i): - if i < 0: - return self._inputargs[-i - 1] res = self._cache[i] assert res is not None return res @@ -197,9 +192,10 @@ self._floats_dict = {} self._snapshots = [] for i, inparg in enumerate(inputargs): - assert 
isinstance(inparg, AbstractInputArg) - inparg.position = -i - 1 - self._count = 0 + inparg.set_position(i) + self._count = len(inputargs) + self._start = len(inputargs) + self._pos = self._start self.inputargs = inputargs def append(self, v): @@ -362,7 +358,7 @@ def get_iter(self, metainterp_sd=None): assert metainterp_sd - return TraceIterator(self, 0, self._pos, metainterp_sd=metainterp_sd) + return TraceIterator(self, self._start, self._pos, metainterp_sd=metainterp_sd) def unpack(self): iter = self.get_iter() diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -1,7 +1,7 @@ from rpython.jit.metainterp.opencoder import Trace, untag, TAGINT, TAGBOX -from rpython.jit.metainterp.resoperation import rop, InputArgInt, AbstractResOp -from rpython.jit.metainterp.history import ConstInt +from rpython.jit.metainterp.resoperation import rop, AbstractResOp +from rpython.jit.metainterp.history import ConstInt, IntFrontendOp from rpython.jit.metainterp.optimizeopt.optimizer import Optimizer from rpython.jit.metainterp import resume from rpython.jit.metainterp.test.strategies import lists_of_operations @@ -31,8 +31,11 @@ self.jitcode = jitcode self.boxes = boxes - def get_list_of_active_boxes(self, flag): - return self.boxes + def get_list_of_active_boxes(self, flag, new_array, encode): + a = new_array(len(self.boxes)) + for i, box in enumerate(self.boxes): + a[i] = encode(box) + return a def unpack_snapshot(t, op, pos): op.framestack = [] @@ -58,7 +61,7 @@ return iter.inputargs, l, iter def test_simple_iterator(self): - i0, i1 = InputArgInt(), InputArgInt() + i0, i1 = IntFrontendOp(0), IntFrontendOp(0) t = Trace([i0, i1]) add = FakeOp(t.record_op(rop.INT_ADD, [i0, i1])) t.record_op(rop.INT_ADD, [add, ConstInt(1)]) @@ -72,7 +75,7 @@ assert l[0].getarg(1) is i1 def test_rd_snapshot(self): - i0, i1 = InputArgInt(), InputArgInt() + i0, i1 = IntFrontendOp(0), IntFrontendOp(0) t = Trace([i0, i1]) add = FakeOp(t.record_op(rop.INT_ADD, [i0, i1])) t.record_op(rop.GUARD_FALSE, [add]) @@ -96,7 +99,7 @@ assert fstack[1].boxes == [i0, i0, l[0]] def test_read_snapshot_interface(self): - i0, i1, i2 = InputArgInt(), InputArgInt(), InputArgInt() + i0, i1, i2 = IntFrontendOp(0), IntFrontendOp(0), IntFrontendOp(0) t = Trace([i0, i1, i2]) t.record_op(rop.GUARD_TRUE, [i1]) frame0 = FakeFrame(1, JitCode(2), [i0, i1]) @@ -128,8 +131,9 @@ assert pc == 3 assert snapshot_iter.unpack_array(framestack[1].box_array) == [i2, i2] + # XXXX fixme @given(lists_of_operations()) - def test_random_snapshot(self, lst): + def xxx_test_random_snapshot(self, lst): inputargs, ops = lst t = Trace(inputargs) for op in ops: @@ -156,11 +160,11 @@ assert (((-iter._next() - 1) << 15) | (iter._next())) == i def test_cut_trace_from(self): - i0, i1, i2 = InputArgInt(), InputArgInt(), InputArgInt() + i0, i1, i2 = IntFrontendOp(0), IntFrontendOp(0), IntFrontendOp(0) t = Trace([i0, i1, i2]) - add1 = t.record_op(rop.INT_ADD, [i0, i1]) + add1 = FakeOp(t.record_op(rop.INT_ADD, [i0, i1])) cut_point = t.cut_point() - add2 = t.record_op(rop.INT_ADD, [add1, i1]) + add2 = FakeOp(t.record_op(rop.INT_ADD, [add1, i1])) t.record_op(rop.GUARD_TRUE, [add2]) resume.capture_resumedata([FakeFrame(3, JitCode(4), [add2, add1, i1])], None, [], t) @@ -174,9 +178,9 @@ class SomeDescr(AbstractDescr): pass - i0, i1, i2 = InputArgInt(), InputArgInt(), InputArgInt() + i0, i1, i2 = IntFrontendOp(0), IntFrontendOp(0), 
IntFrontendOp(0) t = Trace([i0, i1, i2]) - p0 = t.record_op(rop.NEW_WITH_VTABLE, [], descr=SomeDescr()) + p0 = FakeOp(t.record_op(rop.NEW_WITH_VTABLE, [], descr=SomeDescr())) t.record_op(rop.GUARD_TRUE, [i0]) resume.capture_resumedata([], [i1, i2, p0], [p0, i1], t) (i0, i1, i2), l, iter = self.unpack(t) diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -75,7 +75,7 @@ if in_const_box: return history.ConstPtr(value) else: - return resoperation.InputArgRef(value) + return history.RefFrontendOp(xxx) else: adr = llmemory.cast_ptr_to_adr(value) value = heaptracker.adr2int(adr) From pypy.commits at gmail.com Fri Mar 18 16:45:37 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 18 Mar 2016 13:45:37 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: merge (but broken) Message-ID: <56ec68f1.10921c0a.55175.60ca@mx.google.com> Author: fijal Branch: heapcache-refactor Changeset: r83150:61eeb63802a2 Date: 2016-03-18 22:44 +0200 http://bitbucket.org/pypy/pypy/changeset/61eeb63802a2/ Log: merge (but broken) diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -1,6 +1,5 @@ from rpython.jit.metainterp.history import Const, ConstInt from rpython.jit.metainterp.history import FrontendOp, RefFrontendOp -from rpython.jit.metainterp.history import FO_REPLACED_WITH_CONST from rpython.jit.metainterp.resoperation import rop, OpHelpers from rpython.jit.metainterp.executor import constant_from_op from rpython.rlib.rarithmetic import r_uint32, r_uint @@ -155,6 +154,9 @@ return ref_frontend_op._get_heapc_flags() >= self.likely_virtual_version def update_version(self, ref_frontend_op): + """Ensure the version of 'ref_frontend_op' is current. If not, + it will update 'ref_frontend_op' (removing most flags currently set). 
+ """ if not self.test_head_version(ref_frontend_op): f = self.head_version if (self.test_likely_virtual_version(ref_frontend_op) and diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -594,8 +594,8 @@ # ____________________________________________________________ -FO_POSITION_MASK = r_uint(0x7FFFFFFF) -FO_REPLACED_WITH_CONST = r_uint(0x80000000) +FO_REPLACED_WITH_CONST = r_uint(1) +FO_POSITION_SHIFT = 1 class FrontendOp(AbstractResOp): @@ -603,11 +603,15 @@ _attrs_ = ('position_and_flags',) def __init__(self, pos): - assert pos >= 0 - self.position_and_flags = r_uint(pos) + # p is the 32-bit position shifted left by one (might be negative, + # but casted to the 32-bit UINT type) + p = rffi.cast(rffi.UINT, pos << FO_POSITION_SHIFT) + self.position_and_flags = r_uint(p) # zero-extended to a full word def get_position(self): - return intmask(self.position_and_flags & FO_POSITION_MASK) + # p is the signed 32-bit position, from self.position_and_flags + p = rffi.cast(rffi.INT, self.position_and_flags) + return intmask(p) >> FO_POSITION_SHIFT def set_position(self, new_pos): flags = self.position_and_flags & (~FO_POSITION_MASK) diff --git a/rpython/jit/metainterp/test/test_history.py b/rpython/jit/metainterp/test/test_history.py --- a/rpython/jit/metainterp/test/test_history.py +++ b/rpython/jit/metainterp/test/test_history.py @@ -62,6 +62,12 @@ assert c5.nonnull() assert c6.nonnull() +def test_frontendop(): + f = FrontendOp(42) + assert f.get_position() == 42 + f = FrontendOp(-56) + assert f.get_position() == -56 + class TestZTranslated(StandaloneTests): def test_ztranslated_same_constant_float(self): def fn(args): From pypy.commits at gmail.com Fri Mar 18 16:56:44 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 18 Mar 2016 13:56:44 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: replace InputArgXxx with FrontendOps (still a bad call to set_position, I think) Message-ID: <56ec6b8c.6774c20a.14549.2568@mx.google.com> Author: fijal Branch: heapcache-refactor Changeset: r83151:fd211f9b818b Date: 2016-03-18 22:55 +0200 http://bitbucket.org/pypy/pypy/changeset/fd211f9b818b/ Log: replace InputArgXxx with FrontendOps (still a bad call to set_position, I think) diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -614,8 +614,9 @@ return intmask(p) >> FO_POSITION_SHIFT def set_position(self, new_pos): - flags = self.position_and_flags & (~FO_POSITION_MASK) - self.position_and_flags = flags | r_uint(new_pos) + self.__init__(new_pos) + #flags = self.position_and_flags & (~FO_POSITION_MASK) + #self.position_and_flags = flags | r_uint(new_pos) def is_replaced_with_const(self): return bool(self.position_and_flags & FO_REPLACED_WITH_CONST) diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -1,9 +1,9 @@ from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp import jitprof from rpython.jit.metainterp.history import (Const, ConstInt, getkind, - INT, REF, FLOAT, AbstractDescr) -from rpython.jit.metainterp.resoperation import rop, InputArgInt,\ - InputArgFloat, InputArgRef + INT, REF, FLOAT, AbstractDescr, IntFrontendOp, RefFrontendOp, + FloatFrontendOp) +from rpython.jit.metainterp.resoperation import rop from rpython.rlib import 
rarithmetic, rstack from rpython.rlib.objectmodel import (we_are_translated, specialize, compute_unique_id) @@ -1264,11 +1264,14 @@ num += len(self.liveboxes) assert num >= 0 if kind == INT: - box = InputArgInt(self.cpu.get_int_value(self.deadframe, num)) + box = IntFrontendOp(0) + box.setint(self.cpu.get_int_value(self.deadframe, num)) elif kind == REF: - box = InputArgRef(self.cpu.get_ref_value(self.deadframe, num)) + box = RefFrontendOp(0) + box.setref_base(self.cpu.get_ref_value(self.deadframe, num)) elif kind == FLOAT: - box = InputArgFloat(self.cpu.get_float_value(self.deadframe, num)) + box = FloatFrontendOp(0) + box.setfloatstorage(self.cpu.get_float_value(self.deadframe, num)) else: assert 0, "bad kind: %d" % ord(kind) self.liveboxes[num] = box diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -75,7 +75,9 @@ if in_const_box: return history.ConstPtr(value) else: - return history.RefFrontendOp(xxx) + res = history.RefFrontendOp(0) + res.setref_base(value) + return res else: adr = llmemory.cast_ptr_to_adr(value) value = heaptracker.adr2int(adr) @@ -89,7 +91,9 @@ if in_const_box: return history.ConstFloat(value) else: - return resoperation.InputArgFloat(value) + res = history.FloatFrontendOp(0) + res.setfloatstorage(value) + return res elif isinstance(value, str) or isinstance(value, unicode): assert len(value) == 1 # must be a character value = ord(value) @@ -100,7 +104,9 @@ if in_const_box: return history.ConstInt(value) else: - return resoperation.InputArgInt(value) + res = history.IntFrontendOp(0) + res.setint(value) + return res @specialize.arg(0) def equal_whatever(TYPE, x, y): From pypy.commits at gmail.com Fri Mar 18 18:40:27 2016 From: pypy.commits at gmail.com (fijal) Date: Fri, 18 Mar 2016 15:40:27 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: fixes for consts Message-ID: <56ec83db.a3f6c20a.97230.4423@mx.google.com> Author: fijal Branch: heapcache-refactor Changeset: r83152:042155266b53 Date: 2016-03-19 00:39 +0200 http://bitbucket.org/pypy/pypy/changeset/042155266b53/ Log: fixes for consts diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -35,7 +35,7 @@ return bool(f & r_uint(flags)) def maybe_replace_with_const(box): - if box.is_replaced_with_const(): + if not isinstance(box, Const) and box.is_replaced_with_const(): return constant_from_op(box) else: return box @@ -58,7 +58,8 @@ self.cache_anything.clear() def _seen_alloc(self, ref_box): - assert isinstance(ref_box, RefFrontendOp) + if not isinstance(ref_box, RefFrontendOp): + return False return self.heapcache._check_flag(ref_box, HF_SEEN_ALLOCATION) def _getdict(self, seen_alloc): @@ -105,6 +106,18 @@ def setfield(self, fieldbox): self.cache.do_write_with_aliasing(self.ref_box, fieldbox) +class DummyFieldUpdater(FieldUpdater): + def __init__(self): + self.currfieldbox = None + + def getfield_now_known(self, fieldbox): + pass + + def setfield(self, fieldbox): + pass + +dummy_field_updater = DummyFieldUpdater() + class HeapCache(object): def __init__(self): @@ -342,12 +355,18 @@ return self._check_flag(box, HF_KNOWN_CLASS) def class_now_known(self, box): + if isinstance(box, Const): + return self._set_flag(box, HF_KNOWN_CLASS) def is_nullity_known(self, box): + if isinstance(box, Const): + return bool(box.getref_base()) return self._check_flag(box, 
HF_KNOWN_NULLITY) def nullity_now_known(self, box): + if isinstance(box, Const): + return self._set_flag(box, HF_KNOWN_NULLITY) def is_nonstandard_virtualizable(self, box): @@ -381,7 +400,8 @@ return None def get_field_updater(self, box, descr): - assert isinstance(box, RefFrontendOp) + if not isinstance(box, RefFrontendOp): + return dummy_field_updater cache = self.heap_cache.get(descr, None) if cache is None: cache = self.heap_cache[descr] = CacheEntry(self) @@ -448,6 +468,8 @@ # we store in '_heapc_deps' a list of boxes: the *first* box is # the known length or None, and the remaining boxes are the # regular dependencies. + if isinstance(box, Const): + return deps = self._get_deps(box) assert deps is not None deps[0] = lengthbox diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -695,7 +695,7 @@ self.trace.cut_at(cut_at) def any_operation(self): - return self.trace._count > 0 + return self.trace._count > self.trace._start @specialize.argtype(2) def set_op_value(self, op, value): From pypy.commits at gmail.com Fri Mar 18 20:02:53 2016 From: pypy.commits at gmail.com (mjacob) Date: Fri, 18 Mar 2016 17:02:53 -0700 (PDT) Subject: [pypy-commit] pypy py3k: Fix test by using another file. Message-ID: <56ec972d.6507c20a.ad77a.4c63@mx.google.com> Author: Manuel Jacob Branch: py3k Changeset: r83153:dfc55771338b Date: 2016-03-19 01:01 +0100 http://bitbucket.org/pypy/pypy/changeset/dfc55771338b/ Log: Fix test by using another file. The tests are not as independent as they probably should be and test_chmod / test_fchmod change the permissions of the file previously used. diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -987,7 +987,7 @@ if hasattr(os, 'ftruncate'): def test_truncate(self): posix = self.posix - dest = self.path + dest = self.path2 def mkfile(dest, size=4): with open(dest, 'wb') as f: From pypy.commits at gmail.com Sat Mar 19 00:30:41 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 18 Mar 2016 21:30:41 -0700 (PDT) Subject: [pypy-commit] pypy py3k: hg merge default Message-ID: <56ecd5f1.4a811c0a.35ca4.ffffb96a@mx.google.com> Author: Ronan Lamy Branch: py3k Changeset: r83154:9a167de52a1f Date: 2016-03-19 04:26 +0000 http://bitbucket.org/pypy/pypy/changeset/9a167de52a1f/ Log: hg merge default diff too long, truncating to 2000 out of 13776 lines diff --git a/.hgignore b/.hgignore --- a/.hgignore +++ b/.hgignore @@ -74,5 +74,6 @@ ^rpython/doc/_build/.*$ ^compiled ^.git/ +^.hypothesis/ ^release/ ^rpython/_cache$ diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -18,3 +18,4 @@ f3ad1e1e1d6215e20d34bb65ab85ff9188c9f559 release-2.6.1 850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0 5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1 +246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0 diff --git a/lib-python/2.7/xml/etree/ElementTree.py b/lib-python/2.7/xml/etree/ElementTree.py --- a/lib-python/2.7/xml/etree/ElementTree.py +++ b/lib-python/2.7/xml/etree/ElementTree.py @@ -1606,7 +1606,17 @@ pubid = pubid[1:-1] if hasattr(self.target, "doctype"): self.target.doctype(name, pubid, system[1:-1]) - elif self.doctype is not self._XMLParser__doctype: + elif 1: # XXX PyPy fix, used to be + # elif self.doctype is not self._XMLParser__doctype: + # but that condition is always True on CPython, as far + # as I can tell: 
self._XMLParser__doctype always + # returns a fresh unbound method object. + # On PyPy, unbound and bound methods have stronger + # unicity guarantees: self._XMLParser__doctype + # can return the same unbound method object, in + # some cases making the test above incorrectly False. + # (My guess would be that the line above is a backport + # from Python 3.) # warn about deprecated call self._XMLParser__doctype(name, pubid, system[1:-1]) self.doctype(name, pubid, system[1:-1]) diff --git a/lib_pypy/ctypes_config_cache/rebuild.py b/lib_pypy/ctypes_config_cache/rebuild.py --- a/lib_pypy/ctypes_config_cache/rebuild.py +++ b/lib_pypy/ctypes_config_cache/rebuild.py @@ -9,9 +9,8 @@ _dirpath = os.path.dirname(__file__) or os.curdir -from rpython.tool.ansi_print import ansi_log -log = py.log.Producer("ctypes_config_cache") -py.log.setconsumer("ctypes_config_cache", ansi_log) +from rpython.tool.ansi_print import AnsiLogger +log = AnsiLogger("ctypes_config_cache") def rebuild_one(name): diff --git a/pypy/doc/config/translation.gc.txt b/pypy/doc/config/translation.gc.txt --- a/pypy/doc/config/translation.gc.txt +++ b/pypy/doc/config/translation.gc.txt @@ -1,24 +1,26 @@ Choose the Garbage Collector used by the translated program. -The good performing collectors are "hybrid" and "minimark". -The default is "minimark". +The recommended default is "incminimark". - "ref": reference counting. Takes very long to translate and the result is - slow. + slow. Used only for tests. Don't use it for real RPython programs. - - "marksweep": naive mark & sweep. + - "none": no GC. Leaks everything. Don't use it for real RPython + programs: the rate of leaking is immense. - "semispace": a copying semi-space GC. - "generation": a generational GC using the semi-space GC for the older generation. - - "boehm": use the Boehm conservative GC. - - "hybrid": a hybrid collector of "generation" together with a mark-n-sweep old space - - "markcompact": a slow, but memory-efficient collector, - influenced e.g. by Smalltalk systems. + - "boehm": use the Boehm conservative GC. - "minimark": a generational mark-n-sweep collector with good performance. Includes page marking for large arrays. + + - "incminimark": like minimark, but adds incremental major + collections. Seems to come with no performance drawback over + "minimark", so it is the default. A few recent features of PyPy + (like cpyext) are only working with this GC. diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst --- a/pypy/doc/extradoc.rst +++ b/pypy/doc/extradoc.rst @@ -80,7 +80,7 @@ .. _How to *not* write Virtual Machines for Dynamic Languages: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dyla2007/dyla.pdf .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf -.. _Automatic JIT Compiler Generation with Runtime Partial Evaluation: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf +.. _Automatic JIT Compiler Generation with Runtime Partial Evaluation: http://stups.hhu.de/mediawiki/images/b/b9/Master_bolz.pdf .. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/DynamicLanguages_abstracts.html#AACM-DLS07 .. _EU Reports: index-report.html .. 
_Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution: http://sabi.net/nriley/pubs/dls6-riley.pdf diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst --- a/pypy/doc/how-to-release.rst +++ b/pypy/doc/how-to-release.rst @@ -76,5 +76,4 @@ * add a tag on the pypy/jitviewer repo that corresponds to pypy release * add a tag on the codespeed web site that corresponds to pypy release -* update the version number in {rpython,pypy}/doc/conf.py. * revise versioning at https://readthedocs.org/projects/pypy diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-5.0.1.rst release-5.0.0.rst release-4.0.1.rst release-4.0.0.rst diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst --- a/pypy/doc/project-ideas.rst +++ b/pypy/doc/project-ideas.rst @@ -167,22 +167,13 @@ * `hg` -Embedding PyPy and improving CFFI ---------------------------------- - -PyPy has some basic :doc:`embedding infrastructure `. The idea would be to improve -upon that with cffi hacks that can automatically generate embeddable .so/.dll -library - - Optimising cpyext (CPython C-API compatibility layer) ----------------------------------------------------- A lot of work has gone into PyPy's implementation of CPython's C-API over the last years to let it reach a practical level of compatibility, so that C extensions for CPython work on PyPy without major rewrites. However, -there are still many edges and corner cases where it misbehaves, and it has -not received any substantial optimisation so far. +there are still many edges and corner cases where it misbehaves. The objective of this project is to fix bugs in cpyext and to optimise several performance critical parts of it, such as the reference counting diff --git a/pypy/doc/release-5.0.0.rst b/pypy/doc/release-5.0.0.rst --- a/pypy/doc/release-5.0.0.rst +++ b/pypy/doc/release-5.0.0.rst @@ -128,6 +128,9 @@ * Fix for corner case (likely shown by Krakatau) for consecutive guards with interdependencies + * Fix applevel bare class method comparisons which should fix pretty printing + in IPython + * Issues reported with our previous release were resolved_ after reports from users on our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at #pypy diff --git a/pypy/doc/release-5.0.1.rst b/pypy/doc/release-5.0.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-5.0.1.rst @@ -0,0 +1,35 @@ +========== +PyPy 5.0.1 +========== + +We have released a bugfix for PyPy 5.0, after reports that the newly released +`lxml 3.6.0`_, which now supports PyPy 5.0 +, can `crash on large files`_. +Thanks to those who reported the crash. Please update, downloads are available +at pypy.org/download.html + +.. _`lxml 3.6.0`: https://pypi.python.org/pypi/lxml/3.6.0 +.. _`crash on large files`: https://bitbucket.org/pypy/pypy/issues/2260 +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. 
+ +This release supports **x86** machines on most common operating systems +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), +newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the +big- and little-endian variants of **PPC64** running Linux. + +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Please update, and continue to help us make PyPy better. + +Cheers + +The PyPy Team + diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -3,5 +3,23 @@ ========================= .. this is a revision shortly after release-5.0 -.. startrev: 9c4299dc2d60 +.. startrev: b238b48f9138 +.. branch: s390x-backend + +The jit compiler backend implementation for the s390x architecutre. +The backend manages 64-bit values in the literal pool of the assembly instead of loading them as immediates. +It includes a simplification for the operation 'zero_array'. Start and length parameters are bytes instead of size. + +.. branch: remove-py-log + +Replace py.log with something simpler, which should speed up logging + +.. branch: where_1_arg + +Implemented numpy.where for 1 argument (thanks sergem) + +.. branch: fix_indexing_by_numpy_int + +Implement yet another strange numpy indexing compatibility; indexing by a scalar +returns a scalar diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -331,7 +331,7 @@ # XXX possibly adapt options using modules failures = create_cffi_import_libraries(exename, options, basedir) # if failures, they were already printed - print >> sys.stderr, str(exename),'successfully built, but errors while building the above modules will be ignored' + print >> sys.stderr, str(exename),'successfully built (errors, if any, while building the above modules are ignored)' driver.task_build_cffi_imports = types.MethodType(task_build_cffi_imports, driver) driver.tasks['build_cffi_imports'] = driver.task_build_cffi_imports, [compile_goal] driver.default_goal = 'build_cffi_imports' diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py --- a/pypy/interpreter/mixedmodule.py +++ b/pypy/interpreter/mixedmodule.py @@ -3,7 +3,7 @@ from pypy.interpreter import gateway from pypy.interpreter.error import OperationError from pypy.interpreter.baseobjspace import W_Root -import os, sys +import sys class MixedModule(Module): applevel_name = None @@ -74,6 +74,7 @@ if not self.space.contains_w(self.w_initialdict, w_key): self.space.setitem(self.w_initialdict, w_key, w_value) + @classmethod def get_applevel_name(cls): """ NOT_RPYTHON """ if cls.applevel_name is not None: @@ -81,7 +82,6 @@ else: pkgroot = cls.__module__ return pkgroot.split('.')[-1] - get_applevel_name = classmethod(get_applevel_name) def get(self, name): space = self.space @@ -123,7 +123,7 @@ # be normal Functions to get the correct binding behaviour func = w_value if (isinstance(func, Function) and - type(func) is not BuiltinFunction): + type(func) is not BuiltinFunction): try: bltin = func._builtinversion_ except AttributeError: @@ -135,7 +135,6 @@ space.setitem(self.w_dict, w_name, w_value) return w_value - def getdict(self, space): if self.lazy: for name in self.loaders: @@ -151,6 +150,7 @@ self.startup_called = False self._frozen = True + @classmethod def buildloaders(cls): """ NOT_RPYTHON """ if not hasattr(cls, 'loaders'): @@ -169,8 +169,6 @@ if '__doc__' 
not in loaders: loaders['__doc__'] = cls.get__doc__ - buildloaders = classmethod(buildloaders) - def extra_interpdef(self, name, spec): cls = self.__class__ pkgroot = cls.__module__ @@ -179,21 +177,21 @@ w_obj = loader(space) space.setattr(space.wrap(self), space.wrap(name), w_obj) + @classmethod def get__doc__(cls, space): return space.wrap(cls.__doc__) - get__doc__ = classmethod(get__doc__) def getinterpevalloader(pkgroot, spec): """ NOT_RPYTHON """ def ifileloader(space): - d = {'space' : space} + d = {'space': space} # EVIL HACK (but it works, and this is not RPython :-) while 1: try: value = eval(spec, d) except NameError, ex: - name = ex.args[0].split("'")[1] # super-Evil + name = ex.args[0].split("'")[1] # super-Evil if name in d: raise # propagate the NameError try: diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -447,3 +447,14 @@ i -= 1 assert i >= 0 gc.collect() + + def test_exitfunc_catches_exceptions(self): + from pypy.tool.pytest.objspace import maketestobjspace + space = maketestobjspace() + space.appexec([], """(): + import sys + sys.exitfunc = lambda: this_is_an_unknown_name + """) + space.finish() + # assert that we reach this point without getting interrupted + # by the OperationError(NameError) diff --git a/pypy/module/__pypy__/test/test_signal.py b/pypy/module/__pypy__/test/test_signal.py --- a/pypy/module/__pypy__/test/test_signal.py +++ b/pypy/module/__pypy__/test/test_signal.py @@ -2,7 +2,6 @@ from pypy.module.thread.test.support import GenericTestThread - class AppTestMinimal: spaceconfig = dict(usemodules=['__pypy__']) diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -356,10 +356,11 @@ # ____________________________________________________________ -rffi_fdopen = rffi.llexternal("fdopen", [rffi.INT, rffi.CCHARP], rffi.CCHARP, +FILEP = rffi.COpaquePtr("FILE") +rffi_fdopen = rffi.llexternal("fdopen", [rffi.INT, rffi.CCHARP], FILEP, save_err=rffi.RFFI_SAVE_ERRNO) -rffi_setbuf = rffi.llexternal("setbuf", [rffi.CCHARP, rffi.CCHARP], lltype.Void) -rffi_fclose = rffi.llexternal("fclose", [rffi.CCHARP], rffi.INT) +rffi_setbuf = rffi.llexternal("setbuf", [FILEP, rffi.CCHARP], lltype.Void) +rffi_fclose = rffi.llexternal("fclose", [FILEP], rffi.INT) class CffiFileObj(object): _immutable_ = True @@ -389,4 +390,4 @@ w_fileobj.cffi_fileobj = CffiFileObj(fd, mode) except OSError, e: raise wrap_oserror(space, e) - return w_fileobj.cffi_fileobj.llf + return rffi.cast(rffi.CCHARP, fileobj.cffi_fileobj.llf) diff --git a/pypy/module/_rawffi/callback.py b/pypy/module/_rawffi/callback.py --- a/pypy/module/_rawffi/callback.py +++ b/pypy/module/_rawffi/callback.py @@ -1,17 +1,23 @@ - +import sys from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.typedef import TypeDef, GetSetProperty from rpython.rtyper.lltypesystem import lltype, rffi from pypy.module._rawffi.interp_rawffi import write_ptr from pypy.module._rawffi.structure import W_Structure from pypy.module._rawffi.interp_rawffi import (W_DataInstance, letter2tp, - unwrap_value, unpack_argshapes, got_libffi_error) + unwrap_value, unpack_argshapes, got_libffi_error, is_narrow_integer_type, + LL_TYPEMAP, NARROW_INTEGER_TYPES) from rpython.rlib.clibffi import USERDATA_P, CallbackFuncPtr, FUNCFLAG_CDECL from rpython.rlib.clibffi import 
ffi_type_void, LibFFIError from rpython.rlib import rweakref from pypy.module._rawffi.tracker import tracker from pypy.interpreter.error import OperationError from pypy.interpreter import gateway +from rpython.rlib.unroll import unrolling_iterable + +BIGENDIAN = sys.byteorder == 'big' + +unroll_narrow_integer_types = unrolling_iterable(NARROW_INTEGER_TYPES) app = gateway.applevel(''' def tbprint(tb, err): @@ -42,8 +48,17 @@ args_w[i] = space.wrap(rffi.cast(rffi.ULONG, ll_args[i])) w_res = space.call(w_callable, space.newtuple(args_w)) if callback_ptr.result is not None: # don't return void - unwrap_value(space, write_ptr, ll_res, 0, - callback_ptr.result, w_res) + ptr = ll_res + letter = callback_ptr.result + if BIGENDIAN: + # take care of narrow integers! + for int_type in unroll_narrow_integer_types: + if int_type == letter: + T = LL_TYPEMAP[int_type] + n = rffi.sizeof(lltype.Signed) - rffi.sizeof(T) + ptr = rffi.ptradd(ptr, n) + break + unwrap_value(space, write_ptr, ptr, 0, letter, w_res) except OperationError, e: tbprint(space, space.wrap(e.get_traceback()), space.wrap(e.errorstr(space))) diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py --- a/pypy/module/_rawffi/interp_rawffi.py +++ b/pypy/module/_rawffi/interp_rawffi.py @@ -1,3 +1,4 @@ +import sys from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt, wrap_oserror from pypy.interpreter.gateway import interp2app, unwrap_spec @@ -21,6 +22,8 @@ from pypy.module._rawffi.buffer import RawFFIBuffer from pypy.module._rawffi.tracker import tracker +BIGENDIAN = sys.byteorder == 'big' + TYPEMAP = { # XXX A mess with unsigned/signed/normal chars :-/ 'c' : ffi_type_uchar, @@ -338,10 +341,14 @@ if tracker.DO_TRACING: ll_buf = rffi.cast(lltype.Signed, self.ll_buffer) tracker.trace_allocation(ll_buf, self) + self._ll_buffer = self.ll_buffer def getbuffer(self, space): return space.wrap(rffi.cast(lltype.Unsigned, self.ll_buffer)) + def buffer_advance(self, n): + self.ll_buffer = rffi.ptradd(self.ll_buffer, n) + def byptr(self, space): from pypy.module._rawffi.array import ARRAY_OF_PTRS array = ARRAY_OF_PTRS.allocate(space, 1) @@ -349,16 +356,17 @@ return space.wrap(array) def free(self, space): - if not self.ll_buffer: + if not self._ll_buffer: raise segfault_exception(space, "freeing NULL pointer") self._free() def _free(self): if tracker.DO_TRACING: - ll_buf = rffi.cast(lltype.Signed, self.ll_buffer) + ll_buf = rffi.cast(lltype.Signed, self._ll_buffer) tracker.trace_free(ll_buf) - lltype.free(self.ll_buffer, flavor='raw') + lltype.free(self._ll_buffer, flavor='raw') self.ll_buffer = lltype.nullptr(rffi.VOIDP.TO) + self._ll_buffer = self.ll_buffer def buffer_w(self, space, flags): return RawFFIBuffer(self) @@ -435,12 +443,19 @@ space.wrap("cannot directly read value")) wrap_value._annspecialcase_ = 'specialize:arg(1)' +NARROW_INTEGER_TYPES = 'cbhiBIH?' 
+ +def is_narrow_integer_type(letter): + return letter in NARROW_INTEGER_TYPES class W_FuncPtr(W_Root): def __init__(self, space, ptr, argshapes, resshape): self.ptr = ptr self.argshapes = argshapes self.resshape = resshape + self.narrow_integer = False + if resshape is not None: + self.narrow_integer = is_narrow_integer_type(resshape.itemcode.lower()) def getbuffer(self, space): return space.wrap(rffi.cast(lltype.Unsigned, self.ptr.funcsym)) @@ -500,6 +515,10 @@ result = self.resshape.allocate(space, 1, autofree=True) # adjust_return_size() was used here on result.ll_buffer self.ptr.call(args_ll, result.ll_buffer) + if BIGENDIAN and self.narrow_integer: + # we get a 8 byte value in big endian + n = rffi.sizeof(lltype.Signed) - result.shape.size + result.buffer_advance(n) return space.wrap(result) else: self.ptr.call(args_ll, lltype.nullptr(rffi.VOIDP.TO)) diff --git a/pypy/module/_rawffi/structure.py b/pypy/module/_rawffi/structure.py --- a/pypy/module/_rawffi/structure.py +++ b/pypy/module/_rawffi/structure.py @@ -18,6 +18,9 @@ from rpython.rlib.rarithmetic import intmask, signedtype, r_uint, \ r_ulonglong from rpython.rtyper.lltypesystem import lltype, rffi +import sys + +IS_BIG_ENDIAN = sys.byteorder == 'big' @@ -114,20 +117,32 @@ size += intmask(fieldsize) bitsizes.append(fieldsize) elif field_type == NEW_BITFIELD: - bitsizes.append((bitsize << 16) + bitoffset) + if IS_BIG_ENDIAN: + off = last_size - bitoffset - bitsize + else: + off = bitoffset + bitsizes.append((bitsize << 16) + off) bitoffset = bitsize size = round_up(size, fieldalignment) pos.append(size) size += fieldsize elif field_type == CONT_BITFIELD: - bitsizes.append((bitsize << 16) + bitoffset) + if IS_BIG_ENDIAN: + off = last_size - bitoffset - bitsize + else: + off = bitoffset + bitsizes.append((bitsize << 16) + off) bitoffset += bitsize # offset is already updated for the NEXT field pos.append(size - fieldsize) elif field_type == EXPAND_BITFIELD: size += fieldsize - last_size / 8 last_size = fieldsize * 8 - bitsizes.append((bitsize << 16) + bitoffset) + if IS_BIG_ENDIAN: + off = last_size - bitoffset - bitsize + else: + off = bitoffset + bitsizes.append((bitsize << 16) + off) bitoffset += bitsize # offset is already updated for the NEXT field pos.append(size - fieldsize) diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py --- a/pypy/module/_rawffi/test/test__rawffi.py +++ b/pypy/module/_rawffi/test/test__rawffi.py @@ -704,7 +704,6 @@ def compare(a, b): a1 = _rawffi.Array('i').fromaddress(_rawffi.Array('P').fromaddress(a, 1)[0], 1) a2 = _rawffi.Array('i').fromaddress(_rawffi.Array('P').fromaddress(b, 1)[0], 1) - print("comparing", a1[0], "with", a2[0]) if a1[0] not in [1,2,3,4] or a2[0] not in [1,2,3,4]: bogus_args.append((a1[0], a2[0])) if a1[0] > a2[0]: @@ -715,7 +714,7 @@ a2[0] = len(ll_to_sort) a3 = _rawffi.Array('l')(1) a3[0] = struct.calcsize('i') - cb = _rawffi.CallbackPtr(compare, ['P', 'P'], 'i') + cb = _rawffi.CallbackPtr(compare, ['P', 'P'], 'l') a4 = cb.byptr() qsort(a1, a2, a3, a4) res = [ll_to_sort[i] for i in range(len(ll_to_sort))] @@ -895,7 +894,10 @@ b = _rawffi.Array('c').fromaddress(a.buffer, 38) if sys.maxunicode > 65535: # UCS4 build - assert b[0:5] == b'x\x00\x00\x00y' + if sys.byteorder == 'big': + assert b[0:8] == b'\x00\x00\x00x\x00\x00\x00y' + else: + assert b[0:5] == b'x\x00\x00\x00y' else: # UCS2 build assert b[0:2] == b'x\x00y' diff --git a/pypy/module/_rawffi/test/test_struct.py b/pypy/module/_rawffi/test/test_struct.py --- 
a/pypy/module/_rawffi/test/test_struct.py +++ b/pypy/module/_rawffi/test/test_struct.py @@ -1,4 +1,4 @@ - +import sys from pypy.module._rawffi.structure import size_alignment_pos from pypy.module._rawffi.interp_rawffi import TYPEMAP, letter2tp @@ -63,4 +63,7 @@ for (name, t, size) in fields]) assert size == 8 assert pos == [0, 0, 0] - assert bitsizes == [0x10000, 0x3e0001, 0x1003f] + if sys.byteorder == 'little': + assert bitsizes == [0x10000, 0x3e0001, 0x1003f] + else: + assert bitsizes == [0x1003f, 0x3e0001, 0x10000] diff --git a/pypy/module/_vmprof/conftest.py b/pypy/module/_vmprof/conftest.py new file mode 100644 --- /dev/null +++ b/pypy/module/_vmprof/conftest.py @@ -0,0 +1,6 @@ +import py, platform + +def pytest_collect_directory(path, parent): + if platform.machine() == 's390x': + py.test.skip("zarch tests skipped") +pytest_collect_file = pytest_collect_directory diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py --- a/pypy/module/_vmprof/test/test__vmprof.py +++ b/pypy/module/_vmprof/test/test__vmprof.py @@ -75,9 +75,9 @@ def test_enable_ovf(self): import _vmprof - raises(_vmprof.VMProfError, _vmprof.enable, 999, 0) - raises(_vmprof.VMProfError, _vmprof.enable, 999, -2.5) - raises(_vmprof.VMProfError, _vmprof.enable, 999, 1e300) - raises(_vmprof.VMProfError, _vmprof.enable, 999, 1e300 * 1e300) + raises(_vmprof.VMProfError, _vmprof.enable, 2, 0) + raises(_vmprof.VMProfError, _vmprof.enable, 2, -2.5) + raises(_vmprof.VMProfError, _vmprof.enable, 2, 1e300) + raises(_vmprof.VMProfError, _vmprof.enable, 2, 1e300 * 1e300) NaN = (1e300*1e300) / (1e300*1e300) - raises(_vmprof.VMProfError, _vmprof.enable, 999, NaN) + raises(_vmprof.VMProfError, _vmprof.enable, 2, NaN) diff --git a/pypy/module/cppyy/src/dummy_backend.cxx b/pypy/module/cppyy/src/dummy_backend.cxx --- a/pypy/module/cppyy/src/dummy_backend.cxx +++ b/pypy/module/cppyy/src/dummy_backend.cxx @@ -390,7 +390,7 @@ ((dummy::cppyy_test_data*)self)->destroy_arrays(); } else if (idx == s_methods["cppyy_test_data::set_bool"]) { assert(self && nargs == 1); - ((dummy::cppyy_test_data*)self)->set_bool((bool)((CPPYY_G__value*)args)[0].obj.in); + ((dummy::cppyy_test_data*)self)->set_bool((bool)((CPPYY_G__value*)args)[0].obj.i); } else if (idx == s_methods["cppyy_test_data::set_char"]) { assert(self && nargs == 1); ((dummy::cppyy_test_data*)self)->set_char(((CPPYY_G__value*)args)[0].obj.ch); diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -835,14 +835,14 @@ modulename = py.path.local(eci.libraries[-1]) def dealloc_trigger(): - from pypy.module.cpyext.pyobject import _Py_Dealloc + from pypy.module.cpyext.pyobject import decref print 'dealloc_trigger...' 
while True: ob = rawrefcount.next_dead(PyObject) if not ob: break print ob - _Py_Dealloc(space, ob) + decref(space, ob) print 'dealloc_trigger DONE' return "RETRY" rawrefcount.init(dealloc_trigger) diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -80,7 +80,8 @@ buflen = length + 1 py_str.c_size = length py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, buflen, - flavor='raw', zero=True) + flavor='raw', zero=True, + add_memory_pressure=True) return py_str def bytes_attach(space, py_obj, w_obj): diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -21,7 +21,8 @@ @cpython_api([Py_ssize_t], rffi.VOIDP) def PyObject_MALLOC(space, size): return lltype.malloc(rffi.VOIDP.TO, size, - flavor='raw', zero=True) + flavor='raw', zero=True, + add_memory_pressure=True) @cpython_api([rffi.VOIDP], lltype.Void) def PyObject_FREE(space, ptr): diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -50,7 +50,8 @@ size += itemcount * pytype.c_tp_itemsize assert size >= rffi.sizeof(PyObject.TO) buf = lltype.malloc(rffi.VOIDP.TO, size, - flavor='raw', zero=True) + flavor='raw', zero=True, + add_memory_pressure=True) pyobj = rffi.cast(PyObject, buf) pyobj.c_ob_refcnt = 1 pyobj.c_ob_type = pytype diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py --- a/pypy/module/cpyext/state.py +++ b/pypy/module/cpyext/state.py @@ -145,10 +145,10 @@ """ def perform(self, executioncontext, frame): - from pypy.module.cpyext.pyobject import PyObject, _Py_Dealloc + from pypy.module.cpyext.pyobject import PyObject, decref while True: py_obj = rawrefcount.next_dead(PyObject) if not py_obj: break - _Py_Dealloc(self.space, py_obj) + decref(self.space, py_obj) diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py --- a/pypy/module/cpyext/test/test_arraymodule.py +++ b/pypy/module/cpyext/test/test_arraymodule.py @@ -51,10 +51,19 @@ assert arr.tolist() == [1, 23, 4] def test_buffer(self): + import sys module = self.import_module(name='array') arr = module.array('i', [1,2,3,4]) - # XXX big-endian - assert bytes(arr) == (b'\x01\0\0\0' - b'\x02\0\0\0' - b'\x03\0\0\0' - b'\x04\0\0\0') + buf = buffer(arr) + exc = raises(TypeError, "buf[1] = '1'") + assert str(exc.value) == "buffer is read-only" + if sys.byteorder == 'big': + assert str(buf) == (b'\0\0\0\x01' + b'\0\0\0\x02' + b'\0\0\0\x03' + b'\0\0\0\x04') + else: + assert str(buf) == (b'\x01\0\0\0' + b'\x02\0\0\0' + b'\x03\0\0\0' + b'\x04\0\0\0') diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ b/pypy/module/cpyext/test/test_typeobject.py @@ -599,7 +599,7 @@ long intval; PyObject *name; - if (!PyArg_ParseTuple(args, "i", &intval)) + if (!PyArg_ParseTuple(args, "l", &intval)) return NULL; IntLike_Type.tp_as_number = &intlike_as_number; diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -511,11 +511,11 @@ lltype.free(pendian, flavor='raw') test("\x61\x00\x62\x00\x63\x00\x64\x00", -1) - - test("\x61\x00\x62\x00\x63\x00\x64\x00", None) - + if sys.byteorder == 'big': + 
test("\x00\x61\x00\x62\x00\x63\x00\x64", None) + else: + test("\x61\x00\x62\x00\x63\x00\x64\x00", None) test("\x00\x61\x00\x62\x00\x63\x00\x64", 1) - test("\xFE\xFF\x00\x61\x00\x62\x00\x63\x00\x64", 0, 1) test("\xFF\xFE\x61\x00\x62\x00\x63\x00\x64\x00", 0, -1) @@ -548,7 +548,10 @@ test("\x61\x00\x00\x00\x62\x00\x00\x00", -1) - test("\x61\x00\x00\x00\x62\x00\x00\x00", None) + if sys.byteorder == 'big': + test("\x00\x00\x00\x61\x00\x00\x00\x62", None) + else: + test("\x61\x00\x00\x00\x62\x00\x00\x00", None) test("\x00\x00\x00\x61\x00\x00\x00\x62", 1) diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ b/pypy/module/cpyext/tupleobject.py @@ -59,7 +59,8 @@ py_tup = rffi.cast(PyTupleObject, py_obj) py_tup.c_ob_item = lltype.malloc(ObjectItems, length, - flavor='raw', zero=True) + flavor='raw', zero=True, + add_memory_pressure=True) py_tup.c_ob_size = length return py_tup @@ -70,7 +71,8 @@ """ items_w = space.fixedview(w_obj) l = len(items_w) - p = lltype.malloc(ObjectItems, l, flavor='raw') + p = lltype.malloc(ObjectItems, l, flavor='raw', + add_memory_pressure=True) i = 0 try: while i < l: @@ -177,7 +179,8 @@ ref = rffi.cast(PyTupleObject, ref) oldsize = ref.c_ob_size oldp = ref.c_ob_item - newp = lltype.malloc(ObjectItems, newsize, zero=True, flavor='raw') + newp = lltype.malloc(ObjectItems, newsize, zero=True, flavor='raw', + add_memory_pressure=True) try: if oldsize < newsize: to_cp = oldsize diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -374,7 +374,8 @@ Py_DecRef(space, w_metatype) heaptype = lltype.malloc(PyHeapTypeObject.TO, - flavor='raw', zero=True) + flavor='raw', zero=True, + add_memory_pressure=True) pto = heaptype.c_ht_type pto.c_ob_refcnt = 1 pto.c_ob_type = metatype diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -55,7 +55,8 @@ buflen = length + 1 py_uni.c_size = length py_uni.c_buffer = lltype.malloc(rffi.CWCHARP.TO, buflen, - flavor='raw', zero=True) + flavor='raw', zero=True, + add_memory_pressure=True) return py_uni def unicode_attach(space, py_obj, w_obj): diff --git a/pypy/module/marshal/test/test_marshalimpl.py b/pypy/module/marshal/test/test_marshalimpl.py --- a/pypy/module/marshal/test/test_marshalimpl.py +++ b/pypy/module/marshal/test/test_marshalimpl.py @@ -59,14 +59,17 @@ import marshal, struct class FakeM: + # NOTE: marshal is platform independent, running this test must assume + # that self.seen gets values from the endianess of the marshal module. + # (which is little endian!) 
def __init__(self): self.seen = [] def start(self, code): self.seen.append(code) def put_int(self, value): - self.seen.append(struct.pack("i", value)) + self.seen.append(struct.pack("i4'), ('y', '>f4')]" in repr(a) + else: + assert "[('x', 'i4" + E = '<' if sys.byteorder == 'little' else '>' + b = np.dtype((xyz, [("col1", E+"i4"), ("col2", E+"i4"), ("col3", E+"i4")])) data = [(1, 2,3), (4, 5, 6)] a = np.array(data, dtype=b) x = pickle.loads(pickle.dumps(a)) @@ -425,18 +431,20 @@ assert hash(t5) != hash(t6) def test_pickle(self): + import sys import numpy as np from numpy import array, dtype from cPickle import loads, dumps a = array([1,2,3]) + E = '<' if sys.byteorder == 'little' else '>' if self.ptr_size == 8: - assert a.dtype.__reduce__() == (dtype, ('i8', 0, 1), (3, '<', None, None, None, -1, -1, 0)) + assert a.dtype.__reduce__() == (dtype, ('i8', 0, 1), (3, E, None, None, None, -1, -1, 0)) else: - assert a.dtype.__reduce__() == (dtype, ('i4', 0, 1), (3, '<', None, None, None, -1, -1, 0)) + assert a.dtype.__reduce__() == (dtype, ('i4', 0, 1), (3, E, None, None, None, -1, -1, 0)) assert loads(dumps(a.dtype)) == a.dtype assert np.dtype('bool').__reduce__() == (dtype, ('b1', 0, 1), (3, '|', None, None, None, -1, -1, 0)) assert np.dtype('|V16').__reduce__() == (dtype, ('V16', 0, 1), (3, '|', None, None, None, 16, 1, 0)) - assert np.dtype(('')+'U7' assert dtype([('', 'f8')]).str == "|V8" assert dtype(('f8', 2)).str == "|V16" @@ -970,8 +980,12 @@ def test_isnative(self): from numpy import dtype + import sys assert dtype('i4').isnative == True - assert dtype('>i8').isnative == False + if sys.byteorder == 'big': + assert dtype('i8').isnative == False def test_any_all_nonzero(self): import numpy @@ -1187,6 +1201,7 @@ def test_setstate(self): import numpy as np import sys + E = '<' if sys.byteorder == 'little' else '>' d = np.dtype('f8') d.__setstate__((3, '|', (np.dtype('float64'), (2,)), None, None, 20, 1, 0)) assert d.str == ('<' if sys.byteorder == 'little' else '>') + 'f8' @@ -1203,7 +1218,7 @@ assert d.shape == (2,) assert d.itemsize == 8 assert d.subdtype is not None - assert repr(d) == "dtype(('' + assert str(dt) == "{'names':['f0','f1'], 'formats':['%si4','u1'], 'offsets':[0,4], 'itemsize':8, 'aligned':True}" % E dt = np.dtype([('f1', 'u1'), ('f0', 'i4')], align=True) - assert str(dt) == "{'names':['f1','f0'], 'formats':['u1',' 2 ** 31 - 1: - assert (u == [1]).all() + if sys.byteorder == 'big': + assert (u == [0x0100000000000000]).all() + else: + assert (u == [1]).all() else: - assert (u == [1, 0]).all() + if sys.byteorder == 'big': + assert (u == [0x01000000, 0]).all() + else: + assert (u == [1, 0]).all() v = fromstring("abcd", dtype="|S2") assert v[0] == "ab" assert v[1] == "cd" @@ -3659,9 +3725,15 @@ k = fromstring(self.float16val, dtype='float16') assert k[0] == dtype('float16').type(5.) 
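# Illustrative aside, not part of the patch: most of these numpy test fixes
# follow one pattern: derive the host's byte-order character from
# sys.byteorder (the local variable E above) and build the expected dtype
# strings and raw bytes from it.  With an explicit '<' or '>' the bytes are
# fully determined; only the native spelling depends on the host.
# (tobytes() is the newer spelling of the tostring() used in these tests.)
import sys
import numpy as np

E = '<' if sys.byteorder == 'little' else '>'
assert np.array([1, 2, 3], dtype='<i2').tobytes() == b'\x01\x00\x02\x00\x03\x00'
assert np.array([1, 2, 3], dtype='>i2').tobytes() == b'\x00\x01\x00\x02\x00\x03'
# the native-order array matches whichever explicit form E selects
native_bytes = np.array([1, 2, 3], dtype='i2').tobytes()
assert native_bytes == np.array([1, 2, 3], dtype=E + 'i2').tobytes()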
dt = array([5], dtype='longfloat').dtype + print(dt.itemsize) if dt.itemsize == 8: - m = fromstring('\x00\x00\x00\x00\x00\x00\x14@', - dtype='float64') + import sys + if sys.byteorder == 'big': + m = fromstring('@\x14\x00\x00\x00\x00\x00\x00', + dtype='float64') + else: + m = fromstring('\x00\x00\x00\x00\x00\x00\x14@', + dtype='float64') elif dt.itemsize == 12: m = fromstring('\x00\x00\x00\x00\x00\x00\x00\xa0\x01@\x00\x00', dtype='float96') @@ -3683,8 +3755,13 @@ def test_tostring(self): from numpy import array - assert array([1, 2, 3], 'i2').tostring() == '\x01\x00\x02\x00\x03\x00' - assert array([1, 2, 3], 'i2')[::2].tostring() == '\x01\x00\x03\x00' + import sys + if sys.byteorder == 'big': + assert array([1, 2, 3], 'i2').tostring() == '\x00\x01\x00\x02\x00\x03' + assert array([1, 2, 3], 'i2')[::2].tostring() == '\x00\x01\x00\x03' + else: + assert array([1, 2, 3], 'i2').tostring() == '\x01\x00\x02\x00\x03\x00' + assert array([1, 2, 3], 'i2')[::2].tostring() == '\x01\x00\x03\x00' assert array([1, 2, 3], 'i2')[::2].tostring() == '\x00\x01\x00\x03' assert array(0, dtype='i2').tostring() == '\x00\x00' @@ -4180,7 +4257,11 @@ v = a.view(('float32', 4)) assert v.dtype == np.dtype('float32') assert v.shape == (10, 4) - assert v[0][-1] == 2.53125 + import sys + if sys.byteorder == 'big': + assert v[0][-2] == 2.53125 + else: + assert v[0][-1] == 2.53125 exc = raises(ValueError, "a.view(('float32', 2))") assert exc.value[0] == 'new type not compatible with array.' diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py --- a/pypy/module/micronumpy/test/test_scalar.py +++ b/pypy/module/micronumpy/test/test_scalar.py @@ -109,6 +109,7 @@ def test_pickle(self): from numpy import dtype, zeros + import sys try: from numpy.core.multiarray import scalar except ImportError: @@ -119,9 +120,11 @@ f = dtype('float64').type(13.37) c = dtype('complex128').type(13 + 37.j) - assert i.__reduce__() == (scalar, (dtype('int32'), '9\x05\x00\x00')) - assert f.__reduce__() == (scalar, (dtype('float64'), '=\n\xd7\xa3p\xbd*@')) - assert c.__reduce__() == (scalar, (dtype('complex128'), '\x00\x00\x00\x00\x00\x00*@\x00\x00\x00\x00\x00\x80B@')) + swap = lambda s: (''.join(reversed(s))) if sys.byteorder == 'big' else s + assert i.__reduce__() == (scalar, (dtype('int32'), swap('9\x05\x00\x00'))) + assert f.__reduce__() == (scalar, (dtype('float64'), swap('=\n\xd7\xa3p\xbd*@'))) + assert c.__reduce__() == (scalar, (dtype('complex128'), swap('\x00\x00\x00\x00\x00\x00*@') + \ + swap('\x00\x00\x00\x00\x00\x80B@'))) assert loads(dumps(i)) == i assert loads(dumps(f)) == f @@ -256,13 +259,20 @@ assert t < 7e-323 t = s.view('complex64') assert type(t) is np.complex64 - assert 0 < t.real < 1 - assert t.imag == 0 + if sys.byteorder == 'big': + assert 0 < t.imag < 1 + assert t.real == 0 + else: + assert 0 < t.real < 1 + assert t.imag == 0 exc = raises(TypeError, s.view, 'string') assert str(exc.value) == "data-type must not be 0-sized" t = s.view('S8') assert type(t) is np.string_ - assert t == '\x0c' + if sys.byteorder == 'big': + assert t == '\x00' * 7 + '\x0c' + else: + assert t == '\x0c' s = np.dtype('string').type('abc1') assert s.view('S4') == 'abc1' if '__pypy__' in sys.builtin_module_names: diff --git a/pypy/module/micronumpy/test/test_selection.py b/pypy/module/micronumpy/test/test_selection.py --- a/pypy/module/micronumpy/test/test_selection.py +++ b/pypy/module/micronumpy/test/test_selection.py @@ -327,10 +327,15 @@ # tests from numpy/core/tests/test_regression.py def 
test_sort_bigendian(self): from numpy import array, dtype - a = array(range(11), dtype='float64') - c = a.astype(dtype('' D.__module__ = 'mod' mod = new.module('mod') mod.D = D @@ -510,7 +511,7 @@ tp9 Rp10 (I3 - S'<' + S'{E}' p11 NNNI-1 I-1 @@ -520,7 +521,7 @@ S'\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@' p13 tp14 - b.'''.replace(' ','') + b.'''.replace(' ','').format(E=E) for ss,sn in zip(s.split('\n')[1:],s_from_numpy.split('\n')[1:]): if len(ss)>10: # ignore binary data, it will be checked later diff --git a/pypy/module/pypyjit/test_pypy_c/test_buffers.py b/pypy/module/pypyjit/test_pypy_c/test_buffers.py --- a/pypy/module/pypyjit/test_pypy_c/test_buffers.py +++ b/pypy/module/pypyjit/test_pypy_c/test_buffers.py @@ -34,7 +34,7 @@ i = 0 while i < n: i += 1 - struct.unpack('i', a) # ID: unpack + struct.unpack('') + else: + bit = ord('<') assert loop.match(""" guard_class(p1, #, descr=...) p4 = getfield_gc_r(p1, descr=) @@ -109,7 +113,7 @@ i9 = getfield_gc_i(p4, descr=) i10 = getfield_gc_i(p6, descr=) i12 = int_eq(i10, 61) - i14 = int_eq(i10, 60) + i14 = int_eq(i10, %d) i15 = int_or(i12, i14) f16 = raw_load_f(i9, i5, descr=) guard_true(i15, descr=...) @@ -142,7 +146,7 @@ setfield_gc(p34, i30, descr=) }}} jump(..., descr=...) - """) + """ % (bit,)) def test_reduce_logical_and(self): def main(): diff --git a/pypy/module/pypyjit/test_pypy_c/test_struct.py b/pypy/module/pypyjit/test_pypy_c/test_struct.py --- a/pypy/module/pypyjit/test_pypy_c/test_struct.py +++ b/pypy/module/pypyjit/test_pypy_c/test_struct.py @@ -19,8 +19,8 @@ import struct i = 1 while i < n: - buf = struct.pack("i", i) # ID: pack - x = struct.unpack("i", buf)[0] # ID: unpack + buf = struct.pack(" len(value): @@ -613,10 +616,7 @@ def descr_endswith(self, space, w_suffix, w_start=None, w_end=None): (value, start, end) = self._convert_idx_params(space, w_start, w_end) if space.isinstance_w(w_suffix, space.w_tuple): - for w_suffix in space.fixedview(w_suffix): - if self._endswith(space, value, w_suffix, start, end): - return space.w_True - return space.w_False + return self._endswith_tuple(space, value, w_suffix, start, end) try: res = self._endswith(space, value, w_suffix, start, end) except OperationError as e: @@ -628,6 +628,12 @@ "%T", wanted, wanted, w_suffix) return space.newbool(res) + def _endswith_tuple(self, space, value, w_suffix, start, end): + for w_suffix in space.fixedview(w_suffix): + if self._endswith(space, value, w_suffix, start, end): + return space.w_True + return space.w_False + def _endswith(self, space, value, w_prefix, start, end): prefix = self._op_val(space, w_prefix) if start > len(value): @@ -787,5 +793,3 @@ def _get_buffer(space, w_obj): return space.buffer_w(w_obj, space.BUF_SIMPLE) - - diff --git a/pypy/testrunner_cfg.py b/pypy/testrunner_cfg.py --- a/pypy/testrunner_cfg.py +++ b/pypy/testrunner_cfg.py @@ -5,6 +5,7 @@ 'translator/c', 'rlib', 'memory/test', 'jit/metainterp', 'jit/backend/arm', 'jit/backend/x86', + 'jit/backend/zarch', ] def collect_one_testdir(testdirs, reldir, tests): diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -2,14 +2,16 @@ maj=5 min=0 rev=0 +branchname=release-$maj.x # ==OR== release-$maj.$min.x +tagname=release-$maj.$min # ==OR== release-$maj.$min.$rev # This script will download latest builds from the buildmaster, rename the top # level directory, and repackage ready to be uploaded to bitbucket. 
It will also # download source, assuming a tag for the release already exists, and repackage them. # The script should be run in an empty directory, i.e. /tmp/release_xxx -for plat in linux linux64 linux-armhf-raspbian linux-armhf-raring linux-armel osx64 freebsd64 +for plat in linux linux64 linux-armhf-raspbian linux-armhf-raring linux-armel osx64 do - wget http://buildbot.pypy.org/nightly/release-$maj.$min.x/pypy-c-jit-latest-$plat.tar.bz2 + wget http://buildbot.pypy.org/nightly/$branchname/pypy-c-jit-latest-$plat.tar.bz2 tar -xf pypy-c-jit-latest-$plat.tar.bz2 rm pypy-c-jit-latest-$plat.tar.bz2 mv pypy-c-jit-*-$plat pypy-$maj.$min.$rev-$plat @@ -18,7 +20,7 @@ done plat=win32 -wget http://buildbot.pypy.org/nightly/release-$maj.$min.x/pypy-c-jit-latest-$plat.zip +wget http://buildbot.pypy.org/nightly/$branchname/pypy-c-jit-latest-$plat.zip unzip pypy-c-jit-latest-$plat.zip mv pypy-c-jit-*-$plat pypy-$maj.$min.$rev-$plat zip -r pypy-$maj.$min.$rev-$plat.zip pypy-$maj.$min.$rev-$plat @@ -26,16 +28,17 @@ # Do this after creating a tag, note the untarred directory is pypy-pypy- # so make sure there is not another one -wget https://bitbucket.org/pypy/pypy/get/release-$maj.$min.$rev.tar.bz2 -tar -xf release-$maj.$min.$rev.tar.bz2 +wget https://bitbucket.org/pypy/pypy/get/$tagname.tar.bz2 +tar -xf $tagname.tar.bz2 mv pypy-pypy-* pypy-$maj.$min.$rev-src tar --owner=root --group=root --numeric-owner -cvjf pypy-$maj.$min.$rev-src.tar.bz2 pypy-$maj.$min.$rev-src zip -r pypy-$maj.$min.$rev-src.zip pypy-$maj.$min.$rev-src rm -rf pypy-$maj.$min.$rev-src -# Print out the md5, sha1 +# Print out the md5, sha1, sha256 md5sum *.bz2 *.zip sha1sum *.bz2 *.zip +sha256sum *.bz2 *.zip # Now upload all the bz2 and zip diff --git a/requirements.txt b/requirements.txt --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,4 @@ # hypothesis is used for test generation on untranslated jit tests hypothesis enum>=0.4.6 # is a dependency, but old pip does not pick it up +enum34>=1.1.2 diff --git a/rpython/annotator/annrpython.py b/rpython/annotator/annrpython.py --- a/rpython/annotator/annrpython.py +++ b/rpython/annotator/annrpython.py @@ -3,7 +3,7 @@ import types from collections import defaultdict -from rpython.tool.ansi_print import ansi_log +from rpython.tool.ansi_print import AnsiLogger from rpython.tool.pairtype import pair from rpython.tool.error import (format_blocked_annotation_error, gather_error, source_lines) @@ -15,9 +15,7 @@ from rpython.annotator.bookkeeper import Bookkeeper from rpython.rtyper.normalizecalls import perform_normalizations -import py -log = py.log.Producer("annrpython") -py.log.setconsumer("annrpython", ansi_log) +log = AnsiLogger("annrpython") class RPythonAnnotator(object): diff --git a/rpython/doc/index.rst b/rpython/doc/index.rst --- a/rpython/doc/index.rst +++ b/rpython/doc/index.rst @@ -37,6 +37,7 @@ arm logging + s390x Writing your own interpreter in RPython diff --git a/rpython/doc/s390x.rst b/rpython/doc/s390x.rst new file mode 100644 --- /dev/null +++ b/rpython/doc/s390x.rst @@ -0,0 +1,20 @@ +.. _s390x: + +S390X JIT Backend +================= + +Our JIT implements the 64 bit version of the IBM Mainframe called s390x. +Note that this architecture is big endian. + +The following facilities need to be installed to operate +correctly (all of the machines used for development these where installed): + +* General-Instructions-Extension +* Long-Displacement +* Binary Floating Point (IEEE) + +Translating +----------- + +Ensure that libffi is installed (version should do > 3.0.+). 
+CPython should be version 2.7.+. diff --git a/rpython/jit/backend/arm/locations.py b/rpython/jit/backend/arm/locations.py --- a/rpython/jit/backend/arm/locations.py +++ b/rpython/jit/backend/arm/locations.py @@ -1,7 +1,6 @@ from rpython.jit.metainterp.history import INT, FLOAT from rpython.jit.backend.arm.arch import WORD, DOUBLE_WORD, JITFRAME_FIXED_SIZE - class AssemblerLocation(object): _immutable_ = True type = INT diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -1143,35 +1143,42 @@ def emit_op_zero_array(self, op, arglocs, regalloc, fcond): from rpython.jit.backend.llsupport.descr import unpack_arraydescr assert len(arglocs) == 0 - length_box = op.getarg(2) - if isinstance(length_box, ConstInt) and length_box.getint() == 0: + size_box = op.getarg(2) + if isinstance(size_box, ConstInt) and size_box.getint() == 0: return fcond # nothing to do itemsize, baseofs, _ = unpack_arraydescr(op.getdescr()) args = op.getarglist() + # + # ZERO_ARRAY(base_loc, start, size, 1, 1) + # 'start' and 'size' are both expressed in bytes, + # and the two scaling arguments should always be ConstInt(1) on ARM. + assert args[3].getint() == 1 + assert args[4].getint() == 1 + # base_loc = regalloc.rm.make_sure_var_in_reg(args[0], args) - sibox = args[1] - if isinstance(sibox, ConstInt): - startindex_loc = None - startindex = sibox.getint() - assert startindex >= 0 + startbyte_box = args[1] + if isinstance(startbyte_box, ConstInt): + startbyte_loc = None + startbyte = startbyte_box.getint() + assert startbyte >= 0 else: - startindex_loc = regalloc.rm.make_sure_var_in_reg(sibox, args) - startindex = -1 + startbyte_loc = regalloc.rm.make_sure_var_in_reg(startbyte_box, + args) + startbyte = -1 - # base_loc and startindex_loc are in two regs here (or they are - # immediates). Compute the dstaddr_loc, which is the raw + # base_loc and startbyte_loc are in two regs here (or startbyte_loc + # is an immediate). Compute the dstaddr_loc, which is the raw # address that we will pass as first argument to memset(). # It can be in the same register as either one, but not in # args[2], because we're still needing the latter. 
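        # (Note, not part of the patch: with the new convention documented
        #  above, ZERO_ARRAY(base, startbyte, sizebyte, 1, 1) passes 'start'
        #  and 'size' already scaled to bytes, so the memset() destination is
        #  a plain addition,
        #      dstaddr = base + baseofs + startbyte
        #  instead of the old multiply-accumulate,
        #      dstaddr = base + baseofs + startindex * itemsize
        #  which is why the MLA instruction is replaced by ADD_rr below.)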
dstaddr_box = TempVar() dstaddr_loc = regalloc.rm.force_allocate_reg(dstaddr_box, [args[2]]) - if startindex >= 0: # a constant - ofs = baseofs + startindex * itemsize + if startbyte >= 0: # a constant + ofs = baseofs + startbyte reg = base_loc.value else: - self.mc.gen_load_int(r.ip.value, itemsize) - self.mc.MLA(dstaddr_loc.value, r.ip.value, - startindex_loc.value, base_loc.value) + self.mc.ADD_rr(dstaddr_loc.value, + base_loc.value, startbyte_loc.value) ofs = baseofs reg = dstaddr_loc.value if check_imm_arg(ofs): @@ -1180,20 +1187,27 @@ self.mc.gen_load_int(r.ip.value, ofs) self.mc.ADD_rr(dstaddr_loc.value, reg, r.ip.value) - if (isinstance(length_box, ConstInt) and - length_box.getint() <= 14 and # same limit as GCC From pypy.commits at gmail.com Sat Mar 19 00:31:35 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 18 Mar 2016 21:31:35 -0700 (PDT) Subject: [pypy-commit] pypy default: Remove debug artifact (backout 18bd2d236f85) Message-ID: <56ecd627.890bc30a.e5675.74b9@mx.google.com> Author: Ronan Lamy Branch: Changeset: r83155:c3f763db98bb Date: 2016-03-19 04:29 +0000 http://bitbucket.org/pypy/pypy/changeset/c3f763db98bb/ Log: Remove debug artifact (backout 18bd2d236f85) diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -287,8 +287,7 @@ t = thread.start_new_thread(pollster.poll, ()) try: time.sleep(0.3) - # TODO restore print '', if this is not the reason - for i in range(5): print 'release gil select' # to release GIL untranslated + for i in range(5): print '', # to release GIL untranslated # trigger ufds array reallocation for fd in rfds: pollster.unregister(fd) From pypy.commits at gmail.com Sat Mar 19 02:35:56 2016 From: pypy.commits at gmail.com (rlamy) Date: Fri, 18 Mar 2016 23:35:56 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Import a bunch of docstrings from CPython 3.3 (b9c8f1c80f47) Message-ID: <56ecf34c.0a301c0a.49916.ffffdba6@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83156:664a8d8d8eb4 Date: 2016-03-19 06:31 +0000 http://bitbucket.org/pypy/pypy/changeset/664a8d8d8eb4/ Log: Import a bunch of docstrings from CPython 3.3 (b9c8f1c80f47) Update docstrings for all functions that support the new keyword- only arguments 'follow_symlinks' and 'dir_fd'. Add stubs for the missing posix.* functions chflags(), lchflags(), getxattr(), setxattr(), removexattr() and listxattr(). diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -106,8 +106,14 @@ @unwrap_spec(flag=c_int, mode=c_int) def open(space, w_fname, flag, mode=0777): - """Open a file (for low level IO). -Return a file descriptor (a small integer).""" + """open(path, flags, mode=0o777, *, dir_fd=None) + +Open a file for low level IO. Returns a file handle (integer). + +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +dir_fd may not be implemented on your platform. + If it is unavailable, using it will raise a NotImplementedError.""" try: fd = dispatch_filename(rposix.open)( space, w_fname, flag, mode) @@ -298,20 +304,21 @@ return build_stat_result(space, st) def stat(space, w_path): - """Perform a stat system call on the given path. 
Return an object -with (at least) the following attributes: - st_mode - st_ino - st_dev - st_nlink - st_uid - st_gid - st_size - st_atime - st_mtime - st_ctime -""" + """stat(path, *, dir_fd=None, follow_symlinks=True) -> stat result +Perform a stat system call on the given path. + +path may be specified as either a string or as an open file descriptor. + +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. + dir_fd may not be supported on your platform; if it is unavailable, using + it will raise a NotImplementedError. +If follow_symlinks is False, and the last element of the path is a symbolic + link, stat will examine the symbolic link itself instead of the file the + link points to. +It is an error to use dir_fd or follow_symlinks when specifying path as + an open file descriptor.""" try: st = dispatch_filename(rposix_stat.stat, 0, allow_fd_fn=rposix_stat.fstat)(space, w_path) @@ -321,7 +328,11 @@ return build_stat_result(space, st) def lstat(space, w_path): - "Like stat(path), but do not follow symbolic links." + """lstat(path, *, dir_fd=None) -> stat result + +Like stat(), but do not follow symbolic links. +Equivalent to stat(path, follow_symlinks=False).""" + try: st = dispatch_filename(rposix_stat.lstat)(space, w_path) except OSError, e: @@ -360,6 +371,13 @@ def statvfs(space, w_path): + """statvfs(path) + +Perform a statvfs system call on the given path. + +path may always be specified as a string. +On some platforms, path may also be specified as an open file descriptor. + If this functionality is unavailable, using it raises an exception.""" try: st = dispatch_filename(rposix_stat.statvfs)(space, w_path) except OSError as e: @@ -389,15 +407,27 @@ @unwrap_spec(mode=c_int) def access(space, w_path, mode): - """ - access(path, mode) -> 1 if granted, 0 otherwise + """access(path, mode, *, dir_fd=None, effective_ids=False, follow_symlinks=True) - Use the real uid/gid to test for access to a path. Note that most - operations will use the effective uid/gid, therefore this routine can - be used in a suid/sgid environment to test if the invoking user has the - specified access to the path. The mode argument can be F_OK to test - existence, or the inclusive-OR of R_OK, W_OK, and X_OK. - """ +Use the real uid/gid to test for access to a path. Returns True if granted, +False otherwise. + +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +If effective_ids is True, access will use the effective uid/gid instead of + the real uid/gid. +If follow_symlinks is False, and the last element of the path is a symbolic + link, access will examine the symbolic link itself instead of the file the + link points to. +dir_fd, effective_ids, and follow_symlinks may not be implemented + on your platform. If they are unavailable, using them will raise a + NotImplementedError. + +Note that most operations will use the effective uid/gid, therefore this + routine can be used in a suid/sgid environment to test if the invoking user + has the specified access to the path. 
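# Illustrative usage of the real-vs-effective distinction described above
# (not part of the patch; "spam.txt" is a placeholder path, CPython 3.3+
# semantics, availability is platform-dependent):
import os
# default: check against the real uid/gid, as the docstring says
ok_real = os.access("spam.txt", os.R_OK | os.W_OK)
# in a suid/sgid program, check against the effective uid/gid instead,
# where the platform supports it
if os.access in os.supports_effective_ids:
    ok_eff = os.access("spam.txt", os.R_OK | os.W_OK, effective_ids=True)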
+The mode argument can be F_OK to test existence, or the inclusive-OR + of R_OK, W_OK, and X_OK.""" try: ok = dispatch_filename(rposix.access)(space, w_path, mode) except OSError, e: @@ -434,14 +464,28 @@ return space.wrap(rc) def unlink(space, w_path): - """Remove a file (same as remove(path)).""" + """unlink(path, *, dir_fd=None) + +Remove a file (same as remove()). + +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +dir_fd may not be implemented on your platform. + If it is unavailable, using it will raise a NotImplementedError.""" try: dispatch_filename(rposix.unlink)(space, w_path) except OSError, e: raise wrap_oserror2(space, e, w_path) def remove(space, w_path): - """Remove a file (same as unlink(path)).""" + """remove(path, *, dir_fd=None) + +Remove a file (same as unlink()). + +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +dir_fd may not be implemented on your platform. + If it is unavailable, using it will raise a NotImplementedError.""" try: dispatch_filename(rposix.unlink)(space, w_path) except OSError, e: @@ -494,15 +538,31 @@ raise wrap_oserror2(space, e, w_path) @unwrap_spec(mode=c_int) -def mkdir(space, w_path, mode=0777): - """Create a directory.""" +def mkdir(space, w_path, mode=0o777): + """mkdir(path, mode=0o777, *, dir_fd=None) + +Create a directory. + +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +dir_fd may not be implemented on your platform. + If it is unavailable, using it will raise a NotImplementedError. + +The mode argument is ignored on Windows.""" try: dispatch_filename(rposix.mkdir)(space, w_path, mode) except OSError, e: raise wrap_oserror2(space, e, w_path) def rmdir(space, w_path): - """Remove a directory.""" + """rmdir(path, *, dir_fd=None) + +Remove a directory. + +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +dir_fd may not be implemented on your platform. + If it is unavailable, using it will raise a NotImplementedError.""" try: dispatch_filename(rposix.rmdir)(space, w_path) except OSError, e: @@ -636,7 +696,22 @@ @unwrap_spec(mode=c_int) def chmod(space, w_path, mode): - "Change the access permissions of a file." + """chmod(path, mode, *, dir_fd=None, follow_symlinks=True) + +Change the access permissions of a file. + +path may always be specified as a string. +On some platforms, path may also be specified as an open file descriptor. + If this functionality is unavailable, using it raises an exception. +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +If follow_symlinks is False, and the last element of the path is a symbolic + link, chmod will modify the symbolic link itself instead of the file the + link points to. +It is an error to use dir_fd or follow_symlinks when specifying path as + an open file descriptor. +dir_fd and follow_symlinks may not be implemented on your platform. 
+ If they are unavailable, using them will raise a NotImplementedError.""" try: dispatch_filename(rposix.chmod)(space, w_path, mode) except OSError, e: @@ -653,14 +728,30 @@ raise wrap_oserror(space, e) def rename(space, w_old, w_new): - "Rename a file or directory." + """rename(src, dst, *, src_dir_fd=None, dst_dir_fd=None) + +Rename a file or directory. + +If either src_dir_fd or dst_dir_fd is not None, it should be a file + descriptor open to a directory, and the respective path string (src or dst) + should be relative; the path will then be relative to that directory. +src_dir_fd and dst_dir_fd, may not be implemented on your platform. + If they are unavailable, using them will raise a NotImplementedError.""" try: dispatch_filename_2(rposix.rename)(space, w_old, w_new) except OSError, e: raise wrap_oserror(space, e) def replace(space, w_old, w_new): - "Replace a file or directory, overwriting the destination." + """replace(src, dst, *, src_dir_fd=None, dst_dir_fd=None) + +Rename a file or directory, overwriting the destination. + +If either src_dir_fd or dst_dir_fd is not None, it should be a file + descriptor open to a directory, and the respective path string (src or dst) + should be relative; the path will then be relative to that directory. +src_dir_fd and dst_dir_fd, may not be implemented on your platform. + If they are unavailable, using them will raise a NotImplementedError.""" try: dispatch_filename_2(rposix.replace)(space, w_old, w_new) except OSError, e: @@ -668,7 +759,14 @@ @unwrap_spec(mode=c_int) def mkfifo(space, w_filename, mode=0666): - """Create a FIFO (a POSIX named pipe).""" + """mkfifo(path, mode=0o666, *, dir_fd=None) + +Create a FIFO (a POSIX named pipe). + +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +dir_fd may not be implemented on your platform. + If it is unavailable, using it will raise a NotImplementedError.""" try: dispatch_filename(rposix.mkfifo)(space, w_filename, mode) except OSError, e: @@ -676,12 +774,19 @@ @unwrap_spec(mode=c_int, device=c_int) def mknod(space, w_filename, mode=0600, device=0): - """Create a filesystem node (file, device special file or named pipe) + """mknod(filename, mode=0o600, device=0, *, dir_fd=None) + +Create a filesystem node (file, device special file or named pipe) named filename. mode specifies both the permissions to use and the type of node to be created, being combined (bitwise OR) with one of S_IFREG, S_IFCHR, S_IFBLK, and S_IFIFO. For S_IFCHR and S_IFBLK, device defines the newly created device special file (probably using -os.makedev()), otherwise it is ignored.""" +os.makedev()), otherwise it is ignored. + +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +dir_fd may not be implemented on your platform. + If it is unavailable, using it will raise a NotImplementedError.""" try: dispatch_filename(rposix.mknod)(space, w_filename, mode, device) except OSError, e: @@ -725,22 +830,52 @@ @unwrap_spec(src='fsencode', dst='fsencode') def link(space, src, dst): - "Create a hard link to a file." + """link(src, dst, *, src_dir_fd=None, dst_dir_fd=None, follow_symlinks=True) + +Create a hard link to a file. 
+ +If either src_dir_fd or dst_dir_fd is not None, it should be a file + descriptor open to a directory, and the respective path string (src or dst) + should be relative; the path will then be relative to that directory. +If follow_symlinks is False, and the last element of src is a symbolic + link, link will create a link to the symbolic link itself instead of the + file the link points to. +src_dir_fd, dst_dir_fd, and follow_symlinks may not be implemented on your + platform. If they are unavailable, using them will raise a + NotImplementedError.""" try: os.link(src, dst) except OSError, e: raise wrap_oserror(space, e) def symlink(space, w_src, w_dst, w_target_is_directory=None): - "Create a symbolic link pointing to src named dst." - # TODO: target_is_directory has a meaning on Windows + """symlink(src, dst, target_is_directory=False, *, dir_fd=None) + +Create a symbolic link pointing to src named dst. + +target_is_directory is required on Windows if the target is to be + interpreted as a directory. (On Windows, symlink requires + Windows 6.0 or greater, and raises a NotImplementedError otherwise.) + target_is_directory is ignored on non-Windows platforms. + +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +dir_fd may not be implemented on your platform. + If it is unavailable, using it will raise a NotImplementedError.""" try: dispatch_filename_2(rposix.symlink)(space, w_src, w_dst) except OSError, e: raise wrap_oserror(space, e) def readlink(space, w_path): - "Return a string representing the path to which the symbolic link points." + """readlink(path, *, dir_fd=None) -> path + +Return a string representing the path to which the symbolic link points. + +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +dir_fd may not be implemented on your platform. + If it is unavailable, using it will raise a NotImplementedError.""" is_unicode = space.isinstance_w(w_path, space.w_unicode) if is_unicode: path = space.fsencode_w(w_path) @@ -909,12 +1044,31 @@ return space.wrap(ret) def utime(space, w_path, w_tuple): - """ utime(path, (atime, mtime)) -utime(path, None) + """utime(path, times=None, *, ns=None, dir_fd=None, follow_symlinks=True) -Set the access and modified time of the file to the given values. If the -second form is used, set the access and modified times to the current time. - """ +Set the access and modified time of path. + +path may always be specified as a string. +On some platforms, path may also be specified as an open file descriptor. + If this functionality is unavailable, using it raises an exception. + +If times is not None, it must be a tuple (atime, mtime); + atime and mtime should be expressed as float seconds since the epoch. +If ns is not None, it must be a tuple (atime_ns, mtime_ns); + atime_ns and mtime_ns should be expressed as integer nanoseconds + since the epoch. +If both times and ns are None, utime uses the current time. +Specifying tuples for both times and ns is an error. + +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +If follow_symlinks is False, and the last element of the path is a symbolic + link, utime will modify the symbolic link itself instead of the file the + link points to. 
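# Illustrative usage of follow_symlinks and dir_fd as documented above
# (not part of the patch; the paths are placeholders, CPython 3.3+ API):
import os

st = os.stat("somelink", follow_symlinks=False)   # examine the link itself,
                                                  # same as os.lstat("somelink")
if os.utime in os.supports_follow_symlinks:
    os.utime("somelink", follow_symlinks=False)   # touch the link, not its target

dfd = os.open("somedir", os.O_RDONLY)             # open a directory...
try:
    # ...and resolve "data.txt" relative to it, i.e. "somedir/data.txt"
    fd = os.open("data.txt", os.O_RDONLY, dir_fd=dfd)
    os.close(fd)
finally:
    os.close(dfd)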
+It is an error to use dir_fd or follow_symlinks when specifying path + as an open file descriptor. +dir_fd and follow_symlinks may not be available on your platform. + If they are unavailable, using them will raise a NotImplementedError.""" if space.is_w(w_tuple, space.w_None): try: dispatch_filename(rposix.utime, 1)(space, w_path, None) @@ -1073,7 +1227,7 @@ @unwrap_spec(username=str, gid=c_gid_t) def initgroups(space, username, gid): """ initgroups(username, gid) -> None - + Call the system initgroups() to initialize the group access list with all of the groups of which the specified username is a member, plus the specified group id. @@ -1246,7 +1400,7 @@ @unwrap_spec(rgid=c_gid_t, egid=c_gid_t, sgid=c_gid_t) def setresgid(space, rgid, egid, sgid): """ setresgid(rgid, egid, sgid) - + Set the current process's real, effective, and saved group ids. """ try: @@ -1329,7 +1483,22 @@ @unwrap_spec(path='fsencode', uid=c_uid_t, gid=c_gid_t) def chown(space, path, uid, gid): - """Change the owner and group id of path to the numeric uid and gid.""" + """chown(path, uid, gid, *, dir_fd=None, follow_symlinks=True) + +Change the owner and group id of path to the numeric uid and gid. + +path may always be specified as a string. +On some platforms, path may also be specified as an open file descriptor. + If this functionality is unavailable, using it raises an exception. +If dir_fd is not None, it should be a file descriptor open to a directory, + and path should be relative; path will then be relative to that directory. +If follow_symlinks is False, and the last element of the path is a symbolic + link, chown will modify the symbolic link itself instead of the file the + link points to. +It is an error to use dir_fd or follow_symlinks when specifying path as + an open file descriptor. +dir_fd and follow_symlinks may not be implemented on your platform. + If they are unavailable, using them will raise a NotImplementedError.""" check_uid_range(space, uid) check_uid_range(space, gid) try: @@ -1339,8 +1508,11 @@ @unwrap_spec(path='fsencode', uid=c_uid_t, gid=c_gid_t) def lchown(space, path, uid, gid): - """Change the owner and group id of path to the numeric uid and gid. -This function will not follow symbolic links.""" + """lchown(path, uid, gid) + +Change the owner and group id of path to the numeric uid and gid. +This function will not follow symbolic links. +Equivalent to os.chown(path, uid, gid, follow_symlinks=False).""" check_uid_range(space, uid) check_uid_range(space, gid) try: @@ -1350,8 +1522,10 @@ @unwrap_spec(uid=c_uid_t, gid=c_gid_t) def fchown(space, w_fd, uid, gid): - """Change the owner and group id of the file given by file descriptor -fd to the numeric uid and gid.""" + """fchown(fd, uid, gid) + +Change the owner and group id of the file given by file descriptor +fd to the numeric uid and gid. Equivalent to os.chown(fd, uid, gid).""" fd = space.c_filedescriptor_w(w_fd) check_uid_range(space, uid) check_uid_range(space, gid) @@ -1458,11 +1632,71 @@ raise wrap_oserror2(space, e, w_path) return space.wrap(result) + +def chflags(): + """chflags(path, flags, *, follow_symlinks=True) + +Set file flags. + +If follow_symlinks is False, and the last element of the path is a symbolic + link, chflags will change flags on the symbolic link itself instead of the + file the link points to. +follow_symlinks may not be implemented on your platform. If it is +unavailable, using it will raise a NotImplementedError.""" + +def lchflags(): + """lchflags(path, flags) + +Set file flags. 
+This function will not follow symbolic links. +Equivalent to chflags(path, flags, follow_symlinks=False).""" + +def getxattr(): + """getxattr(path, attribute, *, follow_symlinks=True) -> value + +Return the value of extended attribute attribute on path. + +path may be either a string or an open file descriptor. +If follow_symlinks is False, and the last element of the path is a symbolic + link, getxattr will examine the symbolic link itself instead of the file + the link points to.""" + +def setxattr(): + """setxattr(path, attribute, value, flags=0, *, follow_symlinks=True) + +Set extended attribute attribute on path to value. +path may be either a string or an open file descriptor. +If follow_symlinks is False, and the last element of the path is a symbolic + link, setxattr will modify the symbolic link itself instead of the file + the link points to.""" + + +def removexattr(): + """removexattr(path, attribute, *, follow_symlinks=True) + +Remove extended attribute attribute on path. +path may be either a string or an open file descriptor. +If follow_symlinks is False, and the last element of the path is a symbolic + link, removexattr will modify the symbolic link itself instead of the file + the link points to.""" + +def listxattr(): + """listxattr(path='.', *, follow_symlinks=True) + +Return a list of extended attributes on path. + +path may be either None, a string, or an open file descriptor. +if path is None, listxattr will examine the current directory. +If follow_symlinks is False, and the last element of the path is a symbolic + link, listxattr will examine the symbolic link itself instead of the file + the link points to.""" + + have_functions = [] for name in """FCHDIR FCHMOD FCHMODAT FCHOWN FCHOWNAT FEXECVE FDOPENDIR FPATHCONF FSTATAT FSTATVFS FTRUNCATE FUTIMENS FUTIMES FUTIMESAT LINKAT LCHFLAGS LCHMOD LCHOWN LSTAT LUTIMES - MKDIRAT MKFIFOAT MKNODAT OPENAT READLINKAT RENAMEAT + MKDIRAT MKFIFOAT MKNODAT OPENAT READLINKAT RENAMEAT SYMLINKAT UNLINKAT UTIMENSAT""".split(): if getattr(rposix, "HAVE_%s" % name): have_functions.append("HAVE_%s" % name) From pypy.commits at gmail.com Sat Mar 19 02:38:18 2016 From: pypy.commits at gmail.com (arigo) Date: Fri, 18 Mar 2016 23:38:18 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: Update the "low on address space" comment Message-ID: <56ecf3da.2968c20a.84ddc.ffff9194@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r724:8f629ad05c40 Date: 2016-03-19 07:38 +0100 http://bitbucket.org/pypy/pypy.org/changeset/8f629ad05c40/ Log: Update the "low on address space" comment diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -291,7 +291,8 @@ machine with insufficient RAM! It will just swap forever. See notes below in that case.)

  • -
  • If you want to install this PyPy as root, please read the next section.

    +
  • If you want to install this PyPy as root, please read the next section, +Packaging.

  • Notes:

    @@ -300,16 +301,25 @@ because it is twice as fast. You should just start by downloading an official release of PyPy (with the JIT). If you really have to use CPython then note that we are talking about CPython 2.7 here, not CPython 3.x. -(CPython 2.6 might or might not work. Older versions are out.)

    +(Older versions like 2.6 are out.)

    -
  • If RAM usage is a problem (or if you are on Windows, because win32's limit -is 2 GB unless you have a 64 bit OS), then you can (for now) tweak some parameters -via environment variables and command-line options. The following command -takes a bit more time, but finishes with only using 3.0 GB of RAM (on -Linux 64-bit; probably not much more than 1.6 GB on 32-bit). It should be -noted that it is less than with CPython.

    +
  • On some 32-bit systems, the address space limit of 2 or 3 GB of RAM +can be an issue. More generally you may be just a little bit low of +RAM. First note that 2 GB is really not enough nowadays; on Windows +you first need to refer to the Windows build instructions. More +precisely, translation on 32-bit takes at this point 2.7 GB if PyPy is +used and 2.9 GB if CPython is used. There are two workarounds:

    +

    1. use PyPy, not CPython. If you don't have any PyPy so far, not even +an older version, then you need to build one first, with some parts +removed. So, first translate with ...rpython -Ojit +targetpypystandalone --withoutmod-micronumpy --withoutmod-cpyext, +then copy pypy-c and libpypy_c.so somewhere else, and finally +call it with ...pypy-c ../../rpython/bin/rpython -Ojit.

    +

    2. if even using PyPy instead of CPython is not enough, try to tweak +some internal parameters. Example (slower but saves around 400MB):

    -PYPY_GC_MAX_DELTA=200MB pypy --jit loop_longevity=300 ../../rpython/bin/rpython -Ojit targetpypystandalone
    +PYPY_DONT_RUN_SUBPROCESS=1 PYPY_GC_MAX_DELTA=200MB pypy --jit loop_longevity=300 ../../rpython/bin/rpython -Ojit --source
    +# then read the next point about --source
     
  • You can run translations with --source, which only builds the C diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -313,7 +313,8 @@ machine with insufficient RAM! It will just swap forever. See notes below in that case.) -6. If you want to install this PyPy as root, please read the next section. +6. If you want to install this PyPy as root, please read the next section, + Packaging_. Notes: @@ -321,16 +322,27 @@ because it is twice as fast. You should just start by downloading an official release of PyPy (with the JIT). If you really have to use CPython then note that we are talking about CPython 2.7 here, not CPython 3.x. - (CPython 2.6 might or might not work. Older versions are out.) + (Older versions like 2.6 are out.) -* If RAM usage is a problem (or if you are on Windows, because win32's limit - is 2 GB unless you `have a 64 bit OS`_), then you can (for now) tweak some parameters - via environment variables and command-line options. The following command - takes a bit more time, but finishes with only using 3.0 GB of RAM (on - Linux 64-bit; probably not much more than 1.6 GB on 32-bit). It should be - noted that it is less than with CPython. :: +* On some 32-bit systems, the address space limit of 2 or 3 GB of RAM + can be an issue. More generally you may be just a little bit low of + RAM. First note that 2 GB is really not enough nowadays; on Windows + you first need to refer to the `Windows build instructions`_. More + precisely, translation on 32-bit takes at this point 2.7 GB if PyPy is + used and 2.9 GB if CPython is used. There are two workarounds: + + 1. use PyPy, not CPython. If you don't have any PyPy so far, not even + an older version, then you need to build one first, with some parts + removed. So, first translate with ``...rpython -Ojit + targetpypystandalone --withoutmod-micronumpy --withoutmod-cpyext``, + then copy ``pypy-c`` and ``libpypy_c.so`` somewhere else, and finally + call it with ``...pypy-c ../../rpython/bin/rpython -Ojit``. - PYPY_GC_MAX_DELTA=200MB pypy --jit loop_longevity=300 ../../rpython/bin/rpython -Ojit targetpypystandalone + 2. if even using PyPy instead of CPython is not enough, try to tweak + some internal parameters. Example (slower but saves around 400MB):: + + PYPY_DONT_RUN_SUBPROCESS=1 PYPY_GC_MAX_DELTA=200MB pypy --jit loop_longevity=300 ../../rpython/bin/rpython -Ojit --source + # then read the next point about --source * You can run translations with ``--source``, which only builds the C source files (and prints at the end where). Then you can ``cd`` there @@ -360,7 +372,7 @@ .. _`sandboxing`: features.html#sandboxing .. _`stackless`: http://www.stackless.com/ .. _`greenlets`: http://pypy.readthedocs.org/en/latest/stackless.html#greenlets -.. _`have a 64 bit OS`: http://doc.pypy.org/en/latest/windows.html#preparing-windows-for-the-large-build +.. _`Windows build instructions`: http://doc.pypy.org/en/latest/windows.html#preparing-windows-for-the-large-build .. _`shadow stack`: http://pypy.readthedocs.org/en/latest/config/translation.gcrootfinder.html .. 
_Mercurial: http://mercurial.selenic.com/ From pypy.commits at gmail.com Sat Mar 19 04:19:14 2016 From: pypy.commits at gmail.com (fijal) Date: Sat, 19 Mar 2016 01:19:14 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: fix position Message-ID: <56ed0b82.657bc20a.dd20.ffffac58@mx.google.com> Author: fijal Branch: heapcache-refactor Changeset: r83157:40d970d5e3f4 Date: 2016-03-19 10:18 +0200 http://bitbucket.org/pypy/pypy/changeset/40d970d5e3f4/ Log: fix position diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -614,9 +614,8 @@ return intmask(p) >> FO_POSITION_SHIFT def set_position(self, new_pos): + self.position_and_flags |= r_uint(new_pos << FO_POSITION_SHIFT) self.__init__(new_pos) - #flags = self.position_and_flags & (~FO_POSITION_MASK) - #self.position_and_flags = flags | r_uint(new_pos) def is_replaced_with_const(self): return bool(self.position_and_flags & FO_REPLACED_WITH_CONST) From pypy.commits at gmail.com Sat Mar 19 04:26:21 2016 From: pypy.commits at gmail.com (fijal) Date: Sat, 19 Mar 2016 01:26:21 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: fix Message-ID: <56ed0d2d.46fac20a.5cc1.ffffb3be@mx.google.com> Author: fijal Branch: heapcache-refactor Changeset: r83158:c5557d5125c2 Date: 2016-03-19 10:25 +0200 http://bitbucket.org/pypy/pypy/changeset/c5557d5125c2/ Log: fix diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -681,7 +681,7 @@ # hack to record the ops *after* we know our inputargs for (opnum, argboxes, op, descr) in self._cache: pos = self.trace.record_op(opnum, argboxes, descr) - op.position = pos + op.set_position(pos) self._cache = None def length(self): diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -280,8 +280,7 @@ self._refs.append(box.getref_base()) return tag(TAGCONSTPTR, v) elif isinstance(box, AbstractResOp): - return tag(TAGBOX, box.get_position()) - elif isinstance(box, AbstractInputArg): + assert box.get_position() >= 0 return tag(TAGBOX, box.get_position()) else: assert False, "unreachable code" From pypy.commits at gmail.com Sat Mar 19 04:43:13 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 19 Mar 2016 01:43:13 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: Fix set_position() Message-ID: <56ed1121.41e11c0a.d2dd2.ffffeccc@mx.google.com> Author: Armin Rigo Branch: heapcache-refactor Changeset: r83159:85289474b3ac Date: 2016-03-19 09:42 +0100 http://bitbucket.org/pypy/pypy/changeset/85289474b3ac/ Log: Fix set_position() diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -614,8 +614,9 @@ return intmask(p) >> FO_POSITION_SHIFT def set_position(self, new_pos): + assert new_pos >= 0 + self.position_and_flags &= ~0xFFFFFFFE self.position_and_flags |= r_uint(new_pos << FO_POSITION_SHIFT) - self.__init__(new_pos) def is_replaced_with_const(self): return bool(self.position_and_flags & FO_REPLACED_WITH_CONST) From pypy.commits at gmail.com Sat Mar 19 04:52:41 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 19 Mar 2016 01:52:41 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: Test, and probably fix translation Message-ID: 
<56ed1359.8216c20a.704ee.ffffb6dc@mx.google.com> Author: Armin Rigo Branch: heapcache-refactor Changeset: r83160:431096e1fd42 Date: 2016-03-19 09:52 +0100 http://bitbucket.org/pypy/pypy/changeset/431096e1fd42/ Log: Test, and probably fix translation diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -596,6 +596,7 @@ FO_REPLACED_WITH_CONST = r_uint(1) FO_POSITION_SHIFT = 1 +FO_POSITION_MASK = r_uint(0xFFFFFFFE) class FrontendOp(AbstractResOp): @@ -615,7 +616,7 @@ def set_position(self, new_pos): assert new_pos >= 0 - self.position_and_flags &= ~0xFFFFFFFE + self.position_and_flags &= ~FO_POSITION_MASK self.position_and_flags |= r_uint(new_pos << FO_POSITION_SHIFT) def is_replaced_with_const(self): diff --git a/rpython/jit/metainterp/test/test_history.py b/rpython/jit/metainterp/test/test_history.py --- a/rpython/jit/metainterp/test/test_history.py +++ b/rpython/jit/metainterp/test/test_history.py @@ -67,6 +67,8 @@ assert f.get_position() == 42 f = FrontendOp(-56) assert f.get_position() == -56 + f.set_position(6519) + assert f.get_position() == 6519 class TestZTranslated(StandaloneTests): def test_ztranslated_same_constant_float(self): From pypy.commits at gmail.com Sat Mar 19 04:53:42 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sat, 19 Mar 2016 01:53:42 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: fix translation Message-ID: <56ed1396.c65b1c0a.394c7.fffff1bf@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83161:a9e78ff3dcb9 Date: 2016-03-19 09:53 +0100 http://bitbucket.org/pypy/pypy/changeset/a9e78ff3dcb9/ Log: fix translation diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -313,12 +313,14 @@ @jit.elidable_compatible(quasi_immut_field_name_for_second_arg="version") def _type_safe_to_do_getattr(self, version): + from pypy.objspace.descroperation import object_getattribute # it's safe if the version is not None and the type does not define its # own __getattribute__ if version is None: return False w_type = self.terminator.w_cls - return w_type.has_object_getattribute() + w_descr = self._type_lookup_pure('__getattribute__') + return w_descr is object_getattribute(self.space) def _type_lookup(self, name): if not self._type_safe_to_do_getattr(): From pypy.commits at gmail.com Sat Mar 19 04:55:36 2016 From: pypy.commits at gmail.com (fijal) Date: Sat, 19 Mar 2016 01:55:36 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: fixes Message-ID: <56ed1408.96811c0a.d5976.ffffef09@mx.google.com> Author: fijal Branch: heapcache-refactor Changeset: r83162:d1552513a12b Date: 2016-03-19 10:54 +0200 http://bitbucket.org/pypy/pypy/changeset/d1552513a12b/ Log: fixes diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -455,7 +455,7 @@ if box is not frame.current_op: value = frame.env[box] else: - value = box.getvalue() # 0 or 0.0 or NULL + value = 0 # box.getvalue() # 0 or 0.0 or NULL else: value = None values.append(value) diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -715,7 +715,7 @@ @specialize.argtype(3) def record(self, opnum, argboxes, value, descr=None): if self.trace is None: - pos = -1 
+ pos = 2**14 - 1 else: pos = self.trace.record_op(opnum, argboxes, descr) if value is None: diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2822,7 +2822,7 @@ self.history.record_nospec(rop.VIRTUAL_REF_FINISH, [vrefbox, virtualbox], None) newop = self.history.record_nospec(opnum, arglist, descr) - op.position = newop.position + op.set_position(newop.get_position()) # mark by replacing it with ConstPtr(NULL) self.virtualref_boxes[i+1] = self.cpu.ts.CONST_NULL From pypy.commits at gmail.com Sat Mar 19 04:58:08 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 19 Mar 2016 01:58:08 -0700 (PDT) Subject: [pypy-commit] pypy heapcache-refactor: close, merging into jit-leaner-frontend Message-ID: <56ed14a0.2179c20a.32153.ffffb26d@mx.google.com> Author: Armin Rigo Branch: heapcache-refactor Changeset: r83163:99278eb0c445 Date: 2016-03-19 09:57 +0100 http://bitbucket.org/pypy/pypy/changeset/99278eb0c445/ Log: close, merging into jit-leaner-frontend From pypy.commits at gmail.com Sat Mar 19 04:58:10 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 19 Mar 2016 01:58:10 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: hg merge heapcache-refactor Message-ID: <56ed14a2.03dd1c0a.c9780.ffffef7b@mx.google.com> Author: Armin Rigo Branch: jit-leaner-frontend Changeset: r83164:5ad06b41df68 Date: 2016-03-19 09:57 +0100 http://bitbucket.org/pypy/pypy/changeset/5ad06b41df68/ Log: hg merge heapcache-refactor diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -455,7 +455,7 @@ if box is not frame.current_op: value = frame.env[box] else: - value = box.getvalue() # 0 or 0.0 or NULL + value = 0 # box.getvalue() # 0 or 0.0 or NULL else: value = None values.append(value) diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -1,33 +1,54 @@ -from rpython.jit.metainterp.history import ConstInt +from rpython.jit.metainterp.history import Const, ConstInt +from rpython.jit.metainterp.history import FrontendOp, RefFrontendOp from rpython.jit.metainterp.resoperation import rop, OpHelpers +from rpython.jit.metainterp.executor import constant_from_op +from rpython.rlib.rarithmetic import r_uint32, r_uint +from rpython.rlib.objectmodel import always_inline -class HeapCacheValue(object): - def __init__(self, box): - self.box = box - self.likely_virtual = False - self.reset_keep_likely_virtual() - def reset_keep_likely_virtual(self): - self.known_class = False - self.known_nullity = False - # did we see the allocation during tracing? - self.seen_allocation = False - self.is_unescaped = False - self.nonstandard_virtualizable = False - self.length = None - self.dependencies = None +# RefFrontendOp._heapc_flags: +HF_LIKELY_VIRTUAL = 0x01 +HF_KNOWN_CLASS = 0x02 +HF_KNOWN_NULLITY = 0x04 +HF_SEEN_ALLOCATION = 0x08 # did we see the allocation during tracing? 
+HF_IS_UNESCAPED = 0x10 +HF_NONSTD_VABLE = 0x20 - def __repr__(self): - return 'HeapCacheValue(%s)' % (self.box, ) +_HF_VERSION_INC = 0x40 # must be last +_HF_VERSION_MAX = r_uint(2 ** 32 - _HF_VERSION_INC) + + at always_inline +def add_flags(ref_frontend_op, flags): + f = ref_frontend_op._get_heapc_flags() + f |= r_uint(flags) + ref_frontend_op._set_heapc_flags(f) + + at always_inline +def remove_flags(ref_frontend_op, flags): + f = ref_frontend_op._get_heapc_flags() + f &= r_uint(~flags) + ref_frontend_op._set_heapc_flags(f) + + at always_inline +def test_flags(ref_frontend_op, flags): + f = ref_frontend_op._get_heapc_flags() + return bool(f & r_uint(flags)) + +def maybe_replace_with_const(box): + if not isinstance(box, Const) and box.is_replaced_with_const(): + return constant_from_op(box) + else: + return box class CacheEntry(object): - def __init__(self): - # both are {from_value: to_value} dicts + def __init__(self, heapcache): + # both are {from_ref_box: to_field_box} dicts # the first is for boxes where we did not see the allocation, the # second for anything else. the reason that distinction makes sense is # because if we saw the allocation, we know it cannot alias with # anything else where we saw the allocation. + self.heapcache = heapcache self.cache_anything = {} self.cache_seen_allocation = {} @@ -36,112 +57,137 @@ self.cache_seen_allocation.clear() self.cache_anything.clear() - def _getdict(self, value): - if value.seen_allocation: + def _seen_alloc(self, ref_box): + if not isinstance(ref_box, RefFrontendOp): + return False + return self.heapcache._check_flag(ref_box, HF_SEEN_ALLOCATION) + + def _getdict(self, seen_alloc): + if seen_alloc: return self.cache_seen_allocation else: return self.cache_anything - def do_write_with_aliasing(self, value, fieldvalue): - self._clear_cache_on_write(value.seen_allocation) - self._getdict(value)[value] = fieldvalue + def do_write_with_aliasing(self, ref_box, fieldbox): + seen_alloc = self._seen_alloc(ref_box) + self._clear_cache_on_write(seen_alloc) + self._getdict(seen_alloc)[ref_box] = fieldbox - def read(self, value): - return self._getdict(value).get(value, None) + def read(self, ref_box): + dict = self._getdict(self._seen_alloc(ref_box)) + try: + res_box = dict[ref_box] + except KeyError: + return None + return maybe_replace_with_const(res_box) - def read_now_known(self, value, fieldvalue): - self._getdict(value)[value] = fieldvalue + def read_now_known(self, ref_box, fieldbox): + self._getdict(self._seen_alloc(ref_box))[ref_box] = fieldbox def invalidate_unescaped(self): self._invalidate_unescaped(self.cache_anything) self._invalidate_unescaped(self.cache_seen_allocation) def _invalidate_unescaped(self, d): - for value in d.keys(): - if not value.is_unescaped: - del d[value] + for ref_box in d.keys(): + if not self.heapcache.is_unescaped(ref_box): + del d[ref_box] class FieldUpdater(object): - def __init__(self, heapcache, value, cache, fieldvalue): - self.heapcache = heapcache - self.value = value + def __init__(self, ref_box, cache, fieldbox): + self.ref_box = ref_box self.cache = cache - if fieldvalue is not None: - self.currfieldbox = fieldvalue.box - else: - self.currfieldbox = None + self.currfieldbox = fieldbox # <= read directly from pyjitpl.py def getfield_now_known(self, fieldbox): - fieldvalue = self.heapcache.getvalue(fieldbox) - self.cache.read_now_known(self.value, fieldvalue) + self.cache.read_now_known(self.ref_box, fieldbox) def setfield(self, fieldbox): - fieldvalue = self.heapcache.getvalue(fieldbox) - 
self.cache.do_write_with_aliasing(self.value, fieldvalue) + self.cache.do_write_with_aliasing(self.ref_box, fieldbox) + +class DummyFieldUpdater(FieldUpdater): + def __init__(self): + self.currfieldbox = None + + def getfield_now_known(self, fieldbox): + pass + + def setfield(self, fieldbox): + pass + +dummy_field_updater = DummyFieldUpdater() class HeapCache(object): def __init__(self): + # Works with flags stored on RefFrontendOp._heapc_flags. + # There are two ways to do a global resetting of these flags: + # reset() and reset_keep_likely_virtual(). The basic idea is + # to use a version number in each RefFrontendOp, and in order + # to reset the flags globally, we increment the global version + # number in this class. Then when we read '_heapc_flags' we + # also check if the associated version number is up-to-date + # or not. More precisely, we have two global version numbers + # here: 'head_version' and 'likely_virtual_version'. Normally + # we use 'head_version'. For is_likely_virtual() though, we + # use the other, older version number. + self.head_version = r_uint(0) + self.likely_virtual_version = r_uint(0) self.reset() def reset(self): - # maps boxes to values - self.values = {} - # store the boxes that contain newly allocated objects, this maps the - # boxes to a bool, the bool indicates whether or not the object has - # escaped the trace or not (True means the box never escaped, False - # means it did escape), its presences in the mapping shows that it was - # allocated inside the trace - #if trace_branch: - #self.new_boxes = {} - # pass - #else: - #for box in self.new_boxes: - # self.new_boxes[box] = False - # pass - #if reset_virtuals: - # self.likely_virtuals = {} # only for jit.isvirtual() - # Tracks which boxes should be marked as escaped when the key box - # escapes. - #self.dependencies = {} - + # Global reset of all flags. Update both version numbers so + # that any access to '_heapc_flags' will be marked as outdated. + assert self.head_version < _HF_VERSION_MAX + self.head_version += _HF_VERSION_INC + self.likely_virtual_version = self.head_version + # # heap cache # maps descrs to CacheEntry self.heap_cache = {} # heap array cache - # maps descrs to {index: {from_value: to_value}} dicts + # maps descrs to {index: CacheEntry} dicts self.heap_array_cache = {} def reset_keep_likely_virtuals(self): - for value in self.values.itervalues(): - value.reset_keep_likely_virtual() + # Update only 'head_version', but 'likely_virtual_version' remains + # at its older value. + assert self.head_version < _HF_VERSION_MAX + self.head_version += _HF_VERSION_INC self.heap_cache = {} self.heap_array_cache = {} - def getvalue(self, box, create=True): - value = self.values.get(box, None) - if not value and create: - value = self.values[box] = HeapCacheValue(box) - return value + @always_inline + def test_head_version(self, ref_frontend_op): + return ref_frontend_op._get_heapc_flags() >= self.head_version - def getvalues(self, boxes): - return [self.getvalue(box) for box in boxes] + @always_inline + def test_likely_virtual_version(self, ref_frontend_op): + return ref_frontend_op._get_heapc_flags() >= self.likely_virtual_version + + def update_version(self, ref_frontend_op): + """Ensure the version of 'ref_frontend_op' is current. If not, + it will update 'ref_frontend_op' (removing most flags currently set). 
+ """ + if not self.test_head_version(ref_frontend_op): + f = self.head_version + if (self.test_likely_virtual_version(ref_frontend_op) and + test_flags(ref_frontend_op, HF_LIKELY_VIRTUAL)): + f |= HF_LIKELY_VIRTUAL + ref_frontend_op._set_heapc_flags(f) + ref_frontend_op._heapc_deps = None def invalidate_caches(self, opnum, descr, argboxes): self.mark_escaped(opnum, descr, argboxes) self.clear_caches(opnum, descr, argboxes) def _escape_from_write(self, box, fieldbox): - value = self.getvalue(box, create=False) - fieldvalue = self.getvalue(fieldbox, create=False) - if (value is not None and value.is_unescaped and - fieldvalue is not None and fieldvalue.is_unescaped): - if value.dependencies is None: - value.dependencies = [] - value.dependencies.append(fieldvalue) - elif fieldvalue is not None: - self._escape(fieldvalue) + if self.is_unescaped(box) and self.is_unescaped(fieldbox): + deps = self._get_deps(box) + deps.append(fieldbox) + elif fieldbox is not None: + self._escape_box(fieldbox) def mark_escaped(self, opnum, descr, argboxes): if opnum == rop.SETFIELD_GC: @@ -176,19 +222,20 @@ self._escape_box(box) def _escape_box(self, box): - value = self.getvalue(box, create=False) - if not value: - return - self._escape(value) - - def _escape(self, value): - value.is_unescaped = False - value.likely_virtual = False - deps = value.dependencies - value.dependencies = None - if deps is not None: - for dep in deps: - self._escape(dep) + if isinstance(box, RefFrontendOp): + remove_flags(box, HF_LIKELY_VIRTUAL | HF_IS_UNESCAPED) + deps = box._heapc_deps + if deps is not None: + if not self.test_head_version(box): + box._heapc_deps = None + else: + # 'deps[0]' is abused to store the array length, keep it + if deps[0] is None: + box._heapc_deps = None + else: + box._heapc_deps = [deps[0]] + for i in range(1, len(deps)): + self._escape_box(deps[i]) def clear_caches(self, opnum, descr, argboxes): if (opnum == rop.SETFIELD_GC or @@ -241,7 +288,8 @@ self.reset_keep_likely_virtuals() def _clear_caches_arraycopy(self, opnum, desrc, argboxes, effectinfo): - seen_allocation_of_target = self.getvalue(argboxes[2]).seen_allocation + seen_allocation_of_target = self._check_flag( + argboxes[2], HF_SEEN_ALLOCATION) if ( isinstance(argboxes[3], ConstInt) and isinstance(argboxes[4], ConstInt) and @@ -285,74 +333,82 @@ return self.reset_keep_likely_virtuals() + def _get_deps(self, box): + if not isinstance(box, RefFrontendOp): + return None + self.update_version(box) + if box._heapc_deps is None: + box._heapc_deps = [None] + return box._heapc_deps + + def _check_flag(self, box, flag): + return (isinstance(box, RefFrontendOp) and + self.test_head_version(box) and + test_flags(box, flag)) + + def _set_flag(self, box, flag): + assert isinstance(box, RefFrontendOp) + self.update_version(box) + add_flags(box, flag) + def is_class_known(self, box): - value = self.getvalue(box, create=False) - if value: - return value.known_class - return False + return self._check_flag(box, HF_KNOWN_CLASS) def class_now_known(self, box): - self.getvalue(box).known_class = True + if isinstance(box, Const): + return + self._set_flag(box, HF_KNOWN_CLASS) def is_nullity_known(self, box): - value = self.getvalue(box, create=False) - if value: - return value.known_nullity - return False + if isinstance(box, Const): + return bool(box.getref_base()) + return self._check_flag(box, HF_KNOWN_NULLITY) def nullity_now_known(self, box): - self.getvalue(box).known_nullity = True + if isinstance(box, Const): + return + self._set_flag(box, HF_KNOWN_NULLITY) 
def is_nonstandard_virtualizable(self, box): - value = self.getvalue(box, create=False) - if value: - return value.nonstandard_virtualizable - return False + return self._check_flag(box, HF_NONSTD_VABLE) def nonstandard_virtualizables_now_known(self, box): - self.getvalue(box).nonstandard_virtualizable = True + self._set_flag(box, HF_NONSTD_VABLE) def is_unescaped(self, box): - value = self.getvalue(box, create=False) - if value: - return value.is_unescaped - return False + return self._check_flag(box, HF_IS_UNESCAPED) def is_likely_virtual(self, box): - value = self.getvalue(box, create=False) - if value: - return value.likely_virtual - return False + # note: this is different from _check_flag() + return (isinstance(box, RefFrontendOp) and + self.test_likely_virtual_version(box) and + test_flags(box, HF_LIKELY_VIRTUAL)) def new(self, box): - value = self.getvalue(box) - value.is_unescaped = True - value.likely_virtual = True - value.seen_allocation = True + assert isinstance(box, RefFrontendOp) + self.update_version(box) + add_flags(box, HF_LIKELY_VIRTUAL | HF_SEEN_ALLOCATION | HF_IS_UNESCAPED) def new_array(self, box, lengthbox): self.new(box) self.arraylen_now_known(box, lengthbox) def getfield(self, box, descr): - value = self.getvalue(box, create=False) - if value: - cache = self.heap_cache.get(descr, None) - if cache: - tovalue = cache.read(value) - if tovalue: - return tovalue.box + cache = self.heap_cache.get(descr, None) + if cache: + return cache.read(box) return None def get_field_updater(self, box, descr): - value = self.getvalue(box) + if not isinstance(box, RefFrontendOp): + return dummy_field_updater cache = self.heap_cache.get(descr, None) if cache is None: - cache = self.heap_cache[descr] = CacheEntry() - fieldvalue = None + cache = self.heap_cache[descr] = CacheEntry(self) + fieldbox = None else: - fieldvalue = cache.read(value) - return FieldUpdater(self, value, cache, fieldvalue) + fieldbox = cache.read(box) + return FieldUpdater(box, cache, fieldbox) def getfield_now_known(self, box, descr, fieldbox): upd = self.get_field_updater(box, descr) @@ -365,17 +421,12 @@ def getarrayitem(self, box, indexbox, descr): if not isinstance(indexbox, ConstInt): return None - value = self.getvalue(box, create=False) - if value is None: - return None index = indexbox.getint() cache = self.heap_array_cache.get(descr, None) if cache: indexcache = cache.get(index, None) if indexcache is not None: - resvalue = indexcache.read(value) - if resvalue: - return resvalue.box + return indexcache.read(box) return None def _get_or_make_array_cache_entry(self, indexbox, descr): @@ -385,16 +436,14 @@ cache = self.heap_array_cache.setdefault(descr, {}) indexcache = cache.get(index, None) if indexcache is None: - cache[index] = indexcache = CacheEntry() + cache[index] = indexcache = CacheEntry(self) return indexcache def getarrayitem_now_known(self, box, indexbox, fieldbox, descr): - value = self.getvalue(box) - fieldvalue = self.getvalue(fieldbox) indexcache = self._get_or_make_array_cache_entry(indexbox, descr) if indexcache: - indexcache.read_now_known(value, fieldvalue) + indexcache.read_now_known(box, fieldbox) def setarrayitem(self, box, indexbox, fieldbox, descr): if not isinstance(indexbox, ConstInt): @@ -402,25 +451,31 @@ if cache is not None: cache.clear() return - value = self.getvalue(box) - fieldvalue = self.getvalue(fieldbox) indexcache = self._get_or_make_array_cache_entry(indexbox, descr) if indexcache: - indexcache.do_write_with_aliasing(value, fieldvalue) + 
indexcache.do_write_with_aliasing(box, fieldbox) def arraylen(self, box): - value = self.getvalue(box, create=False) - if value and value.length: - return value.length.box + if (isinstance(box, RefFrontendOp) and + self.test_head_version(box) and + box._heapc_deps is not None): + res_box = box._heapc_deps[0] + if res_box is not None: + return maybe_replace_with_const(res_box) return None def arraylen_now_known(self, box, lengthbox): - value = self.getvalue(box) - value.length = self.getvalue(lengthbox) + # we store in '_heapc_deps' a list of boxes: the *first* box is + # the known length or None, and the remaining boxes are the + # regular dependencies. + if isinstance(box, Const): + return + deps = self._get_deps(box) + assert deps is not None + deps[0] = lengthbox def replace_box(self, oldbox, newbox): - value = self.getvalue(oldbox, create=False) - if value is None: - return - value.box = newbox - self.values[newbox] = value + # here, only for replacing a box with a const + if isinstance(oldbox, FrontendOp) and isinstance(newbox, Const): + assert newbox.same_constant(constant_from_op(oldbox)) + oldbox.set_replaced_with_const() diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -3,6 +3,7 @@ from rpython.rlib.objectmodel import we_are_translated, Symbolic from rpython.rlib.objectmodel import compute_unique_id, specialize from rpython.rlib.rarithmetic import r_int64, is_valid_int +from rpython.rlib.rarithmetic import LONG_BIT, intmask, r_uint from rpython.conftest import option @@ -74,54 +75,6 @@ ) #compute_unique_id(box)) -class XxxAbstractValue(object): - __slots__ = () - - def getint(self): - raise NotImplementedError - - def getfloatstorage(self): - raise NotImplementedError - - def getfloat(self): - return longlong.getrealfloat(self.getfloatstorage()) - - def getref_base(self): - raise NotImplementedError - - def getref(self, TYPE): - raise NotImplementedError - getref._annspecialcase_ = 'specialize:arg(1)' - - def constbox(self): - raise NotImplementedError - - def getaddr(self): - "Only for raw addresses (BoxInt & ConstInt), not for GC addresses" - raise NotImplementedError - - def sort_key(self): - raise NotImplementedError - - def nonnull(self): - raise NotImplementedError - - def repr_rpython(self): - return '%s' % self - - def _get_str(self): - raise NotImplementedError - - def same_box(self, other): - return self is other - - def same_shape(self, other): - # only structured containers can compare their shape (vector box) - return True - - def getaccum(self): - return None - class AbstractDescr(AbstractValue): __slots__ = ('descr_index',) llopaque = True @@ -641,34 +594,76 @@ # ____________________________________________________________ +FO_REPLACED_WITH_CONST = r_uint(1) +FO_POSITION_SHIFT = 1 +FO_POSITION_MASK = r_uint(0xFFFFFFFE) + + class FrontendOp(AbstractResOp): type = 'v' - _attrs_ = ('position',) + _attrs_ = ('position_and_flags',) def __init__(self, pos): - self.position = pos + # p is the 32-bit position shifted left by one (might be negative, + # but casted to the 32-bit UINT type) + p = rffi.cast(rffi.UINT, pos << FO_POSITION_SHIFT) + self.position_and_flags = r_uint(p) # zero-extended to a full word def get_position(self): - return self.position + # p is the signed 32-bit position, from self.position_and_flags + p = rffi.cast(rffi.INT, self.position_and_flags) + return intmask(p) >> FO_POSITION_SHIFT + + def set_position(self, new_pos): + assert 
new_pos >= 0 + self.position_and_flags &= ~FO_POSITION_MASK + self.position_and_flags |= r_uint(new_pos << FO_POSITION_SHIFT) + + def is_replaced_with_const(self): + return bool(self.position_and_flags & FO_REPLACED_WITH_CONST) + + def set_replaced_with_const(self): + self.position_and_flags |= FO_REPLACED_WITH_CONST + + def __repr__(self): + return '%s(0x%x)' % (self.__class__.__name__, self.position_and_flags) class IntFrontendOp(IntOp, FrontendOp): - _attrs_ = ('position', '_resint') + _attrs_ = ('position_and_flags', '_resint') def copy_value_from(self, other): self._resint = other.getint() class FloatFrontendOp(FloatOp, FrontendOp): - _attrs_ = ('position', '_resfloat') + _attrs_ = ('position_and_flags', '_resfloat') def copy_value_from(self, other): self._resfloat = other.getfloatstorage() class RefFrontendOp(RefOp, FrontendOp): - _attrs_ = ('position', '_resref') + _attrs_ = ('position_and_flags', '_resref', '_heapc_deps') + if LONG_BIT == 32: + _attrs_ += ('_heapc_flags',) # on 64 bit, this gets stored into the + _heapc_flags = r_uint(0) # high 32 bits of 'position_and_flags' + _heapc_deps = None def copy_value_from(self, other): self._resref = other.getref_base() + if LONG_BIT == 32: + def _get_heapc_flags(self): + return self._heapc_flags + def _set_heapc_flags(self, value): + self._heapc_flags = value + else: + def _get_heapc_flags(self): + return self.position_and_flags >> 32 + def _set_heapc_flags(self, value): + self.position_and_flags = ( + (self.position_and_flags & 0xFFFFFFFF) | + (value << 32)) + + class History(object): ends_with_jump = False trace = None @@ -688,7 +683,7 @@ # hack to record the ops *after* we know our inputargs for (opnum, argboxes, op, descr) in self._cache: pos = self.trace.record_op(opnum, argboxes, descr) - op.position = pos + op.set_position(pos) self._cache = None def length(self): @@ -701,7 +696,7 @@ self.trace.cut_at(cut_at) def any_operation(self): - return self.trace._count > 0 + return self.trace._count > self.trace._start @specialize.argtype(2) def set_op_value(self, op, value): @@ -720,7 +715,7 @@ @specialize.argtype(3) def record(self, opnum, argboxes, value, descr=None): if self.trace is None: - pos = -1 + pos = 2**14 - 1 else: pos = self.trace.record_op(opnum, argboxes, descr) if value is None: diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -10,7 +10,7 @@ from rpython.jit.metainterp.history import ConstInt, Const, ConstFloat, ConstPtr from rpython.jit.metainterp.resoperation import AbstractResOp, AbstractInputArg,\ ResOperation, oparity, rop, opwithdescr, GuardResOp, IntOp, FloatOp, RefOp -from rpython.rlib.rarithmetic import intmask +from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rlib.objectmodel import we_are_translated from rpython.rtyper.lltypesystem import rffi, lltype, llmemory from rpython.jit.metainterp.typesystem import llhelper @@ -63,24 +63,19 @@ if force_inputargs is not None: self.inputargs = [rop.inputarg_from_tp(arg.type) for arg in force_inputargs] - self._inputargs = [None] * len(trace.inputargs) for i, arg in enumerate(force_inputargs): - if arg.get_position() >= 0: - self._cache[arg.get_position()] = self.inputargs[i] - else: - self._inputargs[-arg.get_position()-1] = self.inputargs[i] + self._cache[arg.get_position()] = self.inputargs[i] else: self.inputargs = [rop.inputarg_from_tp(arg.type) for arg in self.trace.inputargs] - self._inputargs = self.inputargs[:] + for i, arg 
in enumerate(self.inputargs): + self._cache[i] = arg self.start = start self.pos = start - self._count = 0 + self._count = start self.end = end def _get(self, i): - if i < 0: - return self._inputargs[-i - 1] res = self._cache[i] assert res is not None return res @@ -197,9 +192,10 @@ self._floats_dict = {} self._snapshots = [] for i, inparg in enumerate(inputargs): - assert isinstance(inparg, AbstractInputArg) - inparg.position = -i - 1 - self._count = 0 + inparg.set_position(i) + self._count = len(inputargs) + self._start = len(inputargs) + self._pos = self._start self.inputargs = inputargs def append(self, v): @@ -284,8 +280,7 @@ self._refs.append(box.getref_base()) return tag(TAGCONSTPTR, v) elif isinstance(box, AbstractResOp): - return tag(TAGBOX, box.get_position()) - elif isinstance(box, AbstractInputArg): + assert box.get_position() >= 0 return tag(TAGBOX, box.get_position()) else: assert False, "unreachable code" @@ -362,7 +357,7 @@ def get_iter(self, metainterp_sd=None): assert metainterp_sd - return TraceIterator(self, 0, self._pos, metainterp_sd=metainterp_sd) + return TraceIterator(self, self._start, self._pos, metainterp_sd=metainterp_sd) def unpack(self): iter = self.get_iter() diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2822,7 +2822,7 @@ self.history.record_nospec(rop.VIRTUAL_REF_FINISH, [vrefbox, virtualbox], None) newop = self.history.record_nospec(opnum, arglist, descr) - op.position = newop.position + op.set_position(newop.get_position()) # mark by replacing it with ConstPtr(NULL) self.virtualref_boxes[i+1] = self.cpu.ts.CONST_NULL diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py --- a/rpython/jit/metainterp/resume.py +++ b/rpython/jit/metainterp/resume.py @@ -1,9 +1,9 @@ from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp import jitprof from rpython.jit.metainterp.history import (Const, ConstInt, getkind, - INT, REF, FLOAT, AbstractDescr) -from rpython.jit.metainterp.resoperation import rop, InputArgInt,\ - InputArgFloat, InputArgRef + INT, REF, FLOAT, AbstractDescr, IntFrontendOp, RefFrontendOp, + FloatFrontendOp) +from rpython.jit.metainterp.resoperation import rop from rpython.rlib import rarithmetic, rstack from rpython.rlib.objectmodel import (we_are_translated, specialize, compute_unique_id) @@ -1264,11 +1264,14 @@ num += len(self.liveboxes) assert num >= 0 if kind == INT: - box = InputArgInt(self.cpu.get_int_value(self.deadframe, num)) + box = IntFrontendOp(0) + box.setint(self.cpu.get_int_value(self.deadframe, num)) elif kind == REF: - box = InputArgRef(self.cpu.get_ref_value(self.deadframe, num)) + box = RefFrontendOp(0) + box.setref_base(self.cpu.get_ref_value(self.deadframe, num)) elif kind == FLOAT: - box = InputArgFloat(self.cpu.get_float_value(self.deadframe, num)) + box = FloatFrontendOp(0) + box.setfloatstorage(self.cpu.get_float_value(self.deadframe, num)) else: assert 0, "bad kind: %d" % ord(kind) self.liveboxes[num] = box diff --git a/rpython/jit/metainterp/test/test_heapcache.py b/rpython/jit/metainterp/test/test_heapcache.py --- a/rpython/jit/metainterp/test/test_heapcache.py +++ b/rpython/jit/metainterp/test/test_heapcache.py @@ -1,15 +1,9 @@ +import py from rpython.jit.metainterp.heapcache import HeapCache from rpython.jit.metainterp.resoperation import rop, InputArgInt -from rpython.jit.metainterp.history import ConstInt, BasicFailDescr +from 
rpython.jit.metainterp.history import ConstInt, ConstPtr, BasicFailDescr +from rpython.jit.metainterp.history import IntFrontendOp, RefFrontendOp -box1 = "box1" -box2 = "box2" -box3 = "box3" -box4 = "box4" -box5 = "box5" -lengthbox1 = object() -lengthbox2 = object() -lengthbox3 = object() descr1 = object() descr2 = object() descr3 = object() @@ -59,43 +53,52 @@ class TestHeapCache(object): def test_known_class_box(self): h = HeapCache() - assert not h.is_class_known(1) - assert not h.is_class_known(2) - h.class_now_known(1) - assert h.is_class_known(1) - assert not h.is_class_known(2) + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + assert not h.is_class_known(box1) + assert not h.is_class_known(box2) + h.class_now_known(box1) + assert h.is_class_known(box1) + assert not h.is_class_known(box2) h.reset() - assert not h.is_class_known(1) - assert not h.is_class_known(2) + assert not h.is_class_known(box1) + assert not h.is_class_known(box2) def test_known_nullity(self): h = HeapCache() - assert not h.is_nullity_known(1) - assert not h.is_nullity_known(2) - h.nullity_now_known(1) - assert h.is_nullity_known(1) - assert not h.is_nullity_known(2) + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + assert not h.is_nullity_known(box1) + assert not h.is_nullity_known(box2) + h.nullity_now_known(box1) + assert h.is_nullity_known(box1) + assert not h.is_nullity_known(box2) h.reset() - assert not h.is_nullity_known(1) - assert not h.is_nullity_known(2) + assert not h.is_nullity_known(box1) + assert not h.is_nullity_known(box2) def test_nonstandard_virtualizable(self): h = HeapCache() - assert not h.is_nonstandard_virtualizable(1) - assert not h.is_nonstandard_virtualizable(2) - h.nonstandard_virtualizables_now_known(1) - assert h.is_nonstandard_virtualizable(1) - assert not h.is_nonstandard_virtualizable(2) + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + assert not h.is_nonstandard_virtualizable(box1) + assert not h.is_nonstandard_virtualizable(box2) + h.nonstandard_virtualizables_now_known(box1) + assert h.is_nonstandard_virtualizable(box1) + assert not h.is_nonstandard_virtualizable(box2) h.reset() - assert not h.is_nonstandard_virtualizable(1) - assert not h.is_nonstandard_virtualizable(2) + assert not h.is_nonstandard_virtualizable(box1) + assert not h.is_nonstandard_virtualizable(box2) def test_heapcache_fields(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) assert h.getfield(box1, descr1) is None assert h.getfield(box1, descr2) is None h.setfield(box1, box2, descr1) @@ -119,6 +122,10 @@ def test_heapcache_read_fields_multiple(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) h.getfield_now_known(box1, descr1, box2) h.getfield_now_known(box3, descr1, box4) assert h.getfield(box1, descr1) is box2 @@ -134,6 +141,10 @@ def test_heapcache_write_fields_multiple(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) h.setfield(box1, box2, descr1) assert h.getfield(box1, descr1) is box2 h.setfield(box3, box4, descr1) @@ -141,6 +152,10 @@ assert h.getfield(box1, descr1) is None # box1 and box3 can alias h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) h.new(box1) h.setfield(box1, box2, descr1) assert h.getfield(box1, descr1) is box2 @@ -149,6 +164,10 @@ assert h.getfield(box1, descr1) is None # box1 and box3 can alias h = 
HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) h.new(box1) h.new(box3) h.setfield(box1, box2, descr1) @@ -162,6 +181,10 @@ def test_heapcache_arrays(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) assert h.getarrayitem(box1, index1, descr1) is None assert h.getarrayitem(box1, index1, descr2) is None assert h.getarrayitem(box1, index2, descr1) is None @@ -204,6 +227,10 @@ def test_heapcache_array_nonconst_index(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) h.setarrayitem(box1, index1, box2, descr1) h.setarrayitem(box1, index2, box4, descr1) assert h.getarrayitem(box1, index1, descr1) is box2 @@ -214,6 +241,10 @@ def test_heapcache_read_fields_multiple_array(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) h.getarrayitem_now_known(box1, index1, box2, descr1) h.getarrayitem_now_known(box3, index1, box4, descr1) assert h.getarrayitem(box1, index1, descr1) is box2 @@ -229,6 +260,10 @@ def test_heapcache_write_fields_multiple_array(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) h.setarrayitem(box1, index1, box2, descr1) assert h.getarrayitem(box1, index1, descr1) is box2 h.setarrayitem(box3, index1, box4, descr1) @@ -236,6 +271,10 @@ assert h.getarrayitem(box1, index1, descr1) is None # box1 and box3 can alias h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) h.new(box1) h.setarrayitem(box1, index1, box2, descr1) assert h.getarrayitem(box1, index1, descr1) is box2 @@ -244,6 +283,10 @@ assert h.getarrayitem(box1, index1, descr1) is None # box1 and box3 can alias h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) h.new(box1) h.new(box3) h.setarrayitem(box1, index1, box2, descr1) @@ -257,6 +300,10 @@ def test_length_cache(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + lengthbox1 = IntFrontendOp(11) + lengthbox2 = IntFrontendOp(12) h.new_array(box1, lengthbox1) assert h.arraylen(box1) is lengthbox1 @@ -267,6 +314,9 @@ def test_invalidate_cache(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box4 = RefFrontendOp(4) h.setfield(box1, box2, descr1) h.setarrayitem(box1, index1, box2, descr1) h.setarrayitem(box1, index2, box4, descr1) @@ -298,8 +348,13 @@ assert h.getarrayitem(box1, index1, descr1) is None assert h.getarrayitem(box1, index2, descr1) is None - def test_replace_box(self): + def test_replace_box_with_box(self): + py.test.skip("replacing a box with another box: not supported any more") h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) h.setfield(box1, box2, descr1) h.setfield(box1, box3, descr2) h.setfield(box2, box3, descr3) @@ -310,16 +365,22 @@ h.setfield(box4, box3, descr1) assert h.getfield(box4, descr1) is box3 + def test_replace_box_with_const(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + c_box3 = ConstPtr(ConstPtr.value) h.setfield(box1, box2, descr1) h.setfield(box1, box3, descr2) h.setfield(box2, box3, descr3) - h.replace_box(box3, box4) + h.replace_box(box3, c_box3) assert 
h.getfield(box1, descr1) is box2 - assert h.getfield(box1, descr2) is box4 - assert h.getfield(box2, descr3) is box4 + assert c_box3.same_constant(h.getfield(box1, descr2)) + assert c_box3.same_constant(h.getfield(box2, descr3)) def test_replace_box_twice(self): + py.test.skip("replacing a box with another box: not supported any more") h = HeapCache() h.setfield(box1, box2, descr1) h.setfield(box1, box3, descr2) @@ -343,6 +404,7 @@ assert h.getfield(box2, descr3) is box5 def test_replace_box_array(self): + py.test.skip("replacing a box with another box: not supported any more") h = HeapCache() h.setarrayitem(box1, index1, box2, descr1) h.setarrayitem(box1, index1, box3, descr2) @@ -362,6 +424,7 @@ assert h.arraylen(box4) is lengthbox2 def test_replace_box_array_twice(self): + py.test.skip("replacing a box with another box: not supported any more") h = HeapCache() h.setarrayitem(box1, index1, box2, descr1) h.setarrayitem(box1, index1, box3, descr2) @@ -382,8 +445,34 @@ h.replace_box(lengthbox2, lengthbox3) assert h.arraylen(box4) is lengthbox3 + def test_replace_box_with_const_in_array(self): + h = HeapCache() + box1 = RefFrontendOp(1) + lengthbox2 = IntFrontendOp(2) + lengthbox2.setint(10) + h.arraylen_now_known(box1, lengthbox2) + assert h.arraylen(box1) is lengthbox2 + c10 = ConstInt(10) + h.replace_box(lengthbox2, c10) + assert c10.same_constant(h.arraylen(box1)) + + box2 = IntFrontendOp(2) + box2.setint(12) + h.setarrayitem(box1, index2, box2, descr1) + assert h.getarrayitem(box1, index2, descr1) is box2 + c12 = ConstInt(12) + h.replace_box(box2, c12) + assert c12.same_constant(h.getarrayitem(box1, index2, descr1)) + def test_ll_arraycopy(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) + box5 = RefFrontendOp(5) + lengthbox1 = IntFrontendOp(11) + lengthbox2 = IntFrontendOp(12) h.new_array(box1, lengthbox1) h.setarrayitem(box1, index1, box2, descr1) h.new_array(box2, lengthbox1) @@ -412,6 +501,10 @@ def test_ll_arraycopy_differing_descrs(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + lengthbox2 = IntFrontendOp(12) h.setarrayitem(box1, index1, box2, descr2) assert h.getarrayitem(box1, index1, descr2) is box2 h.new_array(box2, lengthbox2) @@ -424,6 +517,9 @@ def test_ll_arraycopy_differing_descrs_nonconst_index(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) h.setarrayitem(box1, index1, box2, descr2) assert h.getarrayitem(box1, index1, descr2) is box2 h.invalidate_caches( @@ -435,6 +531,9 @@ def test_ll_arraycopy_result_propogated(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) h.setarrayitem(box1, index1, box2, descr1) h.invalidate_caches( rop.CALL_N, @@ -445,6 +544,11 @@ def test_ll_arraycopy_dest_new(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) + box4 = RefFrontendOp(4) + lengthbox1 = IntFrontendOp(11) h.new_array(box1, lengthbox1) h.setarrayitem(box3, index1, box4, descr1) h.invalidate_caches( @@ -455,6 +559,10 @@ def test_ll_arraycopy_doesnt_escape_arrays(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + lengthbox1 = IntFrontendOp(11) + lengthbox2 = IntFrontendOp(12) h.new_array(box1, lengthbox1) h.new_array(box2, lengthbox2) h.invalidate_caches( @@ -474,6 +582,8 @@ def test_unescaped(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) assert 
not h.is_unescaped(box1) h.new(box2) assert h.is_unescaped(box2) @@ -484,6 +594,9 @@ def test_unescaped_testing(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) h.new(box1) h.new(box2) assert h.is_unescaped(box1) @@ -502,6 +615,8 @@ def test_ops_dont_escape(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) h.new(box1) h.new(box2) assert h.is_unescaped(box1) @@ -515,6 +630,9 @@ def test_circular_virtuals(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + box3 = RefFrontendOp(3) h.new(box1) h.new(box2) h.invalidate_caches(rop.SETFIELD_GC, None, [box1, box2]) @@ -523,6 +641,10 @@ def test_unescaped_array(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) + lengthbox1 = IntFrontendOp(11) + lengthbox2 = IntFrontendOp(12) h.new_array(box1, lengthbox1) assert h.is_unescaped(box1) h.invalidate_caches(rop.SETARRAYITEM_GC, None, [box1, index1, box2]) @@ -546,6 +668,8 @@ def test_call_doesnt_invalidate_unescaped_boxes(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) h.new(box1) assert h.is_unescaped(box1) h.setfield(box1, box2, descr1) @@ -557,6 +681,9 @@ def test_call_doesnt_invalidate_unescaped_array_boxes(self): h = HeapCache() + box1 = RefFrontendOp(1) + box3 = RefFrontendOp(3) + lengthbox1 = IntFrontendOp(11) h.new_array(box1, lengthbox1) assert h.is_unescaped(box1) h.setarrayitem(box1, index1, box3, descr1) @@ -568,6 +695,8 @@ def test_bug_missing_ignored_operations(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) h.new(box1) h.new(box2) h.setfield(box1, box2, descr1) @@ -590,6 +719,8 @@ # calling some residual code that changes the values on box3: then # the content of box2 is still cached at the old value. 
h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) h.new(box1) h.new(box2) h.setfield(box1, box2, descr1) @@ -602,6 +733,8 @@ def test_bug_heap_cache_is_cleared_but_not_is_unescaped_2(self): h = HeapCache() + box1 = RefFrontendOp(1) + box2 = RefFrontendOp(2) h.new(box1) h.new(box2) h.setfield(box1, box2, descr1) @@ -624,6 +757,7 @@ def test_is_likely_virtual(self): h = HeapCache() + box1 = RefFrontendOp(1) h.new(box1) assert h.is_unescaped(box1) assert h.is_likely_virtual(box1) @@ -633,3 +767,29 @@ h._escape_box(box1) assert not h.is_unescaped(box1) assert not h.is_likely_virtual(box1) + + def test_is_likely_virtual_2(self): + h = HeapCache() + box1 = RefFrontendOp(1) + h.new(box1) + assert h.is_unescaped(box1) + assert h.is_likely_virtual(box1) + h.reset_keep_likely_virtuals() + assert not h.is_unescaped(box1) + assert h.is_likely_virtual(box1) + h.reset() # reset everything + assert not h.is_unescaped(box1) + assert not h.is_likely_virtual(box1) + + def test_is_likely_virtual_3(self): + h = HeapCache() + box1 = RefFrontendOp(1) + h.new(box1) + assert h.is_unescaped(box1) + assert h.is_likely_virtual(box1) + h.reset_keep_likely_virtuals() + assert not h.is_unescaped(box1) + assert h.is_likely_virtual(box1) + h.class_now_known(box1) # interaction of the two families of flags + assert not h.is_unescaped(box1) + assert h.is_likely_virtual(box1) diff --git a/rpython/jit/metainterp/test/test_history.py b/rpython/jit/metainterp/test/test_history.py --- a/rpython/jit/metainterp/test/test_history.py +++ b/rpython/jit/metainterp/test/test_history.py @@ -62,6 +62,14 @@ assert c5.nonnull() assert c6.nonnull() +def test_frontendop(): + f = FrontendOp(42) + assert f.get_position() == 42 + f = FrontendOp(-56) + assert f.get_position() == -56 + f.set_position(6519) + assert f.get_position() == 6519 + class TestZTranslated(StandaloneTests): def test_ztranslated_same_constant_float(self): def fn(args): diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -1,7 +1,7 @@ from rpython.jit.metainterp.opencoder import Trace, untag, TAGINT, TAGBOX -from rpython.jit.metainterp.resoperation import rop, InputArgInt, AbstractResOp -from rpython.jit.metainterp.history import ConstInt +from rpython.jit.metainterp.resoperation import rop, AbstractResOp +from rpython.jit.metainterp.history import ConstInt, IntFrontendOp from rpython.jit.metainterp.optimizeopt.optimizer import Optimizer from rpython.jit.metainterp import resume from rpython.jit.metainterp.test.strategies import lists_of_operations @@ -31,8 +31,11 @@ self.jitcode = jitcode self.boxes = boxes - def get_list_of_active_boxes(self, flag): - return self.boxes + def get_list_of_active_boxes(self, flag, new_array, encode): + a = new_array(len(self.boxes)) + for i, box in enumerate(self.boxes): + a[i] = encode(box) + return a def unpack_snapshot(t, op, pos): op.framestack = [] @@ -58,7 +61,7 @@ return iter.inputargs, l, iter def test_simple_iterator(self): - i0, i1 = InputArgInt(), InputArgInt() + i0, i1 = IntFrontendOp(0), IntFrontendOp(0) t = Trace([i0, i1]) add = FakeOp(t.record_op(rop.INT_ADD, [i0, i1])) t.record_op(rop.INT_ADD, [add, ConstInt(1)]) @@ -72,7 +75,7 @@ assert l[0].getarg(1) is i1 def test_rd_snapshot(self): - i0, i1 = InputArgInt(), InputArgInt() + i0, i1 = IntFrontendOp(0), IntFrontendOp(0) t = Trace([i0, i1]) add = FakeOp(t.record_op(rop.INT_ADD, [i0, i1])) 
t.record_op(rop.GUARD_FALSE, [add]) @@ -96,7 +99,7 @@ assert fstack[1].boxes == [i0, i0, l[0]] def test_read_snapshot_interface(self): - i0, i1, i2 = InputArgInt(), InputArgInt(), InputArgInt() + i0, i1, i2 = IntFrontendOp(0), IntFrontendOp(0), IntFrontendOp(0) t = Trace([i0, i1, i2]) t.record_op(rop.GUARD_TRUE, [i1]) frame0 = FakeFrame(1, JitCode(2), [i0, i1]) @@ -128,8 +131,9 @@ assert pc == 3 assert snapshot_iter.unpack_array(framestack[1].box_array) == [i2, i2] + # XXXX fixme @given(lists_of_operations()) - def test_random_snapshot(self, lst): + def xxx_test_random_snapshot(self, lst): inputargs, ops = lst t = Trace(inputargs) for op in ops: @@ -156,11 +160,11 @@ assert (((-iter._next() - 1) << 15) | (iter._next())) == i def test_cut_trace_from(self): - i0, i1, i2 = InputArgInt(), InputArgInt(), InputArgInt() + i0, i1, i2 = IntFrontendOp(0), IntFrontendOp(0), IntFrontendOp(0) t = Trace([i0, i1, i2]) - add1 = t.record_op(rop.INT_ADD, [i0, i1]) + add1 = FakeOp(t.record_op(rop.INT_ADD, [i0, i1])) cut_point = t.cut_point() - add2 = t.record_op(rop.INT_ADD, [add1, i1]) + add2 = FakeOp(t.record_op(rop.INT_ADD, [add1, i1])) t.record_op(rop.GUARD_TRUE, [add2]) resume.capture_resumedata([FakeFrame(3, JitCode(4), [add2, add1, i1])], None, [], t) @@ -174,9 +178,9 @@ class SomeDescr(AbstractDescr): pass - i0, i1, i2 = InputArgInt(), InputArgInt(), InputArgInt() + i0, i1, i2 = IntFrontendOp(0), IntFrontendOp(0), IntFrontendOp(0) t = Trace([i0, i1, i2]) - p0 = t.record_op(rop.NEW_WITH_VTABLE, [], descr=SomeDescr()) + p0 = FakeOp(t.record_op(rop.NEW_WITH_VTABLE, [], descr=SomeDescr())) t.record_op(rop.GUARD_TRUE, [i0]) resume.capture_resumedata([], [i1, i2, p0], [p0, i1], t) (i0, i1, i2), l, iter = self.unpack(t) diff --git a/rpython/jit/metainterp/warmstate.py b/rpython/jit/metainterp/warmstate.py --- a/rpython/jit/metainterp/warmstate.py +++ b/rpython/jit/metainterp/warmstate.py @@ -75,7 +75,9 @@ if in_const_box: return history.ConstPtr(value) else: - return resoperation.InputArgRef(value) + res = history.RefFrontendOp(0) + res.setref_base(value) + return res else: adr = llmemory.cast_ptr_to_adr(value) value = heaptracker.adr2int(adr) @@ -89,7 +91,9 @@ if in_const_box: return history.ConstFloat(value) else: - return resoperation.InputArgFloat(value) + res = history.FloatFrontendOp(0) + res.setfloatstorage(value) + return res elif isinstance(value, str) or isinstance(value, unicode): assert len(value) == 1 # must be a character value = ord(value) @@ -100,7 +104,9 @@ if in_const_box: return history.ConstInt(value) else: - return resoperation.InputArgInt(value) + res = history.IntFrontendOp(0) + res.setint(value) + return res @specialize.arg(0) def equal_whatever(TYPE, x, y): From pypy.commits at gmail.com Sat Mar 19 06:07:56 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 19 Mar 2016 03:07:56 -0700 (PDT) Subject: [pypy-commit] pypy default: Expand by saying that there is more than just the cpyext fix (though not much more) Message-ID: <56ed24fc.a2afc20a.a24d3.ffffcf15@mx.google.com> Author: Armin Rigo Branch: Changeset: r83165:6a20a2ae2c3b Date: 2016-03-19 11:07 +0100 http://bitbucket.org/pypy/pypy/changeset/6a20a2ae2c3b/ Log: Expand by saying that there is more than just the cpyext fix (though not much more) diff --git a/pypy/doc/release-5.0.1.rst b/pypy/doc/release-5.0.1.rst --- a/pypy/doc/release-5.0.1.rst +++ b/pypy/doc/release-5.0.1.rst @@ -9,6 +9,11 @@ .. _`lxml 3.6.0`: https://pypi.python.org/pypi/lxml/3.6.0 .. 
_`crash on large files`: https://bitbucket.org/pypy/pypy/issues/2260 + +The changes with PyPy 5.0 are only two bug fixes: one in cpyext, which +fixes notably (but not only) lxml, and another for a corner case of the +JIT. + What is PyPy? ============= From pypy.commits at gmail.com Sat Mar 19 06:08:47 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 19 Mar 2016 03:08:47 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: Added tag release-5.0.1 for changeset bbd45126bc69 Message-ID: <56ed252f.82561c0a.518d.0b14@mx.google.com> Author: Armin Rigo Branch: release-5.x Changeset: r83166:445b602788cf Date: 2016-03-19 11:08 +0100 http://bitbucket.org/pypy/pypy/changeset/445b602788cf/ Log: Added tag release-5.0.1 for changeset bbd45126bc69 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -18,3 +18,4 @@ f3ad1e1e1d6215e20d34bb65ab85ff9188c9f559 release-2.6.1 850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0 5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1 +bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1 From pypy.commits at gmail.com Sat Mar 19 06:17:29 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 19 Mar 2016 03:17:29 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: md5/sha of the 5.0.1 release Message-ID: <56ed2739.c711c30a.da056.ffffd6cd@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r725:545c346c5deb Date: 2016-03-19 11:17 +0100 http://bitbucket.org/pypy/pypy.org/changeset/545c346c5deb/ Log: md5/sha of the 5.0.1 release diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -379,19 +379,19 @@

    Checksums

    Here are the checksums for each of the downloads

    -

    pypy-5.0.0 md5:

    +

    pypy-5.0.1 md5:

    -bcb5c830d6380ff78b759dbe075dfc14  pypy-5.0.0-linux-armel.tar.bz2
    -5649117ba754bef14550b6abc2135eab  pypy-5.0.0-linux-armhf-raring.tar.bz2
    -949344fff9c6942713f34e1a1fcbc7aa  pypy-5.0.0-linux-armhf-raspbian.tar.bz2
    -5e005cf26a3a58552fd77f9aaae6f614  pypy-5.0.0-linux.tar.bz2
    -133530cb9957a67807b25d23bb74ac24  pypy-5.0.0-linux64.tar.bz2
    -a091398908bf525149a1fdea8bf48ec2  pypy-5.0.0-osx64.tar.bz2
    +5544c118d270138125fec1ec5659ef80  pypy-5.0.1-linux-armel.tar.bz2
    +34d6cf783cf585bbfff1b394d2db9a26  pypy-5.0.1-linux-armhf-raring.tar.bz2
    +224546fb5999c4b08b2b1c51e40dc055  pypy-5.0.1-linux-armhf-raspbian.tar.bz2
    +3f05067352f25e23eae618dca96062a2  pypy-5.0.1-linux.tar.bz2
    +6a6b19f6c13b42f4ce9d0c0d892f597a  pypy-5.0.1-linux64.tar.bz2
    +bec87524ebc3f11c9c9817f64311ef65  pypy-5.0.1-osx64.tar.bz2
    +798c6e83536a5fa5ed7d6efb4d06db1a  pypy-5.0.1-src.tar.bz2
    +928761075bcc2d01f9f884eeee105bd0  pypy-5.0.1-src.zip
    +2e53db6766a718084c9327a6059f8ad7  pypy-5.0.1-win32.zip
     f243ff399a55f4370b6d1dc0a3650f1d  pypy-5.0.0-ppc64.tar.bz2
     51fb75ae0a143faa9a5b39f094965050  pypy-5.0.0-ppc64le.tar.bz2
    -6a26f735cb45a10255076fdd6cebee84  pypy-5.0.0-src.tar.bz2
    -1be14cf3ffc97da7521637f8f81abc3c  pypy-5.0.0-src.zip
    -d2c8237e8106b535850596f0e9762246  pypy-5.0.0-win32.zip
     

    pypy3-2.4.0 md5:

    @@ -410,33 +410,33 @@
     2c9f0054f3b93a6473f10be35277825a  pypy-1.8-sandbox-linux64.tar.bz2
     009c970b5fa75754ae4c32a5d108a8d4  pypy-1.8-sandbox-linux.tar.bz2
     
    -

    pypy-5.0.0 sha1:

    +

    pypy-5.0.1 sha1:

    -b7c82d437086660759ec18582dbdaf198b77e467  pypy-5.0.0-linux-armel.tar.bz2
    -85978b1d33b0db8b512eebb1558200c3ab76d462  pypy-5.0.0-linux-armhf-raring.tar.bz2
    -271472d0362ce02fd656024b64f0343cc8193f9d  pypy-5.0.0-linux-armhf-raspbian.tar.bz2
    -88ac71eebd65c35032325497cc450b4d184be005  pypy-5.0.0-linux.tar.bz2
    -22d32d92899a07cb8cbba4b8918a7919e34246c4  pypy-5.0.0-linux64.tar.bz2
    -f652b264ba063a8c472b753baaaacf63690be6c5  pypy-5.0.0-osx64.tar.bz2
    +d2df9030c670e178e2ee9b99934174184fe8aa1c  pypy-5.0.1-linux-armel.tar.bz2
    +89534b3b09336165bf706a459f170ae3628da891  pypy-5.0.1-linux-armhf-raring.tar.bz2
    +ecce668b3ec9d1a5d70e99ea4d0ce7491ca860e5  pypy-5.0.1-linux-armhf-raspbian.tar.bz2
    +b814bb1b70b39c1e601a15e8bb809f525d6ef04d  pypy-5.0.1-linux.tar.bz2
    +26f6bdada77adb2f79bce97513fdb58a91e6e967  pypy-5.0.1-linux64.tar.bz2
    +54eae1b3da6c29ba4bc5db35b89c23e6080a6d09  pypy-5.0.1-osx64.tar.bz2
    +e96dad1562c4a91b26612f0fad0e70d0635399ed  pypy-5.0.1-src.tar.bz2
    +f7e4cda496244eefc50323704c48c10b568937cf  pypy-5.0.1-src.zip
    +f0addc0cc809e3cc3ffe2c2dd643eb6e1c95cb49  pypy-5.0.1-win32.zip
     5620cead511ad33f9fface224544b70d72d9e4c9  pypy-5.0.0-ppc64.tar.bz2
     6ee6b0eb574f3d29a5eaf29fdae8745fd9fe3c38  pypy-5.0.0-ppc64le.tar.bz2
    -62ce000b887ea22f5bdddcc0f24dd571ca534f57  pypy-5.0.0-src.tar.bz2
    -6dcbde8242e0ee985ffed63c5bf204e7fd74ac2c  pypy-5.0.0-src.zip
    -62cef0e0dd8849c224c647e53b13d3c47c99807d  pypy-5.0.0-win32.zip
     
    -

    pypy-5.0.0 sha256:

    +

    pypy-5.0.1 sha256:

    -87bd85441b16ecca0d45ba6e9c0e9d26bb7bd8867afbf79d80312cf79b032dc1  pypy-5.0.0-linux-armel.tar.bz2
    -5bb52cf5db4ae8497c4e03cd8a70e49867e6b93d9f29ad335d030fcd3a375769  pypy-5.0.0-linux-armhf-raring.tar.bz2
    -8033c0cc39e9f6771688f2eda95c726595f5453b3e73e1cd5f7ebbe3dae1f685  pypy-5.0.0-linux-armhf-raspbian.tar.bz2
    -a9cc9afa94ff1cde811626a70081c477c9840e7816c0562d1903fd823d222ceb  pypy-5.0.0-linux.tar.bz2
    -b9c73be8e3c3b0835df83bdb86335712005240071cdd4dc245ac30b457063ae0  pypy-5.0.0-linux64.tar.bz2
    -45ed8bf799d0fd8eb051cbcc427173fba74dc9c2f6c309d7a3cc90f4917e6a10  pypy-5.0.0-osx64.tar.bz2
    +17d55804b2253acd9de42276d756d4a08b7d1d2da09ef81dd325e14b18a1bcda  pypy-5.0.1-linux-armel.tar.bz2
    +1e9146978cc7e7bd30683a518f304a824db7b9b1c6fae5e866eb703684ba3c98  pypy-5.0.1-linux-armhf-raring.tar.bz2
    +338d1c32c1326e6321b222ae357711b38c4a0ffddf020c2a35536b5f69376e28  pypy-5.0.1-linux-armhf-raspbian.tar.bz2
    +4b9a294033f917a1674c9ddcb2e7e8d32c4f4351f8216fd1fe23f6d2ad2b1a36  pypy-5.0.1-linux.tar.bz2
    +1b1363a48edd1c1b31ca5e995987eda3d460a3404f36c3bb2dd9f52c93eecff5  pypy-5.0.1-linux64.tar.bz2
    +6ebdb9d91203f053b38e3c21841c11a72f416dc185f7b3b7c908229df15e924a  pypy-5.0.1-osx64.tar.bz2
    +1573c9284d3ec236c8e6ef3b954753932dff29462c54b5885b761d1ee68b6e05  pypy-5.0.1-src.tar.bz2
    +6e343f24c5e4ea87879bc4fd299b65a2825796286319edc0b69b3681017c145f  pypy-5.0.1-src.zip
    +c12254d8b1747322736d26e014744a426c6900d232c1799140fbb43f44319730  pypy-5.0.1-win32.zip
     334a37e68cb543cf2cbcdd12379b9b770064bb70ba7fd104f1e451cfa10cdda5  pypy-5.0.0-ppc64.tar.bz2
     e72fe5c094186f79c997000ddbaa01616def652a8d1338b75a27dfa3755eb86c  pypy-5.0.0-ppc64le.tar.bz2
    -89027b1b33553b53ff7733dc4838f0a76af23552c0d915d9f6de5875b8d7d4ab  pypy-5.0.0-src.tar.bz2
    -03e19e9bafccf5b2f4dd422699f3fe42da754c3fcc1d1fd4c8d585d7c9d1849d  pypy-5.0.0-src.zip
    -c53f0946703f5e4885484c7cde2554a0320537135bf8965e054757c214412438  pypy-5.0.0-win32.zip
     

    pypy3-2.4.0 sha1:

    diff --git a/source/download.txt b/source/download.txt
    --- a/source/download.txt
    +++ b/source/download.txt
    @@ -415,19 +415,19 @@
     
     Here are the checksums for each of the downloads
     
    -pypy-5.0.0 md5::
    +pypy-5.0.1 md5::
     
    -    bcb5c830d6380ff78b759dbe075dfc14  pypy-5.0.0-linux-armel.tar.bz2
    -    5649117ba754bef14550b6abc2135eab  pypy-5.0.0-linux-armhf-raring.tar.bz2
    -    949344fff9c6942713f34e1a1fcbc7aa  pypy-5.0.0-linux-armhf-raspbian.tar.bz2
    -    5e005cf26a3a58552fd77f9aaae6f614  pypy-5.0.0-linux.tar.bz2
    -    133530cb9957a67807b25d23bb74ac24  pypy-5.0.0-linux64.tar.bz2
    -    a091398908bf525149a1fdea8bf48ec2  pypy-5.0.0-osx64.tar.bz2
    +    5544c118d270138125fec1ec5659ef80  pypy-5.0.1-linux-armel.tar.bz2
    +    34d6cf783cf585bbfff1b394d2db9a26  pypy-5.0.1-linux-armhf-raring.tar.bz2
    +    224546fb5999c4b08b2b1c51e40dc055  pypy-5.0.1-linux-armhf-raspbian.tar.bz2
    +    3f05067352f25e23eae618dca96062a2  pypy-5.0.1-linux.tar.bz2
    +    6a6b19f6c13b42f4ce9d0c0d892f597a  pypy-5.0.1-linux64.tar.bz2
    +    bec87524ebc3f11c9c9817f64311ef65  pypy-5.0.1-osx64.tar.bz2
    +    798c6e83536a5fa5ed7d6efb4d06db1a  pypy-5.0.1-src.tar.bz2
    +    928761075bcc2d01f9f884eeee105bd0  pypy-5.0.1-src.zip
    +    2e53db6766a718084c9327a6059f8ad7  pypy-5.0.1-win32.zip
         f243ff399a55f4370b6d1dc0a3650f1d  pypy-5.0.0-ppc64.tar.bz2
         51fb75ae0a143faa9a5b39f094965050  pypy-5.0.0-ppc64le.tar.bz2
    -    6a26f735cb45a10255076fdd6cebee84  pypy-5.0.0-src.tar.bz2
    -    1be14cf3ffc97da7521637f8f81abc3c  pypy-5.0.0-src.zip
    -    d2c8237e8106b535850596f0e9762246  pypy-5.0.0-win32.zip
     
     pypy3-2.4.0 md5::
     
    @@ -448,33 +448,33 @@
        009c970b5fa75754ae4c32a5d108a8d4  pypy-1.8-sandbox-linux.tar.bz2
     
     
    -pypy-5.0.0 sha1::
    +pypy-5.0.1 sha1::
         
    -    b7c82d437086660759ec18582dbdaf198b77e467  pypy-5.0.0-linux-armel.tar.bz2
    -    85978b1d33b0db8b512eebb1558200c3ab76d462  pypy-5.0.0-linux-armhf-raring.tar.bz2
    -    271472d0362ce02fd656024b64f0343cc8193f9d  pypy-5.0.0-linux-armhf-raspbian.tar.bz2
    -    88ac71eebd65c35032325497cc450b4d184be005  pypy-5.0.0-linux.tar.bz2
    -    22d32d92899a07cb8cbba4b8918a7919e34246c4  pypy-5.0.0-linux64.tar.bz2
    -    f652b264ba063a8c472b753baaaacf63690be6c5  pypy-5.0.0-osx64.tar.bz2
    +    d2df9030c670e178e2ee9b99934174184fe8aa1c  pypy-5.0.1-linux-armel.tar.bz2
    +    89534b3b09336165bf706a459f170ae3628da891  pypy-5.0.1-linux-armhf-raring.tar.bz2
    +    ecce668b3ec9d1a5d70e99ea4d0ce7491ca860e5  pypy-5.0.1-linux-armhf-raspbian.tar.bz2
    +    b814bb1b70b39c1e601a15e8bb809f525d6ef04d  pypy-5.0.1-linux.tar.bz2
    +    26f6bdada77adb2f79bce97513fdb58a91e6e967  pypy-5.0.1-linux64.tar.bz2
    +    54eae1b3da6c29ba4bc5db35b89c23e6080a6d09  pypy-5.0.1-osx64.tar.bz2
    +    e96dad1562c4a91b26612f0fad0e70d0635399ed  pypy-5.0.1-src.tar.bz2
    +    f7e4cda496244eefc50323704c48c10b568937cf  pypy-5.0.1-src.zip
    +    f0addc0cc809e3cc3ffe2c2dd643eb6e1c95cb49  pypy-5.0.1-win32.zip
         5620cead511ad33f9fface224544b70d72d9e4c9  pypy-5.0.0-ppc64.tar.bz2
         6ee6b0eb574f3d29a5eaf29fdae8745fd9fe3c38  pypy-5.0.0-ppc64le.tar.bz2
    -    62ce000b887ea22f5bdddcc0f24dd571ca534f57  pypy-5.0.0-src.tar.bz2
    -    6dcbde8242e0ee985ffed63c5bf204e7fd74ac2c  pypy-5.0.0-src.zip
    -    62cef0e0dd8849c224c647e53b13d3c47c99807d  pypy-5.0.0-win32.zip
     
    -pypy-5.0.0 sha256::
    +pypy-5.0.1 sha256::
     
    -    87bd85441b16ecca0d45ba6e9c0e9d26bb7bd8867afbf79d80312cf79b032dc1  pypy-5.0.0-linux-armel.tar.bz2
    -    5bb52cf5db4ae8497c4e03cd8a70e49867e6b93d9f29ad335d030fcd3a375769  pypy-5.0.0-linux-armhf-raring.tar.bz2
    -    8033c0cc39e9f6771688f2eda95c726595f5453b3e73e1cd5f7ebbe3dae1f685  pypy-5.0.0-linux-armhf-raspbian.tar.bz2
    -    a9cc9afa94ff1cde811626a70081c477c9840e7816c0562d1903fd823d222ceb  pypy-5.0.0-linux.tar.bz2
    -    b9c73be8e3c3b0835df83bdb86335712005240071cdd4dc245ac30b457063ae0  pypy-5.0.0-linux64.tar.bz2
    -    45ed8bf799d0fd8eb051cbcc427173fba74dc9c2f6c309d7a3cc90f4917e6a10  pypy-5.0.0-osx64.tar.bz2
    +    17d55804b2253acd9de42276d756d4a08b7d1d2da09ef81dd325e14b18a1bcda  pypy-5.0.1-linux-armel.tar.bz2
    +    1e9146978cc7e7bd30683a518f304a824db7b9b1c6fae5e866eb703684ba3c98  pypy-5.0.1-linux-armhf-raring.tar.bz2
    +    338d1c32c1326e6321b222ae357711b38c4a0ffddf020c2a35536b5f69376e28  pypy-5.0.1-linux-armhf-raspbian.tar.bz2
    +    4b9a294033f917a1674c9ddcb2e7e8d32c4f4351f8216fd1fe23f6d2ad2b1a36  pypy-5.0.1-linux.tar.bz2
    +    1b1363a48edd1c1b31ca5e995987eda3d460a3404f36c3bb2dd9f52c93eecff5  pypy-5.0.1-linux64.tar.bz2
    +    6ebdb9d91203f053b38e3c21841c11a72f416dc185f7b3b7c908229df15e924a  pypy-5.0.1-osx64.tar.bz2
    +    1573c9284d3ec236c8e6ef3b954753932dff29462c54b5885b761d1ee68b6e05  pypy-5.0.1-src.tar.bz2
    +    6e343f24c5e4ea87879bc4fd299b65a2825796286319edc0b69b3681017c145f  pypy-5.0.1-src.zip
    +    c12254d8b1747322736d26e014744a426c6900d232c1799140fbb43f44319730  pypy-5.0.1-win32.zip
         334a37e68cb543cf2cbcdd12379b9b770064bb70ba7fd104f1e451cfa10cdda5  pypy-5.0.0-ppc64.tar.bz2
         e72fe5c094186f79c997000ddbaa01616def652a8d1338b75a27dfa3755eb86c  pypy-5.0.0-ppc64le.tar.bz2
    -    89027b1b33553b53ff7733dc4838f0a76af23552c0d915d9f6de5875b8d7d4ab  pypy-5.0.0-src.tar.bz2
    -    03e19e9bafccf5b2f4dd422699f3fe42da754c3fcc1d1fd4c8d585d7c9d1849d  pypy-5.0.0-src.zip
    -    c53f0946703f5e4885484c7cde2554a0320537135bf8965e054757c214412438  pypy-5.0.0-win32.zip
     
     pypy3-2.4.0 sha1::
     
    
    From pypy.commits at gmail.com  Sat Mar 19 06:19:17 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Sat, 19 Mar 2016 03:19:17 -0700 (PDT)
    Subject: [pypy-commit] pypy default: update
    Message-ID: <56ed27a5.e6ebc20a.bf07d.ffffcf7b@mx.google.com>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r83167:40cbac9041aa
    Date: 2016-03-19 11:18 +0100
    http://bitbucket.org/pypy/pypy/changeset/40cbac9041aa/
    
    Log:	update
    
    diff --git a/pypy/doc/release-5.0.1.rst b/pypy/doc/release-5.0.1.rst
    --- a/pypy/doc/release-5.0.1.rst
    +++ b/pypy/doc/release-5.0.1.rst
    @@ -10,9 +10,9 @@
     .. _`lxml 3.6.0`: https://pypi.python.org/pypi/lxml/3.6.0
     .. _`crash on large files`: https://bitbucket.org/pypy/pypy/issues/2260
     
    -The changes with PyPy 5.0 are only two bug fixes: one in cpyext, which
    -fixes notably (but not only) lxml, and another for a corner case of the
    -JIT.
    +The changes between PyPy 5.0 and 5.0.1 are only two bug fixes: one in
    +cpyext, which fixes notably (but not only) lxml; and another for a
    +corner case of the JIT.
     
     What is PyPy?
     =============
    
    From pypy.commits at gmail.com  Sat Mar 19 06:20:51 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Sat, 19 Mar 2016 03:20:51 -0700 (PDT)
    Subject: [pypy-commit] pypy default: update, as used for 5.0.1
    Message-ID: <56ed2803.8fb81c0a.e11e0.71a6@mx.google.com>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r83168:ec5563e74874
    Date: 2016-03-19 11:20 +0100
    http://bitbucket.org/pypy/pypy/changeset/ec5563e74874/
    
    Log:	update, as used for 5.0.1
    
    diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh
    --- a/pypy/tool/release/repackage.sh
    +++ b/pypy/tool/release/repackage.sh
    @@ -1,9 +1,9 @@
     # Edit these appropriately before running this script
     maj=5
     min=0
    -rev=0
    +rev=1
     branchname=release-$maj.x  # ==OR== release-$maj.$min.x
    -tagname=release-$maj.$min  # ==OR== release-$maj.$min.$rev
    +tagname=release-$maj.$min.$rev
     # This script will download latest builds from the buildmaster, rename the top
     # level directory, and repackage ready to be uploaded to bitbucket. It will also
     # download source, assuming a tag for the release already exists, and repackage them.
    
    From pypy.commits at gmail.com  Sat Mar 19 06:58:23 2016
    From: pypy.commits at gmail.com (cfbolz)
    Date: Sat, 19 Mar 2016 03:58:23 -0700 (PDT)
    Subject: [pypy-commit] pypy guard-compatible: less strict terminator getter,
     can be improved even more
    Message-ID: <56ed30cf.455e1c0a.57a4f.18cf@mx.google.com>
    
    Author: Carl Friedrich Bolz 
    Branch: guard-compatible
    Changeset: r83169:52d09afa52ce
    Date: 2016-03-19 11:57 +0100
    http://bitbucket.org/pypy/pypy/changeset/52d09afa52ce/
    
    Log:	less strict terminator getter, can be improved even more
    
    diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
    --- a/pypy/objspace/std/mapdict.py
    +++ b/pypy/objspace/std/mapdict.py
    @@ -55,10 +55,20 @@
         def _get_terminator(self):
             return self.terminator
     
    +    @jit.elidable_compatible()
    +    def _get_terminator_if_devolved(self):
    +        if isinstance(self.terminator, DevolvedDictTerminator):
    +            return self.terminator
    +        return None
    +
         def read(self, obj, name, index):
             storageindex = self.find_map_storageindex(name, index)
             if storageindex == -1:
    -            return self._get_terminator()._read_terminator(obj, name, index)
    +            # XXX can improve the devolved case
    +            terminator = self._get_terminator_if_devolved()
    +            if terminator is not None:
    +                return terminator._read_terminator(obj, name, index)
    +            return None
             #if ( # XXX in the guard_compatible world the following isconstant may never be true?
             #    jit.isconstant(attr.storageindex) and
             #    jit.isconstant(obj) and
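    
    As a rough illustration of the pattern (not the real mapdict classes): the devolved-dict
    check is split into a small side-effect-free helper, which is the part decorated with
    jit.elidable_compatible above, so that for ordinary terminators the lookup can fold away
    and read() simply reports a missing attribute. A minimal plain-Python sketch, with
    stand-in class names and a hypothetical storage/fallback layout:
    
        class Terminator(object):
            def _read_terminator(self, obj, name, index):
                return None                    # plain terminator: attribute is simply missing
    
        class DevolvedDictTerminator(Terminator):
            def _read_terminator(self, obj, name, index):
                return obj.fallback.get((name, index))    # hypothetical slow-path dict
    
        class Map(object):
            def __init__(self, terminator):
                self.terminator = terminator
    
            def _get_terminator_if_devolved(self):
                # side-effect-free: the same map always gives the same answer,
                # which is what makes it a candidate for elidable_compatible
                if isinstance(self.terminator, DevolvedDictTerminator):
                    return self.terminator
                return None
    
            def read(self, obj, name, index, storageindex):
                if storageindex == -1:
                    terminator = self._get_terminator_if_devolved()
                    if terminator is not None:
                        return terminator._read_terminator(obj, name, index)
                    return None                # common case: no attribute, no call at all
                return obj.storage[storageindex]           # hypothetical storage access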
    
    From pypy.commits at gmail.com  Sat Mar 19 09:19:06 2016
    From: pypy.commits at gmail.com (alex_gaynor)
    Date: Sat, 19 Mar 2016 06:19:06 -0700 (PDT)
    Subject: [pypy-commit] pypy stdlib-2.7.11: merged in default
    Message-ID: <56ed51ca.838d1c0a.f191a.44d4@mx.google.com>
    
    Author: Alex Gaynor 
    Branch: stdlib-2.7.11
    Changeset: r83170:6f13fc854eb8
    Date: 2016-03-19 09:18 -0400
    http://bitbucket.org/pypy/pypy/changeset/6f13fc854eb8/
    
    Log:	merged in default
    
    diff too long, truncating to 2000 out of 42971 lines
    
    diff --git a/.hgignore b/.hgignore
    --- a/.hgignore
    +++ b/.hgignore
    @@ -22,6 +22,7 @@
     ^pypy/module/cpyext/test/.+\.obj$
     ^pypy/module/cpyext/test/.+\.manifest$
     ^pypy/module/test_lib_pypy/ctypes_tests/.+\.o$
    +^pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test\.o$
     ^pypy/module/cppyy/src/.+\.o$
     ^pypy/module/cppyy/bench/.+\.so$
     ^pypy/module/cppyy/bench/.+\.root$
    @@ -35,7 +36,6 @@
     ^pypy/module/test_lib_pypy/cffi_tests/__pycache__.+$
     ^pypy/doc/.+\.html$
     ^pypy/doc/config/.+\.rst$
    -^pypy/doc/basicblock\.asc$
     ^pypy/doc/.+\.svninfo$
     ^rpython/translator/c/src/libffi_msvc/.+\.obj$
     ^rpython/translator/c/src/libffi_msvc/.+\.dll$
    @@ -45,53 +45,35 @@
     ^rpython/translator/c/src/cjkcodecs/.+\.obj$
     ^rpython/translator/c/src/stacklet/.+\.o$
     ^rpython/translator/c/src/.+\.o$
    -^rpython/translator/jvm/\.project$
    -^rpython/translator/jvm/\.classpath$
    -^rpython/translator/jvm/eclipse-bin$
    -^rpython/translator/jvm/src/pypy/.+\.class$
    -^rpython/translator/benchmark/docutils$
    -^rpython/translator/benchmark/templess$
    -^rpython/translator/benchmark/gadfly$
    -^rpython/translator/benchmark/mako$
    -^rpython/translator/benchmark/bench-custom\.benchmark_result$
    -^rpython/translator/benchmark/shootout_benchmarks$
    +^rpython/translator/llvm/.+\.so$
     ^rpython/translator/goal/target.+-c$
     ^rpython/translator/goal/.+\.exe$
     ^rpython/translator/goal/.+\.dll$
     ^pypy/goal/pypy-translation-snapshot$
     ^pypy/goal/pypy-c
    -^pypy/goal/pypy-jvm
    -^pypy/goal/pypy-jvm.jar
     ^pypy/goal/.+\.exe$
     ^pypy/goal/.+\.dll$
     ^pypy/goal/.+\.lib$
     ^pypy/_cache$
    -^pypy/doc/statistic/.+\.html$
    -^pypy/doc/statistic/.+\.eps$
    -^pypy/doc/statistic/.+\.pdf$
    -^rpython/translator/cli/src/pypylib\.dll$
    -^rpython/translator/cli/src/query\.exe$
    -^rpython/translator/cli/src/main\.exe$
    +^lib-python/2.7/lib2to3/.+\.pickle$
     ^lib_pypy/__pycache__$
     ^lib_pypy/ctypes_config_cache/_.+_cache\.py$
     ^lib_pypy/ctypes_config_cache/_.+_.+_\.py$
    -^rpython/translator/cli/query-descriptions$
    +^lib_pypy/_libmpdec/.+.o$
     ^pypy/doc/discussion/.+\.html$
     ^include/.+\.h$
     ^include/.+\.inl$
     ^pypy/doc/_build/.*$
     ^pypy/doc/config/.+\.html$
     ^pypy/doc/config/style\.css$
    -^pypy/doc/jit/.+\.html$
    -^pypy/doc/jit/style\.css$
     ^pypy/doc/image/lattice1\.png$
     ^pypy/doc/image/lattice2\.png$
     ^pypy/doc/image/lattice3\.png$
     ^pypy/doc/image/stackless_informal\.png$
     ^pypy/doc/image/parsing_example.+\.png$
     ^rpython/doc/_build/.*$
    -^pypy/module/test_lib_pypy/ctypes_tests/_ctypes_test\.o$
     ^compiled
     ^.git/
    +^.hypothesis/
     ^release/
     ^rpython/_cache$
    diff --git a/.hgtags b/.hgtags
    --- a/.hgtags
    +++ b/.hgtags
    @@ -18,3 +18,4 @@
     f3ad1e1e1d6215e20d34bb65ab85ff9188c9f559 release-2.6.1
     850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0
     5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1
    +246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0
    diff --git a/LICENSE b/LICENSE
    --- a/LICENSE
    +++ b/LICENSE
    @@ -28,7 +28,7 @@
         DEALINGS IN THE SOFTWARE.
     
     
    -PyPy Copyright holders 2003-2015
    +PyPy Copyright holders 2003-2016
     ----------------------------------- 
     
     Except when otherwise stated (look for LICENSE files or information at
    @@ -41,29 +41,29 @@
       Amaury Forgeot d'Arc
       Antonio Cuni
       Samuele Pedroni
    +  Matti Picus
       Alex Gaynor
       Brian Kearns
    -  Matti Picus
       Philip Jenvey
       Michael Hudson
    +  Ronan Lamy
       David Schneider
    +  Manuel Jacob
       Holger Krekel
       Christian Tismer
       Hakan Ardo
    -  Manuel Jacob
    -  Ronan Lamy
       Benjamin Peterson
    +  Richard Plangger
       Anders Chrigstrom
       Eric van Riet Paap
       Wim Lavrijsen
    -  Richard Plangger
       Richard Emslie
       Alexander Schremmer
       Dan Villiom Podlaski Christiansen
    +  Remi Meier
       Lukas Diekmann
       Sven Hager
       Anders Lehmann
    -  Remi Meier
       Aurelien Campeas
       Niklaus Haldimann
       Camillo Bruni
    @@ -72,8 +72,8 @@
       Romain Guillebert
       Leonardo Santagada
       Seo Sanghyeon
    +  Ronny Pfannschmidt
       Justin Peel
    -  Ronny Pfannschmidt
       David Edelsohn
       Anders Hammarquist
       Jakub Gustak
    @@ -95,6 +95,7 @@
       Tyler Wade
       Michael Foord
       Stephan Diehl
    +  Vincent Legoll
       Stefan Schwarzer
       Valentino Volonghi
       Tomek Meka
    @@ -105,9 +106,9 @@
       Jean-Paul Calderone
       Timo Paulssen
       Squeaky
    +  Marius Gedminas
       Alexandre Fayolle
       Simon Burton
    -  Marius Gedminas
       Martin Matusiak
       Konstantin Lopuhin
       Wenzhu Man
    @@ -116,16 +117,20 @@
       Ivan Sichmann Freitas
       Greg Price
       Dario Bertini
    +  Stefano Rivera
       Mark Pearse
       Simon Cross
       Andreas Stührk
    -  Stefano Rivera
    +  Edd Barrett
       Jean-Philippe St. Pierre
       Guido van Rossum
       Pavel Vinogradov
    +  Jeremy Thurgood
       Paweł Piotr Przeradowski
    +  Spenser Bauman
       Paul deGrandis
       Ilya Osadchiy
    +  marky1991
       Tobias Oberstein
       Adrian Kuhn
       Boris Feigin
    @@ -134,14 +139,12 @@
       Georg Brandl
       Bert Freudenberg
       Stian Andreassen
    -  Edd Barrett
    +  Tobias Pape
       Wanja Saatkamp
       Gerald Klix
       Mike Blume
    -  Tobias Pape
       Oscar Nierstrasz
       Stefan H. Muller
    -  Jeremy Thurgood
       Rami Chowdhury
       Eugene Oden
       Henry Mason
    @@ -153,6 +156,8 @@
       Lukas Renggli
       Guenter Jantzen
       Ned Batchelder
    +  Tim Felgentreff
    +  Anton Gulenko
       Amit Regmi
       Ben Young
       Nicolas Chauvat
    @@ -162,12 +167,12 @@
       Nicholas Riley
       Jason Chu
       Igor Trindade Oliveira
    -  Tim Felgentreff
    +  Yichao Yu
       Rocco Moretti
       Gintautas Miliauskas
       Michael Twomey
       Lucian Branescu Mihaila
    -  Yichao Yu
    +  Devin Jeanpierre
       Gabriel Lavoie
       Olivier Dormond
       Jared Grubb
    @@ -191,33 +196,33 @@
       Stanislaw Halik
       Mikael Schönenberg
       Berkin Ilbeyi
    -  Elmo M?ntynen
    +  Elmo Mäntynen
    +  Faye Zhao
       Jonathan David Riehl
       Anders Qvist
       Corbin Simpson
       Chirag Jadwani
       Beatrice During
       Alex Perry
    -  Vincent Legoll
    +  Vaibhav Sood
       Alan McIntyre
    -  Spenser Bauman
    +  William Leslie
       Alexander Sedov
       Attila Gobi
    +  Jasper.Schulz
       Christopher Pope
    -  Devin Jeanpierre
    -  Vaibhav Sood
       Christian Tismer 
       Marc Abramowitz
       Dan Stromberg
       Arjun Naik
       Valentina Mukhamedzhanova
       Stefano Parmesan
    +  Mark Young
       Alexis Daboville
       Jens-Uwe Mager
       Carl Meyer
       Karl Ramm
       Pieter Zieschang
    -  Anton Gulenko
       Gabriel
       Lukas Vacek
       Andrew Dalke
    @@ -225,6 +230,7 @@
       Jakub Stasiak
       Nathan Taylor
       Vladimir Kryachko
    +  Omer Katz
       Jacek Generowicz
       Alejandro J. Cura
       Jacob Oscarson
    @@ -234,11 +240,13 @@
       Kristjan Valur Jonsson
       David Lievens
       Neil Blakey-Milner
    +  Sergey Matyunin
       Lutz Paelike
       Lucio Torre
       Lars Wassermann
       Philipp Rustemeuer
       Henrik Vendelbo
    +  Richard Lancaster
       Dan Buch
       Miguel de Val Borro
       Artur Lisiecki
    @@ -250,20 +258,21 @@
       Tomo Cocoa
       Kim Jin Su
       Toni Mattis
    +  Amber Brown
       Lucas Stadler
       Julian Berman
       Markus Holtermann
       roberto at goyle
       Yury V. Zaytsev
       Anna Katrina Dominguez
    -  William Leslie
       Bobby Impollonia
    -  Faye Zhao
       timo at eistee.fritz.box
       Andrew Thompson
       Yusei Tahara
    +  Aaron Tubbs
       Ben Darnell
       Roberto De Ioris
    +  Logan Chien
       Juan Francisco Cantero Hurtado
       Ruochen Huang
       Jeong YunWon
    @@ -273,6 +282,7 @@
       Christopher Armstrong
       Michael Hudson-Doyle
       Anders Sigfridsson
    +  Nikolay Zinov
       Yasir Suhail
       Jason Michalski
       rafalgalczynski at gmail.com
    @@ -282,6 +292,7 @@
       Gustavo Niemeyer
       Stephan Busemann
       Rafał Gałczyński
    +  Matt Bogosian
       Christian Muirhead
       Berker Peksag
       James Lan
    @@ -316,9 +327,9 @@
       Stefan Marr
       jiaaro
       Mads Kiilerich
    -  Richard Lancaster
       opassembler.py
       Antony Lee
    +  Jason Madden
       Yaroslav Fedevych
       Jim Hunziker
       Markus Unterwaditzer
    @@ -327,6 +338,7 @@
       squeaky
       Zearin
       soareschen
    +  Jonas Pfannschmidt
       Kurt Griffiths
       Mike Bayer
       Matthew Miller
    diff --git a/Makefile b/Makefile
    --- a/Makefile
    +++ b/Makefile
    @@ -39,5 +39,5 @@
     # runs.  We cannot get their original value either:
     # http://lists.gnu.org/archive/html/help-make/2010-08/msg00106.html
     
    -cffi_imports:
    +cffi_imports: pypy-c
     	PYTHONPATH=. ./pypy-c pypy/tool/build_cffi_imports.py
    diff --git a/dotviewer/drawgraph.py b/dotviewer/drawgraph.py
    --- a/dotviewer/drawgraph.py
    +++ b/dotviewer/drawgraph.py
    @@ -14,12 +14,661 @@
     FONT = os.path.join(this_dir, 'font', 'DroidSans.ttf')
     FIXEDFONT = os.path.join(this_dir, 'font', 'DroidSansMono.ttf')
     COLOR = {
    -    'black': (0,0,0),
    -    'white': (255,255,255),
    -    'red': (255,0,0),
    -    'green': (0,255,0),
    -    'blue': (0,0,255),
    -    'yellow': (255,255,0),
    +    'aliceblue': (240, 248, 255),
    +    'antiquewhite': (250, 235, 215),
    +    'antiquewhite1': (255, 239, 219),
    +    'antiquewhite2': (238, 223, 204),
    +    'antiquewhite3': (205, 192, 176),
    +    'antiquewhite4': (139, 131, 120),
    +    'aquamarine': (127, 255, 212),
    +    'aquamarine1': (127, 255, 212),
    +    'aquamarine2': (118, 238, 198),
    +    'aquamarine3': (102, 205, 170),
    +    'aquamarine4': (69, 139, 116),
    +    'azure': (240, 255, 255),
    +    'azure1': (240, 255, 255),
    +    'azure2': (224, 238, 238),
    +    'azure3': (193, 205, 205),
    +    'azure4': (131, 139, 139),
    +    'beige': (245, 245, 220),
    +    'bisque': (255, 228, 196),
    +    'bisque1': (255, 228, 196),
    +    'bisque2': (238, 213, 183),
    +    'bisque3': (205, 183, 158),
    +    'bisque4': (139, 125, 107),
    +    'black': (0, 0, 0),
    +    'blanchedalmond': (255, 235, 205),
    +    'blue': (0, 0, 255),
    +    'blue1': (0, 0, 255),
    +    'blue2': (0, 0, 238),
    +    'blue3': (0, 0, 205),
    +    'blue4': (0, 0, 139),
    +    'blueviolet': (138, 43, 226),
    +    'brown': (165, 42, 42),
    +    'brown1': (255, 64, 64),
    +    'brown2': (238, 59, 59),
    +    'brown3': (205, 51, 51),
    +    'brown4': (139, 35, 35),
    +    'burlywood': (222, 184, 135),
    +    'burlywood1': (255, 211, 155),
    +    'burlywood2': (238, 197, 145),
    +    'burlywood3': (205, 170, 125),
    +    'burlywood4': (139, 115, 85),
    +    'cadetblue': (95, 158, 160),
    +    'cadetblue1': (152, 245, 255),
    +    'cadetblue2': (142, 229, 238),
    +    'cadetblue3': (122, 197, 205),
    +    'cadetblue4': (83, 134, 139),
    +    'chartreuse': (127, 255, 0),
    +    'chartreuse1': (127, 255, 0),
    +    'chartreuse2': (118, 238, 0),
    +    'chartreuse3': (102, 205, 0),
    +    'chartreuse4': (69, 139, 0),
    +    'chocolate': (210, 105, 30),
    +    'chocolate1': (255, 127, 36),
    +    'chocolate2': (238, 118, 33),
    +    'chocolate3': (205, 102, 29),
    +    'chocolate4': (139, 69, 19),
    +    'coral': (255, 127, 80),
    +    'coral1': (255, 114, 86),
    +    'coral2': (238, 106, 80),
    +    'coral3': (205, 91, 69),
    +    'coral4': (139, 62, 47),
    +    'cornflowerblue': (100, 149, 237),
    +    'cornsilk': (255, 248, 220),
    +    'cornsilk1': (255, 248, 220),
    +    'cornsilk2': (238, 232, 205),
    +    'cornsilk3': (205, 200, 177),
    +    'cornsilk4': (139, 136, 120),
    +    'crimson': (220, 20, 60),
    +    'cyan': (0, 255, 255),
    +    'cyan1': (0, 255, 255),
    +    'cyan2': (0, 238, 238),
    +    'cyan3': (0, 205, 205),
    +    'cyan4': (0, 139, 139),
    +    'darkgoldenrod': (184, 134, 11),
    +    'darkgoldenrod1': (255, 185, 15),
    +    'darkgoldenrod2': (238, 173, 14),
    +    'darkgoldenrod3': (205, 149, 12),
    +    'darkgoldenrod4': (139, 101, 8),
    +    'darkgreen': (0, 100, 0),
    +    'darkkhaki': (189, 183, 107),
    +    'darkolivegreen': (85, 107, 47),
    +    'darkolivegreen1': (202, 255, 112),
    +    'darkolivegreen2': (188, 238, 104),
    +    'darkolivegreen3': (162, 205, 90),
    +    'darkolivegreen4': (110, 139, 61),
    +    'darkorange': (255, 140, 0),
    +    'darkorange1': (255, 127, 0),
    +    'darkorange2': (238, 118, 0),
    +    'darkorange3': (205, 102, 0),
    +    'darkorange4': (139, 69, 0),
    +    'darkorchid': (153, 50, 204),
    +    'darkorchid1': (191, 62, 255),
    +    'darkorchid2': (178, 58, 238),
    +    'darkorchid3': (154, 50, 205),
    +    'darkorchid4': (104, 34, 139),
    +    'darksalmon': (233, 150, 122),
    +    'darkseagreen': (143, 188, 143),
    +    'darkseagreen1': (193, 255, 193),
    +    'darkseagreen2': (180, 238, 180),
    +    'darkseagreen3': (155, 205, 155),
    +    'darkseagreen4': (105, 139, 105),
    +    'darkslateblue': (72, 61, 139),
    +    'darkslategray': (47, 79, 79),
    +    'darkslategray1': (151, 255, 255),
    +    'darkslategray2': (141, 238, 238),
    +    'darkslategray3': (121, 205, 205),
    +    'darkslategray4': (82, 139, 139),
    +    'darkslategrey': (47, 79, 79),
    +    'darkturquoise': (0, 206, 209),
    +    'darkviolet': (148, 0, 211),
    +    'deeppink': (255, 20, 147),
    +    'deeppink1': (255, 20, 147),
    +    'deeppink2': (238, 18, 137),
    +    'deeppink3': (205, 16, 118),
    +    'deeppink4': (139, 10, 80),
    +    'deepskyblue': (0, 191, 255),
    +    'deepskyblue1': (0, 191, 255),
    +    'deepskyblue2': (0, 178, 238),
    +    'deepskyblue3': (0, 154, 205),
    +    'deepskyblue4': (0, 104, 139),
    +    'dimgray': (105, 105, 105),
    +    'dimgrey': (105, 105, 105),
    +    'dodgerblue': (30, 144, 255),
    +    'dodgerblue1': (30, 144, 255),
    +    'dodgerblue2': (28, 134, 238),
    +    'dodgerblue3': (24, 116, 205),
    +    'dodgerblue4': (16, 78, 139),
    +    'firebrick': (178, 34, 34),
    +    'firebrick1': (255, 48, 48),
    +    'firebrick2': (238, 44, 44),
    +    'firebrick3': (205, 38, 38),
    +    'firebrick4': (139, 26, 26),
    +    'floralwhite': (255, 250, 240),
    +    'forestgreen': (34, 139, 34),
    +    'gainsboro': (220, 220, 220),
    +    'ghostwhite': (248, 248, 255),
    +    'gold': (255, 215, 0),
    +    'gold1': (255, 215, 0),
    +    'gold2': (238, 201, 0),
    +    'gold3': (205, 173, 0),
    +    'gold4': (139, 117, 0),
    +    'goldenrod': (218, 165, 32),
    +    'goldenrod1': (255, 193, 37),
    +    'goldenrod2': (238, 180, 34),
    +    'goldenrod3': (205, 155, 29),
    +    'goldenrod4': (139, 105, 20),
    +    'gray': (192, 192, 192),
    +    'gray0': (0, 0, 0),
    +    'gray1': (3, 3, 3),
    +    'gray10': (26, 26, 26),
    +    'gray100': (255, 255, 255),
    +    'gray11': (28, 28, 28),
    +    'gray12': (31, 31, 31),
    +    'gray13': (33, 33, 33),
    +    'gray14': (36, 36, 36),
    +    'gray15': (38, 38, 38),
    +    'gray16': (41, 41, 41),
    +    'gray17': (43, 43, 43),
    +    'gray18': (46, 46, 46),
    +    'gray19': (48, 48, 48),
    +    'gray2': (5, 5, 5),
    +    'gray20': (51, 51, 51),
    +    'gray21': (54, 54, 54),
    +    'gray22': (56, 56, 56),
    +    'gray23': (59, 59, 59),
    +    'gray24': (61, 61, 61),
    +    'gray25': (64, 64, 64),
    +    'gray26': (66, 66, 66),
    +    'gray27': (69, 69, 69),
    +    'gray28': (71, 71, 71),
    +    'gray29': (74, 74, 74),
    +    'gray3': (8, 8, 8),
    +    'gray30': (77, 77, 77),
    +    'gray31': (79, 79, 79),
    +    'gray32': (82, 82, 82),
    +    'gray33': (84, 84, 84),
    +    'gray34': (87, 87, 87),
    +    'gray35': (89, 89, 89),
    +    'gray36': (92, 92, 92),
    +    'gray37': (94, 94, 94),
    +    'gray38': (97, 97, 97),
    +    'gray39': (99, 99, 99),
    +    'gray4': (10, 10, 10),
    +    'gray40': (102, 102, 102),
    +    'gray41': (105, 105, 105),
    +    'gray42': (107, 107, 107),
    +    'gray43': (110, 110, 110),
    +    'gray44': (112, 112, 112),
    +    'gray45': (115, 115, 115),
    +    'gray46': (117, 117, 117),
    +    'gray47': (120, 120, 120),
    +    'gray48': (122, 122, 122),
    +    'gray49': (125, 125, 125),
    +    'gray5': (13, 13, 13),
    +    'gray50': (127, 127, 127),
    +    'gray51': (130, 130, 130),
    +    'gray52': (133, 133, 133),
    +    'gray53': (135, 135, 135),
    +    'gray54': (138, 138, 138),
    +    'gray55': (140, 140, 140),
    +    'gray56': (143, 143, 143),
    +    'gray57': (145, 145, 145),
    +    'gray58': (148, 148, 148),
    +    'gray59': (150, 150, 150),
    +    'gray6': (15, 15, 15),
    +    'gray60': (153, 153, 153),
    +    'gray61': (156, 156, 156),
    +    'gray62': (158, 158, 158),
    +    'gray63': (161, 161, 161),
    +    'gray64': (163, 163, 163),
    +    'gray65': (166, 166, 166),
    +    'gray66': (168, 168, 168),
    +    'gray67': (171, 171, 171),
    +    'gray68': (173, 173, 173),
    +    'gray69': (176, 176, 176),
    +    'gray7': (18, 18, 18),
    +    'gray70': (179, 179, 179),
    +    'gray71': (181, 181, 181),
    +    'gray72': (184, 184, 184),
    +    'gray73': (186, 186, 186),
    +    'gray74': (189, 189, 189),
    +    'gray75': (191, 191, 191),
    +    'gray76': (194, 194, 194),
    +    'gray77': (196, 196, 196),
    +    'gray78': (199, 199, 199),
    +    'gray79': (201, 201, 201),
    +    'gray8': (20, 20, 20),
    +    'gray80': (204, 204, 204),
    +    'gray81': (207, 207, 207),
    +    'gray82': (209, 209, 209),
    +    'gray83': (212, 212, 212),
    +    'gray84': (214, 214, 214),
    +    'gray85': (217, 217, 217),
    +    'gray86': (219, 219, 219),
    +    'gray87': (222, 222, 222),
    +    'gray88': (224, 224, 224),
    +    'gray89': (227, 227, 227),
    +    'gray9': (23, 23, 23),
    +    'gray90': (229, 229, 229),
    +    'gray91': (232, 232, 232),
    +    'gray92': (235, 235, 235),
    +    'gray93': (237, 237, 237),
    +    'gray94': (240, 240, 240),
    +    'gray95': (242, 242, 242),
    +    'gray96': (245, 245, 245),
    +    'gray97': (247, 247, 247),
    +    'gray98': (250, 250, 250),
    +    'gray99': (252, 252, 252),
    +    'green': (0, 255, 0),
    +    'green1': (0, 255, 0),
    +    'green2': (0, 238, 0),
    +    'green3': (0, 205, 0),
    +    'green4': (0, 139, 0),
    +    'greenyellow': (173, 255, 47),
    +    'grey': (192, 192, 192),
    +    'grey0': (0, 0, 0),
    +    'grey1': (3, 3, 3),
    +    'grey10': (26, 26, 26),
    +    'grey100': (255, 255, 255),
    +    'grey11': (28, 28, 28),
    +    'grey12': (31, 31, 31),
    +    'grey13': (33, 33, 33),
    +    'grey14': (36, 36, 36),
    +    'grey15': (38, 38, 38),
    +    'grey16': (41, 41, 41),
    +    'grey17': (43, 43, 43),
    +    'grey18': (46, 46, 46),
    +    'grey19': (48, 48, 48),
    +    'grey2': (5, 5, 5),
    +    'grey20': (51, 51, 51),
    +    'grey21': (54, 54, 54),
    +    'grey22': (56, 56, 56),
    +    'grey23': (59, 59, 59),
    +    'grey24': (61, 61, 61),
    +    'grey25': (64, 64, 64),
    +    'grey26': (66, 66, 66),
    +    'grey27': (69, 69, 69),
    +    'grey28': (71, 71, 71),
    +    'grey29': (74, 74, 74),
    +    'grey3': (8, 8, 8),
    +    'grey30': (77, 77, 77),
    +    'grey31': (79, 79, 79),
    +    'grey32': (82, 82, 82),
    +    'grey33': (84, 84, 84),
    +    'grey34': (87, 87, 87),
    +    'grey35': (89, 89, 89),
    +    'grey36': (92, 92, 92),
    +    'grey37': (94, 94, 94),
    +    'grey38': (97, 97, 97),
    +    'grey39': (99, 99, 99),
    +    'grey4': (10, 10, 10),
    +    'grey40': (102, 102, 102),
    +    'grey41': (105, 105, 105),
    +    'grey42': (107, 107, 107),
    +    'grey43': (110, 110, 110),
    +    'grey44': (112, 112, 112),
    +    'grey45': (115, 115, 115),
    +    'grey46': (117, 117, 117),
    +    'grey47': (120, 120, 120),
    +    'grey48': (122, 122, 122),
    +    'grey49': (125, 125, 125),
    +    'grey5': (13, 13, 13),
    +    'grey50': (127, 127, 127),
    +    'grey51': (130, 130, 130),
    +    'grey52': (133, 133, 133),
    +    'grey53': (135, 135, 135),
    +    'grey54': (138, 138, 138),
    +    'grey55': (140, 140, 140),
    +    'grey56': (143, 143, 143),
    +    'grey57': (145, 145, 145),
    +    'grey58': (148, 148, 148),
    +    'grey59': (150, 150, 150),
    +    'grey6': (15, 15, 15),
    +    'grey60': (153, 153, 153),
    +    'grey61': (156, 156, 156),
    +    'grey62': (158, 158, 158),
    +    'grey63': (161, 161, 161),
    +    'grey64': (163, 163, 163),
    +    'grey65': (166, 166, 166),
    +    'grey66': (168, 168, 168),
    +    'grey67': (171, 171, 171),
    +    'grey68': (173, 173, 173),
    +    'grey69': (176, 176, 176),
    +    'grey7': (18, 18, 18),
    +    'grey70': (179, 179, 179),
    +    'grey71': (181, 181, 181),
    +    'grey72': (184, 184, 184),
    +    'grey73': (186, 186, 186),
    +    'grey74': (189, 189, 189),
    +    'grey75': (191, 191, 191),
    +    'grey76': (194, 194, 194),
    +    'grey77': (196, 196, 196),
    +    'grey78': (199, 199, 199),
    +    'grey79': (201, 201, 201),
    +    'grey8': (20, 20, 20),
    +    'grey80': (204, 204, 204),
    +    'grey81': (207, 207, 207),
    +    'grey82': (209, 209, 209),
    +    'grey83': (212, 212, 212),
    +    'grey84': (214, 214, 214),
    +    'grey85': (217, 217, 217),
    +    'grey86': (219, 219, 219),
    +    'grey87': (222, 222, 222),
    +    'grey88': (224, 224, 224),
    +    'grey89': (227, 227, 227),
    +    'grey9': (23, 23, 23),
    +    'grey90': (229, 229, 229),
    +    'grey91': (232, 232, 232),
    +    'grey92': (235, 235, 235),
    +    'grey93': (237, 237, 237),
    +    'grey94': (240, 240, 240),
    +    'grey95': (242, 242, 242),
    +    'grey96': (245, 245, 245),
    +    'grey97': (247, 247, 247),
    +    'grey98': (250, 250, 250),
    +    'grey99': (252, 252, 252),
    +    'honeydew': (240, 255, 240),
    +    'honeydew1': (240, 255, 240),
    +    'honeydew2': (224, 238, 224),
    +    'honeydew3': (193, 205, 193),
    +    'honeydew4': (131, 139, 131),
    +    'hotpink': (255, 105, 180),
    +    'hotpink1': (255, 110, 180),
    +    'hotpink2': (238, 106, 167),
    +    'hotpink3': (205, 96, 144),
    +    'hotpink4': (139, 58, 98),
    +    'indianred': (205, 92, 92),
    +    'indianred1': (255, 106, 106),
    +    'indianred2': (238, 99, 99),
    +    'indianred3': (205, 85, 85),
    +    'indianred4': (139, 58, 58),
    +    'indigo': (75, 0, 130),
    +    'invis': (255, 255, 254),
    +    'ivory': (255, 255, 240),
    +    'ivory1': (255, 255, 240),
    +    'ivory2': (238, 238, 224),
    +    'ivory3': (205, 205, 193),
    +    'ivory4': (139, 139, 131),
    +    'khaki': (240, 230, 140),
    +    'khaki1': (255, 246, 143),
    +    'khaki2': (238, 230, 133),
    +    'khaki3': (205, 198, 115),
    +    'khaki4': (139, 134, 78),
    +    'lavender': (230, 230, 250),
    +    'lavenderblush': (255, 240, 245),
    +    'lavenderblush1': (255, 240, 245),
    +    'lavenderblush2': (238, 224, 229),
    +    'lavenderblush3': (205, 193, 197),
    +    'lavenderblush4': (139, 131, 134),
    +    'lawngreen': (124, 252, 0),
    +    'lemonchiffon': (255, 250, 205),
    +    'lemonchiffon1': (255, 250, 205),
    +    'lemonchiffon2': (238, 233, 191),
    +    'lemonchiffon3': (205, 201, 165),
    +    'lemonchiffon4': (139, 137, 112),
    +    'lightblue': (173, 216, 230),
    +    'lightblue1': (191, 239, 255),
    +    'lightblue2': (178, 223, 238),
    +    'lightblue3': (154, 192, 205),
    +    'lightblue4': (104, 131, 139),
    +    'lightcoral': (240, 128, 128),
    +    'lightcyan': (224, 255, 255),
    +    'lightcyan1': (224, 255, 255),
    +    'lightcyan2': (209, 238, 238),
    +    'lightcyan3': (180, 205, 205),
    +    'lightcyan4': (122, 139, 139),
    +    'lightgoldenrod': (238, 221, 130),
    +    'lightgoldenrod1': (255, 236, 139),
    +    'lightgoldenrod2': (238, 220, 130),
    +    'lightgoldenrod3': (205, 190, 112),
    +    'lightgoldenrod4': (139, 129, 76),
    +    'lightgoldenrodyellow': (250, 250, 210),
    +    'lightgray': (211, 211, 211),
    +    'lightgrey': (211, 211, 211),
    +    'lightpink': (255, 182, 193),
    +    'lightpink1': (255, 174, 185),
    +    'lightpink2': (238, 162, 173),
    +    'lightpink3': (205, 140, 149),
    +    'lightpink4': (139, 95, 101),
    +    'lightsalmon': (255, 160, 122),
    +    'lightsalmon1': (255, 160, 122),
    +    'lightsalmon2': (238, 149, 114),
    +    'lightsalmon3': (205, 129, 98),
    +    'lightsalmon4': (139, 87, 66),
    +    'lightseagreen': (32, 178, 170),
    +    'lightskyblue': (135, 206, 250),
    +    'lightskyblue1': (176, 226, 255),
    +    'lightskyblue2': (164, 211, 238),
    +    'lightskyblue3': (141, 182, 205),
    +    'lightskyblue4': (96, 123, 139),
    +    'lightslateblue': (132, 112, 255),
    +    'lightslategray': (119, 136, 153),
    +    'lightslategrey': (119, 136, 153),
    +    'lightsteelblue': (176, 196, 222),
    +    'lightsteelblue1': (202, 225, 255),
    +    'lightsteelblue2': (188, 210, 238),
    +    'lightsteelblue3': (162, 181, 205),
    +    'lightsteelblue4': (110, 123, 139),
    +    'lightyellow': (255, 255, 224),
    +    'lightyellow1': (255, 255, 224),
    +    'lightyellow2': (238, 238, 209),
    +    'lightyellow3': (205, 205, 180),
    +    'lightyellow4': (139, 139, 122),
    +    'limegreen': (50, 205, 50),
    +    'linen': (250, 240, 230),
    +    'magenta': (255, 0, 255),
    +    'magenta1': (255, 0, 255),
    +    'magenta2': (238, 0, 238),
    +    'magenta3': (205, 0, 205),
    +    'magenta4': (139, 0, 139),
    +    'maroon': (176, 48, 96),
    +    'maroon1': (255, 52, 179),
    +    'maroon2': (238, 48, 167),
    +    'maroon3': (205, 41, 144),
    +    'maroon4': (139, 28, 98),
    +    'mediumaquamarine': (102, 205, 170),
    +    'mediumblue': (0, 0, 205),
    +    'mediumorchid': (186, 85, 211),
    +    'mediumorchid1': (224, 102, 255),
    +    'mediumorchid2': (209, 95, 238),
    +    'mediumorchid3': (180, 82, 205),
    +    'mediumorchid4': (122, 55, 139),
    +    'mediumpurple': (147, 112, 219),
    +    'mediumpurple1': (171, 130, 255),
    +    'mediumpurple2': (159, 121, 238),
    +    'mediumpurple3': (137, 104, 205),
    +    'mediumpurple4': (93, 71, 139),
    +    'mediumseagreen': (60, 179, 113),
    +    'mediumslateblue': (123, 104, 238),
    +    'mediumspringgreen': (0, 250, 154),
    +    'mediumturquoise': (72, 209, 204),
    +    'mediumvioletred': (199, 21, 133),
    +    'midnightblue': (25, 25, 112),
    +    'mintcream': (245, 255, 250),
    +    'mistyrose': (255, 228, 225),
    +    'mistyrose1': (255, 228, 225),
    +    'mistyrose2': (238, 213, 210),
    +    'mistyrose3': (205, 183, 181),
    +    'mistyrose4': (139, 125, 123),
    +    'moccasin': (255, 228, 181),
    +    'navajowhite': (255, 222, 173),
    +    'navajowhite1': (255, 222, 173),
    +    'navajowhite2': (238, 207, 161),
    +    'navajowhite3': (205, 179, 139),
    +    'navajowhite4': (139, 121, 94),
    +    'navy': (0, 0, 128),
    +    'navyblue': (0, 0, 128),
    +    'none': (255, 255, 254),
    +    'oldlace': (253, 245, 230),
    +    'olivedrab': (107, 142, 35),
    +    'olivedrab1': (192, 255, 62),
    +    'olivedrab2': (179, 238, 58),
    +    'olivedrab3': (154, 205, 50),
    +    'olivedrab4': (105, 139, 34),
    +    'orange': (255, 165, 0),
    +    'orange1': (255, 165, 0),
    +    'orange2': (238, 154, 0),
    +    'orange3': (205, 133, 0),
    +    'orange4': (139, 90, 0),
    +    'orangered': (255, 69, 0),
    +    'orangered1': (255, 69, 0),
    +    'orangered2': (238, 64, 0),
    +    'orangered3': (205, 55, 0),
    +    'orangered4': (139, 37, 0),
    +    'orchid': (218, 112, 214),
    +    'orchid1': (255, 131, 250),
    +    'orchid2': (238, 122, 233),
    +    'orchid3': (205, 105, 201),
    +    'orchid4': (139, 71, 137),
    +    'palegoldenrod': (238, 232, 170),
    +    'palegreen': (152, 251, 152),
    +    'palegreen1': (154, 255, 154),
    +    'palegreen2': (144, 238, 144),
    +    'palegreen3': (124, 205, 124),
    +    'palegreen4': (84, 139, 84),
    +    'paleturquoise': (175, 238, 238),
    +    'paleturquoise1': (187, 255, 255),
    +    'paleturquoise2': (174, 238, 238),
    +    'paleturquoise3': (150, 205, 205),
    +    'paleturquoise4': (102, 139, 139),
    +    'palevioletred': (219, 112, 147),
    +    'palevioletred1': (255, 130, 171),
    +    'palevioletred2': (238, 121, 159),
    +    'palevioletred3': (205, 104, 137),
    +    'palevioletred4': (139, 71, 93),
    +    'papayawhip': (255, 239, 213),
    +    'peachpuff': (255, 218, 185),
    +    'peachpuff1': (255, 218, 185),
    +    'peachpuff2': (238, 203, 173),
    +    'peachpuff3': (205, 175, 149),
    +    'peachpuff4': (139, 119, 101),
    +    'peru': (205, 133, 63),
    +    'pink': (255, 192, 203),
    +    'pink1': (255, 181, 197),
    +    'pink2': (238, 169, 184),
    +    'pink3': (205, 145, 158),
    +    'pink4': (139, 99, 108),
    +    'plum': (221, 160, 221),
    +    'plum1': (255, 187, 255),
    +    'plum2': (238, 174, 238),
    +    'plum3': (205, 150, 205),
    +    'plum4': (139, 102, 139),
    +    'powderblue': (176, 224, 230),
    +    'purple': (160, 32, 240),
    +    'purple1': (155, 48, 255),
    +    'purple2': (145, 44, 238),
    +    'purple3': (125, 38, 205),
    +    'purple4': (85, 26, 139),
    +    'red': (255, 0, 0),
    +    'red1': (255, 0, 0),
    +    'red2': (238, 0, 0),
    +    'red3': (205, 0, 0),
    +    'red4': (139, 0, 0),
    +    'rosybrown': (188, 143, 143),
    +    'rosybrown1': (255, 193, 193),
    +    'rosybrown2': (238, 180, 180),
    +    'rosybrown3': (205, 155, 155),
    +    'rosybrown4': (139, 105, 105),
    +    'royalblue': (65, 105, 225),
    +    'royalblue1': (72, 118, 255),
    +    'royalblue2': (67, 110, 238),
    +    'royalblue3': (58, 95, 205),
    +    'royalblue4': (39, 64, 139),
    +    'saddlebrown': (139, 69, 19),
    +    'salmon': (250, 128, 114),
    +    'salmon1': (255, 140, 105),
    +    'salmon2': (238, 130, 98),
    +    'salmon3': (205, 112, 84),
    +    'salmon4': (139, 76, 57),
    +    'sandybrown': (244, 164, 96),
    +    'seagreen': (46, 139, 87),
    +    'seagreen1': (84, 255, 159),
    +    'seagreen2': (78, 238, 148),
    +    'seagreen3': (67, 205, 128),
    +    'seagreen4': (46, 139, 87),
    +    'seashell': (255, 245, 238),
    +    'seashell1': (255, 245, 238),
    +    'seashell2': (238, 229, 222),
    +    'seashell3': (205, 197, 191),
    +    'seashell4': (139, 134, 130),
    +    'sienna': (160, 82, 45),
    +    'sienna1': (255, 130, 71),
    +    'sienna2': (238, 121, 66),
    +    'sienna3': (205, 104, 57),
    +    'sienna4': (139, 71, 38),
    +    'skyblue': (135, 206, 235),
    +    'skyblue1': (135, 206, 255),
    +    'skyblue2': (126, 192, 238),
    +    'skyblue3': (108, 166, 205),
    +    'skyblue4': (74, 112, 139),
    +    'slateblue': (106, 90, 205),
    +    'slateblue1': (131, 111, 255),
    +    'slateblue2': (122, 103, 238),
    +    'slateblue3': (105, 89, 205),
    +    'slateblue4': (71, 60, 139),
    +    'slategray': (112, 128, 144),
    +    'slategray1': (198, 226, 255),
    +    'slategray2': (185, 211, 238),
    +    'slategray3': (159, 182, 205),
    +    'slategray4': (108, 123, 139),
    +    'slategrey': (112, 128, 144),
    +    'snow': (255, 250, 250),
    +    'snow1': (255, 250, 250),
    +    'snow2': (238, 233, 233),
    +    'snow3': (205, 201, 201),
    +    'snow4': (139, 137, 137),
    +    'springgreen': (0, 255, 127),
    +    'springgreen1': (0, 255, 127),
    +    'springgreen2': (0, 238, 118),
    +    'springgreen3': (0, 205, 102),
    +    'springgreen4': (0, 139, 69),
    +    'steelblue': (70, 130, 180),
    +    'steelblue1': (99, 184, 255),
    +    'steelblue2': (92, 172, 238),
    +    'steelblue3': (79, 148, 205),
    +    'steelblue4': (54, 100, 139),
    +    'tan': (210, 180, 140),
    +    'tan1': (255, 165, 79),
    +    'tan2': (238, 154, 73),
    +    'tan3': (205, 133, 63),
    +    'tan4': (139, 90, 43),
    +    'thistle': (216, 191, 216),
    +    'thistle1': (255, 225, 255),
    +    'thistle2': (238, 210, 238),
    +    'thistle3': (205, 181, 205),
    +    'thistle4': (139, 123, 139),
    +    'tomato': (255, 99, 71),
    +    'tomato1': (255, 99, 71),
    +    'tomato2': (238, 92, 66),
    +    'tomato3': (205, 79, 57),
    +    'tomato4': (139, 54, 38),
    +    'transparent': (255, 255, 254),
    +    'turquoise': (64, 224, 208),
    +    'turquoise1': (0, 245, 255),
    +    'turquoise2': (0, 229, 238),
    +    'turquoise3': (0, 197, 205),
    +    'turquoise4': (0, 134, 139),
    +    'violet': (238, 130, 238),
    +    'violetred': (208, 32, 144),
    +    'violetred1': (255, 62, 150),
    +    'violetred2': (238, 58, 140),
    +    'violetred3': (205, 50, 120),
    +    'violetred4': (139, 34, 82),
    +    'wheat': (245, 222, 179),
    +    'wheat1': (255, 231, 186),
    +    'wheat2': (238, 216, 174),
    +    'wheat3': (205, 186, 150),
    +    'wheat4': (139, 126, 102),
    +    'white': (255, 255, 255),
    +    'whitesmoke': (245, 245, 245),
    +    'yellow': (255, 255, 0),
    +    'yellow1': (255, 255, 0),
    +    'yellow2': (238, 238, 0),
    +    'yellow3': (205, 205, 0),
    +    'yellow4': (139, 139, 0),
    +    'yellowgreen': (154, 205, 50),
         }
     re_nonword=re.compile(r'([^0-9a-zA-Z_.]+)')
     re_linewidth=re.compile(r'setlinewidth\((\d+(\.\d*)?|\.\d+)\)')
    diff --git a/lib-python/2.7/distutils/command/build_ext.py b/lib-python/2.7/distutils/command/build_ext.py
    --- a/lib-python/2.7/distutils/command/build_ext.py
    +++ b/lib-python/2.7/distutils/command/build_ext.py
    @@ -188,7 +188,7 @@
                 # the 'libs' directory is for binary installs - we assume that
                 # must be the *native* platform.  But we don't really support
                 # cross-compiling via a binary install anyway, so we let it go.
    -            self.library_dirs.append(os.path.join(sys.exec_prefix, 'include'))
    +            self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
                 if self.debug:
                     self.build_temp = os.path.join(self.build_temp, "Debug")
                 else:
    @@ -687,13 +687,17 @@
             # the previous version of this code did.  This should work for
             # CPython too.  The point is that on PyPy with cpyext, the
             # config var 'SO' is just ".so" but we want to return
    -        # ".pypy-VERSION.so" instead.
    -        so_ext = _get_c_extension_suffix()
    +        # ".pypy-VERSION.so" instead.  Note a further tweak for cffi's
    +        # embedding mode: if EXT_SUFFIX is also defined, use that
    +        # directly.
    +        so_ext = get_config_var('EXT_SUFFIX')
             if so_ext is None:
    -            so_ext = get_config_var('SO')     # fall-back
    -        # extensions in debug_mode are named 'module_d.pyd' under windows
    -        if os.name == 'nt' and self.debug:
    -            so_ext = '_d.pyd'
    +            so_ext = _get_c_extension_suffix()
    +            if so_ext is None:
    +                so_ext = get_config_var('SO')     # fall-back
    +            # extensions in debug_mode are named 'module_d.pyd' under windows
    +            if os.name == 'nt' and self.debug:
    +                so_ext = '_d.pyd'
             return os.path.join(*ext_path) + so_ext
     
         def get_export_symbols (self, ext):
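    
    A condensed, stand-alone sketch of the suffix-selection order introduced above; the
    helpers here are fakes standing in for distutils' get_config_var and PyPy's
    _get_c_extension_suffix, and the example suffix value is made up:
    
        import os
    
        def get_config_var(name, _fake={'EXT_SUFFIX': None, 'SO': '.so'}):
            return _fake.get(name)                 # stand-in for the distutils lookup
    
        def _get_c_extension_suffix():
            return '.pypy-xx.so'                   # stand-in: PyPy's own extension suffix
    
        def pick_ext_suffix(debug=False):
            # 1. cffi embedding mode: a defined EXT_SUFFIX wins outright
            so_ext = get_config_var('EXT_SUFFIX')
            if so_ext is None:
                # 2. otherwise PyPy's ".pypy-VERSION.so" style suffix
                so_ext = _get_c_extension_suffix()
                if so_ext is None:
                    # 3. last resort: the plain 'SO' config var
                    so_ext = get_config_var('SO')
                # Windows debug builds keep the 'module_d.pyd' convention
                if os.name == 'nt' and debug:
                    so_ext = '_d.pyd'
            return so_ext
    
        print(pick_ext_suffix())                   # '.pypy-xx.so' with the fake values above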
    diff --git a/lib-python/2.7/pickle.py b/lib-python/2.7/pickle.py
    --- a/lib-python/2.7/pickle.py
    +++ b/lib-python/2.7/pickle.py
    @@ -1382,6 +1382,7 @@
     
     def decode_long(data):
         r"""Decode a long from a two's complement little-endian binary string.
    +    This is overriden on PyPy by a RPython version that has linear complexity.
     
         >>> decode_long('')
         0L
    @@ -1408,6 +1409,11 @@
             n -= 1L << (nbytes * 8)
         return n
     
    +try:
    +    from __pypy__ import decode_long
    +except ImportError:
    +    pass
    +
     # Shorthands
     
     try:
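    
    To make the contract concrete: decode_long() turns a little-endian two's-complement
    byte string into a Python long, and PyPy swaps in a linear-time version whenever
    __pypy__ is importable. An illustrative Python 2 reference (not the RPython
    implementation) plus the override pattern:
    
        def decode_long_reference(data):
            nbytes = len(data)
            if nbytes == 0:
                return 0L
            n = long(data[::-1].encode('hex'), 16)     # reverse to big-endian, then parse
            if ord(data[-1]) >= 0x80:                  # sign bit of the most significant byte
                n -= 1L << (nbytes * 8)
            return n
    
        try:
            from __pypy__ import decode_long           # linear-time RPython version on PyPy
        except ImportError:
            decode_long = decode_long_reference        # e.g. on CPython
    
        assert decode_long_reference('') == 0L
        assert decode_long_reference('\xff\x00') == 255L
        assert decode_long_reference('\x00\x80') == -32768L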
    diff --git a/lib-python/2.7/sysconfig.py b/lib-python/2.7/sysconfig.py
    --- a/lib-python/2.7/sysconfig.py
    +++ b/lib-python/2.7/sysconfig.py
    @@ -524,6 +524,13 @@
                 import _osx_support
                 _osx_support.customize_config_vars(_CONFIG_VARS)
     
    +        # PyPy:
    +        import imp
    +        for suffix, mode, type_ in imp.get_suffixes():
    +            if type_ == imp.C_EXTENSION:
    +                _CONFIG_VARS['SOABI'] = suffix.split('.')[1]
    +                break
    +
         if args:
             vals = []
             for name in args:
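    
    The SOABI derivation above, restated as a stand-alone sketch: take the first
    C-extension suffix that imp reports and keep its middle component (the suffix
    value used in the example call is hypothetical):
    
        import imp
    
        def soabi_from_suffixes(suffixes):
            # suffixes has the shape of imp.get_suffixes(): (suffix, mode, type_)
            for suffix, mode, type_ in suffixes:
                if type_ == imp.C_EXTENSION:
                    return suffix.split('.')[1]        # '.pypy-xx.so' -> 'pypy-xx'
            return None
    
        print(soabi_from_suffixes([('.pypy-xx.so', 'rb', imp.C_EXTENSION),
                                   ('.py', 'U', imp.PY_SOURCE)]))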
    diff --git a/lib-python/2.7/test/capath/0e4015b9.0 b/lib-python/2.7/test/capath/0e4015b9.0
    new file mode 100644
    --- /dev/null
    +++ b/lib-python/2.7/test/capath/0e4015b9.0
    @@ -0,0 +1,16 @@
    +-----BEGIN CERTIFICATE-----
    +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV
    +BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u
    +IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv
    +bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG
    +A1UEBhMCWFkxFzAVBgNVBAcMDkNhc3RsZSBBbnRocmF4MSMwIQYDVQQKDBpQeXRo
    +b24gU29mdHdhcmUgRm91bmRhdGlvbjEjMCEGA1UEAwwac2VsZi1zaWduZWQucHl0
    +aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ
    +Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm
    +Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv
    +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl
    +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN
    +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h
    +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515
    +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM=
    +-----END CERTIFICATE-----
    diff --git a/lib-python/2.7/test/capath/ce7b8643.0 b/lib-python/2.7/test/capath/ce7b8643.0
    new file mode 100644
    --- /dev/null
    +++ b/lib-python/2.7/test/capath/ce7b8643.0
    @@ -0,0 +1,16 @@
    +-----BEGIN CERTIFICATE-----
    +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV
    +BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u
    +IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv
    +bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG
    +A1UEBhMCWFkxFzAVBgNVBAcMDkNhc3RsZSBBbnRocmF4MSMwIQYDVQQKDBpQeXRo
    +b24gU29mdHdhcmUgRm91bmRhdGlvbjEjMCEGA1UEAwwac2VsZi1zaWduZWQucHl0
    +aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ
    +Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm
    +Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv
    +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl
    +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN
    +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h
    +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515
    +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM=
    +-----END CERTIFICATE-----
    diff --git a/lib-python/2.7/test/https_svn_python_org_root.pem b/lib-python/2.7/test/https_svn_python_org_root.pem
    deleted file mode 100644
    --- a/lib-python/2.7/test/https_svn_python_org_root.pem
    +++ /dev/null
    @@ -1,41 +0,0 @@
    ------BEGIN CERTIFICATE-----
    -MIIHPTCCBSWgAwIBAgIBADANBgkqhkiG9w0BAQQFADB5MRAwDgYDVQQKEwdSb290
    -IENBMR4wHAYDVQQLExVodHRwOi8vd3d3LmNhY2VydC5vcmcxIjAgBgNVBAMTGUNB
    -IENlcnQgU2lnbmluZyBBdXRob3JpdHkxITAfBgkqhkiG9w0BCQEWEnN1cHBvcnRA
    -Y2FjZXJ0Lm9yZzAeFw0wMzAzMzAxMjI5NDlaFw0zMzAzMjkxMjI5NDlaMHkxEDAO
    -BgNVBAoTB1Jvb3QgQ0ExHjAcBgNVBAsTFWh0dHA6Ly93d3cuY2FjZXJ0Lm9yZzEi
    -MCAGA1UEAxMZQ0EgQ2VydCBTaWduaW5nIEF1dGhvcml0eTEhMB8GCSqGSIb3DQEJ
    -ARYSc3VwcG9ydEBjYWNlcnQub3JnMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC
    -CgKCAgEAziLA4kZ97DYoB1CW8qAzQIxL8TtmPzHlawI229Z89vGIj053NgVBlfkJ
    -8BLPRoZzYLdufujAWGSuzbCtRRcMY/pnCujW0r8+55jE8Ez64AO7NV1sId6eINm6
    -zWYyN3L69wj1x81YyY7nDl7qPv4coRQKFWyGhFtkZip6qUtTefWIonvuLwphK42y
    -fk1WpRPs6tqSnqxEQR5YYGUFZvjARL3LlPdCfgv3ZWiYUQXw8wWRBB0bF4LsyFe7
    -w2t6iPGwcswlWyCR7BYCEo8y6RcYSNDHBS4CMEK4JZwFaz+qOqfrU0j36NK2B5jc
    -G8Y0f3/JHIJ6BVgrCFvzOKKrF11myZjXnhCLotLddJr3cQxyYN/Nb5gznZY0dj4k
    -epKwDpUeb+agRThHqtdB7Uq3EvbXG4OKDy7YCbZZ16oE/9KTfWgu3YtLq1i6L43q
    -laegw1SJpfvbi1EinbLDvhG+LJGGi5Z4rSDTii8aP8bQUWWHIbEZAWV/RRyH9XzQ
    -QUxPKZgh/TMfdQwEUfoZd9vUFBzugcMd9Zi3aQaRIt0AUMyBMawSB3s42mhb5ivU
    -fslfrejrckzzAeVLIL+aplfKkQABi6F1ITe1Yw1nPkZPcCBnzsXWWdsC4PDSy826
    -YreQQejdIOQpvGQpQsgi3Hia/0PsmBsJUUtaWsJx8cTLc6nloQsCAwEAAaOCAc4w
    -ggHKMB0GA1UdDgQWBBQWtTIb1Mfz4OaO873SsDrusjkY0TCBowYDVR0jBIGbMIGY
    -gBQWtTIb1Mfz4OaO873SsDrusjkY0aF9pHsweTEQMA4GA1UEChMHUm9vdCBDQTEe
    -MBwGA1UECxMVaHR0cDovL3d3dy5jYWNlcnQub3JnMSIwIAYDVQQDExlDQSBDZXJ0
    -IFNpZ25pbmcgQXV0aG9yaXR5MSEwHwYJKoZIhvcNAQkBFhJzdXBwb3J0QGNhY2Vy
    -dC5vcmeCAQAwDwYDVR0TAQH/BAUwAwEB/zAyBgNVHR8EKzApMCegJaAjhiFodHRw
    -czovL3d3dy5jYWNlcnQub3JnL3Jldm9rZS5jcmwwMAYJYIZIAYb4QgEEBCMWIWh0
    -dHBzOi8vd3d3LmNhY2VydC5vcmcvcmV2b2tlLmNybDA0BglghkgBhvhCAQgEJxYl
    -aHR0cDovL3d3dy5jYWNlcnQub3JnL2luZGV4LnBocD9pZD0xMDBWBglghkgBhvhC
    -AQ0ESRZHVG8gZ2V0IHlvdXIgb3duIGNlcnRpZmljYXRlIGZvciBGUkVFIGhlYWQg
    -b3ZlciB0byBodHRwOi8vd3d3LmNhY2VydC5vcmcwDQYJKoZIhvcNAQEEBQADggIB
    -ACjH7pyCArpcgBLKNQodgW+JapnM8mgPf6fhjViVPr3yBsOQWqy1YPaZQwGjiHCc
    -nWKdpIevZ1gNMDY75q1I08t0AoZxPuIrA2jxNGJARjtT6ij0rPtmlVOKTV39O9lg
    -18p5aTuxZZKmxoGCXJzN600BiqXfEVWqFcofN8CCmHBh22p8lqOOLlQ+TyGpkO/c
    -gr/c6EWtTZBzCDyUZbAEmXZ/4rzCahWqlwQ3JNgelE5tDlG+1sSPypZt90Pf6DBl
    -Jzt7u0NDY8RD97LsaMzhGY4i+5jhe1o+ATc7iwiwovOVThrLm82asduycPAtStvY
    -sONvRUgzEv/+PDIqVPfE94rwiCPCR/5kenHA0R6mY7AHfqQv0wGP3J8rtsYIqQ+T
    -SCX8Ev2fQtzzxD72V7DX3WnRBnc0CkvSyqD/HMaMyRa+xMwyN2hzXwj7UfdJUzYF
    -CpUCTPJ5GhD22Dp1nPMd8aINcGeGG7MW9S/lpOt5hvk9C8JzC6WZrG/8Z7jlLwum
    -GCSNe9FINSkYQKyTYOGWhlC0elnYjyELn8+CkcY7v2vcB5G5l1YjqrZslMZIBjzk
    -zk6q5PYvCdxTby78dOs6Y5nCpqyJvKeyRKANihDjbPIky/qbn3BHLt4Ui9SyIAmW
    -omTxJBzcoTWcFbLUvFUufQb1nA5V9FrWk9p2rSVzTMVD
    ------END CERTIFICATE-----
    diff --git a/lib-python/2.7/test/selfsigned_pythontestdotnet.pem b/lib-python/2.7/test/selfsigned_pythontestdotnet.pem
    --- a/lib-python/2.7/test/selfsigned_pythontestdotnet.pem
    +++ b/lib-python/2.7/test/selfsigned_pythontestdotnet.pem
    @@ -1,5 +1,5 @@
     -----BEGIN CERTIFICATE-----
    -MIIChzCCAfCgAwIBAgIJAKGU95wKR8pSMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV
    +MIIClTCCAf6gAwIBAgIJAKGU95wKR8pTMA0GCSqGSIb3DQEBBQUAMHAxCzAJBgNV
     BAYTAlhZMRcwFQYDVQQHDA5DYXN0bGUgQW50aHJheDEjMCEGA1UECgwaUHl0aG9u
     IFNvZnR3YXJlIEZvdW5kYXRpb24xIzAhBgNVBAMMGnNlbGYtc2lnbmVkLnB5dGhv
     bnRlc3QubmV0MB4XDTE0MTEwMjE4MDkyOVoXDTI0MTAzMDE4MDkyOVowcDELMAkG
    @@ -8,9 +8,9 @@
     aG9udGVzdC5uZXQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANDXQXW9tjyZ
     Xt0Iv2tLL1+jinr4wGg36ioLDLFkMf+2Y1GL0v0BnKYG4N1OKlAU15LXGeGer8vm
     Sv/yIvmdrELvhAbbo3w4a9TMYQA4XkIVLdvu3mvNOAet+8PMJxn26dbDhG809ALv
    -EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjKTAnMCUGA1UdEQQeMByCGnNl
    -bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MA0GCSqGSIb3DQEBBQUAA4GBAIOXmdtM
    -eG9qzP9TiXW/Gc/zI4cBfdCpC+Y4gOfC9bQUC7hefix4iO3+iZjgy3X/FaRxUUoV
    -HKiXcXIaWqTSUWp45cSh0MbwZXudp6JIAptzdAhvvCrPKeC9i9GvxsPD4LtDAL97
    -vSaxQBezA7hdxZd90/EeyMgVZgAnTCnvAWX9
    +EHY57lQsBS3G59RZyBPVqAqmImWNJnVzAgMBAAGjNzA1MCUGA1UdEQQeMByCGnNl
    +bGYtc2lnbmVkLnB5dGhvbnRlc3QubmV0MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcN
    +AQEFBQADgYEAIuzAhgMouJpNdf3URCHIineyoSt6WK/9+eyUcjlKOrDoXNZaD72h
    +TXMeKYoWvJyVcSLKL8ckPtDobgP2OTt0UkyAaj0n+ZHaqq1lH2yVfGUA1ILJv515
    +C8BqbvVZuqm3i7ygmw3bqE/lYMgOrYtXXnqOrz6nvsE6Yc9V9rFflOM=
     -----END CERTIFICATE-----
    diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py
    --- a/lib-python/2.7/test/test_ssl.py
    +++ b/lib-python/2.7/test/test_ssl.py
    @@ -57,7 +57,8 @@
     SIGNED_CERTFILE2 = data_file("keycert4.pem")
     SIGNING_CA = data_file("pycacert.pem")
     
    -SVN_PYTHON_ORG_ROOT_CERT = data_file("https_svn_python_org_root.pem")
    +REMOTE_HOST = "self-signed.pythontest.net"
    +REMOTE_ROOT_CERT = data_file("selfsigned_pythontestdotnet.pem")
     
     EMPTYCERT = data_file("nullcert.pem")
     BADCERT = data_file("badcert.pem")
    @@ -244,7 +245,7 @@
             self.assertEqual(p['subjectAltName'], san)
     
         def test_DER_to_PEM(self):
    -        with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f:
    +        with open(CAFILE_CACERT, 'r') as f:
                 pem = f.read()
             d1 = ssl.PEM_cert_to_DER_cert(pem)
             p2 = ssl.DER_cert_to_PEM_cert(d1)
    @@ -792,7 +793,7 @@
             # Mismatching key and cert
             ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
             with self.assertRaisesRegexp(ssl.SSLError, "key values mismatch"):
    -            ctx.load_cert_chain(SVN_PYTHON_ORG_ROOT_CERT, ONLYKEY)
    +            ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
             # Password protected key and cert
             ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
             ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
    @@ -1013,7 +1014,7 @@
             ctx.load_verify_locations(CERTFILE)
             self.assertEqual(ctx.cert_store_stats(),
                 {'x509_ca': 0, 'crl': 0, 'x509': 1})
    -        ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
    +        ctx.load_verify_locations(CAFILE_CACERT)
             self.assertEqual(ctx.cert_store_stats(),
                 {'x509_ca': 1, 'crl': 0, 'x509': 2})
     
    @@ -1023,8 +1024,8 @@
             # CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
             ctx.load_verify_locations(CERTFILE)
             self.assertEqual(ctx.get_ca_certs(), [])
    -        # but SVN_PYTHON_ORG_ROOT_CERT is a CA cert
    -        ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
    +        # but CAFILE_CACERT is a CA cert
    +        ctx.load_verify_locations(CAFILE_CACERT)
             self.assertEqual(ctx.get_ca_certs(),
                 [{'issuer': ((('organizationName', 'Root CA'),),
                              (('organizationalUnitName', 'http://www.cacert.org'),),
    @@ -1040,7 +1041,7 @@
                               (('emailAddress', 'support at cacert.org'),)),
                   'version': 3}])
     
    -        with open(SVN_PYTHON_ORG_ROOT_CERT) as f:
    +        with open(CAFILE_CACERT) as f:
                 pem = f.read()
             der = ssl.PEM_cert_to_DER_cert(pem)
             self.assertEqual(ctx.get_ca_certs(True), [der])
    @@ -1215,11 +1216,11 @@
     class NetworkedTests(unittest.TestCase):
     
         def test_connect(self):
    -        with support.transient_internet("svn.python.org"):
    +        with support.transient_internet(REMOTE_HOST):
                 s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                     cert_reqs=ssl.CERT_NONE)
                 try:
    -                s.connect(("svn.python.org", 443))
    +                s.connect((REMOTE_HOST, 443))
                     self.assertEqual({}, s.getpeercert())
                 finally:
                     s.close()
    @@ -1228,27 +1229,27 @@
                 s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                     cert_reqs=ssl.CERT_REQUIRED)
                 self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed",
    -                                   s.connect, ("svn.python.org", 443))
    +                                   s.connect, (REMOTE_HOST, 443))
                 s.close()
     
                 # this should succeed because we specify the root cert
                 s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                     cert_reqs=ssl.CERT_REQUIRED,
    -                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
    +                                ca_certs=REMOTE_ROOT_CERT)
                 try:
    -                s.connect(("svn.python.org", 443))
    +                s.connect((REMOTE_HOST, 443))
                     self.assertTrue(s.getpeercert())
                 finally:
                     s.close()
     
         def test_connect_ex(self):
             # Issue #11326: check connect_ex() implementation
    -        with support.transient_internet("svn.python.org"):
    +        with support.transient_internet(REMOTE_HOST):
                 s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                     cert_reqs=ssl.CERT_REQUIRED,
    -                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
    +                                ca_certs=REMOTE_ROOT_CERT)
                 try:
    -                self.assertEqual(0, s.connect_ex(("svn.python.org", 443)))
    +                self.assertEqual(0, s.connect_ex((REMOTE_HOST, 443)))
                     self.assertTrue(s.getpeercert())
                 finally:
                     s.close()
    @@ -1256,14 +1257,14 @@
         def test_non_blocking_connect_ex(self):
             # Issue #11326: non-blocking connect_ex() should allow handshake
             # to proceed after the socket gets ready.
    -        with support.transient_internet("svn.python.org"):
    +        with support.transient_internet(REMOTE_HOST):
                 s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                     cert_reqs=ssl.CERT_REQUIRED,
    -                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
    +                                ca_certs=REMOTE_ROOT_CERT,
                                     do_handshake_on_connect=False)
                 try:
                     s.setblocking(False)
    -                rc = s.connect_ex(('svn.python.org', 443))
    +                rc = s.connect_ex((REMOTE_HOST, 443))
                     # EWOULDBLOCK under Windows, EINPROGRESS elsewhere
                     self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
                     # Wait for connect to finish
    @@ -1285,58 +1286,62 @@
         def test_timeout_connect_ex(self):
             # Issue #12065: on a timeout, connect_ex() should return the original
             # errno (mimicking the behaviour of non-SSL sockets).
    -        with support.transient_internet("svn.python.org"):
    +        with support.transient_internet(REMOTE_HOST):
                 s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                     cert_reqs=ssl.CERT_REQUIRED,
    -                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
    +                                ca_certs=REMOTE_ROOT_CERT,
                                     do_handshake_on_connect=False)
                 try:
                     s.settimeout(0.0000001)
    -                rc = s.connect_ex(('svn.python.org', 443))
    +                rc = s.connect_ex((REMOTE_HOST, 443))
                     if rc == 0:
    -                    self.skipTest("svn.python.org responded too quickly")
    +                    self.skipTest("REMOTE_HOST responded too quickly")
                     self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
                 finally:
                     s.close()
     
         def test_connect_ex_error(self):
    -        with support.transient_internet("svn.python.org"):
    +        with support.transient_internet(REMOTE_HOST):
                 s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                     cert_reqs=ssl.CERT_REQUIRED,
    -                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
    +                                ca_certs=REMOTE_ROOT_CERT)
                 try:
    -                rc = s.connect_ex(("svn.python.org", 444))
    +                rc = s.connect_ex((REMOTE_HOST, 444))
                     # Issue #19919: Windows machines or VMs hosted on Windows
                     # machines sometimes return EWOULDBLOCK.
    -                self.assertIn(rc, (errno.ECONNREFUSED, errno.EWOULDBLOCK))
    +                errors = (
    +                    errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
    +                    errno.EWOULDBLOCK,
    +                )
    +                self.assertIn(rc, errors)
                 finally:
                     s.close()
     
         def test_connect_with_context(self):
    -        with support.transient_internet("svn.python.org"):
    +        with support.transient_internet(REMOTE_HOST):
                 # Same as test_connect, but with a separately created context
                 ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                 s = ctx.wrap_socket(socket.socket(socket.AF_INET))
    -            s.connect(("svn.python.org", 443))
    +            s.connect((REMOTE_HOST, 443))
                 try:
                     self.assertEqual({}, s.getpeercert())
                 finally:
                     s.close()
                 # Same with a server hostname
                 s = ctx.wrap_socket(socket.socket(socket.AF_INET),
    -                                server_hostname="svn.python.org")
    -            s.connect(("svn.python.org", 443))
    +                                server_hostname=REMOTE_HOST)
    +            s.connect((REMOTE_HOST, 443))
                 s.close()
                 # This should fail because we have no verification certs
                 ctx.verify_mode = ssl.CERT_REQUIRED
                 s = ctx.wrap_socket(socket.socket(socket.AF_INET))
                 self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed",
    -                                    s.connect, ("svn.python.org", 443))
    +                                    s.connect, (REMOTE_HOST, 443))
                 s.close()
                 # This should succeed because we specify the root cert
    -            ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
    +            ctx.load_verify_locations(REMOTE_ROOT_CERT)
                 s = ctx.wrap_socket(socket.socket(socket.AF_INET))
    -            s.connect(("svn.python.org", 443))
    +            s.connect((REMOTE_HOST, 443))
                 try:
                     cert = s.getpeercert()
                     self.assertTrue(cert)
    @@ -1349,12 +1354,12 @@
             # OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
             # contain both versions of each certificate (same content, different
             # filename) for this test to be portable across OpenSSL releases.
    -        with support.transient_internet("svn.python.org"):
    +        with support.transient_internet(REMOTE_HOST):
                 ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                 ctx.verify_mode = ssl.CERT_REQUIRED
                 ctx.load_verify_locations(capath=CAPATH)
                 s = ctx.wrap_socket(socket.socket(socket.AF_INET))
    -            s.connect(("svn.python.org", 443))
    +            s.connect((REMOTE_HOST, 443))
                 try:
                     cert = s.getpeercert()
                     self.assertTrue(cert)
    @@ -1365,7 +1370,7 @@
                 ctx.verify_mode = ssl.CERT_REQUIRED
                 ctx.load_verify_locations(capath=BYTES_CAPATH)
                 s = ctx.wrap_socket(socket.socket(socket.AF_INET))
    -            s.connect(("svn.python.org", 443))
    +            s.connect((REMOTE_HOST, 443))
                 try:
                     cert = s.getpeercert()
                     self.assertTrue(cert)
    @@ -1373,15 +1378,15 @@
                     s.close()
     
         def test_connect_cadata(self):
    -        with open(CAFILE_CACERT) as f:
    +        with open(REMOTE_ROOT_CERT) as f:
                 pem = f.read().decode('ascii')
             der = ssl.PEM_cert_to_DER_cert(pem)
    -        with support.transient_internet("svn.python.org"):
    +        with support.transient_internet(REMOTE_HOST):
                 ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                 ctx.verify_mode = ssl.CERT_REQUIRED
                 ctx.load_verify_locations(cadata=pem)
                 with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s:
    -                s.connect(("svn.python.org", 443))
    +                s.connect((REMOTE_HOST, 443))
                     cert = s.getpeercert()
                     self.assertTrue(cert)
     
    @@ -1390,7 +1395,7 @@
                 ctx.verify_mode = ssl.CERT_REQUIRED
                 ctx.load_verify_locations(cadata=der)
                 with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s:
    -                s.connect(("svn.python.org", 443))
    +                s.connect((REMOTE_HOST, 443))
                     cert = s.getpeercert()
                     self.assertTrue(cert)
     
    @@ -1399,9 +1404,9 @@
             # Issue #5238: creating a file-like object with makefile() shouldn't
             # delay closing the underlying "real socket" (here tested with its
             # file descriptor, hence skipping the test under Windows).
    -        with support.transient_internet("svn.python.org"):
    +        with support.transient_internet(REMOTE_HOST):
                 ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
    -            ss.connect(("svn.python.org", 443))
    +            ss.connect((REMOTE_HOST, 443))
                 fd = ss.fileno()
                 f = ss.makefile()
                 f.close()
    @@ -1415,9 +1420,9 @@
                 self.assertEqual(e.exception.errno, errno.EBADF)
     
         def test_non_blocking_handshake(self):
    -        with support.transient_internet("svn.python.org"):
    +        with support.transient_internet(REMOTE_HOST):
                 s = socket.socket(socket.AF_INET)
    -            s.connect(("svn.python.org", 443))
    +            s.connect((REMOTE_HOST, 443))
                 s.setblocking(False)
                 s = ssl.wrap_socket(s,
                                     cert_reqs=ssl.CERT_NONE,
    @@ -1460,12 +1465,12 @@
                     if support.verbose:
                         sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
     
    -        _test_get_server_certificate('svn.python.org', 443, SVN_PYTHON_ORG_ROOT_CERT)
    +        _test_get_server_certificate(REMOTE_HOST, 443, REMOTE_ROOT_CERT)
             if support.IPV6_ENABLED:
                 _test_get_server_certificate('ipv6.google.com', 443)
     
         def test_ciphers(self):
    -        remote = ("svn.python.org", 443)
    +        remote = (REMOTE_HOST, 443)
             with support.transient_internet(remote[0]):
                 with closing(ssl.wrap_socket(socket.socket(socket.AF_INET),
                                              cert_reqs=ssl.CERT_NONE, ciphers="ALL")) as s:
    @@ -1510,13 +1515,13 @@
     
         def test_get_ca_certs_capath(self):
             # capath certs are loaded on request
    -        with support.transient_internet("svn.python.org"):
    +        with support.transient_internet(REMOTE_HOST):
                 ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                 ctx.verify_mode = ssl.CERT_REQUIRED
                 ctx.load_verify_locations(capath=CAPATH)
                 self.assertEqual(ctx.get_ca_certs(), [])
                 s = ctx.wrap_socket(socket.socket(socket.AF_INET))
    -            s.connect(("svn.python.org", 443))
    +            s.connect((REMOTE_HOST, 443))
                 try:
                     cert = s.getpeercert()
                     self.assertTrue(cert)
    @@ -1527,12 +1532,12 @@
         @needs_sni
         def test_context_setget(self):
             # Check that the context of a connected socket can be replaced.
    -        with support.transient_internet("svn.python.org"):
    +        with support.transient_internet(REMOTE_HOST):
                 ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                 ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                 s = socket.socket(socket.AF_INET)
                 with closing(ctx1.wrap_socket(s)) as ss:
    -                ss.connect(("svn.python.org", 443))
    +                ss.connect((REMOTE_HOST, 443))
                     self.assertIs(ss.context, ctx1)
                     self.assertIs(ss._sslobj.context, ctx1)
                     ss.context = ctx2
    @@ -3026,7 +3031,7 @@
                 pass
     
         for filename in [
    -        CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, BYTES_CERTFILE,
    +        CERTFILE, REMOTE_ROOT_CERT, BYTES_CERTFILE,
             ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
             SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
             BADCERT, BADKEY, EMPTYCERT]:
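
The hunks above rely on module-level constants introduced earlier in the same patch but not quoted in this excerpt. For orientation, they are defined roughly as follows near the top of test_ssl.py (the values are an assumption based on the upstream CPython change, not taken from this diff; data_file() is the test suite's existing path helper):

    # assumed definitions, next to the other certificate paths
    REMOTE_HOST = "self-signed.pythontest.net"
    REMOTE_ROOT_CERT = data_file("selfsigned_pythontestdotnet.pem")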
    diff --git a/lib-python/2.7/xml/etree/ElementTree.py b/lib-python/2.7/xml/etree/ElementTree.py
    --- a/lib-python/2.7/xml/etree/ElementTree.py
    +++ b/lib-python/2.7/xml/etree/ElementTree.py
    @@ -1606,7 +1606,17 @@
                         pubid = pubid[1:-1]
                     if hasattr(self.target, "doctype"):
                         self.target.doctype(name, pubid, system[1:-1])
    -                elif self.doctype is not self._XMLParser__doctype:
    +                elif 1:  # XXX PyPy fix, used to be
    +                         #   elif self.doctype is not self._XMLParser__doctype:
    +                         # but that condition is always True on CPython, as far
    +                         # as I can tell: self._XMLParser__doctype always
    +                         # returns a fresh unbound method object.
    +                         # On PyPy, unbound and bound methods have stronger
    +                         # unicity guarantees: self._XMLParser__doctype
    +                         # can return the same unbound method object, in
    +                         # some cases making the test above incorrectly False.
    +                         # (My guess would be that the line above is a backport
    +                         # from Python 3.)
                         # warn about deprecated call
                         self._XMLParser__doctype(name, pubid, system[1:-1])
                         self.doctype(name, pubid, system[1:-1])
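
The method-identity difference described in the comment above is easy to reproduce outside ElementTree; a tiny illustration (not part of the changeset, Python 2 syntax):

    class A(object):
        def meth(self):
            pass

    a = A()
    # CPython builds a fresh bound-method object on every attribute access,
    # so this prints False; PyPy may hand out the very same object twice and
    # print True, which is what breaks the 'is not' test patched above.
    print a.meth is a.meth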
    diff --git a/lib_pypy/_pypy_testcapi.py b/lib_pypy/_pypy_testcapi.py
    --- a/lib_pypy/_pypy_testcapi.py
    +++ b/lib_pypy/_pypy_testcapi.py
    @@ -7,6 +7,7 @@
             content = fid.read()
         # from cffi's Verifier()
         key = '\x00'.join([sys.version[:3], content])
    +    key += 'cpyext-gc-support-2'   # this branch requires recompilation!
         if sys.version_info >= (3,):
             key = key.encode('utf-8')
         k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff)
    @@ -62,7 +63,7 @@
         if sys.platform == 'win32':
             # XXX pyconfig.h uses a pragma to link to the import library,
             #     which is currently python27.lib
    -        library = os.path.join(thisdir, '..', 'include', 'python27')
    +        library = os.path.join(thisdir, '..', 'libs', 'python27')
             if not os.path.exists(library + '.lib'):
                 # For a local translation or nightly build
                 library = os.path.join(thisdir, '..', 'pypy', 'goal', 'python27')
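
The extra key component matters because the name of the cached, compiled test module is derived from CRC32 checksums of that key, so any change to the key forces a rebuild. A rough sketch of the idea, simplified from the helper above (Python 2, hypothetical input strings):

    import binascii, sys

    def cache_tag(content, extra=''):
        # version-tagged key as in the helper above, plus an optional marker
        key = '\x00'.join([sys.version[:3], content]) + extra
        return hex(binascii.crc32(key[0::2]) & 0xffffffff)

    # appending the branch marker yields a different tag, hence a recompilation
    print cache_tag('int f(void);'), cache_tag('int f(void);', 'cpyext-gc-support-2')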
    diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py
    --- a/lib_pypy/cPickle.py
    +++ b/lib_pypy/cPickle.py
    @@ -167,7 +167,11 @@
             try:
                 key = ord(self.read(1))
                 while key != STOP:
    -                self.dispatch[key](self)
    +                try:
    +                    meth = self.dispatch[key]
    +                except KeyError:
    +                    raise UnpicklingError("invalid load key, %r." % chr(key))
    +                meth(self)
                     key = ord(self.read(1))
             except TypeError:
                 if self.read(1) == '':
    @@ -559,6 +563,7 @@
     
     def decode_long(data):
         r"""Decode a long from a two's complement little-endian binary string.
     +    This is overridden on PyPy by an RPython version that has linear complexity.
     
         >>> decode_long('')
         0L
    @@ -592,6 +597,11 @@
             n -= 1L << (nbytes << 3)
         return n
     
    +try:
    +    from __pypy__ import decode_long
    +except ImportError:
    +    pass
    +
     def load(f):
         return Unpickler(f).load()
     
    diff --git a/lib_pypy/cffi.egg-info/PKG-INFO b/lib_pypy/cffi.egg-info/PKG-INFO
    --- a/lib_pypy/cffi.egg-info/PKG-INFO
    +++ b/lib_pypy/cffi.egg-info/PKG-INFO
    @@ -1,6 +1,6 @@
     Metadata-Version: 1.1
     Name: cffi
    -Version: 1.4.2
    +Version: 1.5.2
     Summary: Foreign Function Interface for Python calling C code.
     Home-page: http://cffi.readthedocs.org
     Author: Armin Rigo, Maciej Fijalkowski
    diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
    --- a/lib_pypy/cffi/__init__.py
    +++ b/lib_pypy/cffi/__init__.py
    @@ -4,8 +4,8 @@
     from .api import FFI, CDefError, FFIError
     from .ffiplatform import VerificationError, VerificationMissing
     
    -__version__ = "1.4.2"
    -__version_info__ = (1, 4, 2)
    +__version__ = "1.5.2"
    +__version_info__ = (1, 5, 2)
     
     # The verifier module file names are based on the CRC32 of a string that
     # contains the following version number.  It may be older than __version__
    diff --git a/lib_pypy/cffi/_cffi_include.h b/lib_pypy/cffi/_cffi_include.h
    --- a/lib_pypy/cffi/_cffi_include.h
    +++ b/lib_pypy/cffi/_cffi_include.h
    @@ -146,8 +146,9 @@
         ((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23])
     #define _cffi_convert_array_from_object                                  \
         ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24])
    +#define _CFFI_CPIDX  25
     #define _cffi_call_python                                                \
    -    ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[25])
    +    ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[_CFFI_CPIDX])
     #define _CFFI_NUM_EXPORTS 26
     
     typedef struct _ctypedescr CTypeDescrObject;
    @@ -206,7 +207,8 @@
     /**********  end CPython-specific section  **********/
     #else
     _CFFI_UNUSED_FN
    -static void (*_cffi_call_python)(struct _cffi_externpy_s *, char *);
    +static void (*_cffi_call_python_org)(struct _cffi_externpy_s *, char *);
    +# define _cffi_call_python  _cffi_call_python_org
     #endif
     
     
    @@ -229,6 +231,12 @@
         ((got_nonpos) == (expected <= 0) &&                 \
          (got) == (unsigned long long)expected)
     
    +#ifdef MS_WIN32
    +# define _cffi_stdcall  __stdcall
    +#else
    +# define _cffi_stdcall  /* nothing */
    +#endif
    +
     #ifdef __cplusplus
     }
     #endif
    diff --git a/lib_pypy/cffi/_embedding.h b/lib_pypy/cffi/_embedding.h
    new file mode 100644
    --- /dev/null
    +++ b/lib_pypy/cffi/_embedding.h
    @@ -0,0 +1,517 @@
    +
    +/***** Support code for embedding *****/
    +
    +#if defined(_MSC_VER)
    +#  define CFFI_DLLEXPORT  __declspec(dllexport)
    +#elif defined(__GNUC__)
    +#  define CFFI_DLLEXPORT  __attribute__((visibility("default")))
    +#else
    +#  define CFFI_DLLEXPORT  /* nothing */
    +#endif
    +
    +
    +/* There are two global variables of type _cffi_call_python_fnptr:
    +
    +   * _cffi_call_python, which we declare just below, is the one called
    +     by ``extern "Python"`` implementations.
    +
    +   * _cffi_call_python_org, which on CPython is actually part of the
    +     _cffi_exports[] array, is the function pointer copied from
    +     _cffi_backend.
    +
    +   After initialization is complete, both are equal.  However, the
    +   first one remains equal to &_cffi_start_and_call_python until the
    +   very end of initialization, when we are (or should be) sure that
    +   concurrent threads also see a completely initialized world, and
    +   only then is it changed.
    +*/
    +#undef _cffi_call_python
    +typedef void (*_cffi_call_python_fnptr)(struct _cffi_externpy_s *, char *);
    +static void _cffi_start_and_call_python(struct _cffi_externpy_s *, char *);
    +static _cffi_call_python_fnptr _cffi_call_python = &_cffi_start_and_call_python;
    +
    +
    +#ifndef _MSC_VER
    +   /* --- Assuming a GCC not infinitely old --- */
    +# define cffi_compare_and_swap(l,o,n)  __sync_bool_compare_and_swap(l,o,n)
    +# define cffi_write_barrier()          __sync_synchronize()
    +# if !defined(__amd64__) && !defined(__x86_64__) &&   \
    +     !defined(__i386__) && !defined(__i386)
    +#   define cffi_read_barrier()         __sync_synchronize()
    +# else
    +#   define cffi_read_barrier()         (void)0
    +# endif
    +#else
    +   /* --- Windows threads version --- */
     +# include <windows.h>
    +# define cffi_compare_and_swap(l,o,n) \
    +                               (InterlockedCompareExchangePointer(l,n,o) == (o))
    +# define cffi_write_barrier()       InterlockedCompareExchange(&_cffi_dummy,0,0)
    +# define cffi_read_barrier()           (void)0
    +static volatile LONG _cffi_dummy;
    +#endif
    +
    +#ifdef WITH_THREAD
    +# ifndef _MSC_VER
     +#  include <pthread.h>
    +   static pthread_mutex_t _cffi_embed_startup_lock;
    +# else
    +   static CRITICAL_SECTION _cffi_embed_startup_lock;
    +# endif
    +  static char _cffi_embed_startup_lock_ready = 0;
    +#endif
    +
    +static void _cffi_acquire_reentrant_mutex(void)
    +{
    +    static void *volatile lock = NULL;
    +
    +    while (!cffi_compare_and_swap(&lock, NULL, (void *)1)) {
    +        /* should ideally do a spin loop instruction here, but
    +           hard to do it portably and doesn't really matter I
    +           think: pthread_mutex_init() should be very fast, and
    +           this is only run at start-up anyway. */
    +    }
    +
    +#ifdef WITH_THREAD
    +    if (!_cffi_embed_startup_lock_ready) {
    +# ifndef _MSC_VER
    +        pthread_mutexattr_t attr;
    +        pthread_mutexattr_init(&attr);
    +        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    +        pthread_mutex_init(&_cffi_embed_startup_lock, &attr);
    +# else
    +        InitializeCriticalSection(&_cffi_embed_startup_lock);
    +# endif
    +        _cffi_embed_startup_lock_ready = 1;
    +    }
    +#endif
    +
    +    while (!cffi_compare_and_swap(&lock, (void *)1, NULL))
    +        ;
    +
    +#ifndef _MSC_VER
    +    pthread_mutex_lock(&_cffi_embed_startup_lock);
    +#else
    +    EnterCriticalSection(&_cffi_embed_startup_lock);
    +#endif
    +}
    +
    +static void _cffi_release_reentrant_mutex(void)
    +{
    +#ifndef _MSC_VER
    +    pthread_mutex_unlock(&_cffi_embed_startup_lock);
    +#else
    +    LeaveCriticalSection(&_cffi_embed_startup_lock);
    +#endif
    +}
    +
    +
    +/**********  CPython-specific section  **********/
    +#ifndef PYPY_VERSION
    +
    +
    +#define _cffi_call_python_org  _cffi_exports[_CFFI_CPIDX]
    +
    +PyMODINIT_FUNC _CFFI_PYTHON_STARTUP_FUNC(void);   /* forward */
    +
    +static void _cffi_py_initialize(void)
    +{
    +    /* XXX use initsigs=0, which "skips initialization registration of
    +       signal handlers, which might be useful when Python is
    +       embedded" according to the Python docs.  But review and think
    +       if it should be a user-controllable setting.
    +
    +       XXX we should also give a way to write errors to a buffer
    +       instead of to stderr.
    +
    +       XXX if importing 'site' fails, CPython (any version) calls
    +       exit().  Should we try to work around this behavior here?
    +    */
    +    Py_InitializeEx(0);
    +}
    +
    +static int _cffi_initialize_python(void)
    +{
    +    /* This initializes Python, imports _cffi_backend, and then the
    +       present .dll/.so is set up as a CPython C extension module.
    +    */
    +    int result;
    +    PyGILState_STATE state;
    +    PyObject *pycode=NULL, *global_dict=NULL, *x;
    +
    +#if PY_MAJOR_VERSION >= 3
    +    /* see comments in _cffi_carefully_make_gil() about the
    +       Python2/Python3 difference 
    +    */
    +#else
    +    /* Acquire the GIL.  We have no threadstate here.  If Python is 
    +       already initialized, it is possible that there is already one
    +       existing for this thread, but it is not made current now.
    +    */
    +    PyEval_AcquireLock();
    +
    +    _cffi_py_initialize();
    +
    +    /* The Py_InitializeEx() sometimes made a threadstate for us, but
    +       not always.  Indeed Py_InitializeEx() could be called and do
    +       nothing.  So do we have a threadstate, or not?  We don't know,
    +       but we can replace it with NULL in all cases.
    +    */
    +    (void)PyThreadState_Swap(NULL);
    +
    +    /* Now we can release the GIL and re-acquire immediately using the
    +       logic of PyGILState(), which handles making or installing the
    +       correct threadstate.
    +    */
    +    PyEval_ReleaseLock();
    +#endif
    +    state = PyGILState_Ensure();
    +
    +    /* Call the initxxx() function from the present module.  It will
    +       create and initialize us as a CPython extension module, instead
    +       of letting the startup Python code do it---it might reimport
    +       the same .dll/.so and get maybe confused on some platforms.
    +       It might also have troubles locating the .dll/.so again for all
    +       I know.
    +    */
    +    (void)_CFFI_PYTHON_STARTUP_FUNC();
    +    if (PyErr_Occurred())
    +        goto error;
    +
    +    /* Now run the Python code provided to ffi.embedding_init_code().
    +     */
    +    pycode = Py_CompileString(_CFFI_PYTHON_STARTUP_CODE,
     +                              "<init code for '" _CFFI_MODULE_NAME "'>",
    +                              Py_file_input);
    +    if (pycode == NULL)
    +        goto error;
    +    global_dict = PyDict_New();
    +    if (global_dict == NULL)
    +        goto error;
    +    if (PyDict_SetItemString(global_dict, "__builtins__",
    +                             PyThreadState_GET()->interp->builtins) < 0)
    +        goto error;
    +    x = PyEval_EvalCode(
    +#if PY_MAJOR_VERSION < 3
    +                        (PyCodeObject *)
    +#endif
    +                        pycode, global_dict, global_dict);
    +    if (x == NULL)
    +        goto error;
    +    Py_DECREF(x);
    +
    +    /* Done!  Now if we've been called from
    +       _cffi_start_and_call_python() in an ``extern "Python"``, we can
    +       only hope that the Python code did correctly set up the
    +       corresponding @ffi.def_extern() function.  Otherwise, the
    +       general logic of ``extern "Python"`` functions (inside the
    +       _cffi_backend module) will find that the reference is still
    +       missing and print an error.
    +     */
    +    result = 0;
    + done:
    +    Py_XDECREF(pycode);
    +    Py_XDECREF(global_dict);
    +    PyGILState_Release(state);
    +    return result;
    +
    + error:;
    +    {
    +        /* Print as much information as potentially useful.
    +           Debugging load-time failures with embedding is not fun
    +        */
    +        PyObject *exception, *v, *tb, *f, *modules, *mod;
    +        PyErr_Fetch(&exception, &v, &tb);
    +        if (exception != NULL) {
    +            PyErr_NormalizeException(&exception, &v, &tb);
    +            PyErr_Display(exception, v, tb);
    +        }
    +        Py_XDECREF(exception);
    +        Py_XDECREF(v);
    +        Py_XDECREF(tb);
    +
    +        f = PySys_GetObject((char *)"stderr");
    +        if (f != NULL && f != Py_None) {
    +            PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME
    +                               "\ncompiled with cffi version: 1.5.2"
    +                               "\n_cffi_backend module: ", f);
    +            modules = PyImport_GetModuleDict();
    +            mod = PyDict_GetItemString(modules, "_cffi_backend");
    +            if (mod == NULL) {
    +                PyFile_WriteString("not loaded", f);
    +            }
    +            else {
    +                v = PyObject_GetAttrString(mod, "__file__");
    +                PyFile_WriteObject(v, f, 0);
    +                Py_XDECREF(v);
    +            }
    +            PyFile_WriteString("\nsys.path: ", f);
    +            PyFile_WriteObject(PySys_GetObject((char *)"path"), f, 0);
    +            PyFile_WriteString("\n\n", f);
    +        }
    +    }
    +    result = -1;
    +    goto done;
    +}
    +
    +PyAPI_DATA(char *) _PyParser_TokenNames[];  /* from CPython */
    +
    +static int _cffi_carefully_make_gil(void)
    +{
    +    /* This does the basic initialization of Python.  It can be called
    +       completely concurrently from unrelated threads.  It assumes
    +       that we don't hold the GIL before (if it exists), and we don't
    +       hold it afterwards.
    +
    +       What it really does is completely different in Python 2 and 
    +       Python 3.
    +
    +    Python 2
    +    ========
    +
    +       Initialize the GIL, without initializing the rest of Python,
    +       by calling PyEval_InitThreads().
    +
    +       PyEval_InitThreads() must not be called concurrently at all.
    +       So we use a global variable as a simple spin lock.  This global
    +       variable must be from 'libpythonX.Y.so', not from this
    +       cffi-based extension module, because it must be shared from
    +       different cffi-based extension modules.  We choose
    +       _PyParser_TokenNames[0] as a completely arbitrary pointer value
    +       that is never written to.  The default is to point to the
    +       string "ENDMARKER".  We change it temporarily to point to the
    +       next character in that string.  (Yes, I know it's REALLY
    +       obscure.)
    +
    +    Python 3
    +    ========
    +
    +       In Python 3, PyEval_InitThreads() cannot be called before
    +       Py_InitializeEx() any more.  So this function calls
    +       Py_InitializeEx() first.  It uses the same obscure logic to
    +       make sure we never call it concurrently.
    +
    +       Arguably, this is less good on the spinlock, because
    +       Py_InitializeEx() takes much longer to run than
    +       PyEval_InitThreads().  But I didn't find a way around it.
    +    */
    +
    +#ifdef WITH_THREAD
    +    char *volatile *lock = (char *volatile *)_PyParser_TokenNames;
    +    char *old_value;
    +
    +    while (1) {    /* spin loop */
    +        old_value = *lock;
    +        if (old_value[0] == 'E') {
    +            assert(old_value[1] == 'N');
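
This header is copied into cffi-compiled extension modules that use the embedding mode introduced with cffi 1.5: _CFFI_PYTHON_STARTUP_CODE and the @ffi.def_extern() hook mentioned in the comments above come from the builder script on the Python side. A minimal, hypothetical example of that side (module and function names are made up, not taken from this changeset):

    import cffi

    ffibuilder = cffi.FFI()
    ffibuilder.embedding_api("""
        int add(int x, int y);
    """)
    ffibuilder.set_source("_embedding_demo", "")
    # this string ends up in _CFFI_PYTHON_STARTUP_CODE and is compiled
    # by _cffi_initialize_python() shown above
    ffibuilder.embedding_init_code("""
        from _embedding_demo import ffi

        @ffi.def_extern()
        def add(x, y):
            return x + y
    """)
    ffibuilder.compile(verbose=True)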
    
    From pypy.commits at gmail.com  Sat Mar 19 12:26:04 2016
    From: pypy.commits at gmail.com (mattip)
    Date: Sat, 19 Mar 2016 09:26:04 -0700 (PDT)
    Subject: [pypy-commit] pypy.org extradoc: update current version to 5.0.1
    Message-ID: <56ed7d9c.83561c0a.a74c2.777e@mx.google.com>
    
    Author: mattip 
    Branch: extradoc
    Changeset: r726:076a6aefa483
    Date: 2016-03-19 18:25 +0200
    http://bitbucket.org/pypy/pypy.org/changeset/076a6aefa483/
    
    Log:	update current version to 5.0.1
    
    diff --git a/download.html b/download.html
    --- a/download.html
    +++ b/download.html
@@ -74,7 +74,7 @@
 performance improvements.
 We provide binaries for x86, ARM, and PPC Linux, Mac OS/X and Windows for:
@@ -113,21 +113,21 @@ degrees of being up-to-date.
-Python2.7 compatible PyPy 5.0
+Python2.7 compatible PyPy 5.0.1
diff --git a/source/download.txt b/source/download.txt
--- a/source/download.txt
+++ b/source/download.txt
@@ -14,12 +14,12 @@
 We provide binaries for x86, ARM, and PPC Linux, Mac OS/X and Windows for:
-* the Python2.7 compatible release — **PyPy 5.0** — (`what's new in PyPy 5.0?`_)
+* the Python2.7 compatible release — **PyPy 5.0.1** — (`what's new in PyPy 5.0.1?`_)
 * the Python3.2.5 compatible release — **PyPy3 2.4.0** — (`what's new in PyPy3 2.4.0?`_).
 * the Python2.7 Software Transactional Memory special release — **PyPy-STM 2.5.1** (Linux x86-64 only)
-.. _what's new in PyPy 5.0?: http://doc.pypy.org/en/latest/release-5.0.0.html
+.. _what's new in PyPy 5.0.1?: http://doc.pypy.org/en/latest/release-5.0.1.html
 .. _what's new in PyPy3 2.4.0?: http://doc.pypy.org/en/latest/release-pypy3-2.4.0.html
@@ -73,7 +73,7 @@
 .. _`portable Linux binaries`: https://github.com/squeaky-pl/portable-pypy#portable-pypy-distribution-for-linux
-Python2.7 compatible PyPy 5.0
+Python2.7 compatible PyPy 5.0.1
 -----------------------------------
 * `Linux x86 binary (32bit, tar.bz2 built on Ubuntu 12.04 - 14.04)`__ (see ``[1]`` below)
@@ -91,17 +91,17 @@
 * `All our downloads,`__ including previous versions.  We also have a mirror_, but please use only if you have troubles accessing the links above
-.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-linux.tar.bz2
-.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-linux64.tar.bz2
-.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-linux-armhf-raspbian.tar.bz2
-.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-linux-armhf-raring.tar.bz2
-.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-linux-armel.tar.bz2
-.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-osx64.tar.bz2
-.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-win32.zip
-.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-ppc64.tar.bz2
-.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-ppc64le.tar.bz2
-.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-src.tar.bz2
-.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-src.zip
+.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-linux.tar.bz2
+.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-linux64.tar.bz2
+.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-linux-armhf-raspbian.tar.bz2
+.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-linux-armhf-raring.tar.bz2
+.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-linux-armel.tar.bz2
+.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-osx64.tar.bz2
+.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-win32.zip
+.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-ppc64.tar.bz2
+.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-ppc64le.tar.bz2
+.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-src.tar.bz2
+.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-src.zip
 .. _`vcredist_x86.exe`: http://www.microsoft.com/en-us/download/details.aspx?id=5582
 .. __: https://bitbucket.org/pypy/pypy/downloads
 .. _mirror: http://buildbot.pypy.org/mirror/

From pypy.commits at gmail.com  Sat Mar 19 12:49:37 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 19 Mar 2016 09:49:37 -0700 (PDT)
Subject: [pypy-commit] pypy default: Added tag release-5.0.1 for changeset bbd45126bc69
Message-ID: <56ed8321.49f9c20a.bf8c1.3c93@mx.google.com>

Author: Armin Rigo
Branch: 
Changeset: r83171:957c344e7b98
Date: 2016-03-19 11:08 +0100
http://bitbucket.org/pypy/pypy/changeset/957c344e7b98/

Log:	Added tag release-5.0.1 for changeset bbd45126bc69

diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -19,3 +19,4 @@
 850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0
 5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1
 246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0
+bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1

From pypy.commits at gmail.com  Sat Mar 19 13:41:32 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 19 Mar 2016 10:41:32 -0700 (PDT)
Subject: [pypy-commit] pypy sandbox-lib: A branch to revive a different version of sandboxing: playing with an
Message-ID: <56ed8f4c.a2afc20a.a24d3.5510@mx.google.com>

Author: Armin Rigo
Branch: sandbox-lib
Changeset: r83172:3208b1ef9bb5
Date: 2016-03-19 18:12 +0100
http://bitbucket.org/pypy/pypy/changeset/3208b1ef9bb5/

Log:	A branch to revive a different version of sandboxing: playing with
	an in-process library that you can use from C (or any language
	that interfaces with C, including Python with cffi)

From pypy.commits at gmail.com  Sat Mar 19 13:41:33 2016
From: pypy.commits at gmail.com (arigo)
Date: Sat, 19 Mar 2016 10:41:33 -0700 (PDT)
Subject: [pypy-commit] pypy sandbox-lib: in-progress
Message-ID: <56ed8f4d.45d61c0a.43f97.ffff8e5b@mx.google.com>

Author: Armin Rigo
Branch: sandbox-lib
Changeset: r83173:8f8afefdd713
Date: 2016-03-19 18:38 +0100
http://bitbucket.org/pypy/pypy/changeset/8f8afefdd713/

Log:	in-progress

diff --git a/rpython/annotator/policy.py b/rpython/annotator/policy.py
--- a/rpython/annotator/policy.py
+++ b/rpython/annotator/policy.py
@@ -88,7 +88,7 @@
                 if key not in bk.emulated_pbc_calls:
                     params_s = s_func.args_s
                     s_result = s_func.s_result
-                    from rpython.translator.sandbox.rsandbox import make_sandbox_trampoline
+                    from rpython.translator.sandboxlib.rsandbox import make_sandbox_trampoline
                     sandbox_trampoline = make_sandbox_trampoline(
                         s_func.name, params_s, s_result)
                     sandbox_trampoline._signature_ = [SomeTuple(items=params_s)], s_result
diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py
--- a/rpython/config/translationoption.py
+++ b/rpython/config/translationoption.py
@@ -106,10 +106,11 @@
     # other noticeable options
     BoolOption("thread", "enable use of threading primitives",
               default=False, cmdline="--thread"),
-    BoolOption("sandbox", "Produce a fully-sandboxed executable",
-               default=False, cmdline="--sandbox",
-               requires=[("translation.thread", False)],
-               suggests=[("translation.gc", "generation"),
+    BoolOption("sandboxlib", "Produce a fully-sandboxed library",
+               default=False, cmdline="--sandboxlib",
+               requires=[("translation.thread", False),
+                         ("translation.shared", True)],
+               suggests=[("translation.gc", "minimark"),
                          ("translation.gcrootfinder", "shadowstack")]),
     BoolOption("rweakref", "The backend supports RPython-level weakrefs",
                default=True),
diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py
--- a/rpython/rlib/objectmodel.py
+++ b/rpython/rlib/objectmodel.py
@@ -300,7 +300,7 @@
         def compute_annotation(self):
             if sandboxed_name:
                 config = self.bookkeeper.annotator.translator.config
-                if config.translation.sandbox:
+                if config.translation.sandboxlib:
                     func._sandbox_external_name = sandboxed_name
                     func._dont_inline_ = True
             return self.bookkeeper.immutablevalue(func)
diff --git a/rpython/rtyper/extfunc.py b/rpython/rtyper/extfunc.py
--- a/rpython/rtyper/extfunc.py
+++ b/rpython/rtyper/extfunc.py
@@ -95,7 +95,7 @@
     def compute_annotation(self):
        s_result = SomeExternalFunction(
            self.name, self.signature_args, self.signature_result)
-        if (self.bookkeeper.annotator.translator.config.translation.sandbox
+        if (self.bookkeeper.annotator.translator.config.translation.sandboxlib
                and not self.safe_not_sandboxed):
            s_result.needs_sandboxing = True
        return s_result
@@ -110,7 +110,7 @@
     export_name: the name of the function as it will be seen by the backends
     llimpl: optional; if provided, this RPython function is called instead of the target function
     llfakeimpl: optional; if provided, called by the llinterpreter
-    sandboxsafe: use True if the function performs no I/O (safe for --sandbox)
+    sandboxsafe: use True if the function performs no I/O (safe for --sandboxlib)
    """
    if export_name is None:
diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py
--- a/rpython/rtyper/rtyper.py
+++ b/rpython/rtyper/rtyper.py
@@ -29,7 +29,6 @@
 from rpython.rtyper.rclass import RootClassRepr
 from rpython.tool.pairtype import pair
 from rpython.translator.unsimplify import insert_empty_block
-from rpython.translator.sandbox.rsandbox import make_sandbox_trampoline


 class RTyperBackend(object):
@@ -575,7 +574,9 @@
     def getcallable(self, graph):
         def getconcretetype(v):
             return self.bindingrepr(v).lowleveltype
-        if self.annotator.translator.config.translation.sandbox:
+        if self.annotator.translator.config.translation.sandboxlib:
+            from rpython.translator.sandboxlib.rsandbox import (
+                make_sandbox_trampoline)    # don't import this globally
             try:
                 name = graph.func._sandbox_external_name
             except AttributeError:
diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py
--- a/rpython/translator/c/genc.py
+++ b/rpython/translator/c/genc.py
@@ -131,7 +131,7 @@
                               gcpolicyclass=gcpolicyclass,
                              exctransformer=exctransformer,
                              thread_enabled=self.config.translation.thread,
-                              sandbox=self.config.translation.sandbox)
+                              sandbox=self.config.translation.sandboxlib)
         self.db = db

         # give the gc a chance to register interest in the start-up functions it
@@ -211,7 +211,7 @@
         defines = defines.copy()
         if self.config.translation.countmallocs:
             defines['COUNT_OP_MALLOCS'] = 1
-        if self.config.translation.sandbox:
+        if self.config.translation.sandboxlib:
             defines['RPY_SANDBOXED'] = 1
         if CBuilder.have___thread is None:
             CBuilder.have___thread = self.translator.platform.check___thread()
@@ -380,7 +380,8 @@
            headers_to_precompile=headers_to_precompile,
            no_precompile_cfiles = module_files,
            shared=self.config.translation.shared,
-            icon=self.config.translation.icon)
+            icon=self.config.translation.icon,
+            sandboxlib=self.config.translation.sandboxlib)

         if self.has_profopt():
             profopt = self.config.translation.profopt
diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py
--- a/rpython/translator/c/node.py
+++ b/rpython/translator/c/node.py
@@ -917,7 +917,7 @@
     if db.sandbox:
         if (getattr(obj, 'external', None) is not None and
                 not obj._safe_not_sandboxed):
-            from rpython.translator.sandbox import rsandbox
+            from rpython.translator.sandboxlib import rsandbox
             obj.__dict__['graph'] = rsandbox.get_sandbox_stub(
                 obj, db.translator.rtyper)
             obj.__dict__.pop('_safe_not_sandboxed', None)
diff --git a/rpython/translator/c/src/precommondefs.h b/rpython/translator/c/src/precommondefs.h --- a/rpython/translator/c/src/precommondefs.h +++ b/rpython/translator/c/src/precommondefs.h @@ -61,16 +61,26 @@ a bug; please report or fix it. */ #ifdef __GNUC__ -# define RPY_EXPORTED extern __attribute__((visibility("default"))) -# define _RPY_HIDDEN __attribute__((visibility("hidden"))) +# define _RPY_EXPORTED1 extern __attribute__((visibility("default"))) +# define _RPY_HIDDEN __attribute__((visibility("hidden"))) #else -# define RPY_EXPORTED extern __declspec(dllexport) -# define _RPY_HIDDEN /* nothing */ +# define _RPY_EXPORTED1 extern __declspec(dllexport) +# define _RPY_HIDDEN /* nothing */ #endif #ifndef RPY_EXTERN # define RPY_EXTERN extern _RPY_HIDDEN #endif +/* With --sandboxlib, don't export any of the standard functions. + We will instead export a different set of functions, using + the RPY_SANDBOX_EXPORTED macro. */ +#ifdef RPY_SANDBOXED +# define RPY_EXPORTED extern _RPY_HIDDEN +# define RPY_SANDBOX_EXPORTED _RPY_EXPORTED1 +#else +# define RPY_EXPORTED _RPY_EXPORTED1 +#endif + #endif /* __PYPY_PRECOMMONDEFS_H */ diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py --- a/rpython/translator/platform/posix.py +++ b/rpython/translator/platform/posix.py @@ -99,7 +99,8 @@ def gen_makefile(self, cfiles, eci, exe_name=None, path=None, shared=False, headers_to_precompile=[], - no_precompile_cfiles = [], icon=None): + no_precompile_cfiles = [], icon=None, + sandboxlib=False): cfiles = self._all_cfiles(cfiles, eci) if path is None: @@ -133,6 +134,11 @@ m.exe_name = path.join(exe_name.basename) m.eci = eci + default_target = exe_name.basename + if sandboxlib: + assert shared + default_target = target_name + def rpyrel(fpath): lpath = py.path.local(fpath) rel = lpath.relto(rpypath) @@ -165,7 +171,7 @@ definitions = [ ('RPYDIR', '"%s"' % rpydir), ('TARGET', target_name), - ('DEFAULT_TARGET', exe_name.basename), + ('DEFAULT_TARGET', default_target), ('SOURCES', rel_cfiles), ('OBJECTS', rel_ofiles), ('LIBS', self._libs(eci.libraries) + list(self.extra_libs)), @@ -195,7 +201,7 @@ for rule in rules: m.rule(*rule) - if shared: + if shared and not sandboxlib: m.definition('SHARED_IMPORT_LIB', libname), m.definition('PYPY_MAIN_FUNCTION', "pypy_main_startup") m.rule('main.c', '', diff --git a/rpython/translator/sandboxlib/__init__.py b/rpython/translator/sandboxlib/__init__.py new file mode 100644 diff --git a/rpython/translator/sandboxlib/rsandbox.py b/rpython/translator/sandboxlib/rsandbox.py new file mode 100644 --- /dev/null +++ b/rpython/translator/sandboxlib/rsandbox.py @@ -0,0 +1,13 @@ + + +def make_sandbox_trampoline(fnname, args_s, s_result): + """Create a trampoline function with the specified signature. + + The trampoline is meant to be used in place of real calls to the external + function named 'fnname'. Instead, it calls a function pointer that is + under control of the main C program using the sandboxed library. 
+ """ + def execute(*args): + raise NotImplementedError + execute.__name__ = 'sandboxed_%s' % (fnname,) + return execute diff --git a/rpython/translator/sandboxlib/test/__init__.py b/rpython/translator/sandboxlib/test/__init__.py new file mode 100644 diff --git a/rpython/translator/sandboxlib/test/test_sandbox.py b/rpython/translator/sandboxlib/test/test_sandbox.py new file mode 100644 --- /dev/null +++ b/rpython/translator/sandboxlib/test/test_sandbox.py @@ -0,0 +1,14 @@ +from rpython.translator.interactive import Translation + + + +def compile(entry_point): + t = Translation(entry_point, backend='c', sandboxlib=True) + return str(t.compile()) + + +def test_empty(): + def entry_point(argv): + return 0 + + print compile(entry_point) From pypy.commits at gmail.com Sat Mar 19 14:49:51 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 19 Mar 2016 11:49:51 -0700 (PDT) Subject: [pypy-commit] pypy sandbox-lib: in-progress, but looks like the wrong level somewhere Message-ID: <56ed9f4f.29cec20a.849e7.6aa1@mx.google.com> Author: Armin Rigo Branch: sandbox-lib Changeset: r83174:446297152ae0 Date: 2016-03-19 19:49 +0100 http://bitbucket.org/pypy/pypy/changeset/446297152ae0/ Log: in-progress, but looks like the wrong level somewhere diff --git a/rpython/annotator/policy.py b/rpython/annotator/policy.py --- a/rpython/annotator/policy.py +++ b/rpython/annotator/policy.py @@ -90,6 +90,7 @@ s_result = s_func.s_result from rpython.translator.sandboxlib.rsandbox import make_sandbox_trampoline sandbox_trampoline = make_sandbox_trampoline( + annotator.translator, s_func.name, params_s, s_result) sandbox_trampoline._signature_ = [SomeTuple(items=params_s)], s_result bk.emulate_pbc_call(key, bk.immutablevalue(sandbox_trampoline), params_s) diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -109,6 +109,7 @@ BoolOption("sandboxlib", "Produce a fully-sandboxed library", default=False, cmdline="--sandboxlib", requires=[("translation.thread", False), + ("translation.continuation", False), ("translation.shared", True)], suggests=[("translation.gc", "minimark"), ("translation.gcrootfinder", "shadowstack")]), diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -584,7 +584,8 @@ else: args_s = [v.annotation for v in graph.getargs()] s_result = graph.getreturnvar().annotation - sandboxed = make_sandbox_trampoline(name, args_s, s_result) + sandboxed = make_sandbox_trampoline(self.annotator.translator, + name, args_s, s_result) return self.getannmixlevel().delayedfunction( sandboxed, args_s, s_result) diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -853,6 +853,10 @@ print >>fi, "#define PYPY_INSTRUMENT_NCOUNTER %d" % n fi.close() + if 'RPY_SANDBOXED' in defines: + from rpython.translator.sandboxlib.rsandbox import add_sandbox_files + eci = add_sandbox_files(database.translator, eci) + eci = add_extra_files(eci) eci = eci.convert_sources_to_files() return eci, filename, sg.getextrafiles(), headers_to_precompile diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py --- a/rpython/translator/platform/posix.py +++ b/rpython/translator/platform/posix.py @@ -121,7 +121,9 @@ if shared: libname = exe_name.new(ext='').basename - target_name = 'lib' + exe_name.new(ext=self.so_ext).basename + if 
sandboxlib: + libname += '-sandbox' + target_name = 'lib%s.%s' % (libname, self.so_ext) else: target_name = exe_name.basename @@ -201,9 +203,10 @@ for rule in rules: m.rule(*rule) + if shared: + m.definition('SHARED_IMPORT_LIB', libname) + m.definition('PYPY_MAIN_FUNCTION', "pypy_main_startup") if shared and not sandboxlib: - m.definition('SHARED_IMPORT_LIB', libname), - m.definition('PYPY_MAIN_FUNCTION', "pypy_main_startup") m.rule('main.c', '', 'echo "' 'int $(PYPY_MAIN_FUNCTION)(int, char*[]); ' diff --git a/rpython/translator/sandboxlib/rsandbox.py b/rpython/translator/sandboxlib/rsandbox.py --- a/rpython/translator/sandboxlib/rsandbox.py +++ b/rpython/translator/sandboxlib/rsandbox.py @@ -1,13 +1,92 @@ +import py +from rpython.rlib import jit +from rpython.annotator import model as annmodel +from rpython.rtyper.extregistry import ExtRegistryEntry +from rpython.rtyper.lltypesystem import lltype, rffi -def make_sandbox_trampoline(fnname, args_s, s_result): +class SandboxExternalFunc(object): + def __init__(self, cptrname): + self.cptrname = cptrname + + +def make_sandbox_trampoline(translator, fnname, args_s, s_result): """Create a trampoline function with the specified signature. The trampoline is meant to be used in place of real calls to the external function named 'fnname'. Instead, it calls a function pointer that is under control of the main C program using the sandboxed library. """ + try: + extfuncs, seen = translator._sandboxlib_fnnames + except AttributeError: + extfuncs, seen = translator._sandboxlib_fnnames = {}, set() + + if fnname not in extfuncs: + # map from 'fnname' to the C name of the function pointer + cptrname = fnname + if '.' in fnname: + cptrname = fnname.split('.', 1)[1] # drop the part before the '.' + cptrname = 'sandbox_' + cptrname + assert cptrname not in seen, "duplicate name %r" % (cptrname,) + seen.add(cptrname) + sandboxfunc = SandboxExternalFunc(cptrname) + extfuncs[fnname] = sandboxfunc + else: + sandboxfunc = extfuncs[fnname] + pargs_s, s_presult = sandboxfunc.args_s, sandboxfunc.s_result + assert len(args_s) == len(pargs_s), ( + "non-constant argument length for %r" % (fnname,)) + args_s = [annmodel.unionof(s1, s2) for (s1, s2) in zip(args_s, pargs_s)] + s_result = annmodel.unionof(s_result, s_presult) + sandboxfunc.args_s = args_s + sandboxfunc.s_result = s_result + # + @jit.dont_look_inside def execute(*args): - raise NotImplementedError + return _call_sandbox(fnname, *args) execute.__name__ = 'sandboxed_%s' % (fnname,) return execute + +def _call_sandbox(fnname, *args): + "NOT_RPYTHON" + raise NotImplementedError + +class ExtEntry(ExtRegistryEntry): + _about_ = _call_sandbox + + def compute_result_annotation(self, s_fnname, *args_s): + fnname = s_fnname.const + translator = self.bookkeeper.annotator.translator + sandboxfunc = translator._sandboxlib_fnnames[0][fnname] + return sandboxfunc.s_result + + def specialize_call(self, hop): + fnname = hop.spaceop.args[1].value + translator = hop.rtyper.annotator.translator + sandboxfunc = translator._sandboxlib_fnnames[0][fnname] + args_s, s_result = sandboxfunc.args_s, sandboxfunc.s_result + nb_args = len(args_s) + assert len(hop.spaceop.args) == 2 + nb_args + + args_r = [hop.rtyper.getrepr(s) for s in args_s] + r_result = hop.rtyper.getrepr(s_result) + FUNCPTR = lltype.Ptr(lltype.FuncType([r.lowleveltype for r in args_r], + r_result.lowleveltype)) + externalfuncptr = rffi.CConstant(sandbox.cptrname, FUNCPTR) + import pdb;pdb.set_trace() + + for i in range(nb_args): + v_arg = hop.inputarg(args_r[i], 2 + 
i) + xxx + + +def add_sandbox_files(translator, eci): + srcdir = py.path.local(__file__).join('..', 'src') + files = [ + srcdir / 'foo.c', + ] + fnnames = sorted(translator._sandboxlib_fnnames[0]) + import pdb;pdb.set_trace() + + return eci.merge(ExternalCompilationInfo(separate_module_files=files)) From pypy.commits at gmail.com Sat Mar 19 15:32:52 2016 From: pypy.commits at gmail.com (arigo) Date: Sat, 19 Mar 2016 12:32:52 -0700 (PDT) Subject: [pypy-commit] pypy sandbox-lib: in-progress Message-ID: <56eda964.6672c20a.220e2.79d9@mx.google.com> Author: Armin Rigo Branch: sandbox-lib Changeset: r83175:cd639cdf1174 Date: 2016-03-19 20:32 +0100 http://bitbucket.org/pypy/pypy/changeset/cd639cdf1174/ Log: in-progress diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -855,7 +855,7 @@ if 'RPY_SANDBOXED' in defines: from rpython.translator.sandboxlib.rsandbox import add_sandbox_files - eci = add_sandbox_files(database.translator, eci) + eci = add_sandbox_files(database, eci) eci = add_extra_files(eci) eci = eci.convert_sources_to_files() diff --git a/rpython/translator/sandboxlib/rsandbox.py b/rpython/translator/sandboxlib/rsandbox.py --- a/rpython/translator/sandboxlib/rsandbox.py +++ b/rpython/translator/sandboxlib/rsandbox.py @@ -6,8 +6,8 @@ class SandboxExternalFunc(object): - def __init__(self, cptrname): - self.cptrname = cptrname + def __init__(self, cfuncname): + self.cfuncname = cfuncname def make_sandbox_trampoline(translator, fnname, args_s, s_result): @@ -23,14 +23,14 @@ extfuncs, seen = translator._sandboxlib_fnnames = {}, set() if fnname not in extfuncs: - # map from 'fnname' to the C name of the function pointer - cptrname = fnname + # map from 'fnname' to the C function doing the call + cfuncname = fnname if '.' in fnname: - cptrname = fnname.split('.', 1)[1] # drop the part before the '.' - cptrname = 'sandbox_' + cptrname - assert cptrname not in seen, "duplicate name %r" % (cptrname,) - seen.add(cptrname) - sandboxfunc = SandboxExternalFunc(cptrname) + cfuncname = fnname.split('.', 1)[1] # drop the part before the '.' 
+ cfuncname = 'sandbox_' + cfuncname + assert cfuncname not in seen, "duplicate name %r" % (cfuncname,) + seen.add(cfuncname) + sandboxfunc = SandboxExternalFunc(cfuncname) extfuncs[fnname] = sandboxfunc else: sandboxfunc = extfuncs[fnname] @@ -68,25 +68,57 @@ args_s, s_result = sandboxfunc.args_s, sandboxfunc.s_result nb_args = len(args_s) assert len(hop.spaceop.args) == 2 + nb_args - + assert len(hop.args_r) == 1 + nb_args args_r = [hop.rtyper.getrepr(s) for s in args_s] r_result = hop.rtyper.getrepr(s_result) - FUNCPTR = lltype.Ptr(lltype.FuncType([r.lowleveltype for r in args_r], - r_result.lowleveltype)) - externalfuncptr = rffi.CConstant(sandbox.cptrname, FUNCPTR) - import pdb;pdb.set_trace() - - for i in range(nb_args): - v_arg = hop.inputarg(args_r[i], 2 + i) - xxx + if not hasattr(sandboxfunc, 'externalfunc'): + externalfunc = rffi.llexternal(sandboxfunc.cfuncname, + [r.lowleveltype for r in args_r], + r_result.lowleveltype, + sandboxsafe=True, + _nowrapper=True) + sandboxfunc.externalfunc = externalfunc + else: + externalfunc = sandboxfunc.externalfunc -def add_sandbox_files(translator, eci): - srcdir = py.path.local(__file__).join('..', 'src') - files = [ - srcdir / 'foo.c', - ] - fnnames = sorted(translator._sandboxlib_fnnames[0]) + c_externalfunc = hop.inputconst(lltype.typeOf(externalfunc), + externalfunc) + + args_v = [hop.inputarg(args_r[i], 1 + i) for i in range(nb_args)] + hop.exception_cannot_occur() + return hop.genop("direct_call", [c_externalfunc] + args_v, + resulttype = r_result) + + +def add_sandbox_files(database, eci): + from rpython.translator.c.support import cdecl + + c_header = ['#include "common_header.h"\n'] + c_source = ['#include "sandboxlib.h"\n'] + fnnames = database.translator._sandboxlib_fnnames[0] + for fnname in sorted(fnnames): + sandboxfunc = fnnames[fnname] + if hasattr(sandboxfunc, 'externalfunc'): + externalfunc = sandboxfunc.externalfunc + TP = lltype.typeOf(externalfunc) + vardecl = cdecl(database.gettype(TP), sandboxfunc.cfuncname) + c_header.append('RPY_SANDBOX_EXPORTED %s;\n' % (vardecl,)) + # + emptyfuncname = 'empty_' + sandboxfunc.cfuncname + argnames = ['a%d' % i for i in range(len(TP.TO.ARGS))] + c_source.append(""" +static %s { + abort(); +}; +%s = %s; +""" % (cdecl(database.gettype(TP.TO, argnames=argnames), emptyfuncname), + vardecl, emptyfuncname)) + import pdb;pdb.set_trace() - return eci.merge(ExternalCompilationInfo(separate_module_files=files)) + #srcdir = py.path.local(__file__).join('..', 'src') + #files = [ + # srcdir / 'foo.c', + #] + #return eci.merge(ExternalCompilationInfo(separate_module_files=files)) From pypy.commits at gmail.com Sat Mar 19 23:38:07 2016 From: pypy.commits at gmail.com (alex_gaynor) Date: Sat, 19 Mar 2016 20:38:07 -0700 (PDT) Subject: [pypy-commit] pypy stdlib-2.7.11: Fixed a.fromstring(a) Message-ID: <56ee1b1f.84c9c20a.9cb22.ffffe52c@mx.google.com> Author: Alex Gaynor Branch: stdlib-2.7.11 Changeset: r83176:950a3333c302 Date: 2016-03-19 23:37 -0400 http://bitbucket.org/pypy/pypy/changeset/950a3333c302/ Log: Fixed a.fromstring(a) diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -238,6 +238,11 @@ Appends items from the string, interpreting it as an array of machine values,as if it had been read from a file using the fromfile() method). 
""" + if self is w_s: + raise OperationError( + self.space.w_ValueError, + self.space.wrap("array.fromstring(x): x cannot be self") + ) s = space.getarg_w('s#', w_s) if len(s) % self.itemsize != 0: msg = 'string length not a multiple of item size' diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py --- a/pypy/module/array/test/test_array.py +++ b/pypy/module/array/test/test_array.py @@ -151,6 +151,8 @@ raises(OverflowError, a.append, 2 ** (8 * b)) def test_fromstring(self): + import sys + a = self.array('c') a.fromstring('Hi!') assert a[0] == 'H' and a[1] == 'i' and a[2] == '!' and len(a) == 3 @@ -174,6 +176,8 @@ raises(ValueError, a.fromstring, '\x00' * (2 * a.itemsize + 1)) b = self.array(t, '\x00' * a.itemsize * 2) assert len(b) == 2 and b[0] == 0 and b[1] == 0 + if sys.version_info >= (2, 7, 11): + raises(ValueError, a.fromstring, a) def test_fromfile(self): From pypy.commits at gmail.com Sat Mar 19 23:45:03 2016 From: pypy.commits at gmail.com (alex_gaynor) Date: Sat, 19 Mar 2016 20:45:03 -0700 (PDT) Subject: [pypy-commit] pypy stdlib-2.7.11: Added typecheck in Row.__init__, like CPython does Message-ID: <56ee1cbf.c1621c0a.ee33.7355@mx.google.com> Author: Alex Gaynor Branch: stdlib-2.7.11 Changeset: r83177:401daaffa88f Date: 2016-03-19 23:43 -0400 http://bitbucket.org/pypy/pypy/changeset/401daaffa88f/ Log: Added typecheck in Row.__init__, like CPython does diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1155,6 +1155,8 @@ class Row(object): def __init__(self, cursor, values): + if not isinstance(cursor, Cursor): + raise TypeError("instance of cursor required for first argument") self.description = cursor.description self.values = values From pypy.commits at gmail.com Sat Mar 19 23:45:04 2016 From: pypy.commits at gmail.com (alex_gaynor) Date: Sat, 19 Mar 2016 20:45:04 -0700 (PDT) Subject: [pypy-commit] pypy stdlib-2.7.11: Disable SSLv3 by default Message-ID: <56ee1cc0.55031c0a.717d2.1e05@mx.google.com> Author: Alex Gaynor Branch: stdlib-2.7.11 Changeset: r83178:7371262b2838 Date: 2016-03-19 23:44 -0400 http://bitbucket.org/pypy/pypy/changeset/7371262b2838/ Log: Disable SSLv3 by default diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py --- a/pypy/module/_ssl/interp_ssl.py +++ b/pypy/module/_ssl/interp_ssl.py @@ -1288,6 +1288,8 @@ options = SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS if protocol != PY_SSL_VERSION_SSL2: options |= SSL_OP_NO_SSLv2 + if protocol != PY_SSL_VERSION_SSL3: + options |= SSL_OP_NO_SSLv3 libssl_SSL_CTX_set_options(ctx, options) if not OPENSSL_NO_ECDH: From pypy.commits at gmail.com Sun Mar 20 03:15:21 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 20 Mar 2016 00:15:21 -0700 (PDT) Subject: [pypy-commit] pypy sandbox-lib: Rename "sandboxlib" to "rsandbox" Message-ID: <56ee4e09.c85b1c0a.f5081.55f9@mx.google.com> Author: Armin Rigo Branch: sandbox-lib Changeset: r83179:28c504249fc6 Date: 2016-03-20 08:14 +0100 http://bitbucket.org/pypy/pypy/changeset/28c504249fc6/ Log: Rename "sandboxlib" to "rsandbox" diff --git a/rpython/annotator/policy.py b/rpython/annotator/policy.py --- a/rpython/annotator/policy.py +++ b/rpython/annotator/policy.py @@ -88,7 +88,7 @@ if key not in bk.emulated_pbc_calls: params_s = s_func.args_s s_result = s_func.s_result - from rpython.translator.sandboxlib.rsandbox import make_sandbox_trampoline + from rpython.translator.rsandbox.rsandbox import make_sandbox_trampoline sandbox_trampoline = 
make_sandbox_trampoline( annotator.translator, s_func.name, params_s, s_result) diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -106,8 +106,8 @@ # other noticeable options BoolOption("thread", "enable use of threading primitives", default=False, cmdline="--thread"), - BoolOption("sandboxlib", "Produce a fully-sandboxed library", - default=False, cmdline="--sandboxlib", + BoolOption("rsandbox", "Produce a fully-sandboxed library", + default=False, cmdline="--rsandbox", requires=[("translation.thread", False), ("translation.continuation", False), ("translation.shared", True)], diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -300,7 +300,7 @@ def compute_annotation(self): if sandboxed_name: config = self.bookkeeper.annotator.translator.config - if config.translation.sandboxlib: + if config.translation.rsandbox: func._sandbox_external_name = sandboxed_name func._dont_inline_ = True return self.bookkeeper.immutablevalue(func) diff --git a/rpython/rtyper/extfunc.py b/rpython/rtyper/extfunc.py --- a/rpython/rtyper/extfunc.py +++ b/rpython/rtyper/extfunc.py @@ -95,7 +95,7 @@ def compute_annotation(self): s_result = SomeExternalFunction( self.name, self.signature_args, self.signature_result) - if (self.bookkeeper.annotator.translator.config.translation.sandboxlib + if (self.bookkeeper.annotator.translator.config.translation.rsandbox and not self.safe_not_sandboxed): s_result.needs_sandboxing = True return s_result diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -574,8 +574,8 @@ def getcallable(self, graph): def getconcretetype(v): return self.bindingrepr(v).lowleveltype - if self.annotator.translator.config.translation.sandboxlib: - from rpython.translator.sandboxlib.rsandbox import ( + if self.annotator.translator.config.translation.rsandbox: + from rpython.translator.rsandbox.rsandbox import ( make_sandbox_trampoline) # don't import this globally try: name = graph.func._sandbox_external_name diff --git a/rpython/translator/c/database.py b/rpython/translator/c/database.py --- a/rpython/translator/c/database.py +++ b/rpython/translator/c/database.py @@ -31,10 +31,10 @@ gcpolicyclass=None, exctransformer=None, thread_enabled=False, - sandbox=False): + rsandbox=False): self.translator = translator self.standalone = standalone - self.sandbox = sandbox + self.rsandbox = rsandbox if gcpolicyclass is None: gcpolicyclass = gc.RefcountingGcPolicy self.gcpolicy = gcpolicyclass(self, thread_enabled) diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -131,7 +131,7 @@ gcpolicyclass=gcpolicyclass, exctransformer=exctransformer, thread_enabled=self.config.translation.thread, - sandbox=self.config.translation.sandboxlib) + rsandbox=self.config.translation.rsandbox) self.db = db # give the gc a chance to register interest in the start-up functions it @@ -211,7 +211,7 @@ defines = defines.copy() if self.config.translation.countmallocs: defines['COUNT_OP_MALLOCS'] = 1 - if self.config.translation.sandboxlib: + if self.config.translation.rsandbox: defines['RPY_SANDBOXED'] = 1 if CBuilder.have___thread is None: CBuilder.have___thread = self.translator.platform.check___thread() @@ -381,7 +381,7 @@ no_precompile_cfiles = 
module_files, shared=self.config.translation.shared, icon=self.config.translation.icon, - sandboxlib=self.config.translation.sandboxlib) + rsandbox=self.config.translation.rsandbox) if self.has_profopt(): profopt = self.config.translation.profopt @@ -854,7 +854,7 @@ fi.close() if 'RPY_SANDBOXED' in defines: - from rpython.translator.sandboxlib.rsandbox import add_sandbox_files + from rpython.translator.rsandbox.rsandbox import add_sandbox_files eci = add_sandbox_files(database, eci) eci = add_extra_files(eci) diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -914,10 +914,10 @@ def new_funcnode(db, T, obj, forcename=None): from rpython.rtyper.rtyper import llinterp_backend - if db.sandbox: + if db.rsandbox: if (getattr(obj, 'external', None) is not None and not obj._safe_not_sandboxed): - from rpython.translator.sandboxlib import rsandbox + from rpython.translator.rsandbox import rsandbox obj.__dict__['graph'] = rsandbox.get_sandbox_stub( obj, db.translator.rtyper) obj.__dict__.pop('_safe_not_sandboxed', None) @@ -930,7 +930,7 @@ return FuncNode(db, T, obj, name) elif getattr(obj, 'external', None) is not None: assert obj.external == 'C' - if db.sandbox: + if db.rsandbox: assert obj._safe_not_sandboxed return ExternalFuncNode(db, T, obj, name) elif hasattr(obj._callable, "c_name"): diff --git a/rpython/translator/platform/posix.py b/rpython/translator/platform/posix.py --- a/rpython/translator/platform/posix.py +++ b/rpython/translator/platform/posix.py @@ -100,7 +100,7 @@ def gen_makefile(self, cfiles, eci, exe_name=None, path=None, shared=False, headers_to_precompile=[], no_precompile_cfiles = [], icon=None, - sandboxlib=False): + rsandbox=False): cfiles = self._all_cfiles(cfiles, eci) if path is None: @@ -121,8 +121,8 @@ if shared: libname = exe_name.new(ext='').basename - if sandboxlib: - libname += '-sandbox' + if rsandbox: + libname += '-rsandbox' target_name = 'lib%s.%s' % (libname, self.so_ext) else: target_name = exe_name.basename @@ -137,7 +137,7 @@ m.eci = eci default_target = exe_name.basename - if sandboxlib: + if rsandbox: assert shared default_target = target_name @@ -206,7 +206,7 @@ if shared: m.definition('SHARED_IMPORT_LIB', libname) m.definition('PYPY_MAIN_FUNCTION', "pypy_main_startup") - if shared and not sandboxlib: + if shared and not rsandbox: m.rule('main.c', '', 'echo "' 'int $(PYPY_MAIN_FUNCTION)(int, char*[]); ' diff --git a/rpython/translator/sandboxlib/__init__.py b/rpython/translator/rsandbox/__init__.py rename from rpython/translator/sandboxlib/__init__.py rename to rpython/translator/rsandbox/__init__.py diff --git a/rpython/translator/sandboxlib/rsandbox.py b/rpython/translator/rsandbox/rsandbox.py rename from rpython/translator/sandboxlib/rsandbox.py rename to rpython/translator/rsandbox/rsandbox.py --- a/rpython/translator/sandboxlib/rsandbox.py +++ b/rpython/translator/rsandbox/rsandbox.py @@ -95,7 +95,7 @@ from rpython.translator.c.support import cdecl c_header = ['#include "common_header.h"\n'] - c_source = ['#include "sandboxlib.h"\n'] + c_source = ['#include "rsandbox.h"\n'] fnnames = database.translator._sandboxlib_fnnames[0] for fnname in sorted(fnnames): sandboxfunc = fnnames[fnname] diff --git a/rpython/translator/sandboxlib/test/__init__.py b/rpython/translator/rsandbox/test/__init__.py rename from rpython/translator/sandboxlib/test/__init__.py rename to rpython/translator/rsandbox/test/__init__.py diff --git 
a/rpython/translator/sandboxlib/test/test_sandbox.py b/rpython/translator/rsandbox/test/test_sandbox.py rename from rpython/translator/sandboxlib/test/test_sandbox.py rename to rpython/translator/rsandbox/test/test_sandbox.py --- a/rpython/translator/sandboxlib/test/test_sandbox.py +++ b/rpython/translator/rsandbox/test/test_sandbox.py @@ -3,7 +3,7 @@ def compile(entry_point): - t = Translation(entry_point, backend='c', sandboxlib=True) + t = Translation(entry_point, backend='c', rsandbox=True) return str(t.compile()) From pypy.commits at gmail.com Sun Mar 20 04:07:25 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 20 Mar 2016 01:07:25 -0700 (PDT) Subject: [pypy-commit] pypy sandbox-lib: Kill the high-level sandboxing, keep only the low-level one Message-ID: <56ee5a3d.838d1c0a.f191a.486f@mx.google.com> Author: Armin Rigo Branch: sandbox-lib Changeset: r83180:8f9fd1f3b161 Date: 2016-03-20 09:06 +0100 http://bitbucket.org/pypy/pypy/changeset/8f9fd1f3b161/ Log: Kill the high-level sandboxing, keep only the low-level one diff --git a/rpython/annotator/policy.py b/rpython/annotator/policy.py --- a/rpython/annotator/policy.py +++ b/rpython/annotator/policy.py @@ -72,30 +72,3 @@ for callback in bk.pending_specializations: callback() del bk.pending_specializations[:] - if annotator.added_blocks is not None: - all_blocks = annotator.added_blocks - else: - all_blocks = annotator.annotated - for block in list(all_blocks): - for i, instr in enumerate(block.operations): - if not isinstance(instr, (op.simple_call, op.call_args)): - continue - v_func = instr.args[0] - s_func = annotator.annotation(v_func) - if not hasattr(s_func, 'needs_sandboxing'): - continue - key = ('sandboxing', s_func.const) - if key not in bk.emulated_pbc_calls: - params_s = s_func.args_s - s_result = s_func.s_result - from rpython.translator.rsandbox.rsandbox import make_sandbox_trampoline - sandbox_trampoline = make_sandbox_trampoline( - annotator.translator, - s_func.name, params_s, s_result) - sandbox_trampoline._signature_ = [SomeTuple(items=params_s)], s_result - bk.emulate_pbc_call(key, bk.immutablevalue(sandbox_trampoline), params_s) - else: - s_trampoline = bk.emulated_pbc_calls[key][0] - sandbox_trampoline = s_trampoline.const - new = instr.replace({instr.args[0]: Constant(sandbox_trampoline)}) - block.operations[i] = new diff --git a/rpython/rlib/objectmodel.py b/rpython/rlib/objectmodel.py --- a/rpython/rlib/objectmodel.py +++ b/rpython/rlib/objectmodel.py @@ -292,17 +292,12 @@ def sc_we_are_translated(ctx): return Constant(True) -def register_replacement_for(replaced_function, sandboxed_name=None): +def register_replacement_for(replaced_function): def wrap(func): from rpython.rtyper.extregistry import ExtRegistryEntry class ExtRegistry(ExtRegistryEntry): _about_ = replaced_function def compute_annotation(self): - if sandboxed_name: - config = self.bookkeeper.annotator.translator.config - if config.translation.rsandbox: - func._sandbox_external_name = sandboxed_name - func._dont_inline_ = True return self.bookkeeper.immutablevalue(func) return func return wrap diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -346,9 +346,7 @@ func = getattr(os, name, None) if func is None: return lambda f: f - return register_replacement_for( - func, - sandboxed_name='ll_os.ll_os_%s' % name) + return register_replacement_for(func) @specialize.arg(0) def handle_posix_error(name, result): diff --git a/rpython/rlib/rtime.py b/rpython/rlib/rtime.py --- 
a/rpython/rlib/rtime.py +++ b/rpython/rlib/rtime.py @@ -91,9 +91,7 @@ func = getattr(pytime, name, None) if func is None: return lambda f: f - return register_replacement_for( - func, - sandboxed_name='ll_time.ll_time_%s' % name) + return register_replacement_for(func) config = rffi_platform.configure(CConfig) globals().update(config) diff --git a/rpython/rtyper/extfunc.py b/rpython/rtyper/extfunc.py --- a/rpython/rtyper/extfunc.py +++ b/rpython/rtyper/extfunc.py @@ -90,19 +90,15 @@ class ExtFuncEntry(ExtRegistryEntry): - safe_not_sandboxed = False def compute_annotation(self): s_result = SomeExternalFunction( self.name, self.signature_args, self.signature_result) - if (self.bookkeeper.annotator.translator.config.translation.rsandbox - and not self.safe_not_sandboxed): - s_result.needs_sandboxing = True return s_result def register_external(function, args, result=None, export_name=None, - llimpl=None, llfakeimpl=None, sandboxsafe=False): + llimpl=None, llfakeimpl=None, sandboxsafe=None): """ function: the RPython function that will be rendered as an external function (e.g.: math.floor) args: a list containing the annotation of the arguments @@ -110,7 +106,7 @@ export_name: the name of the function as it will be seen by the backends llimpl: optional; if provided, this RPython function is called instead of the target function llfakeimpl: optional; if provided, called by the llinterpreter - sandboxsafe: use True if the function performs no I/O (safe for --sandboxlib) + sandboxsafe: IGNORED at this level """ if export_name is None: @@ -120,7 +116,6 @@ class FunEntry(ExtFuncEntry): _about_ = function - safe_not_sandboxed = sandboxsafe signature_args = params_s signature_result = s_result name = export_name diff --git a/rpython/rtyper/rtyper.py b/rpython/rtyper/rtyper.py --- a/rpython/rtyper/rtyper.py +++ b/rpython/rtyper/rtyper.py @@ -574,20 +574,6 @@ def getcallable(self, graph): def getconcretetype(v): return self.bindingrepr(v).lowleveltype - if self.annotator.translator.config.translation.rsandbox: - from rpython.translator.rsandbox.rsandbox import ( - make_sandbox_trampoline) # don't import this globally - try: - name = graph.func._sandbox_external_name - except AttributeError: - pass - else: - args_s = [v.annotation for v in graph.getargs()] - s_result = graph.getreturnvar().annotation - sandboxed = make_sandbox_trampoline(self.annotator.translator, - name, args_s, s_result) - return self.getannmixlevel().delayedfunction( - sandboxed, args_s, s_result) return getfunctionptr(graph, getconcretetype) diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -742,6 +742,8 @@ print >> f print >> f, "#ifndef _PYPY_FORWARDDECL_H" print >> f, "#define _PYPY_FORWARDDECL_H" + if database.rsandbox: + print >> f, '#include "rsandbox.h"' for node in database.globalcontainers(): for line in node.forward_declaration(): print >> f, line @@ -853,9 +855,9 @@ print >>fi, "#define PYPY_INSTRUMENT_NCOUNTER %d" % n fi.close() - if 'RPY_SANDBOXED' in defines: + if database.rsandbox: from rpython.translator.rsandbox.rsandbox import add_sandbox_files - eci = add_sandbox_files(database, eci) + eci = add_sandbox_files(database, eci, targetdir) eci = add_extra_files(eci) eci = eci.convert_sources_to_files() diff --git a/rpython/translator/c/node.py b/rpython/translator/c/node.py --- a/rpython/translator/c/node.py +++ b/rpython/translator/c/node.py @@ -914,14 +914,6 @@ def new_funcnode(db, T, obj, forcename=None): from 
rpython.rtyper.rtyper import llinterp_backend - if db.rsandbox: - if (getattr(obj, 'external', None) is not None and - not obj._safe_not_sandboxed): - from rpython.translator.rsandbox import rsandbox - obj.__dict__['graph'] = rsandbox.get_sandbox_stub( - obj, db.translator.rtyper) - obj.__dict__.pop('_safe_not_sandboxed', None) - obj.__dict__.pop('external', None) if forcename: name = forcename else: @@ -930,8 +922,9 @@ return FuncNode(db, T, obj, name) elif getattr(obj, 'external', None) is not None: assert obj.external == 'C' - if db.rsandbox: - assert obj._safe_not_sandboxed + if db.rsandbox and not obj._safe_not_sandboxed: + from rpython.translator.rsandbox import rsandbox + name = rsandbox.register_rsandbox_func(db, obj, name) return ExternalFuncNode(db, T, obj, name) elif hasattr(obj._callable, "c_name"): return ExternalFuncNode(db, T, obj, name) # this case should only be used for entrypoints diff --git a/rpython/translator/rsandbox/rsandbox.py b/rpython/translator/rsandbox/rsandbox.py --- a/rpython/translator/rsandbox/rsandbox.py +++ b/rpython/translator/rsandbox/rsandbox.py @@ -1,124 +1,77 @@ import py -from rpython.rlib import jit -from rpython.annotator import model as annmodel -from rpython.rtyper.extregistry import ExtRegistryEntry from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.translator.tool.cbuild import ExternalCompilationInfo +from rpython.translator.c.support import cdecl -class SandboxExternalFunc(object): - def __init__(self, cfuncname): - self.cfuncname = cfuncname - - -def make_sandbox_trampoline(translator, fnname, args_s, s_result): - """Create a trampoline function with the specified signature. +def register_rsandbox_func(database, ll_func, fnname): + """Register a trampoline function for the given ll_func + and return its name. The trampoline is meant to be used in place of real calls to the external - function named 'fnname'. Instead, it calls a function pointer that is + function named 'fnname'. It calls a function pointer that is under control of the main C program using the sandboxed library. """ try: - extfuncs, seen = translator._sandboxlib_fnnames + extfuncs = database._sandboxlib_fnnames except AttributeError: - extfuncs, seen = translator._sandboxlib_fnnames = {}, set() + extfuncs = database._sandboxlib_fnnames = {} if fnname not in extfuncs: - # map from 'fnname' to the C function doing the call - cfuncname = fnname - if '.' in fnname: - cfuncname = fnname.split('.', 1)[1] # drop the part before the '.' 
- cfuncname = 'sandbox_' + cfuncname - assert cfuncname not in seen, "duplicate name %r" % (cfuncname,) - seen.add(cfuncname) - sandboxfunc = SandboxExternalFunc(cfuncname) - extfuncs[fnname] = sandboxfunc + extfuncs[fnname] = lltype.typeOf(ll_func) else: - sandboxfunc = extfuncs[fnname] - pargs_s, s_presult = sandboxfunc.args_s, sandboxfunc.s_result - assert len(args_s) == len(pargs_s), ( - "non-constant argument length for %r" % (fnname,)) - args_s = [annmodel.unionof(s1, s2) for (s1, s2) in zip(args_s, pargs_s)] - s_result = annmodel.unionof(s_result, s_presult) - sandboxfunc.args_s = args_s - sandboxfunc.s_result = s_result - # - @jit.dont_look_inside - def execute(*args): - return _call_sandbox(fnname, *args) - execute.__name__ = 'sandboxed_%s' % (fnname,) - return execute + FUNC = extfuncs[fnname] + assert lltype.typeOf(ll_func) == FUNC, ( + "seen two sandboxed functions called %r with different " + "signatures:\n %r\n %r" % (fnname, FUNC, lltype.typeOf(ll_func))) + return 'rsandbox_' + fnname -def _call_sandbox(fnname, *args): - "NOT_RPYTHON" - raise NotImplementedError -class ExtEntry(ExtRegistryEntry): - _about_ = _call_sandbox +def add_sandbox_files(database, eci, targetdir): + c_header = [''' +#ifndef _RSANDBOX_H_ +#define _RSANDBOX_H_ - def compute_result_annotation(self, s_fnname, *args_s): - fnname = s_fnname.const - translator = self.bookkeeper.annotator.translator - sandboxfunc = translator._sandboxlib_fnnames[0][fnname] - return sandboxfunc.s_result +#ifndef RPY_SANDBOX_EXPORTED +/* common definitions when including this file from an external C project */ +#define RPY_SANDBOX_EXPORTED extern +typedef long Signed; +typedef unsigned long Unsigned; +#endif - def specialize_call(self, hop): - fnname = hop.spaceop.args[1].value - translator = hop.rtyper.annotator.translator - sandboxfunc = translator._sandboxlib_fnnames[0][fnname] - args_s, s_result = sandboxfunc.args_s, sandboxfunc.s_result - nb_args = len(args_s) - assert len(hop.spaceop.args) == 2 + nb_args - assert len(hop.args_r) == 1 + nb_args - args_r = [hop.rtyper.getrepr(s) for s in args_s] - r_result = hop.rtyper.getrepr(s_result) +'''] + c_source = [''' +#include +#include +#include "rsandbox.h" - if not hasattr(sandboxfunc, 'externalfunc'): - externalfunc = rffi.llexternal(sandboxfunc.cfuncname, - [r.lowleveltype for r in args_r], - r_result.lowleveltype, - sandboxsafe=True, - _nowrapper=True) - sandboxfunc.externalfunc = externalfunc - else: - externalfunc = sandboxfunc.externalfunc +'''] - c_externalfunc = hop.inputconst(lltype.typeOf(externalfunc), - externalfunc) + fnnames = database._sandboxlib_fnnames + for fnname in sorted(fnnames): + FUNC = fnnames[fnname] + rsandboxname = 'rsandbox_' + fnname - args_v = [hop.inputarg(args_r[i], 1 + i) for i in range(nb_args)] - hop.exception_cannot_occur() - return hop.genop("direct_call", [c_externalfunc] + args_v, - resulttype = r_result) + vardecl = cdecl(database.gettype(lltype.Ptr(FUNC)), rsandboxname) + c_header.append('RPY_SANDBOX_EXPORTED %s;\n' % (vardecl,)) - -def add_sandbox_files(database, eci): - from rpython.translator.c.support import cdecl - - c_header = ['#include "common_header.h"\n'] - c_source = ['#include "rsandbox.h"\n'] - fnnames = database.translator._sandboxlib_fnnames[0] - for fnname in sorted(fnnames): - sandboxfunc = fnnames[fnname] - if hasattr(sandboxfunc, 'externalfunc'): - externalfunc = sandboxfunc.externalfunc - TP = lltype.typeOf(externalfunc) - vardecl = cdecl(database.gettype(TP), sandboxfunc.cfuncname) - 
c_header.append('RPY_SANDBOX_EXPORTED %s;\n' % (vardecl,)) - # - emptyfuncname = 'empty_' + sandboxfunc.cfuncname - argnames = ['a%d' % i for i in range(len(TP.TO.ARGS))] - c_source.append(""" + emptyfuncname = 'rsand_def_' + fnname + argnames = ['a%d' % i for i in range(len(FUNC.ARGS))] + c_source.append(""" static %s { abort(); }; %s = %s; -""" % (cdecl(database.gettype(TP.TO, argnames=argnames), emptyfuncname), +""" % (cdecl(database.gettype(FUNC, argnames=argnames), emptyfuncname), vardecl, emptyfuncname)) - import pdb;pdb.set_trace() - - #srcdir = py.path.local(__file__).join('..', 'src') - #files = [ - # srcdir / 'foo.c', - #] - #return eci.merge(ExternalCompilationInfo(separate_module_files=files)) + c_header.append(''' +#endif /* _RSANDBOX_H_ */ +''') + targetdir.join('rsandbox.c').write(''.join(c_source)) + targetdir.join('rsandbox.h').write(''.join(c_header)) + # ^^^ a #include "rsandbox.h" is explicitly added to forwarddecl.h + # from genc.py + + return eci.merge(ExternalCompilationInfo( + separate_module_files=[targetdir.join('rsandbox.c')])) From pypy.commits at gmail.com Sun Mar 20 04:10:57 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 20 Mar 2016 01:10:57 -0700 (PDT) Subject: [pypy-commit] pypy sandbox-lib: Remove the deprecated "sandboxsafe" argument to register_external() Message-ID: <56ee5b11.83561c0a.a74c2.4867@mx.google.com> Author: Armin Rigo Branch: sandbox-lib Changeset: r83181:d9ce306bebe1 Date: 2016-03-20 09:10 +0100 http://bitbucket.org/pypy/pypy/changeset/d9ce306bebe1/ Log: Remove the deprecated "sandboxsafe" argument to register_external() diff --git a/rpython/rlib/rfloat.py b/rpython/rlib/rfloat.py --- a/rpython/rlib/rfloat.py +++ b/rpython/rlib/rfloat.py @@ -4,7 +4,6 @@ from rpython.annotator.model import SomeString, SomeChar from rpython.rlib import objectmodel, unroll -from rpython.rtyper.extfunc import register_external from rpython.rtyper.tool import rffi_platform from rpython.translator.tool.cbuild import ExternalCompilationInfo diff --git a/rpython/rlib/rlocale.py b/rpython/rlib/rlocale.py --- a/rpython/rlib/rlocale.py +++ b/rpython/rlib/rlocale.py @@ -178,8 +178,7 @@ return decimal_point, thousands_sep, grouping register_external(numeric_formatting, [], (str, str, str), - llimpl=numeric_formatting_impl, - sandboxsafe=True) + llimpl=numeric_formatting_impl) _setlocale = external('setlocale', [rffi.INT, rffi.CCHARP], rffi.CCHARP) diff --git a/rpython/rtyper/extfunc.py b/rpython/rtyper/extfunc.py --- a/rpython/rtyper/extfunc.py +++ b/rpython/rtyper/extfunc.py @@ -98,7 +98,7 @@ def register_external(function, args, result=None, export_name=None, - llimpl=None, llfakeimpl=None, sandboxsafe=None): + llimpl=None, llfakeimpl=None): """ function: the RPython function that will be rendered as an external function (e.g.: math.floor) args: a list containing the annotation of the arguments @@ -106,7 +106,6 @@ export_name: the name of the function as it will be seen by the backends llimpl: optional; if provided, this RPython function is called instead of the target function llfakeimpl: optional; if provided, called by the llinterpreter - sandboxsafe: IGNORED at this level """ if export_name is None: diff --git a/rpython/rtyper/extfuncregistry.py b/rpython/rtyper/extfuncregistry.py --- a/rpython/rtyper/extfuncregistry.py +++ b/rpython/rtyper/extfuncregistry.py @@ -23,7 +23,7 @@ f = getattr(rfloat, name) register_external(f, [float], float, export_name="ll_math.ll_math_%s" % name, - sandboxsafe=True, llimpl=llimpl) + llimpl=llimpl) _register = [ # (module, 
[(method name, arg types, return type), ...], ...) (rfloat, [ @@ -54,5 +54,4 @@ method_name = 'll_math_%s' % name register_external(getattr(module, name), arg_types, return_type, export_name='ll_math.%s' % method_name, - sandboxsafe=True, llimpl=getattr(ll_math, method_name)) diff --git a/rpython/rtyper/lltypesystem/llarena.py b/rpython/rtyper/lltypesystem/llarena.py --- a/rpython/rtyper/lltypesystem/llarena.py +++ b/rpython/rtyper/lltypesystem/llarena.py @@ -495,13 +495,11 @@ register_external(arena_malloc, [int, int], llmemory.Address, 'll_arena.arena_malloc', llimpl=llimpl_arena_malloc, - llfakeimpl=arena_malloc, - sandboxsafe=True) + llfakeimpl=arena_malloc) register_external(arena_free, [llmemory.Address], None, 'll_arena.arena_free', llimpl=llimpl_free, - llfakeimpl=arena_free, - sandboxsafe=True) + llfakeimpl=arena_free) def llimpl_arena_reset(arena_addr, size, zero): if zero: @@ -515,38 +513,34 @@ register_external(arena_reset, [llmemory.Address, int, int], None, 'll_arena.arena_reset', llimpl=llimpl_arena_reset, - llfakeimpl=arena_reset, - sandboxsafe=True) + llfakeimpl=arena_reset) def llimpl_arena_reserve(addr, size): pass register_external(arena_reserve, [llmemory.Address, int], None, 'll_arena.arena_reserve', llimpl=llimpl_arena_reserve, - llfakeimpl=arena_reserve, - sandboxsafe=True) + llfakeimpl=arena_reserve) def llimpl_arena_shrink_obj(addr, newsize): pass register_external(arena_shrink_obj, [llmemory.Address, int], None, 'll_arena.arena_shrink_obj', llimpl=llimpl_arena_shrink_obj, - llfakeimpl=arena_shrink_obj, - sandboxsafe=True) + llfakeimpl=arena_shrink_obj) def llimpl_round_up_for_allocation(size, minsize): return (max(size, minsize) + (MEMORY_ALIGNMENT-1)) & ~(MEMORY_ALIGNMENT-1) register_external(_round_up_for_allocation, [int, int], int, 'll_arena.round_up_for_allocation', llimpl=llimpl_round_up_for_allocation, - llfakeimpl=round_up_for_allocation, - sandboxsafe=True) + llfakeimpl=round_up_for_allocation) def llimpl_arena_new_view(addr): return addr register_external(arena_new_view, [llmemory.Address], llmemory.Address, 'll_arena.arena_new_view', llimpl=llimpl_arena_new_view, - llfakeimpl=arena_new_view, sandboxsafe=True) + llfakeimpl=arena_new_view) def llimpl_arena_protect(addr, size, inaccessible): if has_protect: @@ -561,12 +555,11 @@ register_external(arena_protect, [llmemory.Address, lltype.Signed, lltype.Bool], lltype.Void, 'll_arena.arena_protect', llimpl=llimpl_arena_protect, - llfakeimpl=arena_protect, sandboxsafe=True) + llfakeimpl=arena_protect) def llimpl_getfakearenaaddress(addr): return addr register_external(getfakearenaaddress, [llmemory.Address], llmemory.Address, 'll_arena.getfakearenaaddress', llimpl=llimpl_getfakearenaaddress, - llfakeimpl=getfakearenaaddress, - sandboxsafe=True) + llfakeimpl=getfakearenaaddress) From pypy.commits at gmail.com Sun Mar 20 04:17:00 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 20 Mar 2016 01:17:00 -0700 (PDT) Subject: [pypy-commit] pypy default: Patch from issue #2262, ported to trunk Message-ID: <56ee5c7c.8fb81c0a.e11e0.ffffb1a9@mx.google.com> Author: Armin Rigo Branch: Changeset: r83183:c6a8aac9927b Date: 2016-03-20 09:16 +0100 http://bitbucket.org/pypy/pypy/changeset/c6a8aac9927b/ Log: Patch from issue #2262, ported to trunk diff --git a/rpython/rlib/rvmprof/src/vmprof_config.h b/rpython/rlib/rvmprof/src/vmprof_config.h --- a/rpython/rlib/rvmprof/src/vmprof_config.h +++ b/rpython/rlib/rvmprof/src/vmprof_config.h @@ -1,6 +1,10 @@ #define HAVE_SYS_UCONTEXT_H #if defined(__FreeBSD__) -#define 
PC_FROM_UCONTEXT uc_mcontext.mc_rip + #ifdef __i386__ + #define PC_FROM_UCONTEXT uc_mcontext.mc_eip + #else + #define PC_FROM_UCONTEXT uc_mcontext.mc_rip + #endif #elif defined( __APPLE__) #if ((ULONG_MAX) == (UINT_MAX)) #define PC_FROM_UCONTEXT uc_mcontext->__ss.__eip From pypy.commits at gmail.com Sun Mar 20 04:16:58 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 20 Mar 2016 01:16:58 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: Patch from issue #2262 for FreeBSD 32-bit Message-ID: <56ee5c7a.865a1c0a.25b17.4698@mx.google.com> Author: Armin Rigo Branch: release-5.x Changeset: r83182:53133c3fbdbe Date: 2016-03-20 09:15 +0100 http://bitbucket.org/pypy/pypy/changeset/53133c3fbdbe/ Log: Patch from issue #2262 for FreeBSD 32-bit diff --git a/rpython/rlib/rvmprof/src/vmprof_config.h b/rpython/rlib/rvmprof/src/vmprof_config.h --- a/rpython/rlib/rvmprof/src/vmprof_config.h +++ b/rpython/rlib/rvmprof/src/vmprof_config.h @@ -1,5 +1,11 @@ #define HAVE_SYS_UCONTEXT_H -#if defined(__FreeBSD__) || defined(__APPLE__) +#if defined(__FreeBSD__) +#if defined(__i386__) +#define PC_FROM_UCONTEXT uc_mcontext.mc_eip +#else +#define PC_FROM_UCONTEXT uc_mcontext.mc_rip +#endif +#elif defined(__APPLE__) #define PC_FROM_UCONTEXT uc_mcontext.mc_rip #else #define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_RIP] From pypy.commits at gmail.com Sun Mar 20 04:40:01 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sun, 20 Mar 2016 01:40:01 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: fix: things were broken with more than two args that are not the Message-ID: <56ee61e1.906b1c0a.b417c.55b5@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83184:ada9ee9869bc Date: 2016-03-20 09:39 +0100 http://bitbucket.org/pypy/pypy/changeset/ada9ee9869bc/ Log: fix: things were broken with more than two args that are not the quasi-immutable case diff --git a/rpython/jit/metainterp/compatible.py b/rpython/jit/metainterp/compatible.py --- a/rpython/jit/metainterp/compatible.py +++ b/rpython/jit/metainterp/compatible.py @@ -78,6 +78,10 @@ if op.numargs() == 2: return copied_op, PureCallCondition(op) arg2 = copied_op.getarg(2) + if arg2.is_constant(): + # already a constant, can just use PureCallCondition + return copied_op, PureCallCondition(op) + # really simple-minded pattern matching # the order of things is like this: # GUARD_COMPATIBLE(x) diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -1095,13 +1095,11 @@ assert typetag == self.TY_REF # for now refval = metainterp_sd.cpu.get_value_direct(deadframe, 'r', index) if self.is_compatible(metainterp_sd.cpu, refval): - print "~~~~~~~~~~~~~~~~~~~ compatible! 
growing switch", self from rpython.jit.metainterp.blackhole import resume_in_blackhole metainterp_sd.cpu.grow_guard_compatible_switch( self.rd_loop_token, self, refval) resume_in_blackhole(metainterp_sd, jitdriver_sd, self, deadframe) else: - print "~~~~~~~~~~~~~~~~~~~ not compatible!", self # a real failure return ResumeGuardDescr.handle_fail(self, deadframe, metainterp_sd, jitdriver_sd) diff --git a/rpython/jit/metainterp/test/test_compatible.py b/rpython/jit/metainterp/test/test_compatible.py --- a/rpython/jit/metainterp/test/test_compatible.py +++ b/rpython/jit/metainterp/test/test_compatible.py @@ -22,7 +22,7 @@ c = A() c.count = 0 @jit.elidable_compatible() - def g(s): + def g(s, ignored): c.count += 1 return s.x @@ -30,9 +30,10 @@ while n > 0: driver.can_enter_jit(n=n, x=x) driver.jit_merge_point(n=n, x=x) - n -= g(x) + n -= g(x, 7) def main(): + g(p1, 9) # make annotator not make argument constant f(100, p1) f(100, p2) f(100, p3) From pypy.commits at gmail.com Sun Mar 20 05:14:24 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 20 Mar 2016 02:14:24 -0700 (PDT) Subject: [pypy-commit] pypy sandbox-lib: tweaks Message-ID: <56ee69f0.55031c0a.717d2.5d02@mx.google.com> Author: Armin Rigo Branch: sandbox-lib Changeset: r83185:70ce574912b2 Date: 2016-03-20 10:13 +0100 http://bitbucket.org/pypy/pypy/changeset/70ce574912b2/ Log: tweaks diff --git a/rpython/translator/rsandbox/default.h b/rpython/translator/rsandbox/default.h new file mode 100644 --- /dev/null +++ b/rpython/translator/rsandbox/default.h @@ -0,0 +1,24 @@ +/*** translator/rsandbox/default.h ***/ + + +/* This is called by most default implementations of 'rsandbox_*' */ +__attribute__((noinline, noreturn)) +static void rsand_fatal(const char *fnname) +{ + fprintf(stderr, "The sandboxed program called the C function %s(), " + "but no implementation of this function was provided.\n", + fnname); + abort(); +} + + +/* Default implementation for some functions that don't abort */ + +static char *rsand_def_getenv(char *v) +{ + /* default implementation: "no such environment variable" */ + return NULL; +} + + +/*** generated code follows ***/ diff --git a/rpython/translator/rsandbox/rsandbox.py b/rpython/translator/rsandbox/rsandbox.py --- a/rpython/translator/rsandbox/rsandbox.py +++ b/rpython/translator/rsandbox/rsandbox.py @@ -1,4 +1,4 @@ -import py +import py, re from rpython.rtyper.lltypesystem import lltype, rffi from rpython.translator.tool.cbuild import ExternalCompilationInfo from rpython.translator.c.support import cdecl @@ -33,20 +33,37 @@ #define _RSANDBOX_H_ #ifndef RPY_SANDBOX_EXPORTED -/* common definitions when including this file from an external C project */ +/* Common definitions when including this file from an external C project */ + +#include +#include + #define RPY_SANDBOX_EXPORTED extern + typedef long Signed; typedef unsigned long Unsigned; + #endif +/* The list of 'rsandbox_*' function pointers is automatically + generated. Most of these function pointers are initialized to + point to a function that aborts the sandboxed execution. The + sandboxed program cannot, by default, use any of them. A few + exceptions are provided, where the default implementation returns a + safe default; for example rsandbox_getenv(). 
+*/ '''] c_source = [''' +#include "common_header.h" +#include "rsandbox.h" #include -#include -#include "rsandbox.h" '''] + default_h = py.path.local(__file__).join('..', 'default.h').read() + c_source.append(default_h) + present = set(re.findall(r'\brsand_def_([a-zA-Z0-9_]+)[(]', default_h)) + fnnames = database._sandboxlib_fnnames for fnname in sorted(fnnames): FUNC = fnnames[fnname] @@ -57,13 +74,15 @@ emptyfuncname = 'rsand_def_' + fnname argnames = ['a%d' % i for i in range(len(FUNC.ARGS))] - c_source.append(""" + if fnname not in present: + c_source.append(""" static %s { - abort(); + rsand_fatal("%s"); }; -%s = %s; -""" % (cdecl(database.gettype(FUNC, argnames=argnames), emptyfuncname), - vardecl, emptyfuncname)) +""" % (cdecl(database.gettype(FUNC, argnames=argnames), emptyfuncname), fnname)) + else: + c_source.append('\n') + c_source.append("%s = %s;\n" % (vardecl, emptyfuncname)) c_header.append(''' #endif /* _RSANDBOX_H_ */ From pypy.commits at gmail.com Sun Mar 20 05:21:50 2016 From: pypy.commits at gmail.com (arigo) Date: Sun, 20 Mar 2016 02:21:50 -0700 (PDT) Subject: [pypy-commit] cffi default: Update comment Message-ID: <56ee6bae.c9161c0a.b2f40.60fb@mx.google.com> Author: Armin Rigo Branch: Changeset: r2650:9e6f7990f234 Date: 2016-03-20 10:21 +0100 http://bitbucket.org/cffi/cffi/changeset/9e6f7990f234/ Log: Update comment diff --git a/doc/source/embedding.rst b/doc/source/embedding.rst --- a/doc/source/embedding.rst +++ b/doc/source/embedding.rst @@ -28,9 +28,8 @@ the CPython C API: no ``Py_Initialize()`` nor ``PyRun_SimpleString()`` nor even ``PyObject``. It works identically on CPython and PyPy. -.. note:: PyPy release 4.0.1 contains CFFI 1.4 only. - -This is entirely *new in version 1.5.* +This is entirely *new in version 1.5.* (PyPy contains CFFI 1.5 since +release 5.0.) 
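The embedding mode that this documentation hunk refers to lets a compiled CFFI module be driven directly from C, with no Py_Initialize() or PyObject involved. As a rough illustration of what such a builder script looks like (it closely follows the shape of the cffi embedding documentation itself; the module name _plugin_cffi and the function plugin_do_stuff() are made-up placeholders, not part of this changeset):

    # build_plugin.py -- minimal sketch of a CFFI 1.5 embedding builder.
    import cffi

    ffibuilder = cffi.FFI()

    # C-level functions that the produced shared library will export.
    ffibuilder.embedding_api("""
        int plugin_do_stuff(int x, int y);
    """)

    ffibuilder.set_source("_plugin_cffi", "")

    # Python code executed once, inside the embedded interpreter, the first
    # time the library is used; it attaches Python callbacks to the C API.
    ffibuilder.embedding_init_code("""
        from _plugin_cffi import ffi

        @ffi.def_extern()
        def plugin_do_stuff(x, y):
            return x + y
    """)

    ffibuilder.compile(target="libplugin.*", verbose=True)

Running the script produces an ordinary shared library (libplugin.so, libplugin.dylib or plugin.dll) that a C program can link against and call plugin_do_stuff() from, which is the scenario the corrected version note above is about.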
Usage From pypy.commits at gmail.com Sun Mar 20 05:50:01 2016 From: pypy.commits at gmail.com (fijal) Date: Sun, 20 Mar 2016 02:50:01 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: start working on live ranges Message-ID: <56ee7249.c856c20a.b438.2d54@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83186:46a2efc05eb0 Date: 2016-03-20 11:49 +0200 http://bitbucket.org/pypy/pypy/changeset/46a2efc05eb0/ Log: start working on live ranges diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -9,7 +9,8 @@ from rpython.jit.metainterp.history import ConstInt, Const, ConstFloat, ConstPtr from rpython.jit.metainterp.resoperation import AbstractResOp, AbstractInputArg,\ - ResOperation, oparity, rop, opwithdescr, GuardResOp, IntOp, FloatOp, RefOp + ResOperation, oparity, rop, opwithdescr, GuardResOp, IntOp, FloatOp, RefOp,\ + opclasses from rpython.rlib.rarithmetic import intmask, r_uint from rpython.rlib.objectmodel import we_are_translated from rpython.rtyper.lltypesystem import rffi, lltype, llmemory @@ -47,6 +48,20 @@ def get(self, index): return self.main_iter._untag(index) + def _update_liverange(self, item, index, liveranges): + tag, v = untag(item) + if tag == TAGBOX: + liveranges[v] = index + + def update_liveranges(self, index, liveranges): + for item in self.vable_array: + self._update_liverange(item, index, liveranges) + for item in self.vref_array: + self._update_liverange(item, index, liveranges) + for frame in self.framestack: + for item in frame.box_array: + self._update_liverange(item, index, liveranges) + def unpack_jitcode_pc(self, snapshot): return unpack_uint(snapshot.packed_jitcode_pc) @@ -107,6 +122,26 @@ def get_snapshot_iter(self, index): return SnapshotIterator(self, self.trace._snapshots[index]) + def next_element_update_live_range(self, index, liveranges): + opnum = self._next() + if oparity[opnum] == -1: + argnum = self._next() + else: + argnum = oparity[opnum] + for i in range(argnum): + tagged = self._next() + tag, v = untag(tagged) + if tag == TAGBOX: + liveranges[v] = index + if opclasses[opnum].type != 'v': + liveranges[index] = index + if opwithdescr[opnum]: + descr_index = self._next() + if rop.is_guard(opnum): + self.get_snapshot_iter(descr_index).update_liveranges( + index, liveranges) + return index + 1 + def next(self): opnum = self._next() if oparity[opnum] == -1: @@ -132,7 +167,8 @@ if rop.is_guard(opnum): assert isinstance(res, GuardResOp) res.rd_resume_position = descr_index - self._cache[self._count] = res + if res.type != 'v': + self._cache[self._count] = res self._count += 1 return res @@ -359,6 +395,14 @@ assert metainterp_sd return TraceIterator(self, self._start, self._pos, metainterp_sd=metainterp_sd) + def get_live_ranges(self, metainterp_sd): + t = self.get_iter(metainterp_sd) + liveranges = [0] * self._count + index = t._count + while not t.done(): + index = t.next_element_update_live_range(index, liveranges) + return liveranges + def unpack(self): iter = self.get_iter() ops = [] diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -13,6 +13,9 @@ def __init__(self, index): self.index = index +class SomeDescr(AbstractDescr): + pass + class metainterp_sd(object): pass @@ -175,9 +178,6 @@ assert l[0].getarglist() == [i0, i1] def 
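Taken together, the heapcache hook and the check in opimpl_ref_guard_compatible above mean that tracing records at most one guard_compatible per box, however many elidable_compatible calls see that box. A stripped-down sketch of the bookkeeping (ToyHeapCache and promote_compatible are invented names for illustration, not the real JIT classes):

    # Remember, per box, whether a guard_compatible was already emitted, and
    # skip the guard for later promotions of the same box.
    class ToyHeapCache(object):
        def __init__(self):
            self._guard_compatible_seen = {}

        def have_guard_compatible(self, box):
            return self._guard_compatible_seen.get(box, False)

        def have_guard_compatible_now(self, box):
            self._guard_compatible_seen[box] = True

    def promote_compatible(heapcache, box, emitted_ops):
        # emit guard_compatible(box) only the first time 'box' is promoted
        if not heapcache.have_guard_compatible(box):
            emitted_ops.append(('guard_compatible', box))
            heapcache.have_guard_compatible_now(box)

    ops = []
    cache = ToyHeapCache()
    for _ in range(3):                  # three elidable_compatible calls on x
        promote_compatible(cache, 'x', ops)
    assert ops == [('guard_compatible', 'x')]   # only one guard is recorded

The test added below (test_dont_record_repeated_guard_compatible) checks the same property on the real machinery: three extern(x) calls in fn() still end up with check_operations_history(guard_compatible=1).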
test_virtualizable_virtualref(self): - class SomeDescr(AbstractDescr): - pass - i0, i1, i2 = IntFrontendOp(0), IntFrontendOp(0), IntFrontendOp(0) t = Trace([i0, i1, i2]) p0 = FakeOp(t.record_op(rop.NEW_WITH_VTABLE, [], descr=SomeDescr())) @@ -186,4 +186,12 @@ (i0, i1, i2), l, iter = self.unpack(t) assert not l[1].framestack assert l[1].virtualizables == [l[0], i1, i2] - assert l[1].vref_boxes == [l[0], i1] \ No newline at end of file + assert l[1].vref_boxes == [l[0], i1] + + def test_liveranges(self): + i0, i1, i2 = IntFrontendOp(0), IntFrontendOp(0), IntFrontendOp(0) + t = Trace([i0, i1, i2]) + p0 = FakeOp(t.record_op(rop.NEW_WITH_VTABLE, [], descr=SomeDescr())) + t.record_op(rop.GUARD_TRUE, [i0]) + resume.capture_resumedata([], [i1, i2, p0], [p0, i1], t) + assert t.get_live_ranges(metainterp_sd) == [4, 4, 4, 4, 0] From pypy.commits at gmail.com Sun Mar 20 08:47:10 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sun, 20 Mar 2016 05:47:10 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: make the heapcache remember that we already produced a guard_compatible for a Message-ID: <56ee9bce.0a301c0a.49916.ffffa24d@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83187:57232342ceba Date: 2016-03-20 13:06 +0100 http://bitbucket.org/pypy/pypy/changeset/57232342ceba/ Log: make the heapcache remember that we already produced a guard_compatible for a box diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -9,6 +9,7 @@ def reset_keep_likely_virtual(self): self.known_class = False + self.guard_compatible = False self.known_nullity = False # did we see the allocation during tracing? self.seen_allocation = False @@ -303,6 +304,15 @@ def nullity_now_known(self, box): self.getvalue(box).known_nullity = True + def have_guard_compatible(self, box): + value = self.getvalue(box, create=False) + if value: + return value.guard_compatible + return False + + def have_guard_compatible_now(self, box): + self.getvalue(box).guard_compatible = True + def is_nonstandard_virtualizable(self, box): value = self.getvalue(box, create=False) if value: diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1202,11 +1202,12 @@ @arguments("box", "orgpc") def opimpl_ref_guard_compatible(self, box, orgpc): if isinstance(box, Const): - return box # no promotion needed, already a Const - else: + return # no guard needed, already a Const + elif not self.metainterp.heapcache.have_guard_compatible(box): promoted_box = executor.constant_from_op(box) self.metainterp.generate_guard(rop.GUARD_COMPATIBLE, box, [promoted_box], resumepc=orgpc) + self.metainterp.heapcache.have_guard_compatible_now(box) # importantly, there is no replace_box here! 
@arguments("box", "orgpc") diff --git a/rpython/jit/metainterp/test/test_compatible.py b/rpython/jit/metainterp/test/test_compatible.py --- a/rpython/jit/metainterp/test/test_compatible.py +++ b/rpython/jit/metainterp/test/test_compatible.py @@ -153,3 +153,29 @@ assert x < 30 # XXX check number of bridges + + def test_dont_record_repeated_guard_compatible(self): + class A: + pass + class B(A): + pass + @jit.elidable_compatible() + def extern(x): + return isinstance(x, A) + @jit.dont_look_inside + def pick(n): + if n: + x = a + else: + x = b + return x + a = A() + b = B() + def fn(n): + x = pick(n) + return extern(x) + extern(x) + extern(x) + + res = self.interp_operations(fn, [1]) + assert res == 3 + self.check_operations_history(guard_compatible=1) + From pypy.commits at gmail.com Sun Mar 20 08:47:12 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sun, 20 Mar 2016 05:47:12 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: it can actually happen that there are two guard_compatibles with inconsistent Message-ID: <56ee9bd0.86351c0a.dbdc2.ffff9fe8@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83188:bf997c3bade6 Date: 2016-03-20 13:11 +0100 http://bitbucket.org/pypy/pypy/changeset/bf997c3bade6/ Log: it can actually happen that there are two guard_compatibles with inconsistent constants. raise InvalidLoop instead of crashing. diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -220,10 +220,15 @@ else: self.make_nonnull(arg0) info = self.getptrinfo(arg0) - if info._compatibility_conditions: + ccond = info._compatibility_conditions + if ccond: # seen a previous guard_compatible # check that it's the same previous constant - assert info._compatibility_conditions.known_valid.same_constant(op.getarg(1)) + if not ccond.known_valid.same_constant(op.getarg(1)): + r = self.optimizer.metainterp_sd.logger_ops.repr_of_resop( + op) + raise InvalidLoop('A GUARD_VALUE (%s) ' + 'was proven to always fail' % r) return else: info._compatibility_conditions = CompatibilityCondition( diff --git a/rpython/jit/metainterp/optimizeopt/test/test_compatible.py b/rpython/jit/metainterp/optimizeopt/test/test_compatible.py --- a/rpython/jit/metainterp/optimizeopt/test/test_compatible.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_compatible.py @@ -1,8 +1,10 @@ +import pytest from rpython.jit.metainterp.optimizeopt.test.test_util import ( LLtypeMixin) from rpython.jit.metainterp.optimizeopt.test.test_optimizebasic import ( BaseTestBasic) from rpython.jit.metainterp.history import ConstInt, ConstPtr +from rpython.jit.metainterp.optimize import InvalidLoop class TestCompatible(BaseTestBasic, LLtypeMixin): @@ -60,6 +62,15 @@ """ self.optimize_loop(ops, expected) + def test_guard_compatible_inconsistent(self): + ops = """ + [p1] + guard_compatible(p1, ConstPtr(myptr)) [] + guard_compatible(p1, ConstPtr(myptrb)) [] + jump(ConstPtr(myptr)) + """ + pytest.raises(InvalidLoop, self.optimize_loop, ops, ops) + def test_guard_compatible_call_pure(self): call_pure_results = { (ConstInt(123), ConstPtr(self.myptr)): ConstInt(5), From pypy.commits at gmail.com Sun Mar 20 08:47:13 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sun, 20 Mar 2016 05:47:13 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: fix test Message-ID: <56ee9bd1.4c181c0a.e2e25.ffff99b4@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible 
Changeset: r83189:6874376877b6 Date: 2016-03-20 13:12 +0100 http://bitbucket.org/pypy/pypy/changeset/6874376877b6/ Log: fix test diff --git a/rpython/jit/metainterp/optimizeopt/test/test_compatible.py b/rpython/jit/metainterp/optimizeopt/test/test_compatible.py --- a/rpython/jit/metainterp/optimizeopt/test/test_compatible.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_compatible.py @@ -108,4 +108,4 @@ descr = self.loop.operations[1].getdescr() assert descr._compatibility_conditions is not None assert descr._compatibility_conditions.known_valid.same_constant(ConstPtr(self.myptr)) - assert len(descr._compatibility_conditions.pure_call_conditions) == 2 + assert len(descr._compatibility_conditions.conditions) == 2 From pypy.commits at gmail.com Sun Mar 20 10:01:52 2016 From: pypy.commits at gmail.com (alex_gaynor) Date: Sun, 20 Mar 2016 07:01:52 -0700 (PDT) Subject: [pypy-commit] pypy stdlib-2.7.11: Added some fixes for audioop Message-ID: <56eead50.654fc20a.991cb.7f30@mx.google.com> Author: Alex Gaynor Branch: stdlib-2.7.11 Changeset: r83190:23b3a3180df8 Date: 2016-03-20 10:01 -0400 http://bitbucket.org/pypy/pypy/changeset/23b3a3180df8/ Log: Added some fixes for audioop diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py --- a/lib_pypy/audioop.py +++ b/lib_pypy/audioop.py @@ -553,9 +553,14 @@ def adpcm2lin(cp, size, state): _check_size(size) if state is None: - state = (0, 0) + valpred = 0 + index = 0 + else: + valpred, index = state + # XXX: len(stepsizeTable) = 89 + if valpred >= 0x8000 or valpred < -0x8000 or index >= 89: + raise ValueError("bad state") rv = ffi.new("unsigned char[]", len(cp) * size * 2) - state_ptr = ffi.new("int[]", state) + state_ptr = ffi.new("int[]", [valpred, index]) lib.adcpm2lin(rv, cp, len(cp), size, state_ptr) return ffi.buffer(rv)[:], tuple(state_ptr) - From pypy.commits at gmail.com Sun Mar 20 10:47:18 2016 From: pypy.commits at gmail.com (alex_gaynor) Date: Sun, 20 Mar 2016 07:47:18 -0700 (PDT) Subject: [pypy-commit] pypy stdlib-2.7.11: I have conquered the latest round of utf7 bugs! Message-ID: <56eeb7f6.d4df1c0a.4f9f4.24d7@mx.google.com> Author: Alex Gaynor Branch: stdlib-2.7.11 Changeset: r83191:f3d2f640ffca Date: 2016-03-20 10:46 -0400 http://bitbucket.org/pypy/pypy/changeset/f3d2f640ffca/ Log: I have conquered the latest round of utf7 bugs! 
diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py --- a/pypy/module/_codecs/test/test_codecs.py +++ b/pypy/module/_codecs/test/test_codecs.py @@ -28,8 +28,8 @@ raises( UnicodeDecodeError, unicode,'\\NSPACE}','unicode-escape') raises( UnicodeDecodeError, unicode,'\\NSPACE','unicode-escape') raises( UnicodeDecodeError, unicode,'\\N','unicode-escape') - assert unicode('\\N{SPACE}\\N{SPACE}','unicode-escape') == u" " - assert unicode('\\N{SPACE}a\\N{SPACE}','unicode-escape') == u" a " + assert unicode('\\N{SPACE}\\N{SPACE}','unicode-escape') == u" " + assert unicode('\\N{SPACE}a\\N{SPACE}','unicode-escape') == u" a " assert "\\N{foo}xx".decode("unicode-escape", "ignore") == u"xx" assert 1 <= len(u"\N{CJK UNIFIED IDEOGRAPH-20000}") <= 2 @@ -676,6 +676,9 @@ (b'a+//,+IKw-b', u'a\ufffd\u20acb'), (b'a+///,+IKw-b', u'a\uffff\ufffd\u20acb'), (b'a+////,+IKw-b', u'a\uffff\ufffd\u20acb'), + (b'a+2AE\xe1b', u'a\ufffdb'), + (b'a+2AEA-b', u'a\ufffdb'), + (b'a+2AH-b', u'a\ufffdb'), ] for raw, expected in tests: raises(UnicodeDecodeError, codecs.utf_7_decode, raw, 'strict', True) diff --git a/rpython/rlib/runicode.py b/rpython/rlib/runicode.py --- a/rpython/rlib/runicode.py +++ b/rpython/rlib/runicode.py @@ -812,10 +812,9 @@ startinpos = 0 while pos < size: ch = s[pos] - oc = ord(ch) if inShift: # in a base-64 section - if _utf7_IS_BASE64(oc): #consume a base-64 character + if _utf7_IS_BASE64(ord(ch)): #consume a base-64 character base64buffer = (base64buffer << 6) | _utf7_FROM_BASE64(ch) base64bits += 6 pos += 1 @@ -828,7 +827,7 @@ assert outCh <= 0xffff if surrogate: # expecting a second surrogate - if outCh >= 0xDC00 and outCh <= 0xDFFFF: + if outCh >= 0xDC00 and outCh <= 0xDFFF: if MAXUNICODE < 65536: result.append(unichr(surrogate)) result.append(unichr(outCh)) @@ -851,15 +850,11 @@ else: # now leaving a base-64 section inShift = False - pos += 1 - - if surrogate: - result.append(unichr(surrogate)) - surrogate = 0 if base64bits > 0: # left-over bits if base64bits >= 6: # We've seen at least one base-64 character + pos += 1 msg = "partial character in shift sequence" res, pos = errorhandler(errors, 'utf7', msg, s, pos-1, pos) @@ -868,20 +863,21 @@ else: # Some bits remain; they should be zero if base64buffer != 0: + pos += 1 msg = "non-zero padding bits in shift sequence" res, pos = errorhandler(errors, 'utf7', msg, s, pos-1, pos) result.append(res) continue + if surrogate and _utf7_DECODE_DIRECT(ord(ch)): + result.append(unichr(surrogate)) + surrogate = 0 + if ch == '-': # '-' is absorbed; other terminating characters are # preserved - base64bits = 0 - base64buffer = 0 - surrogate = 0 - else: - result.append(unichr(ord(ch))) + pos += 1 elif ch == '+': startinpos = pos @@ -891,12 +887,13 @@ result.append(u'+') else: # begin base64-encoded section inShift = 1 + surrogate = 0 shiftOutStartPos = result.getlength() base64bits = 0 base64buffer = 0 - elif _utf7_DECODE_DIRECT(oc): # character decodes at itself - result.append(unichr(oc)) + elif _utf7_DECODE_DIRECT(ord(ch)): # character decodes at itself + result.append(unichr(ord(ch))) pos += 1 else: startinpos = pos @@ -909,6 +906,7 @@ final_length = result.getlength() if inShift and final: # in shift sequence, no more to follow # if we're in an inconsistent state, that's an error + inShift = 0 if (surrogate or base64bits >= 6 or (base64bits > 0 and base64buffer != 0)): From pypy.commits at gmail.com Sun Mar 20 10:52:16 2016 From: pypy.commits at gmail.com (alex_gaynor) Date: Sun, 20 Mar 2016 07:52:16 -0700 (PDT) 
Subject: [pypy-commit] pypy stdlib-2.7.11: Check an error case in random Message-ID: <56eeb920.d33f1c0a.a460e.ffffbd82@mx.google.com> Author: Alex Gaynor Branch: stdlib-2.7.11 Changeset: r83192:29e976bd194a Date: 2016-03-20 10:51 -0400 http://bitbucket.org/pypy/pypy/changeset/29e976bd194a/ Log: Check an error case in random diff --git a/pypy/module/_random/interp_random.py b/pypy/module/_random/interp_random.py --- a/pypy/module/_random/interp_random.py +++ b/pypy/module/_random/interp_random.py @@ -75,7 +75,10 @@ w_item = space.add(w_item, w_add) self._rnd.state[i] = space.uint_w(w_item) w_item = space.getitem(w_state, space.newint(rrandom.N)) - self._rnd.index = space.int_w(w_item) + index = space.int_w(w_item) + if index < 0 or index > rrandom.N: + raise OperationError(space.w_ValueError, "invalid state") + self._rnd.index = index def jumpahead(self, space, w_n): if space.isinstance_w(w_n, space.w_long): From pypy.commits at gmail.com Sun Mar 20 11:05:49 2016 From: pypy.commits at gmail.com (alex_gaynor) Date: Sun, 20 Mar 2016 08:05:49 -0700 (PDT) Subject: [pypy-commit] pypy stdlib-2.7.11: whoops Message-ID: <56eebc4d.07b71c0a.3bb3.ffffc2d0@mx.google.com> Author: Alex Gaynor Branch: stdlib-2.7.11 Changeset: r83193:0ef70082ab3c Date: 2016-03-20 11:05 -0400 http://bitbucket.org/pypy/pypy/changeset/0ef70082ab3c/ Log: whoops diff --git a/pypy/module/_random/interp_random.py b/pypy/module/_random/interp_random.py --- a/pypy/module/_random/interp_random.py +++ b/pypy/module/_random/interp_random.py @@ -77,7 +77,7 @@ w_item = space.getitem(w_state, space.newint(rrandom.N)) index = space.int_w(w_item) if index < 0 or index > rrandom.N: - raise OperationError(space.w_ValueError, "invalid state") + raise OperationError(space.w_ValueError, space.wrap("invalid state")) self._rnd.index = index def jumpahead(self, space, w_n): From pypy.commits at gmail.com Sun Mar 20 14:20:09 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sun, 20 Mar 2016 11:20:09 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: only store the parts of the op that are needed in PureCallCondition, also Message-ID: <56eee9d9.2a6ec20a.c0820.ffffc4e8@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83194:a4b0c15a715b Date: 2016-03-20 14:09 +0100 http://bitbucket.org/pypy/pypy/changeset/a4b0c15a715b/ Log: only store the parts of the op that are needed in PureCallCondition, also deduplicate PureCallCondition diff --git a/rpython/jit/metainterp/compatible.py b/rpython/jit/metainterp/compatible.py --- a/rpython/jit/metainterp/compatible.py +++ b/rpython/jit/metainterp/compatible.py @@ -56,6 +56,9 @@ self.last_quasi_immut_field_op = None def record_condition(self, cond, res, optimizer): + for oldcond in self.conditions: + if oldcond.same_cond(cond, res): + return cond.activate(res, optimizer) self.conditions.append(cond) @@ -117,16 +120,22 @@ def activate_secondary(self, ref, loop_token): pass + def same_cond(self, other, res): + return False + class PureCallCondition(Condition): def __init__(self, op): - self.op = op + args = op.getarglist()[:] + args[1] = None + self.args = args + self.descr = op.getdescr() def check(self, cpu, ref): from rpython.rlib.debug import debug_print, debug_start, debug_stop - calldescr = self.op.getdescr() + calldescr = self.descr # change exactly the first argument - arglist = self.op.getarglist() + arglist = self.args arglist[1] = newconst(ref) try: res = do_call(cpu, arglist, calldescr) @@ -135,10 +144,27 @@ debug_print("call to elidable_compatible function raised") 
debug_stop("jit-guard-compatible") return False + finally: + arglist[1] = None if not res.same_constant(self.res): return False return True + def same_cond(self, other, res): + if type(other) != PureCallCondition: + return False + if len(self.args) != len(other.args): + return False + if not self.res.same_constant(res): + return False + assert self.args[1] is other.args[1] is None + for i in range(len(self.args)): + if i == 1: + continue + if not self.args[i].same_constant(other.args[i]): + return False + return True + class QuasiimmutGetfieldAndPureCallCondition(PureCallCondition): def __init__(self, op, qmutdescr): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_compatible.py b/rpython/jit/metainterp/optimizeopt/test/test_compatible.py --- a/rpython/jit/metainterp/optimizeopt/test/test_compatible.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_compatible.py @@ -109,3 +109,35 @@ assert descr._compatibility_conditions is not None assert descr._compatibility_conditions.known_valid.same_constant(ConstPtr(self.myptr)) assert len(descr._compatibility_conditions.conditions) == 2 + + def test_deduplicate_conditions(self): + call_pure_results = { + (ConstInt(123), ConstPtr(self.myptr)): ConstInt(5), + } + ops = """ + [p1] + guard_compatible(p1, ConstPtr(myptr)) [] + i3 = call_pure_i(123, p1, descr=plaincalldescr) + i4 = call_pure_i(123, p1, descr=plaincalldescr) + i5 = call_pure_i(123, p1, descr=plaincalldescr) + i6 = call_pure_i(123, p1, descr=plaincalldescr) + escape_n(i3) + escape_n(i4) + escape_n(i5) + escape_n(i6) + jump(ConstPtr(myptr)) + """ + expected = """ + [p1] + guard_compatible(p1, ConstPtr(myptr)) [] + escape_n(5) + escape_n(5) + escape_n(5) + escape_n(5) + jump(ConstPtr(myptr)) + """ + self.optimize_loop(ops, expected, call_pure_results=call_pure_results) + descr = self.loop.operations[1].getdescr() + assert descr._compatibility_conditions is not None + assert descr._compatibility_conditions.known_valid.same_constant(ConstPtr(self.myptr)) + assert len(descr._compatibility_conditions.conditions) == 1 From pypy.commits at gmail.com Sun Mar 20 14:20:11 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sun, 20 Mar 2016 11:20:11 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: support for quasi-immutable int/float fields Message-ID: <56eee9db.86351c0a.dbdc2.006f@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83195:6920e13f35fd Date: 2016-03-20 14:17 +0100 http://bitbucket.org/pypy/pypy/changeset/6920e13f35fd/ Log: support for quasi-immutable int/float fields diff --git a/rpython/jit/metainterp/compatible.py b/rpython/jit/metainterp/compatible.py --- a/rpython/jit/metainterp/compatible.py +++ b/rpython/jit/metainterp/compatible.py @@ -94,7 +94,7 @@ # we want to discover this (and so far precisely this) situation and # make it possible for the GUARD_COMPATIBLE to still remove the call, # even though the second argument is not constant - if arg2.getopnum() != rop.GETFIELD_GC_R: + if arg2.getopnum() not in (rop.GETFIELD_GC_R, rop.GETFIELD_GC_I, rop.GETFIELD_GC_F): return None, None if not self.last_quasi_immut_field_op: return None, None diff --git a/rpython/jit/metainterp/optimizeopt/test/test_compatible.py b/rpython/jit/metainterp/optimizeopt/test/test_compatible.py --- a/rpython/jit/metainterp/optimizeopt/test/test_compatible.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_compatible.py @@ -141,3 +141,28 @@ assert descr._compatibility_conditions is not None assert 
descr._compatibility_conditions.known_valid.same_constant(ConstPtr(self.myptr))
        assert len(descr._compatibility_conditions.conditions) == 1
+
+    def test_quasiimmut(self):
+        ops = """
+        [p1]
+        guard_compatible(p1, ConstPtr(quasiptr)) []
+        quasiimmut_field(p1, descr=quasiimmutdescr)
+        guard_not_invalidated() []
+        i0 = getfield_gc_i(p1, descr=quasifielddescr)
+        i1 = call_pure_i(123, p1, i0, descr=nonwritedescr)
+        escape_n(i1)
+        jump(p1)
+        """
+        expected = """
+        [p1]
+        guard_compatible(p1, ConstPtr(quasiptr)) []
+        guard_not_invalidated() []
+        i0 = getfield_gc_i(p1, descr=quasifielddescr) # will be removed by the backend
+        escape_n(5)
+        jump(p1)
+        """
+        call_pure_results = {
+            (ConstInt(123), ConstPtr(self.quasiptr), ConstInt(-4247)): ConstInt(5),
+        }
+        self.optimize_loop(ops, expected, call_pure_results)
+

From pypy.commits at gmail.com Sun Mar 20 14:20:14 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Sun, 20 Mar 2016 11:20:14 -0700 (PDT)
Subject: [pypy-commit] pypy guard-compatible: factor out (at least a part of) the common code between compatible.py and
Message-ID: <56eee9de.6774c20a.14549.ffffc1ae@mx.google.com>

Author: Carl Friedrich Bolz
Branch: guard-compatible
Changeset: r83197:c58e4aa8c314
Date: 2016-03-20 19:19 +0100
http://bitbucket.org/pypy/pypy/changeset/c58e4aa8c314/

Log:	factor out (at least a part of) the common code between
	compatible.py and executor.py

diff --git a/rpython/jit/metainterp/compatible.py b/rpython/jit/metainterp/compatible.py
--- a/rpython/jit/metainterp/compatible.py
+++ b/rpython/jit/metainterp/compatible.py
@@ -5,35 +5,10 @@
 def do_call(cpu, argboxes, descr):
     from rpython.jit.metainterp.history import INT, REF, FLOAT, VOID
     from rpython.jit.metainterp.blackhole import NULL
-    # XXX XXX almost copy from executor.py
+    from rpython.jit.metainterp.executor import _separate_call_arguments
     rettype = descr.get_result_type()
     # count the number of arguments of the different types
-    count_i = count_r = count_f = 0
-    for i in range(1, len(argboxes)):
-        type = argboxes[i].type
-        if type == INT: count_i += 1
-        elif type == REF: count_r += 1
-        elif type == FLOAT: count_f += 1
-    # allocate lists for each type that has at least one argument
-    if count_i: args_i = [0] * count_i
-    else: args_i = None
-    if count_r: args_r = [NULL] * count_r
-    else: args_r = None
-    if count_f: args_f = [longlong.ZEROF] * count_f
-    else: args_f = None
-    # fill in the lists
-    count_i = count_r = count_f = 0
-    for i in range(1, len(argboxes)):
-        box = argboxes[i]
-        if box.type == INT:
-            args_i[count_i] = box.getint()
-            count_i += 1
-        elif box.type == REF:
-            args_r[count_r] = box.getref_base()
-            count_r += 1
-        elif box.type == FLOAT:
-            args_f[count_f] = box.getfloatstorage()
-            count_f += 1
+    args_i, args_r, args_f = _separate_call_arguments(argboxes)
     # get the function address as an integer
     func = argboxes[0].getint()
     # do the call using the correct function from the cpu
diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py
--- a/rpython/jit/metainterp/executor.py
+++ b/rpython/jit/metainterp/executor.py
@@ -15,9 +15,7 @@
 # ____________________________________________________________


- at specialize.arg(4)
-def _do_call(cpu, metainterp, argboxes, descr, rettype):
-    assert metainterp is not None
+def _separate_call_arguments(argboxes):
     # count the number of arguments of the different types
     count_i = count_r = count_f = 0
     for i in range(1, len(argboxes)):
@@ -45,6 +43,13 @@
         elif box.type == FLOAT:
             args_f[count_f] = box.getfloatstorage()
             count_f += 1
+    return args_i, args_r, args_f
+
+
+ at specialize.arg(4)
+def _do_call(cpu, metainterp, argboxes, descr, rettype):
+    assert metainterp is not None
+    args_i, args_r, args_f = _separate_call_arguments(argboxes)
     # get the function address as an integer
     func = argboxes[0].getint()
     # do the call using the correct function from the cpu

From pypy.commits at gmail.com Sun Mar 20 14:20:16 2016
From: pypy.commits at gmail.com (cfbolz)
Date: Sun, 20 Mar 2016 11:20:16 -0700 (PDT)
Subject: [pypy-commit] pypy guard-compatible: merge default
Message-ID: <56eee9e0.4577c20a.a7210.ffffc2e3@mx.google.com>

Author: Carl Friedrich Bolz
Branch: guard-compatible
Changeset: r83198:1fa01a98cde5
Date: 2016-03-20 19:19 +0100
http://bitbucket.org/pypy/pypy/changeset/1fa01a98cde5/

Log:	merge default

diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -11,3 +11,15 @@
 The backend manages 64-bit values in the literal pool of the assembly
 instead of loading them as immediates. It includes a simplification for the
 operation 'zero_array'. Start and length parameters are bytes instead of size.
+.. branch: remove-py-log
+
+Replace py.log with something simpler, which should speed up logging
+
+.. branch: where_1_arg
+
+Implemented numpy.where for 1 argument (thanks sergem)
+
+.. branch: fix_indexing_by_numpy_int
+
+Implement yet another strange numpy indexing compatibility; indexing by a scalar
+returns a scalar
diff --git a/pypy/module/micronumpy/arrayops.py b/pypy/module/micronumpy/arrayops.py
--- a/pypy/module/micronumpy/arrayops.py
+++ b/pypy/module/micronumpy/arrayops.py
@@ -71,8 +71,8 @@
     """
     if space.is_none(w_y):
         if space.is_none(w_x):
-            raise OperationError(space.w_NotImplementedError, space.wrap(
-                "1-arg where unsupported right now"))
+            arr = convert_to_array(space, w_arr)
+            return arr.descr_nonzero(space)
         raise OperationError(space.w_ValueError, space.wrap(
             "Where should be called with either 1 or 3 arguments"))
     if space.is_none(w_x):
diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py
--- a/pypy/module/micronumpy/ndarray.py
+++ b/pypy/module/micronumpy/ndarray.py
@@ -267,6 +267,11 @@
                             "interpreted as a valid boolean index")
         elif isinstance(w_idx, boxes.W_GenericBox):
             w_ret = self.getitem_array_int(space, w_idx)
+
+            if isinstance(w_idx, boxes.W_IntegerBox):
+                # if w_idx is integer then getitem_array_int must contain a single value and we must return it.
+                #  Get 0-th element of the w_ret.
+                w_ret = w_ret.implementation.descr_getitem(space, self, space.wrap(0))
         else:
             try:
                 w_ret = self.implementation.descr_getitem(space, self, w_idx)
diff --git a/pypy/module/micronumpy/test/test_arrayops.py b/pypy/module/micronumpy/test/test_arrayops.py
--- a/pypy/module/micronumpy/test/test_arrayops.py
+++ b/pypy/module/micronumpy/test/test_arrayops.py
@@ -54,8 +54,24 @@
         assert (where(False, 1, [1, 2, 3]) == [1, 2, 3]).all()
         assert (where([1, 2, 3], True, False) == [True, True, True]).all()

-    #def test_where_1_arg(self):
-    #    xxx
+    def test_where_1_arg(self):
+        from numpy import where, array
+
+        result = where([1,0,1])
+
+        assert isinstance(result, tuple)
+        assert len(result) == 1
+        assert (result[0] == array([0, 2])).all()
+
+    def test_where_1_arg_2d(self):
+        from numpy import where, array
+
+        result = where([[1,0,1],[2,-1,-1]])
+
+        assert isinstance(result, tuple)
+        assert len(result) == 2
+        assert (result[0] == array([0, 0, 1, 1, 1])).all()
+        assert (result[1] == array([0, 2, 0, 1, 2])).all()

     def test_where_invalidates(self):
         from numpy import where, ones, zeros, array
diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py
--- a/pypy/module/micronumpy/test/test_ndarray.py
+++ b/pypy/module/micronumpy/test/test_ndarray.py
@@ -3437,6 +3437,21 @@
         a.itemset(1, 2, 100)
         assert a[1, 2] == 100

+    def test_index_int(self):
+        import numpy as np
+        a = np.array([10, 20, 30], dtype='int64')
+        res = a[np.int64(1)]
+        assert isinstance(res, np.int64)
+        assert res == 20
+        res = a[np.int32(0)]
+        assert isinstance(res, np.int64)
+        assert res == 10
+
+        b = a.astype(float)
+        res = b[np.int64(1)]
+        assert res == 20.0
+        assert isinstance(res, np.float64)
+
     def test_index(self):
         import numpy as np
         a = np.array([1], np.uint16)
@@ -3448,6 +3463,7 @@
         assert exc.value.message == 'only integer arrays with one element ' \
                                     'can be converted to an index'

+
     def test_int_array_index(self):
         from numpy import array
         assert (array([])[[]] == []).all()
diff --git a/pypy/module/thread/test/test_lock.py b/pypy/module/thread/test/test_lock.py
--- a/pypy/module/thread/test/test_lock.py
+++ b/pypy/module/thread/test/test_lock.py
@@ -3,6 +3,7 @@
 import sys, os
 from pypy.module.thread.test.support import GenericTestThread
 from rpython.translator.c.test.test_genc import compile
+import platform

 class AppTestLock(GenericTestThread):
@@ -63,6 +64,8 @@
         else:
             assert self.runappdirect, "missing lock._py3k_acquire()"

+    @py.test.mark.xfail(platform.machine() == 's390x',
+                        reason='may fail this test under heavy load')
     def test_ping_pong(self):
         # The purpose of this test is that doing a large number of ping-pongs
         # between two threads, using locks, should complete in a reasonable
diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
--- a/rpython/memory/gc/incminimark.py
+++ b/rpython/memory/gc/incminimark.py
@@ -1654,15 +1654,15 @@
         else:
             self.nursery_objects_shadows.clear()
         #
+        # visit the P and O lists from rawrefcount, if enabled.
+        if self.rrc_enabled:
+            self.rrc_minor_collection_free()
+        #
         # Walk the list of young raw-malloced objects, and either free
         # them or make them old.
         if self.young_rawmalloced_objects:
             self.free_young_rawmalloced_objects()
         #
-        # visit the P and O lists from rawrefcount, if enabled.
-        if self.rrc_enabled:
-            self.rrc_minor_collection_free()
-        #
         # All live nursery objects are out of the nursery or pinned inside
         # the nursery.
Create nursery barriers to protect the pinned objects, # fill the rest of the nursery with zeros and reset the current nursery diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py --- a/rpython/memory/gc/test/test_rawrefcount.py +++ b/rpython/memory/gc/test/test_rawrefcount.py @@ -29,7 +29,8 @@ assert count2 - count1 == expected_trigger def _rawrefcount_pair(self, intval, is_light=False, is_pyobj=False, - create_old=False, create_immortal=False): + create_old=False, create_immortal=False, + force_external=False): if is_light: rc = REFCNT_FROM_PYPY_LIGHT else: @@ -40,7 +41,13 @@ if create_immortal: p1 = lltype.malloc(S, immortal=True) else: - p1 = self.malloc(S) + saved = self.gc.nonlarge_max + try: + if force_external: + self.gc.nonlarge_max = 1 + p1 = self.malloc(S) + finally: + self.gc.nonlarge_max = saved p1.x = intval if create_immortal: self.consider_constant(p1) @@ -220,9 +227,10 @@ def test_pypy_nonlight_dies_quickly_old(self): self.test_pypy_nonlight_dies_quickly(old=True) - def test_pyobject_pypy_link_dies_on_minor_collection(self): + @py.test.mark.parametrize('external', [False, True]) + def test_pyobject_pypy_link_dies_on_minor_collection(self, external): p1, p1ref, r1, r1addr, check_alive = ( - self._rawrefcount_pair(42, is_pyobj=True)) + self._rawrefcount_pair(42, is_pyobj=True, force_external=external)) check_alive(0) r1.ob_refcnt += 1 # the pyobject is kept alive self._collect(major=False) @@ -231,9 +239,12 @@ self.gc.check_no_more_rawrefcount_state() lltype.free(r1, flavor='raw') - def test_pyobject_dies(self, old=False): + @py.test.mark.parametrize('old,external', [ + (False, False), (True, False), (False, True)]) + def test_pyobject_dies(self, old, external): p1, p1ref, r1, r1addr, check_alive = ( - self._rawrefcount_pair(42, is_pyobj=True, create_old=old)) + self._rawrefcount_pair(42, is_pyobj=True, create_old=old, + force_external=external)) check_alive(0) if old: self._collect(major=False) @@ -247,9 +258,12 @@ self.gc.check_no_more_rawrefcount_state() lltype.free(r1, flavor='raw') - def test_pyobject_survives_from_obj(self, old=False): + @py.test.mark.parametrize('old,external', [ + (False, False), (True, False), (False, True)]) + def test_pyobject_survives_from_obj(self, old, external): p1, p1ref, r1, r1addr, check_alive = ( - self._rawrefcount_pair(42, is_pyobj=True, create_old=old)) + self._rawrefcount_pair(42, is_pyobj=True, create_old=old, + force_external=external)) check_alive(0) self.stackroots.append(p1) self._collect(major=False) @@ -269,11 +283,6 @@ self.gc.check_no_more_rawrefcount_state() lltype.free(r1, flavor='raw') - def test_pyobject_dies_old(self): - self.test_pyobject_dies(old=True) - def test_pyobject_survives_from_obj_old(self): - self.test_pyobject_survives_from_obj(old=True) - def test_pyobject_attached_to_prebuilt_obj(self): p1, p1ref, r1, r1addr, check_alive = ( self._rawrefcount_pair(42, create_immortal=True)) diff --git a/rpython/rlib/rvmprof/src/vmprof_config.h b/rpython/rlib/rvmprof/src/vmprof_config.h --- a/rpython/rlib/rvmprof/src/vmprof_config.h +++ b/rpython/rlib/rvmprof/src/vmprof_config.h @@ -1,6 +1,17 @@ #define HAVE_SYS_UCONTEXT_H -#if defined(__FreeBSD__) || defined(__APPLE__) +#if defined(__FreeBSD__) #define PC_FROM_UCONTEXT uc_mcontext.mc_rip +#elif defined( __APPLE__) + #if ((ULONG_MAX) == (UINT_MAX)) + #define PC_FROM_UCONTEXT uc_mcontext->__ss.__eip + #else + #define PC_FROM_UCONTEXT uc_mcontext->__ss.__rip + #endif +#elif defined(__arm__) +#define PC_FROM_UCONTEXT 
uc_mcontext.arm_ip +#elif defined(__linux) && defined(__i386) && defined(__GNUC__) +#define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_EIP] #else +/* linux, gnuc */ #define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_RIP] #endif diff --git a/rpython/rlib/rvmprof/src/vmprof_getpc.h b/rpython/rlib/rvmprof/src/vmprof_getpc.h --- a/rpython/rlib/rvmprof/src/vmprof_getpc.h +++ b/rpython/rlib/rvmprof/src/vmprof_getpc.h @@ -43,9 +43,6 @@ #ifndef BASE_GETPC_H_ #define BASE_GETPC_H_ - -#include "vmprof_config.h" - // On many linux systems, we may need _GNU_SOURCE to get access to // the defined constants that define the register we want to see (eg // REG_EIP). Note this #define must come first! @@ -58,6 +55,8 @@ #define _XOPEN_SOURCE 500 #endif +#include "vmprof_config.h" + #include // for memcmp #if defined(HAVE_SYS_UCONTEXT_H) #include @@ -112,13 +111,8 @@ // PC_FROM_UCONTEXT in config.h. The only thing we need to do here, // then, is to do the magic call-unrolling for systems that support it. -#if defined(__linux) && defined(__i386) && defined(__GNUC__) -intptr_t GetPC(ucontext_t *signal_ucontext) { - return signal_ucontext->uc_mcontext.gregs[REG_EIP]; -} - -// Special case #2: Windows, which has to do something totally different. -#elif defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__) || defined(__MINGW32__) +// Special case Windows, which has to do something totally different. +#if defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__) || defined(__MINGW32__) // If this is ever implemented, probably the way to do it is to have // profiler.cc use a high-precision timer via timeSetEvent: // http://msdn2.microsoft.com/en-us/library/ms712713.aspx @@ -141,18 +135,10 @@ // Normal cases. If this doesn't compile, it's probably because // PC_FROM_UCONTEXT is the empty string. You need to figure out // the right value for your system, and add it to the list in -// configure.ac (or set it manually in your config.h). +// vmrpof_config.h #else intptr_t GetPC(ucontext_t *signal_ucontext) { -#ifdef __APPLE__ -#if ((ULONG_MAX) == (UINT_MAX)) - return (signal_ucontext->uc_mcontext->__ss.__eip); -#else - return (signal_ucontext->uc_mcontext->__ss.__rip); -#endif -#else return signal_ucontext->PC_FROM_UCONTEXT; // defined in config.h -#endif } #endif diff --git a/rpython/rlib/test/test_rthread.py b/rpython/rlib/test/test_rthread.py --- a/rpython/rlib/test/test_rthread.py +++ b/rpython/rlib/test/test_rthread.py @@ -5,6 +5,7 @@ from rpython.translator.c.test.test_boehm import AbstractGCTestClass from rpython.rtyper.lltypesystem import lltype, rffi import py +import platform def test_lock(): l = allocate_lock() @@ -92,6 +93,8 @@ res = fn() assert res == 42 + @py.test.mark.xfail(platform.machine() == 's390x', + reason='may fail this test under heavy load') def test_gc_locking(self): import time from rpython.rlib.debug import ll_assert diff --git a/rpython/rlib/test/test_runicode.py b/rpython/rlib/test/test_runicode.py --- a/rpython/rlib/test/test_runicode.py +++ b/rpython/rlib/test/test_runicode.py @@ -4,6 +4,8 @@ import sys, random from rpython.rlib import runicode +from hypothesis import given, settings, strategies + def test_unichr(): assert runicode.UNICHR(0xffff) == u'\uffff' @@ -172,6 +174,17 @@ "utf-32 utf-32-be utf-32-le").split(): self.checkdecode(uni, encoding) + # Same as above, but uses Hypothesis to generate non-surrogate unicode + # characters. 
+ @settings(max_examples=10000) + @given(strategies.characters(blacklist_categories=["Cs"])) + def test_random_hypothesis(self, uni): + if sys.version >= "2.7": + self.checkdecode(uni, "utf-7") + for encoding in ("utf-8 utf-16 utf-16-be utf-16-le " + "utf-32 utf-32-be utf-32-le").split(): + self.checkdecode(uni, encoding) + def test_maxunicode(self): uni = unichr(sys.maxunicode) if sys.version >= "2.7": diff --git a/rpython/tool/ansi_print.py b/rpython/tool/ansi_print.py --- a/rpython/tool/ansi_print.py +++ b/rpython/tool/ansi_print.py @@ -50,9 +50,9 @@ # some more methods used by sandlib call = _make_method(':call', (34,)) result = _make_method(':result', (34,)) - exception = _make_method(':exception', (34,)), - vpath = _make_method(':vpath', (35,)), - timeout = _make_method('', (1, 31)), + exception = _make_method(':exception', (34,)) + vpath = _make_method(':vpath', (35,)) + timeout = _make_method('', (1, 31)) # directly calling the logger writes "[name] text" with no particular color __call__ = _make_method('', ()) diff --git a/rpython/translator/sandbox/sandlib.py b/rpython/translator/sandbox/sandlib.py --- a/rpython/translator/sandbox/sandlib.py +++ b/rpython/translator/sandbox/sandlib.py @@ -527,6 +527,9 @@ node = self.get_node(vpathname) return node.keys() + def do_ll_os__ll_os_unlink(self, vpathname): + raise OSError(errno.EPERM, "write access denied") + def do_ll_os__ll_os_getuid(self): return UID do_ll_os__ll_os_geteuid = do_ll_os__ll_os_getuid diff --git a/testrunner/runner.py b/testrunner/runner.py --- a/testrunner/runner.py +++ b/testrunner/runner.py @@ -240,8 +240,8 @@ s = 'setting' if os.environ.get('MAKEFLAGS'): s = 'overriding' - out.write("%s MAKEFLAGS to '-j1'\n" % s) - os.environ['MAKEFLAGS'] = '-j1' + out.write("%s MAKEFLAGS to ' ' (space)\n" % s) + os.environ['MAKEFLAGS'] = ' ' failure = False for testname in testdirs: From pypy.commits at gmail.com Sun Mar 20 14:20:12 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sun, 20 Mar 2016 11:20:12 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: de-duplicate quasi-immutable conditions Message-ID: <56eee9dc.c856c20a.b438.ffffc226@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83196:07d318a03f0d Date: 2016-03-20 15:20 +0100 http://bitbucket.org/pypy/pypy/changeset/07d318a03f0d/ Log: de-duplicate quasi-immutable conditions diff --git a/rpython/jit/metainterp/compatible.py b/rpython/jit/metainterp/compatible.py --- a/rpython/jit/metainterp/compatible.py +++ b/rpython/jit/metainterp/compatible.py @@ -157,6 +157,8 @@ return False if not self.res.same_constant(res): return False + if self.descr is not other.descr: + return False assert self.args[1] is other.args[1] is None for i in range(len(self.args)): if i == 1: @@ -168,28 +170,36 @@ class QuasiimmutGetfieldAndPureCallCondition(PureCallCondition): def __init__(self, op, qmutdescr): - self.op = op - self.qmutdescr = qmutdescr + args = op.getarglist()[:] + args[1] = None + args[2] = None + self.args = args + self.descr = op.getdescr() + self.qmut = qmutdescr.qmut + self.mutatefielddescr = qmutdescr.mutatefielddescr + self.fielddescr = qmutdescr.fielddescr def activate(self, ref, optimizer): # record the quasi-immutable - optimizer.record_quasi_immutable_dep(self.qmutdescr.qmut) + optimizer.record_quasi_immutable_dep(self.qmut) + # XXX can set self.qmut to None here? 
Condition.activate(self, ref, optimizer) def activate_secondary(self, ref, loop_token): from rpython.jit.metainterp.quasiimmut import get_current_qmut_instance # need to register the loop for invalidation as well! qmut = get_current_qmut_instance(loop_token.cpu, ref, - self.qmutdescr.mutatefielddescr) + self.mutatefielddescr) qmut.register_loop_token(loop_token.loop_token_wref) def check(self, cpu, ref): from rpython.rlib.debug import debug_print, debug_start, debug_stop - calldescr = self.op.getdescr() + from rpython.jit.metainterp.quasiimmut import QuasiImmutDescr + calldescr = self.descr # change exactly the first argument - arglist = self.op.getarglist() + arglist = self.args arglist[1] = newconst(ref) - arglist[2] = self.qmutdescr._get_fieldvalue(ref) + arglist[2] = QuasiImmutDescr._get_fieldvalue(self.fielddescr, ref, cpu) try: res = do_call(cpu, arglist, calldescr) except Exception: @@ -197,7 +207,30 @@ debug_print("call to elidable_compatible function raised") debug_stop("jit-guard-compatible") return False + finally: + arglist[1] = arglist[2] = None if not res.same_constant(self.res): return False return True + def same_cond(self, other, res): + if type(other) != QuasiimmutGetfieldAndPureCallCondition: + return False + if len(self.args) != len(other.args): + return False + if not self.res.same_constant(res): + return False + if self.descr is not other.descr: + return False + if self.fielddescr is not other.fielddescr: + return False + if self.mutatefielddescr is not other.mutatefielddescr: + return False + assert self.args[1] is other.args[1] is None + assert self.args[2] is other.args[2] is None + for i in range(len(self.args)): + if i == 1 or i == 2: + continue + if not self.args[i].same_constant(other.args[i]): + return False + return True diff --git a/rpython/jit/metainterp/optimizeopt/test/test_compatible.py b/rpython/jit/metainterp/optimizeopt/test/test_compatible.py --- a/rpython/jit/metainterp/optimizeopt/test/test_compatible.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_compatible.py @@ -150,7 +150,12 @@ guard_not_invalidated() [] i0 = getfield_gc_i(p1, descr=quasifielddescr) i1 = call_pure_i(123, p1, i0, descr=nonwritedescr) + quasiimmut_field(p1, descr=quasiimmutdescr) + guard_not_invalidated() [] + i3 = getfield_gc_i(p1, descr=quasifielddescr) + i4 = call_pure_i(123, p1, i3, descr=nonwritedescr) escape_n(i1) + escape_n(i4) jump(p1) """ expected = """ @@ -159,10 +164,14 @@ guard_not_invalidated() [] i0 = getfield_gc_i(p1, descr=quasifielddescr) # will be removed by the backend escape_n(5) + escape_n(5) jump(p1) """ call_pure_results = { (ConstInt(123), ConstPtr(self.quasiptr), ConstInt(-4247)): ConstInt(5), } self.optimize_loop(ops, expected, call_pure_results) - + descr = self.loop.operations[1].getdescr() + assert descr._compatibility_conditions is not None + assert descr._compatibility_conditions.known_valid.same_constant(ConstPtr(self.quasiptr)) + assert len(descr._compatibility_conditions.conditions) == 1 diff --git a/rpython/jit/metainterp/quasiimmut.py b/rpython/jit/metainterp/quasiimmut.py --- a/rpython/jit/metainterp/quasiimmut.py +++ b/rpython/jit/metainterp/quasiimmut.py @@ -129,16 +129,16 @@ def get_current_constant_fieldvalue(self): struct = self.struct - return self._get_fieldvalue(struct) + return self._get_fieldvalue(self.fielddescr, struct, self.cpu) - def _get_fieldvalue(self, struct): - fielddescr = self.fielddescr - if self.fielddescr.is_pointer_field(): - return ConstPtr(self.cpu.bh_getfield_gc_r(struct, fielddescr)) - elif 
self.fielddescr.is_float_field(): - return ConstFloat(self.cpu.bh_getfield_gc_f(struct, fielddescr)) + @staticmethod + def _get_fieldvalue(fielddescr, struct, cpu): + if fielddescr.is_pointer_field(): + return ConstPtr(cpu.bh_getfield_gc_r(struct, fielddescr)) + elif fielddescr.is_float_field(): + return ConstFloat(cpu.bh_getfield_gc_f(struct, fielddescr)) else: - return ConstInt(self.cpu.bh_getfield_gc_i(struct, fielddescr)) + return ConstInt(cpu.bh_getfield_gc_i(struct, fielddescr)) def is_still_valid_for(self, structconst): assert self.struct From pypy.commits at gmail.com Sun Mar 20 15:16:48 2016 From: pypy.commits at gmail.com (pjenvey) Date: Sun, 20 Mar 2016 12:16:48 -0700 (PDT) Subject: [pypy-commit] pypy stdlib-2.7.11: prefer oefmt Message-ID: <56eef720.019e1c0a.1ce3.13a6@mx.google.com> Author: Philip Jenvey Branch: stdlib-2.7.11 Changeset: r83199:0ad241802268 Date: 2016-03-20 12:15 -0700 http://bitbucket.org/pypy/pypy/changeset/0ad241802268/ Log: prefer oefmt diff --git a/pypy/module/_random/interp_random.py b/pypy/module/_random/interp_random.py --- a/pypy/module/_random/interp_random.py +++ b/pypy/module/_random/interp_random.py @@ -1,6 +1,6 @@ import time -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.baseobjspace import W_Root @@ -77,7 +77,7 @@ w_item = space.getitem(w_state, space.newint(rrandom.N)) index = space.int_w(w_item) if index < 0 or index > rrandom.N: - raise OperationError(space.w_ValueError, space.wrap("invalid state")) + raise oefmt(space.w_ValueError, "invalid state") self._rnd.index = index def jumpahead(self, space, w_n): diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -239,10 +239,8 @@ values,as if it had been read from a file using the fromfile() method). 
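
The jit-leaner-frontend commits above (r83214 through r83217) add a get_dead_ranges() helper to the trace opencoder: for every position in a recorded trace it precomputes which operation results are certainly dead once the optimizer has advanced past that position, so their cached boxes can be dropped early. The following standalone sketch only illustrates that bookkeeping idea; the name compute_dead_positions and the list-of-lists layout are assumptions made here for clarity and are not PyPy's actual Trace API, which packs the same information into a flat integer array and resolves collisions by probing forward to the next free slot.

    # Illustrative sketch only -- not PyPy's implementation.
    def compute_dead_positions(last_use):
        """last_use[i] is the last position at which the result of
        operation i is still needed (0 if it is never used again).
        Returns a table 'dead' where dead[p] lists the operation
        indexes that can be forgotten once the consumer of the trace
        has advanced past position p."""
        n = len(last_use)
        dead = [[] for _ in range(n + 1)]
        for i, last in enumerate(last_use):
            if last:
                # operation i can be freed right after its last use
                dead[min(last + 1, n)].append(i)
        return dead

    if __name__ == '__main__':
        # a toy trace with six operations; op 1 is last used at
        # position 3, op 3 at position 5, the rest are never used again
        last_use = [0, 3, 0, 5, 0, 0]
        for pos, dying in enumerate(compute_dead_positions(last_use)):
            if dying:
                print('after position %d the cache entries for ops %s '
                      'can be dropped' % (pos, dying))

Running the toy example reports that op 1 can be dropped after position 4 and op 3 after position 6, which is the same kind of answer the optimizer uses (via kill_cache_at in the commits above) to trim its per-operation cache while walking the trace.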
""" if self is w_s: - raise OperationError( - self.space.w_ValueError, - self.space.wrap("array.fromstring(x): x cannot be self") - ) + raise oefmt(space.w_ValueError, + "array.fromstring(x): x cannot be self") s = space.getarg_w('s#', w_s) if len(s) % self.itemsize != 0: msg = 'string length not a multiple of item size' From pypy.commits at gmail.com Sun Mar 20 15:36:49 2016 From: pypy.commits at gmail.com (cfbolz) Date: Sun, 20 Mar 2016 12:36:49 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: chain guard_compatibles to not produce superfluous extra bridges Message-ID: <56eefbd1.0775c20a.acb71.ffffe85d@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83200:fcc0b6e9d829 Date: 2016-03-20 20:35 +0100 http://bitbucket.org/pypy/pypy/changeset/fcc0b6e9d829/ Log: chain guard_compatibles to not produce superfluous extra bridges diff --git a/rpython/jit/metainterp/compatible.py b/rpython/jit/metainterp/compatible.py --- a/rpython/jit/metainterp/compatible.py +++ b/rpython/jit/metainterp/compatible.py @@ -126,7 +126,7 @@ return True def same_cond(self, other, res): - if type(other) != PureCallCondition: + if type(other) is not PureCallCondition: return False if len(self.args) != len(other.args): return False @@ -189,7 +189,7 @@ return True def same_cond(self, other, res): - if type(other) != QuasiimmutGetfieldAndPureCallCondition: + if type(other) is not QuasiimmutGetfieldAndPureCallCondition: return False if len(self.args) != len(other.args): return False diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -1088,20 +1088,26 @@ def __init__(self): # XXX think about what is being kept alive here self._compatibility_conditions = None + self.failarg_index = -1 + self._prev_guard_compatible_descr = None def handle_fail(self, deadframe, metainterp_sd, jitdriver_sd): index = intmask(self.status >> self.ST_SHIFT) typetag = intmask(self.status & self.ST_TYPE_MASK) assert typetag == self.TY_REF # for now refval = metainterp_sd.cpu.get_value_direct(deadframe, 'r', index) - if self.is_compatible(metainterp_sd.cpu, refval): - from rpython.jit.metainterp.blackhole import resume_in_blackhole - metainterp_sd.cpu.grow_guard_compatible_switch( - self.rd_loop_token, self, refval) - resume_in_blackhole(metainterp_sd, jitdriver_sd, self, deadframe) - else: - # a real failure - return ResumeGuardDescr.handle_fail(self, deadframe, metainterp_sd, jitdriver_sd) + curr = self + while curr: + if curr.is_compatible(metainterp_sd.cpu, refval): + from rpython.jit.metainterp.blackhole import resume_in_blackhole + metainterp_sd.cpu.grow_guard_compatible_switch( + curr.rd_loop_token, curr, refval) + resume_in_blackhole(metainterp_sd, jitdriver_sd, self, deadframe) + return + # try previous guards, maybe one of them would have matched + curr = curr._prev_guard_compatible_descr + # a real failure + return ResumeGuardDescr.handle_fail(self, deadframe, metainterp_sd, jitdriver_sd) def is_compatible(self, cpu, ref): const = history.newconst(ref) @@ -1112,6 +1118,27 @@ return False return True # no conditions, everything works + def compile_and_attach(self, metainterp, new_loop, orig_inputargs): + # if new_loop starts with another guard_compatible on the same argument + # (which is most of the time) we have to connect the new guard's descr + # to this descr + assert self.failarg_index != -1 + arg = new_loop.inputargs[self.failarg_index] + firstop = new_loop.operations[0] + if 
(firstop.getopnum() == rop.GUARD_COMPATIBLE and + firstop.getarg(0) is arg): + # a guard_compatible about the same box + newdescr = firstop.getdescr() + assert isinstance(newdescr, GuardCompatibleDescr) + newdescr._prev_guard_compatible_descr = self + ResumeGuardDescr.compile_and_attach( + self, metainterp, new_loop, orig_inputargs) + + def make_a_counter_per_value(self, guard_value_op, index): + self.failarg_index = guard_value_op.getfailargs().index( + guard_value_op.getarg(0)) + ResumeGuardDescr.make_a_counter_per_value(self, guard_value_op, index) + # ____________________________________________________________ memory_error = MemoryError() diff --git a/rpython/jit/metainterp/test/test_compatible.py b/rpython/jit/metainterp/test/test_compatible.py --- a/rpython/jit/metainterp/test/test_compatible.py +++ b/rpython/jit/metainterp/test/test_compatible.py @@ -42,7 +42,8 @@ x = self.meta_interp(main, []) assert x < 25 - # XXX check number of bridges + # trace, two bridges, a finish bridge + self.check_trace_count(4) def test_exception(self): S = lltype.GcStruct('S', ('x', lltype.Signed)) @@ -179,3 +180,45 @@ assert res == 3 self.check_operations_history(guard_compatible=1) + + def test_too_many_bridges(self): + S = lltype.GcStruct('S', ('x', lltype.Signed)) + p1 = lltype.malloc(S) + p1.x = 5 + + p2 = lltype.malloc(S) + p2.x = 5 + + p3 = lltype.malloc(S) + p3.x = 6 + driver = jit.JitDriver(greens = [], reds = ['n', 'x']) + + class A(object): + pass + + c = A() + c.count = 0 + @jit.elidable_compatible() + def g(s, ignored): + c.count += 1 + return s.x + + def f(n, x): + while n > 0: + driver.can_enter_jit(n=n, x=x) + driver.jit_merge_point(n=n, x=x) + n -= g(x, 7) + + def main(): + g(p1, 9) # make annotator not make argument constant + f(100, p1) + f(100, p3) # not compatible, so make a bridge + f(100, p2) # compatible with loop again, too bad + return c.count + + x = self.meta_interp(main, []) + + assert x < 30 + # trace, two bridges, a finish bridge + self.check_trace_count(4) + From pypy.commits at gmail.com Sun Mar 20 17:34:06 2016 From: pypy.commits at gmail.com (alex_gaynor) Date: Sun, 20 Mar 2016 14:34:06 -0700 (PDT) Subject: [pypy-commit] pypy stdlib-2.7.11: whacky isinstance check to match waht the C implementation does Message-ID: <56ef174e.12871c0a.807c5.3c89@mx.google.com> Author: Alex Gaynor Branch: stdlib-2.7.11 Changeset: r83201:ef51b367e34c Date: 2016-03-20 17:33 -0400 http://bitbucket.org/pypy/pypy/changeset/ef51b367e34c/ Log: whacky isinstance check to match waht the C implementation does diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1155,7 +1155,7 @@ class Row(object): def __init__(self, cursor, values): - if not isinstance(cursor, Cursor): + if not (type(cursor) is Cursor or issubclass(type(cursor), Cursor): raise TypeError("instance of cursor required for first argument") self.description = cursor.description self.values = values From pypy.commits at gmail.com Sun Mar 20 17:34:08 2016 From: pypy.commits at gmail.com (alex_gaynor) Date: Sun, 20 Mar 2016 14:34:08 -0700 (PDT) Subject: [pypy-commit] pypy stdlib-2.7.11: merged upstream Message-ID: <56ef1750.13821c0a.5b006.3d5d@mx.google.com> Author: Alex Gaynor Branch: stdlib-2.7.11 Changeset: r83202:9a6a7a6afb33 Date: 2016-03-20 17:33 -0400 http://bitbucket.org/pypy/pypy/changeset/9a6a7a6afb33/ Log: merged upstream diff --git a/pypy/module/_random/interp_random.py b/pypy/module/_random/interp_random.py --- a/pypy/module/_random/interp_random.py +++ 
b/pypy/module/_random/interp_random.py @@ -1,6 +1,6 @@ import time -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.typedef import TypeDef from pypy.interpreter.gateway import interp2app, unwrap_spec from pypy.interpreter.baseobjspace import W_Root @@ -77,7 +77,7 @@ w_item = space.getitem(w_state, space.newint(rrandom.N)) index = space.int_w(w_item) if index < 0 or index > rrandom.N: - raise OperationError(space.w_ValueError, space.wrap("invalid state")) + raise oefmt(space.w_ValueError, "invalid state") self._rnd.index = index def jumpahead(self, space, w_n): diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py --- a/pypy/module/array/interp_array.py +++ b/pypy/module/array/interp_array.py @@ -239,10 +239,8 @@ values,as if it had been read from a file using the fromfile() method). """ if self is w_s: - raise OperationError( - self.space.w_ValueError, - self.space.wrap("array.fromstring(x): x cannot be self") - ) + raise oefmt(space.w_ValueError, + "array.fromstring(x): x cannot be self") s = space.getarg_w('s#', w_s) if len(s) % self.itemsize != 0: msg = 'string length not a multiple of item size' From pypy.commits at gmail.com Sun Mar 20 17:41:40 2016 From: pypy.commits at gmail.com (alex_gaynor) Date: Sun, 20 Mar 2016 14:41:40 -0700 (PDT) Subject: [pypy-commit] pypy stdlib-2.7.11: change the error message to match cpython Message-ID: <56ef1914.4c181c0a.e2e25.39bf@mx.google.com> Author: Alex Gaynor Branch: stdlib-2.7.11 Changeset: r83203:b7a4ddad1beb Date: 2016-03-20 17:41 -0400 http://bitbucket.org/pypy/pypy/changeset/b7a4ddad1beb/ Log: change the error message to match cpython diff --git a/rpython/rlib/rfloat.py b/rpython/rlib/rfloat.py --- a/rpython/rlib/rfloat.py +++ b/rpython/rlib/rfloat.py @@ -25,7 +25,7 @@ globals().update(rffi_platform.configure(CConfig)) -INVALID_MSG = "invalid literal for float()" +INVALID_MSG = "could not convert string to float" def string_to_float(s): """ From pypy.commits at gmail.com Sun Mar 20 18:04:04 2016 From: pypy.commits at gmail.com (mattip) Date: Sun, 20 Mar 2016 15:04:04 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: add a failing test for PyGILState_Release, passes with -A. Also add thread to config.translation Message-ID: <56ef1e54.465ec20a.90fc6.01c3@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83204:61665eeb889c Date: 2016-03-21 00:03 +0200 http://bitbucket.org/pypy/pypy/changeset/61665eeb889c/ Log: add a failing test for PyGILState_Release, passes with -A. 
Also add thread to config.translation diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -48,7 +48,7 @@ def test_basic_threadstate_dance(self): if self.runappdirect: - py.test.xfail('segfault') + py.test.xfail('segfault: PyThreadState_Get: no current thread') module = self.import_extension('foo', [ ("dance", "METH_NOARGS", """ @@ -113,6 +113,127 @@ """), ]) +class AppTestState(AppTestCpythonExtensionBase): + + def test_frame_tstate_tracing(self): + import sys, threading + module = self.import_extension('foo', [ + ("call_in_temporary_c_thread", "METH_O", + """ + PyObject *res = NULL; + test_c_thread_t test_c_thread; + long thread; + + PyEval_InitThreads(); + + test_c_thread.start_event = PyThread_allocate_lock(); + test_c_thread.exit_event = PyThread_allocate_lock(); + test_c_thread.callback = NULL; + if (!test_c_thread.start_event || !test_c_thread.exit_event) { + PyErr_SetString(PyExc_RuntimeError, "could not allocate lock"); + goto exit; + } + + Py_INCREF(args); + test_c_thread.callback = args; + + PyThread_acquire_lock(test_c_thread.start_event, 1); + PyThread_acquire_lock(test_c_thread.exit_event, 1); + + thread = PyThread_start_new_thread(temporary_c_thread, &test_c_thread); + if (thread == -1) { + PyErr_SetString(PyExc_RuntimeError, "unable to start the thread"); + PyThread_release_lock(test_c_thread.start_event); + PyThread_release_lock(test_c_thread.exit_event); + goto exit; + } + + PyThread_acquire_lock(test_c_thread.start_event, 1); + PyThread_release_lock(test_c_thread.start_event); + + Py_BEGIN_ALLOW_THREADS + PyThread_acquire_lock(test_c_thread.exit_event, 1); + PyThread_release_lock(test_c_thread.exit_event); + Py_END_ALLOW_THREADS + + Py_INCREF(Py_None); + res = Py_None; + + exit: + Py_CLEAR(test_c_thread.callback); + if (test_c_thread.start_event) + PyThread_free_lock(test_c_thread.start_event); + if (test_c_thread.exit_event) + PyThread_free_lock(test_c_thread.exit_event); + return res; + """), ], prologue = """ + #include "pythread.h" + typedef struct { + PyThread_type_lock start_event; + PyThread_type_lock exit_event; + PyObject *callback; + } test_c_thread_t; + + static void + temporary_c_thread(void *data) + { + test_c_thread_t *test_c_thread = data; + PyGILState_STATE state; + PyObject *res; + + PyThread_release_lock(test_c_thread->start_event); + + /* Allocate a Python thread state for this thread */ + state = PyGILState_Ensure(); + + res = PyObject_CallFunction(test_c_thread->callback, "", NULL); + Py_CLEAR(test_c_thread->callback); + + if (res == NULL) { + PyErr_Print(); + } + else { + Py_DECREF(res); + } + + /* Destroy the Python thread state for this thread */ + PyGILState_Release(state); + + PyThread_release_lock(test_c_thread->exit_event); + + /*PyThread_exit_thread(); NOP (on linux) and not implememnted */ + }; + """) + def noop_trace(frame, event, arg): + # no operation + return noop_trace + + def generator(): + while 1: + yield "genereator" + + def callback(): + if callback.gen is None: + callback.gen = generator() + return next(callback.gen) + callback.gen = None + + old_trace = sys.gettrace() + sys.settrace(noop_trace) + try: + # Install a trace function + threading.settrace(noop_trace) + + # Create a generator in a C thread which exits after the call + module.call_in_temporary_c_thread(callback) + + # Call the generator in a different Python thread, check that the + # generator didn't keep a reference to the destroyed 
thread state + for test in range(3): + # The trace function is still called here + callback() + finally: + sys.settrace(old_trace) class TestInterpreterState(BaseApiTest): diff --git a/pypy/tool/pytest/objspace.py b/pypy/tool/pytest/objspace.py --- a/pypy/tool/pytest/objspace.py +++ b/pypy/tool/pytest/objspace.py @@ -28,6 +28,8 @@ def maketestobjspace(config=None): if config is None: config = make_config(option) + if config.objspace.usemodules.thread: + config.translation.thread = True space = make_objspace(config) space.startup() # Initialize all builtin modules space.setitem(space.builtin.w_dict, space.wrap('AssertionError'), From pypy.commits at gmail.com Sun Mar 20 18:10:59 2016 From: pypy.commits at gmail.com (pjenvey) Date: Sun, 20 Mar 2016 15:10:59 -0700 (PDT) Subject: [pypy-commit] pypy stdlib-2.7.11: cpython issue7267: range check format(int, 'c') Message-ID: <56ef1ff3.e853c20a.24b0f.0c36@mx.google.com> Author: Philip Jenvey Branch: stdlib-2.7.11 Changeset: r83205:7f47c732437b Date: 2016-03-20 15:09 -0700 http://bitbucket.org/pypy/pypy/changeset/7f47c732437b/ Log: cpython issue7267: range check format(int, 'c') diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py --- a/pypy/objspace/std/newformat.py +++ b/pypy/objspace/std/newformat.py @@ -771,6 +771,11 @@ msg = "sign not allowed with 'c' presentation type" raise OperationError(space.w_ValueError, space.wrap(msg)) value = space.int_w(w_num) + max_char = runicode.MAXUNICODE if self.is_unicode else 0xFF + if not (0 <= value <= max_char): + raise oefmt(space.w_OverflowError, + "%%c arg not in range(%s)", + hex(max_char)) if self.is_unicode: result = runicode.UNICHR(value) else: diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ b/pypy/objspace/std/test/test_bytesobject.py @@ -103,6 +103,10 @@ assert result == "a foo b" assert isinstance(result, cls) + def test_format_c_overflow(self): + raises(OverflowError, b'{0:c}'.format, -1) + raises(OverflowError, b'{0:c}'.format, 256) + def test_split(self): assert "".split() == [] assert "".split('x') == [''] diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -947,6 +947,11 @@ assert repr("%s" % u) == "u'__unicode__ overridden'" assert repr("{}".format(u)) == "'__unicode__ overridden'" + def test_format_c_overflow(self): + import sys + raises(OverflowError, u'{0:c}'.format, -1) + raises(OverflowError, u'{0:c}'.format, sys.maxunicode + 1) + def test_replace_with_buffer(self): assert u'abc'.replace(buffer('b'), buffer('e')) == u'aec' assert u'abc'.replace(buffer('b'), u'e') == u'aec' From pypy.commits at gmail.com Sun Mar 20 18:57:24 2016 From: pypy.commits at gmail.com (pjenvey) Date: Sun, 20 Mar 2016 15:57:24 -0700 (PDT) Subject: [pypy-commit] pypy stdlib-2.7.11: adjust recursive tests assuming cpython cPickle's impl details Message-ID: <56ef2ad4.838d1c0a.f191a.4dd8@mx.google.com> Author: Philip Jenvey Branch: stdlib-2.7.11 Changeset: r83206:56c29bf49518 Date: 2016-03-20 15:56 -0700 http://bitbucket.org/pypy/pypy/changeset/56c29bf49518/ Log: adjust recursive tests assuming cpython cPickle's impl details diff --git a/lib-python/2.7/test/test_cpickle.py b/lib-python/2.7/test/test_cpickle.py --- a/lib-python/2.7/test/test_cpickle.py +++ b/lib-python/2.7/test/test_cpickle.py @@ -166,7 +166,9 @@ for name in 
dir(AbstractPickleTests): if name.startswith('test_recursive_'): func = getattr(AbstractPickleTests, name) - if '_subclass' in name and '_and_inst' not in name: + if (test_support.check_impl_detail(pypy=True) or + '_subclass' in name and '_and_inst' not in name): + # PyPy's cPickle matches pure python pickle's behavior here assert_args = RuntimeError, 'maximum recursion depth exceeded' else: assert_args = ValueError, "can't pickle cyclic objects" From pypy.commits at gmail.com Sun Mar 20 19:21:03 2016 From: pypy.commits at gmail.com (mjacob) Date: Sun, 20 Mar 2016 16:21:03 -0700 (PDT) Subject: [pypy-commit] pypy py3.3: Close branch. Message-ID: <56ef305f.c5301c0a.95419.508d@mx.google.com> Author: Manuel Jacob Branch: py3.3 Changeset: r83207:0067281a14da Date: 2016-03-21 00:20 +0100 http://bitbucket.org/pypy/pypy/changeset/0067281a14da/ Log: Close branch. From pypy.commits at gmail.com Sun Mar 20 19:45:09 2016 From: pypy.commits at gmail.com (alex_gaynor) Date: Sun, 20 Mar 2016 16:45:09 -0700 (PDT) Subject: [pypy-commit] pypy stdlib-2.7.11: crap Message-ID: <56ef3605.2179c20a.32153.137b@mx.google.com> Author: Alex Gaynor Branch: stdlib-2.7.11 Changeset: r83208:768873b2de62 Date: 2016-03-20 19:44 -0400 http://bitbucket.org/pypy/pypy/changeset/768873b2de62/ Log: crap diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py --- a/lib_pypy/_sqlite3.py +++ b/lib_pypy/_sqlite3.py @@ -1155,7 +1155,7 @@ class Row(object): def __init__(self, cursor, values): - if not (type(cursor) is Cursor or issubclass(type(cursor), Cursor): + if not (type(cursor) is Cursor or issubclass(type(cursor), Cursor)): raise TypeError("instance of cursor required for first argument") self.description = cursor.description self.values = values From pypy.commits at gmail.com Sun Mar 20 19:45:11 2016 From: pypy.commits at gmail.com (alex_gaynor) Date: Sun, 20 Mar 2016 16:45:11 -0700 (PDT) Subject: [pypy-commit] pypy stdlib-2.7.11: merged upstream Message-ID: <56ef3607.a2afc20a.a24d3.1be7@mx.google.com> Author: Alex Gaynor Branch: stdlib-2.7.11 Changeset: r83209:c1486e592f6f Date: 2016-03-20 19:44 -0400 http://bitbucket.org/pypy/pypy/changeset/c1486e592f6f/ Log: merged upstream diff --git a/lib-python/2.7/test/test_cpickle.py b/lib-python/2.7/test/test_cpickle.py --- a/lib-python/2.7/test/test_cpickle.py +++ b/lib-python/2.7/test/test_cpickle.py @@ -166,7 +166,9 @@ for name in dir(AbstractPickleTests): if name.startswith('test_recursive_'): func = getattr(AbstractPickleTests, name) - if '_subclass' in name and '_and_inst' not in name: + if (test_support.check_impl_detail(pypy=True) or + '_subclass' in name and '_and_inst' not in name): + # PyPy's cPickle matches pure python pickle's behavior here assert_args = RuntimeError, 'maximum recursion depth exceeded' else: assert_args = ValueError, "can't pickle cyclic objects" diff --git a/pypy/objspace/std/newformat.py b/pypy/objspace/std/newformat.py --- a/pypy/objspace/std/newformat.py +++ b/pypy/objspace/std/newformat.py @@ -771,6 +771,11 @@ msg = "sign not allowed with 'c' presentation type" raise OperationError(space.w_ValueError, space.wrap(msg)) value = space.int_w(w_num) + max_char = runicode.MAXUNICODE if self.is_unicode else 0xFF + if not (0 <= value <= max_char): + raise oefmt(space.w_OverflowError, + "%%c arg not in range(%s)", + hex(max_char)) if self.is_unicode: result = runicode.UNICHR(value) else: diff --git a/pypy/objspace/std/test/test_bytesobject.py b/pypy/objspace/std/test/test_bytesobject.py --- a/pypy/objspace/std/test/test_bytesobject.py +++ 
b/pypy/objspace/std/test/test_bytesobject.py @@ -103,6 +103,10 @@ assert result == "a foo b" assert isinstance(result, cls) + def test_format_c_overflow(self): + raises(OverflowError, b'{0:c}'.format, -1) + raises(OverflowError, b'{0:c}'.format, 256) + def test_split(self): assert "".split() == [] assert "".split('x') == [''] diff --git a/pypy/objspace/std/test/test_unicodeobject.py b/pypy/objspace/std/test/test_unicodeobject.py --- a/pypy/objspace/std/test/test_unicodeobject.py +++ b/pypy/objspace/std/test/test_unicodeobject.py @@ -947,6 +947,11 @@ assert repr("%s" % u) == "u'__unicode__ overridden'" assert repr("{}".format(u)) == "'__unicode__ overridden'" + def test_format_c_overflow(self): + import sys + raises(OverflowError, u'{0:c}'.format, -1) + raises(OverflowError, u'{0:c}'.format, sys.maxunicode + 1) + def test_replace_with_buffer(self): assert u'abc'.replace(buffer('b'), buffer('e')) == u'aec' assert u'abc'.replace(buffer('b'), u'e') == u'aec' From pypy.commits at gmail.com Sun Mar 20 19:51:13 2016 From: pypy.commits at gmail.com (alex_gaynor) Date: Sun, 20 Mar 2016 16:51:13 -0700 (PDT) Subject: [pypy-commit] pypy stdlib-2.7.11: More state checking in audioop Message-ID: <56ef3771.13821c0a.5b006.5a87@mx.google.com> Author: Alex Gaynor Branch: stdlib-2.7.11 Changeset: r83210:71b26501a3db Date: 2016-03-20 19:50 -0400 http://bitbucket.org/pypy/pypy/changeset/71b26501a3db/ Log: More state checking in audioop diff --git a/lib_pypy/audioop.py b/lib_pypy/audioop.py --- a/lib_pypy/audioop.py +++ b/lib_pypy/audioop.py @@ -23,6 +23,18 @@ raise error("not a whole number of frames") +def _check_state(state): + if state is None: + valpred = 0 + index = 0 + else: + valpred, index = state + # XXX: len(stepsizeTable) = 89 + if valpred >= 0x8000 or valpred < -0x8000 or index >= 89: + raise ValueError("bad state") + return (valpred, index) + + def _sample_count(cp, size): return len(cp) // size @@ -485,7 +497,6 @@ return (result, (d, tuple(samps))) - def _get_lin_samples(cp, size): for sample in _get_samples(cp, size): if size == 1: @@ -495,6 +506,7 @@ elif size == 4: yield sample >> 16 + def _put_lin_sample(result, size, i, sample): if size == 1: sample >>= 8 @@ -504,6 +516,7 @@ sample <<= 16 _put_sample(result, size, i, sample) + def lin2ulaw(cp, size): _check_params(len(cp), size) rv = ffi.new("unsigned char[]", _sample_count(cp, size)) @@ -542,8 +555,7 @@ def lin2adpcm(cp, size, state): _check_params(len(cp), size) - if state is None: - state = (0, 0) + state = _check_state(state) rv = ffi.new("unsigned char[]", len(cp) // size // 2) state_ptr = ffi.new("int[]", state) lib.lin2adcpm(rv, cp, len(cp), size, state_ptr) @@ -552,15 +564,8 @@ def adpcm2lin(cp, size, state): _check_size(size) - if state is None: - valpred = 0 - index = 0 - else: - valpred, index = state - # XXX: len(stepsizeTable) = 89 - if valpred >= 0x8000 or valpred < -0x8000 or index >= 89: - raise ValueError("bad state") + state = _check_state(state) rv = ffi.new("unsigned char[]", len(cp) * size * 2) - state_ptr = ffi.new("int[]", [valpred, index]) + state_ptr = ffi.new("int[]", state) lib.adcpm2lin(rv, cp, len(cp), size, state_ptr) return ffi.buffer(rv)[:], tuple(state_ptr) From pypy.commits at gmail.com Mon Mar 21 00:29:44 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 20 Mar 2016 21:29:44 -0700 (PDT) Subject: [pypy-commit] pypy default: Reduce diff with py3k Message-ID: <56ef78b8.03321c0a.66922.ffff8297@mx.google.com> Author: Ronan Lamy Branch: Changeset: r83211:b23815a89df9 Date: 2016-03-21 04:28 +0000 
http://bitbucket.org/pypy/pypy/changeset/b23815a89df9/ Log: Reduce diff with py3k diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -124,7 +124,7 @@ s = rffi.charp2str(ptr) else: s = rffi.charp2strn(ptr, length) - return space.wrap(s) + return space.wrapbytes(s) # # pointer to a wchar_t: builds and returns a unicode if self.is_unichar_ptr_or_array(): @@ -372,15 +372,15 @@ rffi_fclose(self.llf) -def prepare_file_argument(space, fileobj): - fileobj.direct_flush() - if fileobj.cffi_fileobj is None: - fd = fileobj.direct_fileno() +def prepare_file_argument(space, w_fileobj): + w_fileobj.direct_flush() + if w_fileobj.cffi_fileobj is None: + fd = w_fileobj.direct_fileno() if fd < 0: raise OperationError(space.w_ValueError, space.wrap("file has no OS file descriptor")) try: - fileobj.cffi_fileobj = CffiFileObj(fd, fileobj.mode) + w_fileobj.cffi_fileobj = CffiFileObj(fd, w_fileobj.mode) except OSError, e: raise wrap_oserror(space, e) - return rffi.cast(rffi.CCHARP, fileobj.cffi_fileobj.llf) + return rffi.cast(rffi.CCHARP, w_fileobj.cffi_fileobj.llf) From pypy.commits at gmail.com Mon Mar 21 01:12:18 2016 From: pypy.commits at gmail.com (rlamy) Date: Sun, 20 Mar 2016 22:12:18 -0700 (PDT) Subject: [pypy-commit] pypy py3k: hg merge default Message-ID: <56ef82b2.41e11c0a.d2dd2.ffff895f@mx.google.com> Author: Ronan Lamy Branch: py3k Changeset: r83212:c3ae6981e11f Date: 2016-03-21 04:58 +0000 http://bitbucket.org/pypy/pypy/changeset/c3ae6981e11f/ Log: hg merge default diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -19,3 +19,4 @@ 850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0 5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1 246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0 +bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1 diff --git a/pypy/doc/release-5.0.1.rst b/pypy/doc/release-5.0.1.rst --- a/pypy/doc/release-5.0.1.rst +++ b/pypy/doc/release-5.0.1.rst @@ -9,6 +9,11 @@ .. _`lxml 3.6.0`: https://pypi.python.org/pypi/lxml/3.6.0 .. _`crash on large files`: https://bitbucket.org/pypy/pypy/issues/2260 + +The changes between PyPy 5.0 and 5.0.1 are only two bug fixes: one in +cpyext, which fixes notably (but not only) lxml; and another for a +corner case of the JIT. + What is PyPy? 
============= diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -390,4 +390,4 @@ w_fileobj.cffi_fileobj = CffiFileObj(fd, mode) except OSError, e: raise wrap_oserror(space, e) - return rffi.cast(rffi.CCHARP, fileobj.cffi_fileobj.llf) + return rffi.cast(rffi.CCHARP, w_fileobj.cffi_fileobj.llf) diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -329,6 +329,10 @@ "usemodules": ["select", "_socket", "time", "thread"], } + import os + if os.uname()[4] == 's390x': + py.test.skip("build bot for s390x cannot open sockets") + def w_make_server(self): import socket if hasattr(self, 'sock'): diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -1,9 +1,9 @@ # Edit these appropriately before running this script maj=5 min=0 -rev=0 +rev=1 branchname=release-$maj.x # ==OR== release-$maj.$min.x -tagname=release-$maj.$min # ==OR== release-$maj.$min.$rev +tagname=release-$maj.$min.$rev # This script will download latest builds from the buildmaster, rename the top # level directory, and repackage ready to be uploaded to bitbucket. It will also # download source, assuming a tag for the release already exists, and repackage them. diff --git a/rpython/rlib/rvmprof/src/vmprof_config.h b/rpython/rlib/rvmprof/src/vmprof_config.h --- a/rpython/rlib/rvmprof/src/vmprof_config.h +++ b/rpython/rlib/rvmprof/src/vmprof_config.h @@ -1,6 +1,10 @@ #define HAVE_SYS_UCONTEXT_H #if defined(__FreeBSD__) -#define PC_FROM_UCONTEXT uc_mcontext.mc_rip + #ifdef __i386__ + #define PC_FROM_UCONTEXT uc_mcontext.mc_eip + #else + #define PC_FROM_UCONTEXT uc_mcontext.mc_rip + #endif #elif defined( __APPLE__) #if ((ULONG_MAX) == (UINT_MAX)) #define PC_FROM_UCONTEXT uc_mcontext->__ss.__eip From pypy.commits at gmail.com Mon Mar 21 03:53:27 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 21 Mar 2016 00:53:27 -0700 (PDT) Subject: [pypy-commit] buildbot default: remove nightly applevel linux64, move nightly py3k to jit, add lock for speed-old Message-ID: <56efa877.88c8c20a.db21a.7aba@mx.google.com> Author: mattip Branch: Changeset: r996:1397a6c49fbe Date: 2016-03-21 09:47 +0200 http://bitbucket.org/pypy/buildbot/changeset/1397a6c49fbe/ Log: remove nightly applevel linux64, move nightly py3k to jit, add lock for speed-old diff --git a/bot2/pypybuildbot/builds.py b/bot2/pypybuildbot/builds.py --- a/bot2/pypybuildbot/builds.py +++ b/bot2/pypybuildbot/builds.py @@ -21,10 +21,13 @@ # translations in parallel, but then the actual benchmarks are run in # sequence. 
-# there are 8 logical CPUs, but only 4 physical ones, and only enough memory for ~3 translations +# tannit has 8 logical CPUs, but only 4 physical ones, and memory for ~3 translations TannitCPU = locks.MasterLock('tannit_cpu', maxCount=3) SpeedPythonCPU = locks.MasterLock('speed_python_cpu', maxCount=24) WinSlaveLock = locks.SlaveLock('win_cpu', maxCount=1) +# speed-old has 24 cores, but memory for ~2 translations +SpeedOldLock = locks.MasterLock('speed_old_lock', maxCount=2) + # The cross translation machine can accomodate 2 jobs at the same time ARMCrossLock = locks.MasterLock('arm_cpu', maxCount=2) diff --git a/bot2/pypybuildbot/master.py b/bot2/pypybuildbot/master.py --- a/bot2/pypybuildbot/master.py +++ b/bot2/pypybuildbot/master.py @@ -50,6 +50,7 @@ TannitCPU = pypybuilds.TannitCPU WinSlaveLock = pypybuilds.WinSlaveLock +SpeedOldLock = pypybuilds.SpeedOldLock pypyOwnTestFactory = pypybuilds.Own() pypyOwnTestFactoryWin = pypybuilds.Own(platform="win32") @@ -272,7 +273,7 @@ JITLINUX32, # on tannit32, uses 1 core JITLINUX64, # on speed-old, uses 1 core #APPLVLLINUX32, # on tannit32, uses 1 core - APPLVLLINUX64, # on speed-old, uses 1 core + #APPLVLLINUX64, # on speed-old, uses 1 core # other platforms #MACOSX32, # on minime JITWIN32, # on allegro_win32, SalsaSalsa @@ -303,7 +304,7 @@ Nightly("nightly-3-00-py3k", [ LINUX64, # on speed-old, uses all cores - APPLVLLINUX64, # on speed-old, uses 1 core + JITLINUX64, # on speed-old, uses 1 core ], branch="py3k", hour=3, minute=0), # S390X vm (ibm-research) @@ -378,7 +379,7 @@ "builddir": LINUX64, "factory": pypyOwnTestFactory, "category": 'linux64', - #"locks": [TannitCPU.access('counting')], + "locks": [SpeedOldLock.access('counting')], }, {"name": APPLVLLINUX32, #"slavenames": ["allegro32"], @@ -393,7 +394,7 @@ "builddir": APPLVLLINUX64, "factory": pypyTranslatedAppLevelTestFactory64, "category": "linux64", - #"locks": [TannitCPU.access('counting')], + "locks": [SpeedOldLock.access('counting')], }, {"name": LIBPYTHON_LINUX32, "slavenames": ["tannit32"], @@ -408,7 +409,7 @@ "builddir": LIBPYTHON_LINUX64, "factory": pypyTranslatedLibPythonTestFactory, "category": "linux64", - #"locks": [TannitCPU.access('counting')], + "locks": [SpeedOldLock.access('counting')], }, {"name" : JITLINUX32, #"slavenames": ["allegro32"], @@ -423,7 +424,7 @@ 'builddir': JITLINUX64, 'factory': pypyJITTranslatedTestFactory64, 'category': 'linux64', - #"locks": [TannitCPU.access('counting')], + "locks": [SpeedOldLock.access('counting')], }, {"name": JITBENCH, "slavenames": ["tannit32"], @@ -444,6 +445,7 @@ "builddir": JITBENCH64_NEW, "factory": pypyJITBenchmarkFactory64_speed, "category": "benchmark-run", + "locks": [SpeedOldLock.access('exclusive')], }, {"name": MACOSX32, "slavenames": ["minime"], From pypy.commits at gmail.com Mon Mar 21 07:14:17 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 21 Mar 2016 04:14:17 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: implement PyThread_init_thread as a NOP, start to implement PyThread_start_new_thread Message-ID: <56efd789.e6bbc20a.a5878.ffffc53d@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83213:1c960078443c Date: 2016-03-21 13:13 +0200 http://bitbucket.org/pypy/pypy/changeset/1c960078443c/ Log: implement PyThread_init_thread as a NOP, start to implement PyThread_start_new_thread diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -429,7 +429,7 @@ 'PyThread_acquire_lock', 'PyThread_release_lock', 
'PyThread_create_key', 'PyThread_delete_key', 'PyThread_set_key_value', 'PyThread_get_key_value', 'PyThread_delete_key_value', - 'PyThread_ReInitTLS', + 'PyThread_ReInitTLS', 'PyThread_init_thread', 'PyStructSequence_InitType', 'PyStructSequence_New', 'PyStructSequence_UnnamedField', diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -54,6 +54,15 @@ return 0 return 1 +thread_func = lltype.Ptr(lltype.FuncType([rffi.VOIDP], lltype.Void)) + at cpython_api([thread_func, rffi.VOIDP], rffi.INT_real, error=-1) +def PyThread_start_new_thread(space, func, arg): + from pypy.module.thread import os_thread + w_args = space.newtuple([arg]) + XXX # How to wrap func as a space.callable ? + os_thread.start_new_thread(space, func, w_args) + return 0 + # XXX: might be generally useful def encapsulator(T, flavor='raw', dealloc=None): class MemoryCapsule(object): diff --git a/pypy/module/cpyext/src/pythread.c b/pypy/module/cpyext/src/pythread.c --- a/pypy/module/cpyext/src/pythread.c +++ b/pypy/module/cpyext/src/pythread.c @@ -15,6 +15,17 @@ #endif } +static int initialized; + +void +PyThread_init_thread(void) +{ + if (initialized) + return; + initialized = 1; + /*PyThread__init_thread(); a NOP on modern platforms */ +} + PyThread_type_lock PyThread_allocate_lock(void) { diff --git a/pypy/module/cpyext/stubsactive.py b/pypy/module/cpyext/stubsactive.py --- a/pypy/module/cpyext/stubsactive.py +++ b/pypy/module/cpyext/stubsactive.py @@ -42,7 +42,3 @@ """ return -1 -thread_func = lltype.Ptr(lltype.FuncType([rffi.VOIDP], lltype.Void)) - at cpython_api([thread_func, rffi.VOIDP], rffi.INT_real, error=-1) -def PyThread_start_new_thread(space, func, arg): - return -1 From pypy.commits at gmail.com Mon Mar 21 08:40:30 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 21 Mar 2016 05:40:30 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: implement deadranges Message-ID: <56efebbe.a2afc20a.a24d3.fffff09a@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83214:71d4428daa8d Date: 2016-03-21 14:39 +0200 http://bitbucket.org/pypy/pypy/changeset/71d4428daa8d/ Log: implement deadranges diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -88,8 +88,16 @@ self.start = start self.pos = start self._count = start + self.start_index = start self.end = end + def get_dead_ranges(self): + return self.trace.get_dead_ranges(self.metainterp_sd) + + def kill_cache_at(self, pos): + if pos: + self._cache[pos] = None + def _get(self, i): res = self._cache[i] assert res is not None @@ -183,6 +191,7 @@ iter = TraceIterator(self.trace, self.start, self.trace._pos, self.inputargs, metainterp_sd=metainterp_sd) iter._count = self.count + iter.start_index = self.count return iter def combine_uint(index1, index2): @@ -210,6 +219,8 @@ self.vref_array = vref_array class Trace(BaseTrace): + _deadranges = (-1, -1) + def __init__(self, inputargs): self._ops = [rffi.cast(rffi.SHORT, -15)] * 30000 self._pos = 0 @@ -403,12 +414,37 @@ index = t.next_element_update_live_range(index, liveranges) return liveranges - def unpack(self): - iter = self.get_iter() + def get_dead_ranges(self, metainterp_sd): + """ Same as get_live_ranges, but returns a list of "dying" indexes, + such as for each index x, the number found there is for sure dead + before x + """ + def insert(ranges, pos, v): + # XXX skiplist + while 
ranges[pos]: + pos += 1 + if pos == len(ranges): + return + ranges[pos] = v + + if self._deadranges != (-1, -1): + if self._deadranges[0] == self._count: + return self._deadranges[1] + liveranges = self.get_live_ranges(metainterp_sd) + deadranges = [0] * (self._count + 1) + for i in range(self._start, len(liveranges)): + elem = liveranges[i] + if elem: + insert(deadranges, elem + 1, i) + self._deadranges = (self._count, deadranges) + return deadranges + + def unpack(self, metainterp_sd): + iter = self.get_iter(metainterp_sd) ops = [] while not iter.done(): ops.append(iter.next()) - return ops + return iter.inputargs, ops def tag(kind, pos): #if not SMALL_INT_START <= pos < SMALL_INT_STOP: diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -507,6 +507,7 @@ def propagate_all_forward(self, trace, call_pure_results=None, flush=True): self.trace = trace + deadranges = trace.get_dead_ranges() self.call_pure_results = call_pure_results last_op = None i = 0 @@ -517,6 +518,7 @@ last_op = op break self.first_optimization.propagate_forward(op) + trace.kill_cache_at(deadranges[i + trace.start_index]) i += 1 # accumulate counters if flush: diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -557,9 +557,21 @@ call_pure_results[list(k)] = v return call_pure_results + def pick_cls(self, inp): + if inp.type == 'i': + return history.IntFrontendOp + elif inp.type == 'r': + xxx + else: + assert inp.type == 'f' + xxx + def convert_loop_to_packed(self, loop, skip_last=False): + XXX # rewrite from rpython.jit.metainterp.opencoder import Trace - trace = Trace(loop.inputargs) + inputargs = [self.pick_cls(inparg)(i) for i, inparg in + enumerate(loop.inputargs)] + trace = Trace(inputargs) ops = loop.operations if skip_last: ops = ops[:-1] @@ -635,7 +647,7 @@ def convert_old_style_to_targets(loop, jump): newloop = TreeLoop(loop.name) newloop.inputargs = loop.inputargs - newloop.operations = [ResOperation(rop.LABEL, loop.inputargs, -1, descr=FakeDescr())] + \ + newloop.operations = [ResOperation(rop.LABEL, loop.inputargs, descr=FakeDescr())] + \ loop.operations if not jump: assert newloop.operations[-1].getopnum() == rop.JUMP diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -195,3 +195,19 @@ t.record_op(rop.GUARD_TRUE, [i0]) resume.capture_resumedata([], [i1, i2, p0], [p0, i1], t) assert t.get_live_ranges(metainterp_sd) == [4, 4, 4, 4, 0] + + def test_deadranges(self): + i0, i1, i2 = IntFrontendOp(0), IntFrontendOp(0), IntFrontendOp(0) + t = Trace([i0, i1, i2]) + p0 = FakeOp(t.record_op(rop.NEW_WITH_VTABLE, [], descr=SomeDescr())) + t.record_op(rop.GUARD_TRUE, [i0]) + resume.capture_resumedata([], [i1, i2, p0], [p0, i1], t) + i3 = FakeOp(t.record_op(rop.INT_ADD, [i1, ConstInt(1)])) + i4 = FakeOp(t.record_op(rop.INT_ADD, [i3, ConstInt(1)])) + t.record_op(rop.ESCAPE_N, [ConstInt(3)]) + t.record_op(rop.ESCAPE_N, [ConstInt(3)]) + t.record_op(rop.ESCAPE_N, [ConstInt(3)]) + t.record_op(rop.ESCAPE_N, [ConstInt(3)]) + t.record_op(rop.ESCAPE_N, [ConstInt(3)]) + t.record_op(rop.FINISH, [i4]) + assert 
t.get_dead_ranges(metainterp_sd) == [0, 0, 0, 0, 0, 3, 0, 5, 0, 0, 0, 0, 0, 6] From pypy.commits at gmail.com Mon Mar 21 08:46:57 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 21 Mar 2016 05:46:57 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: fix rptthon Message-ID: <56efed41.8fb81c0a.e11e0.ffff8fee@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83215:c2bae70672b4 Date: 2016-03-21 14:46 +0200 http://bitbucket.org/pypy/pypy/changeset/c2bae70672b4/ Log: fix rptthon diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -219,7 +219,7 @@ self.vref_array = vref_array class Trace(BaseTrace): - _deadranges = (-1, -1) + _deadranges = (-1, None) def __init__(self, inputargs): self._ops = [rffi.cast(rffi.SHORT, -15)] * 30000 @@ -427,7 +427,7 @@ return ranges[pos] = v - if self._deadranges != (-1, -1): + if self._deadranges != (-1, None): if self._deadranges[0] == self._count: return self._deadranges[1] liveranges = self.get_live_ranges(metainterp_sd) From pypy.commits at gmail.com Mon Mar 21 08:53:06 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 21 Mar 2016 05:53:06 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: another rpython fix Message-ID: <56efeeb2.0357c20a.c858a.ffffe811@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83216:c7460558e25f Date: 2016-03-21 14:51 +0200 http://bitbucket.org/pypy/pypy/changeset/c7460558e25f/ Log: another rpython fix diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -145,9 +145,9 @@ liveranges[index] = index if opwithdescr[opnum]: descr_index = self._next() - if rop.is_guard(opnum): - self.get_snapshot_iter(descr_index).update_liveranges( - index, liveranges) + if rop.is_guard(opnum): + self.get_snapshot_iter(descr_index).update_liveranges( + index, liveranges) return index + 1 def next(self): From pypy.commits at gmail.com Mon Mar 21 08:58:21 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 21 Mar 2016 05:58:21 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: dreaded rpython... Message-ID: <56efefed.e213c20a.62e64.fffff587@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83217:f3f01ee52e15 Date: 2016-03-21 14:57 +0200 http://bitbucket.org/pypy/pypy/changeset/f3f01ee52e15/ Log: dreaded rpython... 
diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -91,7 +91,7 @@ self.start_index = start self.end = end - def get_dead_ranges(self): + def get_dead_ranges(self, metainterp_sd=None): return self.trace.get_dead_ranges(self.metainterp_sd) def kill_cache_at(self, pos): @@ -414,7 +414,7 @@ index = t.next_element_update_live_range(index, liveranges) return liveranges - def get_dead_ranges(self, metainterp_sd): + def get_dead_ranges(self, metainterp_sd=None): """ Same as get_live_ranges, but returns a list of "dying" indexes, such as for each index x, the number found there is for sure dead before x From pypy.commits at gmail.com Mon Mar 21 11:56:17 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 21 Mar 2016 08:56:17 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: class (const) attributes moved to outer scope Message-ID: <56f019a1.10921c0a.55175.7766@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83218:8022b7c95b47 Date: 2016-03-21 08:59 +0100 http://bitbucket.org/pypy/pypy/changeset/8022b7c95b47/ Log: class (const) attributes moved to outer scope diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -10,6 +10,7 @@ from rpython.jit.metainterp.history import (Const, VOID, ConstInt) from rpython.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT from rpython.jit.metainterp.compile import ResumeGuardDescr +from rpython.jit.metainterp.jitlog import MARK_TRACE_ASM from rpython.rtyper.lltypesystem import lltype, rffi, rstr, llmemory from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rtyper.annlowlevel import cast_instance_to_gcref @@ -532,8 +533,8 @@ looptoken._x86_ops_offset = ops_offset looptoken._ll_function_addr = rawstart if logger: - logger.log_trace(logger.MARK_TRACE_ASM, inputargs, operations, - ops_offset=ops_offset, self.mc) + logger.log_trace(MARK_TRACE_ASM, inputargs, operations, + ops_offset=ops_offset, mc=self.mc) self.fixup_target_tokens(rawstart) self.teardown() @@ -587,8 +588,9 @@ frame_depth = max(self.current_clt.frame_info.jfi_frame_depth, frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) if logger: - logger.log_trace(logger.MARK_TRACE_ASM, inputargs, operations, - faildescr=faildescr, ops_offset=ops_offset) + logger.log_trace(MARK_TRACE_ASM, inputargs, operations, + faildescr=faildescr, ops_offset=ops_offset, + mc=self.mc) self.fixup_target_tokens(rawstart) self.update_frame_depth(frame_depth) self.teardown() diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -17,6 +17,7 @@ from rpython.jit.metainterp.optimize import InvalidLoop from rpython.jit.metainterp.resume import (PENDINGFIELDSP, ResumeDataDirectReader, AccumInfo) +from rpython.jit.metainterp.jitlog import MARK_TRACE_OPT from rpython.jit.metainterp.resumecode import NUMBERING from rpython.jit.codewriter import heaptracker, longlong @@ -480,8 +481,7 @@ def do_compile_loop(jd_id, unique_id, metainterp_sd, inputargs, operations, looptoken, log=True, name='', memo=None): - mark = VMProfJitLogger.MARK_TRACE_OPT - metainterp_sd.jitlog.log_trace(mark, inputargs, operations) + metainterp_sd.jitlog.log_trace(MARK_TRACE_OPT, inputargs, operations) # TODO remove old 
metainterp_sd.logger_ops.log_loop(inputargs, operations, -2, 'compiling', None, name, memo) @@ -493,8 +493,8 @@ def do_compile_bridge(metainterp_sd, faildescr, inputargs, operations, original_loop_token, log=True, memo=None): - mark = VMProfJitLogger.MARK_TRACE_OPT - metainterp_sd.jitlog.log_trace(mark, inputargs, operations, faildescr=faildescr) + metainterp_sd.jitlog.log_trace(MARK_TRACE_OPT, inputargs, operations, + faildescr=faildescr) # TODO remove old metainterp_sd.logger_ops.log_bridge(inputargs, operations, "compiling", memo=memo) diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -1,42 +1,26 @@ from rpython.rlib.rvmprof.rvmprof import cintf from rpython.jit.metainterp import resoperation as resoperations -import struct +from struct import pack -class JitLogMarshall(object): - def encode(self, op): - str_args = [arg.repr_short(arg._repr_memo) for arg in op.getarglist()] - descr = op.getdescr() - line = struct.pack(' as two unsigend longs - lendian_addrs = struct.pack(' Author: Richard Plangger Branch: s390x-z196 Changeset: r83219:2c67ae9165f5 Date: 2016-03-21 09:00 +0100 http://bitbucket.org/pypy/pypy/changeset/2c67ae9165f5/ Log: close branch From pypy.commits at gmail.com Mon Mar 21 11:56:21 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 21 Mar 2016 08:56:21 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: translation issues Message-ID: <56f019a5.552f1c0a.85295.77ff@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83220:9bd952847a5f Date: 2016-03-21 09:50 +0100 http://bitbucket.org/pypy/pypy/changeset/9bd952847a5f/ Log: translation issues diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -1,6 +1,8 @@ from rpython.rlib.rvmprof.rvmprof import cintf from rpython.jit.metainterp import resoperation as resoperations -from struct import pack +from rpython.jit.metainterp.history import ConstInt, ConstFloat +import sys +import weakref MARK_INPUT_ARGS = 0x10 MARK_RESOP_META = 0x11 @@ -20,10 +22,34 @@ # the machine code was patched (e.g. guard) MARK_ASM_PATCH = 0x19 +IS_32_BIT = sys.maxint == 2**31-1 + +# why is there no rlib/rstruct/pack.py? +def encode_le_16bit(val): + return chr((val >> 0) & 0xff) + chr((val >> 8) & 0xff) +def encode_le_32bit(val): + return ''.join([chr((val >> 0) & 0xff), + chr((val >> 8) & 0xff), + chr((val >> 16) & 0xff), + chr((val >> 24) & 0xff)]) +def encode_le_addr(val): + if IS_32_BIT: + return encode_be_32bit(val) + else: + return ''.join([chr((val >> 0) & 0xff), + chr((val >> 8) & 0xff), + chr((val >> 16) & 0xff), + chr((val >> 24) & 0xff), + chr((val >> 32) & 0xff), + chr((val >> 40) & 0xff), + chr((val >> 48) & 0xff), + chr((val >> 56)& 0xff)]) + class VMProfJitLogger(object): def __init__(self): self.cintf = cintf.setup() + self.memo = {} def setup_once(self): self.cintf.jitlog_try_init_using_env() @@ -32,7 +58,7 @@ count = len(resoperations.opname) mark = MARK_RESOP_META for opnum, opname in resoperations.opname.items(): - line = pack(" as two unsigend longs - lendian_addrs = pack(' Author: Richard Plangger Branch: new-jit-log Changeset: r83221:0b922ecc3d86 Date: 2016-03-21 16:55 +0100 http://bitbucket.org/pypy/pypy/changeset/0b922ecc3d86/ Log: added patch mark (rewrite machinecode). 
some refactoring to parse it more easily diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -533,9 +533,8 @@ looptoken._x86_ops_offset = ops_offset looptoken._ll_function_addr = rawstart if logger: - logger.log_trace(MARK_TRACE_ASM, inputargs, operations, - ops_offset=ops_offset, mc=self.mc) - + log = logger.log_trace(MARK_TRACE_ASM, None, self.mc) + log.write(inputargs, operations, None, ops_offset) self.fixup_target_tokens(rawstart) self.teardown() # oprofile support @@ -583,14 +582,15 @@ debug_bridge(descr_number, rawstart, codeendpos) self.patch_pending_failure_recoveries(rawstart) # patch the jump from original guard + if logger: + logger.log_patch_guard(faildescr.adr_new_target, rawstart) self.patch_jump_for_descr(faildescr, rawstart) ops_offset = self.mc.ops_offset frame_depth = max(self.current_clt.frame_info.jfi_frame_depth, frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) if logger: - logger.log_trace(MARK_TRACE_ASM, inputargs, operations, - faildescr=faildescr, ops_offset=ops_offset, - mc=self.mc) + log = logger.log_trace(MARK_TRACE_ASM, None, self.mc) + log.write(inputargs, operations, faildescr, ops_offset) self.fixup_target_tokens(rawstart) self.update_frame_depth(frame_depth) self.teardown() diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -481,7 +481,8 @@ def do_compile_loop(jd_id, unique_id, metainterp_sd, inputargs, operations, looptoken, log=True, name='', memo=None): - metainterp_sd.jitlog.log_trace(MARK_TRACE_OPT, inputargs, operations) + log = metainterp_sd.jitlog.log_trace(MARK_TRACE_OPT, metainterp_sd, None) + log.write(inputargs, operations) # TODO remove old metainterp_sd.logger_ops.log_loop(inputargs, operations, -2, 'compiling', None, name, memo) @@ -493,8 +494,8 @@ def do_compile_bridge(metainterp_sd, faildescr, inputargs, operations, original_loop_token, log=True, memo=None): - metainterp_sd.jitlog.log_trace(MARK_TRACE_OPT, inputargs, operations, - faildescr=faildescr) + log = metainterp_sd.jitlog.log_trace(MARK_TRACE_OPT, metainterp_sd, None) + log.write(inputargs, operations, faildescr=faildescr) # TODO remove old metainterp_sd.logger_ops.log_bridge(inputargs, operations, "compiling", memo=memo) diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -1,6 +1,8 @@ from rpython.rlib.rvmprof.rvmprof import cintf from rpython.jit.metainterp import resoperation as resoperations from rpython.jit.metainterp.history import ConstInt, ConstFloat +from rpython.rlib.objectmodel import we_are_translated +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi import sys import weakref @@ -24,27 +26,6 @@ IS_32_BIT = sys.maxint == 2**31-1 -# why is there no rlib/rstruct/pack.py? 
-def encode_le_16bit(val): - return chr((val >> 0) & 0xff) + chr((val >> 8) & 0xff) -def encode_le_32bit(val): - return ''.join([chr((val >> 0) & 0xff), - chr((val >> 8) & 0xff), - chr((val >> 16) & 0xff), - chr((val >> 24) & 0xff)]) -def encode_le_addr(val): - if IS_32_BIT: - return encode_be_32bit(val) - else: - return ''.join([chr((val >> 0) & 0xff), - chr((val >> 8) & 0xff), - chr((val >> 16) & 0xff), - chr((val >> 24) & 0xff), - chr((val >> 32) & 0xff), - chr((val >> 40) & 0xff), - chr((val >> 48) & 0xff), - chr((val >> 56)& 0xff)]) - class VMProfJitLogger(object): def __init__(self): @@ -58,7 +39,7 @@ count = len(resoperations.opname) mark = MARK_RESOP_META for opnum, opname in resoperations.opname.items(): - line = encode_le_16bit(opnum) + opname.lower() + line = self.encode_le_16bit(opnum) + opname.lower() self.write_marked(mark, line) def teardown(self): @@ -67,52 +48,106 @@ def write_marked(self, mark, line): self.cintf.jitlog_write_marked(mark, line, len(line)) - def encode(self, op): + def log_trace(self, tag, metainterp_sd, mc, memo=None): + if self.cintf.jitlog_filter(tag): + return + assert isinstance(tag, int) + if memo is None: + memo = {} + return LogTrace(tag, memo, metainterp_sd, mc, self) + + def log_patch_guard(self, addr, target_addr): + if self.cintf.jitlog_filter(tag): + return + le_addr_write = self.encode_le_addr(addr) + le_len = self.encode_le_32bit(8) + le_addr = self.encode_le_addr(target_addr) + lst = [le_addr, le_len, le_addr] + self.cintf.jitlog_filter(MARK_ASM_PATCH, ''.join(lst)) + + def encode_le_16bit(self, val): + return chr((val >> 0) & 0xff) + chr((val >> 8) & 0xff) + + def encode_le_32bit(self, val): + return ''.join([chr((val >> 0) & 0xff), + chr((val >> 8) & 0xff), + chr((val >> 16) & 0xff), + chr((val >> 24) & 0xff)]) + + def encode_le_addr(self,val): + if IS_32_BIT: + return encode_be_32bit(val) + else: + return ''.join([chr((val >> 0) & 0xff), + chr((val >> 8) & 0xff), + chr((val >> 16) & 0xff), + chr((val >> 24) & 0xff), + chr((val >> 32) & 0xff), + chr((val >> 40) & 0xff), + chr((val >> 48) & 0xff), + chr((val >> 56)& 0xff)]) + + +class LogTrace(object): + def __init__(self, tag, memo, metainterp_sd, mc, logger): + self.memo = memo + self.metainterp_sd = metainterp_sd + if self.metainterp_sd is not None: + self.ts = metainterp_sd.cpu.ts + self.tag = tag + self.mc = mc + self.logger = logger + + def write(self, args, ops, faildescr=None, ops_offset={}): + log = self.logger + + # write the initial tag + if faildescr is None: + log.write_marked(self.tag, 'loop') + else: + log.write_marked(self.tag, 'bridge') + + # input args + str_args = [self.var_to_str(arg) for arg in args] + log.write_marked(MARK_INPUT_ARGS, ','.join(str_args)) + + # assembler address (to not duplicate it in write_code_dump) + if self.mc is not None: + absaddr = self.mc.absolute_addr() + rel = self.mc.get_relative_pos() + # packs as two unsigend longs + le_addr1 = self.encode_le_addr(absaddr) + le_addr2 = self.encode_le_addr(absaddr + rel) + log.write_marked(MARK_ASM_ADDR, le_addr1 + le_addr2) + for i,op in enumerate(ops): + mark, line = self.encode_op(op) + log.write_marked(mark, line) + self.write_core_dump(ops, i, op, ops_offset) + + self.memo = {} + + def encode_op(self, op): + """ an operation is written as follows: + \ + \ + ,,...,, + The marker indicates if the last argument is + a descr or a normal argument. 
+ """ str_args = [self.var_to_str(arg) for arg in op.getarglist()] descr = op.getdescr() - le_len = encode_le_32bit(op.getopnum()) - line = le_len + ','.join(str_args) + le_opnum = self.logger.encode_le_16bit(op.getopnum()) + str_res = self.var_to_str(op) + line = le_opnum + ','.join([str_res] + str_args) if descr: - line += "|" + str(descr) - return MARK_RESOP_DESCR, line + descr_str = descr.repr_of_descr() + return MARK_RESOP_DESCR, line + ',' + descr_str else: return MARK_RESOP, line - def log_trace(self, tag, args, ops, - faildescr=None, ops_offset={}, mc=None): - # this is a mixture of binary and text! - if self.cintf.jitlog_filter(tag): - return - assert isinstance(tag, int) - # write the initial tag - if faildescr is None: - self.write_marked(tag, 'loop') - else: - self.write_marked(tag, 'bridge') - - # input args - str_args = [self.var_to_str(arg) for arg in args] - self.write_marked(MARK_INPUT_ARGS, ','.join(str_args)) - - # assembler address (to not duplicate it in write_code_dump) - if mc is not None: - absaddr = mc.absolute_addr() - rel = mc.get_relative_pos() - # packs as two unsigend longs - le_addr1 = encode_le_addr(absaddr) - le_addr2 = encode_le_addr(absaddr + rel) - self.write_marked(MARK_ASM_ADDR, le_addr1 + le_addr2) - for i,op in enumerate(ops): - mark, line = self.encode(op) - self.write_marked(mark, line) - self.write_core_dump(ops, i, op, ops_offset, mc) - - self.memo = {} - - def write_core_dump(self, operations, i, op, ops_offset, mc): - return - if mc is None: + def write_core_dump(self, operations, i, op, ops_offset): + if self.mc is None: return op2 = None @@ -138,12 +173,12 @@ # end offset is either the last pos in the assembler # or the offset of op2 if op2 is None: - end_offset = mc.get_relative_pos() + end_offset = self.mc.get_relative_pos() else: end_offset = ops_offset[op2] - dump = mc.copy_core_dump(mc.absolute_addr(), start_offset) - self.write_marked(MARK_ASM, dump) + dump = self.mc.copy_core_dump(self.mc.absolute_addr(), start_offset) + self.logger.write_marked(MARK_ASM, dump) def var_to_str(self, arg): try: @@ -151,18 +186,17 @@ except KeyError: mv = len(self.memo) self.memo[arg] = mv - # TODO - #if isinstance(arg, ConstInt): - # if int_could_be_an_address(arg.value): - # addr = arg.getaddr() - # name = self.metainterp_sd.get_name_from_address(addr) - # if name: - # return 'ConstClass(' + name + ')' - # return str(arg.value) - #elif isinstance(arg, self.ts.ConstRef): - # if arg.value: - # return 'ConstPtr(ptr' + str(mv) + ')' - # return 'ConstPtr(null)' + if isinstance(arg, ConstInt): + if self.metainterp_sd and int_could_be_an_address(arg.value): + addr = arg.getaddr() + name = self.metainterp_sd.get_name_from_address(addr) + if name: + return 'ConstClass(' + name + ')' + return str(arg.value) + elif self.ts is not None and isinstance(arg, self.ts.ConstRef): + if arg.value: + return 'ConstPtr(ptr' + str(mv) + ')' + return 'ConstPtr(null)' if isinstance(arg, ConstFloat): return str(arg.getfloat()) elif arg is None: @@ -178,3 +212,9 @@ else: return '?' 
+def int_could_be_an_address(x): + if we_are_translated(): + x = rffi.cast(lltype.Signed, x) # force it + return not (-32768 <= x <= 32767) + else: + return isinstance(x, llmemory.AddressAsInt) diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py --- a/rpython/jit/metainterp/optimizeopt/__init__.py +++ b/rpython/jit/metainterp/optimizeopt/__init__.py @@ -55,8 +55,8 @@ debug_start("jit-optimize") inputargs = compile_data.start_label.getarglist() try: - metainterp_sd.jitlog.log_trace(MARK_TRACE, inputargs, - compile_data.operations) + log = metainterp_sd.jitlog.log_trace(MARK_TRACE, metainterp_sd, None) + log.write(inputargs, compile_data.operations) # metainterp_sd.logger_noopt.log_loop(inputargs, compile_data.operations, From pypy.commits at gmail.com Mon Mar 21 13:09:02 2016 From: pypy.commits at gmail.com (amauryfa) Date: Mon, 21 Mar 2016 10:09:02 -0700 (PDT) Subject: [pypy-commit] pypy default: Cherry-pick a few changes from the cpyext-ext branch: Message-ID: <56f02aae.890bc30a.e5675.4de7@mx.google.com> Author: Amaury Forgeot d'Arc Branch: Changeset: r83223:edf35def96ce Date: 2016-03-21 17:55 +0100 http://bitbucket.org/pypy/pypy/changeset/edf35def96ce/ Log: Cherry-pick a few changes from the cpyext-ext branch: 3df26326119c 43629fab94e1 931af853eaab - expose "defenc" and "hash" fields of PyUnicodeObject - Allow PyString_AsString to process unicode objects. The "defenc" field is returned. diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, cpython_struct, bootstrap_function, build_type_checkers, @@ -134,8 +134,14 @@ if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: pass # typecheck returned "ok" without forcing 'ref' at all elif not PyString_Check(space, ref): # otherwise, use the alternate way - raise OperationError(space.w_TypeError, space.wrap( - "PyString_AsString only support strings")) + from pypy.module.cpyext.unicodeobject import ( + PyUnicode_Check, _PyUnicode_AsDefaultEncodedString) + if PyUnicode_Check(space, ref): + ref = _PyUnicode_AsDefaultEncodedString(space, ref, None) + else: + raise oefmt(space.w_TypeError, + "expected string or Unicode object, %T found", + from_ref(space, ref)) ref_str = rffi.cast(PyStringObject, ref) if not ref_str.c_buffer: # copy string buffer @@ -147,8 +153,14 @@ @cpython_api([PyObject, rffi.CCHARPP, rffi.CArrayPtr(Py_ssize_t)], rffi.INT_real, error=-1) def PyString_AsStringAndSize(space, ref, buffer, length): if not PyString_Check(space, ref): - raise OperationError(space.w_TypeError, space.wrap( - "PyString_AsStringAndSize only support strings")) + from pypy.module.cpyext.unicodeobject import ( + PyUnicode_Check, _PyUnicode_AsDefaultEncodedString) + if PyUnicode_Check(space, ref): + ref = _PyUnicode_AsDefaultEncodedString(space, ref, None) + else: + raise oefmt(space.w_TypeError, + "expected string or Unicode object, %T found", + from_ref(space, ref)) ref_str = rffi.cast(PyStringObject, ref) if not ref_str.c_buffer: # copy string buffer diff --git a/pypy/module/cpyext/include/unicodeobject.h b/pypy/module/cpyext/include/unicodeobject.h --- a/pypy/module/cpyext/include/unicodeobject.h +++ b/pypy/module/cpyext/include/unicodeobject.h @@ 
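A standalone illustration of the record framing used by the new-jit-log changesets above: each jitlog record is a single mark byte followed by a payload whose integers are written least-significant byte first (the "translation issues" changeset replaces struct.pack with the hand-written chr() helpers encode_le_16bit, encode_le_32bit and encode_le_addr for exactly this). The sketch below is plain Python for illustration only; encode_le, decode_le, write_record and the example mark byte are made up and do not appear in the commits.

    def encode_le(value, nbytes):
        # little-endian: emit the least significant byte first
        return ''.join([chr((value >> (8 * i)) & 0xff) for i in range(nbytes)])

    def decode_le(data):
        # inverse of encode_le, for reading a record back
        value = 0
        for i in range(len(data)):
            value |= ord(data[i]) << (8 * i)
        return value

    def write_record(mark, payload):
        # a record is one mark byte followed by the payload bytes
        return chr(mark) + payload

    # example: a record carrying two 8-byte addresses (the mark byte 0x42
    # is arbitrary here, not one of the MARK_* constants from jitlog.py)
    record = write_record(0x42, encode_le(0x1000, 8) + encode_le(0x1fff, 8))
    assert decode_le(record[1:9]) == 0x1000
    assert decode_le(record[9:17]) == 0x1fff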
-20,8 +20,12 @@ typedef struct { PyObject_HEAD - Py_UNICODE *buffer; + Py_UNICODE *str; Py_ssize_t size; + long hash; /* Hash value; -1 if not set */ + PyObject *defenc; /* (Default) Encoded version as Python + string, or NULL; this is used for + implementing the buffer protocol */ } PyUnicodeObject; diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -139,6 +139,44 @@ ]) module.getstring() + def test_py_string_as_string_Unicode(self): + module = self.import_extension('foo', [ + ("getstring_unicode", "METH_NOARGS", + """ + Py_UNICODE chars[] = {'t', 'e', 's', 't'}; + PyObject* u1 = PyUnicode_FromUnicode(chars, 4); + char *buf; + buf = PyString_AsString(u1); + if (buf == NULL) + return NULL; + if (buf[3] != 't') { + PyErr_SetString(PyExc_AssertionError, "Bad conversion"); + return NULL; + } + Py_DECREF(u1); + Py_INCREF(Py_None); + return Py_None; + """), + ("getstringandsize_unicode", "METH_NOARGS", + """ + Py_UNICODE chars[] = {'t', 'e', 's', 't'}; + PyObject* u1 = PyUnicode_FromUnicode(chars, 4); + char *buf; + Py_ssize_t len; + if (PyString_AsStringAndSize(u1, &buf, &len) < 0) + return NULL; + if (len != 4) { + PyErr_SetString(PyExc_AssertionError, "Bad Length"); + return NULL; + } + Py_DECREF(u1); + Py_INCREF(Py_None); + return Py_None; + """), + ]) + module.getstring_unicode() + module.getstringandsize_unicode() + def test_format_v(self): module = self.import_extension('foo', [ ("test_string_format_v", "METH_VARARGS", diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -24,7 +24,7 @@ if(PyUnicode_GetSize(s) == 11) { result = 1; } - if(s->ob_type->tp_basicsize != sizeof(void*)*5) + if(s->ob_type->tp_basicsize != sizeof(void*)*7) result = 0; Py_DECREF(s); return PyBool_FromLong(result); @@ -66,6 +66,7 @@ c = PyUnicode_AsUnicode(s); c[0] = 'a'; c[1] = 0xe9; + c[2] = 0x00; c[3] = 'c'; return s; """), @@ -74,7 +75,35 @@ assert len(s) == 4 assert s == u'a�\x00c' + def test_hash(self): + module = self.import_extension('foo', [ + ("test_hash", "METH_VARARGS", + ''' + PyObject* obj = (PyTuple_GetItem(args, 0)); + long hash = ((PyUnicodeObject*)obj)->hash; + return PyLong_FromLong(hash); + ''' + ), + ]) + res = module.test_hash(u"xyz") + assert res == hash(u'xyz') + def test_default_encoded_string(self): + module = self.import_extension('foo', [ + ("test_default_encoded_string", "METH_O", + ''' + PyObject* result = _PyUnicode_AsDefaultEncodedString(args, "replace"); + Py_INCREF(result); + return result; + ''' + ), + ]) + res = module.test_default_encoded_string(u"xyz") + assert isinstance(res, str) + assert res == 'xyz' + res = module.test_default_encoded_string(u"caf\xe9") + assert isinstance(res, str) + assert res == 'caf?' 
class TestUnicode(BaseApiTest): def test_unicodeobject(self, space, api): @@ -155,22 +184,22 @@ def test_unicode_resize(self, space, api): py_uni = new_empty_unicode(space, 10) ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw') - py_uni.c_buffer[0] = u'a' - py_uni.c_buffer[1] = u'b' - py_uni.c_buffer[2] = u'c' + py_uni.c_str[0] = u'a' + py_uni.c_str[1] = u'b' + py_uni.c_str[2] = u'c' ar[0] = rffi.cast(PyObject, py_uni) api.PyUnicode_Resize(ar, 3) py_uni = rffi.cast(PyUnicodeObject, ar[0]) assert py_uni.c_size == 3 - assert py_uni.c_buffer[1] == u'b' - assert py_uni.c_buffer[3] == u'\x00' + assert py_uni.c_str[1] == u'b' + assert py_uni.c_str[3] == u'\x00' # the same for growing ar[0] = rffi.cast(PyObject, py_uni) api.PyUnicode_Resize(ar, 10) py_uni = rffi.cast(PyUnicodeObject, ar[0]) assert py_uni.c_size == 10 - assert py_uni.c_buffer[1] == 'b' - assert py_uni.c_buffer[10] == '\x00' + assert py_uni.c_str[1] == 'b' + assert py_uni.c_str[10] == '\x00' Py_DecRef(space, ar[0]) lltype.free(ar, flavor='raw') diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -22,7 +22,8 @@ PyUnicodeObjectStruct = lltype.ForwardReference() PyUnicodeObject = lltype.Ptr(PyUnicodeObjectStruct) PyUnicodeObjectFields = (PyObjectFields + - (("buffer", rffi.CWCHARP), ("size", Py_ssize_t))) + (("str", rffi.CWCHARP), ("size", Py_ssize_t), + ("hash", rffi.LONG), ("defenc", PyObject))) cpython_struct("PyUnicodeObject", PyUnicodeObjectFields, PyUnicodeObjectStruct) @bootstrap_function @@ -54,16 +55,20 @@ buflen = length + 1 py_uni.c_size = length - py_uni.c_buffer = lltype.malloc(rffi.CWCHARP.TO, buflen, - flavor='raw', zero=True, - add_memory_pressure=True) + py_uni.c_str = lltype.malloc(rffi.CWCHARP.TO, buflen, + flavor='raw', zero=True, + add_memory_pressure=True) + py_uni.c_hash = -1 + py_uni.c_defenc = lltype.nullptr(PyObject.TO) return py_uni def unicode_attach(space, py_obj, w_obj): "Fills a newly allocated PyUnicodeObject with a unicode string" py_unicode = rffi.cast(PyUnicodeObject, py_obj) py_unicode.c_size = len(space.unicode_w(w_obj)) - py_unicode.c_buffer = lltype.nullptr(rffi.CWCHARP.TO) + py_unicode.c_str = lltype.nullptr(rffi.CWCHARP.TO) + py_unicode.c_hash = space.hash_w(w_obj) + py_unicode.c_defenc = lltype.nullptr(PyObject.TO) def unicode_realize(space, py_obj): """ @@ -71,17 +76,20 @@ be modified after this call. """ py_uni = rffi.cast(PyUnicodeObject, py_obj) - s = rffi.wcharpsize2unicode(py_uni.c_buffer, py_uni.c_size) + s = rffi.wcharpsize2unicode(py_uni.c_str, py_uni.c_size) w_obj = space.wrap(s) + py_uni.c_hash = space.hash_w(w_obj) track_reference(space, py_obj, w_obj) return w_obj @cpython_api([PyObject], lltype.Void, header=None) def unicode_dealloc(space, py_obj): py_unicode = rffi.cast(PyUnicodeObject, py_obj) - if py_unicode.c_buffer: - lltype.free(py_unicode.c_buffer, flavor="raw") + if py_unicode.c_str: + lltype.free(py_unicode.c_str, flavor="raw") from pypy.module.cpyext.object import PyObject_dealloc + if py_unicode.c_defenc: + PyObject_dealloc(space, py_unicode.c_defenc) PyObject_dealloc(space, py_obj) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) @@ -205,12 +213,12 @@ """Return a pointer to the internal Py_UNICODE buffer of the object. 
ref has to be a PyUnicodeObject (not checked).""" ref_unicode = rffi.cast(PyUnicodeObject, ref) - if not ref_unicode.c_buffer: + if not ref_unicode.c_str: # Copy unicode buffer w_unicode = from_ref(space, ref) u = space.unicode_w(w_unicode) - ref_unicode.c_buffer = rffi.unicode2wcharp(u) - return ref_unicode.c_buffer + ref_unicode.c_str = rffi.unicode2wcharp(u) + return ref_unicode.c_str @cpython_api([PyObject], rffi.CWCHARP) def PyUnicode_AsUnicode(space, ref): @@ -241,7 +249,7 @@ string may or may not be 0-terminated. It is the responsibility of the caller to make sure that the wchar_t string is 0-terminated in case this is required by the application.""" - c_buffer = PyUnicode_AS_UNICODE(space, rffi.cast(PyObject, ref)) + c_str = PyUnicode_AS_UNICODE(space, rffi.cast(PyObject, ref)) c_size = ref.c_size # If possible, try to copy the 0-termination as well @@ -251,7 +259,7 @@ i = 0 while i < size: - buf[i] = c_buffer[i] + buf[i] = c_str[i] i += 1 if size > c_size: @@ -343,8 +351,15 @@ return PyUnicode_FromUnicode(space, wchar_p, length) @cpython_api([PyObject, CONST_STRING], PyObject) -def _PyUnicode_AsDefaultEncodedString(space, w_unicode, errors): - return PyUnicode_AsEncodedString(space, w_unicode, lltype.nullptr(rffi.CCHARP.TO), errors) +def _PyUnicode_AsDefaultEncodedString(space, ref, errors): + # Returns a borrowed reference. + py_uni = rffi.cast(PyUnicodeObject, ref) + if not py_uni.c_defenc: + py_uni.c_defenc = make_ref( + space, PyUnicode_AsEncodedString( + space, ref, + lltype.nullptr(rffi.CCHARP.TO), errors)) + return py_uni.c_defenc @cpython_api([CONST_STRING, Py_ssize_t, CONST_STRING, CONST_STRING], PyObject) def PyUnicode_Decode(space, s, size, encoding, errors): @@ -444,7 +459,7 @@ def PyUnicode_Resize(space, ref, newsize): # XXX always create a new string so far py_uni = rffi.cast(PyUnicodeObject, ref[0]) - if not py_uni.c_buffer: + if not py_uni.c_str: raise OperationError(space.w_SystemError, space.wrap( "PyUnicode_Resize called on already created string")) try: @@ -458,7 +473,7 @@ if oldsize < newsize: to_cp = oldsize for i in range(to_cp): - py_newuni.c_buffer[i] = py_uni.c_buffer[i] + py_newuni.c_str[i] = py_uni.c_str[i] Py_DecRef(space, ref[0]) ref[0] = rffi.cast(PyObject, py_newuni) return 0 From pypy.commits at gmail.com Mon Mar 21 13:40:01 2016 From: pypy.commits at gmail.com (plan_rich) Date: Mon, 21 Mar 2016 10:40:01 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: encode the offset into the core dump Message-ID: <56f031f1.83561c0a.a74c2.ffff9b88@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83224:d96b7e680539 Date: 2016-03-21 18:39 +0100 http://bitbucket.org/pypy/pypy/changeset/d96b7e680539/ Log: encode the offset into the core dump diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -178,7 +178,8 @@ end_offset = ops_offset[op2] dump = self.mc.copy_core_dump(self.mc.absolute_addr(), start_offset) - self.logger.write_marked(MARK_ASM, dump) + offset = self.logger.encode_le_16bit(start_offset) + self.logger.write_marked(MARK_ASM, offset + dump) def var_to_str(self, arg): try: From pypy.commits at gmail.com Mon Mar 21 14:08:01 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 21 Mar 2016 11:08:01 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Stub follow_symlinks support Message-ID: <56f03881.c9161c0a.b2f40.ffffa918@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83225:4039497984aa Date: 
2016-03-21 18:06 +0000 http://bitbucket.org/pypy/pypy/changeset/4039497984aa/ Log: Stub follow_symlinks support diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py --- a/pypy/interpreter/gateway.py +++ b/pypy/interpreter/gateway.py @@ -66,6 +66,13 @@ """NOT_RPYTHON""" raise NotImplementedError +def kwonly(arg_unwrapper): + """Mark argument as keyword-only. + + XXX: has no actual effect for now. + """ + return arg_unwrapper + class UnwrapSpecRecipe(object): "NOT_RPYTHON" diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -7,9 +7,10 @@ from rpython.rlib.rarithmetic import r_longlong, intmask from rpython.rlib.unroll import unrolling_iterable -from pypy.interpreter.gateway import unwrap_spec, WrappedDefault, Unwrapper -from pypy.interpreter.error import (OperationError, wrap_oserror, oefmt, - wrap_oserror2, strerror as _strerror) +from pypy.interpreter.gateway import ( + unwrap_spec, WrappedDefault, Unwrapper, kwonly) +from pypy.interpreter.error import ( + OperationError, wrap_oserror, oefmt, wrap_oserror2, strerror as _strerror) from pypy.interpreter.executioncontext import ExecutionContext @@ -327,8 +328,8 @@ else: return build_stat_result(space, st) - at unwrap_spec(dir_fd=DirFD) -def stat(space, w_path, dir_fd=DEFAULT_DIR_FD): + at unwrap_spec(dir_fd=DirFD, follow_symlinks=kwonly(bool)) +def stat(space, w_path, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): """stat(path, *, dir_fd=None, follow_symlinks=True) -> stat result Perform a stat system call on the given path. @@ -431,9 +432,12 @@ except OSError, e: raise wrap_oserror(space, e) - at unwrap_spec(mode=c_int, dir_fd=DirFD) -def access(space, w_path, mode, dir_fd=DEFAULT_DIR_FD): - """access(path, mode, *, dir_fd=None, effective_ids=False, follow_symlinks=True) + at unwrap_spec(mode=c_int, + dir_fd=DirFD, effective_ids=kwonly(bool), follow_symlinks=kwonly(bool)) +def access(space, w_path, mode, + dir_fd=DEFAULT_DIR_FD, effective_ids=True, follow_symlinks=True): + """\ +access(path, mode, *, dir_fd=None, effective_ids=False, follow_symlinks=True) Use the real uid/gid to test for access to a path. Returns True if granted, False otherwise. @@ -725,8 +729,8 @@ raise wrap_oserror(space, e) return space.newtuple([space.wrap(fd1), space.wrap(fd2)]) - at unwrap_spec(mode=c_int, dir_fd=DirFD) -def chmod(space, w_path, mode, dir_fd=DEFAULT_DIR_FD): + at unwrap_spec(mode=c_int, dir_fd=DirFD, follow_symlinks=kwonly(bool)) +def chmod(space, w_path, mode, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): """chmod(path, mode, *, dir_fd=None, follow_symlinks=True) Change the access permissions of a file. @@ -759,7 +763,8 @@ raise wrap_oserror(space, e) @unwrap_spec(src_dir_fd=DirFD, dst_dir_fd=DirFD) -def rename(space, w_old, w_new, src_dir_fd=DEFAULT_DIR_FD, dst_dir_fd=DEFAULT_DIR_FD): +def rename(space, w_old, w_new, + src_dir_fd=DEFAULT_DIR_FD, dst_dir_fd=DEFAULT_DIR_FD): """rename(src, dst, *, src_dir_fd=None, dst_dir_fd=None) Rename a file or directory. @@ -775,7 +780,8 @@ raise wrap_oserror(space, e) @unwrap_spec(src_dir_fd=DirFD, dst_dir_fd=DirFD) -def replace(space, w_old, w_new, src_dir_fd=DEFAULT_DIR_FD, dst_dir_fd=DEFAULT_DIR_FD): +def replace(space, w_old, w_new, + src_dir_fd=DEFAULT_DIR_FD, dst_dir_fd=DEFAULT_DIR_FD): """replace(src, dst, *, src_dir_fd=None, dst_dir_fd=None) Rename a file or directory, overwriting the destination. 
@@ -863,9 +869,13 @@ @unwrap_spec( src='fsencode', dst='fsencode', - src_dir_fd=DirFD, dst_dir_fd=DirFD) -def link(space, src, dst, src_dir_fd=DEFAULT_DIR_FD, dst_dir_fd=DEFAULT_DIR_FD): - """link(src, dst, *, src_dir_fd=None, dst_dir_fd=None, follow_symlinks=True) + src_dir_fd=DirFD, dst_dir_fd=DirFD, follow_symlinks=kwonly(bool)) +def link( + space, src, dst, + src_dir_fd=DEFAULT_DIR_FD, dst_dir_fd=DEFAULT_DIR_FD, + follow_symlinks=True): + """\ +link(src, dst, *, src_dir_fd=None, dst_dir_fd=None, follow_symlinks=True) Create a hard link to a file. @@ -885,7 +895,8 @@ @unwrap_spec(dir_fd=DirFD) -def symlink(space, w_src, w_dst, w_target_is_directory=None, dir_fd=DEFAULT_DIR_FD): +def symlink(space, w_src, w_dst, w_target_is_directory=None, + dir_fd=DEFAULT_DIR_FD): """symlink(src, dst, target_is_directory=False, *, dir_fd=None) Create a symbolic link pointing to src named dst. @@ -1082,8 +1093,8 @@ raise wrap_oserror(space, e) return space.wrap(ret) - at unwrap_spec(dir_fd=DirFD) -def utime(space, w_path, w_tuple, dir_fd=DEFAULT_DIR_FD): + at unwrap_spec(dir_fd=DirFD, follow_symlinks=kwonly(bool)) +def utime(space, w_path, w_tuple, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): """utime(path, times=None, *, ns=None, dir_fd=None, follow_symlinks=True) Set the access and modified time of path. @@ -1521,8 +1532,10 @@ raise wrap_oserror(space, e) return space.wrap(res) - at unwrap_spec(path='fsencode', uid=c_uid_t, gid=c_gid_t, dir_fd=DirFD) -def chown(space, path, uid, gid, dir_fd=DEFAULT_DIR_FD): + at unwrap_spec( + path='fsencode', uid=c_uid_t, gid=c_gid_t, + dir_fd=DirFD, follow_symlinks=kwonly(bool)) +def chown(space, path, uid, gid, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): """chown(path, uid, gid, *, dir_fd=None, follow_symlinks=True) Change the owner and group id of path to the numeric uid and gid. 
From pypy.commits at gmail.com Mon Mar 21 14:18:45 2016 From: pypy.commits at gmail.com (fijal) Date: Mon, 21 Mar 2016 11:18:45 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: fix some tests Message-ID: <56f03b05.2968c20a.84ddc.7164@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83226:043653d6cffe Date: 2016-03-21 20:17 +0200 http://bitbucket.org/pypy/pypy/changeset/043653d6cffe/ Log: fix some tests diff --git a/rpython/jit/metainterp/optimizeopt/dependency.py b/rpython/jit/metainterp/optimizeopt/dependency.py --- a/rpython/jit/metainterp/optimizeopt/dependency.py +++ b/rpython/jit/metainterp/optimizeopt/dependency.py @@ -3,7 +3,6 @@ from rpython.jit.metainterp import compile from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method from rpython.jit.metainterp.resoperation import (rop, GuardResOp, ResOperation) -from rpython.jit.metainterp.resume import Snapshot from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.history import (ConstPtr, ConstInt,Const, AbstractValue, AbstractFailDescr) diff --git a/rpython/jit/metainterp/optimizeopt/renamer.py b/rpython/jit/metainterp/optimizeopt/renamer.py --- a/rpython/jit/metainterp/optimizeopt/renamer.py +++ b/rpython/jit/metainterp/optimizeopt/renamer.py @@ -1,5 +1,4 @@ from rpython.jit.metainterp import resoperation -from rpython.jit.metainterp.resume import Snapshot class Renamer(object): def __init__(self): diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -33,7 +33,7 @@ compile_data = compile.SimpleCompileData(trace, call_pure_results) info, ops = self._do_optimize_loop(compile_data) - label_op = ResOperation(rop.LABEL, info.inputargs, -1) + label_op = ResOperation(rop.LABEL, info.inputargs) loop.inputargs = info.inputargs loop.operations = [label_op] + ops #print '\n'.join([str(o) for o in loop.operations]) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -497,18 +497,6 @@ final_descr = history.BasicFinalDescr() -class FakeFrame(object): - pc = 100 - - class jitcode: - index = 200 - - def __init__(self, boxes): - self.boxes = boxes - - def get_list_of_active_boxes(self, flag): - return self.boxes - class BaseTest(object): def parse(self, s, boxkinds=None, want_fail_descr=True, postprocess=None): @@ -561,29 +549,44 @@ if inp.type == 'i': return history.IntFrontendOp elif inp.type == 'r': - xxx + return history.RefFrontendOp else: assert inp.type == 'f' - xxx + return history.FloatFrontendOp def convert_loop_to_packed(self, loop, skip_last=False): - XXX # rewrite from rpython.jit.metainterp.opencoder import Trace + from rpython.jit.metainterp.test.test_opencoder import FakeFrame + + def get(a): + if isinstance(a, history.Const): + return a + return mapping[a] + + class jitcode: + index = 200 + inputargs = [self.pick_cls(inparg)(i) for i, inparg in enumerate(loop.inputargs)] + mapping = {} + for one, two in zip(loop.inputargs, inputargs): + mapping[one] = two trace = Trace(inputargs) ops = loop.operations if skip_last: ops = ops[:-1] for op in ops: - newop = trace.record_op(op.getopnum(), op.getarglist(), op.getdescr()) + newpos = 
trace.record_op(op.getopnum(), [get(arg) for arg in + op.getarglist()], op.getdescr()) if rop.is_guard(op.getopnum()): failargs = [] if op.getfailargs(): - failargs = op.getfailargs() - frame = FakeFrame(failargs) + failargs = [get(arg) for arg in op.getfailargs()] + frame = FakeFrame(100, jitcode, failargs) resume.capture_resumedata([frame], None, [], trace) - op.position = newop.position + if op.type != 'v': + newop = self.pick_cls(op)(newpos) + mapping[op] = newop return trace def unroll_and_optimize(self, loop, call_pure_results=None, diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py --- a/rpython/jit/metainterp/optimizeopt/vector.py +++ b/rpython/jit/metainterp/optimizeopt/vector.py @@ -8,7 +8,6 @@ import py import time -from rpython.jit.metainterp.resume import Snapshot from rpython.jit.metainterp.jitexc import NotAVectorizeableLoop, NotAProfitableLoop from rpython.jit.metainterp.compile import (CompileLoopVersionDescr, ResumeDescr) from rpython.jit.metainterp.history import (INT, FLOAT, VECTOR, ConstInt, ConstFloat, diff --git a/rpython/jit/metainterp/resumecode.py b/rpython/jit/metainterp/resumecode.py --- a/rpython/jit/metainterp/resumecode.py +++ b/rpython/jit/metainterp/resumecode.py @@ -67,6 +67,7 @@ value = -1 - value value >>= 1 return value, index +numb_next_item._always_inline_ = True def numb_next_n_items(numb, size, index): for i in range(size): diff --git a/rpython/jit/metainterp/test/test_resume.py b/rpython/jit/metainterp/test/test_resume.py --- a/rpython/jit/metainterp/test/test_resume.py +++ b/rpython/jit/metainterp/test/test_resume.py @@ -184,7 +184,7 @@ def execute_and_record(self, opnum, descr, *argboxes): resvalue = executor.execute(self.cpu, None, opnum, descr, *argboxes) - op = ResOperation(opnum, list(argboxes), -1, descr) + op = ResOperation(opnum, list(argboxes), descr=descr) setvalue(op, resvalue) self.trace.append((opnum, list(argboxes), resvalue, descr)) return op From pypy.commits at gmail.com Mon Mar 21 14:33:47 2016 From: pypy.commits at gmail.com (stefanor) Date: Mon, 21 Mar 2016 11:33:47 -0700 (PDT) Subject: [pypy-commit] pypy default: Support GNU/kFreeBSD Debian ports in vmprof Message-ID: <56f03e8b.83561c0a.a74c2.ffffb18a@mx.google.com> Author: Stefano Rivera Branch: Changeset: r83228:936b7e8d9b6c Date: 2016-03-21 14:32 -0400 http://bitbucket.org/pypy/pypy/changeset/936b7e8d9b6c/ Log: Support GNU/kFreeBSD Debian ports in vmprof diff --git a/rpython/rlib/rvmprof/src/vmprof_config.h b/rpython/rlib/rvmprof/src/vmprof_config.h --- a/rpython/rlib/rvmprof/src/vmprof_config.h +++ b/rpython/rlib/rvmprof/src/vmprof_config.h @@ -1,5 +1,5 @@ #define HAVE_SYS_UCONTEXT_H -#if defined(__FreeBSD__) +#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) #ifdef __i386__ #define PC_FROM_UCONTEXT uc_mcontext.mc_eip #else diff --git a/rpython/translator/c/src/thread_pthread.c b/rpython/translator/c/src/thread_pthread.c --- a/rpython/translator/c/src/thread_pthread.c +++ b/rpython/translator/c/src/thread_pthread.c @@ -37,7 +37,7 @@ # define THREAD_STACK_SIZE 0 /* use default stack size */ # endif -# if (defined(__APPLE__) || defined(__FreeBSD__)) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0 +# if (defined(__APPLE__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0 /* The default stack size for new threads on OSX is small enough that * we'll get hard crashes instead of 'maximum recursion depth exceeded' * exceptions. 
@@ -84,7 +84,7 @@ if (tss != 0) pthread_attr_setstacksize(&attrs, tss); #endif -#if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED) && !defined(__FreeBSD__) +#if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED) && !(defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) pthread_attr_setscope(&attrs, PTHREAD_SCOPE_SYSTEM); #endif From pypy.commits at gmail.com Mon Mar 21 14:33:45 2016 From: pypy.commits at gmail.com (stefanor) Date: Mon, 21 Mar 2016 11:33:45 -0700 (PDT) Subject: [pypy-commit] pypy default: Indent proprocessor logic, for readability Message-ID: <56f03e89.426dc20a.74986.7457@mx.google.com> Author: Stefano Rivera Branch: Changeset: r83227:f36e339f1a24 Date: 2016-03-21 14:25 -0400 http://bitbucket.org/pypy/pypy/changeset/f36e339f1a24/ Log: Indent proprocessor logic, for readability diff --git a/rpython/rlib/rvmprof/src/vmprof_config.h b/rpython/rlib/rvmprof/src/vmprof_config.h --- a/rpython/rlib/rvmprof/src/vmprof_config.h +++ b/rpython/rlib/rvmprof/src/vmprof_config.h @@ -12,10 +12,10 @@ #define PC_FROM_UCONTEXT uc_mcontext->__ss.__rip #endif #elif defined(__arm__) -#define PC_FROM_UCONTEXT uc_mcontext.arm_ip + #define PC_FROM_UCONTEXT uc_mcontext.arm_ip #elif defined(__linux) && defined(__i386) && defined(__GNUC__) -#define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_EIP] + #define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_EIP] #else -/* linux, gnuc */ -#define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_RIP] + /* linux, gnuc */ + #define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_RIP] #endif From pypy.commits at gmail.com Mon Mar 21 15:11:05 2016 From: pypy.commits at gmail.com (amauryfa) Date: Mon, 21 Mar 2016 12:11:05 -0700 (PDT) Subject: [pypy-commit] pypy default: Graft a9fccbdef513: Fix rposix module after bad merge Message-ID: <56f04749.82561c0a.518d.ffffbbfa@mx.google.com> Author: Amaury Forgeot d'Arc Branch: Changeset: r83229:51e1dd898fb8 Date: 2016-01-28 17:44 +0100 http://bitbucket.org/pypy/pypy/changeset/51e1dd898fb8/ Log: Graft a9fccbdef513: Fix rposix module after bad merge diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -22,6 +22,22 @@ from rpython.rlib import rwin32 from rpython.rlib.rwin32file import make_win32_traits +class CConfig: + _compilation_info_ = ExternalCompilationInfo( + includes=['sys/stat.h', + 'unistd.h', + 'fcntl.h'], + ) + for _name in """fchdir fchmod fchmodat fchown fchownat fexecve fdopendir + fpathconf fstat fstatat fstatvfs ftruncate futimens futimes + futimesat linkat lchflags lchmod lchown lstat lutimes + mkdirat mkfifoat mknodat openat readlinkat renameat + symlinkat unlinkat utimensat""".split(): + locals()['HAVE_%s' % _name.upper()] = rffi_platform.Has(_name) +cConfig = rffi_platform.configure(CConfig) +globals().update(cConfig) + + class CConstantErrno(CConstant): # these accessors are used when calling get_errno() or set_errno() # on top of CPython @@ -1024,6 +1040,13 @@ if not win32traits.MoveFile(path1, path2): raise rwin32.lastSavedWindowsError() + at specialize.argtype(0, 1) +def replace(path1, path2): + if os.name == 'nt': + raise NotImplementedError( + 'On windows, os.replace() should overwrite the destination') + return rename(path1, path2) + #___________________________________________________________________ c_mkfifo = external('mkfifo', [rffi.CCHARP, rffi.MODE_T], rffi.INT, diff --git a/rpython/rlib/rtime.py b/rpython/rlib/rtime.py --- a/rpython/rlib/rtime.py +++ b/rpython/rlib/rtime.py @@ -9,7 +9,6 @@ from rpython.rtyper.tool import rffi_platform from rpython.rtyper.lltypesystem 
import rffi, lltype from rpython.rlib.objectmodel import register_replacement_for -from rpython.rlib import jit from rpython.rlib.rarithmetic import intmask, UINT_MAX from rpython.rlib import rposix @@ -170,28 +169,30 @@ [lltype.Signed, lltype.Ptr(TIMESPEC)], rffi.INT, releasegil=False, compilation_info=eci_with_lrt) -else: +if need_rusage: RUSAGE = RUSAGE RUSAGE_SELF = RUSAGE_SELF or 0 c_getrusage = external('getrusage', [rffi.INT, lltype.Ptr(RUSAGE)], - lltype.Void, + rffi.INT, releasegil=False) +def win_perf_counter(): + a = lltype.malloc(A, flavor='raw') + if state.divisor == 0.0: + QueryPerformanceCounter(a) + state.counter_start = a[0] + QueryPerformanceFrequency(a) + state.divisor = float(a[0]) + QueryPerformanceCounter(a) + diff = a[0] - state.counter_start + lltype.free(a, flavor='raw') + return float(diff) / state.divisor + @replace_time_function('clock') - at jit.dont_look_inside # the JIT doesn't like FixedSizeArray def clock(): if _WIN32: - a = lltype.malloc(A, flavor='raw') - if state.divisor == 0.0: - QueryPerformanceCounter(a) - state.counter_start = a[0] - QueryPerformanceFrequency(a) - state.divisor = float(a[0]) - QueryPerformanceCounter(a) - diff = a[0] - state.counter_start - lltype.free(a, flavor='raw') - return float(diff) / state.divisor + return win_perf_counter() elif CLOCK_PROCESS_CPUTIME_ID is not None: with lltype.scoped_alloc(TIMESPEC) as a: c_clock_gettime(CLOCK_PROCESS_CPUTIME_ID, a) From pypy.commits at gmail.com Mon Mar 21 15:22:06 2016 From: pypy.commits at gmail.com (arigo) Date: Mon, 21 Mar 2016 12:22:06 -0700 (PDT) Subject: [pypy-commit] pypy sandbox-lib: in-progress: write the API that I think would make sense, with lots of Message-ID: <56f049de.426dc20a.74986.ffff8767@mx.google.com> Author: Armin Rigo Branch: sandbox-lib Changeset: r83230:1adffa44a320 Date: 2016-03-21 20:21 +0100 http://bitbucket.org/pypy/pypy/changeset/1adffa44a320/ Log: in-progress: write the API that I think would make sense, with lots of comments diff --git a/rpython/translator/rsandbox/rsandbox.py b/rpython/translator/rsandbox/rsandbox.py --- a/rpython/translator/rsandbox/rsandbox.py +++ b/rpython/translator/rsandbox/rsandbox.py @@ -24,50 +24,21 @@ assert lltype.typeOf(ll_func) == FUNC, ( "seen two sandboxed functions called %r with different " "signatures:\n %r\n %r" % (fnname, FUNC, lltype.typeOf(ll_func))) - return 'rsandbox_' + fnname + return 'rsandbox_fnptr_' + fnname def add_sandbox_files(database, eci, targetdir): - c_header = [''' -#ifndef _RSANDBOX_H_ -#define _RSANDBOX_H_ + c_part_header = py.path.local(__file__).join('..', 'src', 'part.h').read() + c_part_source = py.path.local(__file__).join('..', 'src', 'part.c').read() + c_header = [c_part_header] + c_source = [c_part_source] -#ifndef RPY_SANDBOX_EXPORTED -/* Common definitions when including this file from an external C project */ - -#include -#include - -#define RPY_SANDBOX_EXPORTED extern - -typedef long Signed; -typedef unsigned long Unsigned; - -#endif - -/* The list of 'rsandbox_*' function pointers is automatically - generated. Most of these function pointers are initialized to - point to a function that aborts the sandboxed execution. The - sandboxed program cannot, by default, use any of them. A few - exceptions are provided, where the default implementation returns a - safe default; for example rsandbox_getenv(). 
-*/ -'''] - c_source = [''' -#include "common_header.h" -#include "rsandbox.h" -#include - -'''] - - default_h = py.path.local(__file__).join('..', 'default.h').read() - c_source.append(default_h) - present = set(re.findall(r'\brsand_def_([a-zA-Z0-9_]+)[(]', default_h)) + present = set(re.findall(r'\brsand_def_([a-zA-Z0-9_]+)[(]', c_part_source)) fnnames = database._sandboxlib_fnnames for fnname in sorted(fnnames): FUNC = fnnames[fnname] - rsandboxname = 'rsandbox_' + fnname + rsandboxname = 'rsandbox_fnptr_' + fnname vardecl = cdecl(database.gettype(lltype.Ptr(FUNC)), rsandboxname) c_header.append('RPY_SANDBOX_EXPORTED %s;\n' % (vardecl,)) diff --git a/rpython/translator/rsandbox/default.h b/rpython/translator/rsandbox/src/part.c rename from rpython/translator/rsandbox/default.h rename to rpython/translator/rsandbox/src/part.c --- a/rpython/translator/rsandbox/default.h +++ b/rpython/translator/rsandbox/src/part.c @@ -1,4 +1,8 @@ -/*** translator/rsandbox/default.h ***/ +/*** rpython/translator/rsandbox/src/part.c ***/ + +#include "common_header.h" +#include "rsandbox.h" +#include /* This is called by most default implementations of 'rsandbox_*' */ diff --git a/rpython/translator/rsandbox/src/part.h b/rpython/translator/rsandbox/src/part.h new file mode 100644 --- /dev/null +++ b/rpython/translator/rsandbox/src/part.h @@ -0,0 +1,172 @@ +/*** rpython/translator/rsandbox/src/part.h ***/ + +#ifndef _RSANDBOX_H_ +#define _RSANDBOX_H_ + +#ifndef RPY_SANDBOX_EXPORTED +/* Common definitions when including this file from an external C project */ + +#include +#include + +#define RPY_SANDBOX_EXPORTED extern + +typedef long Signed; +typedef unsigned long Unsigned; + +#endif + + +/* *********************************************************** + + WARNING: Python is not meant to be a safe language. For example, + think about making a custom code object with a random string and + trying to interpret that. A sandboxed PyPy contains extra safety + checks that can detect such invalid operations before they cause + problems. When such a case is detected, THE WHOLE PROCESS IS + ABORTED right now. In the future, there should be a setjmp/longjmp + alternative to this, but the details need a bit of care (e.g. it + would still create memory leaks). + + For now, you have to accept that the process can be aborted if + given malicious code. Also, running several Python sources from + different sources in the same process is not recommended---there is + only one global state: malicious code can easily mangle the state + of the Python interpreter, influencing subsequent runs. Unless you + are fine with both issues, you MUST run Python from subprocesses, + not from your main program. + + Multi-threading issues: DO NOT USE FROM SEVERAL THREADS AT THE SAME + TIME! You need a lock. If you use subprocesses, they will likely + be single-threaded anyway. (This issue might be fixed in the + future. Note that the sandboxed Python itself doesn't have the + possibility of starting threads.) +*/ + + +/* This configures the maximum heap size allowed to the Python + interpreter. It only accounts for GC-tracked objects, so actual + memory usage can be larger. (It should hopefully never be more + than about twice the value, but for the paranoid, you should not + use this. You should do setrlimit() to bound the total RAM usage + of the subprocess. Similarly, you have no direct way to bound the + amount of time spent inside Python, but it is easy to set up an + alarm signal with alarm().) 
+*/ +void rsandbox_set_heap_size(size_t maximum); + + +/* Entry point: rsandbox_open() loads the given source code inside a + new Python module. The source code should define the interesting + Python functions, but cannot actually compute stuff yet: you cannot + pass here arguments or return values. rsandbox_open() returns a + module pointer if execution succeeds, or NULL if it gets an + exception. The pointer can be used in rsandbox_call(). It can + optionally be freed with rsandbox_close(). + + You can use this function with source code that is assembled from + untrusted sources, but it is recommended to pass a constant string + here. You can pass extra arguments with 'rpython_push_*()', + declared below; they are visible as 'args[0]', 'args[1]', and so + on. This allows you to do things like this: + + rsandbox_module_t *compile_expression(const char *expression) + { + rsandbox_push_string(expression); // 'expression' is untrusted + return rsandbox_open( + "code = compile(args[0], '', 'eval')\n" + "def evaluate(n):\n" + " return eval(code, {'n': n})\n") + } + + long safely_evaluate(rsandbox_module_t *mod, long n_value) + { + rsandbox_push_long(n_value); + rsandbox_call(mod, "evaluate"); // ignore exceptions + return rsandbox_result_long(); // result; if any problem, will be 0 + } +*/ +typedef struct rsandbox_module_s rsandbox_module_t; +RPY_SANDBOX_EXPORTED rsandbox_module_t *rsandbox_open(const char *src); +RPY_SANDBOX_EXPORTED void rsandbox_close(rsandbox_module_t *mod); + +/* To call one of the Python functions defined in the module, first + push the arguments one by one with rsandbox_push_*(), then call + rsandbox_call(). If an exception occur, -1 is returned. + + rsandbox_push_rw_buffer() is a way to pass read-write data. From + the Python side, this will pass a read-write buffer object. After + rsandbox_call() returns, the buffer becomes invalid. + (rsandbox_push_rw_buffer() is not very useful for rsandbox_open(): + the buffer becomes invalid as soon as rsandbox_open() returns.) +*/ +RPY_SANDBOX_EXPORTED void rsandbox_push_long(long); +RPY_SANDBOX_EXPORTED void rsandbox_push_double(double); +RPY_SANDBOX_EXPORTED void rsandbox_push_string(const char *); +RPY_SANDBOX_EXPORTED void rsandbox_push_string_and_size(const char *, size_t); +RPY_SANDBOX_EXPORTED void rsandbox_push_none(void); +RPY_SANDBOX_EXPORTED void rsandbox_push_rw_buffer(char *, size_t); + +RPY_SANDBOX_EXPORTED int rsandbox_call(rsandbox_module_t *mod, + const char *func_name); + +/* Returns the result of the previous rsandbox_call() if the Python + function returned an 'int' object. Otherwise, returns 0. (You + MUST NOT assume anything about the 'long': careful with malicious + code returning results like sys.maxint or -sys.maxint-1.) */ +RPY_SANDBOX_EXPORTED long rsandbox_result_long(void); + +/* Returns the result of the previous rsandbox_call() if the Python + function returned an 'int' or 'float' object. Otherwise, 0.0. + (You MUST NOT assume anything about the 'double': careful with + malicious code returning results like inf, nan, or 1e-323.) */ +RPY_SANDBOX_EXPORTED double rsandbox_result_double(void); + +/* Returns the length of the string returned in the previous + rsandbox_call(). If it was not a string, returns 0. */ +RPY_SANDBOX_EXPORTED size_t rsandbox_result_string_length(void); + +/* Returns the data in the string. This function always writes an + additional '\0'. If the string is longer than 'bufsize-1', it is + truncated to 'bufsize-1' characters. 
+ + For small human-readable strings you can call + rsandbox_result_string() with some fixed maximum size. You get a + regular null-terminated 'char *' string. (If it contains embedded + '\0', it will appear truncated; if the Python function did not + return a string at all, it will be completely empty; but anyway + you MUST be ready to handle any malformed string at all.) + + For strings of larger sizes or strings that can meaningfully + contain embedded '\0', you should allocate a 'buf' of size + 'rsandbox_result_string_length() + 1'. + + To repeat: Be careful when reading strings from Python! They can + contain any character, so be sure to escape them correctly (or + reject them outright) if, for example, you are passing them + further. Malicious code can return any string. Your code must be + ready for anything. Err on the side of caution. +*/ +RPY_SANDBOX_EXPORTED void rsandbox_result_string(char *buf, size_t bufsize); + +/* When an exception occurred in rsandbox_open() or rsandbox_call(), + return more information as a string. Same rules as + rsandbox_result_string(). (Careful, you MUST NOT assume that the + string is well-formed: malicious code can make it contain anything. + If you are copying it to a web page, for example, then a good idea + is to replace any character not in a whitelist with '?'.) +*/ +RPY_SANDBOX_EXPORTED void rsandbox_last_exception(char *buf, size_t bufsize, + int include_traceback); + + +/************************************************************/ + + +/* The list of 'rsandbox_fnptr_*' function pointers is automatically + generated. Most of these function pointers are initialized to + point to a function that aborts the sandboxed execution. The + sandboxed program cannot, by default, use any of them. A few + exceptions are provided, where the default implementation returns a + safe default (for example rsandbox_fnptr_getenv()). +*/ From pypy.commits at gmail.com Mon Mar 21 15:28:20 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 21 Mar 2016 12:28:20 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: create a wrapper class to call os_thread.start_new_thread from PyThread_start_new_thread Message-ID: <56f04b54.e6ebc20a.bf07d.ffff87d1@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83231:ce2053a9cdeb Date: 2016-03-21 20:21 +0200 http://bitbucket.org/pypy/pypy/changeset/ce2053a9cdeb/ Log: create a wrapper class to call os_thread.start_new_thread from PyThread_start_new_thread diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -55,12 +55,12 @@ return 1 thread_func = lltype.Ptr(lltype.FuncType([rffi.VOIDP], lltype.Void)) - at cpython_api([thread_func, rffi.VOIDP], rffi.INT_real, error=-1) + at cpython_api([thread_func, rffi.VOIDP], rffi.INT_real, error=-1, gil='release') def PyThread_start_new_thread(space, func, arg): from pypy.module.thread import os_thread - w_args = space.newtuple([arg]) - XXX # How to wrap func as a space.callable ? 
- os_thread.start_new_thread(space, func, w_args) + w_args = space.newtuple([space.wrap(rffi.cast(lltype.Signed, arg)),]) + w_func = os_thread.W_WrapThreadFunc(func) + os_thread.start_new_thread(space, w_func, w_args) return 0 # XXX: might be generally useful diff --git a/pypy/module/thread/os_thread.py b/pypy/module/thread/os_thread.py --- a/pypy/module/thread/os_thread.py +++ b/pypy/module/thread/os_thread.py @@ -6,7 +6,9 @@ from rpython.rlib import rthread from pypy.module.thread.error import wrap_thread_error from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.gateway import unwrap_spec, Arguments +from pypy.interpreter.gateway import unwrap_spec, Arguments, interp2app +from pypy.interpreter.baseobjspace import W_Root +from pypy.interpreter.typedef import TypeDef # Here are the steps performed to start a new thread: # @@ -161,6 +163,24 @@ if w_threading is not None: space.call_method(w_threading, "_after_fork") +class W_WrapThreadFunc(W_Root): + ''' Wrap a cpyext.pystate.thread_func, which + has the signature void func(void *) + ''' + def __init__(self, func): + self.func = func + + def descr_call(self, space, w_arg): + from rpython.rtyper.lltypesystem import rffi + try: + arg = rffi.cast(rffi.VOIDP, space.int_w(w_arg)) + self.func(arg) + except Exception as e: + import pdb;pdb.set_trace() + +W_WrapThreadFunc.typedef = TypeDef("hiddenclass", + __call__ = interp2app(W_WrapThreadFunc.descr_call), +) def start_new_thread(space, w_callable, w_args, w_kwargs=None): """Start a new thread and return its identifier. The thread will call the From pypy.commits at gmail.com Mon Mar 21 15:28:21 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 21 Mar 2016 12:28:21 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: add a bit of context to print statement Message-ID: <56f04b55.465ec20a.90fc6.ffff8927@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83232:ead04fcd08d5 Date: 2016-03-21 21:13 +0200 http://bitbucket.org/pypy/pypy/changeset/ead04fcd08d5/ Log: add a bit of context to print statement diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -841,7 +841,7 @@ ob = rawrefcount.next_dead(PyObject) if not ob: break - print ob + print 'deallocating PyObject', ob _Py_Dealloc(space, ob) print 'dealloc_trigger DONE' return "RETRY" From pypy.commits at gmail.com Mon Mar 21 17:01:19 2016 From: pypy.commits at gmail.com (mjacob) Date: Mon, 21 Mar 2016 14:01:19 -0700 (PDT) Subject: [pypy-commit] pypy py3k: Remove unused functions, classes and constants from builtin _imp module. Message-ID: <56f0611f.8216c20a.704ee.ffffa8a6@mx.google.com> Author: Manuel Jacob Branch: py3k Changeset: r83233:0456e4ccb47a Date: 2016-03-21 21:59 +0100 http://bitbucket.org/pypy/pypy/changeset/0456e4ccb47a/ Log: Remove unused functions, classes and constants from builtin _imp module. These are already implemented in the pure Python imp module. `get_magic` and `get_tag` have to stay because they are used by PyPy additions. 
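For illustration only, a pure-Python equivalent of one of the removed entry points; the stdlib's imp.py does essentially this on top of the types module (a sketch, not the exact shipped source):

    import types

    def new_module(name):
        # same behaviour as the removed interp-level _imp.new_module():
        # create a fresh module object without touching sys.modules
        return types.ModuleType(name)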
diff --git a/pypy/module/imp/__init__.py b/pypy/module/imp/__init__.py --- a/pypy/module/imp/__init__.py +++ b/pypy/module/imp/__init__.py @@ -8,35 +8,22 @@ applevel_name = '_imp' interpleveldefs = { - 'SEARCH_ERROR': 'space.wrap(importing.SEARCH_ERROR)', - 'PY_SOURCE': 'space.wrap(importing.PY_SOURCE)', - 'PY_COMPILED': 'space.wrap(importing.PY_COMPILED)', - 'C_EXTENSION': 'space.wrap(importing.C_EXTENSION)', - 'PKG_DIRECTORY': 'space.wrap(importing.PKG_DIRECTORY)', - 'C_BUILTIN': 'space.wrap(importing.C_BUILTIN)', - 'PY_FROZEN': 'space.wrap(importing.PY_FROZEN)', - 'IMP_HOOK': 'space.wrap(importing.IMP_HOOK)', - 'get_suffixes': 'interp_imp.get_suffixes', 'extension_suffixes': 'interp_imp.extension_suffixes', 'get_magic': 'interp_imp.get_magic', 'get_tag': 'interp_imp.get_tag', 'load_dynamic': 'interp_imp.load_dynamic', - 'new_module': 'interp_imp.new_module', 'init_builtin': 'interp_imp.init_builtin', 'init_frozen': 'interp_imp.init_frozen', 'is_builtin': 'interp_imp.is_builtin', 'is_frozen': 'interp_imp.is_frozen', 'get_frozen_object': 'interp_imp.get_frozen_object', 'is_frozen_package': 'interp_imp.is_frozen_package', - 'NullImporter': 'importing.W_NullImporter', 'lock_held': 'interp_imp.lock_held', 'acquire_lock': 'interp_imp.acquire_lock', 'release_lock': 'interp_imp.release_lock', - 'cache_from_source': 'interp_imp.cache_from_source', - 'source_from_cache': 'interp_imp.source_from_cache', '_fix_co_filename': 'interp_imp.fix_co_filename', } diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py --- a/pypy/module/imp/importing.py +++ b/pypy/module/imp/importing.py @@ -20,17 +20,6 @@ _WIN32 = sys.platform == 'win32' -SEARCH_ERROR = 0 -PY_SOURCE = 1 -PY_COMPILED = 2 -C_EXTENSION = 3 -# PY_RESOURCE = 4 -PKG_DIRECTORY = 5 -C_BUILTIN = 6 -PY_FROZEN = 7 -# PY_CODERESOURCE = 8 -IMP_HOOK = 9 - SO = '.pyd' if _WIN32 else '.so' PREFIX = 'pypy3-' DEFAULT_SOABI = '%s%d%d' % ((PREFIX,) + PYPY_VERSION[:2]) @@ -104,40 +93,6 @@ def as_unicode(self): return self.path -class W_NullImporter(W_Root): - def __init__(self, space): - pass - - def descr_init(self, space, w_path): - self._descr_init(space, w_path, _WIN32) - - @specialize.arg(3) - def _descr_init(self, space, w_path, win32): - path = space.unicode0_w(w_path) if win32 else space.fsencode_w(w_path) - if not path: - raise OperationError(space.w_ImportError, space.wrap( - "empty pathname")) - - # Directory should not exist - try: - st = rposix_stat.stat(_WIN32Path(path) if win32 else path) - except OSError: - pass - else: - if stat.S_ISDIR(st.st_mode): - raise OperationError(space.w_ImportError, space.wrap( - "existing directory")) - - def find_module_w(self, space, __args__): - return space.wrap(None) - -W_NullImporter.typedef = TypeDef( - 'imp.NullImporter', - __new__=generic_new_descr(W_NullImporter), - __init__=interp2app(W_NullImporter.descr_init), - find_module=interp2app(W_NullImporter.find_module_w), - ) - def _prepare_module(space, w_mod, filename, pkgdir): w = space.wrap space.sys.setmodule(w_mod) diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py --- a/pypy/module/imp/interp_imp.py +++ b/pypy/module/imp/interp_imp.py @@ -12,19 +12,6 @@ from pypy.interpreter.streamutil import wrap_streamerror -def get_suffixes(space): - w = space.wrap - suffixes_w = [] - if importing.has_so_extension(space): - suffixes_w.append( - space.newtuple([w(importing.get_so_extension(space)), - w('rb'), w(importing.C_EXTENSION)])) - suffixes_w.extend([ - space.newtuple([w('.py'), w('U'), w(importing.PY_SOURCE)]), - 
space.newtuple([w('.pyc'), w('rb'), w(importing.PY_COMPILED)]), - ]) - return space.newlist(suffixes_w) - def extension_suffixes(space): suffixes_w = [] if space.config.objspace.usemodules.cpyext: @@ -77,9 +64,6 @@ return importing.check_sys_modules(space, w_modulename) -def new_module(space, w_name): - return space.wrap(Module(space, w_name, add_package=False)) - def init_builtin(space, w_name): name = space.str0_w(w_name) if name not in space.builtin_modules: @@ -135,34 +119,6 @@ importing.getimportlock(space).reinit_lock() @unwrap_spec(pathname='fsencode') -def cache_from_source(space, pathname, w_debug_override=None): - """cache_from_source(path, [debug_override]) -> path - Given the path to a .py file, return the path to its .pyc/.pyo file. - - The .py file does not need to exist; this simply returns the path to the - .pyc/.pyo file calculated as if the .py file were imported. The extension - will be .pyc unless __debug__ is not defined, then it will be .pyo. - - If debug_override is not None, then it must be a boolean and is taken as - the value of __debug__ instead.""" - return space.fsdecode(space.wrapbytes( - importing.make_compiled_pathname(pathname))) - - at unwrap_spec(pathname='fsencode') -def source_from_cache(space, pathname): - """source_from_cache(path) -> path - Given the path to a .pyc./.pyo file, return the path to its .py file. - - The .pyc/.pyo file does not need to exist; this simply returns the path to - the .py file calculated to correspond to the .pyc/.pyo file. If path - does not conform to PEP 3147 format, ValueError will be raised.""" - sourcename = importing.make_source_pathname(pathname) - if sourcename is None: - raise oefmt(space.w_ValueError, - "Not a PEP 3147 pyc path: %s", pathname) - return space.fsdecode(space.wrapbytes(sourcename)) - - at unwrap_spec(pathname='fsencode') def fix_co_filename(space, w_code, pathname): code_w = space.interp_w(PyCode, w_code) importing.update_code_filenames(space, code_w, pathname) From pypy.commits at gmail.com Mon Mar 21 17:32:18 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 21 Mar 2016 14:32:18 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: add test that hangs at rgil.acquire, passes with -A Message-ID: <56f06862.6672c20a.220e2.ffffb93d@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83234:80601779330a Date: 2016-03-21 23:31 +0200 http://bitbucket.org/pypy/pypy/changeset/80601779330a/ Log: add test that hangs at rgil.acquire, passes with -A diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -1,4 +1,4 @@ -import py +import py, pytest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from pypy.module.cpyext.test.test_api import BaseApiTest from rpython.rtyper.lltypesystem.lltype import nullptr @@ -26,6 +26,29 @@ # Should compile at least module.test() + @pytest.mark.xfail(reason='hangs at rgil.acquire', run=False) + def test_gilstate(self): + module = self.import_extension('foo', [ + ("double_ensure", "METH_O", + ''' + PyGILState_STATE state0, state1; + int val = PyLong_AsLong(args); + PyEval_InitThreads(); + state0 = PyGILState_Ensure(); /* hangs here */ + if (val != 0) + { + state1 = PyGILState_Ensure(); + PyGILState_Release(state1); + } + PyGILState_Release(state0); + Py_RETURN_NONE; + '''), + ]) + module.double_ensure(0) + print '0 ok' + module.double_ensure(1) + print '1 ok' + def test_thread_state_get(self): module = 
self.import_extension('foo', [ @@ -48,18 +71,20 @@ def test_basic_threadstate_dance(self): if self.runappdirect: - py.test.xfail('segfault: PyThreadState_Get: no current thread') + py.test.xfail('segfault: on cpython cannot Get() a NULL tstate') module = self.import_extension('foo', [ ("dance", "METH_NOARGS", """ PyThreadState *old_tstate, *new_tstate; + PyEval_InitThreads(); + old_tstate = PyThreadState_Swap(NULL); if (old_tstate == NULL) { return PyLong_FromLong(0); } - new_tstate = PyThreadState_Get(); + new_tstate = PyThreadState_Get(); /* fails on cpython */ if (new_tstate != NULL) { return PyLong_FromLong(1); } From pypy.commits at gmail.com Mon Mar 21 21:26:24 2016 From: pypy.commits at gmail.com (pjenvey) Date: Mon, 21 Mar 2016 18:26:24 -0700 (PDT) Subject: [pypy-commit] pypy stdlib-2.7.11: skip a cpython impl detail Message-ID: <56f09f40.55031c0a.717d2.1d5a@mx.google.com> Author: Philip Jenvey Branch: stdlib-2.7.11 Changeset: r83235:54b078e95307 Date: 2016-03-21 18:15 -0700 http://bitbucket.org/pypy/pypy/changeset/54b078e95307/ Log: skip a cpython impl detail diff --git a/lib-python/2.7/test/test_base64.py b/lib-python/2.7/test/test_base64.py --- a/lib-python/2.7/test/test_base64.py +++ b/lib-python/2.7/test/test_base64.py @@ -79,8 +79,10 @@ eq(base64.b64encode('\xd3V\xbeo\xf7\x1d', altchars='*$'), '01a*b$cd') # Non-bytes eq(base64.b64encode(bytearray('abcd')), 'YWJjZA==') - self.assertRaises(TypeError, base64.b64encode, - '\xd3V\xbeo\xf7\x1d', altchars=bytearray('*$')) + if test_support.check_impl_detail(): + # only raises in CPython's optional strop.maketrans + self.assertRaises(TypeError, base64.b64encode, + '\xd3V\xbeo\xf7\x1d', altchars=bytearray('*$')) # Test standard alphabet eq(base64.standard_b64encode("www.python.org"), "d3d3LnB5dGhvbi5vcmc=") eq(base64.standard_b64encode("a"), "YQ==") From pypy.commits at gmail.com Mon Mar 21 21:26:26 2016 From: pypy.commits at gmail.com (pjenvey) Date: Mon, 21 Mar 2016 18:26:26 -0700 (PDT) Subject: [pypy-commit] pypy stdlib-2.7.11: implement a getter for parser's namespace_prefixes Message-ID: <56f09f42.07b71c0a.3bb3.1f9f@mx.google.com> Author: Philip Jenvey Branch: stdlib-2.7.11 Changeset: r83236:a3c0a3f30615 Date: 2016-03-21 18:25 -0700 http://bitbucket.org/pypy/pypy/changeset/a3c0a3f30615/ Log: implement a getter for parser's namespace_prefixes diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -429,6 +429,7 @@ self.returns_unicode = True self.ordered_attributes = False self.specified_attributes = False + self.ns_prefixes = False self.handlers = [None] * NB_HANDLERS @@ -620,12 +621,12 @@ def get_namespace_prefixes(self, space): - raise OperationError(space.w_AttributeError, - space.wrap("not implemented: reading namespace_prefixes")) + return space.wrap(self.ns_prefixes) @unwrap_spec(value=int) def set_namespace_prefixes(self, space, value): - XML_SetReturnNSTriplet(self.itself, bool(value)) + self.ns_prefixes = bool(value) + XML_SetReturnNSTriplet(self.itself, self.ns_prefixes) # Parse methods diff --git a/pypy/module/pyexpat/test/test_parser.py b/pypy/module/pyexpat/test/test_parser.py --- a/pypy/module/pyexpat/test/test_parser.py +++ b/pypy/module/pyexpat/test/test_parser.py @@ -20,6 +20,15 @@ pyexpat.ExpatError("error") + def test_attributes(self): + import pyexpat + p = pyexpat.ParserCreate() + assert p.buffer_text is False + assert p.namespace_prefixes is False + assert p.returns_unicode is True + 
assert p.ordered_attributes is False + assert p.specified_attributes is False + def test_version(self): import pyexpat assert isinstance(pyexpat.__version__, str) From pypy.commits at gmail.com Mon Mar 21 21:59:52 2016 From: pypy.commits at gmail.com (pjenvey) Date: Mon, 21 Mar 2016 18:59:52 -0700 (PDT) Subject: [pypy-commit] pypy stdlib-2.7.11: bytearray buffers don't support get_raw_address, use a buffer that does Message-ID: <56f0a718.85b01c0a.eaa4a.2584@mx.google.com> Author: Philip Jenvey Branch: stdlib-2.7.11 Changeset: r83237:86aeb7d656ab Date: 2016-03-21 18:58 -0700 http://bitbucket.org/pypy/pypy/changeset/86aeb7d656ab/ Log: bytearray buffers don't support get_raw_address, use a buffer that does diff --git a/lib-python/2.7/ctypes/test/test_bitfields.py b/lib-python/2.7/ctypes/test/test_bitfields.py --- a/lib-python/2.7/ctypes/test/test_bitfields.py +++ b/lib-python/2.7/ctypes/test/test_bitfields.py @@ -271,12 +271,13 @@ _fields_ = [("a", c_uint32, 24), ("b", c_uint32, 4), ("c", c_uint32, 4)] - b = bytearray(4) + import array + b = array.array("c", '\x00' * 4) x = Little.from_buffer(b) x.a = 0xabcdef x.b = 1 x.c = 2 - self.assertEqual(b, b'\xef\xcd\xab\x21') + self.assertEqual(b.tostring(), b'\xef\xcd\xab\x21') @need_symbol('c_uint32') def test_uint32_swap_big_endian(self): @@ -285,12 +286,13 @@ _fields_ = [("a", c_uint32, 24), ("b", c_uint32, 4), ("c", c_uint32, 4)] - b = bytearray(4) + import array + b = array.array("c", '\x00' * 4) x = Big.from_buffer(b) x.a = 0xabcdef x.b = 1 x.c = 2 - self.assertEqual(b, b'\xab\xcd\xef\x12') + self.assertEqual(b.tostring(), b'\xab\xcd\xef\x12') if __name__ == "__main__": unittest.main() From pypy.commits at gmail.com Mon Mar 21 22:42:37 2016 From: pypy.commits at gmail.com (rlamy) Date: Mon, 21 Mar 2016 19:42:37 -0700 (PDT) Subject: [pypy-commit] pypy rposix-for-3: Begin implementing missing rposix functions: faccessat(), linkat() Message-ID: <56f0b11d.07b71c0a.3bb3.2ab8@mx.google.com> Author: Ronan Lamy Branch: rposix-for-3 Changeset: r83238:feaaeac012f4 Date: 2016-03-22 02:37 +0000 http://bitbucket.org/pypy/pypy/changeset/feaaeac012f4/ Log: Begin implementing missing rposix functions: faccessat(), linkat() diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -22,21 +22,6 @@ from rpython.rlib import rwin32 from rpython.rlib.rwin32file import make_win32_traits -class CConfig: - _compilation_info_ = ExternalCompilationInfo( - includes=['sys/stat.h', - 'unistd.h', - 'fcntl.h'], - ) - for _name in """fchdir fchmod fchmodat fchown fchownat fexecve fdopendir - fpathconf fstat fstatat fstatvfs ftruncate futimens futimes - futimesat linkat lchflags lchmod lchown lstat lutimes - mkdirat mkfifoat mknodat openat readlinkat renameat - symlinkat unlinkat utimensat""".split(): - locals()['HAVE_%s' % _name.upper()] = rffi_platform.Has(_name) -cConfig = rffi_platform.configure(CConfig) -globals().update(cConfig) - class CConstantErrno(CConstant): # these accessors are used when calling get_errno() or set_errno() @@ -1739,3 +1724,64 @@ def getcontroller(self): from rpython.rlib.rposix_environ import OsEnvironController return OsEnvironController() + + +# ____________________________________________________________ +# Support for f... 
and ...at families of POSIX functions + +class CConfig: + _compilation_info_ = ExternalCompilationInfo( + includes=['sys/stat.h', + 'unistd.h', + 'fcntl.h'], + ) + AT_FDCWD = rffi_platform.DefinedConstantInteger('AT_FDCWD') + AT_SYMLINK_NOFOLLOW = rffi_platform.DefinedConstantInteger('AT_SYMLINK_NOFOLLOW') + AT_EACCESS = rffi_platform.DefinedConstantInteger('AT_EACCESS') + + for _name in """faccessat fchdir fchmod fchmodat fchown fchownat fexecve + fdopendir fpathconf fstat fstatat fstatvfs ftruncate + futimens futimes futimesat linkat chflags lchflags lchmod lchown + lstat lutimes mkdirat mkfifoat mknodat openat readlinkat renameat + symlinkat unlinkat utimensat""".split(): + locals()['HAVE_%s' % _name.upper()] = rffi_platform.Has(_name) +cConfig = rffi_platform.configure(CConfig) +globals().update(cConfig) + +if HAVE_FACCESSAT: + c_faccessat = external('faccessat', + [rffi.INT, rffi.CCHARP, rffi.INT, rffi.INT], rffi.INT) + + def faccessat(pathname, mode, dir_fd=AT_FDCWD, + effective_ids=False, follow_symlinks=True): + """Thin wrapper around faccessat(2) with an interface simlar to + Python3's os.access(). + """ + flags = 0 + if not follow_symlinks: + flags |= AT_SYMLINK_NOFOLLOW + if effective_ids: + flags |= AT_EACCESS + error = c_faccessat(dir_fd, pathname, mode, flags) + return error == 0 + +if HAVE_LINKAT: + c_linkat = external('linkat', + [rffi.INT, rffi.CCHARP, rffi.INT, rffi.CCHARP, rffi.INT], rffi.INT) + + def linkat(src, dst, src_dir_fd=AT_FDCWD, dst_dir_fd=AT_FDCWD, follow_symlinks=True): + """Thin wrapper around linkat(2) with an interface similar to + Python3's os.link() + """ + if follow_symlinks: + flag = 0 + else: + flag = AT_SYMLINK_NOFOLLOW + error = c_linkat(src_dir_fd, src, dst_dir_fd, dst, flag) + handle_posix_error('linkat', error) + +if HAVE_FUTIMENS: + pass + +if HAVE_UTIMENSAT: + pass From pypy.commits at gmail.com Tue Mar 22 01:08:10 2016 From: pypy.commits at gmail.com (mattip) Date: Mon, 21 Mar 2016 22:08:10 -0700 (PDT) Subject: [pypy-commit] pypy default: fix properly (amaury) Message-ID: <56f0d33a.865a1c0a.25b17.3a45@mx.google.com> Author: mattip Branch: Changeset: r83239:4ccaea59116d Date: 2016-03-16 19:23 +0200 http://bitbucket.org/pypy/pypy/changeset/4ccaea59116d/ Log: fix properly (amaury) (grafted from 68a119b4877f6ec2890600ecba5a8b64c17025f8) diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -137,7 +137,7 @@ from pypy.module.cpyext.unicodeobject import ( PyUnicode_Check, _PyUnicode_AsDefaultEncodedString) if PyUnicode_Check(space, ref): - ref = _PyUnicode_AsDefaultEncodedString(space, ref, None) + ref = _PyUnicode_AsDefaultEncodedString(space, ref, lltype.nullptr(rffi.CCHARP.TO)) else: raise oefmt(space.w_TypeError, "expected string or Unicode object, %T found", @@ -156,7 +156,7 @@ from pypy.module.cpyext.unicodeobject import ( PyUnicode_Check, _PyUnicode_AsDefaultEncodedString) if PyUnicode_Check(space, ref): - ref = _PyUnicode_AsDefaultEncodedString(space, ref, None) + ref = _PyUnicode_AsDefaultEncodedString(space, ref, lltype.nullptr(rffi.CCHARP.TO)) else: raise oefmt(space.w_TypeError, "expected string or Unicode object, %T found", From pypy.commits at gmail.com Tue Mar 22 04:22:48 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 22 Mar 2016 01:22:48 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: whack at this test until it passes Message-ID: <56f100d8.0f941c0a.cfb84.6dd0@mx.google.com> Author: fijal Branch: 
jit-leaner-frontend Changeset: r83240:2acab7b506ec Date: 2016-03-22 09:49 +0200 http://bitbucket.org/pypy/pypy/changeset/2acab7b506ec/ Log: whack at this test until it passes diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -12,7 +12,7 @@ from rpython.jit.metainterp.optimizeopt.util import sort_descrs, equaloplists from rpython.jit.codewriter.effectinfo import EffectInfo from rpython.jit.metainterp.logger import LogOperations -from rpython.jit.tool.oparser import OpParser, pure_parse +from rpython.jit.tool.oparser import OpParser, pure_parse, convert_loop_to_trace from rpython.jit.metainterp.quasiimmut import QuasiImmutDescr from rpython.jit.metainterp import compile, resume, history from rpython.jit.metainterp.jitprof import EmptyProfiler @@ -545,50 +545,6 @@ call_pure_results[list(k)] = v return call_pure_results - def pick_cls(self, inp): - if inp.type == 'i': - return history.IntFrontendOp - elif inp.type == 'r': - return history.RefFrontendOp - else: - assert inp.type == 'f' - return history.FloatFrontendOp - - def convert_loop_to_packed(self, loop, skip_last=False): - from rpython.jit.metainterp.opencoder import Trace - from rpython.jit.metainterp.test.test_opencoder import FakeFrame - - def get(a): - if isinstance(a, history.Const): - return a - return mapping[a] - - class jitcode: - index = 200 - - inputargs = [self.pick_cls(inparg)(i) for i, inparg in - enumerate(loop.inputargs)] - mapping = {} - for one, two in zip(loop.inputargs, inputargs): - mapping[one] = two - trace = Trace(inputargs) - ops = loop.operations - if skip_last: - ops = ops[:-1] - for op in ops: - newpos = trace.record_op(op.getopnum(), [get(arg) for arg in - op.getarglist()], op.getdescr()) - if rop.is_guard(op.getopnum()): - failargs = [] - if op.getfailargs(): - failargs = [get(arg) for arg in op.getfailargs()] - frame = FakeFrame(100, jitcode, failargs) - resume.capture_resumedata([frame], None, [], trace) - if op.type != 'v': - newop = self.pick_cls(op)(newpos) - mapping[op] = newop - return trace - def unroll_and_optimize(self, loop, call_pure_results=None, jump_values=None): self.add_guard_future_condition(loop) @@ -601,7 +557,7 @@ # descr=jump_op.getdescr()) #end_label = jump_op.copy_and_change(opnum=rop.LABEL) call_pure_results = self._convert_call_pure_results(call_pure_results) - t = self.convert_loop_to_packed(loop) + t = convert_loop_to_trace(loop) preamble_data = compile.LoopCompileData(t, runtime_boxes, call_pure_results) start_state, preamble_ops = self._do_optimize_loop(preamble_data) diff --git a/rpython/jit/metainterp/test/test_compile.py b/rpython/jit/metainterp/test/test_compile.py --- a/rpython/jit/metainterp/test/test_compile.py +++ b/rpython/jit/metainterp/test/test_compile.py @@ -6,7 +6,7 @@ from rpython.jit.metainterp import jitexc from rpython.jit.metainterp import jitprof, typesystem, compile from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin -from rpython.jit.tool.oparser import parse +from rpython.jit.tool.oparser import parse, convert_loop_to_trace from rpython.jit.metainterp.optimizeopt import ALL_OPTS_DICT class FakeCPU(object): @@ -94,15 +94,14 @@ metainterp.staticdata = staticdata metainterp.cpu = cpu metainterp.history = History() - metainterp.history.set_inputargs(loop.inputargs[:]) - for op in loop.operations: - newop = 
metainterp.history.record_nospec(op.getopnum(), op.getarglist(), op.getdescr()) - op.position = newop.position + t = convert_loop_to_trace(loop) + metainterp.history.inputargs = t.inputargs + metainterp.history.trace = t # greenkey = 'faked' target_token = compile_loop(metainterp, greenkey, (0, 0), - loop.inputargs, - loop.operations[-1].getarglist(), + t.inputargs, + [t._mapping[x] for x in loop.operations[-1].getarglist()], None) jitcell_token = target_token.targeting_jitcell_token assert jitcell_token == target_token.original_jitcell_token diff --git a/rpython/jit/tool/oparser.py b/rpython/jit/tool/oparser.py --- a/rpython/jit/tool/oparser.py +++ b/rpython/jit/tool/oparser.py @@ -413,6 +413,54 @@ return OpParser(input, cpu, namespace, boxkinds, invent_fail_descr, nonstrict, postprocess).parse() +def pick_cls(inp): + from rpython.jit.metainterp import history + + if inp.type == 'i': + return history.IntFrontendOp + elif inp.type == 'r': + return history.RefFrontendOp + else: + assert inp.type == 'f' + return history.FloatFrontendOp + +def convert_loop_to_trace(loop, skip_last=False): + from rpython.jit.metainterp.opencoder import Trace + from rpython.jit.metainterp.test.test_opencoder import FakeFrame + from rpython.jit.metainterp import history, resume + + def get(a): + if isinstance(a, history.Const): + return a + return mapping[a] + + class jitcode: + index = 200 + + inputargs = [pick_cls(inparg)(i) for i, inparg in + enumerate(loop.inputargs)] + mapping = {} + for one, two in zip(loop.inputargs, inputargs): + mapping[one] = two + trace = Trace(inputargs) + ops = loop.operations + if skip_last: + ops = ops[:-1] + for op in ops: + newpos = trace.record_op(op.getopnum(), [get(arg) for arg in + op.getarglist()], op.getdescr()) + if rop.is_guard(op.getopnum()): + failargs = [] + if op.getfailargs(): + failargs = [get(arg) for arg in op.getfailargs()] + frame = FakeFrame(100, jitcode, failargs) + resume.capture_resumedata([frame], None, [], trace) + if op.type != 'v': + newop = pick_cls(op)(newpos) + mapping[op] = newop + trace._mapping = mapping # for tests + return trace + def pure_parse(*args, **kwds): kwds['invent_fail_descr'] = None return parse(*args, **kwds) From pypy.commits at gmail.com Tue Mar 22 04:22:50 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 22 Mar 2016 01:22:50 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: fix test_pyjitpl Message-ID: <56f100da.c856c20a.b438.3f38@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83241:1b06b9d26321 Date: 2016-03-22 10:21 +0200 http://bitbucket.org/pypy/pypy/changeset/1b06b9d26321/ Log: fix test_pyjitpl diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -224,10 +224,8 @@ def __init__(self, inputargs): self._ops = [rffi.cast(rffi.SHORT, -15)] * 30000 self._pos = 0 - self._snapshot_lgt = 0 self._consts_bigint = 0 self._consts_float = 0 - self._sharings = 0 self._total_snapshots = 0 self._consts_ptr = 0 self._descrs = [None] @@ -261,9 +259,7 @@ self._floats_dict = {} debug_start("jit-trace-done") debug_print("trace length: " + str(self._pos)) - debug_print(" snapshots: " + str(self._snapshot_lgt)) - debug_print(" sharings: " + str(self._sharings)) - debug_print(" total snapshots: " + str(self._total_snapshots)) + debug_print(" total snapshots: " + str(self._total_snapshots)) debug_print(" bigint consts: " + str(self._consts_bigint) + " " + str(len(self._bigints))) debug_print(" 
float consts: " + str(self._consts_float) + " " + str(len(self._floats))) debug_print(" ref consts: " + str(self._consts_ptr) + " " + str(len(self._refs))) @@ -356,13 +352,6 @@ self._descrs.append(descr) return len(self._descrs) - 1 - def record_snapshot_link(self, pos): - self._sharings += 1 - lower = pos & 0x7fff - upper = pos >> 15 - self.append(-upper-1) - self.append(lower) - def _list_of_boxes(self, boxes): array = [rffi.cast(rffi.SHORT, 0)] * len(boxes) for i in range(len(boxes)): diff --git a/rpython/jit/metainterp/test/test_compile.py b/rpython/jit/metainterp/test/test_compile.py --- a/rpython/jit/metainterp/test/test_compile.py +++ b/rpython/jit/metainterp/test/test_compile.py @@ -116,7 +116,6 @@ def test_compile_tmp_callback(): - from rpython.jit.codewriter import heaptracker from rpython.jit.backend.llgraph import runner from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.rtyper.annlowlevel import llhelper diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py --- a/rpython/jit/metainterp/test/test_opencoder.py +++ b/rpython/jit/metainterp/test/test_opencoder.py @@ -155,13 +155,6 @@ loop2.operations = l BaseTest.assert_equal(loop1, loop2) - @given(strategies.integers(min_value=0, max_value=2**25)) - def test_packing(self, i): - t = Trace([]) - t.record_snapshot_link(i) - iter = t.get_iter(metainterp_sd) - assert (((-iter._next() - 1) << 15) | (iter._next())) == i - def test_cut_trace_from(self): i0, i1, i2 = IntFrontendOp(0), IntFrontendOp(0), IntFrontendOp(0) t = Trace([i0, i1, i2]) diff --git a/rpython/jit/metainterp/test/test_pyjitpl.py b/rpython/jit/metainterp/test/test_pyjitpl.py --- a/rpython/jit/metainterp/test/test_pyjitpl.py +++ b/rpython/jit/metainterp/test/test_pyjitpl.py @@ -5,7 +5,7 @@ from rpython.jit.metainterp import pyjitpl from rpython.jit.metainterp import jitprof from rpython.jit.metainterp.history import ConstInt -from rpython.jit.metainterp.history import History +from rpython.jit.metainterp.history import History, IntFrontendOp from rpython.jit.metainterp.resoperation import ResOperation, rop, InputArgInt from rpython.jit.metainterp.optimizeopt.util import equaloplists from rpython.jit.codewriter.jitcode import JitCode @@ -82,8 +82,10 @@ return True metainterp = pyjitpl.MetaInterp(FakeStaticData(), None) metainterp.history = History() - b1 = InputArgInt(1) - b2 = InputArgInt(2) + b1 = IntFrontendOp(1) + b1.setint(1) + b2 = IntFrontendOp(2) + b2.setint(2) c3 = ConstInt(3) boxes = [b1, b2, b1, c3] dup = {} @@ -93,21 +95,14 @@ assert boxes[1] is b2 assert is_another_box_like(boxes[2], b1) assert is_another_box_like(boxes[3], c3) - assert equaloplists(metainterp.history.operations, [ + inp, operations = metainterp.history.trace.unpack(metainterp.staticdata) + remap = dict(zip([b1, b2], inp)) + assert equaloplists(operations, [ ResOperation(rop.SAME_AS_I, [b1]), ResOperation(rop.SAME_AS_I, [c3]), - ]) + ], remap=remap) assert dup == {b1: None, b2: None} # - del metainterp.history.operations[:] - b4 = InputArgInt(4) - boxes = [b2, b4, "something random"] - metainterp.remove_consts_and_duplicates(boxes, 2, dup) - assert is_another_box_like(boxes[0], b2) - assert boxes[1] is b4 - assert equaloplists(metainterp.history.operations, [ - ResOperation(rop.SAME_AS_I, [b2]), - ]) def test_get_name_from_address(): class FakeMetaInterpSd(pyjitpl.MetaInterpStaticData): From pypy.commits at gmail.com Tue Mar 22 05:00:55 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 22 Mar 2016 02:00:55 -0700 
(PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: removed statements that copy rd_* on guard descr. test_vector passing Message-ID: <56f109c7.cf0b1c0a.c3ae3.ffff870a@mx.google.com> Author: Richard Plangger Branch: jit-leaner-frontend Changeset: r83242:721744197b8f Date: 2016-03-22 09:59 +0100 http://bitbucket.org/pypy/pypy/changeset/721744197b8f/ Log: removed statements that copy rd_* on guard descr. test_vector passing diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -287,9 +287,8 @@ return None if ((warmstate.vec and jitdriver_sd.vec) or warmstate.vec_all): - assert False, "vectorization disabled" from rpython.jit.metainterp.optimizeopt.vector import optimize_vector - loop_info, loop_ops = optimize_vector(metainterp_sd, + loop_info, loop_ops = optimize_vector(trace, metainterp_sd, jitdriver_sd, warmstate, loop_info, loop_ops, jitcell_token) diff --git a/rpython/jit/metainterp/optimizeopt/guard.py b/rpython/jit/metainterp/optimizeopt/guard.py --- a/rpython/jit/metainterp/optimizeopt/guard.py +++ b/rpython/jit/metainterp/optimizeopt/guard.py @@ -119,9 +119,9 @@ descr = myop.getdescr() descr.copy_all_attributes_from(other.op.getdescr()) - myop.rd_frame_info_list = otherop.rd_frame_info_list + # TODO myop.rd_frame_info_list = otherop.rd_frame_info_list myop.setfailargs(otherop.getfailargs()[:]) - myop.rd_snapshot = otherop.rd_snapshot + # TODO myop.rd_snapshot = otherop.rd_snapshot def emit_varops(self, opt, var, old_arg): assert isinstance(var, IndexVar) diff --git a/rpython/jit/metainterp/optimizeopt/renamer.py b/rpython/jit/metainterp/optimizeopt/renamer.py --- a/rpython/jit/metainterp/optimizeopt/renamer.py +++ b/rpython/jit/metainterp/optimizeopt/renamer.py @@ -17,7 +17,7 @@ if op.is_guard(): assert isinstance(op, resoperation.GuardResOp) - op.rd_snapshot = self.rename_rd_snapshot(op.rd_snapshot, clone=True) + # TODO op.rd_snapshot = self.rename_rd_snapshot(op.rd_snapshot, clone=True) failargs = self.rename_failargs(op, clone=True) op.setfailargs(failargs) diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py --- a/rpython/jit/metainterp/optimizeopt/schedule.py +++ b/rpython/jit/metainterp/optimizeopt/schedule.py @@ -498,7 +498,7 @@ newarg = unpack_from_vector(state, newarg, 0, 1) args[i] = newarg vecop.setfailargs(args) - vecop.rd_snapshot = left.rd_snapshot + # TODO vecop.rd_snapshot = left.rd_snapshot @always_inline def crop_vector(state, oprestrict, restrict, pack, args, i): diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py --- a/rpython/jit/metainterp/optimizeopt/vector.py +++ b/rpython/jit/metainterp/optimizeopt/vector.py @@ -111,7 +111,7 @@ loop.prefix_label = prefix_label return loop -def optimize_vector(metainterp_sd, jitdriver_sd, warmstate, +def optimize_vector(trace, metainterp_sd, jitdriver_sd, warmstate, loop_info, loop_ops, jitcell_token=None): """ Enter the world of SIMD. Bails if it cannot transform the trace. 
""" user_code = not jitdriver_sd.vec and warmstate.vec_all @@ -190,7 +190,7 @@ if warmstate.vec_ratio > 0.0: # blacklist - if op.is_call() or op.is_call_assembler(): + if op.is_call() or rop.is_call_assembler(op): return True if op.is_guard(): From pypy.commits at gmail.com Tue Mar 22 06:44:13 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 22 Mar 2016 03:44:13 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-gil-ensure: A branch to fix PyGILState_Ensure() Message-ID: <56f121fd.c1621c0a.ee33.0f4e@mx.google.com> Author: Armin Rigo Branch: cpyext-gil-ensure Changeset: r83243:13347c02c54c Date: 2016-03-22 11:41 +0100 http://bitbucket.org/pypy/pypy/changeset/13347c02c54c/ Log: A branch to fix PyGILState_Ensure() From pypy.commits at gmail.com Tue Mar 22 06:44:15 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 22 Mar 2016 03:44:15 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-gil-ensure: Write the plan Message-ID: <56f121ff.d4e01c0a.c4d1f.ffffaa4e@mx.google.com> Author: Armin Rigo Branch: cpyext-gil-ensure Changeset: r83244:4198667ea611 Date: 2016-03-22 11:43 +0100 http://bitbucket.org/pypy/pypy/changeset/4198667ea611/ Log: Write the plan diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -192,6 +192,58 @@ # exceptions generate a OperationError(w_SystemError); and the funtion returns # the error value specifed in the API. # +# Handling of the GIL +# ------------------- +# +# We add a global variable that contains a thread id. Invariant: this +# variable always contain 0 when the PyPy GIL is released. It should +# also contain 0 when regular RPython code executes. In +# non-cpyext-related code, it will thus always be 0. +# +# **make_generic_cpy_call():** RPython to C, with the GIL held. Before +# the call, must assert that the global variable is 0 and set the +# current thread identifier into the global variable. After the call, +# assert that the global variable still contains the current thread id, +# and reset it to 0. +# +# **make_wrapper():** C to RPython; by default assume that the GIL is +# held, but accepts gil="acquire", "release", "around", +# "pygilstate_ensure", "pygilstate_release". +# +# When a wrapper() is called: +# +# * "acquire": assert that the GIL is not currently held, i.e. the +# global variable does not contain the current thread id (otherwise, +# deadlock!). Acquire the PyPy GIL. After we acquired it, assert +# that the global variable is 0 (it must be 0 according to the +# invariant that it was 0 immediately before we acquired the GIL, +# because the GIL was released at that point). +# +# * gil=None: we hold the GIL already. Assert that the current thread +# identifier is in the global variable, and replace it with 0. +# +# * "pygilstate_ensure": if the global variable contains the current +# thread id, replace it with 0 and set the extra arg to 0. Otherwise, +# do the "acquire" and set the extra arg to 1. Then we'll call +# pystate.py:PyGILState_Ensure() with this extra arg, which will do +# the rest of the logic. +# +# When a wrapper() returns, first assert that the global variable is +# still 0, and then: +# +# * "release": release the PyPy GIL. The global variable was 0 up to +# and including at the point where we released the GIL, but afterwards +# it is possible that the GIL is acquired by a different thread very +# quickly. +# +# * gil=None: we keep holding the GIL. Set the current thread +# identifier into the global variable. 
+# +# * "pygilstate_release": if the argument is PyGILState_UNLOCKED, +# release the PyPy GIL; otherwise, set the current thread identifier +# into the global variable. The rest of the logic of +# PyGILState_Release() should be done before, in pystate.py. + cpyext_namespace = NameManager('cpyext_') From pypy.commits at gmail.com Tue Mar 22 07:37:23 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 22 Mar 2016 04:37:23 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: whack at tests Message-ID: <56f12e73.838d1c0a.f191a.ffffc8aa@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83245:da47dd47f143 Date: 2016-03-22 13:35 +0200 http://bitbucket.org/pypy/pypy/changeset/da47dd47f143/ Log: whack at tests diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -27,6 +27,7 @@ class CompileData(object): memo = None + log_noopt = True def forget_optimization_info(self): for arg in self.trace.inputargs: @@ -103,6 +104,8 @@ """ This represents label() ops jump with extra info that's from the run of LoopCompileData. Jump goes to the same label """ + log_noopt = False + def __init__(self, trace, celltoken, state, call_pure_results=None, enable_opts=None, inline_short_preamble=True): @@ -200,7 +203,8 @@ # ____________________________________________________________ -def compile_simple_loop(metainterp, greenkey, trace, runtime_args, enable_opts): +def compile_simple_loop(metainterp, greenkey, trace, runtime_args, enable_opts, + cut_at): from rpython.jit.metainterp.optimizeopt import optimize_trace jitdriver_sd = metainterp.jitdriver_sd @@ -213,6 +217,7 @@ loop_info, ops = optimize_trace(metainterp_sd, jitdriver_sd, data, metainterp.box_names_memo) except InvalidLoop: + history.cut(cut_at) return None loop = create_empty_loop(metainterp) loop.original_jitcell_token = jitcell_token @@ -254,12 +259,13 @@ del enable_opts['unroll'] jitcell_token = make_jitcell_token(jitdriver_sd) + cut_at = history.get_trace_position() history.record(rop.JUMP, jumpargs, None, descr=jitcell_token) if start != (0, 0): trace = trace.cut_trace_from(start, inputargs) if 'unroll' not in enable_opts or not metainterp.cpu.supports_guard_gc_type: return compile_simple_loop(metainterp, greenkey, trace, jumpargs, - enable_opts) + enable_opts, cut_at) call_pure_results = metainterp.call_pure_results preamble_data = LoopCompileData(trace, jumpargs, call_pure_results=call_pure_results, @@ -269,6 +275,7 @@ preamble_data, metainterp.box_names_memo) except InvalidLoop: + history.cut(cut_at) return None metainterp_sd = metainterp.staticdata @@ -284,6 +291,7 @@ loop_data, metainterp.box_names_memo) except InvalidLoop: + history.cut(cut_at) return None if ((warmstate.vec and jitdriver_sd.vec) or warmstate.vec_all): @@ -352,7 +360,6 @@ metainterp.box_names_memo) except InvalidLoop: # Fall back on jumping directly to preamble - raise InvalidLoop xxxx jump_op = ResOperation(rop.JUMP, inputargs[:], descr=loop_jitcell_token) loop_data = UnrolledLoopData(end_label, jump_op, [jump_op], start_state, diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py --- a/rpython/jit/metainterp/history.py +++ b/rpython/jit/metainterp/history.py @@ -687,7 +687,7 @@ self._cache = None def length(self): - return self.trace._count + return self.trace._count - len(self.trace.inputargs) def get_trace_position(self): return self.trace.cut_point() diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py 
b/rpython/jit/metainterp/optimizeopt/__init__.py --- a/rpython/jit/metainterp/optimizeopt/__init__.py +++ b/rpython/jit/metainterp/optimizeopt/__init__.py @@ -53,7 +53,8 @@ """ debug_start("jit-optimize") try: - metainterp_sd.logger_noopt.log_loop_from_trace(compile_data.trace, memo=memo) + if compile_data.log_noopt: + metainterp_sd.logger_noopt.log_loop_from_trace(compile_data.trace, memo=memo) if memo is None: memo = {} compile_data.box_names_memo = memo diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -2245,7 +2245,7 @@ def aborted_tracing(self, reason): self.staticdata.profiler.count(reason) - debug_print('~~~ ABORTING TRACING') + debug_print('~~~ ABORTING TRACING %s' % Counters.counter_names[reason]) jd_sd = self.jitdriver_sd if not self.current_merge_points: greenkey = None # we're in the bridge diff --git a/rpython/jit/metainterp/test/test_resoperation.py b/rpython/jit/metainterp/test/test_resoperation.py --- a/rpython/jit/metainterp/test/test_resoperation.py +++ b/rpython/jit/metainterp/test/test_resoperation.py @@ -52,7 +52,7 @@ assert len(INT_ADD.__bases__) == 1 BinaryPlainResOp = INT_ADD.__bases__[0] assert BinaryPlainResOp.__name__ == 'BinaryPlainResOp' - assert BinaryPlainResOp.__bases__ == (rop.BinaryOp, rop.IntOp, rop.PlainResOp) + assert BinaryPlainResOp.__bases__ == (rop.BinaryOp, rop.PlainResOp) INT_SUB = rop.opclasses[rop.rop.INT_SUB] assert INT_SUB.__bases__[0] is BinaryPlainResOp diff --git a/rpython/jit/metainterp/test/test_resume.py b/rpython/jit/metainterp/test/test_resume.py --- a/rpython/jit/metainterp/test/test_resume.py +++ b/rpython/jit/metainterp/test/test_resume.py @@ -13,16 +13,16 @@ annlowlevel, PENDINGFIELDSP, TAG_CONST_OFFSET from rpython.jit.metainterp.resumecode import unpack_numbering,\ create_numbering, NULL_NUMBER -from rpython.jit.metainterp.opencoder import Trace +from rpython.jit.metainterp.opencoder import Trace, Snapshot, TopSnapshot from rpython.jit.metainterp.optimizeopt import info from rpython.jit.metainterp.history import ConstInt, Const, AbstractDescr -from rpython.jit.metainterp.history import ConstPtr, ConstFloat +from rpython.jit.metainterp.history import ConstPtr, ConstFloat,\ + IntFrontendOp, RefFrontendOp from rpython.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin from rpython.jit.metainterp import executor from rpython.jit.codewriter import heaptracker, longlong -from rpython.jit.metainterp.resoperation import ResOperation, InputArgInt,\ - InputArgRef, rop +from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.jit.metainterp.test.strategies import boxlists from rpython.rlib.debug import debug_start, debug_stop, debug_print,\ have_debug_prints @@ -523,88 +523,11 @@ def __repr__(self): return "" % (self.jitcode, self.pc, self._env) -def test_Snapshot_create(): - l = ['b0', 'b1'] - snap = Snapshot(None, l) - assert snap.prev is None - assert snap.boxes is l - - l1 = ['b3'] - snap1 = Snapshot(snap, l1) - assert snap1.prev is snap - assert snap1.boxes is l1 - class FakeJitCode(object): def __init__(self, name, index): self.name = name self.index = index -def test_capture_resumedata(): - b1, b2, b3 = [InputArgInt(), InputArgRef(), InputArgInt()] - c1, c2, c3 = [ConstInt(1), ConstInt(2), ConstInt(3)] - fs = [FakeFrame(FakeJitCode("code0", 13), 0, b1, c1, b2)] - - t = Trace([b1, b2, b3]) - pos = capture_resumedata(fs, None, [], t) - - assert fs[0].parent_resume_position == -1 - s = 
t.get_iter().get_snapshot_iter(pos) - - size, jitcode, pc = s.get_size_jitcode_pc() - assert jitcode == 13 - boxes = s.read_boxes(size) - assert boxes == fs[0]._env - - storage = Storage() - fs = [FakeFrame(FakeJitCode("code0", 0), 0, b1, c1, b2), - FakeFrame(FakeJitCode("code1", 1), 3, b3, c2, b1), - FakeFrame(FakeJitCode("code2", 2), 9, c3, b2)] - t = Trace([b1, b2, b3]) - pos = capture_resumedata(fs, None, [], t) - - assert fs[2].parent_resume_position != -1 - s = t.get_iter().get_snapshot_iter(pos) - size, jitcode, pc = s.get_size_jitcode_pc() - assert (jitcode, pc) == (2, 9) - - xxx - assert storage.rd_snapshot.boxes == [] # for virtualrefs - snapshot = storage.rd_snapshot.prev - assert snapshot.prev is fs[2].parent_resumedata_snapshot - assert snapshot.boxes == fs[2]._env - - frame_info_list = frame_info_list.prev - assert frame_info_list.prev is fs[1].parent_resumedata_frame_info_list - assert unpack_uint(frame_info_list.packed_jitcode_pc) == (1, 3) - snapshot = snapshot.prev - assert snapshot.prev is fs[1].parent_resumedata_snapshot - assert snapshot.boxes == fs[1]._env - - frame_info_list = frame_info_list.prev - assert frame_info_list.prev is None - assert unpack_uint(frame_info_list.packed_jitcode_pc) == (0, 0) - snapshot = snapshot.prev - assert snapshot.prev is None - assert snapshot.boxes == fs[0]._env - - fs[2]._env = [b3, b2] - fs[2].pc = 15 - vbs = [b1, b2] - vrs = [b3] - capture_resumedata(fs, vbs, vrs, storage) - - frame_info_list = storage.rd_frame_info_list - assert frame_info_list.prev is fs[2].parent_resumedata_frame_info_list - assert unpack_uint(frame_info_list.packed_jitcode_pc) == (2, 15) - - snapshot = storage.rd_snapshot - assert snapshot.boxes == vrs - assert snapshot.vable_boxes == [b2, b1] - - snapshot = snapshot.prev - assert snapshot.prev is fs[2].parent_resumedata_snapshot - assert snapshot.boxes == fs[2]._env - class FakeMetaInterpStaticData: cpu = LLtypeMixin.cpu @@ -886,12 +809,12 @@ assert memo.consts[index - TAG_CONST_OFFSET] is const def test_ResumeDataLoopMemo_number(): - b1, b2, b3, b4, b5 = [InputArgInt(), InputArgInt(), InputArgInt(), - InputArgRef(), InputArgRef()] + b1, b2, b3, b4, b5 = [IntFrontendOp(0), IntFrontendOp(1), IntFrontendOp(2), + RefFrontendOp(3), RefFrontendOp(4)] c1, c2, c3, c4 = [ConstInt(1), ConstInt(2), ConstInt(3), ConstInt(4)] env = [b1, c1, b2, b1, c2] - snap = Snapshot(None, env) + snap = Snapshot(0, env) env1 = [c3, b3, b1, c1] snap1 = TopSnapshot(snap, env1, []) env2 = [c3, b3, b1, c3] From pypy.commits at gmail.com Tue Mar 22 07:37:25 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 22 Mar 2016 04:37:25 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: merge; Message-ID: <56f12e75.857ac20a.dc7f4.ffff8f09@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83246:bdb727f99955 Date: 2016-03-22 13:36 +0200 http://bitbucket.org/pypy/pypy/changeset/bdb727f99955/ Log: merge; diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -295,9 +295,8 @@ return None if ((warmstate.vec and jitdriver_sd.vec) or warmstate.vec_all): - assert False, "vectorization disabled" from rpython.jit.metainterp.optimizeopt.vector import optimize_vector - loop_info, loop_ops = optimize_vector(metainterp_sd, + loop_info, loop_ops = optimize_vector(trace, metainterp_sd, jitdriver_sd, warmstate, loop_info, loop_ops, jitcell_token) diff --git a/rpython/jit/metainterp/optimizeopt/guard.py 
b/rpython/jit/metainterp/optimizeopt/guard.py --- a/rpython/jit/metainterp/optimizeopt/guard.py +++ b/rpython/jit/metainterp/optimizeopt/guard.py @@ -119,9 +119,9 @@ descr = myop.getdescr() descr.copy_all_attributes_from(other.op.getdescr()) - myop.rd_frame_info_list = otherop.rd_frame_info_list + # TODO myop.rd_frame_info_list = otherop.rd_frame_info_list myop.setfailargs(otherop.getfailargs()[:]) - myop.rd_snapshot = otherop.rd_snapshot + # TODO myop.rd_snapshot = otherop.rd_snapshot def emit_varops(self, opt, var, old_arg): assert isinstance(var, IndexVar) diff --git a/rpython/jit/metainterp/optimizeopt/renamer.py b/rpython/jit/metainterp/optimizeopt/renamer.py --- a/rpython/jit/metainterp/optimizeopt/renamer.py +++ b/rpython/jit/metainterp/optimizeopt/renamer.py @@ -17,7 +17,7 @@ if op.is_guard(): assert isinstance(op, resoperation.GuardResOp) - op.rd_snapshot = self.rename_rd_snapshot(op.rd_snapshot, clone=True) + # TODO op.rd_snapshot = self.rename_rd_snapshot(op.rd_snapshot, clone=True) failargs = self.rename_failargs(op, clone=True) op.setfailargs(failargs) diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py --- a/rpython/jit/metainterp/optimizeopt/schedule.py +++ b/rpython/jit/metainterp/optimizeopt/schedule.py @@ -498,7 +498,7 @@ newarg = unpack_from_vector(state, newarg, 0, 1) args[i] = newarg vecop.setfailargs(args) - vecop.rd_snapshot = left.rd_snapshot + # TODO vecop.rd_snapshot = left.rd_snapshot @always_inline def crop_vector(state, oprestrict, restrict, pack, args, i): diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py --- a/rpython/jit/metainterp/optimizeopt/vector.py +++ b/rpython/jit/metainterp/optimizeopt/vector.py @@ -111,7 +111,7 @@ loop.prefix_label = prefix_label return loop -def optimize_vector(metainterp_sd, jitdriver_sd, warmstate, +def optimize_vector(trace, metainterp_sd, jitdriver_sd, warmstate, loop_info, loop_ops, jitcell_token=None): """ Enter the world of SIMD. Bails if it cannot transform the trace. 
""" user_code = not jitdriver_sd.vec and warmstate.vec_all @@ -190,7 +190,7 @@ if warmstate.vec_ratio > 0.0: # blacklist - if op.is_call() or op.is_call_assembler(): + if op.is_call() or rop.is_call_assembler(op): return True if op.is_guard(): From pypy.commits at gmail.com Tue Mar 22 07:50:00 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 22 Mar 2016 04:50:00 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: implement a missing part in compile.py Message-ID: <56f13168.8fb81c0a.e11e0.3998@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83247:1b684afa5204 Date: 2016-03-22 13:40 +0200 http://bitbucket.org/pypy/pypy/changeset/1b684afa5204/ Log: implement a missing part in compile.py diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -347,6 +347,7 @@ end_label = ResOperation(rop.LABEL, inputargs[:], descr=loop_jitcell_token) #cut_pos = history.get_trace_position() + cut = history.get_trace_position() history.record(rop.JUMP, jumpargs[:], None, descr=loop_jitcell_token) enable_opts = jitdriver_sd.warmstate.enable_opts call_pure_results = metainterp.call_pure_results @@ -359,9 +360,9 @@ metainterp.box_names_memo) except InvalidLoop: # Fall back on jumping directly to preamble - xxxx - jump_op = ResOperation(rop.JUMP, inputargs[:], descr=loop_jitcell_token) - loop_data = UnrolledLoopData(end_label, jump_op, [jump_op], start_state, + history.cut(cut) + history.record(rop.JUMP, jumpargs[:], None, descr=loop_jitcell_token) + loop_data = UnrolledLoopData(trace, loop_jitcell_token, start_state, call_pure_results=call_pure_results, enable_opts=enable_opts, inline_short_preamble=False) @@ -370,6 +371,7 @@ loop_data, metainterp.box_names_memo) except InvalidLoop: + history.cut(cut) return None label_token = loop_info.label_op.getdescr() From pypy.commits at gmail.com Tue Mar 22 07:50:02 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 22 Mar 2016 04:50:02 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: whack at tests, unsure a bit about the virtualstate one Message-ID: <56f1316a.12871c0a.807c5.ffffd28b@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83248:9d6c1e09270a Date: 2016-03-22 13:49 +0200 http://bitbucket.org/pypy/pypy/changeset/9d6c1e09270a/ Log: whack at tests, unsure a bit about the virtualstate one diff --git a/rpython/jit/metainterp/optimizeopt/virtualstate.py b/rpython/jit/metainterp/optimizeopt/virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/virtualstate.py @@ -157,9 +157,11 @@ raise VirtualStatesCantMatch("field descrs don't match") if runtime_box is not None and opinfo is not None: fieldbox = opinfo._fields[self.fielddescrs[i].get_index()] - # must be there - fieldbox_runtime = state.get_runtime_field(runtime_box, + if fieldbox is not None: + fieldbox_runtime = state.get_runtime_field(runtime_box, self.fielddescrs[i]) + else: + fieldbox_runtime = None else: fieldbox = None fieldbox_runtime = None diff --git a/rpython/jit/metainterp/test/test_warmspot.py b/rpython/jit/metainterp/test/test_warmspot.py --- a/rpython/jit/metainterp/test/test_warmspot.py +++ b/rpython/jit/metainterp/test/test_warmspot.py @@ -623,6 +623,9 @@ class tracker: pass + def setup_descrs(self): + pass + def get_latest_descr(self, deadframe): assert isinstance(deadframe, FakeDeadFrame) return self.get_fail_descr_from_number(deadframe._no) diff --git 
a/rpython/jit/metainterp/test/test_warmstate.py b/rpython/jit/metainterp/test/test_warmstate.py --- a/rpython/jit/metainterp/test/test_warmstate.py +++ b/rpython/jit/metainterp/test/test_warmstate.py @@ -6,7 +6,8 @@ from rpython.jit.metainterp.warmstate import WarmEnterState from rpython.jit.metainterp.resoperation import InputArgInt, InputArgRef,\ InputArgFloat -from rpython.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr +from rpython.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr,\ + IntFrontendOp, FloatFrontendOp, RefFrontendOp from rpython.jit.metainterp.counter import DeterministicJitCounter from rpython.jit.codewriter import longlong from rpython.rlib.rarithmetic import r_singlefloat @@ -31,6 +32,24 @@ assert unwrap(lltype.Ptr(RS), InputArgInt(0)) == lltype.nullptr(RS) def test_wrap(): + def InputArgInt(a): + i = IntFrontendOp(0) + i.setint(a) + return i + + def InputArgFloat(a): + i = FloatFrontendOp(0) + i.setfloatstorage(a) + return i + + def InputArgRef(a): + i = RefFrontendOp(0) + i.setref_base(a) + return i + + def boxfloat(x): + return InputArgFloat(longlong.getfloatstorage(x)) + def _is(box1, box2): return (box1.__class__ == box2.__class__ and box1.getvalue() == box2.getvalue()) From pypy.commits at gmail.com Tue Mar 22 07:56:31 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 22 Mar 2016 04:56:31 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: redisable vectorization Message-ID: <56f132ef.4c181c0a.e2e25.ffffd40f@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83249:ef49c050f982 Date: 2016-03-22 13:55 +0200 http://bitbucket.org/pypy/pypy/changeset/ef49c050f982/ Log: redisable vectorization diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -295,6 +295,7 @@ return None if ((warmstate.vec and jitdriver_sd.vec) or warmstate.vec_all): + assert False from rpython.jit.metainterp.optimizeopt.vector import optimize_vector loop_info, loop_ops = optimize_vector(trace, metainterp_sd, jitdriver_sd, warmstate, From pypy.commits at gmail.com Tue Mar 22 08:01:35 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 22 Mar 2016 05:01:35 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: typo in untested path Message-ID: <56f1341f.6507c20a.ad77a.ffff8f63@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83250:0b46342da8a3 Date: 2016-03-22 14:00 +0200 http://bitbucket.org/pypy/pypy/changeset/0b46342da8a3/ Log: typo in untested path diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -217,7 +217,7 @@ loop_info, ops = optimize_trace(metainterp_sd, jitdriver_sd, data, metainterp.box_names_memo) except InvalidLoop: - history.cut(cut_at) + trace.cut_at(cut_at) return None loop = create_empty_loop(metainterp) loop.original_jitcell_token = jitcell_token From pypy.commits at gmail.com Tue Mar 22 10:25:10 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 22 Mar 2016 07:25:10 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-gil-ensure: in-progress (untested) Message-ID: <56f155c6.12871c0a.807c5.1077@mx.google.com> Author: Armin Rigo Branch: cpyext-gil-ensure Changeset: r83251:45bacc9b58c2 Date: 2016-03-22 15:24 +0100 http://bitbucket.org/pypy/pypy/changeset/45bacc9b58c2/ Log: in-progress (untested) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- 
a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -37,6 +37,8 @@ from rpython.tool.sourcetools import func_with_new_name from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rlib import rawrefcount +from rpython.rlib import rthread +from rpython.rlib.debug import fatalerror_notb DEBUG_WRAPPER = True @@ -195,10 +197,10 @@ # Handling of the GIL # ------------------- # -# We add a global variable that contains a thread id. Invariant: this -# variable always contain 0 when the PyPy GIL is released. It should -# also contain 0 when regular RPython code executes. In -# non-cpyext-related code, it will thus always be 0. +# We add a global variable 'cpyext_glob_tid' that contains a thread +# id. Invariant: this variable always contain 0 when the PyPy GIL is +# released. It should also contain 0 when regular RPython code +# executes. In non-cpyext-related code, it will thus always be 0. # # **make_generic_cpy_call():** RPython to C, with the GIL held. Before # the call, must assert that the global variable is 0 and set the @@ -244,6 +246,9 @@ # into the global variable. The rest of the logic of # PyGILState_Release() should be done before, in pystate.py. +cpyext_glob_tid_ptr = lltype.malloc(rffi.CArray(lltype.Signed), 1, + flavor='raw', immortal=True, zero=True) + cpyext_namespace = NameManager('cpyext_') @@ -668,7 +673,14 @@ fatal_value = callable.api_func.restype._defl() gil_acquire = (gil == "acquire" or gil == "around") gil_release = (gil == "release" or gil == "around") - assert gil is None or gil_acquire or gil_release + pygilstate_ensure = (gil == "pygilstate_ensure") + pygilstate_release = (gil == "pygilstate_release") + assert (gil is None or gil_acquire or gil_release + or pygilstate_ensure or pygilstate_release) + deadlock_error = ("GIL deadlock detected when a CPython C extension " + "module calls back %r" % (callable.__name__,)) + no_gil_error = ("GIL not held when a CPython C extension " + "module calls back %r" % (callable.__name__,)) @specialize.ll() def wrapper(*args): @@ -676,8 +688,27 @@ from pypy.module.cpyext.pyobject import as_pyobj # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer + + # see "Handling of the GIL" above (careful, we don't have the GIL here) + tid = rthread.get_or_make_ident() if gil_acquire: + if cpyext_glob_tid_ptr[0] == tid: + fatalerror_notb(deadlock_error) rgil.acquire() + assert cpyext_glob_tid_ptr[0] == 0 + elif pygilstate_ensure: + from pypy.module.cpyext import pystate + if cpyext_glob_tid_ptr[0] == tid: + cpyext_glob_tid_ptr[0] = 0 + args += (pystate.PyGILState_LOCKED,) + else: + rgil.acquire() + args += (pystate.PyGILState_UNLOCKED,) + else: + if cpyext_glob_tid_ptr[0] != tid: + fatalerror_notb(no_gil_error) + cpyext_glob_tid_ptr[0] = 0 + rffi.stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py retval = fatal_value @@ -754,8 +785,20 @@ pypy_debug_catch_fatal_exception() assert False rffi.stackcounter.stacks_counter -= 1 - if gil_release: + + # see "Handling of the GIL" above + assert cpyext_glob_tid_ptr[0] == 0 + if pygilstate_release: + from pypy.module.cpyext import pystate + arg = rffi.cast(lltype.Signed, args[-1]) + unlock = (arg == pystate.PyGILState_UNLOCKED) + else: + unlock = gil_release + if unlock: rgil.release() + else: + cpyext_glob_tid_ptr[0] = tid + return retval callable._always_inline_ = 'try' wrapper.__name__ = "wrapper for %r" % (callable, ) @@ -1401,10 +1444,17 @@ arg = as_pyobj(space, arg) 
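# A minimal plain-Python model of the 'cpyext_glob_tid' invariant described
# in the comment block above, placed next to the make_generic_cpy_call() hunk
# it mirrors.  The names '_glob_tid' and '_model_cpy_call' are invented for
# illustration only and simplify away the real GIL and rffi machinery; this
# is a sketch of the invariant, not the actual cpyext code.
_glob_tid = [0]   # stays 0 while the GIL is released or RPython code runs

def _model_cpy_call(my_tid, c_func):
    # RPython -> C with the GIL held: publish our thread id for the call
    assert _glob_tid[0] == 0
    _glob_tid[0] = my_tid
    try:
        return c_func()
    finally:
        # nobody else may have touched the cell while we held the GIL
        assert _glob_tid[0] == my_tid
        _glob_tid[0] = 0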
boxed_args += (arg,) + # see "Handling of the GIL" above + tid = rthread.get_ident() + assert cpyext_glob_tid_ptr[0] == 0 + cpyext_glob_tid_ptr[0] = tid + try: # Call the function result = call_external_function(func, *boxed_args) finally: + assert cpyext_glob_tid_ptr[0] == tid + cpyext_glob_tid_ptr[0] = 0 keepalive_until_here(*keepalives) if is_PyObject(RESULT_TYPE): diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -204,18 +204,43 @@ compile time.""" PyGILState_STATE = rffi.INT +PyGILState_LOCKED = 0 +PyGILState_UNLOCKED = 1 - at cpython_api([], PyGILState_STATE, error=CANNOT_FAIL, gil="acquire") -def PyGILState_Ensure(space): - # XXX XXX XXX THIS IS A VERY MINIMAL IMPLEMENTATION THAT WILL HAPPILY - # DEADLOCK IF CALLED TWICE ON THE SAME THREAD, OR CRASH IF CALLED IN A - # NEW THREAD. We should very carefully follow what CPython does instead. - return rffi.cast(PyGILState_STATE, 0) +ExecutionContext.cpyext_gilstate_counter_noleave = 0 - at cpython_api([PyGILState_STATE], lltype.Void, gil="release") -def PyGILState_Release(space, state): - # XXX XXX XXX We should very carefully follow what CPython does instead. - pass + at cpython_api([], PyGILState_STATE, error=CANNOT_FAIL, gil="pygilstate_ensure") +def PyGILState_Ensure(space, previous_state): + # The argument 'previous_state' is not part of the API; it is inserted + # by make_wrapper() and contains PyGILState_LOCKED/UNLOCKED based on + # the previous GIL state. + must_leave = space.threadlocals.try_enter_thread(space) + ec = space.getexecutioncontext() + if not must_leave: + # This is a counter of how many times we called try_enter_thread() + # and it returned False. In PyGILState_Release(), if this counter + # is greater than zero, we decrement it; only if the counter is + # already zero do we call leave_thread(). + ec.cpyext_gilstate_counter_noleave += 1 + else: + # This case is for when we just built a fresh threadlocals. + # We should only see it when we are in a new thread with no + # PyPy code below. 
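# A rough, non-authoritative model of how this Ensure/Release pair is meant
# to nest, with threadlocals and the real GIL simplified away; 'GILModel',
# 'model_ensure' and 'model_release' are invented names used only to sketch
# the counter logic described in the comments above.
class GILModel(object):
    def __init__(self):
        self.entered = False       # stands in for try_enter_thread()
        self.counter_noleave = 0

def model_ensure(m):
    if m.entered:
        m.counter_noleave += 1     # nested Ensure: Release must not leave
        return "LOCKED"
    m.entered = True               # fresh thread: really enter
    return "UNLOCKED"

def model_release(m, oldstate):
    if m.counter_noleave > 0:
        m.counter_noleave -= 1     # undo one nested Ensure
    else:
        assert oldstate == "UNLOCKED"
        m.entered = False          # really leave the thread

# e.g. m = GILModel(); model_release(m, model_ensure(m)) leaves m unchanged,
# however deeply the Ensure/Release pairs are nested.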
+ assert previous_state == PyGILState_UNLOCKED + assert ec.cpyext_gilstate_counter_noleave == 0 + # + return rffi.cast(PyGILState_STATE, previous_state) + + at cpython_api([PyGILState_STATE], lltype.Void, gil="pygilstate_release") +def PyGILState_Release(space, oldstate): + oldstate = rffi.cast(lltype.Signed, oldstate) + ec = space.getexecutioncontext() + if ec.cpyext_gilstate_counter_noleave > 0: + ec.cpyext_gilstate_counter_noleave -= 1 + else: + assert ec.cpyext_gilstate_counter_noleave == 0 + assert oldstate == PyGILState_UNLOCKED + space.threadlocals.leave_thread(space) @cpython_api([], PyInterpreterState, error=CANNOT_FAIL) def PyInterpreterState_Head(space): diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -100,8 +100,11 @@ return thread.get_ident() def get_or_make_ident(): - assert we_are_translated() - return tlfield_thread_ident.get_or_make_raw() + if we_are_translated(): + return tlfield_thread_ident.get_or_make_raw() + else: + import thread + retrun thread.get_ident() @specialize.arg(0) def start_new_thread(x, y): From pypy.commits at gmail.com Tue Mar 22 10:35:25 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 22 Mar 2016 07:35:25 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-gil-ensure: fixes Message-ID: <56f1582d.45d61c0a.43f97.1211@mx.google.com> Author: Armin Rigo Branch: cpyext-gil-ensure Changeset: r83252:3d0bee673d77 Date: 2016-03-22 15:34 +0100 http://bitbucket.org/pypy/pypy/changeset/3d0bee673d77/ Log: fixes diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -268,6 +268,9 @@ argnames, varargname, kwargname = pycode.cpython_code_signature(callable.func_code) assert argnames[0] == 'space' + if gil == 'pygilstate_ensure': + assert argnames[-1] == 'previous_state' + del argnames[-1] self.argnames = argnames[1:] assert len(self.argnames) == len(self.argtypes) self.gil = gil @@ -678,9 +681,9 @@ assert (gil is None or gil_acquire or gil_release or pygilstate_ensure or pygilstate_release) deadlock_error = ("GIL deadlock detected when a CPython C extension " - "module calls back %r" % (callable.__name__,)) + "module calls %r" % (callable.__name__,)) no_gil_error = ("GIL not held when a CPython C extension " - "module calls back %r" % (callable.__name__,)) + "module calls %r" % (callable.__name__,)) @specialize.ll() def wrapper(*args): @@ -717,7 +720,8 @@ try: if not we_are_translated() and DEBUG_WRAPPER: print >>sys.stderr, callable, - assert len(args) == len(callable.api_func.argtypes) + assert len(args) == (len(callable.api_func.argtypes) + + pygilstate_ensure) for i, (typ, is_wrapped) in argtypes_enum_ui: arg = args[i] if is_PyObject(typ) and is_wrapped: @@ -726,6 +730,8 @@ else: arg_conv = arg boxed_args += (arg_conv, ) + if pygilstate_ensure: + boxed_args += (args[-1], ) state = space.fromcache(State) try: result = callable(space, *boxed_args) diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -26,7 +26,6 @@ # Should compile at least module.test() - @pytest.mark.xfail(reason='hangs at rgil.acquire', run=False) def test_gilstate(self): module = self.import_extension('foo', [ ("double_ensure", "METH_O", diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -104,7 +104,7 @@ return 
tlfield_thread_ident.get_or_make_raw() else: import thread - retrun thread.get_ident() + return thread.get_ident() @specialize.arg(0) def start_new_thread(x, y): From pypy.commits at gmail.com Tue Mar 22 10:52:00 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 22 Mar 2016 07:52:00 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-gil-ensure: test_pystate seems to work (apart from test_frame_tstate_tracing) Message-ID: <56f15c10.6614c20a.b0622.ffffe3ed@mx.google.com> Author: Armin Rigo Branch: cpyext-gil-ensure Changeset: r83253:c33af3dd078e Date: 2016-03-22 15:50 +0100 http://bitbucket.org/pypy/pypy/changeset/c33af3dd078e/ Log: test_pystate seems to work (apart from test_frame_tstate_tracing) From pypy.commits at gmail.com Tue Mar 22 10:52:02 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 22 Mar 2016 07:52:02 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: hg merge cpyext-gil-ensure Message-ID: <56f15c12.01adc20a.2c7d4.ffffe842@mx.google.com> Author: Armin Rigo Branch: cpyext-ext Changeset: r83254:9b17e26b2764 Date: 2016-03-22 15:51 +0100 http://bitbucket.org/pypy/pypy/changeset/9b17e26b2764/ Log: hg merge cpyext-gil-ensure Really implement PyGILState_Ensure/Release diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -37,6 +37,8 @@ from rpython.tool.sourcetools import func_with_new_name from rpython.rtyper.lltypesystem.lloperation import llop from rpython.rlib import rawrefcount +from rpython.rlib import rthread +from rpython.rlib.debug import fatalerror_notb DEBUG_WRAPPER = True @@ -192,6 +194,61 @@ # exceptions generate a OperationError(w_SystemError); and the funtion returns # the error value specifed in the API. # +# Handling of the GIL +# ------------------- +# +# We add a global variable 'cpyext_glob_tid' that contains a thread +# id. Invariant: this variable always contain 0 when the PyPy GIL is +# released. It should also contain 0 when regular RPython code +# executes. In non-cpyext-related code, it will thus always be 0. +# +# **make_generic_cpy_call():** RPython to C, with the GIL held. Before +# the call, must assert that the global variable is 0 and set the +# current thread identifier into the global variable. After the call, +# assert that the global variable still contains the current thread id, +# and reset it to 0. +# +# **make_wrapper():** C to RPython; by default assume that the GIL is +# held, but accepts gil="acquire", "release", "around", +# "pygilstate_ensure", "pygilstate_release". +# +# When a wrapper() is called: +# +# * "acquire": assert that the GIL is not currently held, i.e. the +# global variable does not contain the current thread id (otherwise, +# deadlock!). Acquire the PyPy GIL. After we acquired it, assert +# that the global variable is 0 (it must be 0 according to the +# invariant that it was 0 immediately before we acquired the GIL, +# because the GIL was released at that point). +# +# * gil=None: we hold the GIL already. Assert that the current thread +# identifier is in the global variable, and replace it with 0. +# +# * "pygilstate_ensure": if the global variable contains the current +# thread id, replace it with 0 and set the extra arg to 0. Otherwise, +# do the "acquire" and set the extra arg to 1. Then we'll call +# pystate.py:PyGILState_Ensure() with this extra arg, which will do +# the rest of the logic. +# +# When a wrapper() returns, first assert that the global variable is +# still 0, and then: +# +# * "release": release the PyPy GIL. 
The global variable was 0 up to +# and including at the point where we released the GIL, but afterwards +# it is possible that the GIL is acquired by a different thread very +# quickly. +# +# * gil=None: we keep holding the GIL. Set the current thread +# identifier into the global variable. +# +# * "pygilstate_release": if the argument is PyGILState_UNLOCKED, +# release the PyPy GIL; otherwise, set the current thread identifier +# into the global variable. The rest of the logic of +# PyGILState_Release() should be done before, in pystate.py. + +cpyext_glob_tid_ptr = lltype.malloc(rffi.CArray(lltype.Signed), 1, + flavor='raw', immortal=True, zero=True) + cpyext_namespace = NameManager('cpyext_') @@ -211,6 +268,9 @@ argnames, varargname, kwargname = pycode.cpython_code_signature(callable.func_code) assert argnames[0] == 'space' + if gil == 'pygilstate_ensure': + assert argnames[-1] == 'previous_state' + del argnames[-1] self.argnames = argnames[1:] assert len(self.argnames) == len(self.argtypes) self.gil = gil @@ -616,7 +676,14 @@ fatal_value = callable.api_func.restype._defl() gil_acquire = (gil == "acquire" or gil == "around") gil_release = (gil == "release" or gil == "around") - assert gil is None or gil_acquire or gil_release + pygilstate_ensure = (gil == "pygilstate_ensure") + pygilstate_release = (gil == "pygilstate_release") + assert (gil is None or gil_acquire or gil_release + or pygilstate_ensure or pygilstate_release) + deadlock_error = ("GIL deadlock detected when a CPython C extension " + "module calls %r" % (callable.__name__,)) + no_gil_error = ("GIL not held when a CPython C extension " + "module calls %r" % (callable.__name__,)) @specialize.ll() def wrapper(*args): @@ -624,8 +691,27 @@ from pypy.module.cpyext.pyobject import as_pyobj # we hope that malloc removal removes the newtuple() that is # inserted exactly here by the varargs specializer + + # see "Handling of the GIL" above (careful, we don't have the GIL here) + tid = rthread.get_or_make_ident() if gil_acquire: + if cpyext_glob_tid_ptr[0] == tid: + fatalerror_notb(deadlock_error) rgil.acquire() + assert cpyext_glob_tid_ptr[0] == 0 + elif pygilstate_ensure: + from pypy.module.cpyext import pystate + if cpyext_glob_tid_ptr[0] == tid: + cpyext_glob_tid_ptr[0] = 0 + args += (pystate.PyGILState_LOCKED,) + else: + rgil.acquire() + args += (pystate.PyGILState_UNLOCKED,) + else: + if cpyext_glob_tid_ptr[0] != tid: + fatalerror_notb(no_gil_error) + cpyext_glob_tid_ptr[0] = 0 + rffi.stackcounter.stacks_counter += 1 llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py retval = fatal_value @@ -634,7 +720,8 @@ try: if not we_are_translated() and DEBUG_WRAPPER: print >>sys.stderr, callable, - assert len(args) == len(callable.api_func.argtypes) + assert len(args) == (len(callable.api_func.argtypes) + + pygilstate_ensure) for i, (typ, is_wrapped) in argtypes_enum_ui: arg = args[i] if is_PyObject(typ) and is_wrapped: @@ -643,6 +730,8 @@ else: arg_conv = arg boxed_args += (arg_conv, ) + if pygilstate_ensure: + boxed_args += (args[-1], ) state = space.fromcache(State) try: result = callable(space, *boxed_args) @@ -702,8 +791,20 @@ pypy_debug_catch_fatal_exception() assert False rffi.stackcounter.stacks_counter -= 1 - if gil_release: + + # see "Handling of the GIL" above + assert cpyext_glob_tid_ptr[0] == 0 + if pygilstate_release: + from pypy.module.cpyext import pystate + arg = rffi.cast(lltype.Signed, args[-1]) + unlock = (arg == pystate.PyGILState_UNLOCKED) + else: + unlock = gil_release + if unlock: rgil.release() + else: + 
cpyext_glob_tid_ptr[0] = tid + return retval callable._always_inline_ = 'try' wrapper.__name__ = "wrapper for %r" % (callable, ) @@ -1349,10 +1450,17 @@ arg = as_pyobj(space, arg) boxed_args += (arg,) + # see "Handling of the GIL" above + tid = rthread.get_ident() + assert cpyext_glob_tid_ptr[0] == 0 + cpyext_glob_tid_ptr[0] = tid + try: # Call the function result = call_external_function(func, *boxed_args) finally: + assert cpyext_glob_tid_ptr[0] == tid + cpyext_glob_tid_ptr[0] = 0 keepalive_until_here(*keepalives) if is_PyObject(RESULT_TYPE): diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -204,18 +204,43 @@ compile time.""" PyGILState_STATE = rffi.INT +PyGILState_LOCKED = 0 +PyGILState_UNLOCKED = 1 - at cpython_api([], PyGILState_STATE, error=CANNOT_FAIL, gil="acquire") -def PyGILState_Ensure(space): - # XXX XXX XXX THIS IS A VERY MINIMAL IMPLEMENTATION THAT WILL HAPPILY - # DEADLOCK IF CALLED TWICE ON THE SAME THREAD, OR CRASH IF CALLED IN A - # NEW THREAD. We should very carefully follow what CPython does instead. - return rffi.cast(PyGILState_STATE, 0) +ExecutionContext.cpyext_gilstate_counter_noleave = 0 - at cpython_api([PyGILState_STATE], lltype.Void, gil="release") -def PyGILState_Release(space, state): - # XXX XXX XXX We should very carefully follow what CPython does instead. - pass + at cpython_api([], PyGILState_STATE, error=CANNOT_FAIL, gil="pygilstate_ensure") +def PyGILState_Ensure(space, previous_state): + # The argument 'previous_state' is not part of the API; it is inserted + # by make_wrapper() and contains PyGILState_LOCKED/UNLOCKED based on + # the previous GIL state. + must_leave = space.threadlocals.try_enter_thread(space) + ec = space.getexecutioncontext() + if not must_leave: + # This is a counter of how many times we called try_enter_thread() + # and it returned False. In PyGILState_Release(), if this counter + # is greater than zero, we decrement it; only if the counter is + # already zero do we call leave_thread(). + ec.cpyext_gilstate_counter_noleave += 1 + else: + # This case is for when we just built a fresh threadlocals. + # We should only see it when we are in a new thread with no + # PyPy code below. 
+ assert previous_state == PyGILState_UNLOCKED + assert ec.cpyext_gilstate_counter_noleave == 0 + # + return rffi.cast(PyGILState_STATE, previous_state) + + at cpython_api([PyGILState_STATE], lltype.Void, gil="pygilstate_release") +def PyGILState_Release(space, oldstate): + oldstate = rffi.cast(lltype.Signed, oldstate) + ec = space.getexecutioncontext() + if ec.cpyext_gilstate_counter_noleave > 0: + ec.cpyext_gilstate_counter_noleave -= 1 + else: + assert ec.cpyext_gilstate_counter_noleave == 0 + assert oldstate == PyGILState_UNLOCKED + space.threadlocals.leave_thread(space) @cpython_api([], PyInterpreterState, error=CANNOT_FAIL) def PyInterpreterState_Head(space): diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py --- a/pypy/module/cpyext/test/test_pystate.py +++ b/pypy/module/cpyext/test/test_pystate.py @@ -26,7 +26,6 @@ # Should compile at least module.test() - @pytest.mark.xfail(reason='hangs at rgil.acquire', run=False) def test_gilstate(self): module = self.import_extension('foo', [ ("double_ensure", "METH_O", diff --git a/rpython/rlib/rthread.py b/rpython/rlib/rthread.py --- a/rpython/rlib/rthread.py +++ b/rpython/rlib/rthread.py @@ -100,8 +100,11 @@ return thread.get_ident() def get_or_make_ident(): - assert we_are_translated() - return tlfield_thread_ident.get_or_make_raw() + if we_are_translated(): + return tlfield_thread_ident.get_or_make_raw() + else: + import thread + return thread.get_ident() @specialize.arg(0) def start_new_thread(x, y): From pypy.commits at gmail.com Tue Mar 22 12:04:49 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 22 Mar 2016 09:04:49 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: Fix for test_frame_tstate_tracing: Message-ID: <56f16d21.8216c20a.704ee.fffff2f9@mx.google.com> Author: Armin Rigo Branch: cpyext-ext Changeset: r83255:29df5133bc06 Date: 2016-03-22 17:04 +0100 http://bitbucket.org/pypy/pypy/changeset/29df5133bc06/ Log: Fix for test_frame_tstate_tracing: * revert ce2053a9cdeb * extend RPyThreadStart to RPyThreadStartEx with an extra 'arg' * have PyThread_start_new_thread() call directly RPyThreadStartEx in C * fix a test problem where CPython kills the content of 'thread._local' instances as soon as a C-to-Python call returns, if we're in a context where the C thread was not started with CPython's official thread module diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -490,6 +490,7 @@ 'PyThread_create_key', 'PyThread_delete_key', 'PyThread_set_key_value', 'PyThread_get_key_value', 'PyThread_delete_key_value', 'PyThread_ReInitTLS', 'PyThread_init_thread', + 'PyThread_start_new_thread', 'PyStructSequence_InitType', 'PyStructSequence_New', 'PyStructSequence_UnnamedField', diff --git a/pypy/module/cpyext/include/pythread.h b/pypy/module/cpyext/include/pythread.h --- a/pypy/module/cpyext/include/pythread.h +++ b/pypy/module/cpyext/include/pythread.h @@ -18,6 +18,8 @@ #define NOWAIT_LOCK 0 PyAPI_FUNC(void) PyThread_release_lock(PyThread_type_lock); +PyAPI_FUNC(long) PyThread_start_new_thread(void (*func)(void *), void *arg); + /* Thread Local Storage (TLS) API */ PyAPI_FUNC(int) PyThread_create_key(void); PyAPI_FUNC(void) PyThread_delete_key(int); diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py --- a/pypy/module/cpyext/pystate.py +++ b/pypy/module/cpyext/pystate.py @@ -3,6 +3,7 @@ from pypy.module.cpyext.pyobject import PyObject, Py_DecRef, make_ref, from_ref from 
rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib import rthread +from rpython.rlib.objectmodel import we_are_translated PyInterpreterStateStruct = lltype.ForwardReference() PyInterpreterState = lltype.Ptr(PyInterpreterStateStruct) @@ -54,15 +55,6 @@ return 0 return 1 -thread_func = lltype.Ptr(lltype.FuncType([rffi.VOIDP], lltype.Void)) - at cpython_api([thread_func, rffi.VOIDP], rffi.INT_real, error=-1, gil='release') -def PyThread_start_new_thread(space, func, arg): - from pypy.module.thread import os_thread - w_args = space.newtuple([space.wrap(rffi.cast(lltype.Signed, arg)),]) - w_func = os_thread.W_WrapThreadFunc(func) - os_thread.start_new_thread(space, w_func, w_args) - return 0 - # XXX: might be generally useful def encapsulator(T, flavor='raw', dealloc=None): class MemoryCapsule(object): @@ -209,6 +201,23 @@ ExecutionContext.cpyext_gilstate_counter_noleave = 0 +def _workaround_cpython_untranslated(space): + # Workaround when not translated. The problem is that + # space.threadlocals.get_ec() is based on "thread._local", but + # CPython will clear a "thread._local" as soon as CPython's + # PyThreadState goes away. This occurs even if we're in a thread + # created from C and we're going to call some more Python code + # from this thread. This case shows up in + # test_pystate.test_frame_tstate_tracing. + def get_possibly_deleted_ec(): + ec1 = space.threadlocals.raw_thread_local.get() + ec2 = space.threadlocals._valuedict.get(rthread.get_ident(), None) + if ec1 is None and ec2 is not None: + space.threadlocals.raw_thread_local.set(ec2) + return space.threadlocals.__class__.get_ec(space.threadlocals) + space.threadlocals.get_ec = get_possibly_deleted_ec + + @cpython_api([], PyGILState_STATE, error=CANNOT_FAIL, gil="pygilstate_ensure") def PyGILState_Ensure(space, previous_state): # The argument 'previous_state' is not part of the API; it is inserted @@ -228,6 +237,8 @@ # PyPy code below. assert previous_state == PyGILState_UNLOCKED assert ec.cpyext_gilstate_counter_noleave == 0 + if not we_are_translated(): + _workaround_cpython_untranslated(space) # return rffi.cast(PyGILState_STATE, previous_state) diff --git a/pypy/module/cpyext/src/pythread.c b/pypy/module/cpyext/src/pythread.c --- a/pypy/module/cpyext/src/pythread.c +++ b/pypy/module/cpyext/src/pythread.c @@ -64,6 +64,13 @@ RPyThreadReleaseLock((struct RPyOpaque_ThreadLock*)lock); } +long +PyThread_start_new_thread(void (*func)(void *), void *arg) +{ + PyThread_init_thread(); + return RPyThreadStartEx(func, arg); +} + /* ------------------------------------------------------------------------ Per-thread data ("key") support. 
diff --git a/pypy/module/thread/os_thread.py b/pypy/module/thread/os_thread.py --- a/pypy/module/thread/os_thread.py +++ b/pypy/module/thread/os_thread.py @@ -6,9 +6,7 @@ from rpython.rlib import rthread from pypy.module.thread.error import wrap_thread_error from pypy.interpreter.error import OperationError, oefmt -from pypy.interpreter.gateway import unwrap_spec, Arguments, interp2app -from pypy.interpreter.baseobjspace import W_Root -from pypy.interpreter.typedef import TypeDef +from pypy.interpreter.gateway import unwrap_spec, Arguments # Here are the steps performed to start a new thread: # @@ -163,25 +161,6 @@ if w_threading is not None: space.call_method(w_threading, "_after_fork") -class W_WrapThreadFunc(W_Root): - ''' Wrap a cpyext.pystate.thread_func, which - has the signature void func(void *) - ''' - def __init__(self, func): - self.func = func - - def descr_call(self, space, w_arg): - from rpython.rtyper.lltypesystem import rffi - try: - arg = rffi.cast(rffi.VOIDP, space.int_w(w_arg)) - self.func(arg) - except Exception as e: - import pdb;pdb.set_trace() - -W_WrapThreadFunc.typedef = TypeDef("hiddenclass", - __call__ = interp2app(W_WrapThreadFunc.descr_call), -) - def start_new_thread(space, w_callable, w_args, w_kwargs=None): """Start a new thread and return its identifier. The thread will call the function with positional arguments from the tuple args and keyword arguments diff --git a/rpython/translator/c/src/thread_nt.c b/rpython/translator/c/src/thread_nt.c --- a/rpython/translator/c/src/thread_nt.c +++ b/rpython/translator/c/src/thread_nt.c @@ -18,7 +18,8 @@ typedef struct RPyOpaque_ThreadLock NRMUTEX, *PNRMUTEX; typedef struct { - void (*func)(void); + void (*func)(void *); + void *arg; long id; HANDLE done; } callobj; @@ -30,20 +31,29 @@ { callobj *obj = (callobj*)call; /* copy callobj since other thread might free it before we're done */ - void (*func)(void) = obj->func; + void (*func)(void *) = obj->func; + void *arg = obj->arg; obj->id = GetCurrentThreadId(); ReleaseSemaphore(obj->done, 1, NULL); - func(); + func(arg); } long RPyThreadStart(void (*func)(void)) { + /* a kind-of-invalid cast, but the 'func' passed here doesn't expect + any argument, so it's unlikely to cause problems */ + return RPyThreadStartEx((void(*)(void *))func, NULL); +} + +long RPyThreadStartEx(void (*func)(void *), void *arg) +{ unsigned long rv; callobj obj; obj.id = -1; /* guilty until proved innocent */ obj.func = func; + obj.arg = arg; obj.done = CreateSemaphore(NULL, 0, 1, NULL); if (obj.done == NULL) return -1; diff --git a/rpython/translator/c/src/thread_nt.h b/rpython/translator/c/src/thread_nt.h --- a/rpython/translator/c/src/thread_nt.h +++ b/rpython/translator/c/src/thread_nt.h @@ -15,6 +15,8 @@ RPY_EXTERN long RPyThreadStart(void (*func)(void)); RPY_EXTERN +long RPyThreadStartEx(void (*func)(void *), void *arg); +RPY_EXTERN int RPyThreadLockInit(struct RPyOpaque_ThreadLock *lock); RPY_EXTERN void RPyOpaqueDealloc_ThreadLock(struct RPyOpaque_ThreadLock *lock); diff --git a/rpython/translator/c/src/thread_pthread.c b/rpython/translator/c/src/thread_pthread.c --- a/rpython/translator/c/src/thread_pthread.c +++ b/rpython/translator/c/src/thread_pthread.c @@ -58,13 +58,14 @@ static long _pypythread_stacksize = 0; -static void *bootstrap_pthread(void *func) +long RPyThreadStart(void (*func)(void)) { - ((void(*)(void))func)(); - return NULL; + /* a kind-of-invalid cast, but the 'func' passed here doesn't expect + any argument, so it's unlikely to cause problems */ + return 
RPyThreadStartEx((void(*)(void *))func, NULL); } -long RPyThreadStart(void (*func)(void)) +long RPyThreadStartEx(void (*func)(void *), void *arg) { pthread_t th; int status; @@ -94,8 +95,12 @@ #else (pthread_attr_t*)NULL, #endif - bootstrap_pthread, - (void *)func + /* the next line does an invalid cast: pthread_create() will see a + function that returns random garbage. The code is the same as + CPython: this random garbage will be stored for pthread_join() + to return, but in this case pthread_join() is never called. */ + (void* (*)(void *))func, + (void *)arg ); #if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED) diff --git a/rpython/translator/c/src/thread_pthread.h b/rpython/translator/c/src/thread_pthread.h --- a/rpython/translator/c/src/thread_pthread.h +++ b/rpython/translator/c/src/thread_pthread.h @@ -62,6 +62,8 @@ RPY_EXTERN long RPyThreadStart(void (*func)(void)); RPY_EXTERN +long RPyThreadStartEx(void (*func)(void *), void *arg); +RPY_EXTERN int RPyThreadLockInit(struct RPyOpaque_ThreadLock *lock); RPY_EXTERN void RPyOpaqueDealloc_ThreadLock(struct RPyOpaque_ThreadLock *lock); From pypy.commits at gmail.com Tue Mar 22 12:21:28 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 22 Mar 2016 09:21:28 -0700 (PDT) Subject: [pypy-commit] pypy rposix-for-3: Add futimens(), utimensat() Message-ID: <56f17108.85371c0a.8f590.3fbb@mx.google.com> Author: Ronan Lamy Branch: rposix-for-3 Changeset: r83256:e6ae15b21fbc Date: 2016-03-22 16:20 +0000 http://bitbucket.org/pypy/pypy/changeset/e6ae15b21fbc/ Log: Add futimens(), utimensat() diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -1738,6 +1738,9 @@ AT_FDCWD = rffi_platform.DefinedConstantInteger('AT_FDCWD') AT_SYMLINK_NOFOLLOW = rffi_platform.DefinedConstantInteger('AT_SYMLINK_NOFOLLOW') AT_EACCESS = rffi_platform.DefinedConstantInteger('AT_EACCESS') + TIMESPEC = rffi_platform.Struct('struct timespec', [ + ('tv_sec', rffi.TIME_T), + ('tv_nsec', rffi.LONG)]) for _name in """faccessat fchdir fchmod fchmodat fchown fchownat fexecve fdopendir fpathconf fstat fstatat fstatvfs ftruncate @@ -1747,6 +1750,7 @@ locals()['HAVE_%s' % _name.upper()] = rffi_platform.Has(_name) cConfig = rffi_platform.configure(CConfig) globals().update(cConfig) +TIMESPEC2P = rffi.CArrayPtr(TIMESPEC) if HAVE_FACCESSAT: c_faccessat = external('faccessat', @@ -1769,7 +1773,8 @@ c_linkat = external('linkat', [rffi.INT, rffi.CCHARP, rffi.INT, rffi.CCHARP, rffi.INT], rffi.INT) - def linkat(src, dst, src_dir_fd=AT_FDCWD, dst_dir_fd=AT_FDCWD, follow_symlinks=True): + def linkat(src, dst, src_dir_fd=AT_FDCWD, dst_dir_fd=AT_FDCWD, + follow_symlinks=True): """Thin wrapper around linkat(2) with an interface similar to Python3's os.link() """ @@ -1781,7 +1786,30 @@ handle_posix_error('linkat', error) if HAVE_FUTIMENS: - pass + c_futimens = external('futimens', [rffi.INT, TIMESPEC2P], rffi.INT) + + def futimens(fd, atime, atime_ns, mtime, mtime_ns): + l_times = lltype.malloc(TIMESPEC, 2, flavor='raw') + rffi.setintfield(l_times[0], 'c_tv_sec', atime) + rffi.setintfield(l_times[0], 'c_tv_nsec', atime_ns) + rffi.setintfield(l_times[1], 'c_tv_sec', mtime) + rffi.setintfield(l_times[1], 'c_tv_nsec', mtime_ns) + error = c_futimens(fd, l_times) + handle_posix_error('futimens', error) if HAVE_UTIMENSAT: - pass + c_utimensat = external('utimensat', [rffi.INT, TIMESPEC2P], rffi.INT) + + def utimensat(pathname, atime, atime_ns, mtime, mtime_ns, + dir_fd=AT_FDCWD, follow_symlinks=True): + l_times 
= lltype.malloc(TIMESPEC, 2, flavor='raw') + rffi.setintfield(l_times[0], 'c_tv_sec', atime) + rffi.setintfield(l_times[0], 'c_tv_nsec', atime_ns) + rffi.setintfield(l_times[1], 'c_tv_sec', mtime) + rffi.setintfield(l_times[1], 'c_tv_nsec', mtime_ns) + if follow_symlinks: + flag = 0 + else: + flag = AT_SYMLINK_NOFOLLOW + error = c_futimens(dir_fd, pathname, l_times, flag) + handle_posix_error('utimensat', error) From pypy.commits at gmail.com Tue Mar 22 12:47:38 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 22 Mar 2016 09:47:38 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: temporary?: add some jit_debug operations into the log after the trace to show Message-ID: <56f1772a.c9161c0a.b2f40.3fd5@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83257:d948bc9b9400 Date: 2016-03-22 15:11 +0100 http://bitbucket.org/pypy/pypy/changeset/d948bc9b9400/ Log: temporary?: add some jit_debug operations into the log after the trace to show the conditions of a guard_compatible diff --git a/rpython/jit/metainterp/compatible.py b/rpython/jit/metainterp/compatible.py --- a/rpython/jit/metainterp/compatible.py +++ b/rpython/jit/metainterp/compatible.py @@ -49,16 +49,16 @@ cond.activate_secondary(ref, loop_token) return True - def prepare_const_arg_call(self, op): + def prepare_const_arg_call(self, op, optimizer): from rpython.jit.metainterp.quasiimmut import QuasiImmutDescr copied_op = op.copy() copied_op.setarg(1, self.known_valid) if op.numargs() == 2: - return copied_op, PureCallCondition(op) + return copied_op, PureCallCondition(op, optimizer.metainterp_sd) arg2 = copied_op.getarg(2) if arg2.is_constant(): # already a constant, can just use PureCallCondition - return copied_op, PureCallCondition(op) + return copied_op, PureCallCondition(op, optimizer.metainterp_sd) # really simple-minded pattern matching # the order of things is like this: @@ -83,9 +83,17 @@ return None, None copied_op.setarg(2, qmutdescr.constantfieldbox) self.last_quasi_immut_field_op = None - return copied_op, QuasiimmutGetfieldAndPureCallCondition(op, qmutdescr) + return copied_op, QuasiimmutGetfieldAndPureCallCondition( + op, qmutdescr, optimizer.metainterp_sd) + + def repr_of_conditions(self, argrepr="?"): + return "\n".join([cond.repr(argrepr) for cond in self.conditions]) + class Condition(object): + def __init__(self, metainterp_sd): + self.metainterp_sd = metainterp_sd + def check(self, cpu, ref): raise NotImplementedError @@ -98,9 +106,22 @@ def same_cond(self, other, res): return False + def repr(self): + return "" + + @staticmethod + def _repr_const(arg): + from rpython.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr + if isinstance(arg, ConstInt): + return str(arg.value) + elif isinstance(arg, ConstPtr): + return arg._getrepr_() + elif isinstance(arg, ConstFloat): + return str(arg.getfloat()) class PureCallCondition(Condition): - def __init__(self, op): + def __init__(self, op, metainterp_sd): + Condition.__init__(self, metainterp_sd) args = op.getarglist()[:] args[1] = None self.args = args @@ -142,9 +163,22 @@ return False return True + def repr(self, argrepr="?"): + addr = self.args[0].getaddr() + funcname = self.metainterp_sd.get_name_from_address(addr) + if not funcname: + funcname = hex(self.args[0].getint()) + result = self._repr_const(self.res) + if len(self.args) == 2: + extra = '' + else: + extra = ', ' + ', '.join([self._repr_const(arg) for arg in self.args[2:]]) + return "compatible if %s == %s(%s%s)" % (result, funcname, argrepr, extra) + class 
QuasiimmutGetfieldAndPureCallCondition(PureCallCondition): - def __init__(self, op, qmutdescr): + def __init__(self, op, qmutdescr, metainterp_sd): + Condition.__init__(self, metainterp_sd) args = op.getarglist()[:] args[1] = None args[2] = None @@ -209,3 +243,15 @@ if not self.args[i].same_constant(other.args[i]): return False return True + + def repr(self, argrepr="?"): + addr = self.args[0].getaddr() + funcname = self.metainterp_sd.get_name_from_address(addr) + result = self._repr_const(self.res) + if len(self.args) == 3: + extra = '' + else: + extra = ', ' + ', '.join([self._repr_const(arg) for arg in self.args[3:]]) + attrname = self.fielddescr.repr_of_descr() + return "compatible if %s == %s(%s, %s.%s%s)" % ( + result, funcname, argrepr, argrepr, attrname, extra) diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -1139,6 +1139,11 @@ guard_value_op.getarg(0)) ResumeGuardDescr.make_a_counter_per_value(self, guard_value_op, index) + def repr_of_conditions(self, argrepr="?"): + if self._compatibility_conditions: + return self._compatibility_conditions.repr_of_conditions(argrepr) + return '' + # ____________________________________________________________ memory_error = MemoryError() diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py --- a/rpython/jit/metainterp/logger.py +++ b/rpython/jit/metainterp/logger.py @@ -181,7 +181,8 @@ s_offset = "" else: s_offset = "+%d: " % offset - args = ", ".join([self.repr_of_arg(op.getarg(i)) for i in range(op.numargs())]) + argreprs = [self.repr_of_arg(op.getarg(i)) for i in range(op.numargs())] + args = ", ".join(argreprs) if op.type != 'v': res = self.repr_of_arg(op) + " = " @@ -204,6 +205,17 @@ for arg in op.getfailargs()]) + ']' else: fail_args = '' + if op.getopnum() == rop.GUARD_COMPATIBLE and op.getdescr() is not None: + from rpython.jit.metainterp.compile import GuardCompatibleDescr + descr = op.getdescr() + assert isinstance(descr, GuardCompatibleDescr) + conditions = descr.repr_of_conditions(argreprs[0]) + if conditions: + # make fake jit-debug ops to print + conditions = conditions.split("\n") + for i in range(len(conditions)): + conditions[i] = "jit_debug('%s')" % (conditions[i], ) + fail_args += "\n" + "\n".join(conditions) return s_offset + res + op.getopname() + '(' + args + ')' + fail_args diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -142,7 +142,8 @@ ccond = info._compatibility_conditions if ccond: # it's subject to guard_compatible - copied_op, cond = ccond.prepare_const_arg_call(op) + copied_op, cond = ccond.prepare_const_arg_call( + op, self.optimizer) if copied_op: result = self._can_optimize_call_pure(copied_op) if result is not None: From pypy.commits at gmail.com Tue Mar 22 12:47:39 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 22 Mar 2016 09:47:39 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: need to try the descrs in order old-to-new Message-ID: <56f1772b.01adc20a.2c7d4.1458@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83258:8a97958ad065 Date: 2016-03-22 15:26 +0100 http://bitbucket.org/pypy/pypy/changeset/8a97958ad065/ Log: need to try the descrs in order old-to-new diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- 
a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -1089,23 +1089,26 @@ # XXX think about what is being kept alive here self._compatibility_conditions = None self.failarg_index = -1 - self._prev_guard_compatible_descr = None + # list of descrs about the same variable, potentially shared with + # subsequent guards in bridges + self.guard_descrs_list = [self] def handle_fail(self, deadframe, metainterp_sd, jitdriver_sd): index = intmask(self.status >> self.ST_SHIFT) typetag = intmask(self.status & self.ST_TYPE_MASK) assert typetag == self.TY_REF # for now refval = metainterp_sd.cpu.get_value_direct(deadframe, 'r', index) - curr = self - while curr: + if not we_are_translated(): + assert self in self.guard_descrs_list + # need to do the checking oldest to newest, to check the most specific + # condition first + for curr in self.guard_descrs_list: if curr.is_compatible(metainterp_sd.cpu, refval): from rpython.jit.metainterp.blackhole import resume_in_blackhole metainterp_sd.cpu.grow_guard_compatible_switch( curr.rd_loop_token, curr, refval) resume_in_blackhole(metainterp_sd, jitdriver_sd, self, deadframe) return - # try previous guards, maybe one of them would have matched - curr = curr._prev_guard_compatible_descr # a real failure return ResumeGuardDescr.handle_fail(self, deadframe, metainterp_sd, jitdriver_sd) @@ -1130,7 +1133,8 @@ # a guard_compatible about the same box newdescr = firstop.getdescr() assert isinstance(newdescr, GuardCompatibleDescr) - newdescr._prev_guard_compatible_descr = self + newdescr.guard_descrs_list = self.guard_descrs_list + self.guard_descrs_list.append(newdescr) ResumeGuardDescr.compile_and_attach( self, metainterp, new_loop, orig_inputargs) diff --git a/rpython/jit/metainterp/test/test_compatible.py b/rpython/jit/metainterp/test/test_compatible.py --- a/rpython/jit/metainterp/test/test_compatible.py +++ b/rpython/jit/metainterp/test/test_compatible.py @@ -222,3 +222,57 @@ # trace, two bridges, a finish bridge self.check_trace_count(4) + + def test_order_of_chained_guards(self): + class Obj(object): + def __init__(self): + self.m = Map() + class Map(object): + pass + + p1 = Obj() + p1.m.x = 5 + p1.m.y = 5 + + p2 = Obj() + p2.m.x = 5 + p2.m.y = 5 + + p3 = Obj() + p3.m.x = 5 + p3.m.y = 6 + driver = jit.JitDriver(greens = [], reds = ['n', 'x']) + + class A(object): + pass + + c = A() + c.count = 0 + @jit.elidable_compatible() + def check1(m, ignored): + c.count += 1 + return m.x + + @jit.elidable_compatible() + def check2(m, ignored): + c.count += 1 + return m.y + + def f(n, x): + while n > 0: + driver.can_enter_jit(n=n, x=x) + driver.jit_merge_point(n=n, x=x) + n -= check1(x.m, 7) + check2(x.m, 7) + + def main(): + check1(p1.m, 9) # make annotator not make argument constant + f(100, p1) + f(100, p3) # not compatible, so make a bridge + f(100, p2) # compatible with loop again, too bad + return c.count + + x = self.meta_interp(main, []) + + # trace, two bridges, a finish bridge + self.check_trace_count(4) + assert x < 50 From pypy.commits at gmail.com Tue Mar 22 12:47:41 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 22 Mar 2016 09:47:41 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: grumbl Message-ID: <56f1772d.49f9c20a.bf8c1.fffffcf0@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83259:fef96a0053b2 Date: 2016-03-22 16:03 +0100 http://bitbucket.org/pypy/pypy/changeset/fef96a0053b2/ Log: grumbl diff --git a/rpython/jit/metainterp/compatible.py b/rpython/jit/metainterp/compatible.py --- 
a/rpython/jit/metainterp/compatible.py +++ b/rpython/jit/metainterp/compatible.py @@ -115,7 +115,7 @@ if isinstance(arg, ConstInt): return str(arg.value) elif isinstance(arg, ConstPtr): - return arg._getrepr_() + return " elif isinstance(arg, ConstFloat): return str(arg.getfloat()) From pypy.commits at gmail.com Tue Mar 22 12:47:43 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 22 Mar 2016 09:47:43 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: stupid stupid, no cookie Message-ID: <56f1772f.a8c0c20a.c7c35.01ac@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83260:3333d9e3bd46 Date: 2016-03-22 17:46 +0100 http://bitbucket.org/pypy/pypy/changeset/3333d9e3bd46/ Log: stupid stupid, no cookie diff --git a/rpython/jit/metainterp/compatible.py b/rpython/jit/metainterp/compatible.py --- a/rpython/jit/metainterp/compatible.py +++ b/rpython/jit/metainterp/compatible.py @@ -115,7 +115,7 @@ if isinstance(arg, ConstInt): return str(arg.value) elif isinstance(arg, ConstPtr): - return " + return "" elif isinstance(arg, ConstFloat): return str(arg.getfloat()) From pypy.commits at gmail.com Tue Mar 22 13:47:15 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 22 Mar 2016 10:47:15 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: fix test_unroll_max_loops Message-ID: <56f18523.c5301c0a.95419.5959@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83261:9232addccca2 Date: 2016-03-22 19:46 +0200 http://bitbucket.org/pypy/pypy/changeset/9232addccca2/ Log: fix test_unroll_max_loops diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -187,6 +187,10 @@ self.inputargs = inputargs self.count = count + def cut_at(self, cut): + assert cut[1] > self.count + self.trace.cut_at(cut) + def get_iter(self, metainterp_sd=None): iter = TraceIterator(self.trace, self.start, self.trace._pos, self.inputargs, metainterp_sd=metainterp_sd) diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -561,6 +561,7 @@ if op.is_constant(): return # can happen e.g. if we postpone the operation that becomes # constant + # XXX kill op = self.replace_op_with(op, op.getopnum()) for i in range(op.numargs()): arg = self.force_box(op.getarg(i)) From pypy.commits at gmail.com Tue Mar 22 13:47:29 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 22 Mar 2016 10:47:29 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: catchup with default Message-ID: <56f18531.46fac20a.5cc1.2368@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83262:3ad35c51e7c7 Date: 2016-03-22 17:10 +0100 http://bitbucket.org/pypy/pypy/changeset/3ad35c51e7c7/ Log: catchup with default diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -19,3 +19,4 @@ 850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0 5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1 246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0 +bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1 diff --git a/pypy/doc/config/translation.gc.txt b/pypy/doc/config/translation.gc.txt --- a/pypy/doc/config/translation.gc.txt +++ b/pypy/doc/config/translation.gc.txt @@ -1,24 +1,26 @@ Choose the Garbage Collector used by the translated program. -The good performing collectors are "hybrid" and "minimark". 
-The default is "minimark". +The recommended default is "incminimark". - "ref": reference counting. Takes very long to translate and the result is - slow. + slow. Used only for tests. Don't use it for real RPython programs. - - "marksweep": naive mark & sweep. + - "none": no GC. Leaks everything. Don't use it for real RPython + programs: the rate of leaking is immense. - "semispace": a copying semi-space GC. - "generation": a generational GC using the semi-space GC for the older generation. - - "boehm": use the Boehm conservative GC. - - "hybrid": a hybrid collector of "generation" together with a mark-n-sweep old space - - "markcompact": a slow, but memory-efficient collector, - influenced e.g. by Smalltalk systems. + - "boehm": use the Boehm conservative GC. - "minimark": a generational mark-n-sweep collector with good performance. Includes page marking for large arrays. + + - "incminimark": like minimark, but adds incremental major + collections. Seems to come with no performance drawback over + "minimark", so it is the default. A few recent features of PyPy + (like cpyext) are only working with this GC. diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst --- a/pypy/doc/index-of-release-notes.rst +++ b/pypy/doc/index-of-release-notes.rst @@ -6,6 +6,7 @@ .. toctree:: + release-5.0.1.rst release-5.0.0.rst release-4.0.1.rst release-4.0.0.rst diff --git a/pypy/doc/release-5.0.1.rst b/pypy/doc/release-5.0.1.rst new file mode 100644 --- /dev/null +++ b/pypy/doc/release-5.0.1.rst @@ -0,0 +1,40 @@ +========== +PyPy 5.0.1 +========== + +We have released a bugfix for PyPy 5.0, after reports that the newly released +`lxml 3.6.0`_, which now supports PyPy 5.0 +, can `crash on large files`_. +Thanks to those who reported the crash. Please update, downloads are available +at pypy.org/download.html + +.. _`lxml 3.6.0`: https://pypi.python.org/pypi/lxml/3.6.0 +.. _`crash on large files`: https://bitbucket.org/pypy/pypy/issues/2260 + +The changes between PyPy 5.0 and 5.0.1 are only two bug fixes: one in +cpyext, which fixes notably (but not only) lxml; and another for a +corner case of the JIT. + +What is PyPy? +============= + +PyPy is a very compliant Python interpreter, almost a drop-in replacement for +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison) +due to its integrated tracing JIT compiler. + +We also welcome developers of other +`dynamic languages`_ to see what RPython can do for them. + +This release supports **x86** machines on most common operating systems +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD), +newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the +big- and little-endian variants of **PPC64** running Linux. + +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org +.. _`dynamic languages`: http://pypyjs.org + +Please update, and continue to help us make PyPy better. 
+ +Cheers + +The PyPy Team + diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -327,7 +327,7 @@ # XXX possibly adapt options using modules failures = create_cffi_import_libraries(exename, options, basedir) # if failures, they were already printed - print >> sys.stderr, str(exename),'successfully built, but errors while building the above modules will be ignored' + print >> sys.stderr, str(exename),'successfully built (errors, if any, while building the above modules are ignored)' driver.task_build_cffi_imports = types.MethodType(task_build_cffi_imports, driver) driver.tasks['build_cffi_imports'] = driver.task_build_cffi_imports, [compile_goal] driver.default_goal = 'build_cffi_imports' diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py --- a/pypy/interpreter/baseobjspace.py +++ b/pypy/interpreter/baseobjspace.py @@ -417,7 +417,10 @@ self.wait_for_thread_shutdown() w_exitfunc = self.sys.getdictvalue(self, 'exitfunc') if w_exitfunc is not None: - self.call_function(w_exitfunc) + try: + self.call_function(w_exitfunc) + except OperationError as e: + e.write_unraisable(self, 'sys.exitfunc == ', w_exitfunc) from pypy.interpreter.module import Module for w_mod in self.builtin_modules.values(): if isinstance(w_mod, Module) and w_mod.startup_called: diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py --- a/pypy/interpreter/test/test_objspace.py +++ b/pypy/interpreter/test/test_objspace.py @@ -416,3 +416,14 @@ i -= 1 assert i >= 0 gc.collect() + + def test_exitfunc_catches_exceptions(self): + from pypy.tool.pytest.objspace import maketestobjspace + space = maketestobjspace() + space.appexec([], """(): + import sys + sys.exitfunc = lambda: this_is_an_unknown_name + """) + space.finish() + # assert that we reach this point without getting interrupted + # by the OperationError(NameError) diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -124,7 +124,7 @@ s = rffi.charp2str(ptr) else: s = rffi.charp2strn(ptr, length) - return space.wrap(s) + return space.wrapbytes(s) # # pointer to a wchar_t: builds and returns a unicode if self.is_unichar_ptr_or_array(): @@ -372,15 +372,15 @@ rffi_fclose(self.llf) -def prepare_file_argument(space, fileobj): - fileobj.direct_flush() - if fileobj.cffi_fileobj is None: - fd = fileobj.direct_fileno() +def prepare_file_argument(space, w_fileobj): + w_fileobj.direct_flush() + if w_fileobj.cffi_fileobj is None: + fd = w_fileobj.direct_fileno() if fd < 0: raise OperationError(space.w_ValueError, space.wrap("file has no OS file descriptor")) try: - fileobj.cffi_fileobj = CffiFileObj(fd, fileobj.mode) + w_fileobj.cffi_fileobj = CffiFileObj(fd, w_fileobj.mode) except OSError, e: raise wrap_oserror(space, e) - return rffi.cast(rffi.CCHARP, fileobj.cffi_fileobj.llf) + return rffi.cast(rffi.CCHARP, w_fileobj.cffi_fileobj.llf) diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -833,14 +833,14 @@ modulename = py.path.local(eci.libraries[-1]) def dealloc_trigger(): - from pypy.module.cpyext.pyobject import _Py_Dealloc + from pypy.module.cpyext.pyobject import decref print 'dealloc_trigger...' 
while True: ob = rawrefcount.next_dead(PyObject) if not ob: break print ob - _Py_Dealloc(space, ob) + decref(space, ob) print 'dealloc_trigger DONE' return "RETRY" rawrefcount.init(dealloc_trigger) diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, cpython_struct, bootstrap_function, build_type_checkers, @@ -80,7 +80,8 @@ buflen = length + 1 py_str.c_size = length py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, buflen, - flavor='raw', zero=True) + flavor='raw', zero=True, + add_memory_pressure=True) return py_str def string_attach(space, py_obj, w_obj): @@ -133,8 +134,14 @@ if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: pass # typecheck returned "ok" without forcing 'ref' at all elif not PyString_Check(space, ref): # otherwise, use the alternate way - raise OperationError(space.w_TypeError, space.wrap( - "PyString_AsString only support strings")) + from pypy.module.cpyext.unicodeobject import ( + PyUnicode_Check, _PyUnicode_AsDefaultEncodedString) + if PyUnicode_Check(space, ref): + ref = _PyUnicode_AsDefaultEncodedString(space, ref, lltype.nullptr(rffi.CCHARP.TO)) + else: + raise oefmt(space.w_TypeError, + "expected string or Unicode object, %T found", + from_ref(space, ref)) ref_str = rffi.cast(PyStringObject, ref) if not ref_str.c_buffer: # copy string buffer @@ -146,8 +153,14 @@ @cpython_api([PyObject, rffi.CCHARPP, rffi.CArrayPtr(Py_ssize_t)], rffi.INT_real, error=-1) def PyString_AsStringAndSize(space, ref, buffer, length): if not PyString_Check(space, ref): - raise OperationError(space.w_TypeError, space.wrap( - "PyString_AsStringAndSize only support strings")) + from pypy.module.cpyext.unicodeobject import ( + PyUnicode_Check, _PyUnicode_AsDefaultEncodedString) + if PyUnicode_Check(space, ref): + ref = _PyUnicode_AsDefaultEncodedString(space, ref, lltype.nullptr(rffi.CCHARP.TO)) + else: + raise oefmt(space.w_TypeError, + "expected string or Unicode object, %T found", + from_ref(space, ref)) ref_str = rffi.cast(PyStringObject, ref) if not ref_str.c_buffer: # copy string buffer diff --git a/pypy/module/cpyext/include/unicodeobject.h b/pypy/module/cpyext/include/unicodeobject.h --- a/pypy/module/cpyext/include/unicodeobject.h +++ b/pypy/module/cpyext/include/unicodeobject.h @@ -20,8 +20,12 @@ typedef struct { PyObject_HEAD - Py_UNICODE *buffer; + Py_UNICODE *str; Py_ssize_t size; + long hash; /* Hash value; -1 if not set */ + PyObject *defenc; /* (Default) Encoded version as Python + string, or NULL; this is used for + implementing the buffer protocol */ } PyUnicodeObject; diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py --- a/pypy/module/cpyext/object.py +++ b/pypy/module/cpyext/object.py @@ -17,7 +17,8 @@ @cpython_api([Py_ssize_t], rffi.VOIDP) def PyObject_MALLOC(space, size): return lltype.malloc(rffi.VOIDP.TO, size, - flavor='raw', zero=True) + flavor='raw', zero=True, + add_memory_pressure=True) @cpython_api([rffi.VOIDP], lltype.Void) def PyObject_FREE(space, ptr): diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py --- a/pypy/module/cpyext/pyobject.py +++ b/pypy/module/cpyext/pyobject.py @@ -50,7 +50,8 @@ size += itemcount * pytype.c_tp_itemsize assert size >= 
rffi.sizeof(PyObject.TO) buf = lltype.malloc(rffi.VOIDP.TO, size, - flavor='raw', zero=True) + flavor='raw', zero=True, + add_memory_pressure=True) pyobj = rffi.cast(PyObject, buf) pyobj.c_ob_refcnt = 1 pyobj.c_ob_type = pytype diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py --- a/pypy/module/cpyext/state.py +++ b/pypy/module/cpyext/state.py @@ -147,10 +147,10 @@ """ def perform(self, executioncontext, frame): - from pypy.module.cpyext.pyobject import PyObject, _Py_Dealloc + from pypy.module.cpyext.pyobject import PyObject, decref while True: py_obj = rawrefcount.next_dead(PyObject) if not py_obj: break - _Py_Dealloc(self.space, py_obj) + decref(self.space, py_obj) diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -139,6 +139,44 @@ ]) module.getstring() + def test_py_string_as_string_Unicode(self): + module = self.import_extension('foo', [ + ("getstring_unicode", "METH_NOARGS", + """ + Py_UNICODE chars[] = {'t', 'e', 's', 't'}; + PyObject* u1 = PyUnicode_FromUnicode(chars, 4); + char *buf; + buf = PyString_AsString(u1); + if (buf == NULL) + return NULL; + if (buf[3] != 't') { + PyErr_SetString(PyExc_AssertionError, "Bad conversion"); + return NULL; + } + Py_DECREF(u1); + Py_INCREF(Py_None); + return Py_None; + """), + ("getstringandsize_unicode", "METH_NOARGS", + """ + Py_UNICODE chars[] = {'t', 'e', 's', 't'}; + PyObject* u1 = PyUnicode_FromUnicode(chars, 4); + char *buf; + Py_ssize_t len; + if (PyString_AsStringAndSize(u1, &buf, &len) < 0) + return NULL; + if (len != 4) { + PyErr_SetString(PyExc_AssertionError, "Bad Length"); + return NULL; + } + Py_DECREF(u1); + Py_INCREF(Py_None); + return Py_None; + """), + ]) + module.getstring_unicode() + module.getstringandsize_unicode() + def test_format_v(self): module = self.import_extension('foo', [ ("test_string_format_v", "METH_VARARGS", diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py --- a/pypy/module/cpyext/test/test_unicodeobject.py +++ b/pypy/module/cpyext/test/test_unicodeobject.py @@ -24,7 +24,7 @@ if(PyUnicode_GetSize(s) == 11) { result = 1; } - if(s->ob_type->tp_basicsize != sizeof(void*)*5) + if(s->ob_type->tp_basicsize != sizeof(void*)*7) result = 0; Py_DECREF(s); return PyBool_FromLong(result); @@ -66,6 +66,7 @@ c = PyUnicode_AsUnicode(s); c[0] = 'a'; c[1] = 0xe9; + c[2] = 0x00; c[3] = 'c'; return s; """), @@ -74,7 +75,35 @@ assert len(s) == 4 assert s == u'a�\x00c' + def test_hash(self): + module = self.import_extension('foo', [ + ("test_hash", "METH_VARARGS", + ''' + PyObject* obj = (PyTuple_GetItem(args, 0)); + long hash = ((PyUnicodeObject*)obj)->hash; + return PyLong_FromLong(hash); + ''' + ), + ]) + res = module.test_hash(u"xyz") + assert res == hash(u'xyz') + def test_default_encoded_string(self): + module = self.import_extension('foo', [ + ("test_default_encoded_string", "METH_O", + ''' + PyObject* result = _PyUnicode_AsDefaultEncodedString(args, "replace"); + Py_INCREF(result); + return result; + ''' + ), + ]) + res = module.test_default_encoded_string(u"xyz") + assert isinstance(res, str) + assert res == 'xyz' + res = module.test_default_encoded_string(u"caf\xe9") + assert isinstance(res, str) + assert res == 'caf?' 
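The unicode paths added to PyString_AsString and PyString_AsStringAndSize above come down to encoding the unicode with the default encoding once and caching the result on the object (the new "defenc" field). Below is a rough Python-level sketch of that behaviour; the class and method names are made up for illustration and are not the cpyext code:

    import sys

    class CachedDefaultEncoding(object):
        # toy stand-in for the 'defenc' slot added to PyUnicodeObject
        def __init__(self, u):
            self._u = u
            self._defenc = None              # filled lazily, then reused
        def as_default_encoded(self, errors='strict'):
            if self._defenc is None:
                self._defenc = self._u.encode(sys.getdefaultencoding(), errors)
            return self._defenc

    w = CachedDefaultEncoding(u'caf\xe9')
    print w.as_default_encoded('replace')    # 'caf?' with the usual ascii default

Because the encoded string is kept alive by the unicode object itself, the buffer handed back to C code stays valid for as long as that object does, which is why it can be returned as a borrowed reference.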
class TestUnicode(BaseApiTest): def test_unicodeobject(self, space, api): @@ -155,22 +184,22 @@ def test_unicode_resize(self, space, api): py_uni = new_empty_unicode(space, 10) ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw') - py_uni.c_buffer[0] = u'a' - py_uni.c_buffer[1] = u'b' - py_uni.c_buffer[2] = u'c' + py_uni.c_str[0] = u'a' + py_uni.c_str[1] = u'b' + py_uni.c_str[2] = u'c' ar[0] = rffi.cast(PyObject, py_uni) api.PyUnicode_Resize(ar, 3) py_uni = rffi.cast(PyUnicodeObject, ar[0]) assert py_uni.c_size == 3 - assert py_uni.c_buffer[1] == u'b' - assert py_uni.c_buffer[3] == u'\x00' + assert py_uni.c_str[1] == u'b' + assert py_uni.c_str[3] == u'\x00' # the same for growing ar[0] = rffi.cast(PyObject, py_uni) api.PyUnicode_Resize(ar, 10) py_uni = rffi.cast(PyUnicodeObject, ar[0]) assert py_uni.c_size == 10 - assert py_uni.c_buffer[1] == 'b' - assert py_uni.c_buffer[10] == '\x00' + assert py_uni.c_str[1] == 'b' + assert py_uni.c_str[10] == '\x00' Py_DecRef(space, ar[0]) lltype.free(ar, flavor='raw') diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ b/pypy/module/cpyext/tupleobject.py @@ -59,7 +59,8 @@ py_tup = rffi.cast(PyTupleObject, py_obj) py_tup.c_ob_item = lltype.malloc(ObjectItems, length, - flavor='raw', zero=True) + flavor='raw', zero=True, + add_memory_pressure=True) py_tup.c_ob_size = length return py_tup @@ -70,7 +71,8 @@ """ items_w = space.fixedview(w_obj) l = len(items_w) - p = lltype.malloc(ObjectItems, l, flavor='raw') + p = lltype.malloc(ObjectItems, l, flavor='raw', + add_memory_pressure=True) i = 0 try: while i < l: @@ -177,7 +179,8 @@ ref = rffi.cast(PyTupleObject, ref) oldsize = ref.c_ob_size oldp = ref.c_ob_item - newp = lltype.malloc(ObjectItems, newsize, zero=True, flavor='raw') + newp = lltype.malloc(ObjectItems, newsize, zero=True, flavor='raw', + add_memory_pressure=True) try: if oldsize < newsize: to_cp = oldsize diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py --- a/pypy/module/cpyext/typeobject.py +++ b/pypy/module/cpyext/typeobject.py @@ -421,7 +421,8 @@ Py_DecRef(space, w_metatype) heaptype = lltype.malloc(PyHeapTypeObject.TO, - flavor='raw', zero=True) + flavor='raw', zero=True, + add_memory_pressure=True) pto = heaptype.c_ht_type pto.c_ob_refcnt = 1 pto.c_ob_type = metatype diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py --- a/pypy/module/cpyext/unicodeobject.py +++ b/pypy/module/cpyext/unicodeobject.py @@ -22,7 +22,8 @@ PyUnicodeObjectStruct = lltype.ForwardReference() PyUnicodeObject = lltype.Ptr(PyUnicodeObjectStruct) PyUnicodeObjectFields = (PyObjectFields + - (("buffer", rffi.CWCHARP), ("size", Py_ssize_t))) + (("str", rffi.CWCHARP), ("size", Py_ssize_t), + ("hash", rffi.LONG), ("defenc", PyObject))) cpython_struct("PyUnicodeObject", PyUnicodeObjectFields, PyUnicodeObjectStruct) @bootstrap_function @@ -54,15 +55,20 @@ buflen = length + 1 py_uni.c_size = length - py_uni.c_buffer = lltype.malloc(rffi.CWCHARP.TO, buflen, - flavor='raw', zero=True) + py_uni.c_str = lltype.malloc(rffi.CWCHARP.TO, buflen, + flavor='raw', zero=True, + add_memory_pressure=True) + py_uni.c_hash = -1 + py_uni.c_defenc = lltype.nullptr(PyObject.TO) return py_uni def unicode_attach(space, py_obj, w_obj): "Fills a newly allocated PyUnicodeObject with a unicode string" py_unicode = rffi.cast(PyUnicodeObject, py_obj) py_unicode.c_size = len(space.unicode_w(w_obj)) - py_unicode.c_buffer = lltype.nullptr(rffi.CWCHARP.TO) + 
py_unicode.c_str = lltype.nullptr(rffi.CWCHARP.TO) + py_unicode.c_hash = space.hash_w(w_obj) + py_unicode.c_defenc = lltype.nullptr(PyObject.TO) def unicode_realize(space, py_obj): """ @@ -70,17 +76,20 @@ be modified after this call. """ py_uni = rffi.cast(PyUnicodeObject, py_obj) - s = rffi.wcharpsize2unicode(py_uni.c_buffer, py_uni.c_size) + s = rffi.wcharpsize2unicode(py_uni.c_str, py_uni.c_size) w_obj = space.wrap(s) + py_uni.c_hash = space.hash_w(w_obj) track_reference(space, py_obj, w_obj) return w_obj @cpython_api([PyObject], lltype.Void, header=None) def unicode_dealloc(space, py_obj): py_unicode = rffi.cast(PyUnicodeObject, py_obj) - if py_unicode.c_buffer: - lltype.free(py_unicode.c_buffer, flavor="raw") + if py_unicode.c_str: + lltype.free(py_unicode.c_str, flavor="raw") from pypy.module.cpyext.object import PyObject_dealloc + if py_unicode.c_defenc: + PyObject_dealloc(space, py_unicode.c_defenc) PyObject_dealloc(space, py_obj) @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL) @@ -204,12 +213,12 @@ """Return a pointer to the internal Py_UNICODE buffer of the object. ref has to be a PyUnicodeObject (not checked).""" ref_unicode = rffi.cast(PyUnicodeObject, ref) - if not ref_unicode.c_buffer: + if not ref_unicode.c_str: # Copy unicode buffer w_unicode = from_ref(space, ref) u = space.unicode_w(w_unicode) - ref_unicode.c_buffer = rffi.unicode2wcharp(u) - return ref_unicode.c_buffer + ref_unicode.c_str = rffi.unicode2wcharp(u) + return ref_unicode.c_str @cpython_api([PyObject], rffi.CWCHARP) def PyUnicode_AsUnicode(space, ref): @@ -240,7 +249,7 @@ string may or may not be 0-terminated. It is the responsibility of the caller to make sure that the wchar_t string is 0-terminated in case this is required by the application.""" - c_buffer = PyUnicode_AS_UNICODE(space, rffi.cast(PyObject, ref)) + c_str = PyUnicode_AS_UNICODE(space, rffi.cast(PyObject, ref)) c_size = ref.c_size # If possible, try to copy the 0-termination as well @@ -250,7 +259,7 @@ i = 0 while i < size: - buf[i] = c_buffer[i] + buf[i] = c_str[i] i += 1 if size > c_size: @@ -342,8 +351,15 @@ return PyUnicode_FromUnicode(space, wchar_p, length) @cpython_api([PyObject, CONST_STRING], PyObject) -def _PyUnicode_AsDefaultEncodedString(space, w_unicode, errors): - return PyUnicode_AsEncodedString(space, w_unicode, lltype.nullptr(rffi.CCHARP.TO), errors) +def _PyUnicode_AsDefaultEncodedString(space, ref, errors): + # Returns a borrowed reference. 
+ py_uni = rffi.cast(PyUnicodeObject, ref) + if not py_uni.c_defenc: + py_uni.c_defenc = make_ref( + space, PyUnicode_AsEncodedString( + space, ref, + lltype.nullptr(rffi.CCHARP.TO), errors)) + return py_uni.c_defenc @cpython_api([CONST_STRING, Py_ssize_t, CONST_STRING, CONST_STRING], PyObject) def PyUnicode_Decode(space, s, size, encoding, errors): @@ -443,7 +459,7 @@ def PyUnicode_Resize(space, ref, newsize): # XXX always create a new string so far py_uni = rffi.cast(PyUnicodeObject, ref[0]) - if not py_uni.c_buffer: + if not py_uni.c_str: raise OperationError(space.w_SystemError, space.wrap( "PyUnicode_Resize called on already created string")) try: @@ -457,7 +473,7 @@ if oldsize < newsize: to_cp = oldsize for i in range(to_cp): - py_newuni.c_buffer[i] = py_uni.c_buffer[i] + py_newuni.c_str[i] = py_uni.c_str[i] Py_DecRef(space, ref[0]) ref[0] = rffi.cast(PyObject, py_newuni) return 0 diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py --- a/pypy/module/select/test/test_select.py +++ b/pypy/module/select/test/test_select.py @@ -287,8 +287,7 @@ t = thread.start_new_thread(pollster.poll, ()) try: time.sleep(0.3) - # TODO restore print '', if this is not the reason - for i in range(5): print 'release gil select' # to release GIL untranslated + for i in range(5): print '', # to release GIL untranslated # trigger ufds array reallocation for fd in rfds: pollster.unregister(fd) diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -1,9 +1,9 @@ # Edit these appropriately before running this script maj=5 min=0 -rev=0 +rev=1 branchname=release-$maj.x # ==OR== release-$maj.$min.x -tagname=release-$maj.$min # ==OR== release-$maj.$min.$rev +tagname=release-$maj.$min.$rev # This script will download latest builds from the buildmaster, rename the top # level directory, and repackage ready to be uploaded to bitbucket. It will also # download source, assuming a tag for the release already exists, and repackage them. 
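The PyUnicode_Resize hunk above keeps the existing strategy of always allocating a fresh, zero-filled buffer and copying over whatever fits; only the buffer field is renamed from c_buffer to c_str. Here is a small plain-Python sketch of that strategy, where a list stands in for the raw wchar_t buffer; it illustrates the approach and is not the cpyext code:

    def resized_buffer(old_buf, old_size, newsize):
        # fresh zero-filled buffer, one extra slot for the terminator
        new_buf = [u'\x00'] * (newsize + 1)
        for i in range(min(old_size, newsize)):
            new_buf[i] = old_buf[i]
        return new_buf

    buf = [u'a', u'b', u'c'] + [u'\x00'] * 8      # a 10-character buffer
    small = resized_buffer(buf, 10, 3)
    assert small[1] == u'b' and small[3] == u'\x00'
    big = resized_buffer(small, 3, 10)
    assert big[1] == u'b' and big[10] == u'\x00'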
diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py --- a/rpython/jit/backend/detect_cpu.py +++ b/rpython/jit/backend/detect_cpu.py @@ -66,6 +66,7 @@ 'x86_64': MODEL_X86, 'amd64': MODEL_X86, # freebsd 'AMD64': MODEL_X86, # win64 + 'armv8l': MODEL_ARM, # 32-bit ARMv8 'armv7l': MODEL_ARM, 'armv6l': MODEL_ARM, 'arm': MODEL_ARM, # freebsd diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -784,11 +784,13 @@ return [] # check for _immutable_fields_ hints immut = v_inst.concretetype.TO._immutable_field(c_fieldname.value) + need_live = False if immut: if (self.callcontrol is not None and self.callcontrol.could_be_green_field(v_inst.concretetype.TO, c_fieldname.value)): pure = '_greenfield' + need_live = True else: pure = '_pure' else: @@ -815,10 +817,12 @@ descr1 = self.cpu.fielddescrof( v_inst.concretetype.TO, quasiimmut.get_mutate_field_name(c_fieldname.value)) - op1 = [SpaceOperation('-live-', [], None), + return [SpaceOperation('-live-', [], None), SpaceOperation('record_quasiimmut_field', [v_inst, descr, descr1], None), op1] + if need_live: + return [SpaceOperation('-live-', [], None), op1] return op1 def rewrite_op_setfield(self, op, override_type=None): diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -1012,7 +1012,8 @@ v1 = varoftype(lltype.Ptr(S)) v2 = varoftype(lltype.Char) op = SpaceOperation('getfield', [v1, Constant('x', lltype.Void)], v2) - op1 = Transformer(FakeCPU(), FakeCC()).rewrite_operation(op) + op0, op1 = Transformer(FakeCPU(), FakeCC()).rewrite_operation(op) + assert op0.opname == '-live-' assert op1.opname == 'getfield_gc_i_greenfield' assert op1.args == [v1, ('fielddescr', S, 'x')] assert op1.result == v2 diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py --- a/rpython/memory/gc/incminimark.py +++ b/rpython/memory/gc/incminimark.py @@ -2929,10 +2929,19 @@ ll_assert(rc < int(REFCNT_FROM_PYPY_LIGHT * 0.99), "refcount underflow from REFCNT_FROM_PYPY_LIGHT?") rc -= REFCNT_FROM_PYPY - self._pyobj(pyobject).ob_refcnt = rc self._pyobj(pyobject).ob_pypy_link = 0 if rc == 0: self.rrc_dealloc_pending.append(pyobject) + # an object with refcnt == 0 cannot stay around waiting + # for its deallocator to be called. Some code (lxml) + # expects that tp_dealloc is called immediately when + # the refcnt drops to 0. If it isn't, we get some + # uncleared raw pointer that can still be used to access + # the object; but (PyObject *)raw_pointer is then bogus + # because after a Py_INCREF()/Py_DECREF() on it, its + # tp_dealloc is also called! 
+ rc = 1 + self._pyobj(pyobject).ob_refcnt = rc _rrc_free._always_inline_ = True def rrc_major_collection_trace(self): diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py --- a/rpython/memory/gc/test/test_rawrefcount.py +++ b/rpython/memory/gc/test/test_rawrefcount.py @@ -174,7 +174,7 @@ p1 = check_alive(0) self._collect(major=True, expected_trigger=1) py.test.raises(RuntimeError, "p1.x") # dead - assert r1.ob_refcnt == 0 + assert r1.ob_refcnt == 1 # in the pending list assert r1.ob_pypy_link == 0 assert self.gc.rawrefcount_next_dead() == r1addr assert self.gc.rawrefcount_next_dead() == llmemory.NULL @@ -197,7 +197,7 @@ assert p1.x == 42 self._collect(major=True, expected_trigger=1) py.test.raises(RuntimeError, "p1.x") # dead - assert r1.ob_refcnt == 0 + assert r1.ob_refcnt == 1 assert r1.ob_pypy_link == 0 assert self.gc.rawrefcount_next_dead() == r1addr self.gc.check_no_more_rawrefcount_state() @@ -214,7 +214,7 @@ else: self._collect(major=False, expected_trigger=1) py.test.raises(RuntimeError, "p1.x") # dead - assert r1.ob_refcnt == 0 + assert r1.ob_refcnt == 1 assert r1.ob_pypy_link == 0 assert self.gc.rawrefcount_next_dead() == r1addr self.gc.check_no_more_rawrefcount_state() @@ -252,7 +252,7 @@ self._collect(major=True, expected_trigger=1) else: self._collect(major=False, expected_trigger=1) - assert r1.ob_refcnt == 0 # refcnt dropped to 0 + assert r1.ob_refcnt == 1 # refcnt 1, in the pending list assert r1.ob_pypy_link == 0 # detached assert self.gc.rawrefcount_next_dead() == r1addr self.gc.check_no_more_rawrefcount_state() @@ -277,7 +277,7 @@ assert self.trigger == [] self._collect(major=True, expected_trigger=1) py.test.raises(RuntimeError, "p1.x") # dead - assert r1.ob_refcnt == 0 + assert r1.ob_refcnt == 1 assert r1.ob_pypy_link == 0 assert self.gc.rawrefcount_next_dead() == r1addr self.gc.check_no_more_rawrefcount_state() diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py --- a/rpython/rlib/rawrefcount.py +++ b/rpython/rlib/rawrefcount.py @@ -72,6 +72,12 @@ return p def next_dead(OB_PTR_TYPE): + """NOT_RPYTHON. When the GC runs, it finds some pyobjs to be dead + but cannot immediately dispose of them (it doesn't know how to call + e.g. tp_dealloc(), and anyway calling it immediately would cause all + sorts of bugs). So instead, it stores them in an internal list, + initially with refcnt == 1. This pops the next item off this list. 
+ """ if len(_d_list) == 0: return lltype.nullptr(OB_PTR_TYPE.TO) ob = _d_list.pop() @@ -136,6 +142,7 @@ ob.c_ob_refcnt -= REFCNT_FROM_PYPY ob.c_ob_pypy_link = 0 if ob.c_ob_refcnt == 0: + ob.c_ob_refcnt = 1 _d_list.append(ob) return None diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -22,6 +22,22 @@ from rpython.rlib import rwin32 from rpython.rlib.rwin32file import make_win32_traits +class CConfig: + _compilation_info_ = ExternalCompilationInfo( + includes=['sys/stat.h', + 'unistd.h', + 'fcntl.h'], + ) + for _name in """fchdir fchmod fchmodat fchown fchownat fexecve fdopendir + fpathconf fstat fstatat fstatvfs ftruncate futimens futimes + futimesat linkat lchflags lchmod lchown lstat lutimes + mkdirat mkfifoat mknodat openat readlinkat renameat + symlinkat unlinkat utimensat""".split(): + locals()['HAVE_%s' % _name.upper()] = rffi_platform.Has(_name) +cConfig = rffi_platform.configure(CConfig) +globals().update(cConfig) + + class CConstantErrno(CConstant): # these accessors are used when calling get_errno() or set_errno() # on top of CPython @@ -1024,6 +1040,13 @@ if not win32traits.MoveFile(path1, path2): raise rwin32.lastSavedWindowsError() + at specialize.argtype(0, 1) +def replace(path1, path2): + if os.name == 'nt': + raise NotImplementedError( + 'On windows, os.replace() should overwrite the destination') + return rename(path1, path2) + #___________________________________________________________________ c_mkfifo = external('mkfifo', [rffi.CCHARP, rffi.MODE_T], rffi.INT, diff --git a/rpython/rlib/rtime.py b/rpython/rlib/rtime.py --- a/rpython/rlib/rtime.py +++ b/rpython/rlib/rtime.py @@ -9,7 +9,6 @@ from rpython.rtyper.tool import rffi_platform from rpython.rtyper.lltypesystem import rffi, lltype from rpython.rlib.objectmodel import register_replacement_for -from rpython.rlib import jit from rpython.rlib.rarithmetic import intmask, UINT_MAX from rpython.rlib import rposix @@ -170,28 +169,30 @@ [lltype.Signed, lltype.Ptr(TIMESPEC)], rffi.INT, releasegil=False, compilation_info=eci_with_lrt) -else: +if need_rusage: RUSAGE = RUSAGE RUSAGE_SELF = RUSAGE_SELF or 0 c_getrusage = external('getrusage', [rffi.INT, lltype.Ptr(RUSAGE)], - lltype.Void, + rffi.INT, releasegil=False) +def win_perf_counter(): + a = lltype.malloc(A, flavor='raw') + if state.divisor == 0.0: + QueryPerformanceCounter(a) + state.counter_start = a[0] + QueryPerformanceFrequency(a) + state.divisor = float(a[0]) + QueryPerformanceCounter(a) + diff = a[0] - state.counter_start + lltype.free(a, flavor='raw') + return float(diff) / state.divisor + @replace_time_function('clock') - at jit.dont_look_inside # the JIT doesn't like FixedSizeArray def clock(): if _WIN32: - a = lltype.malloc(A, flavor='raw') - if state.divisor == 0.0: - QueryPerformanceCounter(a) - state.counter_start = a[0] - QueryPerformanceFrequency(a) - state.divisor = float(a[0]) - QueryPerformanceCounter(a) - diff = a[0] - state.counter_start - lltype.free(a, flavor='raw') - return float(diff) / state.divisor + return win_perf_counter() elif CLOCK_PROCESS_CPUTIME_ID is not None: with lltype.scoped_alloc(TIMESPEC) as a: c_clock_gettime(CLOCK_PROCESS_CPUTIME_ID, a) diff --git a/rpython/rlib/rvmprof/src/vmprof_config.h b/rpython/rlib/rvmprof/src/vmprof_config.h --- a/rpython/rlib/rvmprof/src/vmprof_config.h +++ b/rpython/rlib/rvmprof/src/vmprof_config.h @@ -1,6 +1,10 @@ #define HAVE_SYS_UCONTEXT_H -#if defined(__FreeBSD__) -#define PC_FROM_UCONTEXT uc_mcontext.mc_rip +#if 
defined(__FreeBSD__) || defined(__FreeBSD_kernel__) + #ifdef __i386__ + #define PC_FROM_UCONTEXT uc_mcontext.mc_eip + #else + #define PC_FROM_UCONTEXT uc_mcontext.mc_rip + #endif #elif defined( __APPLE__) #if ((ULONG_MAX) == (UINT_MAX)) #define PC_FROM_UCONTEXT uc_mcontext->__ss.__eip @@ -8,10 +12,10 @@ #define PC_FROM_UCONTEXT uc_mcontext->__ss.__rip #endif #elif defined(__arm__) -#define PC_FROM_UCONTEXT uc_mcontext.arm_ip + #define PC_FROM_UCONTEXT uc_mcontext.arm_ip #elif defined(__linux) && defined(__i386) && defined(__GNUC__) -#define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_EIP] + #define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_EIP] #else -/* linux, gnuc */ -#define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_RIP] + /* linux, gnuc */ + #define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_RIP] #endif diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py --- a/rpython/rlib/test/test_rawrefcount.py +++ b/rpython/rlib/test/test_rawrefcount.py @@ -116,7 +116,7 @@ assert rawrefcount.next_dead(PyObject) == lltype.nullptr(PyObjectS) assert rawrefcount._o_list == [] assert wr_p() is None - assert ob.c_ob_refcnt == 0 + assert ob.c_ob_refcnt == 1 # from the pending list assert ob.c_ob_pypy_link == 0 lltype.free(ob, flavor='raw') @@ -173,7 +173,7 @@ assert rawrefcount._d_list == [ob] assert rawrefcount._p_list == [] assert wr_p() is None - assert ob.c_ob_refcnt == 0 + assert ob.c_ob_refcnt == 1 # from _d_list assert ob.c_ob_pypy_link == 0 lltype.free(ob, flavor='raw') diff --git a/rpython/tool/runsubprocess.py b/rpython/tool/runsubprocess.py --- a/rpython/tool/runsubprocess.py +++ b/rpython/tool/runsubprocess.py @@ -20,6 +20,8 @@ def _run(executable, args, env, cwd): # note that this function can be *overridden* below # in some cases! + if sys.platform == 'win32': + executable = executable.replace('/','\\') if isinstance(args, str): args = str(executable) + ' ' + args shell = True diff --git a/rpython/translator/c/src/thread_pthread.c b/rpython/translator/c/src/thread_pthread.c --- a/rpython/translator/c/src/thread_pthread.c +++ b/rpython/translator/c/src/thread_pthread.c @@ -37,7 +37,7 @@ # define THREAD_STACK_SIZE 0 /* use default stack size */ # endif -# if (defined(__APPLE__) || defined(__FreeBSD__)) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0 +# if (defined(__APPLE__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0 /* The default stack size for new threads on OSX is small enough that * we'll get hard crashes instead of 'maximum recursion depth exceeded' * exceptions. 
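Tying together the rawrefcount and incminimark changes earlier in this patch: when the C-level refcount of a pyobj drops to zero during collection, it is no longer left at zero; it is parked at refcount 1 on a pending list and only handed out later through next_dead(), at which point the caller runs tp_dealloc. A toy, pure-Python model of that protocol follows; the names and the REFCNT_FROM_PYPY value are placeholders, not the real GC code:

    REFCNT_FROM_PYPY = 1 << 30          # placeholder, the real constant differs

    class FakePyObj(object):
        def __init__(self):
            self.ob_refcnt = REFCNT_FROM_PYPY
            self.ob_pypy_link = 1

    _d_list = []                        # the pending-dealloc list

    def detach(ob):
        ob.ob_refcnt -= REFCNT_FROM_PYPY
        ob.ob_pypy_link = 0
        if ob.ob_refcnt == 0:
            ob.ob_refcnt = 1            # keep it "alive" while it waits
            _d_list.append(ob)

    def next_dead():
        return _d_list.pop() if _d_list else None

    ob = FakePyObj()
    detach(ob)
    assert ob.ob_refcnt == 1 and next_dead() is ob

Keeping the refcount at 1 while the object waits means that a stray Py_INCREF/Py_DECREF pair from C code cannot trigger a second, premature tp_dealloc, which is the kind of problem the comment added to incminimark.py above describes.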
@@ -84,7 +84,7 @@ if (tss != 0) pthread_attr_setstacksize(&attrs, tss); #endif -#if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED) && !defined(__FreeBSD__) +#if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED) && !(defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) pthread_attr_setscope(&attrs, PTHREAD_SCOPE_SYSTEM); #endif From pypy.commits at gmail.com Tue Mar 22 13:47:31 2016 From: pypy.commits at gmail.com (plan_rich) Date: Tue, 22 Mar 2016 10:47:31 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: translation issue Message-ID: <56f18533.703dc20a.159b.220a@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83263:f4455cafdb2d Date: 2016-03-22 18:45 +0100 http://bitbucket.org/pypy/pypy/changeset/f4455cafdb2d/ Log: translation issue diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -583,7 +583,7 @@ self.patch_pending_failure_recoveries(rawstart) # patch the jump from original guard if logger: - logger.log_patch_guard(faildescr.adr_new_target, rawstart) + logger.log_patch_guard(faildescr.adr_jump_offset, rawstart) self.patch_jump_for_descr(faildescr, rawstart) ops_offset = self.mc.ops_offset frame_depth = max(self.current_clt.frame_info.jfi_frame_depth, diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -481,8 +481,8 @@ def do_compile_loop(jd_id, unique_id, metainterp_sd, inputargs, operations, looptoken, log=True, name='', memo=None): - log = metainterp_sd.jitlog.log_trace(MARK_TRACE_OPT, metainterp_sd, None) - log.write(inputargs, operations) + _log = metainterp_sd.jitlog.log_trace(MARK_TRACE_OPT, metainterp_sd, None) + _log.write(inputargs, operations) # TODO remove old metainterp_sd.logger_ops.log_loop(inputargs, operations, -2, 'compiling', None, name, memo) @@ -494,8 +494,8 @@ def do_compile_bridge(metainterp_sd, faildescr, inputargs, operations, original_loop_token, log=True, memo=None): - log = metainterp_sd.jitlog.log_trace(MARK_TRACE_OPT, metainterp_sd, None) - log.write(inputargs, operations, faildescr=faildescr) + _log = metainterp_sd.jitlog.log_trace(MARK_TRACE_OPT, metainterp_sd, None) + _log.write(inputargs, operations, faildescr=faildescr) # TODO remove old metainterp_sd.logger_ops.log_bridge(inputargs, operations, "compiling", memo=memo) diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -57,7 +57,7 @@ return LogTrace(tag, memo, metainterp_sd, mc, self) def log_patch_guard(self, addr, target_addr): - if self.cintf.jitlog_filter(tag): + if self.cintf.jitlog_filter(MARK_ASM_PATCH): return le_addr_write = self.encode_le_addr(addr) le_len = self.encode_le_32bit(8) @@ -116,8 +116,8 @@ absaddr = self.mc.absolute_addr() rel = self.mc.get_relative_pos() # packs as two unsigend longs - le_addr1 = self.encode_le_addr(absaddr) - le_addr2 = self.encode_le_addr(absaddr + rel) + le_addr1 = self.logger.encode_le_addr(absaddr) + le_addr2 = self.logger.encode_le_addr(absaddr + rel) log.write_marked(MARK_ASM_ADDR, le_addr1 + le_addr2) for i,op in enumerate(ops): mark, line = self.encode_op(op) From pypy.commits at gmail.com Tue Mar 22 14:12:28 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 22 Mar 2016 11:12:28 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: clean up the calling convention of rop vs op 
Message-ID: <56f18b0c.d4e01c0a.c4d1f.5b7a@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83264:d2202ec34cb6 Date: 2016-03-22 20:11 +0200 http://bitbucket.org/pypy/pypy/changeset/d2202ec34cb6/ Log: clean up the calling convention of rop vs op diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -347,7 +347,7 @@ opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array return - if op.is_call(): + if rop.is_call(op.opnum): if rop.is_call_assembler(op.getopnum()): self._seen_guard_not_invalidated = False else: diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -464,8 +464,9 @@ else: last_guard_pos = -1 assert opinfo is None or opinfo.__class__ is info.NonNullPtrInfo - if (op.is_getfield() or op.getopnum() == rop.SETFIELD_GC or - op.getopnum() == rop.QUASIIMMUT_FIELD): + opnum = op.opnum + if (rop.is_getfield(opnum) or opnum == rop.SETFIELD_GC or + opnum == rop.QUASIIMMUT_FIELD): descr = op.getdescr() parent_descr = descr.get_parent_descr() if parent_descr.is_object(): @@ -473,14 +474,14 @@ else: opinfo = info.StructPtrInfo(parent_descr) opinfo.init_fields(parent_descr, descr.get_index()) - elif (op.is_getarrayitem() or op.getopnum() == rop.SETARRAYITEM_GC or - op.getopnum() == rop.ARRAYLEN_GC): + elif (rop.is_getarrayitem(opnum) or opnum == rop.SETARRAYITEM_GC or + opnum == rop.ARRAYLEN_GC): opinfo = info.ArrayPtrInfo(op.getdescr()) - elif op.getopnum() in (rop.GUARD_CLASS, rop.GUARD_NONNULL_CLASS): + elif opnum in (rop.GUARD_CLASS, rop.GUARD_NONNULL_CLASS): opinfo = info.InstancePtrInfo() - elif op.getopnum() in (rop.STRLEN,): + elif opnum in (rop.STRLEN,): opinfo = vstring.StrPtrInfo(vstring.mode_string) - elif op.getopnum() in (rop.UNICODELEN,): + elif opnum in (rop.UNICODELEN,): opinfo = vstring.StrPtrInfo(vstring.mode_unicode) else: assert False, "operations %s unsupported" % op @@ -542,7 +543,7 @@ dispatch_opt(self, op) def emit_operation(self, op): - if op.returns_bool_result(): + if rop.returns_bool_result(op.opnum): self.getintbound(op).make_bool() self._emit_operation(op) op = self.get_box_replacement(op) @@ -561,8 +562,8 @@ if op.is_constant(): return # can happen e.g. 
if we postpone the operation that becomes # constant - # XXX kill - op = self.replace_op_with(op, op.getopnum()) + # XXX kill, requires thinking + op = self.replace_op_with(op, op.opnum) for i in range(op.numargs()): arg = self.force_box(op.getarg(i)) op.setarg(i, arg) @@ -580,8 +581,10 @@ op = self.emit_guard_operation(op, pendingfields) elif op.can_raise(): self.exception_might_have_happened = True - if ((op.has_no_side_effect() or op.is_guard() or op.is_jit_debug() or - op.is_ovf()) and not self.is_call_pure_pure_canraise(op)): + opnum = op.opnum + if ((rop.has_no_side_effect(opnum) or rop.is_guard(opnum) or + rop.is_jit_debug(opnum) or + rop.is_ovf(opnum)) and not self.is_call_pure_pure_canraise(op)): pass else: self._last_guard_op = None diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -75,8 +75,8 @@ dispatch_opt(self, op) def optimize_default(self, op): - canfold = op.is_always_pure() - if op.is_ovf(): + canfold = rop.is_always_pure(op.opnum) + if rop.is_ovf(op.opnum): self.postponed_op = op return if self.postponed_op: @@ -110,7 +110,7 @@ # otherwise, the operation remains self.emit_operation(op) - if op.returns_bool_result(): + if rop.returns_bool_result(op.opnum): self.getintbound(op).make_bool() if save: recentops = self.getrecentops(op.getopnum()) @@ -221,9 +221,9 @@ def produce_potential_short_preamble_ops(self, sb): ops = self.optimizer._newoperations for i, op in enumerate(ops): - if op.is_always_pure(): + if rop.is_always_pure(op.opnum): sb.add_pure_op(op) - if op.is_ovf() and ops[i + 1].getopnum() == rop.GUARD_NO_OVERFLOW: + if rop.is_ovf(op.opnum) and ops[i + 1].getopnum() == rop.GUARD_NO_OVERFLOW: sb.add_pure_op(op) for i in self.call_pure_positions: op = ops[i] @@ -232,7 +232,7 @@ effectinfo = op.getdescr().get_extra_info() if not effectinfo.check_can_raise(ignore_memoryerror=True): - assert op.is_call() + assert rop.is_call(op.opnum) sb.add_pure_op(op) dispatch_opt = make_dispatcher_method(OptPure, 'optimize_', diff --git a/rpython/jit/metainterp/optimizeopt/shortpreamble.py b/rpython/jit/metainterp/optimizeopt/shortpreamble.py --- a/rpython/jit/metainterp/optimizeopt/shortpreamble.py +++ b/rpython/jit/metainterp/optimizeopt/shortpreamble.py @@ -72,7 +72,7 @@ pop = PreambleOp(self.res, preamble_op, invented_name) assert not opinfo.is_virtual() descr = self.getfield_op.getdescr() - if g.is_getfield(): + if rop.is_getfield(g.opnum): cf = optheap.field_cache(descr) opinfo.setfield(preamble_op.getdescr(), self.res, pop, optheap, cf) @@ -92,7 +92,7 @@ preamble_arg = sb.produce_arg(sop.getarg(0)) if preamble_arg is None: return None - if sop.is_getfield(): + if rop.is_getfield(sop.opnum): preamble_op = ResOperation(sop.getopnum(), [preamble_arg], descr=sop.getdescr()) else: @@ -117,7 +117,7 @@ op.set_forwarded(self.res) else: op = self.res - if preamble_op.is_call(): + if rop.is_call(preamble_op.opnum): optpure.extra_call_pure.append(PreambleOp(op, preamble_op, invented_name)) else: @@ -132,7 +132,7 @@ if newarg is None: return None arglist.append(newarg) - if op.is_call(): + if rop.is_call(op.opnum): opnum = OpHelpers.call_pure_for_descr(op.getdescr()) else: opnum = op.getopnum() diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -391,7 +391,7 @@ for box in 
self._map_args(mapping, short_jump_args)] def _expand_info(self, arg, infos): - if isinstance(arg, AbstractResOp) and arg.is_same_as(): + if isinstance(arg, AbstractResOp) and rop.is_same_as(arg.opnum): info = self.optimizer.getinfo(arg.getarg(0)) else: info = self.optimizer.getinfo(arg) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -205,7 +205,7 @@ type = vecinfo.datatype signed = vecinfo.signed bytesize = vecinfo.bytesize - if op.returns_bool_result(): + if rop.returns_bool_result(op.opnum): type = 'i' self.setinfo(type, bytesize, signed) @@ -402,58 +402,10 @@ def is_foldable_guard(self): return rop.is_foldable_guard(self.getopnun()) - def is_guard_exception(self): - return rop.is_guard_ - return (self.getopnum() == rop.GUARD_EXCEPTION or - self.getopnum() == rop.GUARD_NO_EXCEPTION) - - def is_guard_overflow(self): - return (self.getopnum() == rop.GUARD_OVERFLOW or - self.getopnum() == rop.GUARD_NO_OVERFLOW) - - def is_jit_debug(self): - return rop._JIT_DEBUG_FIRST <= self.getopnum() <= rop._JIT_DEBUG_LAST - - def is_always_pure(self): - # Tells whether an operation is pure based solely on the opcode. - # Other operations (e.g. getfield ops) may be pure in some cases are well. - return rop._ALWAYS_PURE_FIRST <= self.getopnum() <= rop._ALWAYS_PURE_LAST - - def has_no_side_effect(self): - return rop._NOSIDEEFFECT_FIRST <= self.getopnum() <= rop._NOSIDEEFFECT_LAST - - def is_malloc(self): - # a slightly different meaning from can_malloc - return rop._MALLOC_FIRST <= self.getopnum() <= rop._MALLOC_LAST - - def can_malloc(self): - return self.is_call() or self.is_malloc() - - def is_call(self): - return rop._CALL_FIRST <= self.getopnum() <= rop._CALL_LAST - - def is_same_as(self): - return self.opnum in (rop.SAME_AS_I, rop.SAME_AS_F, rop.SAME_AS_R) - - def is_getfield(self): - return self.opnum in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R) - - def is_getarrayitem(self): - return self.opnum in (rop.GETARRAYITEM_GC_I, rop.GETARRAYITEM_GC_F, - rop.GETARRAYITEM_GC_R, rop.GETARRAYITEM_GC_PURE_I, - rop.GETARRAYITEM_GC_PURE_F, - rop.GETARRAYITEM_GC_PURE_R) - - def is_vector_arithmetic(self): - return rop._VEC_ARITHMETIC_FIRST <= self.getopnum() <= rop._VEC_ARITHMETIC_LAST - - def is_raw_array_access(self): - return self.is_raw_load() or self.is_raw_store() - def is_primitive_array_access(self): """ Indicates that this operations loads/stores a primitive type (int,float) """ - if self.is_primitive_load() or self.is_primitive_store(): + if rop.is_primitive_load(self.opnum) or rop.is_primitive_store(self.opnum): descr = self.getdescr() if not we_are_translated(): from rpython.jit.backend.llgraph.runner import _getdescr @@ -462,24 +414,6 @@ return True return False - def is_primitive_load(self): - return rop._RAW_LOAD_FIRST < self.getopnum() < rop._RAW_LOAD_LAST - - def is_primitive_store(self): - return rop._RAW_STORE_FIRST < self.getopnum() < rop._RAW_STORE_LAST - - def is_final(self): - return rop._FINAL_FIRST <= self.getopnum() <= rop._FINAL_LAST - - def returns_bool_result(self): - return self._cls_has_bool_result - - #def forget_value(self): -- in the base class, AbstractResOpOrInputArg - # pass - - def is_label(self): - return self.getopnum() == rop.LABEL - def is_vector(self): return False @@ -1442,27 +1376,30 @@ def can_raise(opnum): return rop._CANRAISE_FIRST <= opnum <= rop._CANRAISE_LAST - def is_malloc(self): + @staticmethod + def 
is_malloc(opnum): # a slightly different meaning from can_malloc - return rop._MALLOC_FIRST <= self.getopnum() <= rop._MALLOC_LAST + return rop._MALLOC_FIRST <= opnum <= rop._MALLOC_LAST - def can_malloc(self): - return self.is_call() or self.is_malloc() + @staticmethod + def can_malloc(opnum): + return rop.is_call(opnum) or rop.is_malloc(opnum) @staticmethod def is_same_as(opnum): return opnum in (rop.SAME_AS_I, rop.SAME_AS_F, rop.SAME_AS_R) - def is_getfield(self): - return self.opnum in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, - rop.GETFIELD_GC_R, rop.GETFIELD_GC_PURE_I, - rop.GETFIELD_GC_PURE_R, rop.GETFIELD_GC_PURE_F) + @staticmethod + def is_getfield(opnum): + return opnum in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, + rop.GETFIELD_GC_R) - def is_getarrayitem(self): - return self.opnum in (rop.GETARRAYITEM_GC_I, rop.GETARRAYITEM_GC_F, - rop.GETARRAYITEM_GC_R, rop.GETARRAYITEM_GC_PURE_I, - rop.GETARRAYITEM_GC_PURE_F, - rop.GETARRAYITEM_GC_PURE_R) + @staticmethod + def is_getarrayitem(opnum): + return opnum in (rop.GETARRAYITEM_GC_I, rop.GETARRAYITEM_GC_F, + rop.GETARRAYITEM_GC_R, rop.GETARRAYITEM_GC_PURE_I, + rop.GETARRAYITEM_GC_PURE_F, + rop.GETARRAYITEM_GC_PURE_R) @staticmethod def is_real_call(opnum): @@ -1503,42 +1440,33 @@ def is_ovf(opnum): return rop._OVF_FIRST <= opnum <= rop._OVF_LAST - def is_vector_arithmetic(self): - return rop._VEC_ARITHMETIC_FIRST <= self.getopnum() <= rop._VEC_ARITHMETIC_LAST + @staticmethod + def is_vector_arithmetic(opnum): + return rop._VEC_ARITHMETIC_FIRST <= opnum <= rop._VEC_ARITHMETIC_LAST - def is_raw_array_access(self): - return self.is_raw_load() or self.is_raw_store() + @staticmethod + def is_raw_array_access(opnum): + return rop.is_raw_load(opnum) or rop.is_raw_store(opnum) - def is_primitive_array_access(self): - """ Indicates that this operations loads/stores a - primitive type (int,float) """ - if self.is_primitive_load() or self.is_primitive_store(): - descr = self.getdescr() - if not we_are_translated(): - from rpython.jit.backend.llgraph.runner import _getdescr - descr = _getdescr(self) - if descr and descr.is_array_of_primitives(): - return True - return False + @staticmethod + def is_primitive_load(opnum): + return rop._RAW_LOAD_FIRST < opnum < rop._RAW_LOAD_LAST - def is_primitive_load(self): - return rop._RAW_LOAD_FIRST < self.getopnum() < rop._RAW_LOAD_LAST + @staticmethod + def is_primitive_store(opnum): + return rop._RAW_STORE_FIRST < opnum < rop._RAW_STORE_LAST - def is_primitive_store(self): - return rop._RAW_STORE_FIRST < self.getopnum() < rop._RAW_STORE_LAST - - def is_final(self): - return rop._FINAL_FIRST <= self.getopnum() <= rop._FINAL_LAST + @staticmethod + def is_final(opnum): + return rop._FINAL_FIRST <= opnum <= rop._FINAL_LAST @staticmethod def returns_bool_result(opnum): return opclasses[opnum]._cls_has_bool_result - #def forget_value(self): -- in the base class, AbstractResOpOrInputArg - # pass - - def is_label(self): - return self.getopnum() == rop.LABEL + @staticmethod + def is_label(opnum): + return opnum == rop.LABEL @staticmethod def is_call(opnum): From pypy.commits at gmail.com Tue Mar 22 14:22:55 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 22 Mar 2016 11:22:55 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: Fix: setint() doesn't exist and is not needed any more Message-ID: <56f18d7f.83561c0a.a74c2.6434@mx.google.com> Author: Armin Rigo Branch: jit-leaner-frontend Changeset: r83265:615d42fe6737 Date: 2016-03-22 19:11 +0100 http://bitbucket.org/pypy/pypy/changeset/615d42fe6737/ Log: Fix: 
setint() doesn't exist and is not needed any more diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -391,7 +391,6 @@ # it's hard to test all cases). Rewrite it away. value = int(opnum == rop.GUARD_FALSE) op1 = ResOperation(rop.SAME_AS_I, [ConstInt(value)]) - op1.setint(value) self.emit_op(op1) lst = op.getfailargs()[:] lst[i] = op1 From pypy.commits at gmail.com Tue Mar 22 14:22:56 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 22 Mar 2016 11:22:56 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: Fixes for test_random Message-ID: <56f18d80.654fc20a.991cb.31f1@mx.google.com> Author: Armin Rigo Branch: jit-leaner-frontend Changeset: r83266:fc986a17549c Date: 2016-03-22 19:13 +0100 http://bitbucket.org/pypy/pypy/changeset/fc986a17549c/ Log: Fixes for test_random diff --git a/rpython/jit/backend/test/test_ll_random.py b/rpython/jit/backend/test/test_ll_random.py --- a/rpython/jit/backend/test/test_ll_random.py +++ b/rpython/jit/backend/test/test_ll_random.py @@ -2,6 +2,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr from rpython.rtyper import rclass from rpython.jit.backend.test import test_random +from rpython.jit.backend.test.test_random import getint, getref_base, getref from rpython.jit.metainterp.resoperation import ResOperation, rop, optypes from rpython.jit.metainterp.history import ConstInt, ConstPtr, getkind from rpython.jit.codewriter import heaptracker @@ -169,7 +170,7 @@ if length == 0: raise test_random.CannotProduceOperation v_index = r.choice(self.intvars) - if not (0 <= v_index.getint() < length): + if not (0 <= getint(v_index) < length): v_index = ConstInt(r.random_integer() % length) return v_index @@ -311,7 +312,7 @@ def field_descr(self, builder, r): v, A = builder.get_structptr_var(r, type=lltype.Array, array_of_structs=True) - array = v.getref(lltype.Ptr(A)) + array = getref(lltype.Ptr(A), v) v_index = builder.get_index(len(array), r) choice = [] for name in A.OF._names: @@ -344,7 +345,7 @@ w = ConstInt(r.random_integer()) else: w = r.choice(builder.intvars) - value = w.getint() + value = getint(w) if rffi.cast(lltype.Signed, rffi.cast(TYPE, value)) == value: break builder.do(self.opnum, [v, w], descr) @@ -357,7 +358,7 @@ w = ConstInt(r.random_integer()) else: w = r.choice(builder.intvars) - value = w.getint() + value = getint(w) if rffi.cast(lltype.Signed, rffi.cast(TYPE, value)) == value: break builder.do(self.opnum, [v, v_index, w], descr) @@ -389,7 +390,7 @@ class GetArrayItemOperation(ArrayOperation): def field_descr(self, builder, r): v, A = builder.get_arrayptr_var(r) - array = v.getref(lltype.Ptr(A)) + array = getref(lltype.Ptr(A), v) v_index = builder.get_index(len(array), r) descr = self.array_descr(builder, A) return v, A, v_index, descr @@ -411,7 +412,7 @@ w = ConstInt(r.random_integer()) else: w = r.choice(builder.intvars) - value = w.getint() + value = getint(w) if rffi.cast(lltype.Signed, rffi.cast(A.OF, value)) == value: break builder.do(self.opnum, [v, v_index, w], descr) @@ -455,7 +456,7 @@ v_ptr = builder.do(self.opnum, [v_length]) getattr(builder, self.builder_cache).append(v_ptr) # Initialize the string. Is there a better way to do this? 
- for i in range(v_length.getint()): + for i in range(getint(v_length)): v_index = ConstInt(i) v_char = ConstInt(r.random_integer() % self.max) builder.do(self.set_char, [v_ptr, v_index, v_char]) @@ -471,9 +472,9 @@ current = getattr(builder, self.builder_cache) if current and r.random() < .8: v_string = r.choice(current) - string = v_string.getref(self.ptr) + string = getref(self.ptr, v_string) else: - string = self.alloc(builder.get_index(500, r).getint()) + string = self.alloc(getint(builder.get_index(500, r))) v_string = ConstPtr(lltype.cast_opaque_ptr(llmemory.GCREF, string)) current.append(v_string) for i in range(len(string.chars)): @@ -484,7 +485,7 @@ class AbstractGetItemOperation(AbstractStringOperation): def produce_into(self, builder, r): v_string = self.get_string(builder, r) - v_index = builder.get_index(len(v_string.getref(self.ptr).chars), r) + v_index = builder.get_index(len(getref(self.ptr, v_string).chars), r) builder.do(self.opnum, [v_string, v_index]) class AbstractSetItemOperation(AbstractStringOperation): @@ -492,7 +493,7 @@ v_string = self.get_string(builder, r) if isinstance(v_string, ConstPtr): raise test_random.CannotProduceOperation # setitem(Const, ...) - v_index = builder.get_index(len(v_string.getref(self.ptr).chars), r) + v_index = builder.get_index(len(getref(self.ptr, v_string).chars), r) v_target = ConstInt(r.random_integer() % self.max) builder.do(self.opnum, [v_string, v_index, v_target]) @@ -505,15 +506,15 @@ def produce_into(self, builder, r): v_srcstring = self.get_string(builder, r) v_dststring = self.get_string(builder, r) - src = v_srcstring.getref(self.ptr) - dst = v_dststring.getref(self.ptr) + src = getref(self.ptr, v_srcstring) + dst = getref(self.ptr, v_dststring) if src == dst: # because it's not a raise test_random.CannotProduceOperation # memmove(), but memcpy() srclen = len(src.chars) dstlen = len(dst.chars) v_length = builder.get_index(min(srclen, dstlen), r) - v_srcstart = builder.get_index(srclen - v_length.getint() + 1, r) - v_dststart = builder.get_index(dstlen - v_length.getint() + 1, r) + v_srcstart = builder.get_index(srclen - getint(v_length) + 1, r) + v_dststart = builder.get_index(dstlen - getint(v_length) + 1, r) builder.do(self.opnum, [v_srcstring, v_dststring, v_srcstart, v_dststart, v_length]) @@ -585,7 +586,7 @@ """ % funcargs).compile() vtableptr = v._hints['vtable']._as_ptr() d = { - 'ptr': S.getref_base(), + 'ptr': getref_base(S), 'vtable' : vtableptr, 'LLException' : LLException, } diff --git a/rpython/jit/backend/test/test_random.py b/rpython/jit/backend/test/test_random.py --- a/rpython/jit/backend/test/test_random.py +++ b/rpython/jit/backend/test/test_random.py @@ -11,11 +11,9 @@ from rpython.jit.metainterp.executor import _execute_arglist, wrap_constant from rpython.jit.metainterp.resoperation import opname from rpython.jit.codewriter import longlong -from rpython.rtyper.lltypesystem import lltype, rstr +from rpython.rtyper.lltypesystem import lltype, llmemory, rstr from rpython.rtyper import rclass -class PleaseRewriteMe(Exception): - pass class DummyLoop(object): def __init__(self, subops): @@ -27,6 +25,41 @@ def execute_raised(self, exc, constant=False): self._got_exc = exc + +def getint(v): + if isinstance(v, (ConstInt, InputArgInt)): + return v.getint() + else: + return v._example_int + +def getfloatstorage(v): + if isinstance(v, (ConstFloat, InputArgFloat)): + return v.getfloatstorage() + else: + return v._example_float + +def getfloat(v): + return longlong.getrealfloat(getfloatstorage(v)) + +def 
getref_base(v): + if isinstance(v, (ConstPtr, InputArgRef)): + return v.getref_base() + else: + return v._example_ref + +def getref(PTR, v): + return lltype.cast_opaque_ptr(PTR, getref_base(v)) + +def constbox(v): + if v.type == INT: + return ConstInt(getint(v)) + if v.type == FLOAT: + return ConstFloat(getfloatstorage(v)) + if v.type == REF: + return ConstPtr(getref_base(v)) + assert 0, v.type + + class OperationBuilder(object): def __init__(self, cpu, loop, vars): self.cpu = cpu @@ -57,11 +90,21 @@ def do(self, opnum, argboxes, descr=None): self.fakemetainterp._got_exc = None op = ResOperation(opnum, argboxes, descr) + argboxes = map(constbox, argboxes) result = _execute_arglist(self.cpu, self.fakemetainterp, opnum, argboxes, descr) if result is not None: - c_result = wrap_constant(result) - op.copy_value_from(c_result) + if lltype.typeOf(result) == lltype.Signed: + op._example_int = result + elif isinstance(result, bool): + op._example_int = int(result) + elif lltype.typeOf(result) == longlong.FLOATSTORAGE: + op._example_float = result + elif isinstance(result, float): + op._example_float = longlong.getfloatstorage(result) + else: + assert lltype.typeOf(result) == llmemory.GCREF + op._example_ref = result self.loop.operations.append(op) return op @@ -101,7 +144,7 @@ if v in names: args.append(names[v]) elif isinstance(v, ConstPtr): - assert not v.getref_base() # otherwise should be in the names + assert not getref_base(v) # otherwise should be in the names args.append('ConstPtr(lltype.nullptr(llmemory.GCREF.TO))') elif isinstance(v, ConstFloat): args.append('ConstFloat(longlong.getfloatstorage(%r))' @@ -198,10 +241,10 @@ # def writevar(v, nameprefix, init=''): if nameprefix == 'const_ptr': - if not v.getref_base(): + if not getref_base(v): return 'lltype.nullptr(llmemory.GCREF.TO)' - TYPE = v.getref_base()._obj.ORIGTYPE - cont = lltype.cast_opaque_ptr(TYPE, v.getref_base()) + TYPE = getref_base(v)._obj.ORIGTYPE + cont = lltype.cast_opaque_ptr(TYPE, getref_base(v)) if TYPE.TO._is_varsize(): if isinstance(TYPE.TO, lltype.GcStruct): lgt = len(cont.chars) @@ -252,9 +295,9 @@ for i, v in enumerate(self.loop.inputargs): assert not isinstance(v, Const) if v.type == FLOAT: - vals.append("longlong.getfloatstorage(%r)" % v.getfloat()) + vals.append("longlong.getfloatstorage(%r)" % getfloat(v)) else: - vals.append("%r" % v.getint()) + vals.append("%r" % getint(v)) print >>s, ' loop_args = [%s]' % ", ".join(vals) print >>s, ' frame = cpu.execute_token(looptoken, *loop_args)' if self.should_fail_by is None: @@ -264,10 +307,10 @@ for i, v in enumerate(fail_args): if v.type == FLOAT: print >>s, (' assert longlong.getrealfloat(' - 'cpu.get_float_value(frame, %d)) == %r' % (i, v.getfloatstorage())) + 'cpu.get_float_value(frame, %d)) == %r' % (i, getfloatstorage(v))) else: print >>s, (' assert cpu.get_int_value(frame, %d) == %d' - % (i, v.getint())) + % (i, getint(v))) self.names = names s.flush() @@ -295,7 +338,7 @@ builder.intvars.append(v_result) boolres = self.boolres if boolres == 'sometimes': - boolres = v_result.getint() in [0, 1] + boolres = getint(v_result) in [0, 1] if boolres: builder.boolvars.append(v_result) elif v_result.type == FLOAT: @@ -346,10 +389,10 @@ v_second = ConstInt((value & self.and_mask) | self.or_mask) else: v = r.choice(builder.intvars) - v_value = v.getint() + v_value = getint(v) if (v_value & self.and_mask) != v_value: v = builder.do(rop.INT_AND, [v, ConstInt(self.and_mask)]) - v_value = v.getint() + v_value = getint(v) if (v_value | self.or_mask) != v_value: v = 
builder.do(rop.INT_OR, [v, ConstInt(self.or_mask)]) v_second = v @@ -395,9 +438,9 @@ v_second = ConstFloat(r.random_float_storage()) else: v_second = r.choice(builder.floatvars) - if abs(v_first.getfloat()) > 1E100 or abs(v_second.getfloat()) > 1E100: + if abs(getfloat(v_first)) > 1E100 or abs(getfloat(v_second)) > 1E100: raise CannotProduceOperation # avoid infinities - if abs(v_second.getfloat()) < 1E-100: + if abs(getfloat(v_second)) < 1E-100: raise CannotProduceOperation # e.g. division by zero error self.put(builder, [v_first, v_second]) @@ -432,7 +475,7 @@ if not builder.floatvars: raise CannotProduceOperation box = r.choice(builder.floatvars) - if not (-sys.maxint-1 <= box.getfloat() <= sys.maxint): + if not (-sys.maxint-1 <= getfloat(box) <= sys.maxint): raise CannotProduceOperation # would give an overflow self.put(builder, [box]) @@ -440,8 +483,8 @@ def gen_guard(self, builder, r): v = builder.get_bool_var(r) op = ResOperation(self.opnum, [v]) - passing = ((self.opnum == rop.GUARD_TRUE and v.getint()) or - (self.opnum == rop.GUARD_FALSE and not v.getint())) + passing = ((self.opnum == rop.GUARD_TRUE and getint(v)) or + (self.opnum == rop.GUARD_FALSE and not getint(v))) return op, passing def produce_into(self, builder, r): @@ -459,8 +502,8 @@ raise CannotProduceOperation box = r.choice(builder.ptrvars)[0] op = ResOperation(self.opnum, [box]) - passing = ((self.opnum == rop.GUARD_NONNULL and box.getref_base()) or - (self.opnum == rop.GUARD_ISNULL and not box.getref_base())) + passing = ((self.opnum == rop.GUARD_NONNULL and getref_base(box)) or + (self.opnum == rop.GUARD_ISNULL and not getref_base(box))) return op, passing class GuardValueOperation(GuardOperation): @@ -470,14 +513,14 @@ other = r.choice(builder.intvars) else: if r.random() < 0.75: - value = v.getint() + value = getint(v) elif r.random() < 0.5: - value = v.getint() ^ 1 + value = getint(v) ^ 1 else: value = r.random_integer() other = ConstInt(value) op = ResOperation(self.opnum, [v, other]) - return op, (v.getint() == other.getint()) + return op, (getint(v) == getint(other)) # ____________________________________________________________ @@ -728,9 +771,9 @@ self.expected = {} for v in endvars: if v.type == INT: - self.expected[v] = v.getint() + self.expected[v] = getint(v) elif v.type == FLOAT: - self.expected[v] = v.getfloatstorage() + self.expected[v] = getfloatstorage(v) else: assert 0, v.type @@ -742,7 +785,7 @@ args = [] for box in self.startvars: if box not in self.loop.inputargs: - box = box.constbox() + box = constbox(box) args.append(box) self.cpu.compile_loop(self.loop.inputargs, [ResOperation(rop.JUMP, args, @@ -760,7 +803,7 @@ def clear_state(self): for v, S, fields in self.prebuilt_ptr_consts: - container = v.getref_base()._obj.container + container = getref_base(v)._obj.container for name, value in fields.items(): if isinstance(name, str): setattr(container, name, value) @@ -781,9 +824,9 @@ arguments = [] for box in self.loop.inputargs: if box.type == INT: - arguments.append(box.getint()) + arguments.append(getint(box)) elif box.type == FLOAT: - arguments.append(box.getfloatstorage()) + arguments.append(getfloatstorage(box)) else: assert 0, box.type deadframe = cpu.execute_token(self.runjitcelltoken(), *arguments) @@ -795,7 +838,7 @@ if v not in self.expected: assert v.getopnum() == rop.SAME_AS_I # special case assert isinstance(v.getarg(0), ConstInt) - self.expected[v] = v.getarg(0).getint() + self.expected[v] = getint(v.getarg(0)) if v.type == FLOAT: value = cpu.get_float_value(deadframe, i) else: @@ 
-807,7 +850,7 @@ ) exc = cpu.grab_exc_value(deadframe) if (self.guard_op is not None and - self.guard_op.is_guard_exception()): + rop.is_guard_exception(self.guard_op.getopnum())): if self.guard_op.getopnum() == rop.GUARD_NO_EXCEPTION: do_assert(exc, "grab_exc_value() should not be %r" % (exc,)) @@ -840,7 +883,7 @@ # generate the branch: a sequence of operations that ends in a FINISH subloop = DummyLoop([]) self.subloops.append(subloop) # keep around for debugging - if guard_op.is_guard_exception(): + if rop.is_guard_exception(guard_op.getopnum()): subloop.operations.append(exc_handling(guard_op)) bridge_builder = self.builder.fork(self.builder.cpu, subloop, op.getfailargs()[:]) @@ -876,9 +919,9 @@ args = [] for x in subset: if x.type == INT: - args.append(InputArgInt(x.getint())) + args.append(InputArgInt(getint(x))) elif x.type == FLOAT: - args.append(InputArgFloat(x.getfloatstorage())) + args.append(InputArgFloat(getfloatstorage(x))) else: assert 0, x.type rl = RandomLoop(self.builder.cpu, self.builder.fork, diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -402,15 +402,6 @@ def is_foldable_guard(self): return rop.is_foldable_guard(self.getopnun()) - def is_guard_exception(self): - return rop.is_guard_ - return (self.getopnum() == rop.GUARD_EXCEPTION or - self.getopnum() == rop.GUARD_NO_EXCEPTION) - - def is_guard_overflow(self): - return (self.getopnum() == rop.GUARD_OVERFLOW or - self.getopnum() == rop.GUARD_NO_OVERFLOW) - def is_jit_debug(self): return rop._JIT_DEBUG_FIRST <= self.getopnum() <= rop._JIT_DEBUG_LAST From pypy.commits at gmail.com Tue Mar 22 14:22:58 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 22 Mar 2016 11:22:58 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: merge heads Message-ID: <56f18d82.a2afc20a.a24d3.2cf0@mx.google.com> Author: Armin Rigo Branch: jit-leaner-frontend Changeset: r83267:b7002d40b823 Date: 2016-03-22 19:21 +0100 http://bitbucket.org/pypy/pypy/changeset/b7002d40b823/ Log: merge heads diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py --- a/rpython/jit/metainterp/optimizeopt/heap.py +++ b/rpython/jit/metainterp/optimizeopt/heap.py @@ -347,7 +347,7 @@ opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array return - if op.is_call(): + if rop.is_call(op.opnum): if rop.is_call_assembler(op.getopnum()): self._seen_guard_not_invalidated = False else: diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -464,8 +464,9 @@ else: last_guard_pos = -1 assert opinfo is None or opinfo.__class__ is info.NonNullPtrInfo - if (op.is_getfield() or op.getopnum() == rop.SETFIELD_GC or - op.getopnum() == rop.QUASIIMMUT_FIELD): + opnum = op.opnum + if (rop.is_getfield(opnum) or opnum == rop.SETFIELD_GC or + opnum == rop.QUASIIMMUT_FIELD): descr = op.getdescr() parent_descr = descr.get_parent_descr() if parent_descr.is_object(): @@ -473,14 +474,14 @@ else: opinfo = info.StructPtrInfo(parent_descr) opinfo.init_fields(parent_descr, descr.get_index()) - elif (op.is_getarrayitem() or op.getopnum() == rop.SETARRAYITEM_GC or - op.getopnum() == rop.ARRAYLEN_GC): + elif (rop.is_getarrayitem(opnum) or opnum == rop.SETARRAYITEM_GC or + opnum 
== rop.ARRAYLEN_GC): opinfo = info.ArrayPtrInfo(op.getdescr()) - elif op.getopnum() in (rop.GUARD_CLASS, rop.GUARD_NONNULL_CLASS): + elif opnum in (rop.GUARD_CLASS, rop.GUARD_NONNULL_CLASS): opinfo = info.InstancePtrInfo() - elif op.getopnum() in (rop.STRLEN,): + elif opnum in (rop.STRLEN,): opinfo = vstring.StrPtrInfo(vstring.mode_string) - elif op.getopnum() in (rop.UNICODELEN,): + elif opnum in (rop.UNICODELEN,): opinfo = vstring.StrPtrInfo(vstring.mode_unicode) else: assert False, "operations %s unsupported" % op @@ -542,7 +543,7 @@ dispatch_opt(self, op) def emit_operation(self, op): - if op.returns_bool_result(): + if rop.returns_bool_result(op.opnum): self.getintbound(op).make_bool() self._emit_operation(op) op = self.get_box_replacement(op) @@ -561,8 +562,8 @@ if op.is_constant(): return # can happen e.g. if we postpone the operation that becomes # constant - # XXX kill - op = self.replace_op_with(op, op.getopnum()) + # XXX kill, requires thinking + op = self.replace_op_with(op, op.opnum) for i in range(op.numargs()): arg = self.force_box(op.getarg(i)) op.setarg(i, arg) @@ -580,8 +581,10 @@ op = self.emit_guard_operation(op, pendingfields) elif op.can_raise(): self.exception_might_have_happened = True - if ((op.has_no_side_effect() or op.is_guard() or op.is_jit_debug() or - op.is_ovf()) and not self.is_call_pure_pure_canraise(op)): + opnum = op.opnum + if ((rop.has_no_side_effect(opnum) or rop.is_guard(opnum) or + rop.is_jit_debug(opnum) or + rop.is_ovf(opnum)) and not self.is_call_pure_pure_canraise(op)): pass else: self._last_guard_op = None diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py --- a/rpython/jit/metainterp/optimizeopt/pure.py +++ b/rpython/jit/metainterp/optimizeopt/pure.py @@ -75,8 +75,8 @@ dispatch_opt(self, op) def optimize_default(self, op): - canfold = op.is_always_pure() - if op.is_ovf(): + canfold = rop.is_always_pure(op.opnum) + if rop.is_ovf(op.opnum): self.postponed_op = op return if self.postponed_op: @@ -110,7 +110,7 @@ # otherwise, the operation remains self.emit_operation(op) - if op.returns_bool_result(): + if rop.returns_bool_result(op.opnum): self.getintbound(op).make_bool() if save: recentops = self.getrecentops(op.getopnum()) @@ -221,9 +221,9 @@ def produce_potential_short_preamble_ops(self, sb): ops = self.optimizer._newoperations for i, op in enumerate(ops): - if op.is_always_pure(): + if rop.is_always_pure(op.opnum): sb.add_pure_op(op) - if op.is_ovf() and ops[i + 1].getopnum() == rop.GUARD_NO_OVERFLOW: + if rop.is_ovf(op.opnum) and ops[i + 1].getopnum() == rop.GUARD_NO_OVERFLOW: sb.add_pure_op(op) for i in self.call_pure_positions: op = ops[i] @@ -232,7 +232,7 @@ effectinfo = op.getdescr().get_extra_info() if not effectinfo.check_can_raise(ignore_memoryerror=True): - assert op.is_call() + assert rop.is_call(op.opnum) sb.add_pure_op(op) dispatch_opt = make_dispatcher_method(OptPure, 'optimize_', diff --git a/rpython/jit/metainterp/optimizeopt/shortpreamble.py b/rpython/jit/metainterp/optimizeopt/shortpreamble.py --- a/rpython/jit/metainterp/optimizeopt/shortpreamble.py +++ b/rpython/jit/metainterp/optimizeopt/shortpreamble.py @@ -72,7 +72,7 @@ pop = PreambleOp(self.res, preamble_op, invented_name) assert not opinfo.is_virtual() descr = self.getfield_op.getdescr() - if g.is_getfield(): + if rop.is_getfield(g.opnum): cf = optheap.field_cache(descr) opinfo.setfield(preamble_op.getdescr(), self.res, pop, optheap, cf) @@ -92,7 +92,7 @@ preamble_arg = sb.produce_arg(sop.getarg(0)) if preamble_arg is 
None: return None - if sop.is_getfield(): + if rop.is_getfield(sop.opnum): preamble_op = ResOperation(sop.getopnum(), [preamble_arg], descr=sop.getdescr()) else: @@ -117,7 +117,7 @@ op.set_forwarded(self.res) else: op = self.res - if preamble_op.is_call(): + if rop.is_call(preamble_op.opnum): optpure.extra_call_pure.append(PreambleOp(op, preamble_op, invented_name)) else: @@ -132,7 +132,7 @@ if newarg is None: return None arglist.append(newarg) - if op.is_call(): + if rop.is_call(op.opnum): opnum = OpHelpers.call_pure_for_descr(op.getdescr()) else: opnum = op.getopnum() diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py --- a/rpython/jit/metainterp/optimizeopt/unroll.py +++ b/rpython/jit/metainterp/optimizeopt/unroll.py @@ -391,7 +391,7 @@ for box in self._map_args(mapping, short_jump_args)] def _expand_info(self, arg, infos): - if isinstance(arg, AbstractResOp) and arg.is_same_as(): + if isinstance(arg, AbstractResOp) and rop.is_same_as(arg.opnum): info = self.optimizer.getinfo(arg.getarg(0)) else: info = self.optimizer.getinfo(arg) diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -205,7 +205,7 @@ type = vecinfo.datatype signed = vecinfo.signed bytesize = vecinfo.bytesize - if op.returns_bool_result(): + if rop.returns_bool_result(op.opnum): type = 'i' self.setinfo(type, bytesize, signed) @@ -402,49 +402,10 @@ def is_foldable_guard(self): return rop.is_foldable_guard(self.getopnun()) - def is_jit_debug(self): - return rop._JIT_DEBUG_FIRST <= self.getopnum() <= rop._JIT_DEBUG_LAST - - def is_always_pure(self): - # Tells whether an operation is pure based solely on the opcode. - # Other operations (e.g. getfield ops) may be pure in some cases are well. 
- return rop._ALWAYS_PURE_FIRST <= self.getopnum() <= rop._ALWAYS_PURE_LAST - - def has_no_side_effect(self): - return rop._NOSIDEEFFECT_FIRST <= self.getopnum() <= rop._NOSIDEEFFECT_LAST - - def is_malloc(self): - # a slightly different meaning from can_malloc - return rop._MALLOC_FIRST <= self.getopnum() <= rop._MALLOC_LAST - - def can_malloc(self): - return self.is_call() or self.is_malloc() - - def is_call(self): - return rop._CALL_FIRST <= self.getopnum() <= rop._CALL_LAST - - def is_same_as(self): - return self.opnum in (rop.SAME_AS_I, rop.SAME_AS_F, rop.SAME_AS_R) - - def is_getfield(self): - return self.opnum in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, rop.GETFIELD_GC_R) - - def is_getarrayitem(self): - return self.opnum in (rop.GETARRAYITEM_GC_I, rop.GETARRAYITEM_GC_F, - rop.GETARRAYITEM_GC_R, rop.GETARRAYITEM_GC_PURE_I, - rop.GETARRAYITEM_GC_PURE_F, - rop.GETARRAYITEM_GC_PURE_R) - - def is_vector_arithmetic(self): - return rop._VEC_ARITHMETIC_FIRST <= self.getopnum() <= rop._VEC_ARITHMETIC_LAST - - def is_raw_array_access(self): - return self.is_raw_load() or self.is_raw_store() - def is_primitive_array_access(self): """ Indicates that this operations loads/stores a primitive type (int,float) """ - if self.is_primitive_load() or self.is_primitive_store(): + if rop.is_primitive_load(self.opnum) or rop.is_primitive_store(self.opnum): descr = self.getdescr() if not we_are_translated(): from rpython.jit.backend.llgraph.runner import _getdescr @@ -453,24 +414,6 @@ return True return False - def is_primitive_load(self): - return rop._RAW_LOAD_FIRST < self.getopnum() < rop._RAW_LOAD_LAST - - def is_primitive_store(self): - return rop._RAW_STORE_FIRST < self.getopnum() < rop._RAW_STORE_LAST - - def is_final(self): - return rop._FINAL_FIRST <= self.getopnum() <= rop._FINAL_LAST - - def returns_bool_result(self): - return self._cls_has_bool_result - - #def forget_value(self): -- in the base class, AbstractResOpOrInputArg - # pass - - def is_label(self): - return self.getopnum() == rop.LABEL - def is_vector(self): return False @@ -1433,27 +1376,30 @@ def can_raise(opnum): return rop._CANRAISE_FIRST <= opnum <= rop._CANRAISE_LAST - def is_malloc(self): + @staticmethod + def is_malloc(opnum): # a slightly different meaning from can_malloc - return rop._MALLOC_FIRST <= self.getopnum() <= rop._MALLOC_LAST + return rop._MALLOC_FIRST <= opnum <= rop._MALLOC_LAST - def can_malloc(self): - return self.is_call() or self.is_malloc() + @staticmethod + def can_malloc(opnum): + return rop.is_call(opnum) or rop.is_malloc(opnum) @staticmethod def is_same_as(opnum): return opnum in (rop.SAME_AS_I, rop.SAME_AS_F, rop.SAME_AS_R) - def is_getfield(self): - return self.opnum in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, - rop.GETFIELD_GC_R, rop.GETFIELD_GC_PURE_I, - rop.GETFIELD_GC_PURE_R, rop.GETFIELD_GC_PURE_F) + @staticmethod + def is_getfield(opnum): + return opnum in (rop.GETFIELD_GC_I, rop.GETFIELD_GC_F, + rop.GETFIELD_GC_R) - def is_getarrayitem(self): - return self.opnum in (rop.GETARRAYITEM_GC_I, rop.GETARRAYITEM_GC_F, - rop.GETARRAYITEM_GC_R, rop.GETARRAYITEM_GC_PURE_I, - rop.GETARRAYITEM_GC_PURE_F, - rop.GETARRAYITEM_GC_PURE_R) + @staticmethod + def is_getarrayitem(opnum): + return opnum in (rop.GETARRAYITEM_GC_I, rop.GETARRAYITEM_GC_F, + rop.GETARRAYITEM_GC_R, rop.GETARRAYITEM_GC_PURE_I, + rop.GETARRAYITEM_GC_PURE_F, + rop.GETARRAYITEM_GC_PURE_R) @staticmethod def is_real_call(opnum): @@ -1494,42 +1440,33 @@ def is_ovf(opnum): return rop._OVF_FIRST <= opnum <= rop._OVF_LAST - def 
is_vector_arithmetic(self): - return rop._VEC_ARITHMETIC_FIRST <= self.getopnum() <= rop._VEC_ARITHMETIC_LAST + @staticmethod + def is_vector_arithmetic(opnum): + return rop._VEC_ARITHMETIC_FIRST <= opnum <= rop._VEC_ARITHMETIC_LAST - def is_raw_array_access(self): - return self.is_raw_load() or self.is_raw_store() + @staticmethod + def is_raw_array_access(opnum): + return rop.is_raw_load(opnum) or rop.is_raw_store(opnum) - def is_primitive_array_access(self): - """ Indicates that this operations loads/stores a - primitive type (int,float) """ - if self.is_primitive_load() or self.is_primitive_store(): - descr = self.getdescr() - if not we_are_translated(): - from rpython.jit.backend.llgraph.runner import _getdescr - descr = _getdescr(self) - if descr and descr.is_array_of_primitives(): - return True - return False + @staticmethod + def is_primitive_load(opnum): + return rop._RAW_LOAD_FIRST < opnum < rop._RAW_LOAD_LAST - def is_primitive_load(self): - return rop._RAW_LOAD_FIRST < self.getopnum() < rop._RAW_LOAD_LAST + @staticmethod + def is_primitive_store(opnum): + return rop._RAW_STORE_FIRST < opnum < rop._RAW_STORE_LAST - def is_primitive_store(self): - return rop._RAW_STORE_FIRST < self.getopnum() < rop._RAW_STORE_LAST - - def is_final(self): - return rop._FINAL_FIRST <= self.getopnum() <= rop._FINAL_LAST + @staticmethod + def is_final(opnum): + return rop._FINAL_FIRST <= opnum <= rop._FINAL_LAST @staticmethod def returns_bool_result(opnum): return opclasses[opnum]._cls_has_bool_result - #def forget_value(self): -- in the base class, AbstractResOpOrInputArg - # pass - - def is_label(self): - return self.getopnum() == rop.LABEL + @staticmethod + def is_label(opnum): + return opnum == rop.LABEL @staticmethod def is_call(opnum): From pypy.commits at gmail.com Tue Mar 22 14:25:59 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 22 Mar 2016 11:25:59 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: merge Message-ID: <56f18e37.c65b1c0a.394c7.675a@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83269:2b41cc59ff3b Date: 2016-03-22 20:25 +0200 http://bitbucket.org/pypy/pypy/changeset/2b41cc59ff3b/ Log: merge diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -391,7 +391,6 @@ # it's hard to test all cases). Rewrite it away. 
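The jit-leaner-frontend changesets above keep applying the same mechanical rewrite: predicates that used to be instance methods on ResOperation (op.is_call(), op.is_guard(), op.has_no_side_effect(), ...) become staticmethods on rop that take the integer opnum, so a check needs only the opcode and not a full operation object. A minimal sketch of the new calling pattern; rop, getopnum() and the two predicates are the real names from the diffs, while filter_pure_ops and its ops argument are made up for illustration:

    from rpython.jit.metainterp.resoperation import rop

    def filter_pure_ops(ops):
        # keep only operations that the opnum-based predicates classify
        # as side-effect-free and that are not guards
        result = []
        for op in ops:
            opnum = op.getopnum()   # look the opcode up once
            if rop.has_no_side_effect(opnum) and not rop.is_guard(opnum):
                result.append(op)
        return result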
value = int(opnum == rop.GUARD_FALSE) op1 = ResOperation(rop.SAME_AS_I, [ConstInt(value)]) - op1.setint(value) self.emit_op(op1) lst = op.getfailargs()[:] lst[i] = op1 diff --git a/rpython/jit/backend/test/test_ll_random.py b/rpython/jit/backend/test/test_ll_random.py --- a/rpython/jit/backend/test/test_ll_random.py +++ b/rpython/jit/backend/test/test_ll_random.py @@ -2,6 +2,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr from rpython.rtyper import rclass from rpython.jit.backend.test import test_random +from rpython.jit.backend.test.test_random import getint, getref_base, getref from rpython.jit.metainterp.resoperation import ResOperation, rop, optypes from rpython.jit.metainterp.history import ConstInt, ConstPtr, getkind from rpython.jit.codewriter import heaptracker @@ -169,7 +170,7 @@ if length == 0: raise test_random.CannotProduceOperation v_index = r.choice(self.intvars) - if not (0 <= v_index.getint() < length): + if not (0 <= getint(v_index) < length): v_index = ConstInt(r.random_integer() % length) return v_index @@ -311,7 +312,7 @@ def field_descr(self, builder, r): v, A = builder.get_structptr_var(r, type=lltype.Array, array_of_structs=True) - array = v.getref(lltype.Ptr(A)) + array = getref(lltype.Ptr(A), v) v_index = builder.get_index(len(array), r) choice = [] for name in A.OF._names: @@ -344,7 +345,7 @@ w = ConstInt(r.random_integer()) else: w = r.choice(builder.intvars) - value = w.getint() + value = getint(w) if rffi.cast(lltype.Signed, rffi.cast(TYPE, value)) == value: break builder.do(self.opnum, [v, w], descr) @@ -357,7 +358,7 @@ w = ConstInt(r.random_integer()) else: w = r.choice(builder.intvars) - value = w.getint() + value = getint(w) if rffi.cast(lltype.Signed, rffi.cast(TYPE, value)) == value: break builder.do(self.opnum, [v, v_index, w], descr) @@ -389,7 +390,7 @@ class GetArrayItemOperation(ArrayOperation): def field_descr(self, builder, r): v, A = builder.get_arrayptr_var(r) - array = v.getref(lltype.Ptr(A)) + array = getref(lltype.Ptr(A), v) v_index = builder.get_index(len(array), r) descr = self.array_descr(builder, A) return v, A, v_index, descr @@ -411,7 +412,7 @@ w = ConstInt(r.random_integer()) else: w = r.choice(builder.intvars) - value = w.getint() + value = getint(w) if rffi.cast(lltype.Signed, rffi.cast(A.OF, value)) == value: break builder.do(self.opnum, [v, v_index, w], descr) @@ -455,7 +456,7 @@ v_ptr = builder.do(self.opnum, [v_length]) getattr(builder, self.builder_cache).append(v_ptr) # Initialize the string. Is there a better way to do this? 
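The random-loop tests above (test_random.py, test_ll_random.py) now read values through the module-level helpers getint()/getfloat()/getref_base()/getref() instead of calling accessor methods on the box: a plain ResOperation in these loops only carries its example value in _example_int/_example_float/_example_ref, set by OperationBuilder.do(), while constants still answer getint() and friends directly. A small sketch of what the helpers allow, assuming the imports shown in the test_ll_random.py diff; check_is_small is a hypothetical helper:

    from rpython.jit.metainterp.history import ConstInt
    from rpython.jit.backend.test.test_random import getint

    def check_is_small(box):
        # works both for ConstInt/InputArgInt (which have a real getint()
        # method) and for a ResOperation that only has _example_int attached
        return -100 <= getint(box) <= 100

    assert check_is_small(ConstInt(42))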
- for i in range(v_length.getint()): + for i in range(getint(v_length)): v_index = ConstInt(i) v_char = ConstInt(r.random_integer() % self.max) builder.do(self.set_char, [v_ptr, v_index, v_char]) @@ -471,9 +472,9 @@ current = getattr(builder, self.builder_cache) if current and r.random() < .8: v_string = r.choice(current) - string = v_string.getref(self.ptr) + string = getref(self.ptr, v_string) else: - string = self.alloc(builder.get_index(500, r).getint()) + string = self.alloc(getint(builder.get_index(500, r))) v_string = ConstPtr(lltype.cast_opaque_ptr(llmemory.GCREF, string)) current.append(v_string) for i in range(len(string.chars)): @@ -484,7 +485,7 @@ class AbstractGetItemOperation(AbstractStringOperation): def produce_into(self, builder, r): v_string = self.get_string(builder, r) - v_index = builder.get_index(len(v_string.getref(self.ptr).chars), r) + v_index = builder.get_index(len(getref(self.ptr, v_string).chars), r) builder.do(self.opnum, [v_string, v_index]) class AbstractSetItemOperation(AbstractStringOperation): @@ -492,7 +493,7 @@ v_string = self.get_string(builder, r) if isinstance(v_string, ConstPtr): raise test_random.CannotProduceOperation # setitem(Const, ...) - v_index = builder.get_index(len(v_string.getref(self.ptr).chars), r) + v_index = builder.get_index(len(getref(self.ptr, v_string).chars), r) v_target = ConstInt(r.random_integer() % self.max) builder.do(self.opnum, [v_string, v_index, v_target]) @@ -505,15 +506,15 @@ def produce_into(self, builder, r): v_srcstring = self.get_string(builder, r) v_dststring = self.get_string(builder, r) - src = v_srcstring.getref(self.ptr) - dst = v_dststring.getref(self.ptr) + src = getref(self.ptr, v_srcstring) + dst = getref(self.ptr, v_dststring) if src == dst: # because it's not a raise test_random.CannotProduceOperation # memmove(), but memcpy() srclen = len(src.chars) dstlen = len(dst.chars) v_length = builder.get_index(min(srclen, dstlen), r) - v_srcstart = builder.get_index(srclen - v_length.getint() + 1, r) - v_dststart = builder.get_index(dstlen - v_length.getint() + 1, r) + v_srcstart = builder.get_index(srclen - getint(v_length) + 1, r) + v_dststart = builder.get_index(dstlen - getint(v_length) + 1, r) builder.do(self.opnum, [v_srcstring, v_dststring, v_srcstart, v_dststart, v_length]) @@ -585,7 +586,7 @@ """ % funcargs).compile() vtableptr = v._hints['vtable']._as_ptr() d = { - 'ptr': S.getref_base(), + 'ptr': getref_base(S), 'vtable' : vtableptr, 'LLException' : LLException, } diff --git a/rpython/jit/backend/test/test_random.py b/rpython/jit/backend/test/test_random.py --- a/rpython/jit/backend/test/test_random.py +++ b/rpython/jit/backend/test/test_random.py @@ -11,11 +11,9 @@ from rpython.jit.metainterp.executor import _execute_arglist, wrap_constant from rpython.jit.metainterp.resoperation import opname from rpython.jit.codewriter import longlong -from rpython.rtyper.lltypesystem import lltype, rstr +from rpython.rtyper.lltypesystem import lltype, llmemory, rstr from rpython.rtyper import rclass -class PleaseRewriteMe(Exception): - pass class DummyLoop(object): def __init__(self, subops): @@ -27,6 +25,41 @@ def execute_raised(self, exc, constant=False): self._got_exc = exc + +def getint(v): + if isinstance(v, (ConstInt, InputArgInt)): + return v.getint() + else: + return v._example_int + +def getfloatstorage(v): + if isinstance(v, (ConstFloat, InputArgFloat)): + return v.getfloatstorage() + else: + return v._example_float + +def getfloat(v): + return longlong.getrealfloat(getfloatstorage(v)) + +def 
getref_base(v): + if isinstance(v, (ConstPtr, InputArgRef)): + return v.getref_base() + else: + return v._example_ref + +def getref(PTR, v): + return lltype.cast_opaque_ptr(PTR, getref_base(v)) + +def constbox(v): + if v.type == INT: + return ConstInt(getint(v)) + if v.type == FLOAT: + return ConstFloat(getfloatstorage(v)) + if v.type == REF: + return ConstPtr(getref_base(v)) + assert 0, v.type + + class OperationBuilder(object): def __init__(self, cpu, loop, vars): self.cpu = cpu @@ -57,11 +90,21 @@ def do(self, opnum, argboxes, descr=None): self.fakemetainterp._got_exc = None op = ResOperation(opnum, argboxes, descr) + argboxes = map(constbox, argboxes) result = _execute_arglist(self.cpu, self.fakemetainterp, opnum, argboxes, descr) if result is not None: - c_result = wrap_constant(result) - op.copy_value_from(c_result) + if lltype.typeOf(result) == lltype.Signed: + op._example_int = result + elif isinstance(result, bool): + op._example_int = int(result) + elif lltype.typeOf(result) == longlong.FLOATSTORAGE: + op._example_float = result + elif isinstance(result, float): + op._example_float = longlong.getfloatstorage(result) + else: + assert lltype.typeOf(result) == llmemory.GCREF + op._example_ref = result self.loop.operations.append(op) return op @@ -101,7 +144,7 @@ if v in names: args.append(names[v]) elif isinstance(v, ConstPtr): - assert not v.getref_base() # otherwise should be in the names + assert not getref_base(v) # otherwise should be in the names args.append('ConstPtr(lltype.nullptr(llmemory.GCREF.TO))') elif isinstance(v, ConstFloat): args.append('ConstFloat(longlong.getfloatstorage(%r))' @@ -198,10 +241,10 @@ # def writevar(v, nameprefix, init=''): if nameprefix == 'const_ptr': - if not v.getref_base(): + if not getref_base(v): return 'lltype.nullptr(llmemory.GCREF.TO)' - TYPE = v.getref_base()._obj.ORIGTYPE - cont = lltype.cast_opaque_ptr(TYPE, v.getref_base()) + TYPE = getref_base(v)._obj.ORIGTYPE + cont = lltype.cast_opaque_ptr(TYPE, getref_base(v)) if TYPE.TO._is_varsize(): if isinstance(TYPE.TO, lltype.GcStruct): lgt = len(cont.chars) @@ -252,9 +295,9 @@ for i, v in enumerate(self.loop.inputargs): assert not isinstance(v, Const) if v.type == FLOAT: - vals.append("longlong.getfloatstorage(%r)" % v.getfloat()) + vals.append("longlong.getfloatstorage(%r)" % getfloat(v)) else: - vals.append("%r" % v.getint()) + vals.append("%r" % getint(v)) print >>s, ' loop_args = [%s]' % ", ".join(vals) print >>s, ' frame = cpu.execute_token(looptoken, *loop_args)' if self.should_fail_by is None: @@ -264,10 +307,10 @@ for i, v in enumerate(fail_args): if v.type == FLOAT: print >>s, (' assert longlong.getrealfloat(' - 'cpu.get_float_value(frame, %d)) == %r' % (i, v.getfloatstorage())) + 'cpu.get_float_value(frame, %d)) == %r' % (i, getfloatstorage(v))) else: print >>s, (' assert cpu.get_int_value(frame, %d) == %d' - % (i, v.getint())) + % (i, getint(v))) self.names = names s.flush() @@ -295,7 +338,7 @@ builder.intvars.append(v_result) boolres = self.boolres if boolres == 'sometimes': - boolres = v_result.getint() in [0, 1] + boolres = getint(v_result) in [0, 1] if boolres: builder.boolvars.append(v_result) elif v_result.type == FLOAT: @@ -346,10 +389,10 @@ v_second = ConstInt((value & self.and_mask) | self.or_mask) else: v = r.choice(builder.intvars) - v_value = v.getint() + v_value = getint(v) if (v_value & self.and_mask) != v_value: v = builder.do(rop.INT_AND, [v, ConstInt(self.and_mask)]) - v_value = v.getint() + v_value = getint(v) if (v_value | self.or_mask) != v_value: v = 
builder.do(rop.INT_OR, [v, ConstInt(self.or_mask)]) v_second = v @@ -395,9 +438,9 @@ v_second = ConstFloat(r.random_float_storage()) else: v_second = r.choice(builder.floatvars) - if abs(v_first.getfloat()) > 1E100 or abs(v_second.getfloat()) > 1E100: + if abs(getfloat(v_first)) > 1E100 or abs(getfloat(v_second)) > 1E100: raise CannotProduceOperation # avoid infinities - if abs(v_second.getfloat()) < 1E-100: + if abs(getfloat(v_second)) < 1E-100: raise CannotProduceOperation # e.g. division by zero error self.put(builder, [v_first, v_second]) @@ -432,7 +475,7 @@ if not builder.floatvars: raise CannotProduceOperation box = r.choice(builder.floatvars) - if not (-sys.maxint-1 <= box.getfloat() <= sys.maxint): + if not (-sys.maxint-1 <= getfloat(box) <= sys.maxint): raise CannotProduceOperation # would give an overflow self.put(builder, [box]) @@ -440,8 +483,8 @@ def gen_guard(self, builder, r): v = builder.get_bool_var(r) op = ResOperation(self.opnum, [v]) - passing = ((self.opnum == rop.GUARD_TRUE and v.getint()) or - (self.opnum == rop.GUARD_FALSE and not v.getint())) + passing = ((self.opnum == rop.GUARD_TRUE and getint(v)) or + (self.opnum == rop.GUARD_FALSE and not getint(v))) return op, passing def produce_into(self, builder, r): @@ -459,8 +502,8 @@ raise CannotProduceOperation box = r.choice(builder.ptrvars)[0] op = ResOperation(self.opnum, [box]) - passing = ((self.opnum == rop.GUARD_NONNULL and box.getref_base()) or - (self.opnum == rop.GUARD_ISNULL and not box.getref_base())) + passing = ((self.opnum == rop.GUARD_NONNULL and getref_base(box)) or + (self.opnum == rop.GUARD_ISNULL and not getref_base(box))) return op, passing class GuardValueOperation(GuardOperation): @@ -470,14 +513,14 @@ other = r.choice(builder.intvars) else: if r.random() < 0.75: - value = v.getint() + value = getint(v) elif r.random() < 0.5: - value = v.getint() ^ 1 + value = getint(v) ^ 1 else: value = r.random_integer() other = ConstInt(value) op = ResOperation(self.opnum, [v, other]) - return op, (v.getint() == other.getint()) + return op, (getint(v) == getint(other)) # ____________________________________________________________ @@ -728,9 +771,9 @@ self.expected = {} for v in endvars: if v.type == INT: - self.expected[v] = v.getint() + self.expected[v] = getint(v) elif v.type == FLOAT: - self.expected[v] = v.getfloatstorage() + self.expected[v] = getfloatstorage(v) else: assert 0, v.type @@ -742,7 +785,7 @@ args = [] for box in self.startvars: if box not in self.loop.inputargs: - box = box.constbox() + box = constbox(box) args.append(box) self.cpu.compile_loop(self.loop.inputargs, [ResOperation(rop.JUMP, args, @@ -760,7 +803,7 @@ def clear_state(self): for v, S, fields in self.prebuilt_ptr_consts: - container = v.getref_base()._obj.container + container = getref_base(v)._obj.container for name, value in fields.items(): if isinstance(name, str): setattr(container, name, value) @@ -781,9 +824,9 @@ arguments = [] for box in self.loop.inputargs: if box.type == INT: - arguments.append(box.getint()) + arguments.append(getint(box)) elif box.type == FLOAT: - arguments.append(box.getfloatstorage()) + arguments.append(getfloatstorage(box)) else: assert 0, box.type deadframe = cpu.execute_token(self.runjitcelltoken(), *arguments) @@ -795,7 +838,7 @@ if v not in self.expected: assert v.getopnum() == rop.SAME_AS_I # special case assert isinstance(v.getarg(0), ConstInt) - self.expected[v] = v.getarg(0).getint() + self.expected[v] = getint(v.getarg(0)) if v.type == FLOAT: value = cpu.get_float_value(deadframe, i) else: @@ 
-807,7 +850,7 @@ ) exc = cpu.grab_exc_value(deadframe) if (self.guard_op is not None and - self.guard_op.is_guard_exception()): + rop.is_guard_exception(self.guard_op.getopnum())): if self.guard_op.getopnum() == rop.GUARD_NO_EXCEPTION: do_assert(exc, "grab_exc_value() should not be %r" % (exc,)) @@ -840,7 +883,7 @@ # generate the branch: a sequence of operations that ends in a FINISH subloop = DummyLoop([]) self.subloops.append(subloop) # keep around for debugging - if guard_op.is_guard_exception(): + if rop.is_guard_exception(guard_op.getopnum()): subloop.operations.append(exc_handling(guard_op)) bridge_builder = self.builder.fork(self.builder.cpu, subloop, op.getfailargs()[:]) @@ -876,9 +919,9 @@ args = [] for x in subset: if x.type == INT: - args.append(InputArgInt(x.getint())) + args.append(InputArgInt(getint(x))) elif x.type == FLOAT: - args.append(InputArgFloat(x.getfloatstorage())) + args.append(InputArgFloat(getfloatstorage(x))) else: assert 0, x.type rl = RandomLoop(self.builder.cpu, self.builder.fork, From pypy.commits at gmail.com Tue Mar 22 14:25:57 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 22 Mar 2016 11:25:57 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: fix a stupid test Message-ID: <56f18e35.49f9c20a.bf8c1.2256@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83268:89e98a074346 Date: 2016-03-22 20:23 +0200 http://bitbucket.org/pypy/pypy/changeset/89e98a074346/ Log: fix a stupid test diff --git a/rpython/jit/metainterp/test/test_resoperation.py b/rpython/jit/metainterp/test/test_resoperation.py --- a/rpython/jit/metainterp/test/test_resoperation.py +++ b/rpython/jit/metainterp/test/test_resoperation.py @@ -74,13 +74,9 @@ #assert re.match("guard_no_exception\(descr=<.+>\)$", repr(op)) def test_can_malloc(): - a = ConstInt(1) - b = ConstInt(2) - mydescr = AbstractDescr() - assert rop.ResOperation(rop.rop.NEW, []).can_malloc() - call = rop.ResOperation(rop.rop.CALL_N, [a, b], descr=mydescr) - assert call.can_malloc() - assert not rop.ResOperation(rop.rop.INT_ADD, [a, b]).can_malloc() + assert rop.rop.can_malloc(rop.rop.NEW) + assert rop.rop.can_malloc(rop.rop.CALL_N) + assert not rop.rop.can_malloc(rop.rop.INT_ADD) def test_get_deep_immutable_oplist(): a = ConstInt(1) From pypy.commits at gmail.com Tue Mar 22 14:28:29 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 22 Mar 2016 11:28:29 -0700 (PDT) Subject: [pypy-commit] pypy rposix-for-3: Add mkdirat(), unlinkat(), readlinkat(), symlinkat(), openat(), mkfifoat(), mknodat() (untested) Message-ID: <56f18ecd.c818c20a.e1f8d.140b@mx.google.com> Author: Ronan Lamy Branch: rposix-for-3 Changeset: r83270:411c0cdddcd1 Date: 2016-03-22 17:36 +0000 http://bitbucket.org/pypy/pypy/changeset/411c0cdddcd1/ Log: Add mkdirat(), unlinkat(), readlinkat(), symlinkat(), openat(), mkfifoat(), mknodat() (untested) diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -1738,6 +1738,7 @@ AT_FDCWD = rffi_platform.DefinedConstantInteger('AT_FDCWD') AT_SYMLINK_NOFOLLOW = rffi_platform.DefinedConstantInteger('AT_SYMLINK_NOFOLLOW') AT_EACCESS = rffi_platform.DefinedConstantInteger('AT_EACCESS') + AT_REMOVEDIR = rffi_platform.DefinedConstantInteger('AT_REMOVEDIR') TIMESPEC = rffi_platform.Struct('struct timespec', [ ('tv_sec', rffi.TIME_T), ('tv_nsec', rffi.LONG)]) @@ -1813,3 +1814,86 @@ flag = AT_SYMLINK_NOFOLLOW error = c_futimens(dir_fd, pathname, l_times, flag) handle_posix_error('utimensat', error) + +if HAVE_MKDIRAT: + c_mkdirat 
= external('mkdirat', + [rffi.INT, rffi.CCHARP, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def mkdirat(pathname, mode, dir_fd=AT_FDCWD): + error = c_mkdirat(dir_fd, pathname, mode) + handle_posix_error('mkdirat', error) + +if HAVE_UNLINKAT: + c_unlinkat = external('unlinkat', + [rffi.INT, rffi.CCHARP, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def unlinkat(pathname, dir_fd=AT_FDCWD, removedir=False): + flag = AT_REMOVEDIR if removedir else 0 + error = c_unlinkat(dir_fd, pathname, flag) + handle_posix_error('unlinkat', error) + +if HAVE_READLINKAT: + c_readlinkat = external( + 'readlinkat', + [rffi.INT, rffi.CCHARP, rffi.CCHARP, rffi.SIZE_T], rffi.SSIZE_T, + save_err=rffi.RFFI_SAVE_ERRNO) + + def readlinkat(pathname, dir_fd=AT_FDCWD): + pathname = _as_bytes0(pathname) + bufsize = 1023 + while True: + buf = lltype.malloc(rffi.CCHARP.TO, bufsize, flavor='raw') + res = widen(c_readlinkat(dir_fd, pathname, buf, bufsize)) + if res < 0: + lltype.free(buf, flavor='raw') + error = get_saved_errno() # failed + raise OSError(error, "readlinkat failed") + elif res < bufsize: + break # ok + else: + # buf too small, try again with a larger buffer + lltype.free(buf, flavor='raw') + bufsize *= 4 + # convert the result to a string + result = rffi.charp2strn(buf, res) + lltype.free(buf, flavor='raw') + return result + +if HAVE_SYMLINKAT: + c_symlinkat = external('symlinkat', + [rffi.CCHARP, rffi.CCHARP, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def symlinkat(src, dst, dir_fd=AT_FDCWD): + error = c_symlinkat(src, dst, dir_fd) + handle_posix_error('symlinkat', error) + +if HAVE_OPENAT: + c_openat = external('openat', + [rffi.INT, rffi.CCHARP, rffi.INT, rffi.MODE_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + @enforceargs(s_Str0, int, int, int, typecheck=False) + def openat(path, flags, mode, dir_fd=AT_FDCWD): + fd = c_openat(path, flags, mode, dir_fd) + return handle_posix_error('open', fd) + +if HAVE_MKFIFOAT: + c_mkfifoat = external('mkfifoat', + [rffi.INT, rffi.CCHARP, rffi.MODE_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def mkfifoat(path, mode, dir_fd=AT_FDCWD): + error = c_mkfifoat(dir_fd, path, mode) + handle_posix_error('mkfifoat', error) + +if HAVE_MKNODAT: + c_mknodat = external('mknodat', + [rffi.INT, rffi.CCHARP, rffi.MODE_T, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def mknodat(path, mode, device, dir_fd=AT_FDCWD): + error = c_mknodat(dir_fd, path, mode, device) + handle_posix_error('mknodat', error) From pypy.commits at gmail.com Tue Mar 22 14:28:31 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 22 Mar 2016 11:28:31 -0700 (PDT) Subject: [pypy-commit] pypy rposix-for-3: Add tests for mkdirat(), openat(), unlinkat() Message-ID: <56f18ecf.865a1c0a.25b17.604d@mx.google.com> Author: Ronan Lamy Branch: rposix-for-3 Changeset: r83271:49faef350e25 Date: 2016-03-22 18:27 +0000 http://bitbucket.org/pypy/pypy/changeset/49faef350e25/ Log: Add tests for mkdirat(), openat(), unlinkat() diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -1877,7 +1877,7 @@ @enforceargs(s_Str0, int, int, int, typecheck=False) def openat(path, flags, mode, dir_fd=AT_FDCWD): - fd = c_openat(path, flags, mode, dir_fd) + fd = c_openat(dir_fd, path, flags, mode) return handle_posix_error('open', fd) if HAVE_MKFIFOAT: diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -7,6 
+7,10 @@ import errno import py +def rposix_requires(funcname): + return py.test.mark.skipif(not hasattr(rposix, funcname), + reason="Requires rposix.%s()" % funcname) + class TestPosixFunction: def test_access(self): filename = str(udir.join('test_access.txt')) @@ -99,11 +103,25 @@ def test_mkdir(self): filename = str(udir.join('test_mkdir.dir')) rposix.mkdir(filename, 0) - exc = py.test.raises(OSError, rposix.mkdir, filename, 0) - assert exc.value.errno == errno.EEXIST + with py.test.raises(OSError) as excinfo: + rposix.mkdir(filename, 0) + assert excinfo.value.errno == errno.EEXIST if sys.platform == 'win32': assert exc.type is WindowsError + @rposix_requires('mkdirat') + def test_mkdirat(self): + relpath = 'test_mkdirat.dir' + filename = str(udir.join(relpath)) + dirfd = os.open(os.path.dirname(filename), os.O_RDONLY) + try: + rposix.mkdirat(relpath, 0, dir_fd=dirfd) + with py.test.raises(OSError) as excinfo: + rposix.mkdirat(relpath, 0, dir_fd=dirfd) + assert excinfo.value.errno == errno.EEXIST + finally: + os.close(dirfd) + def test_strerror(self): assert rposix.strerror(2) == os.strerror(2) @@ -448,6 +466,38 @@ def _get_filename(self): return str(udir.join('test_open_ascii')) + @rposix_requires('openat') + def test_openat(self): + def f(dirfd): + try: + fd = rposix.openat('test_open_ascii', os.O_RDONLY, 0777, dirfd) + try: + text = os.read(fd, 50) + return text + finally: + os.close(fd) + except OSError: + return '' + + dirfd = os.open(os.path.dirname(self.ufilename), os.O_RDONLY) + try: + assert ll_to_string(interpret(f, [dirfd])) == "test" + finally: + os.close(dirfd) + + @rposix_requires('unlinkat') + def test_unlinkat(self): + def f(dirfd): + return rposix.unlinkat('test_open_ascii', dir_fd=dirfd) + + dirfd = os.open(os.path.dirname(self.ufilename), os.O_RDONLY) + try: + interpret(f, [dirfd]) + finally: + os.close(dirfd) + assert not os.path.exists(self.ufilename) + + class TestPosixUnicode(BasePosixUnicodeOrAscii): def _get_filename(self): return (unicode(udir.join('test_open')) + From pypy.commits at gmail.com Tue Mar 22 14:29:02 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 22 Mar 2016 11:29:02 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: more fixes, at least starts backend tests Message-ID: <56f18eee.019e1c0a.1ce3.6f34@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83272:88e0e2f821e2 Date: 2016-03-22 20:28 +0200 http://bitbucket.org/pypy/pypy/changeset/88e0e2f821e2/ Log: more fixes, at least starts backend tests diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -683,7 +683,7 @@ for i in range(len(operations)-1, -1, -1): op = operations[i] if op.type != 'v': - if op not in last_used and op.has_no_side_effect(): + if op not in last_used and rop.has_no_side_effect(op.opnum): continue opnum = op.getopnum() for j in range(op.numargs()): diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -103,7 +103,7 @@ orig_op.set_forwarded(op) replaced = True op.setarg(i, arg) - if op.is_guard(): + if rop.is_guard(op.opnum): if not replaced: op = op.copy_and_change(op.getopnum()) orig_op.set_forwarded(op) @@ -203,7 +203,7 @@ def transform_to_gc_load(self, op): NOT_SIGNED = 0 CINT_ZERO = ConstInt(0) - if op.is_getarrayitem() or \ + if 
rop.is_getarrayitem(op.opnum) or \ op.getopnum() in (rop.GETARRAYITEM_RAW_I, rop.GETARRAYITEM_RAW_F): self.handle_getarrayitem(op) @@ -315,13 +315,13 @@ if self.transform_to_gc_load(op): continue # ---------- turn NEWxxx into CALL_MALLOC_xxx ---------- - if op.is_malloc(): + if rop.is_malloc(op.opnum): self.handle_malloc_operation(op) continue - if (op.is_guard() or + if (rop.is_guard(op.opnum) or self.could_merge_with_next_guard(op, i, operations)): self.emit_pending_zeros() - elif op.can_malloc(): + elif rop.can_malloc(op.opnum): self.emitting_an_operation_that_can_collect() elif op.getopnum() == rop.LABEL: self.emitting_an_operation_that_can_collect() diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -357,7 +357,7 @@ assert self.assembler.mc._frame_size == DEFAULT_FRAME_BYTES self.rm.position = i self.xrm.position = i - if op.has_no_side_effect() and op not in self.longevity: + if rop.has_no_side_effect(op.opnum) and op not in self.longevity: i += 1 self.possibly_free_vars_for_op(op) continue From pypy.commits at gmail.com Tue Mar 22 14:34:12 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 22 Mar 2016 11:34:12 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: fix enough to pass llsupport/test Message-ID: <56f19024.96811c0a.d5976.5f85@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83273:53578fa6db89 Date: 2016-03-22 20:33 +0200 http://bitbucket.org/pypy/pypy/changeset/53578fa6db89/ Log: fix enough to pass llsupport/test diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -695,7 +695,7 @@ if opnum != rop.JUMP and opnum != rop.LABEL: if arg not in last_real_usage: last_real_usage[arg] = i - if op.is_guard(): + if rop.is_guard(op.opnum): for arg in op.getfailargs(): if arg is None: # hole continue @@ -732,14 +732,7 @@ return longevity, last_real_usage def is_comparison_or_ovf_op(opnum): - from rpython.jit.metainterp.resoperation import opclasses - cls = opclasses[opnum] - # hack hack: in theory they are instance method, but they don't use - # any instance field, we can use a fake object - class Fake(cls): - pass - op = Fake() - return op.is_comparison() or op.is_ovf() + return rop.is_comparison(opnum) or rop.is_ovf(opnum) def valid_addressing_size(size): return size == 1 or size == 2 or size == 4 or size == 8 diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -361,7 +361,7 @@ i += 1 self.possibly_free_vars_for_op(op) continue - if not we_are_translated() and op.getopnum() == -127: + if not we_are_translated() and op.getopnum() == rop.FORCE_SPILL: self._consider_force_spill(op) else: oplist[op.getopnum()](self, op) From pypy.commits at gmail.com Tue Mar 22 15:03:35 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 22 Mar 2016 12:03:35 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: fix Message-ID: <56f19707.918e1c0a.eb19e.7ed9@mx.google.com> Author: Armin Rigo Branch: jit-leaner-frontend Changeset: r83274:46299b0a7723 Date: 2016-03-22 19:41 +0100 http://bitbucket.org/pypy/pypy/changeset/46299b0a7723/ Log: fix diff --git a/rpython/jit/metainterp/graphpage.py b/rpython/jit/metainterp/graphpage.py --- a/rpython/jit/metainterp/graphpage.py +++ 
b/rpython/jit/metainterp/graphpage.py @@ -170,7 +170,8 @@ while True: op = operations[opindex] op_repr = op.repr(self.memo, graytext=True) - if op.getopnum() == rop.DEBUG_MERGE_POINT: + if (op.getopnum() == rop.DEBUG_MERGE_POINT and + self.metainterp_sd is not None): jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] if jd_sd._get_printable_location_ptr: s = jd_sd.warmstate.get_location_str(op.getarglist()[3:]) From pypy.commits at gmail.com Tue Mar 22 15:03:37 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 22 Mar 2016 12:03:37 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: Simplify a bit the test, making it more independent on details. Message-ID: <56f19709.4a811c0a.35ca4.73ed@mx.google.com> Author: Armin Rigo Branch: jit-leaner-frontend Changeset: r83275:441f511c56d9 Date: 2016-03-22 20:02 +0100 http://bitbucket.org/pypy/pypy/changeset/441f511c56d9/ Log: Simplify a bit the test, making it more independent on details. Create two versions of the test, too, to check with or without the exit bridge. diff --git a/rpython/jit/metainterp/test/test_memmgr.py b/rpython/jit/metainterp/test/test_memmgr.py --- a/rpython/jit/metainterp/test/test_memmgr.py +++ b/rpython/jit/metainterp/test/test_memmgr.py @@ -127,38 +127,65 @@ n = n - 1 return 21 def f(): - # Depending on loop_longevity, either: - # A. create the loop and the entry bridge for 'g(5)' - # B. create 8 loops (and throw them away at each iteration) - # Actually, it's 4 loops and 4 exit bridges thrown away - # every second iteration - for i in range(8): - g(5) - # create another loop and another entry bridge for 'g(7)', - # to increase the current_generation + # If loop_longevity is large enough, this creates a loop + # and an entry bridge for 'g(7)', and another for 'g(5)': + # total 4. If loop_longevity is set to 1 instead, whenever + # we create a loop for 'g(7)' we forget the loop created + # for 'g(5)' and vice-versa. We end up creating loops + # over and over again, for a total of 40 of them. for i in range(20): g(7) - # Depending on loop_longevity, either: - # A. reuse the existing loop and entry bridge for 'g(5)'. - # The entry bridge for g(5) should never grow too old. - # The loop itself gets old, but is kept alive by the - # entry bridge via contains_jumps_to. - # B. or, create another loop (and throw away the previous one) g(5) return 42 # case A res = self.meta_interp(f, [], loop_longevity=3) assert res == 42 - # we should see only the loop with preamble and the exit bridge - # for g(5) and g(7) + # we should see (1) the loop-with-preamble, (2) the exit bridge + # for g(7), and another time the same for g(5). self.check_enter_count(4) # case B, with a lower longevity res = self.meta_interp(f, [], loop_longevity=1) assert res == 42 # we should see a loop for each call to g() - self.check_enter_count(8 + 20*2) + self.check_enter_count(40) + + def test_target_loop_kept_alive_or_not_2(self): + myjitdriver = JitDriver(greens=['m'], reds=['n']) + def g(m): + n = 10 + while n > 0: + myjitdriver.can_enter_jit(n=n, m=m) + myjitdriver.jit_merge_point(n=n, m=m) + n = n - 1 + return 21 + def f(): + # If loop_longevity is large enough, this creates a loop + # and an entry bridge for 'g(7)', and another for 'g(5)': + # total 4. If loop_longevity is set to 1 instead, whenever + # we create a loop for 'g(7)', we create the entry bridge + # on the next 'g(7)', but we forget them both when we move + # on to 'g(5)', and vice-versa. 
We end up creating loops + # and entry bridges over and over again, for a total of 32 + # of them. + for i in range(8): + g(7); g(7) + g(5); g(5) + return 42 + + # case A + res = self.meta_interp(f, [], loop_longevity=5) + assert res == 42 + # we should see (1) the loop-with-preamble, (2) the exit bridge + # for g(7), and another time the same for g(5). + self.check_enter_count(4) + + # case B, with a lower longevity + res = self.meta_interp(f, [], loop_longevity=1) + assert res == 42 + # we should see a loop for each call to g() + self.check_enter_count(32) def test_throw_away_old_loops(self): myjitdriver = JitDriver(greens=['m'], reds=['n']) From pypy.commits at gmail.com Tue Mar 22 15:03:39 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 22 Mar 2016 12:03:39 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: merge heads Message-ID: <56f1970b.a2afc20a.a24d3.3d9c@mx.google.com> Author: Armin Rigo Branch: jit-leaner-frontend Changeset: r83276:a5e18f5034d1 Date: 2016-03-22 20:02 +0100 http://bitbucket.org/pypy/pypy/changeset/a5e18f5034d1/ Log: merge heads diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -683,7 +683,7 @@ for i in range(len(operations)-1, -1, -1): op = operations[i] if op.type != 'v': - if op not in last_used and op.has_no_side_effect(): + if op not in last_used and rop.has_no_side_effect(op.opnum): continue opnum = op.getopnum() for j in range(op.numargs()): @@ -695,7 +695,7 @@ if opnum != rop.JUMP and opnum != rop.LABEL: if arg not in last_real_usage: last_real_usage[arg] = i - if op.is_guard(): + if rop.is_guard(op.opnum): for arg in op.getfailargs(): if arg is None: # hole continue @@ -732,14 +732,7 @@ return longevity, last_real_usage def is_comparison_or_ovf_op(opnum): - from rpython.jit.metainterp.resoperation import opclasses - cls = opclasses[opnum] - # hack hack: in theory they are instance method, but they don't use - # any instance field, we can use a fake object - class Fake(cls): - pass - op = Fake() - return op.is_comparison() or op.is_ovf() + return rop.is_comparison(opnum) or rop.is_ovf(opnum) def valid_addressing_size(size): return size == 1 or size == 2 or size == 4 or size == 8 diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -103,7 +103,7 @@ orig_op.set_forwarded(op) replaced = True op.setarg(i, arg) - if op.is_guard(): + if rop.is_guard(op.opnum): if not replaced: op = op.copy_and_change(op.getopnum()) orig_op.set_forwarded(op) @@ -203,7 +203,7 @@ def transform_to_gc_load(self, op): NOT_SIGNED = 0 CINT_ZERO = ConstInt(0) - if op.is_getarrayitem() or \ + if rop.is_getarrayitem(op.opnum) or \ op.getopnum() in (rop.GETARRAYITEM_RAW_I, rop.GETARRAYITEM_RAW_F): self.handle_getarrayitem(op) @@ -315,13 +315,13 @@ if self.transform_to_gc_load(op): continue # ---------- turn NEWxxx into CALL_MALLOC_xxx ---------- - if op.is_malloc(): + if rop.is_malloc(op.opnum): self.handle_malloc_operation(op) continue - if (op.is_guard() or + if (rop.is_guard(op.opnum) or self.could_merge_with_next_guard(op, i, operations)): self.emit_pending_zeros() - elif op.can_malloc(): + elif rop.can_malloc(op.opnum): self.emitting_an_operation_that_can_collect() elif op.getopnum() == rop.LABEL: self.emitting_an_operation_that_can_collect() diff --git 
a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -357,11 +357,11 @@ assert self.assembler.mc._frame_size == DEFAULT_FRAME_BYTES self.rm.position = i self.xrm.position = i - if op.has_no_side_effect() and op not in self.longevity: + if rop.has_no_side_effect(op.opnum) and op not in self.longevity: i += 1 self.possibly_free_vars_for_op(op) continue - if not we_are_translated() and op.getopnum() == -127: + if not we_are_translated() and op.getopnum() == rop.FORCE_SPILL: self._consider_force_spill(op) else: oplist[op.getopnum()](self, op) diff --git a/rpython/jit/metainterp/test/test_resoperation.py b/rpython/jit/metainterp/test/test_resoperation.py --- a/rpython/jit/metainterp/test/test_resoperation.py +++ b/rpython/jit/metainterp/test/test_resoperation.py @@ -74,13 +74,9 @@ #assert re.match("guard_no_exception\(descr=<.+>\)$", repr(op)) def test_can_malloc(): - a = ConstInt(1) - b = ConstInt(2) - mydescr = AbstractDescr() - assert rop.ResOperation(rop.rop.NEW, []).can_malloc() - call = rop.ResOperation(rop.rop.CALL_N, [a, b], descr=mydescr) - assert call.can_malloc() - assert not rop.ResOperation(rop.rop.INT_ADD, [a, b]).can_malloc() + assert rop.rop.can_malloc(rop.rop.NEW) + assert rop.rop.can_malloc(rop.rop.CALL_N) + assert not rop.rop.can_malloc(rop.rop.INT_ADD) def test_get_deep_immutable_oplist(): a = ConstInt(1) From pypy.commits at gmail.com Tue Mar 22 15:56:10 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 22 Mar 2016 12:56:10 -0700 (PDT) Subject: [pypy-commit] pypy default: Merged in sergem/pypy/fix_transpose_for_list_v3 (pull request #420) Message-ID: <56f1a35a.857ac20a.dc7f4.50d1@mx.google.com> Author: mattip Branch: Changeset: r83278:e7bacd0b61e2 Date: 2016-03-22 21:55 +0200 http://bitbucket.org/pypy/pypy/changeset/e7bacd0b61e2/ Log: Merged in sergem/pypy/fix_transpose_for_list_v3 (pull request #420) Fixed ndarray.transpose when argument is a list or an array diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -502,29 +502,34 @@ return W_NDimArray(self.implementation.transpose(self, axes)) def descr_transpose(self, space, args_w): - if len(args_w) == 1 and space.isinstance_w(args_w[0], space.w_tuple): - args_w = space.fixedview(args_w[0]) - if (len(args_w) == 0 or - len(args_w) == 1 and space.is_none(args_w[0])): + if len(args_w) == 0 or len(args_w) == 1 and space.is_none(args_w[0]): return self.descr_get_transpose(space) else: - if len(args_w) != self.ndims(): - raise oefmt(space.w_ValueError, "axes don't match array") - axes = [] - axes_seen = [False] * self.ndims() - for w_arg in args_w: - try: - axis = support.index_w(space, w_arg) - except OperationError: - raise oefmt(space.w_TypeError, "an integer is required") - if axis < 0 or axis >= self.ndims(): - raise oefmt(space.w_ValueError, "invalid axis for this array") - if axes_seen[axis] is True: - raise oefmt(space.w_ValueError, "repeated axis in transpose") - axes.append(axis) - axes_seen[axis] = True - return self.descr_get_transpose(space, axes) + if len(args_w) > 1: + axes = args_w + else: # Iterable in the only argument (len(arg_w) == 1 and arg_w[0] is not None) + axes = space.fixedview(args_w[0]) + axes = self._checked_axes(axes, space) + return self.descr_get_transpose(space, axes) + + def _checked_axes(self, axes_raw, space): + if len(axes_raw) != self.ndims(): + raise 
oefmt(space.w_ValueError, "axes don't match array") + axes = [] + axes_seen = [False] * self.ndims() + for elem in axes_raw: + try: + axis = support.index_w(space, elem) + except OperationError: + raise oefmt(space.w_TypeError, "an integer is required") + if axis < 0 or axis >= self.ndims(): + raise oefmt(space.w_ValueError, "invalid axis for this array") + if axes_seen[axis] is True: + raise oefmt(space.w_ValueError, "repeated axis in transpose") + axes.append(axis) + axes_seen[axis] = True + return axes @unwrap_spec(axis1=int, axis2=int) def descr_swapaxes(self, space, axis1, axis2): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2960,6 +2960,36 @@ assert (a.transpose() == b).all() assert (a.transpose(None) == b).all() + def test_transpose_arg_tuple(self): + import numpy as np + a = np.arange(24).reshape(2, 3, 4) + transpose_args = a.transpose(1, 2, 0) + + transpose_test = a.transpose((1, 2, 0)) + + assert transpose_test.shape == (3, 4, 2) + assert (transpose_args == transpose_test).all() + + def test_transpose_arg_list(self): + import numpy as np + a = np.arange(24).reshape(2, 3, 4) + transpose_args = a.transpose(1, 2, 0) + + transpose_test = a.transpose([1, 2, 0]) + + assert transpose_test.shape == (3, 4, 2) + assert (transpose_args == transpose_test).all() + + def test_transpose_arg_array(self): + import numpy as np + a = np.arange(24).reshape(2, 3, 4) + transpose_args = a.transpose(1, 2, 0) + + transpose_test = a.transpose(np.array([1, 2, 0])) + + assert transpose_test.shape == (3, 4, 2) + assert (transpose_args == transpose_test).all() + def test_transpose_error(self): import numpy as np a = np.arange(24).reshape(2, 3, 4) @@ -2968,6 +2998,11 @@ raises(ValueError, a.transpose, 1, 0, 1) raises(TypeError, a.transpose, 1, 0, '2') + def test_transpose_unexpected_argument(self): + import numpy as np + a = np.array([[1, 2], [3, 4], [5, 6]]) + raises(TypeError, 'a.transpose(axes=(1,2,0))') + def test_flatiter(self): from numpy import array, flatiter, arange, zeros a = array([[10, 30], [40, 60]]) From pypy.commits at gmail.com Tue Mar 22 15:56:17 2016 From: pypy.commits at gmail.com (Sergey Matyunin) Date: Tue, 22 Mar 2016 12:56:17 -0700 (PDT) Subject: [pypy-commit] pypy fix_transpose_for_list_v3: Fixed ndarray.transpose when argument is a list or an array Message-ID: <56f1a361.29cec20a.849e7.4d39@mx.google.com> Author: Sergey Matyunin Branch: fix_transpose_for_list_v3 Changeset: r83277:ef93194a1339 Date: 2016-03-22 18:08 +0100 http://bitbucket.org/pypy/pypy/changeset/ef93194a1339/ Log: Fixed ndarray.transpose when argument is a list or an array diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -502,29 +502,34 @@ return W_NDimArray(self.implementation.transpose(self, axes)) def descr_transpose(self, space, args_w): - if len(args_w) == 1 and space.isinstance_w(args_w[0], space.w_tuple): - args_w = space.fixedview(args_w[0]) - if (len(args_w) == 0 or - len(args_w) == 1 and space.is_none(args_w[0])): + if len(args_w) == 0 or len(args_w) == 1 and space.is_none(args_w[0]): return self.descr_get_transpose(space) else: - if len(args_w) != self.ndims(): - raise oefmt(space.w_ValueError, "axes don't match array") - axes = [] - axes_seen = [False] * self.ndims() - for w_arg in args_w: - try: - axis = support.index_w(space, w_arg) 
- except OperationError: - raise oefmt(space.w_TypeError, "an integer is required") - if axis < 0 or axis >= self.ndims(): - raise oefmt(space.w_ValueError, "invalid axis for this array") - if axes_seen[axis] is True: - raise oefmt(space.w_ValueError, "repeated axis in transpose") - axes.append(axis) - axes_seen[axis] = True - return self.descr_get_transpose(space, axes) + if len(args_w) > 1: + axes = args_w + else: # Iterable in the only argument (len(arg_w) == 1 and arg_w[0] is not None) + axes = space.fixedview(args_w[0]) + axes = self._checked_axes(axes, space) + return self.descr_get_transpose(space, axes) + + def _checked_axes(self, axes_raw, space): + if len(axes_raw) != self.ndims(): + raise oefmt(space.w_ValueError, "axes don't match array") + axes = [] + axes_seen = [False] * self.ndims() + for elem in axes_raw: + try: + axis = support.index_w(space, elem) + except OperationError: + raise oefmt(space.w_TypeError, "an integer is required") + if axis < 0 or axis >= self.ndims(): + raise oefmt(space.w_ValueError, "invalid axis for this array") + if axes_seen[axis] is True: + raise oefmt(space.w_ValueError, "repeated axis in transpose") + axes.append(axis) + axes_seen[axis] = True + return axes @unwrap_spec(axis1=int, axis2=int) def descr_swapaxes(self, space, axis1, axis2): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2960,6 +2960,36 @@ assert (a.transpose() == b).all() assert (a.transpose(None) == b).all() + def test_transpose_arg_tuple(self): + import numpy as np + a = np.arange(24).reshape(2, 3, 4) + transpose_args = a.transpose(1, 2, 0) + + transpose_test = a.transpose((1, 2, 0)) + + assert transpose_test.shape == (3, 4, 2) + assert (transpose_args == transpose_test).all() + + def test_transpose_arg_list(self): + import numpy as np + a = np.arange(24).reshape(2, 3, 4) + transpose_args = a.transpose(1, 2, 0) + + transpose_test = a.transpose([1, 2, 0]) + + assert transpose_test.shape == (3, 4, 2) + assert (transpose_args == transpose_test).all() + + def test_transpose_arg_array(self): + import numpy as np + a = np.arange(24).reshape(2, 3, 4) + transpose_args = a.transpose(1, 2, 0) + + transpose_test = a.transpose(np.array([1, 2, 0])) + + assert transpose_test.shape == (3, 4, 2) + assert (transpose_args == transpose_test).all() + def test_transpose_error(self): import numpy as np a = np.arange(24).reshape(2, 3, 4) @@ -2968,6 +2998,11 @@ raises(ValueError, a.transpose, 1, 0, 1) raises(TypeError, a.transpose, 1, 0, '2') + def test_transpose_unexpected_argument(self): + import numpy as np + a = np.array([[1, 2], [3, 4], [5, 6]]) + raises(TypeError, 'a.transpose(axes=(1,2,0))') + def test_flatiter(self): from numpy import array, flatiter, arange, zeros a = array([[10, 30], [40, 60]]) From pypy.commits at gmail.com Tue Mar 22 16:20:46 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 22 Mar 2016 13:20:46 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: fix some part of test_resume Message-ID: <56f1a91e.c1621c0a.ee33.ffffec44@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83279:325452ed8546 Date: 2016-03-22 22:16 +0200 http://bitbucket.org/pypy/pypy/changeset/325452ed8546/ Log: fix some part of test_resume diff --git a/rpython/jit/metainterp/test/test_resume.py b/rpython/jit/metainterp/test/test_resume.py --- a/rpython/jit/metainterp/test/test_resume.py +++ 
b/rpython/jit/metainterp/test/test_resume.py @@ -808,22 +808,41 @@ assert tagbits == TAGCONST assert memo.consts[index - TAG_CONST_OFFSET] is const +class Frame(object): + def __init__(self, boxes): + self.boxes = boxes + + def get_list_of_active_boxes(self, flag, new_array, encode): + a = new_array(len(self.boxes)) + for i, box in enumerate(self.boxes): + a[i] = encode(box) + return a + def test_ResumeDataLoopMemo_number(): b1, b2, b3, b4, b5 = [IntFrontendOp(0), IntFrontendOp(1), IntFrontendOp(2), RefFrontendOp(3), RefFrontendOp(4)] c1, c2, c3, c4 = [ConstInt(1), ConstInt(2), ConstInt(3), ConstInt(4)] env = [b1, c1, b2, b1, c2] - snap = Snapshot(0, env) + t = Trace([b1, b2, b3, b4, b5]) + snap = t.create_snapshot(FakeJitCode("jitcode", 0), 0, Frame(env), False) env1 = [c3, b3, b1, c1] - snap1 = TopSnapshot(snap, env1, []) + t.append(0) # descr index + snap1 = t.create_top_snapshot(FakeJitCode("jitcode", 0), 2, Frame(env1), False, + [], []) + snap1.prev = snap + env2 = [c3, b3, b1, c3] - snap2 = TopSnapshot(snap, env2, []) + env3 = [c3, b3, b1, c3] + env4 = [c3, b4, b1, c3] + env5 = [b1, b4, b5] + metainterp_sd = FakeMetaInterpStaticData() - memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) - frameinfo = FrameInfo(None, FakeJitCode("jitcode", 0), 0) + memo = ResumeDataLoopMemo(metainterp_sd) - numb, liveboxes, v = memo.number(FakeOptimizer(), snap1, frameinfo) + iter = t.get_iter(metainterp_sd) + b1, b2, b3, b4, b5 = iter.inputargs + numb, liveboxes, v = memo.number(FakeOptimizer(), 0, iter) assert v == 0 assert liveboxes == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX), @@ -831,20 +850,26 @@ base = [0, 0, tag(0, TAGBOX), tag(1, TAGINT), tag(1, TAGBOX), tag(0, TAGBOX), tag(2, TAGINT)] - assert unpack_numbering(numb) == [0, 2, tag(3, TAGINT), tag(2, TAGBOX), - tag(0, TAGBOX), tag(1, TAGINT)] + base + assert unpack_numbering(numb) == [0, 0] + base + [0, 2, tag(3, TAGINT), tag(2, TAGBOX), + tag(0, TAGBOX), tag(1, TAGINT)] + t.append(0) + snap2 = t.create_top_snapshot(FakeJitCode("jitcode", 0), 2, Frame(env2), + False, [], []) + snap2.prev = snap - numb2, liveboxes2, v = memo.number(FakeOptimizer(), snap2, frameinfo) + numb2, liveboxes2, v = memo.number(FakeOptimizer(), 1, iter) assert v == 0 assert liveboxes2 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX), b3: tag(2, TAGBOX)} assert liveboxes2 is not liveboxes - assert unpack_numbering(numb2) == [0, 2, tag(3, TAGINT), tag(2, TAGBOX), - tag(0, TAGBOX), tag(3, TAGINT)] + base + assert unpack_numbering(numb2) == [0, 0] + base + [0, 2, tag(3, TAGINT), tag(2, TAGBOX), + tag(0, TAGBOX), tag(3, TAGINT)] - env3 = [c3, b3, b1, c3] - snap3 = TopSnapshot(snap, env3, []) + t.append(0) + snap3 = t.create_top_snapshot(FakeJitCode("jitcode", 0), 2, Frame([]), + False, [], env3) + snap3.prev = snap class FakeVirtualInfo(info.AbstractInfo): def __init__(self, virt): @@ -855,45 +880,55 @@ # renamed b3.set_forwarded(c4) - numb3, liveboxes3, v = memo.number(FakeOptimizer(), snap3, frameinfo) + numb3, liveboxes3, v = memo.number(FakeOptimizer(), 2, iter) assert v == 0 assert liveboxes3 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX)} - assert unpack_numbering(numb3) == [0, 2, tag(3, TAGINT), tag(4, TAGINT), - tag(0, TAGBOX), tag(3, TAGINT)] + base + assert unpack_numbering(numb3) == ([0, 2, tag(3, TAGINT), tag(4, TAGINT), + tag(0, TAGBOX), tag(3, TAGINT)] + + base + [0, 2]) # virtual - env4 = [c3, b4, b1, c3] - snap4 = TopSnapshot(snap, env4, []) + t.append(0) + snap4 = t.create_top_snapshot(FakeJitCode("jitcode", 0), 2, Frame([]), + False, [], env4) + snap4.prev = snap 
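The numberings compared in these asserts are flat arrays of tagged 16-bit entries. A rough sketch of the tagging scheme; the concrete constant values below are assumed for illustration only, the real ones live in resume.py:

    TAGCONST, TAGINT, TAGBOX, TAGVIRTUAL = 0, 1, 2, 3   # assumed values

    def tag(index, tagbits):
        # pack a small index together with a 2-bit kind into one entry
        assert 0 <= tagbits <= 3
        return (index << 2) | tagbits

    def untag(tagged):
        # inverse operation: recover (index, kind)
        return tagged >> 2, tagged & 3

    assert untag(tag(3, TAGINT)) == (3, TAGINT)
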
b4.set_forwarded(FakeVirtualInfo(True)) - numb4, liveboxes4, v = memo.number(FakeOptimizer(), snap4, frameinfo) + numb4, liveboxes4, v = memo.number(FakeOptimizer(), 3, iter) assert v == 1 assert liveboxes4 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX), b4: tag(0, TAGVIRTUAL)} assert unpack_numbering(numb4) == [0, 2, tag(3, TAGINT), tag(0, TAGVIRTUAL), - tag(0, TAGBOX), tag(3, TAGINT)] + base + tag(0, TAGBOX), tag(3, TAGINT)] + base + [0, 2] - env5 = [b1, b4, b5] - snap5 = TopSnapshot(snap4, [], env5) + t.append(0) + snap4 = t.create_snapshot(FakeJitCode("jitcode", 2), 1, Frame(env4), False) + t.append(0) + snap4.prev = snap + snap5 = t.create_top_snapshot(FakeJitCode("jitcode", 0), 0, Frame([]), False, + env5, []) + snap5.prev = snap4 b4.set_forwarded(FakeVirtualInfo(True)) b5.set_forwarded(FakeVirtualInfo(True)) - frameinfo = FrameInfo(frameinfo, FakeJitCode("foo", 2), 1) - numb5, liveboxes5, v = memo.number(FakeOptimizer(), snap5, frameinfo) + numb5, liveboxes5, v = memo.number(FakeOptimizer(), 4, iter) assert v == 2 assert liveboxes5 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX), b4: tag(0, TAGVIRTUAL), b5: tag(1, TAGVIRTUAL)} assert unpack_numbering(numb5) == [ 3, tag(0, TAGBOX), tag(0, TAGVIRTUAL), tag(1, TAGVIRTUAL), - 0, + 0] + base + [ 2, 1, tag(3, TAGINT), tag(0, TAGVIRTUAL), tag(0, TAGBOX), tag(3, TAGINT) - ] + base + ] + [0, 0] @given(boxlists) def test_ResumeDataLoopMemo_random(lst): + t = Trace() + t.append(0) + s = t.create_top_snapshot(FakeJitCode("", 0), 1, Frame([]), lst, []) s = TopSnapshot(None, [], lst) frameinfo = FrameInfo(None, FakeJitCode("foo", 0), 0) memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) From pypy.commits at gmail.com Tue Mar 22 16:20:48 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 22 Mar 2016 13:20:48 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: merge Message-ID: <56f1a920.c13fc20a.54b2.559d@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83280:689143122fd4 Date: 2016-03-22 22:19 +0200 http://bitbucket.org/pypy/pypy/changeset/689143122fd4/ Log: merge diff --git a/rpython/jit/metainterp/graphpage.py b/rpython/jit/metainterp/graphpage.py --- a/rpython/jit/metainterp/graphpage.py +++ b/rpython/jit/metainterp/graphpage.py @@ -170,7 +170,8 @@ while True: op = operations[opindex] op_repr = op.repr(self.memo, graytext=True) - if op.getopnum() == rop.DEBUG_MERGE_POINT: + if (op.getopnum() == rop.DEBUG_MERGE_POINT and + self.metainterp_sd is not None): jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] if jd_sd._get_printable_location_ptr: s = jd_sd.warmstate.get_location_str(op.getarglist()[3:]) diff --git a/rpython/jit/metainterp/test/test_memmgr.py b/rpython/jit/metainterp/test/test_memmgr.py --- a/rpython/jit/metainterp/test/test_memmgr.py +++ b/rpython/jit/metainterp/test/test_memmgr.py @@ -127,38 +127,65 @@ n = n - 1 return 21 def f(): - # Depending on loop_longevity, either: - # A. create the loop and the entry bridge for 'g(5)' - # B. create 8 loops (and throw them away at each iteration) - # Actually, it's 4 loops and 4 exit bridges thrown away - # every second iteration - for i in range(8): - g(5) - # create another loop and another entry bridge for 'g(7)', - # to increase the current_generation + # If loop_longevity is large enough, this creates a loop + # and an entry bridge for 'g(7)', and another for 'g(5)': + # total 4. If loop_longevity is set to 1 instead, whenever + # we create a loop for 'g(7)' we forget the loop created + # for 'g(5)' and vice-versa. 
We end up creating loops + # over and over again, for a total of 40 of them. for i in range(20): g(7) - # Depending on loop_longevity, either: - # A. reuse the existing loop and entry bridge for 'g(5)'. - # The entry bridge for g(5) should never grow too old. - # The loop itself gets old, but is kept alive by the - # entry bridge via contains_jumps_to. - # B. or, create another loop (and throw away the previous one) g(5) return 42 # case A res = self.meta_interp(f, [], loop_longevity=3) assert res == 42 - # we should see only the loop with preamble and the exit bridge - # for g(5) and g(7) + # we should see (1) the loop-with-preamble, (2) the exit bridge + # for g(7), and another time the same for g(5). self.check_enter_count(4) # case B, with a lower longevity res = self.meta_interp(f, [], loop_longevity=1) assert res == 42 # we should see a loop for each call to g() - self.check_enter_count(8 + 20*2) + self.check_enter_count(40) + + def test_target_loop_kept_alive_or_not_2(self): + myjitdriver = JitDriver(greens=['m'], reds=['n']) + def g(m): + n = 10 + while n > 0: + myjitdriver.can_enter_jit(n=n, m=m) + myjitdriver.jit_merge_point(n=n, m=m) + n = n - 1 + return 21 + def f(): + # If loop_longevity is large enough, this creates a loop + # and an entry bridge for 'g(7)', and another for 'g(5)': + # total 4. If loop_longevity is set to 1 instead, whenever + # we create a loop for 'g(7)', we create the entry bridge + # on the next 'g(7)', but we forget them both when we move + # on to 'g(5)', and vice-versa. We end up creating loops + # and entry bridges over and over again, for a total of 32 + # of them. + for i in range(8): + g(7); g(7) + g(5); g(5) + return 42 + + # case A + res = self.meta_interp(f, [], loop_longevity=5) + assert res == 42 + # we should see (1) the loop-with-preamble, (2) the exit bridge + # for g(7), and another time the same for g(5). 
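The counts asserted in these tests come from the generation-based policy behind loop_longevity: a loop that has not been entered for too many generations is thrown away and has to be recompiled. A toy model of that policy, not the actual MemoryManager code:

    class LoopCache(object):
        def __init__(self, longevity):
            self.longevity = longevity
            self.generation = 0
            self.last_used = {}          # compiled loop -> generation last entered

        def enter(self, loop):
            self.last_used[loop] = self.generation

        def next_generation(self):
            self.generation += 1
            limit = self.generation - self.longevity
            for loop, gen in list(self.last_used.items()):
                if gen < limit:
                    del self.last_used[loop]   # forgotten: next call recompiles
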
+ self.check_enter_count(4) + + # case B, with a lower longevity + res = self.meta_interp(f, [], loop_longevity=1) + assert res == 42 + # we should see a loop for each call to g() + self.check_enter_count(32) def test_throw_away_old_loops(self): myjitdriver = JitDriver(greens=['m'], reds=['n']) From pypy.commits at gmail.com Tue Mar 22 16:35:59 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 22 Mar 2016 13:35:59 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: test, implement slot for __pow__, which is called 'ternary' but accepts two arguments as well Message-ID: <56f1acaf.03dd1c0a.c9780.ffff94de@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83281:6bf99bc91614 Date: 2016-03-22 22:03 +0200 http://bitbucket.org/pypy/pypy/changeset/6bf99bc91614/ Log: test, implement slot for __pow__, which is called 'ternary' but accepts two arguments as well diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py --- a/pypy/module/cpyext/slotdefs.py +++ b/pypy/module/cpyext/slotdefs.py @@ -7,7 +7,7 @@ cpython_api, generic_cpy_call, PyObject, Py_ssize_t, Py_TPFLAGS_CHECKTYPES, mangle_name, pypy_decl) from pypy.module.cpyext.typeobjectdefs import ( - unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, + unaryfunc, wrapperfunc, ternaryfunc, PyTypeObjectPtr, binaryfunc, ternaryfunc, getattrfunc, getattrofunc, setattrofunc, lenfunc, ssizeargfunc, inquiry, ssizessizeargfunc, ssizeobjargproc, iternextfunc, initproc, richcmpfunc, cmpfunc, hashfunc, descrgetfunc, descrsetfunc, objobjproc, objobjargproc, @@ -43,6 +43,17 @@ "expected %d arguments, got %d", n, space.len_w(w_ob)) +def check_num_argsv(space, w_ob, low, high): + from pypy.module.cpyext.tupleobject import PyTuple_CheckExact + if not PyTuple_CheckExact(space, w_ob): + raise OperationError(space.w_SystemError, + space.wrap("PyArg_UnpackTuple() argument list is not a tuple")) + if low <=space.len_w(w_ob) <= high: + return + raise oefmt(space.w_TypeError, + "expected %d-%d arguments, got %d", + low, high, space.len_w(w_ob)) + def wrap_init(space, w_self, w_args, func, w_kwargs): func_init = rffi.cast(initproc, func) res = generic_cpy_call(space, func_init, w_self, w_args, w_kwargs) @@ -85,6 +96,33 @@ Py_DecRef(space, ref) return generic_cpy_call(space, func_binary, args_w[0], w_self) +def wrap_ternaryfunc(space, w_self, w_args, func): + # The third argument is optional + func_ternary = rffi.cast(ternaryfunc, func) + check_num_argsv(space, w_args, 1, 2) + args_w = space.fixedview(w_args) + arg3 = space.w_None + if len(args_w) > 1: + arg3 = args_w[1] + return generic_cpy_call(space, func_ternary, w_self, args_w[0], arg3) + +def wrap_ternaryfunc_r(space, w_self, w_args, func): + # The third argument is optional + func_ternary = rffi.cast(ternaryfunc, func) + check_num_argsv(space, w_args, 1, 2) + args_w = space.fixedview(w_args) + ref = make_ref(space, w_self) + if (not ref.c_ob_type.c_tp_flags & Py_TPFLAGS_CHECKTYPES and + not space.is_true(space.issubtype(space.type(args_w[0]), + space.type(w_self)))): + return space.w_NotImplemented + Py_DecRef(space, ref) + arg3 = space.w_None + if len(args_w) > 1: + arg3 = args_w[1] + return generic_cpy_call(space, func_ternary, args_w[0], w_self, arg3) + + def wrap_inquirypred(space, w_self, w_args, func): func_inquiry = rffi.cast(inquiry, func) check_num_args(space, w_args, 0) diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py --- a/pypy/module/cpyext/test/test_typeobject.py +++ 
b/pypy/module/cpyext/test/test_typeobject.py @@ -775,7 +775,7 @@ raises(SystemError, bool, module.newInt(-1)) raises(ValueError, bool, module.newInt(-42)) - def test_binaryfunc(self): + def test_mathfunc(self): module = self.import_extension('foo', [ ("newInt", "METH_VARARGS", """ @@ -788,6 +788,7 @@ IntLike_Type.tp_as_number = &intlike_as_number; IntLike_Type.tp_flags |= Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES; intlike_as_number.nb_add = intlike_nb_add; + intlike_as_number.nb_power = intlike_nb_pow; if (PyType_Ready(&IntLike_Type) < 0) return NULL; intObj = PyObject_New(IntLikeObject, &IntLike_Type); if (!intObj) { @@ -814,8 +815,9 @@ intObjNoOp->ival = intval; return (PyObject *)intObjNoOp; - """)], + """)], prologue= """ + #include typedef struct { PyObject_HEAD @@ -835,6 +837,19 @@ return PyInt_FromLong(val1+val2); } + static PyObject * + intlike_nb_pow(PyObject *self, PyObject *other, PyObject * z) + { + long val2, val1 = ((IntLikeObject *)(self))->ival; + if (PyInt_Check(other)) { + long val2 = PyInt_AsLong(other); + return PyInt_FromLong(val1+val2); + } + + val2 = ((IntLikeObject *)(other))->ival; + return PyInt_FromLong((int)pow(val1,val2)); + } + PyTypeObject IntLike_Type = { PyObject_HEAD_INIT(0) /*ob_size*/ 0, @@ -863,6 +878,7 @@ assert (a + b) == 3 assert (b + c) == 5 assert (d + a) == 5 + assert pow(d,b) == 16 def test_tp_new_in_subclass_of_type(self): module = self.import_module(name='foo3') From pypy.commits at gmail.com Tue Mar 22 17:25:36 2016 From: pypy.commits at gmail.com (mattip) Date: Tue, 22 Mar 2016 14:25:36 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: expose next capi functions for implementation Message-ID: <56f1b850.c856c20a.b438.6ad5@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83282:781f8311f14e Date: 2016-03-22 23:24 +0200 http://bitbucket.org/pypy/pypy/changeset/781f8311f14e/ Log: expose next capi functions for implementation diff --git a/pypy/module/cpyext/pyfile.py b/pypy/module/cpyext/pyfile.py --- a/pypy/module/cpyext/pyfile.py +++ b/pypy/module/cpyext/pyfile.py @@ -45,6 +45,15 @@ w_mode = space.wrap(rffi.charp2str(mode)) return space.call_method(space.builtin, 'file', w_filename, w_mode) + at cpython_api([PyObject], FILEP, error=CANNOT_FAIL) +def PyFile_AsFile(space, w_p): + """Return the file object associated with p as a FILE*. + + If the caller will ever use the returned FILE* object while + the GIL is released it must also call the PyFile_IncUseCount() and + PyFile_DecUseCount() functions as appropriate.""" + raise NotImplementedError + @cpython_api([FILEP, CONST_STRING, CONST_STRING, rffi.VOIDP], PyObject) def PyFile_FromFile(space, fp, name, mode, close): """Create a new PyFileObject from the already-open standard C file diff --git a/pypy/module/cpyext/stubsactive.py b/pypy/module/cpyext/stubsactive.py --- a/pypy/module/cpyext/stubsactive.py +++ b/pypy/module/cpyext/stubsactive.py @@ -4,16 +4,6 @@ from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.pystate import PyThreadState, PyInterpreterState - - at cpython_api([PyObject], FILEP, error=CANNOT_FAIL) -def PyFile_AsFile(space, p): - """Return the file object associated with p as a FILE*. 
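For the nb_power slot wired up in the cpyext-ext commit above, the new wrapper treats the third argument as optional and fills in None when it is missing. The snippet below reuses the names from that test and only runs inside its environment (module is the compiled foo extension):

    d = module.newInt(4)        # IntLike instance, ival == 4
    b = module.newInt(2)        # IntLike instance, ival == 2
    # two-argument pow(): wrap_ternaryfunc passes space.w_None as the
    # missing third argument, so intlike_nb_pow sees z == None
    assert pow(d, b) == 16
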
- - If the caller will ever use the returned FILE* object while - the GIL is released it must also call the PyFile_IncUseCount() and - PyFile_DecUseCount() functions described below as appropriate.""" - raise NotImplementedError - @cpython_api([], rffi.INT_real, error=CANNOT_FAIL) def Py_MakePendingCalls(space): return 0 diff --git a/pypy/module/cpyext/test/test_pyfile.py b/pypy/module/cpyext/test/test_pyfile.py --- a/pypy/module/cpyext/test/test_pyfile.py +++ b/pypy/module/cpyext/test/test_pyfile.py @@ -60,9 +60,8 @@ w_file = api.PyFile_FromString(filename, mode) assert space.str_w(api.PyFile_Name(w_file)) == name - @pytest.mark.xfail def test_file_fromfile(self, space, api): - api.PyFile_Fromfile() + api.PyFile_FromFile() @pytest.mark.xfail def test_file_setbufsize(self, space, api): From pypy.commits at gmail.com Tue Mar 22 21:35:09 2016 From: pypy.commits at gmail.com (pjenvey) Date: Tue, 22 Mar 2016 18:35:09 -0700 (PDT) Subject: [pypy-commit] pypy stdlib-2.7.11: PyPy ctypes doesn't support endian swapping (pypy issue1213) Message-ID: <56f1f2cd.88c8c20a.575f5.05ef@mx.google.com> Author: Philip Jenvey Branch: stdlib-2.7.11 Changeset: r83283:99cde77e5894 Date: 2016-03-22 18:34 -0700 http://bitbucket.org/pypy/pypy/changeset/99cde77e5894/ Log: PyPy ctypes doesn't support endian swapping (pypy issue1213) diff --git a/lib-python/2.7/ctypes/test/test_bitfields.py b/lib-python/2.7/ctypes/test/test_bitfields.py --- a/lib-python/2.7/ctypes/test/test_bitfields.py +++ b/lib-python/2.7/ctypes/test/test_bitfields.py @@ -1,5 +1,5 @@ from ctypes import * -from ctypes.test import need_symbol +from ctypes.test import need_symbol, xfail import unittest import os @@ -279,6 +279,7 @@ x.c = 2 self.assertEqual(b.tostring(), b'\xef\xcd\xab\x21') + @xfail @need_symbol('c_uint32') def test_uint32_swap_big_endian(self): # Issue #23319 From pypy.commits at gmail.com Tue Mar 22 21:51:59 2016 From: pypy.commits at gmail.com (pjenvey) Date: Tue, 22 Mar 2016 18:51:59 -0700 (PDT) Subject: [pypy-commit] pypy stdlib-2.7.11: unwrap_spec doesn't work with GetSetProperty funcs, fix Message-ID: <56f1f6bf.a151c20a.7046d.0aeb@mx.google.com> Author: Philip Jenvey Branch: stdlib-2.7.11 Changeset: r83284:d2bb4879ed49 Date: 2016-03-22 18:51 -0700 http://bitbucket.org/pypy/pypy/changeset/d2bb4879ed49/ Log: unwrap_spec doesn't work with GetSetProperty funcs, fix diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py --- a/pypy/module/pyexpat/interp_pyexpat.py +++ b/pypy/module/pyexpat/interp_pyexpat.py @@ -623,9 +623,8 @@ def get_namespace_prefixes(self, space): return space.wrap(self.ns_prefixes) - @unwrap_spec(value=int) - def set_namespace_prefixes(self, space, value): - self.ns_prefixes = bool(value) + def set_namespace_prefixes(self, space, w_value): + self.ns_prefixes = space.bool_w(w_value) XML_SetReturnNSTriplet(self.itself, self.ns_prefixes) # Parse methods diff --git a/pypy/module/pyexpat/test/test_parser.py b/pypy/module/pyexpat/test/test_parser.py --- a/pypy/module/pyexpat/test/test_parser.py +++ b/pypy/module/pyexpat/test/test_parser.py @@ -23,11 +23,15 @@ def test_attributes(self): import pyexpat p = pyexpat.ParserCreate() - assert p.buffer_text is False - assert p.namespace_prefixes is False - assert p.returns_unicode is True - assert p.ordered_attributes is False - assert p.specified_attributes is False + def test_setget(p, attr, default=False): + assert getattr(p, attr) is default + for x in 0, 1, 2, 0: + setattr(p, attr, x) + assert getattr(p, attr) is bool(x), attr + for attr 
in ('buffer_text', 'namespace_prefixes', 'ordered_attributes', + 'specified_attributes'): + test_setget(p, attr) + test_setget(p, 'returns_unicode', True) def test_version(self): import pyexpat From pypy.commits at gmail.com Tue Mar 22 22:22:22 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 22 Mar 2016 19:22:22 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: hg merge rposix-for-3 Message-ID: <56f1fdde.86351c0a.dbdc2.ffffdd03@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83285:66663350a720 Date: 2016-03-23 00:57 +0000 http://bitbucket.org/pypy/pypy/changeset/66663350a720/ Log: hg merge rposix-for-3 diff --git a/.hgtags b/.hgtags --- a/.hgtags +++ b/.hgtags @@ -19,3 +19,4 @@ 850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0 5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1 246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0 +bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1 diff --git a/pypy/doc/release-5.0.1.rst b/pypy/doc/release-5.0.1.rst --- a/pypy/doc/release-5.0.1.rst +++ b/pypy/doc/release-5.0.1.rst @@ -9,6 +9,11 @@ .. _`lxml 3.6.0`: https://pypi.python.org/pypi/lxml/3.6.0 .. _`crash on large files`: https://bitbucket.org/pypy/pypy/issues/2260 + +The changes between PyPy 5.0 and 5.0.1 are only two bug fixes: one in +cpyext, which fixes notably (but not only) lxml; and another for a +corner case of the JIT. + What is PyPy? ============= diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py --- a/pypy/module/_cffi_backend/ctypeptr.py +++ b/pypy/module/_cffi_backend/ctypeptr.py @@ -390,4 +390,4 @@ w_fileobj.cffi_fileobj = CffiFileObj(fd, mode) except OSError, e: raise wrap_oserror(space, e) - return rffi.cast(rffi.CCHARP, fileobj.cffi_fileobj.llf) + return rffi.cast(rffi.CCHARP, w_fileobj.cffi_fileobj.llf) diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -1,4 +1,4 @@ -from pypy.interpreter.error import OperationError +from pypy.interpreter.error import OperationError, oefmt from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.api import ( cpython_api, cpython_struct, bootstrap_function, build_type_checkers, @@ -134,8 +134,8 @@ if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str: pass # typecheck returned "ok" without forcing 'ref' at all elif not PyBytes_Check(space, ref): # otherwise, use the alternate way - raise OperationError(space.w_TypeError, space.wrap( - "PyBytes_AsString only support strings")) + raise oefmt(space.w_TypeError, + "expected bytes, %T found", from_ref(space, ref)) ref_str = rffi.cast(PyBytesObject, ref) if not ref_str.c_buffer: # copy string buffer @@ -147,8 +147,8 @@ @cpython_api([PyObject, rffi.CCHARPP, rffi.CArrayPtr(Py_ssize_t)], rffi.INT_real, error=-1) def PyBytes_AsStringAndSize(space, ref, buffer, length): if not PyBytes_Check(space, ref): - raise OperationError(space.w_TypeError, space.wrap( - "PyBytes_AsStringAndSize only support strings")) + raise oefmt(space.w_TypeError, + "expected bytes, %T found", from_ref(space, ref)) ref_str = rffi.cast(PyBytesObject, ref) if not ref_str.c_buffer: # copy string buffer diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh --- a/pypy/tool/release/repackage.sh +++ b/pypy/tool/release/repackage.sh @@ -1,9 +1,9 @@ # Edit these appropriately before running this script maj=5 min=0 -rev=0 +rev=1 branchname=release-$maj.x # ==OR== release-$maj.$min.x 
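The pyexpat fix above means the boolean parser attributes now go through space.bool_w, so any integer assigned to them comes back as a real bool, e.g.:

    import pyexpat
    p = pyexpat.ParserCreate()
    p.namespace_prefixes = 2          # any true value is accepted
    assert p.namespace_prefixes is True
    p.namespace_prefixes = 0
    assert p.namespace_prefixes is False
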
-tagname=release-$maj.$min # ==OR== release-$maj.$min.$rev +tagname=release-$maj.$min.$rev # This script will download latest builds from the buildmaster, rename the top # level directory, and repackage ready to be uploaded to bitbucket. It will also # download source, assuming a tag for the release already exists, and repackage them. diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -22,21 +22,6 @@ from rpython.rlib import rwin32 from rpython.rlib.rwin32file import make_win32_traits -class CConfig: - _compilation_info_ = ExternalCompilationInfo( - includes=['sys/stat.h', - 'unistd.h', - 'fcntl.h'], - ) - for _name in """fchdir fchmod fchmodat fchown fchownat fexecve fdopendir - fpathconf fstat fstatat fstatvfs ftruncate futimens futimes - futimesat linkat lchflags lchmod lchown lstat lutimes - mkdirat mkfifoat mknodat openat readlinkat renameat - symlinkat unlinkat utimensat""".split(): - locals()['HAVE_%s' % _name.upper()] = rffi_platform.Has(_name) -cConfig = rffi_platform.configure(CConfig) -globals().update(cConfig) - class CConstantErrno(CConstant): # these accessors are used when calling get_errno() or set_errno() @@ -1739,3 +1724,176 @@ def getcontroller(self): from rpython.rlib.rposix_environ import OsEnvironController return OsEnvironController() + + +# ____________________________________________________________ +# Support for f... and ...at families of POSIX functions + +class CConfig: + _compilation_info_ = ExternalCompilationInfo( + includes=['sys/stat.h', + 'unistd.h', + 'fcntl.h'], + ) + AT_FDCWD = rffi_platform.DefinedConstantInteger('AT_FDCWD') + AT_SYMLINK_NOFOLLOW = rffi_platform.DefinedConstantInteger('AT_SYMLINK_NOFOLLOW') + AT_EACCESS = rffi_platform.DefinedConstantInteger('AT_EACCESS') + AT_REMOVEDIR = rffi_platform.DefinedConstantInteger('AT_REMOVEDIR') + TIMESPEC = rffi_platform.Struct('struct timespec', [ + ('tv_sec', rffi.TIME_T), + ('tv_nsec', rffi.LONG)]) + + for _name in """faccessat fchdir fchmod fchmodat fchown fchownat fexecve + fdopendir fpathconf fstat fstatat fstatvfs ftruncate + futimens futimes futimesat linkat chflags lchflags lchmod lchown + lstat lutimes mkdirat mkfifoat mknodat openat readlinkat renameat + symlinkat unlinkat utimensat""".split(): + locals()['HAVE_%s' % _name.upper()] = rffi_platform.Has(_name) +cConfig = rffi_platform.configure(CConfig) +globals().update(cConfig) +TIMESPEC2P = rffi.CArrayPtr(TIMESPEC) + +if HAVE_FACCESSAT: + c_faccessat = external('faccessat', + [rffi.INT, rffi.CCHARP, rffi.INT, rffi.INT], rffi.INT) + + def faccessat(pathname, mode, dir_fd=AT_FDCWD, + effective_ids=False, follow_symlinks=True): + """Thin wrapper around faccessat(2) with an interface simlar to + Python3's os.access(). 
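A usage sketch for the faccessat() wrapper being added here, assuming an RPython environment on a platform that provides faccessat(2); the file names are made up:

    import os
    from rpython.rlib import rposix

    dirfd = os.open('/tmp', os.O_RDONLY)
    try:
        # check 'data.txt' relative to dirfd, without following symlinks
        readable = rposix.faccessat('data.txt', os.R_OK,
                                    dir_fd=dirfd, follow_symlinks=False)
    finally:
        os.close(dirfd)
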
+ """ + flags = 0 + if not follow_symlinks: + flags |= AT_SYMLINK_NOFOLLOW + if effective_ids: + flags |= AT_EACCESS + error = c_faccessat(dir_fd, pathname, mode, flags) + return error == 0 + +if HAVE_LINKAT: + c_linkat = external('linkat', + [rffi.INT, rffi.CCHARP, rffi.INT, rffi.CCHARP, rffi.INT], rffi.INT) + + def linkat(src, dst, src_dir_fd=AT_FDCWD, dst_dir_fd=AT_FDCWD, + follow_symlinks=True): + """Thin wrapper around linkat(2) with an interface similar to + Python3's os.link() + """ + if follow_symlinks: + flag = 0 + else: + flag = AT_SYMLINK_NOFOLLOW + error = c_linkat(src_dir_fd, src, dst_dir_fd, dst, flag) + handle_posix_error('linkat', error) + +if HAVE_FUTIMENS: + c_futimens = external('futimens', [rffi.INT, TIMESPEC2P], rffi.INT) + + def futimens(fd, atime, atime_ns, mtime, mtime_ns): + l_times = lltype.malloc(TIMESPEC, 2, flavor='raw') + rffi.setintfield(l_times[0], 'c_tv_sec', atime) + rffi.setintfield(l_times[0], 'c_tv_nsec', atime_ns) + rffi.setintfield(l_times[1], 'c_tv_sec', mtime) + rffi.setintfield(l_times[1], 'c_tv_nsec', mtime_ns) + error = c_futimens(fd, l_times) + handle_posix_error('futimens', error) + +if HAVE_UTIMENSAT: + c_utimensat = external('utimensat', [rffi.INT, TIMESPEC2P], rffi.INT) + + def utimensat(pathname, atime, atime_ns, mtime, mtime_ns, + dir_fd=AT_FDCWD, follow_symlinks=True): + l_times = lltype.malloc(TIMESPEC, 2, flavor='raw') + rffi.setintfield(l_times[0], 'c_tv_sec', atime) + rffi.setintfield(l_times[0], 'c_tv_nsec', atime_ns) + rffi.setintfield(l_times[1], 'c_tv_sec', mtime) + rffi.setintfield(l_times[1], 'c_tv_nsec', mtime_ns) + if follow_symlinks: + flag = 0 + else: + flag = AT_SYMLINK_NOFOLLOW + error = c_futimens(dir_fd, pathname, l_times, flag) + handle_posix_error('utimensat', error) + +if HAVE_MKDIRAT: + c_mkdirat = external('mkdirat', + [rffi.INT, rffi.CCHARP, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def mkdirat(pathname, mode, dir_fd=AT_FDCWD): + error = c_mkdirat(dir_fd, pathname, mode) + handle_posix_error('mkdirat', error) + +if HAVE_UNLINKAT: + c_unlinkat = external('unlinkat', + [rffi.INT, rffi.CCHARP, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def unlinkat(pathname, dir_fd=AT_FDCWD, removedir=False): + flag = AT_REMOVEDIR if removedir else 0 + error = c_unlinkat(dir_fd, pathname, flag) + handle_posix_error('unlinkat', error) + +if HAVE_READLINKAT: + c_readlinkat = external( + 'readlinkat', + [rffi.INT, rffi.CCHARP, rffi.CCHARP, rffi.SIZE_T], rffi.SSIZE_T, + save_err=rffi.RFFI_SAVE_ERRNO) + + def readlinkat(pathname, dir_fd=AT_FDCWD): + pathname = _as_bytes0(pathname) + bufsize = 1023 + while True: + buf = lltype.malloc(rffi.CCHARP.TO, bufsize, flavor='raw') + res = widen(c_readlinkat(dir_fd, pathname, buf, bufsize)) + if res < 0: + lltype.free(buf, flavor='raw') + error = get_saved_errno() # failed + raise OSError(error, "readlinkat failed") + elif res < bufsize: + break # ok + else: + # buf too small, try again with a larger buffer + lltype.free(buf, flavor='raw') + bufsize *= 4 + # convert the result to a string + result = rffi.charp2strn(buf, res) + lltype.free(buf, flavor='raw') + return result + +if HAVE_SYMLINKAT: + c_symlinkat = external('symlinkat', + [rffi.CCHARP, rffi.CCHARP, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def symlinkat(src, dst, dir_fd=AT_FDCWD): + error = c_symlinkat(src, dst, dir_fd) + handle_posix_error('symlinkat', error) + +if HAVE_OPENAT: + c_openat = external('openat', + [rffi.INT, rffi.CCHARP, rffi.INT, rffi.MODE_T], rffi.INT, + 
save_err=rffi.RFFI_SAVE_ERRNO) + + @enforceargs(s_Str0, int, int, int, typecheck=False) + def openat(path, flags, mode, dir_fd=AT_FDCWD): + fd = c_openat(dir_fd, path, flags, mode) + return handle_posix_error('open', fd) + +if HAVE_MKFIFOAT: + c_mkfifoat = external('mkfifoat', + [rffi.INT, rffi.CCHARP, rffi.MODE_T], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def mkfifoat(path, mode, dir_fd=AT_FDCWD): + error = c_mkfifoat(dir_fd, path, mode) + handle_posix_error('mkfifoat', error) + +if HAVE_MKNODAT: + c_mknodat = external('mknodat', + [rffi.INT, rffi.CCHARP, rffi.MODE_T, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO) + + def mknodat(path, mode, device, dir_fd=AT_FDCWD): + error = c_mknodat(dir_fd, path, mode, device) + handle_posix_error('mknodat', error) diff --git a/rpython/rlib/rvmprof/src/vmprof_config.h b/rpython/rlib/rvmprof/src/vmprof_config.h --- a/rpython/rlib/rvmprof/src/vmprof_config.h +++ b/rpython/rlib/rvmprof/src/vmprof_config.h @@ -1,6 +1,10 @@ #define HAVE_SYS_UCONTEXT_H -#if defined(__FreeBSD__) -#define PC_FROM_UCONTEXT uc_mcontext.mc_rip +#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) + #ifdef __i386__ + #define PC_FROM_UCONTEXT uc_mcontext.mc_eip + #else + #define PC_FROM_UCONTEXT uc_mcontext.mc_rip + #endif #elif defined( __APPLE__) #if ((ULONG_MAX) == (UINT_MAX)) #define PC_FROM_UCONTEXT uc_mcontext->__ss.__eip @@ -8,10 +12,10 @@ #define PC_FROM_UCONTEXT uc_mcontext->__ss.__rip #endif #elif defined(__arm__) -#define PC_FROM_UCONTEXT uc_mcontext.arm_ip + #define PC_FROM_UCONTEXT uc_mcontext.arm_ip #elif defined(__linux) && defined(__i386) && defined(__GNUC__) -#define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_EIP] + #define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_EIP] #else -/* linux, gnuc */ -#define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_RIP] + /* linux, gnuc */ + #define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_RIP] #endif diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -7,6 +7,10 @@ import errno import py +def rposix_requires(funcname): + return py.test.mark.skipif(not hasattr(rposix, funcname), + reason="Requires rposix.%s()" % funcname) + class TestPosixFunction: def test_access(self): filename = str(udir.join('test_access.txt')) @@ -99,11 +103,25 @@ def test_mkdir(self): filename = str(udir.join('test_mkdir.dir')) rposix.mkdir(filename, 0) - exc = py.test.raises(OSError, rposix.mkdir, filename, 0) - assert exc.value.errno == errno.EEXIST + with py.test.raises(OSError) as excinfo: + rposix.mkdir(filename, 0) + assert excinfo.value.errno == errno.EEXIST if sys.platform == 'win32': assert exc.type is WindowsError + @rposix_requires('mkdirat') + def test_mkdirat(self): + relpath = 'test_mkdirat.dir' + filename = str(udir.join(relpath)) + dirfd = os.open(os.path.dirname(filename), os.O_RDONLY) + try: + rposix.mkdirat(relpath, 0, dir_fd=dirfd) + with py.test.raises(OSError) as excinfo: + rposix.mkdirat(relpath, 0, dir_fd=dirfd) + assert excinfo.value.errno == errno.EEXIST + finally: + os.close(dirfd) + def test_strerror(self): assert rposix.strerror(2) == os.strerror(2) @@ -448,6 +466,38 @@ def _get_filename(self): return str(udir.join('test_open_ascii')) + @rposix_requires('openat') + def test_openat(self): + def f(dirfd): + try: + fd = rposix.openat('test_open_ascii', os.O_RDONLY, 0777, dirfd) + try: + text = os.read(fd, 50) + return text + finally: + os.close(fd) + except OSError: + return '' + + dirfd = 
os.open(os.path.dirname(self.ufilename), os.O_RDONLY) + try: + assert ll_to_string(interpret(f, [dirfd])) == "test" + finally: + os.close(dirfd) + + @rposix_requires('unlinkat') + def test_unlinkat(self): + def f(dirfd): + return rposix.unlinkat('test_open_ascii', dir_fd=dirfd) + + dirfd = os.open(os.path.dirname(self.ufilename), os.O_RDONLY) + try: + interpret(f, [dirfd]) + finally: + os.close(dirfd) + assert not os.path.exists(self.ufilename) + + class TestPosixUnicode(BasePosixUnicodeOrAscii): def _get_filename(self): return (unicode(udir.join('test_open')) + diff --git a/rpython/translator/c/src/thread_pthread.c b/rpython/translator/c/src/thread_pthread.c --- a/rpython/translator/c/src/thread_pthread.c +++ b/rpython/translator/c/src/thread_pthread.c @@ -37,7 +37,7 @@ # define THREAD_STACK_SIZE 0 /* use default stack size */ # endif -# if (defined(__APPLE__) || defined(__FreeBSD__)) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0 +# if (defined(__APPLE__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0 /* The default stack size for new threads on OSX is small enough that * we'll get hard crashes instead of 'maximum recursion depth exceeded' * exceptions. @@ -84,7 +84,7 @@ if (tss != 0) pthread_attr_setstacksize(&attrs, tss); #endif -#if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED) && !defined(__FreeBSD__) +#if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED) && !(defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) pthread_attr_setscope(&attrs, PTHREAD_SCOPE_SYSTEM); #endif From pypy.commits at gmail.com Tue Mar 22 22:22:24 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 22 Mar 2016 19:22:24 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Implement dir_fd argument for many posix.* functions Message-ID: <56f1fde0.c13fc20a.33183.0cb2@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83286:fbc1f8a5b79b Date: 2016-03-23 02:12 +0000 http://bitbucket.org/pypy/pypy/changeset/fbc1f8a5b79b/ Log: Implement dir_fd argument for many posix.* functions diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -105,7 +105,10 @@ return func(fname1, fname2, *args) return dispatch -DEFAULT_DIR_FD = -100 +if hasattr(rposix, 'AT_FDCWD'): + DEFAULT_DIR_FD = rposix.AT_FDCWD +else: + DEFAULT_DIR_FD = -100 DIR_FD_AVAILABLE = False def _unwrap_fd(space, w_value): @@ -128,8 +131,8 @@ return dir_fd - at unwrap_spec(flag=c_int, mode=c_int, dir_fd=DirFD) -def open(space, w_fname, flag, mode=0777, dir_fd=DEFAULT_DIR_FD): + at unwrap_spec(flags=c_int, mode=c_int, dir_fd=DirFD) +def open(space, w_path, flags, mode=0777, dir_fd=DEFAULT_DIR_FD): """open(path, flags, mode=0o777, *, dir_fd=None) Open a file for low level IO. Returns a file handle (integer). @@ -139,10 +142,13 @@ dir_fd may not be implemented on your platform. If it is unavailable, using it will raise a NotImplementedError.""" try: - fd = dispatch_filename(rposix.open)( - space, w_fname, flag, mode) - except OSError, e: - raise wrap_oserror2(space, e, w_fname) + if dir_fd == DEFAULT_DIR_FD: + fd = dispatch_filename(rposix.open)(space, w_path, flags, mode) + else: + path = space.fsencode_w(w_path) + fd = rposix.openat(path, flags, mode, dir_fd) + except OSError as e: + raise wrap_oserror2(space, e, w_path) return space.wrap(fd) @unwrap_spec(fd=c_int, pos=r_longlong, how=c_int) @@ -504,8 +510,12 @@ dir_fd may not be implemented on your platform. 
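At application level this gives os.open() the Python 3 dir_fd behaviour: when a directory file descriptor is passed, the path is resolved relative to it through openat(2). Illustrative only, assuming '/tmp/data.txt' exists:

    import os

    dfd = os.open('/tmp', os.O_RDONLY)
    try:
        fd = os.open('data.txt', os.O_RDONLY, dir_fd=dfd)   # opens /tmp/data.txt
        os.close(fd)
    finally:
        os.close(dfd)
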
If it is unavailable, using it will raise a NotImplementedError.""" try: - dispatch_filename(rposix.unlink)(space, w_path) - except OSError, e: + if dir_fd == DEFAULT_DIR_FD: + dispatch_filename(rposix.unlink)(space, w_path) + else: + path = space.fsencode_w(w_path) + rposix.unlinkat(path, dir_fd, removedir=False) + except OSError as e: raise wrap_oserror2(space, e, w_path) @unwrap_spec(dir_fd=DirFD) @@ -519,8 +529,12 @@ dir_fd may not be implemented on your platform. If it is unavailable, using it will raise a NotImplementedError.""" try: - dispatch_filename(rposix.unlink)(space, w_path) - except OSError, e: + if dir_fd == DEFAULT_DIR_FD: + dispatch_filename(rposix.unlink)(space, w_path) + else: + path = space.fsencode_w(w_path) + rposix.unlinkat(path, dir_fd, removedir=False) + except OSError as e: raise wrap_oserror2(space, e, w_path) def _getfullpathname(space, w_path): @@ -582,8 +596,12 @@ The mode argument is ignored on Windows.""" try: - dispatch_filename(rposix.mkdir)(space, w_path, mode) - except OSError, e: + if dir_fd == DEFAULT_DIR_FD: + dispatch_filename(rposix.mkdir)(space, w_path, mode) + else: + path = space.fsencode_w(w_path) + rposix.mkdirat(path, mode, dir_fd) + except OSError as e: raise wrap_oserror2(space, e, w_path) @unwrap_spec(dir_fd=DirFD) @@ -597,8 +615,12 @@ dir_fd may not be implemented on your platform. If it is unavailable, using it will raise a NotImplementedError.""" try: - dispatch_filename(rposix.rmdir)(space, w_path) - except OSError, e: + if dir_fd == DEFAULT_DIR_FD: + dispatch_filename(rposix.rmdir)(space, w_path) + else: + path = space.fsencode_w(w_path) + rposix.unlinkat(path, dir_fd, removedir=True) + except OSError as e: raise wrap_oserror2(space, e, w_path) @unwrap_spec(errno=c_int) @@ -797,7 +819,7 @@ raise wrap_oserror(space, e) @unwrap_spec(mode=c_int, dir_fd=DirFD) -def mkfifo(space, w_filename, mode=0666, dir_fd=DEFAULT_DIR_FD): +def mkfifo(space, w_path, mode=0666, dir_fd=DEFAULT_DIR_FD): """mkfifo(path, mode=0o666, *, dir_fd=None) Create a FIFO (a POSIX named pipe). @@ -807,9 +829,13 @@ dir_fd may not be implemented on your platform. If it is unavailable, using it will raise a NotImplementedError.""" try: - dispatch_filename(rposix.mkfifo)(space, w_filename, mode) - except OSError, e: - raise wrap_oserror2(space, e, w_filename) + if dir_fd == DEFAULT_DIR_FD: + dispatch_filename(rposix.mkfifo)(space, w_path, mode) + else: + path = space.fsencode_w(w_path) + rposix.mkfifoat(path, mode, dir_fd) + except OSError as e: + raise wrap_oserror2(space, e, w_path) @unwrap_spec(mode=c_int, device=c_int, dir_fd=DirFD) def mknod(space, w_filename, mode=0600, device=0, dir_fd=DEFAULT_DIR_FD): @@ -827,8 +853,12 @@ dir_fd may not be implemented on your platform. If it is unavailable, using it will raise a NotImplementedError.""" try: - dispatch_filename(rposix.mknod)(space, w_filename, mode, device) - except OSError, e: + if dir_fd == DEFAULT_DIR_FD: + dispatch_filename(rposix.mknod)(space, w_filename, mode, device) + else: + fname = space.fsencode_w(w_filename) + rposix.mknodat(fname, mode, device, dir_fd) + except OSError as e: raise wrap_oserror2(space, e, w_filename) @unwrap_spec(mask=c_int) @@ -911,8 +941,13 @@ dir_fd may not be implemented on your platform. 
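mkdir, unlink and (just below) rmdir get the same treatment: with a dir_fd they go through mkdirat(2) and unlinkat(2). A short app-level sketch, with made-up names:

    import os

    dfd = os.open('/tmp', os.O_RDONLY)
    try:
        os.mkdir('scratch', dir_fd=dfd)       # mkdirat: creates /tmp/scratch
        os.rmdir('scratch', dir_fd=dfd)       # unlinkat with AT_REMOVEDIR
    finally:
        os.close(dfd)
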
If it is unavailable, using it will raise a NotImplementedError.""" try: - dispatch_filename_2(rposix.symlink)(space, w_src, w_dst) - except OSError, e: + if dir_fd == DEFAULT_DIR_FD: + dispatch_filename_2(rposix.symlink)(space, w_src, w_dst) + else: + src = space.fsencode_w(w_src) + dst = space.fsencode_w(w_dst) + rposix.symlinkat(src, dst, dir_fd) + except OSError as e: raise wrap_oserror(space, e) @@ -932,7 +967,10 @@ else: path = space.bytes0_w(w_path) try: - result = os.readlink(path) + if dir_fd == DEFAULT_DIR_FD: + result = rposix.readlink(path) + else: + result = rposix.readlinkat(path, dir_fd) except OSError, e: raise wrap_oserror2(space, e, w_path) w_result = space.wrapbytes(result) From pypy.commits at gmail.com Tue Mar 22 22:43:00 2016 From: pypy.commits at gmail.com (pjenvey) Date: Tue, 22 Mar 2016 19:43:00 -0700 (PDT) Subject: [pypy-commit] pypy stdlib-2.7.11: PyPy doesn't crash here which is good enough Message-ID: <56f202b4.e853c20a.29f98.1030@mx.google.com> Author: Philip Jenvey Branch: stdlib-2.7.11 Changeset: r83287:1a5942445daa Date: 2016-03-22 19:42 -0700 http://bitbucket.org/pypy/pypy/changeset/1a5942445daa/ Log: PyPy doesn't crash here which is good enough diff --git a/lib-python/2.7/test/test_compile.py b/lib-python/2.7/test/test_compile.py --- a/lib-python/2.7/test/test_compile.py +++ b/lib-python/2.7/test/test_compile.py @@ -568,7 +568,13 @@ fn = os.path.join(tmpd, "bad.py") with open(fn, "wb") as fp: fp.write(src) - rc, out, err = script_helper.assert_python_failure(fn) + try: + rc, out, err = script_helper.assert_python_failure(fn) + except AssertionError: + if check_impl_detail(pypy=True): + # as long as we don't crash + return + raise finally: test_support.rmtree(tmpd) self.assertIn(b"Non-ASCII", err) From pypy.commits at gmail.com Wed Mar 23 00:57:01 2016 From: pypy.commits at gmail.com (rlamy) Date: Tue, 22 Mar 2016 21:57:01 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Actually enable dir_fd support in the functions where it's implemented Message-ID: <56f2221d.05de1c0a.8ace4.fffff37a@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83288:2c9c63383ddd Date: 2016-03-23 04:46 +0000 http://bitbucket.org/pypy/pypy/changeset/2c9c63383ddd/ Log: Actually enable dir_fd support in the functions where it's implemented diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -105,6 +105,7 @@ return func(fname1, fname2, *args) return dispatch + if hasattr(rposix, 'AT_FDCWD'): DEFAULT_DIR_FD = rposix.AT_FDCWD else: @@ -117,21 +118,25 @@ else: return space.c_int_w(w_value) +class _DirFD(Unwrapper): + def unwrap(self, space, w_value): + return _unwrap_fd(space, w_value) -class DirFD(Unwrapper): +class _DirFD_Unavailable(Unwrapper): def unwrap(self, space, w_value): dir_fd = _unwrap_fd(space, w_value) if dir_fd == DEFAULT_DIR_FD: return dir_fd - elif not DIR_FD_AVAILABLE: + else: raise oefmt( space.w_NotImplementedError, "dir_fd unavailable on this platform") - else: - return dir_fd +def DirFD(available=False): + return _DirFD if available else _DirFD_Unavailable - at unwrap_spec(flags=c_int, mode=c_int, dir_fd=DirFD) + + at unwrap_spec(flags=c_int, mode=c_int, dir_fd=DirFD(rposix.HAVE_OPENAT)) def open(space, w_path, flags, mode=0777, dir_fd=DEFAULT_DIR_FD): """open(path, flags, mode=0o777, *, dir_fd=None) @@ -334,7 +339,7 @@ else: return build_stat_result(space, st) - at unwrap_spec(dir_fd=DirFD, follow_symlinks=kwonly(bool)) + at 
unwrap_spec(dir_fd=DirFD(available=False), follow_symlinks=kwonly(bool)) def stat(space, w_path, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): """stat(path, *, dir_fd=None, follow_symlinks=True) -> stat result @@ -359,7 +364,7 @@ else: return build_stat_result(space, st) - at unwrap_spec(dir_fd=DirFD) + at unwrap_spec(dir_fd=DirFD(available=False)) def lstat(space, w_path, dir_fd=DEFAULT_DIR_FD): """lstat(path, *, dir_fd=None) -> stat result @@ -439,7 +444,7 @@ raise wrap_oserror(space, e) @unwrap_spec(mode=c_int, - dir_fd=DirFD, effective_ids=kwonly(bool), follow_symlinks=kwonly(bool)) + dir_fd=DirFD(available=False), effective_ids=kwonly(bool), follow_symlinks=kwonly(bool)) def access(space, w_path, mode, dir_fd=DEFAULT_DIR_FD, effective_ids=True, follow_symlinks=True): """\ @@ -499,7 +504,7 @@ else: return space.wrap(rc) - at unwrap_spec(dir_fd=DirFD) + at unwrap_spec(dir_fd=DirFD(rposix.HAVE_UNLINKAT)) def unlink(space, w_path, dir_fd=DEFAULT_DIR_FD): """unlink(path, *, dir_fd=None) @@ -518,7 +523,7 @@ except OSError as e: raise wrap_oserror2(space, e, w_path) - at unwrap_spec(dir_fd=DirFD) + at unwrap_spec(dir_fd=DirFD(rposix.HAVE_UNLINKAT)) def remove(space, w_path, dir_fd=DEFAULT_DIR_FD): """remove(path, *, dir_fd=None) @@ -583,7 +588,7 @@ except OSError, e: raise wrap_oserror2(space, e, w_path) - at unwrap_spec(mode=c_int, dir_fd=DirFD) + at unwrap_spec(mode=c_int, dir_fd=DirFD(rposix.HAVE_MKDIRAT)) def mkdir(space, w_path, mode=0o777, dir_fd=DEFAULT_DIR_FD): """mkdir(path, mode=0o777, *, dir_fd=None) @@ -604,7 +609,7 @@ except OSError as e: raise wrap_oserror2(space, e, w_path) - at unwrap_spec(dir_fd=DirFD) + at unwrap_spec(dir_fd=DirFD(rposix.HAVE_UNLINKAT)) def rmdir(space, w_path, dir_fd=DEFAULT_DIR_FD): """rmdir(path, *, dir_fd=None) @@ -751,7 +756,7 @@ raise wrap_oserror(space, e) return space.newtuple([space.wrap(fd1), space.wrap(fd2)]) - at unwrap_spec(mode=c_int, dir_fd=DirFD, follow_symlinks=kwonly(bool)) + at unwrap_spec(mode=c_int, dir_fd=DirFD(available=False), follow_symlinks=kwonly(bool)) def chmod(space, w_path, mode, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): """chmod(path, mode, *, dir_fd=None, follow_symlinks=True) @@ -784,7 +789,7 @@ except OSError, e: raise wrap_oserror(space, e) - at unwrap_spec(src_dir_fd=DirFD, dst_dir_fd=DirFD) + at unwrap_spec(src_dir_fd=DirFD(available=False), dst_dir_fd=DirFD(available=False)) def rename(space, w_old, w_new, src_dir_fd=DEFAULT_DIR_FD, dst_dir_fd=DEFAULT_DIR_FD): """rename(src, dst, *, src_dir_fd=None, dst_dir_fd=None) @@ -801,7 +806,7 @@ except OSError, e: raise wrap_oserror(space, e) - at unwrap_spec(src_dir_fd=DirFD, dst_dir_fd=DirFD) + at unwrap_spec(src_dir_fd=DirFD(available=False), dst_dir_fd=DirFD(available=False)) def replace(space, w_old, w_new, src_dir_fd=DEFAULT_DIR_FD, dst_dir_fd=DEFAULT_DIR_FD): """replace(src, dst, *, src_dir_fd=None, dst_dir_fd=None) @@ -818,7 +823,7 @@ except OSError, e: raise wrap_oserror(space, e) - at unwrap_spec(mode=c_int, dir_fd=DirFD) + at unwrap_spec(mode=c_int, dir_fd=DirFD(rposix.HAVE_MKFIFOAT)) def mkfifo(space, w_path, mode=0666, dir_fd=DEFAULT_DIR_FD): """mkfifo(path, mode=0o666, *, dir_fd=None) @@ -837,7 +842,7 @@ except OSError as e: raise wrap_oserror2(space, e, w_path) - at unwrap_spec(mode=c_int, device=c_int, dir_fd=DirFD) + at unwrap_spec(mode=c_int, device=c_int, dir_fd=DirFD(rposix.HAVE_MKNODAT)) def mknod(space, w_filename, mode=0600, device=0, dir_fd=DEFAULT_DIR_FD): """mknod(filename, mode=0o600, device=0, *, dir_fd=None) @@ -899,7 +904,8 @@ @unwrap_spec( 
src='fsencode', dst='fsencode', - src_dir_fd=DirFD, dst_dir_fd=DirFD, follow_symlinks=kwonly(bool)) + src_dir_fd=DirFD(available=False), dst_dir_fd=DirFD(available=False), + follow_symlinks=kwonly(bool)) def link( space, src, dst, src_dir_fd=DEFAULT_DIR_FD, dst_dir_fd=DEFAULT_DIR_FD, @@ -924,7 +930,7 @@ raise wrap_oserror(space, e) - at unwrap_spec(dir_fd=DirFD) + at unwrap_spec(dir_fd=DirFD(rposix.HAVE_SYMLINKAT)) def symlink(space, w_src, w_dst, w_target_is_directory=None, dir_fd=DEFAULT_DIR_FD): """symlink(src, dst, target_is_directory=False, *, dir_fd=None) @@ -951,7 +957,7 @@ raise wrap_oserror(space, e) - at unwrap_spec(dir_fd=DirFD) + at unwrap_spec(dir_fd=DirFD(rposix.HAVE_READLINKAT)) def readlink(space, w_path, dir_fd=DEFAULT_DIR_FD): """readlink(path, *, dir_fd=None) -> path @@ -1131,7 +1137,7 @@ raise wrap_oserror(space, e) return space.wrap(ret) - at unwrap_spec(dir_fd=DirFD, follow_symlinks=kwonly(bool)) + at unwrap_spec(dir_fd=DirFD(available=False), follow_symlinks=kwonly(bool)) def utime(space, w_path, w_tuple, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): """utime(path, times=None, *, ns=None, dir_fd=None, follow_symlinks=True) @@ -1572,7 +1578,7 @@ @unwrap_spec( path='fsencode', uid=c_uid_t, gid=c_gid_t, - dir_fd=DirFD, follow_symlinks=kwonly(bool)) + dir_fd=DirFD(available=False), follow_symlinks=kwonly(bool)) def chown(space, path, uid, gid, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): """chown(path, uid, gid, *, dir_fd=None, follow_symlinks=True) From pypy.commits at gmail.com Wed Mar 23 04:44:40 2016 From: pypy.commits at gmail.com (fijal) Date: Wed, 23 Mar 2016 01:44:40 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: finish whacking at test_resume Message-ID: <56f25778.c1621c0a.65c64.ffff8a7e@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83289:231f68c404dc Date: 2016-03-23 10:43 +0200 http://bitbucket.org/pypy/pypy/changeset/231f68c404dc/ Log: finish whacking at test_resume diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py --- a/rpython/jit/metainterp/opencoder.py +++ b/rpython/jit/metainterp/opencoder.py @@ -1,10 +1,9 @@ """ Storage format: for each operation (inputargs numbered with negative numbers) - [size-if-unknown-arity] [ ...] [descr] [potential snapshot] -snapshot is as follows - - [ ...] + [size-if-unknown-arity] [ ...] 
[descr-or-snapshot-index] + +Snapshot index for guards points to snapshot stored in _snapshots of trace """ from rpython.jit.metainterp.history import ConstInt, Const, ConstFloat, ConstPtr @@ -24,6 +23,9 @@ MIN_SHORT = -2**15 + 1 MAX_SHORT = 2**15 - 1 +class TagOverflow(Exception): + pass + class BaseTrace(object): pass @@ -76,6 +78,8 @@ self.metainterp_sd = metainterp_sd self._cache = [None] * trace._count if force_inputargs is not None: + # the trace here is cut and we're working from + # inputargs that are in the middle, shuffle stuff around a bit self.inputargs = [rop.inputarg_from_tp(arg.type) for arg in force_inputargs] for i, arg in enumerate(force_inputargs): @@ -251,7 +255,8 @@ if self._pos >= len(self._ops): # grow by 2X self._ops = self._ops + [rffi.cast(rffi.SHORT, -15)] * len(self._ops) - assert MIN_SHORT < v < MAX_SHORT + if not MIN_SHORT < v < MAX_SHORT: + raise TagOverflow self._ops[self._pos] = rffi.cast(rffi.SHORT, v) self._pos += 1 @@ -305,14 +310,10 @@ self._bigints.append(box.getint()) return tag(TAGCONSTOTHER, v) elif isinstance(box, ConstFloat): + # don't intern float constants self._consts_float += 1 - v = self._floats_dict.get(box.getfloat(), -1) - if v == -1: - v = (len(self._floats) << 1) | 1 - # XXX the next line is bogus, can't use a float as - # dict key. Must convert it first to a longlong - self._floats_dict[box.getfloat()] = v - self._floats.append(box.getfloat()) + v = (len(self._floats) << 1) | 1 + self._floats.append(box.getfloat()) return tag(TAGCONSTOTHER, v) else: self._consts_ptr += 1 @@ -343,6 +344,8 @@ for box in argboxes: self.append(self._encode(box)) if opwithdescr[opnum]: + # note that for guards we always store 0 which is later + # patched during capture_resumedata if descr is None: self.append(0) else: diff --git a/rpython/jit/metainterp/test/test_resume.py b/rpython/jit/metainterp/test/test_resume.py --- a/rpython/jit/metainterp/test/test_resume.py +++ b/rpython/jit/metainterp/test/test_resume.py @@ -23,12 +23,12 @@ from rpython.jit.metainterp import executor from rpython.jit.codewriter import heaptracker, longlong from rpython.jit.metainterp.resoperation import ResOperation, rop -from rpython.jit.metainterp.test.strategies import boxlists from rpython.rlib.debug import debug_start, debug_stop, debug_print,\ have_debug_prints +from rpython.jit.metainterp.test.strategies import intconsts from rpython.jit.metainterp import resumecode -from hypothesis import given +from hypothesis import given, strategies class Storage: rd_frame_info_list = None @@ -40,6 +40,9 @@ class FakeOptimizer(object): + def __init__(self, trace=None): + self.trace = trace + def get_box_replacement(self, op): while (op.get_forwarded() is not None and not isinstance(op.get_forwarded(), info.AbstractInfo)): @@ -184,7 +187,10 @@ def execute_and_record(self, opnum, descr, *argboxes): resvalue = executor.execute(self.cpu, None, opnum, descr, *argboxes) - op = ResOperation(opnum, list(argboxes), descr=descr) + if isinstance(resvalue, int): + op = IntFrontendOp(0) + else: + op = RefFrontendOp(0) setvalue(op, resvalue) self.trace.append((opnum, list(argboxes), resvalue, descr)) return op @@ -924,22 +930,27 @@ 2, 1, tag(3, TAGINT), tag(0, TAGVIRTUAL), tag(0, TAGBOX), tag(3, TAGINT) ] + [0, 0] - at given(boxlists) + at given(strategies.lists(strategies.builds(IntFrontendOp, strategies.just(0)) | intconsts, + min_size=1)) def test_ResumeDataLoopMemo_random(lst): - t = Trace() + inpargs = [box for box in lst if not isinstance(box, Const)] + t = Trace(inpargs) t.append(0) - s = 
t.create_top_snapshot(FakeJitCode("", 0), 1, Frame([]), lst, []) - s = TopSnapshot(None, [], lst) - frameinfo = FrameInfo(None, FakeJitCode("foo", 0), 0) - memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) - num, liveboxes, v = memo.number(FakeOptimizer(), s, frameinfo) + metainterp_sd = FakeMetaInterpStaticData() + i = t.get_iter(metainterp_sd) + t.create_top_snapshot(FakeJitCode("", 0), 0, Frame(lst), False, [], []) + memo = ResumeDataLoopMemo(metainterp_sd) + num, liveboxes, v = memo.number(FakeOptimizer(), 0, i) l = unpack_numbering(num) - assert l[-1] == 0 - assert l[0] == len(lst) + assert l[0] == 0 + assert l[1] == 0 + assert l[2] == 0 + assert l[3] == 0 + mapping = dict(zip(inpargs, i.inputargs)) for i, item in enumerate(lst): - v, tag = untag(l[i + 1]) + v, tag = untag(l[i + 4]) if tag == TAGBOX: - assert l[i + 1] == liveboxes[item] + assert l[i + 4] == liveboxes[mapping[item]] elif tag == TAGCONST: assert memo.consts[v].getint() == item.getint() elif tag == TAGINT: @@ -947,7 +958,7 @@ def test_ResumeDataLoopMemo_number_boxes(): memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) - b1, b2 = [InputArgInt(), InputArgInt()] + b1, b2 = [IntFrontendOp(0), IntFrontendOp(0)] assert memo.num_cached_boxes() == 0 boxes = [] num = memo.assign_number_to_box(b1, boxes) @@ -976,7 +987,7 @@ def test_ResumeDataLoopMemo_number_virtuals(): memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) - b1, b2 = [InputArgInt(), InputArgInt()] + b1, b2 = [IntFrontendOp(0), IntFrontendOp(0)] assert memo.num_cached_virtuals() == 0 num = memo.assign_number_to_virtual(b1) assert num == -1 @@ -996,9 +1007,9 @@ assert memo.num_cached_virtuals() == 0 def test_register_virtual_fields(): - b1, b2 = InputArgInt(), InputArgInt() - vbox = InputArgRef() - modifier = ResumeDataVirtualAdder(FakeOptimizer(), None, None, None) + b1, b2 = IntFrontendOp(0), IntFrontendOp(1) + vbox = RefFrontendOp(2) + modifier = ResumeDataVirtualAdder(FakeOptimizer(), None, None, None, None) modifier.liveboxes_from_env = {} modifier.liveboxes = {} modifier.vfieldboxes = {} @@ -1007,7 +1018,7 @@ b2: UNASSIGNED} assert modifier.vfieldboxes == {vbox: [b1, b2]} - modifier = ResumeDataVirtualAdder(FakeOptimizer(), None, None, None) + modifier = ResumeDataVirtualAdder(FakeOptimizer(), None, None, None, None) modifier.liveboxes_from_env = {vbox: tag(0, TAGVIRTUAL)} modifier.liveboxes = {} modifier.vfieldboxes = {} @@ -1025,48 +1036,55 @@ return newboxes def make_storage(b1, b2, b3): + t = Trace([box for box in [b1, b2, b3] if not isinstance(box, Const)]) + t.append(0) storage = Storage() - snapshot = Snapshot(None, [b1, ConstInt(1), b1, b2]) - snapshot = Snapshot(snapshot, [ConstInt(2), ConstInt(3)]) - snapshot = Snapshot(snapshot, [b1, b2, b3]) - top_snapshot = TopSnapshot(snapshot, [], []) - frameinfo = FrameInfo(FrameInfo(FrameInfo(None, FakeJitCode("code1", 21), 22), - FakeJitCode("code2", 31), 32), FakeJitCode("code3", 41), 42) - storage.rd_snapshot = top_snapshot - storage.rd_frame_info_list = frameinfo - return storage + snap1 = t.create_snapshot(FakeJitCode("code3", 41), 42, + Frame([b1, ConstInt(1), b1, b2]), False) + snap2 = t.create_snapshot(FakeJitCode("code2", 31), 32, + Frame([ConstInt(2), ConstInt(3)]), False) + snap3 = t.create_top_snapshot(FakeJitCode("code1", 21), 22, + Frame([b1, b2, b3]), False, [], []) + snap3.prev = snap2 + snap2.prev = snap1 + storage.rd_resume_position = 0 + return storage, t def test_virtual_adder_int_constants(): b1s, b2s, b3s = [ConstInt(sys.maxint), ConstInt(2**16), ConstInt(-65)] - storage = 
make_storage(b1s, b2s, b3s) - memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) - modifier = ResumeDataVirtualAdder(FakeOptimizer(), storage, storage, memo) - liveboxes = modifier.finish(FakeOptimizer()) - assert storage.rd_snapshot is None + storage, t = make_storage(b1s, b2s, b3s) + metainterp_sd = FakeMetaInterpStaticData() + memo = ResumeDataLoopMemo(metainterp_sd) + i = t.get_iter(metainterp_sd) + modifier = ResumeDataVirtualAdder(FakeOptimizer(i), storage, storage, i, memo) + liveboxes = modifier.finish(FakeOptimizer(i)) cpu = MyCPU([]) reader = ResumeDataDirectReader(MyMetaInterp(cpu), storage, "deadframe") reader.consume_vref_and_vable(None, None, None) reader.cur_index += 2 # framestack - _next_section(reader, sys.maxint, 2**16, -65) + _next_section(reader, sys.maxint, 1, sys.maxint, 2**16) reader.cur_index += 2 # framestack _next_section(reader, 2, 3) reader.cur_index += 2 # framestack - _next_section(reader, sys.maxint, 1, sys.maxint, 2**16) + _next_section(reader, sys.maxint, 2**16, -65) def test_virtual_adder_memo_const_sharing(): b1s, b2s, b3s = [ConstInt(sys.maxint), ConstInt(2**16), ConstInt(-65)] - storage = make_storage(b1s, b2s, b3s) - memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) - modifier = ResumeDataVirtualAdder(FakeOptimizer(), storage, storage, memo) - modifier.finish(FakeOptimizer()) + storage, t = make_storage(b1s, b2s, b3s) + metainterp_sd = FakeMetaInterpStaticData() + memo = ResumeDataLoopMemo(metainterp_sd) + i = t.get_iter(metainterp_sd) + modifier = ResumeDataVirtualAdder(FakeOptimizer(i), storage, storage, i, memo) + modifier.finish(FakeOptimizer(i)) assert len(memo.consts) == 2 assert storage.rd_consts is memo.consts b1s, b2s, b3s = [ConstInt(sys.maxint), ConstInt(2**17), ConstInt(-65)] - storage2 = make_storage(b1s, b2s, b3s) - modifier2 = ResumeDataVirtualAdder(FakeOptimizer(), storage2, storage2, - memo) - modifier2.finish(FakeOptimizer()) + storage2, t = make_storage(b1s, b2s, b3s) + i = t.get_iter(metainterp_sd) + modifier2 = ResumeDataVirtualAdder(FakeOptimizer(i), storage2, storage2, + i, memo) + modifier2.finish(FakeOptimizer(i)) assert len(memo.consts) == 3 assert storage2.rd_consts is memo.consts @@ -1173,12 +1191,12 @@ def test_virtual_adder_make_virtual(): - b2s, b3s, b4s, b5s = [InputArgRef(), InputArgInt(3), InputArgRef(), - InputArgRef()] + b2s, b3s, b4s, b5s = [IntFrontendOp(0), IntFrontendOp(0), RefFrontendOp(0), + RefFrontendOp(0)] c1s = ConstInt(111) storage = Storage() memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) - modifier = ResumeDataVirtualAdder(FakeOptimizer(), storage, storage, memo) + modifier = ResumeDataVirtualAdder(FakeOptimizer(), storage, storage, None, memo) modifier.liveboxes_from_env = {} modifier.liveboxes = {} modifier.vfieldboxes = {} @@ -1204,7 +1222,9 @@ storage.rd_consts = memo.consts[:] storage.rd_numb = None # resume - b3t, b5t = [InputArgInt(33), InputArgRef(demo55o)] + b3t, b5t = [IntFrontendOp(0), RefFrontendOp(0)] + b5t.setref_base(demo55o) + b3t.setint(33) newboxes = _resume_remap(liveboxes, [#b2s -- virtual b3s, #b4s -- virtual @@ -1253,11 +1273,12 @@ del Const.__eq__ def test_virtual_adder_make_varray(): - b2s, b4s = [InputArgRef(), InputArgInt(4)] + b2s, b4s = [IntFrontendOp(0), IntFrontendOp(0)] + b4s.setint(4) c1s = ConstInt(111) storage = Storage() memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) - modifier = ResumeDataVirtualAdder(FakeOptimizer(), storage, storage, memo) + modifier = ResumeDataVirtualAdder(FakeOptimizer(), storage, storage, None, memo) 
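The opencoder change earlier in this commit makes every trace entry a signed 16-bit value and bails out with TagOverflow when something does not fit; schematically:

    MIN_SHORT = -2**15 + 1
    MAX_SHORT = 2**15 - 1

    class TagOverflow(Exception):
        pass

    def append_entry(ops, v):
        # refuse to silently truncate: values outside the short range make
        # the encoder give up on this trace
        if not MIN_SHORT < v < MAX_SHORT:
            raise TagOverflow
        ops.append(v)
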
modifier.liveboxes_from_env = {} modifier.liveboxes = {} modifier.vfieldboxes = {} @@ -1272,7 +1293,10 @@ storage.rd_consts = memo.consts[:] storage.rd_numb = None # resume - b1t, b3t, b4t = [InputArgInt(11), InputArgInt(33), InputArgInt(44)] + b1t, b3t, b4t = [IntFrontendOp(0), IntFrontendOp(0), IntFrontendOp(0)] + b1t.setint(11) + b3t.setint(33) + b4t.setint(44) newboxes = _resume_remap(liveboxes, [#b2s -- virtual b4s], b4t) @@ -1302,11 +1326,11 @@ def test_virtual_adder_make_vstruct(): - b2s, b4s = [InputArgRef(), InputArgRef()] + b2s, b4s = [RefFrontendOp(0), RefFrontendOp(0)] c1s = ConstInt(111) storage = Storage() memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) - modifier = ResumeDataVirtualAdder(FakeOptimizer(), storage, storage, memo) + modifier = ResumeDataVirtualAdder(FakeOptimizer(), storage, storage, None, memo) modifier.liveboxes_from_env = {} modifier.liveboxes = {} modifier.vfieldboxes = {} @@ -1321,7 +1345,7 @@ dump_storage(storage, liveboxes) storage.rd_consts = memo.consts[:] storage.rd_numb = None - b4t = InputArgRef() + b4t = RefFrontendOp(0) newboxes = _resume_remap(liveboxes, [#b2s -- virtual b4s], b4t) # @@ -1349,10 +1373,10 @@ def test_virtual_adder_pending_fields(): - b2s, b4s = [InputArgRef(), InputArgRef()] + b2s, b4s = [RefFrontendOp(0), RefFrontendOp(0)] storage = Storage() memo = ResumeDataLoopMemo(FakeMetaInterpStaticData()) - modifier = ResumeDataVirtualAdder(None, storage, storage, memo) + modifier = ResumeDataVirtualAdder(None, storage, storage, None, memo) modifier.liveboxes_from_env = {} modifier.liveboxes = {} modifier.vfieldboxes = {} @@ -1369,8 +1393,10 @@ storage.rd_numb = None # resume demo55.next = lltype.nullptr(LLtypeMixin.NODE) - b2t = InputArgRef(demo55o) - b4t = InputArgRef(demo66o) + b2t = RefFrontendOp(0) + b2t.setref_base(demo55o) + b4t = RefFrontendOp(0) + b4t.setref_base(demo66o) newboxes = _resume_remap(liveboxes, [b2s, b4s], b2t, b4t) metainterp = MyMetaInterp() @@ -1389,7 +1415,7 @@ class Storage(object): pass storage = Storage() - modifier = ResumeDataVirtualAdder(None, storage, storage, None) + modifier = ResumeDataVirtualAdder(None, storage, storage, None, None) modifier._add_pending_fields(None, []) assert not storage.rd_pendingfields # @@ -1398,9 +1424,9 @@ return False field_a = FieldDescr() storage = Storage() - modifier = ResumeDataVirtualAdder(None, storage, storage, None) - a = InputArgInt() - b = InputArgInt() + modifier = ResumeDataVirtualAdder(None, storage, storage, None, None) + a = IntFrontendOp(0) + b = IntFrontendOp(0) modifier.liveboxes_from_env = {a: rffi.cast(rffi.SHORT, 1042), b: rffi.cast(rffi.SHORT, 1061)} modifier._add_pending_fields(FakeOptimizer(), [ @@ -1416,11 +1442,11 @@ # array_a = FieldDescr() storage = Storage() - modifier = ResumeDataVirtualAdder(None, storage, storage, None) - a42 = InputArgInt() - a61 = InputArgInt() - a62 = InputArgInt() - a63 = InputArgInt() + modifier = ResumeDataVirtualAdder(None, storage, storage, None, None) + a42 = IntFrontendOp(0) + a61 = IntFrontendOp(0) + a62 = IntFrontendOp(0) + a63 = IntFrontendOp(0) modifier.liveboxes_from_env = {a42: rffi.cast(rffi.SHORT, 1042), a61: rffi.cast(rffi.SHORT, 1061), a62: rffi.cast(rffi.SHORT, 1062), @@ -1508,7 +1534,7 @@ metainterp_sd = FakeMetaInterpStaticData() metainterp_sd.options = options memo = ResumeDataLoopMemo(metainterp_sd) - modifier = ResumeDataVirtualAdder(None, None, None, memo) + modifier = ResumeDataVirtualAdder(None, None, None, None, memo) for i in range(5): assert not modifier._invalidation_needed(5, i) From 
pypy.commits at gmail.com Wed Mar 23 05:20:34 2016 From: pypy.commits at gmail.com (fijal) Date: Wed, 23 Mar 2016 02:20:34 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: whack enough tests to pass Message-ID: <56f25fe2.e213c20a.f93e0.780b@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83290:05ae1cad8b2d Date: 2016-03-23 11:19 +0200 http://bitbucket.org/pypy/pypy/changeset/05ae1cad8b2d/ Log: whack enough tests to pass diff --git a/rpython/jit/metainterp/optimizeopt/dependency.py b/rpython/jit/metainterp/optimizeopt/dependency.py --- a/rpython/jit/metainterp/optimizeopt/dependency.py +++ b/rpython/jit/metainterp/optimizeopt/dependency.py @@ -76,14 +76,14 @@ i += 1 continue op = node.getoperation() - if op.is_guard(): + if rop.is_guard(op.opnum): descr = op.getdescr() if not descr: return False assert isinstance(descr, AbstractFailDescr) if not descr.exits_early(): return False - elif not op.is_always_pure(): + elif not rop.is_always_pure(op.opnum): return False i += 1 return True @@ -542,7 +542,7 @@ def __init__(self, loop): self.loop = loop self.label = Node(loop.label, 0) - self.nodes = [ Node(op,0) for op in loop.operations if not op.is_jit_debug() ] + self.nodes = [ Node(op,0) for op in loop.operations if not rop.is_jit_debug(op.opnum) ] for i,node in enumerate(self.nodes): node.opidx = i+1 self.inodes = [] # imaginary nodes @@ -594,9 +594,9 @@ # pass 1 for i,node in enumerate(self.nodes): op = node.op - if op.is_always_pure(): + if rop.is_always_pure(op.opnum): node.setpriority(1) - if op.is_guard(): + if rop.is_guard(op.opnum): node.setpriority(2) # the label operation defines all operations at the # beginning of the loop @@ -607,11 +607,11 @@ # In SSA form. Modifications get a new variable tracker.define(op, node) # usage of defined variables - if op.is_always_pure() or op.is_final(): + if rop.is_always_pure(op.opnum) or rop.is_final(op.opnum): # normal case every arguments definition is set for arg in op.getarglist(): tracker.depends_on_arg(arg, node) - elif op.is_guard(): + elif rop.is_guard(op.opnum): if node.exits_early(): pass else: diff --git a/rpython/jit/metainterp/optimizeopt/schedule.py b/rpython/jit/metainterp/optimizeopt/schedule.py --- a/rpython/jit/metainterp/optimizeopt/schedule.py +++ b/rpython/jit/metainterp/optimizeopt/schedule.py @@ -312,7 +312,7 @@ class LoadRestrict(OpRestrict): def opcount_filling_vector_register(self, op, vec_reg_size): - assert op.is_primitive_load() + assert rop.is_primitive_load(op.opnum) descr = op.getdescr() return vec_reg_size // descr.get_item_size_in_bytes() @@ -332,7 +332,7 @@ return descr.get_item_size_in_bytes() def opcount_filling_vector_register(self, op, vec_reg_size): - assert op.is_primitive_store() + assert rop.is_primitive_store(op.opnum) descr = op.getdescr() return vec_reg_size // descr.get_item_size_in_bytes() @@ -921,7 +921,7 @@ """ left = self.leftmost() if left.returns_void(): - if left.is_primitive_store(): + if rop.is_primitive_store(left.opnum): # make this case more general if it turns out this is # not the only case where packs need to be trashed descr = left.getdescr() diff --git a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py --- a/rpython/jit/metainterp/optimizeopt/test/test_dependency.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_dependency.py @@ -96,7 +96,7 @@ assert oplist_i == len(oplist), msg def parse_loop(self, ops, add_label=True): - loop = self.parse(ops, postprocess=self.postprocess) + 
loop = self.parse(ops) loop.operations = filter(lambda op: op.getopnum() != rop.DEBUG_MERGE_POINT, loop.operations) token = JitCellToken() if add_label: @@ -230,10 +230,7 @@ for i,op in enumerate(loop.operations): print "[",i,"]",op, if op.is_guard(): - if op.rd_snapshot: - print op.rd_snapshot.boxes - else: - print op.getfailargs() + print op.getfailargs() else: print "" diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py @@ -12,7 +12,7 @@ OpHelpers, InputArgRef from rpython.jit.metainterp.resumecode import unpack_numbering from rpython.rlib.rarithmetic import LONG_BIT -from rpython.jit.tool.oparser import parse +from rpython.jit.tool.oparser import parse, convert_loop_to_trace # ____________________________________________________________ @@ -29,7 +29,7 @@ exp = parse(optops, namespace=self.namespace.copy()) expected = convert_old_style_to_targets(exp, jump=True) call_pure_results = self._convert_call_pure_results(call_pure_results) - trace = self.convert_loop_to_packed(loop) + trace = convert_loop_to_trace(loop) compile_data = compile.SimpleCompileData(trace, call_pure_results) info, ops = self._do_optimize_loop(compile_data) @@ -2029,8 +2029,6 @@ if varname not in virtuals: if strict: assert box.same_box(oparse.getvar(varname)) - else: - assert box.getvalue() == oparse.getvar(varname).getvalue() else: tag, resolved, fieldstext = virtuals[varname] if tag[0] == 'virtual': diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebridge.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebridge.py --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebridge.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebridge.py @@ -2,8 +2,9 @@ from rpython.jit.metainterp.optimizeopt.test.test_util import BaseTest,\ LLtypeMixin, convert_old_style_to_targets from rpython.jit.metainterp import compile +from rpython.jit.tool import oparser from rpython.jit.metainterp.resoperation import ResOperation, rop -from rpython.jit.metainterp.history import TargetToken +from rpython.jit.metainterp.history import TargetToken, IntFrontendOp, RefFrontendOp class TestOptimizeBridge(BaseTest, LLtypeMixin): enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" @@ -12,12 +13,7 @@ inline_short_preamble=True, jump_values=None, bridge_values=None): loop = self.parse(ops) - self.set_values(loop.operations, jump_values) - if expected_loop is not None: - xxx - exp_loop = self.parse(expected_loop, postprocess=self.postprocess) - self.assert_equal(loop, convert_old_style_to_targets(exp_loop)) - info = self.unroll_and_optimize(loop, None) + info = self.unroll_and_optimize(loop, None, jump_values=jump_values) jitcell_token = compile.make_jitcell_token(None) mid_label_descr = TargetToken(jitcell_token) mid_label_descr.short_preamble = info.short_preamble @@ -29,12 +25,11 @@ info.preamble.operations[0].setdescr(start_label_descr) guards = [op for op in loop.operations if op.is_guard()] assert len(guards) == 1, "more than one guard in the loop" - bridge = self.parse(bridge_ops, postprocess=self.postprocess) - self.set_values(bridge.operations, bridge_values) - start_label = ResOperation(rop.LABEL, bridge.inputargs) + bridge = self.parse(bridge_ops) bridge.operations[-1].setdescr(jitcell_token) self.add_guard_future_condition(bridge) - data = 
compile.BridgeCompileData(start_label, bridge.operations, + trace = oparser.convert_loop_to_trace(bridge) + data = compile.BridgeCompileData(trace, self.convert_values(bridge.operations[-1].getarglist(), bridge_values), enable_opts=self.enable_opts, inline_short_preamble=inline_short_preamble) bridge_info, ops = self._do_optimize_loop(data) @@ -44,7 +39,7 @@ ops) bridge.inputargs = bridge_info.inputargs bridge.check_consistency(check_descr=False) - expected = self.parse(expected, postprocess=self.postprocess) + expected = self.parse(expected) self.assert_equal(bridge, convert_old_style_to_targets(expected, jump=True)) jump_bridge = bridge.operations[-1] diff --git a/rpython/jit/metainterp/optimizeopt/test/test_unroll.py b/rpython/jit/metainterp/optimizeopt/test/test_unroll.py --- a/rpython/jit/metainterp/optimizeopt/test/test_unroll.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_unroll.py @@ -19,6 +19,7 @@ VirtualStateInfo from rpython.jit.metainterp.optimizeopt import info from rpython.jit.codewriter import heaptracker +from rpython.jit.tool import oparser class FakeOptimizer(object): optearlyforce = None @@ -39,7 +40,7 @@ enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll" def optimize(self, ops): - loop = self.parse(ops, postprocess=self.postprocess) + loop = self.parse(ops) self.add_guard_future_condition(loop) operations = loop.operations jumpop = operations[-1] @@ -54,15 +55,15 @@ token = JitCellToken() start_label = ResOperation(rop.LABEL, inputargs, descr=TargetToken(token)) stop_label = ResOperation(rop.LABEL, jump_args, descr=token) - compile_data = LoopCompileData(start_label, stop_label, operations) + trace = oparser.convert_loop_to_trace(loop) + compile_data = LoopCompileData(trace, inputargs) start_state, newops = self._do_optimize_loop(compile_data) preamble.operations = newops preamble.inputargs = start_state.renamed_inputargs return start_state, loop, preamble def compare_short(self, short, expected_short): - expected_short = self.parse(expected_short, - postprocess=self.postprocess) + expected_short = self.parse(expected_short) remap = {} exp = ([ResOperation(rop.LABEL, expected_short.inputargs)] + expected_short.operations) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py @@ -55,11 +55,11 @@ loop2 = pure_parse(ops, namespace=namespace) loop3 = pure_parse(ops.replace("i2 = int_add", "i2 = int_sub"), namespace=namespace) - assert equaloplists(loop1._get_operations(), loop2._get_operations(), + assert equaloplists(loop1.operations, loop2.operations, remap=make_remap(loop1.inputargs, loop2.inputargs)) py.test.raises(AssertionError, - "equaloplists(loop1._get_operations(), loop3._get_operations()," + "equaloplists(loop1.operations, loop3.operations," "remap=make_remap(loop1.inputargs, loop3.inputargs))") def test_equaloplists_fail_args(): @@ -509,7 +509,6 @@ # invent a GUARD_FUTURE_CONDITION to not have to change all tests if res.operations[-1].getopnum() == rop.JUMP: guard = ResOperation(rop.GUARD_FUTURE_CONDITION, []) - guard.rd_snapshot = resume.TopSnapshot(None, [], []) res.operations.insert(-1, guard) @staticmethod @@ -545,6 +544,24 @@ call_pure_results[list(k)] = v return call_pure_results + def convert_values(self, inpargs, values): + from rpython.jit.metainterp.history import IntFrontendOp, RefFrontendOp + if values: + r = [] + for arg, v in 
zip(inpargs, values): + if arg.type == 'i': + n = IntFrontendOp(0) + if v is not None: + n.setint(v) + else: + n = RefFrontendOp(0) + if v is not None: + n.setref_base(v) + assert arg.type == 'r' + r.append(n) + return r + return inpargs + def unroll_and_optimize(self, loop, call_pure_results=None, jump_values=None): self.add_guard_future_condition(loop) diff --git a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py --- a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py @@ -23,7 +23,7 @@ from rpython.jit.metainterp.optimizeopt.version import LoopVersionInfo from rpython.jit.backend.llsupport.descr import ArrayDescr from rpython.jit.metainterp.optimizeopt.dependency import Node, DependencyGraph -from rpython.jit.tool.oparser import OpParser +from rpython.jit.tool.oparser import OpParser, convert_loop_to_trace from rpython.jit.backend.detect_cpu import getcpuclass CPU = getcpuclass() @@ -81,8 +81,8 @@ jitdriver_sd = FakeJitDriverStaticData() def assert_vectorize(self, loop, expected_loop, call_pure_results=None): - jump = ResOperation(rop.LABEL, loop.jump.getarglist(), loop.jump.getdescr()) - compile_data = compile.LoopCompileData(loop.label, jump, loop.operations) + trace = convert_loop_to_trace(loop) + compile_data = compile.LoopCompileData(trace, loop.jump.getarglist()) state = self._do_optimize_loop(compile_data) loop.label = state[0].label_op loop.opererations = state[1] diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py @@ -20,6 +20,7 @@ from rpython.jit.metainterp.resoperation import ResOperation, rop from rpython.jit.metainterp import resume, compile from rpython.jit.metainterp.optimizeopt import info +from rpython.jit.tool import oparser class FakeOptimizer(Optimizer): def __init__(self, cpu): @@ -812,7 +813,7 @@ class BaseTestBridges(BaseTest): enable_opts = "intbounds:rewrite:virtualize:string:pure:earlyforce:heap:unroll" - def _do_optimize_bridge(self, bridge, call_pure_results): + def _do_optimize_bridge(self, bridge, call_pure_results, values): from rpython.jit.metainterp.optimizeopt import optimize_trace from rpython.jit.metainterp.optimizeopt.util import args_dict @@ -827,8 +828,11 @@ if hasattr(self, 'callinfocollection'): metainterp_sd.callinfocollection = self.callinfocollection # - start_label = ResOperation(rop.LABEL, bridge.inputargs) - data = compile.BridgeCompileData(start_label, bridge.operations, + trace = oparser.convert_loop_to_trace(bridge) + + runtime_boxes = self.convert_values(bridge.operations[-1].getarglist(), + values) + data = compile.BridgeCompileData(trace, runtime_boxes, enable_opts=self.enable_opts, inline_short_preamble=True) info, newops = optimize_trace(metainterp_sd, None, data) @@ -841,24 +845,12 @@ boxvalues=None): if isinstance(loops, str): loops = (loops, ) - loops = [self.parse(loop, postprocess=self.postprocess) + loops = [self.parse(loop) for loop in loops] - bridge = self.parse(bridge, postprocess=self.postprocess) + bridge = self.parse(bridge) self.add_guard_future_condition(bridge) token = JitCellToken() - jump_args = bridge.operations[-1].getarglist() - if boxvalues is not None: - assert isinstance(boxvalues, list) - assert len(jump_args) == len(boxvalues) - for jump_arg, v in zip(jump_args, 
boxvalues): - jump_arg.setref_base(v) for loop in loops: - loop_jump_args = loop.operations[-1].getarglist() - if boxvalues is not None: - assert isinstance(boxvalues, list) - assert len(jump_args) == len(boxvalues) - for jump_arg, v in zip(loop_jump_args, boxvalues): - jump_arg.setref_base(v) info = self.unroll_and_optimize(loop) loop.preamble = info.preamble loop.preamble.operations[0].setdescr(TargetToken(token)) @@ -869,7 +861,7 @@ for b in bridge.inputargs + [op for op in bridge.operations]: boxes[str(b)] = b bridge.operations[-1].setdescr(token) - info = self._do_optimize_bridge(bridge, None) + info = self._do_optimize_bridge(bridge, None, boxvalues) if not info.final(): assert expected == 'RETRACE' return diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py --- a/rpython/jit/metainterp/optimizeopt/vector.py +++ b/rpython/jit/metainterp/optimizeopt/vector.py @@ -660,7 +660,7 @@ # if origin_pack is None: op = lnode.getoperation() - if op.is_primitive_load(): + if rop.is_primitive_load(op.opnum): return Pair(lnode, rnode) else: return Pair(lnode, rnode) From pypy.commits at gmail.com Wed Mar 23 05:30:23 2016 From: pypy.commits at gmail.com (fijal) Date: Wed, 23 Mar 2016 02:30:23 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: leave some notes and improve the tests Message-ID: <56f2622f.6672c20a.4facb.7bb5@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83291:cd7cb1f484a7 Date: 2016-03-23 11:29 +0200 http://bitbucket.org/pypy/pypy/changeset/cd7cb1f484a7/ Log: leave some notes and improve the tests diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -5,6 +5,11 @@ from rpython.rlib.rarithmetic import r_uint32, r_uint from rpython.rlib.objectmodel import always_inline +""" A big note: we don't do heap caches on Consts, because it used +to be done with the identity of the Const instance. This gives very wonky +results at best, so we decided to not do it at all. 
Can be fixed with +interning of Consts (already done on trace anyway) +""" # RefFrontendOp._heapc_flags: HF_LIKELY_VIRTUAL = 0x01 diff --git a/rpython/jit/metainterp/test/test_tracingopts.py b/rpython/jit/metainterp/test/test_tracingopts.py --- a/rpython/jit/metainterp/test/test_tracingopts.py +++ b/rpython/jit/metainterp/test/test_tracingopts.py @@ -74,9 +74,14 @@ def test_heap_caching_while_tracing(self): class A: pass - a1 = A() - a2 = A() + + @jit.dont_look_inside + def get(): + return A() + def fn(n): + a1 = get() + a2 = get() if n > 0: a = a1 else: @@ -91,6 +96,8 @@ self.check_operations_history(getfield_gc_i=0) def fn(n, ca, cb): + a1 = get() + a2 = get() a1.x = n a2.x = n a = a1 From pypy.commits at gmail.com Wed Mar 23 06:12:41 2016 From: pypy.commits at gmail.com (fijal) Date: Wed, 23 Mar 2016 03:12:41 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: work around ll2ctypes problems Message-ID: <56f26c19.cf0b1c0a.20fd6.ffffa879@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83292:8eb9e00f3a89 Date: 2016-03-23 12:11 +0200 http://bitbucket.org/pypy/pypy/changeset/8eb9e00f3a89/ Log: work around ll2ctypes problems diff --git a/rpython/jit/metainterp/test/test_loop.py b/rpython/jit/metainterp/test/test_loop.py --- a/rpython/jit/metainterp/test/test_loop.py +++ b/rpython/jit/metainterp/test/test_loop.py @@ -236,43 +236,49 @@ self.check_trace_count_at_most(19) def test_interp_many_paths_2(self): - myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'node']) - NODE = self._get_NODE() - bytecode = "xxxxxxxb" + import sys + oldlimit = sys.getrecursionlimit() + try: + sys.setrecursionlimit(10000) + myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'node']) + NODE = self._get_NODE() + bytecode = "xxxxxxxb" - def can_enter_jit(i, x, node): - myjitdriver.can_enter_jit(i=i, x=x, node=node) + def can_enter_jit(i, x, node): + myjitdriver.can_enter_jit(i=i, x=x, node=node) - def f(node): - x = 0 - i = 0 - while i < len(bytecode): - myjitdriver.jit_merge_point(i=i, x=x, node=node) - op = bytecode[i] - if op == 'x': - if not node: - break - if node.value < 100: # a pseudo-random choice - x += 1 - node = node.next - elif op == 'b': - i = 0 - can_enter_jit(i, x, node) - continue - i += 1 - return x + def f(node): + x = 0 + i = 0 + while i < len(bytecode): + myjitdriver.jit_merge_point(i=i, x=x, node=node) + op = bytecode[i] + if op == 'x': + if not node: + break + if node.value < 100: # a pseudo-random choice + x += 1 + node = node.next + elif op == 'b': + i = 0 + can_enter_jit(i, x, node) + continue + i += 1 + return x - node1 = self.nullptr(NODE) - for i in range(300): - prevnode = self.malloc(NODE) - prevnode.value = pow(47, i, 199) - prevnode.next = node1 - node1 = prevnode + node1 = self.nullptr(NODE) + for i in range(300): + prevnode = self.malloc(NODE) + prevnode.value = pow(47, i, 199) + prevnode.next = node1 + node1 = prevnode - expected = f(node1) - res = self.meta_interp(f, [node1]) - assert res == expected - self.check_trace_count_at_most(19) + expected = f(node1) + res = self.meta_interp(f, [node1]) + assert res == expected + self.check_trace_count_at_most(19) + finally: + sys.setrecursionlimit(oldlimit) def test_nested_loops(self): myjitdriver = JitDriver(greens = ['i'], reds = ['x', 'y']) diff --git a/rpython/rtyper/lltypesystem/ll2ctypes.py b/rpython/rtyper/lltypesystem/ll2ctypes.py --- a/rpython/rtyper/lltypesystem/ll2ctypes.py +++ b/rpython/rtyper/lltypesystem/ll2ctypes.py @@ -464,6 +464,7 @@ if delayed_converters_was_None: for converter in 
delayed_converters: converter() + remove_regular_struct_content(container) def remove_regular_struct_content(container): From pypy.commits at gmail.com Wed Mar 23 07:12:06 2016 From: pypy.commits at gmail.com (fijal) Date: Wed, 23 Mar 2016 04:12:06 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: try to workaround strange ztests Message-ID: <56f27a06.aa0ac20a.3c345.ffffa4ce@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83293:afa5deec5a6a Date: 2016-03-23 13:11 +0200 http://bitbucket.org/pypy/pypy/changeset/afa5deec5a6a/ Log: try to workaround strange ztests diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -345,9 +345,6 @@ loop_jitcell_token = metainterp.get_procedure_token(greenkey) assert loop_jitcell_token - end_label = ResOperation(rop.LABEL, inputargs[:], - descr=loop_jitcell_token) - #cut_pos = history.get_trace_position() cut = history.get_trace_position() history.record(rop.JUMP, jumpargs[:], None, descr=loop_jitcell_token) enable_opts = jitdriver_sd.warmstate.enable_opts @@ -375,7 +372,10 @@ history.cut(cut) return None - label_token = loop_info.label_op.getdescr() + label_op = loop_info.label_op + if label_op is None: + assert False, "unreachable code" # hint for some strange tests + label_token = label_op.getdescr() assert isinstance(label_token, TargetToken) if label_token.short_preamble: metainterp_sd.logger_ops.log_short_preamble([], diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py --- a/rpython/jit/metainterp/optimizeopt/optimizer.py +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py @@ -22,7 +22,7 @@ REMOVED = AbstractResOp() class LoopInfo(object): - pass + label_op = None class BasicLoopInfo(LoopInfo): def __init__(self, inputargs, quasi_immutable_deps, jump_op): From pypy.commits at gmail.com Wed Mar 23 07:33:25 2016 From: pypy.commits at gmail.com (fijal) Date: Wed, 23 Mar 2016 04:33:25 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: attempt to fix a strange translation issue where all floats are 0.0 Message-ID: <56f27f05.a2afc20a.e60d4.ffffac7a@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83294:46fa7f87975d Date: 2016-03-23 13:32 +0200 http://bitbucket.org/pypy/pypy/changeset/46fa7f87975d/ Log: attempt to fix a strange translation issue where all floats are 0.0 diff --git a/rpython/translator/c/src/float.h b/rpython/translator/c/src/float.h --- a/rpython/translator/c/src/float.h +++ b/rpython/translator/c/src/float.h @@ -41,7 +41,7 @@ #ifdef HAVE_LONG_LONG #define OP_CAST_FLOAT_TO_LONGLONG(x,r) r = (long long)(x) #define OP_CAST_FLOAT_TO_ULONGLONG(x,r) r = (unsigned long long)(x) -#define OP_CONVERT_FLOAT_BYTES_TO_LONGLONG(x,r) memcpy(&r, &x, sizeof(double)) -#define OP_CONVERT_LONGLONG_BYTES_TO_FLOAT(x,r) memcpy(&r, &x, sizeof(long long)) +#define OP_CONVERT_FLOAT_BYTES_TO_LONGLONG(x,r) { double _f = x; memcpy(&r, &_f, sizeof(double)) } +#define OP_CONVERT_LONGLONG_BYTES_TO_FLOAT(x,r) { long long _f = x; memcpy(&r, &_f, sizeof(long long)) } #endif From pypy.commits at gmail.com Wed Mar 23 07:40:58 2016 From: pypy.commits at gmail.com (fijal) Date: Wed, 23 Mar 2016 04:40:58 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: oops Message-ID: <56f280ca.c65b1c0a.394c7.ffff841b@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83295:33fa6cc13998 Date: 2016-03-23 13:40 +0200 
http://bitbucket.org/pypy/pypy/changeset/33fa6cc13998/ Log: oops diff --git a/rpython/translator/c/src/float.h b/rpython/translator/c/src/float.h --- a/rpython/translator/c/src/float.h +++ b/rpython/translator/c/src/float.h @@ -41,7 +41,7 @@ #ifdef HAVE_LONG_LONG #define OP_CAST_FLOAT_TO_LONGLONG(x,r) r = (long long)(x) #define OP_CAST_FLOAT_TO_ULONGLONG(x,r) r = (unsigned long long)(x) -#define OP_CONVERT_FLOAT_BYTES_TO_LONGLONG(x,r) { double _f = x; memcpy(&r, &_f, sizeof(double)) } -#define OP_CONVERT_LONGLONG_BYTES_TO_FLOAT(x,r) { long long _f = x; memcpy(&r, &_f, sizeof(long long)) } +#define OP_CONVERT_FLOAT_BYTES_TO_LONGLONG(x,r) { double _f = x; memcpy(&r, &_f, sizeof(double)); } +#define OP_CONVERT_LONGLONG_BYTES_TO_FLOAT(x,r) { long long _f = x; memcpy(&r, &_f, sizeof(long long)); } #endif From pypy.commits at gmail.com Wed Mar 23 09:52:24 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 23 Mar 2016 06:52:24 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: fix test_tracingopts: in some cases heapcache got better, in some cases the Message-ID: <56f29f98.865a1c0a.651dd.ffffff7a@mx.google.com> Author: Carl Friedrich Bolz Branch: jit-leaner-frontend Changeset: r83296:552d6357376b Date: 2016-03-23 14:50 +0100 http://bitbucket.org/pypy/pypy/changeset/552d6357376b/ Log: fix test_tracingopts: in some cases heapcache got better, in some cases the tests depended on optimizing constants (which will be fixed later) diff --git a/rpython/jit/metainterp/test/test_tracingopts.py b/rpython/jit/metainterp/test/test_tracingopts.py --- a/rpython/jit/metainterp/test/test_tracingopts.py +++ b/rpython/jit/metainterp/test/test_tracingopts.py @@ -112,7 +112,7 @@ self.check_operations_history(getfield_gc_i=1) res = self.interp_operations(fn, [-7, 1, 1]) assert res == -7 * 2 - self.check_operations_history(getfield_gc_i=1) + self.check_operations_history(getfield_gc_i=0) def test_heap_caching_nonnull(self): class A: @@ -139,17 +139,18 @@ def test_heap_caching_while_tracing_invalidation(self): class A: pass - a1 = A() - a2 = A() @jit.dont_look_inside def f(a): a.x = 5 + @jit.dont_look_inside + def get(): + return A() l = [1] def fn(n): if n > 0: - a = a1 + a = get() else: - a = a2 + a = get() a.x = n x1 = a.x f(a) @@ -163,13 +164,14 @@ def test_heap_caching_dont_store_same(self): class A: pass - a1 = A() - a2 = A() + @jit.dont_look_inside + def get(): + return A() def fn(n): if n > 0: - a = a1 + a = get() else: - a = a2 + a = get() a.x = n a.x = n return a.x @@ -241,14 +243,16 @@ def test_array_and_getfield_interaction(self): class A: pass - a1 = A() - a2 = A() - a1.l = a2.l = [0, 0] + @jit.dont_look_inside + def get(): + a = A() + a.l = [0, 0] + return a def fn(n): if n > 0: - a = a1 + a = get() else: - a = a2 + a = get() a.l = [0, 0] a.x = 0 a.l[a.x] = n @@ -265,15 +269,17 @@ def test_promote_changes_heap_cache(self): class A: pass - a1 = A() - a2 = A() - a1.l = a2.l = [0, 0] - a1.x = a2.x = 0 + @jit.dont_look_inside + def get(): + a = A() + a.l = [0, 0] + a.x = 0 + return a def fn(n): if n > 0: - a = a1 + a = get() else: - a = a2 + a = get() a.l = [0, 0] jit.promote(a.x) a.l[a.x] = n @@ -290,13 +296,11 @@ getfield_gc_r=1) def test_promote_changes_array_cache(self): - a1 = [0, 0] - a2 = [0, 0] + @jit.dont_look_inside + def get(): + return [0, 0] def fn(n): - if n > 0: - a = a1 - else: - a = a2 + a = get() a[0] = n jit.hint(n, promote=True) x1 = a[0] @@ -312,13 +316,12 @@ def test_list_caching(self): - a1 = [0, 0] - a2 = [0, 0] + @jit.dont_look_inside + def get(): + return [0, 0] def 
fn(n): - if n > 0: - a = a1 - else: - a = a2 + a = get() + if not n > 0: if n < -1000: a.append(5) a[0] = n @@ -335,6 +338,8 @@ getfield_gc_r=1) def fn(n, ca, cb): + a1 = get() + a2 = get() a1[0] = n a2[0] = n a = a1 @@ -349,11 +354,11 @@ res = self.interp_operations(fn, [7, 0, 1]) assert res == 7 * 2 self.check_operations_history(getarrayitem_gc_i=1, - getfield_gc_r=3) + getfield_gc_r=2) res = self.interp_operations(fn, [-7, 1, 1]) assert res == -7 * 2 - self.check_operations_history(getarrayitem_gc_i=1, - getfield_gc_r=3) + self.check_operations_history(getarrayitem_gc_i=0, + getfield_gc_r=2) def test_list_caching_negative(self): def fn(n): @@ -451,17 +456,15 @@ def test_heap_caching_and_elidable_function(self): class A: pass - class B: pass - a1 = A() - a1.y = 6 - a2 = A() - a2.y = 13 + @jit.dont_look_inside + def get(): + return A() @jit.elidable def f(b): return b + 1 def fn(n): if n > 0: - a = a1 + a = get() else: a = A() a.x = n From pypy.commits at gmail.com Wed Mar 23 13:47:27 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 23 Mar 2016 10:47:27 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: A failing test, derived from a failing translated pypy. Works on default. Message-ID: <56f2d6af.06b01c0a.e1580.5f21@mx.google.com> Author: Armin Rigo Branch: jit-leaner-frontend Changeset: r83298:0520b6a02d2a Date: 2016-03-23 18:46 +0100 http://bitbucket.org/pypy/pypy/changeset/0520b6a02d2a/ Log: A failing test, derived from a failing translated pypy. Works on default. diff --git a/rpython/jit/metainterp/test/test_virtualref.py b/rpython/jit/metainterp/test/test_virtualref.py --- a/rpython/jit/metainterp/test/test_virtualref.py +++ b/rpython/jit/metainterp/test/test_virtualref.py @@ -697,6 +697,58 @@ 'force_token': 2, 'setfield_gc': 1 }) + def test_vref_like_pypy(self): + myjitdriver = JitDriver(greens=['n'], reds=['i', 'k', 'ec', 'frame']) + + class ExecutionContext(object): + topframeref = vref_None + + def enter(self, frame): + frame.f_backref = self.topframeref + self.topframeref = virtual_ref(frame) + + def leave(self, frame): + frame_vref = self.topframeref + self.topframeref = frame.f_backref + f_back = frame.f_backref() + if f_back: + f_back.escaped = True + frame_vref() + virtual_ref_finish(frame_vref, frame) + + class PyFrame(object): + escaped = False + + def dispatch(ec, frame, n, i): + k = i + while True: + myjitdriver.jit_merge_point(n=n, ec=ec, frame=frame, i=i, k=k) + i += 1 + if n == 1: + execute_frame(ec, 2, i) + if i >= 10: + break + elif n == 2: + execute_frame(ec, 3, i) + if i >= k + 3: + break + elif n == 3: + if i % 3 == 0: + break + + def execute_frame(ec, n, i): + frame = PyFrame() + ec.enter(frame) + dispatch(ec, frame, n, i) + ec.leave(frame) + return n + + def entry_point(): + return execute_frame(ExecutionContext(), 1, 0) + + assert entry_point() == 1 + self.meta_interp(entry_point, [], inline=True) + class TestLLtype(VRefTests, LLJitMixin): pass From pypy.commits at gmail.com Wed Mar 23 13:51:41 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 23 Mar 2016 10:51:41 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: Reduce a bit the test Message-ID: <56f2d7ad.2968c20a.c179e.4102@mx.google.com> Author: Armin Rigo Branch: jit-leaner-frontend Changeset: r83299:222b853e614a Date: 2016-03-23 18:51 +0100 http://bitbucket.org/pypy/pypy/changeset/222b853e614a/ Log: Reduce a bit the test diff --git a/rpython/jit/metainterp/test/test_virtualref.py b/rpython/jit/metainterp/test/test_virtualref.py --- 
a/rpython/jit/metainterp/test/test_virtualref.py +++ b/rpython/jit/metainterp/test/test_virtualref.py @@ -698,7 +698,7 @@ }) def test_vref_like_pypy(self): - myjitdriver = JitDriver(greens=['n'], reds=['i', 'k', 'ec', 'frame']) + myjitdriver = JitDriver(greens=['n'], reds=['i', 'ec', 'frame']) class ExecutionContext(object): topframeref = vref_None @@ -710,31 +710,26 @@ def leave(self, frame): frame_vref = self.topframeref self.topframeref = frame.f_backref - f_back = frame.f_backref() - if f_back: - f_back.escaped = True - frame_vref() + frame.f_backref() virtual_ref_finish(frame_vref, frame) class PyFrame(object): - escaped = False + pass def dispatch(ec, frame, n, i): - k = i while True: - myjitdriver.jit_merge_point(n=n, ec=ec, frame=frame, i=i, k=k) + myjitdriver.jit_merge_point(n=n, ec=ec, frame=frame, i=i) i += 1 if n == 1: - execute_frame(ec, 2, i) + execute_frame(ec, 2, 0) if i >= 10: break elif n == 2: execute_frame(ec, 3, i) - if i >= k + 3: + if i == 2: break elif n == 3: - if i % 3 == 0: - break + break def execute_frame(ec, n, i): frame = PyFrame() From pypy.commits at gmail.com Wed Mar 23 13:53:50 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 23 Mar 2016 10:53:50 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: Reduce a little bit more Message-ID: <56f2d82e.465ec20a.c6bc6.40af@mx.google.com> Author: Armin Rigo Branch: jit-leaner-frontend Changeset: r83300:914ec40a47a8 Date: 2016-03-23 18:53 +0100 http://bitbucket.org/pypy/pypy/changeset/914ec40a47a8/ Log: Reduce a little bit more diff --git a/rpython/jit/metainterp/test/test_virtualref.py b/rpython/jit/metainterp/test/test_virtualref.py --- a/rpython/jit/metainterp/test/test_virtualref.py +++ b/rpython/jit/metainterp/test/test_virtualref.py @@ -716,30 +716,31 @@ class PyFrame(object): pass - def dispatch(ec, frame, n, i): + def dispatch(ec, frame, n): + i = 0 while True: myjitdriver.jit_merge_point(n=n, ec=ec, frame=frame, i=i) i += 1 if n == 1: - execute_frame(ec, 2, 0) + execute_frame(ec, 2) if i >= 10: break elif n == 2: - execute_frame(ec, 3, i) + execute_frame(ec, 3) if i == 2: break elif n == 3: break - def execute_frame(ec, n, i): + def execute_frame(ec, n): frame = PyFrame() ec.enter(frame) - dispatch(ec, frame, n, i) + dispatch(ec, frame, n) ec.leave(frame) return n def entry_point(): - return execute_frame(ExecutionContext(), 1, 0) + return execute_frame(ExecutionContext(), 1) assert entry_point() == 1 self.meta_interp(entry_point, [], inline=True) From pypy.commits at gmail.com Wed Mar 23 14:18:05 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 23 Mar 2016 11:18:05 -0700 (PDT) Subject: [pypy-commit] pypy default: A macro called lock_release() seems to create conflicts on some OS/X systems. Message-ID: <56f2dddd.aa09c20a.f86d.489f@mx.google.com> Author: Armin Rigo Branch: Changeset: r83301:4ad4991ec0ab Date: 2016-03-23 19:17 +0100 http://bitbucket.org/pypy/pypy/changeset/4ad4991ec0ab/ Log: A macro called lock_release() seems to create conflicts on some OS/X systems. 
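
A minimal sketch for orientation, not part of the changeset itself: the demo_* names below are invented for illustration, while the pypy_-prefixed macros, rpy_fastgil and RPyGilAcquireSlowPath are the names that appear in the diff that follows. The log above reports that the unprefixed name lock_release conflicts with something else on some OS/X systems; adding a pypy_ prefix sidesteps the clash while the macros still expand to the same GCC __sync_* builtins used by the pthread implementation.

    /* Sketch only: demo_* identifiers are hypothetical; the two macros
       mirror the thread_pthread.h definitions in the diff below. */
    #define pypy_lock_test_and_set(ptr, value) __sync_lock_test_and_set(ptr, value)
    #define pypy_lock_release(ptr)             __sync_lock_release(ptr)

    static long demo_fastgil;                  /* stand-in for rpy_fastgil */

    static void demo_gil_acquire(void)
    {
        long old = pypy_lock_test_and_set(&demo_fastgil, 1);
        if (old != 0) {
            /* already held by another thread: the real code takes the
               slow path (RPyGilAcquireSlowPath) at this point */
        }
    }

    static void demo_gil_release(void)
    {
        pypy_lock_release(&demo_fastgil);
    }

The full renaming across the thread headers and sources is in the diff below.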
diff --git a/rpython/translator/c/src/thread.h b/rpython/translator/c/src/thread.h --- a/rpython/translator/c/src/thread.h +++ b/rpython/translator/c/src/thread.h @@ -42,13 +42,13 @@ RPY_EXTERN long rpy_fastgil; static inline void _RPyGilAcquire(void) { - long old_fastgil = lock_test_and_set(&rpy_fastgil, 1); + long old_fastgil = pypy_lock_test_and_set(&rpy_fastgil, 1); if (old_fastgil != 0) RPyGilAcquireSlowPath(old_fastgil); } static inline void _RPyGilRelease(void) { assert(RPY_FASTGIL_LOCKED(rpy_fastgil)); - lock_release(&rpy_fastgil); + pypy_lock_release(&rpy_fastgil); } static inline long *_RPyFetchFastGil(void) { return &rpy_fastgil; diff --git a/rpython/translator/c/src/thread_gil.c b/rpython/translator/c/src/thread_gil.c --- a/rpython/translator/c/src/thread_gil.c +++ b/rpython/translator/c/src/thread_gil.c @@ -70,7 +70,7 @@ { /* Acquires the GIL. This assumes that we already did: - old_fastgil = lock_test_and_set(&rpy_fastgil, 1); + old_fastgil = pypy_lock_test_and_set(&rpy_fastgil, 1); */ if (!RPY_FASTGIL_LOCKED(old_fastgil)) { /* The fastgil was not previously locked: success. @@ -122,7 +122,7 @@ released. */ if (!RPY_FASTGIL_LOCKED(rpy_fastgil)) { - old_fastgil = lock_test_and_set(&rpy_fastgil, 1); + old_fastgil = pypy_lock_test_and_set(&rpy_fastgil, 1); if (!RPY_FASTGIL_LOCKED(old_fastgil)) /* yes, got a non-held value! Now we hold it. */ break; diff --git a/rpython/translator/c/src/thread_nt.c b/rpython/translator/c/src/thread_nt.c --- a/rpython/translator/c/src/thread_nt.c +++ b/rpython/translator/c/src/thread_nt.c @@ -245,7 +245,7 @@ LeaveCriticalSection(mutex); } -//#define lock_test_and_set(ptr, value) see thread_nt.h +//#define pypy_lock_test_and_set(ptr, value) see thread_nt.h #define atomic_increment(ptr) InterlockedIncrement(ptr) #define atomic_decrement(ptr) InterlockedDecrement(ptr) diff --git a/rpython/translator/c/src/thread_nt.h b/rpython/translator/c/src/thread_nt.h --- a/rpython/translator/c/src/thread_nt.h +++ b/rpython/translator/c/src/thread_nt.h @@ -34,8 +34,8 @@ #ifdef _M_IA64 /* On Itanium, use 'acquire' memory ordering semantics */ -#define lock_test_and_set(ptr, value) InterlockedExchangeAcquire(ptr, value) +#define pypy_lock_test_and_set(ptr, value) InterlockedExchangeAcquire(ptr,value) #else -#define lock_test_and_set(ptr, value) InterlockedExchange(ptr, value) +#define pypy_lock_test_and_set(ptr, value) InterlockedExchange(ptr, value) #endif -#define lock_release(ptr) (*((volatile long *)ptr) = 0) +#define pypy_lock_release(ptr) (*((volatile long *)ptr) = 0) diff --git a/rpython/translator/c/src/thread_pthread.c b/rpython/translator/c/src/thread_pthread.c --- a/rpython/translator/c/src/thread_pthread.c +++ b/rpython/translator/c/src/thread_pthread.c @@ -546,7 +546,7 @@ return result; } -//#define lock_test_and_set(ptr, value) see thread_pthread.h +//#define pypy_lock_test_and_set(ptr, value) see thread_pthread.h #define atomic_increment(ptr) __sync_fetch_and_add(ptr, 1) #define atomic_decrement(ptr) __sync_fetch_and_sub(ptr, 1) #define HAVE_PTHREAD_ATFORK 1 diff --git a/rpython/translator/c/src/thread_pthread.h b/rpython/translator/c/src/thread_pthread.h --- a/rpython/translator/c/src/thread_pthread.h +++ b/rpython/translator/c/src/thread_pthread.h @@ -80,5 +80,5 @@ void RPyThreadAfterFork(void); -#define lock_test_and_set(ptr, value) __sync_lock_test_and_set(ptr, value) -#define lock_release(ptr) __sync_lock_release(ptr) +#define pypy_lock_test_and_set(ptr, value) __sync_lock_test_and_set(ptr, value) +#define pypy_lock_release(ptr) 
__sync_lock_release(ptr) diff --git a/rpython/translator/c/src/threadlocal.c b/rpython/translator/c/src/threadlocal.c --- a/rpython/translator/c/src/threadlocal.c +++ b/rpython/translator/c/src/threadlocal.c @@ -15,14 +15,14 @@ static int check_valid(void); void _RPython_ThreadLocals_Acquire(void) { - while (!lock_test_and_set(&pypy_threadlocal_lock, 1)) { + while (!pypy_lock_test_and_set(&pypy_threadlocal_lock, 1)) { /* busy loop */ } assert(check_valid()); } void _RPython_ThreadLocals_Release(void) { assert(check_valid()); - lock_release(&pypy_threadlocal_lock); + pypy_lock_release(&pypy_threadlocal_lock); } From pypy.commits at gmail.com Wed Mar 23 14:18:36 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 23 Mar 2016 11:18:36 -0700 (PDT) Subject: [pypy-commit] pypy release-5.x: A macro called lock_release() seems to create conflicts on some OS/X systems. Message-ID: <56f2ddfc.0113c20a.d286.479b@mx.google.com> Author: Armin Rigo Branch: release-5.x Changeset: r83302:115dad60c65d Date: 2016-03-23 19:17 +0100 http://bitbucket.org/pypy/pypy/changeset/115dad60c65d/ Log: A macro called lock_release() seems to create conflicts on some OS/X systems. diff --git a/rpython/translator/c/src/thread.h b/rpython/translator/c/src/thread.h --- a/rpython/translator/c/src/thread.h +++ b/rpython/translator/c/src/thread.h @@ -42,13 +42,13 @@ RPY_EXTERN long rpy_fastgil; static inline void _RPyGilAcquire(void) { - long old_fastgil = lock_test_and_set(&rpy_fastgil, 1); + long old_fastgil = pypy_lock_test_and_set(&rpy_fastgil, 1); if (old_fastgil != 0) RPyGilAcquireSlowPath(old_fastgil); } static inline void _RPyGilRelease(void) { assert(RPY_FASTGIL_LOCKED(rpy_fastgil)); - lock_release(&rpy_fastgil); + pypy_lock_release(&rpy_fastgil); } static inline long *_RPyFetchFastGil(void) { return &rpy_fastgil; diff --git a/rpython/translator/c/src/thread_gil.c b/rpython/translator/c/src/thread_gil.c --- a/rpython/translator/c/src/thread_gil.c +++ b/rpython/translator/c/src/thread_gil.c @@ -70,7 +70,7 @@ { /* Acquires the GIL. This assumes that we already did: - old_fastgil = lock_test_and_set(&rpy_fastgil, 1); + old_fastgil = pypy_lock_test_and_set(&rpy_fastgil, 1); */ if (!RPY_FASTGIL_LOCKED(old_fastgil)) { /* The fastgil was not previously locked: success. @@ -122,7 +122,7 @@ released. */ if (!RPY_FASTGIL_LOCKED(rpy_fastgil)) { - old_fastgil = lock_test_and_set(&rpy_fastgil, 1); + old_fastgil = pypy_lock_test_and_set(&rpy_fastgil, 1); if (!RPY_FASTGIL_LOCKED(old_fastgil)) /* yes, got a non-held value! Now we hold it. 
*/ break; diff --git a/rpython/translator/c/src/thread_nt.c b/rpython/translator/c/src/thread_nt.c --- a/rpython/translator/c/src/thread_nt.c +++ b/rpython/translator/c/src/thread_nt.c @@ -245,7 +245,7 @@ LeaveCriticalSection(mutex); } -//#define lock_test_and_set(ptr, value) see thread_nt.h +//#define pypy_lock_test_and_set(ptr, value) see thread_nt.h #define atomic_increment(ptr) InterlockedIncrement(ptr) #define atomic_decrement(ptr) InterlockedDecrement(ptr) diff --git a/rpython/translator/c/src/thread_nt.h b/rpython/translator/c/src/thread_nt.h --- a/rpython/translator/c/src/thread_nt.h +++ b/rpython/translator/c/src/thread_nt.h @@ -34,8 +34,8 @@ #ifdef _M_IA64 /* On Itanium, use 'acquire' memory ordering semantics */ -#define lock_test_and_set(ptr, value) InterlockedExchangeAcquire(ptr, value) +#define pypy_lock_test_and_set(ptr, value) InterlockedExchangeAcquire(ptr,value) #else -#define lock_test_and_set(ptr, value) InterlockedExchange(ptr, value) +#define pypy_lock_test_and_set(ptr, value) InterlockedExchange(ptr, value) #endif -#define lock_release(ptr) (*((volatile long *)ptr) = 0) +#define pypy_lock_release(ptr) (*((volatile long *)ptr) = 0) diff --git a/rpython/translator/c/src/thread_pthread.c b/rpython/translator/c/src/thread_pthread.c --- a/rpython/translator/c/src/thread_pthread.c +++ b/rpython/translator/c/src/thread_pthread.c @@ -546,7 +546,7 @@ return result; } -//#define lock_test_and_set(ptr, value) see thread_pthread.h +//#define pypy_lock_test_and_set(ptr, value) see thread_pthread.h #define atomic_increment(ptr) __sync_fetch_and_add(ptr, 1) #define atomic_decrement(ptr) __sync_fetch_and_sub(ptr, 1) #define HAVE_PTHREAD_ATFORK 1 diff --git a/rpython/translator/c/src/thread_pthread.h b/rpython/translator/c/src/thread_pthread.h --- a/rpython/translator/c/src/thread_pthread.h +++ b/rpython/translator/c/src/thread_pthread.h @@ -80,5 +80,5 @@ void RPyThreadAfterFork(void); -#define lock_test_and_set(ptr, value) __sync_lock_test_and_set(ptr, value) -#define lock_release(ptr) __sync_lock_release(ptr) +#define pypy_lock_test_and_set(ptr, value) __sync_lock_test_and_set(ptr, value) +#define pypy_lock_release(ptr) __sync_lock_release(ptr) diff --git a/rpython/translator/c/src/threadlocal.c b/rpython/translator/c/src/threadlocal.c --- a/rpython/translator/c/src/threadlocal.c +++ b/rpython/translator/c/src/threadlocal.c @@ -15,14 +15,14 @@ static int check_valid(void); void _RPython_ThreadLocals_Acquire(void) { - while (!lock_test_and_set(&pypy_threadlocal_lock, 1)) { + while (!pypy_lock_test_and_set(&pypy_threadlocal_lock, 1)) { /* busy loop */ } assert(check_valid()); } void _RPython_ThreadLocals_Release(void) { assert(check_valid()); - lock_release(&pypy_threadlocal_lock); + pypy_lock_release(&pypy_threadlocal_lock); } From pypy.commits at gmail.com Wed Mar 23 15:51:31 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 23 Mar 2016 12:51:31 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: hg merge rposix-for-3 Message-ID: <56f2f3c3.4412c30a.3f14c.6ece@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83303:cf992a2a931c Date: 2016-03-23 17:41 +0000 http://bitbucket.org/pypy/pypy/changeset/cf992a2a931c/ Log: hg merge rposix-for-3 diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -1739,6 +1739,8 @@ AT_SYMLINK_NOFOLLOW = rffi_platform.DefinedConstantInteger('AT_SYMLINK_NOFOLLOW') AT_EACCESS = rffi_platform.DefinedConstantInteger('AT_EACCESS') AT_REMOVEDIR = 
rffi_platform.DefinedConstantInteger('AT_REMOVEDIR') + UTIME_NOW = rffi_platform.DefinedConstantInteger('UTIME_NOW') + UTIME_OMIT = rffi_platform.DefinedConstantInteger('UTIME_OMIT') TIMESPEC = rffi_platform.Struct('struct timespec', [ ('tv_sec', rffi.TIME_T), ('tv_nsec', rffi.LONG)]) @@ -1790,20 +1792,30 @@ c_futimens = external('futimens', [rffi.INT, TIMESPEC2P], rffi.INT) def futimens(fd, atime, atime_ns, mtime, mtime_ns): - l_times = lltype.malloc(TIMESPEC, 2, flavor='raw') + l_times = lltype.malloc(TIMESPEC2P.TO, 2, flavor='raw') rffi.setintfield(l_times[0], 'c_tv_sec', atime) rffi.setintfield(l_times[0], 'c_tv_nsec', atime_ns) rffi.setintfield(l_times[1], 'c_tv_sec', mtime) rffi.setintfield(l_times[1], 'c_tv_nsec', mtime_ns) error = c_futimens(fd, l_times) + lltype.free(l_times, flavor='raw') handle_posix_error('futimens', error) if HAVE_UTIMENSAT: - c_utimensat = external('utimensat', [rffi.INT, TIMESPEC2P], rffi.INT) + c_utimensat = external('utimensat', + [rffi.INT, rffi.CCHARP, TIMESPEC2P, rffi.INT], rffi.INT) def utimensat(pathname, atime, atime_ns, mtime, mtime_ns, dir_fd=AT_FDCWD, follow_symlinks=True): - l_times = lltype.malloc(TIMESPEC, 2, flavor='raw') + """Wrapper around utimensat(2) + + To set access time to the current time, pass atime_ns=UTIME_NOW, + atime is then ignored. + + To set modification time to the current time, pass mtime_ns=UTIME_NOW, + mtime is then ignored. + """ + l_times = lltype.malloc(TIMESPEC2P.TO, 2, flavor='raw') rffi.setintfield(l_times[0], 'c_tv_sec', atime) rffi.setintfield(l_times[0], 'c_tv_nsec', atime_ns) rffi.setintfield(l_times[1], 'c_tv_sec', mtime) @@ -1812,7 +1824,8 @@ flag = 0 else: flag = AT_SYMLINK_NOFOLLOW - error = c_futimens(dir_fd, pathname, l_times, flag) + error = c_utimensat(dir_fd, pathname, l_times, flag) + lltype.free(l_times, flavor='raw') handle_posix_error('utimensat', error) if HAVE_MKDIRAT: diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -497,6 +497,17 @@ os.close(dirfd) assert not os.path.exists(self.ufilename) + def test_utimensat(self): + def f(dirfd): + return rposix.utimensat('test_open_ascii', + 0, rposix.UTIME_NOW, 0, rposix.UTIME_NOW, dir_fd=dirfd) + + dirfd = os.open(os.path.dirname(self.ufilename), os.O_RDONLY) + try: + interpret(f, [dirfd]) # does not crash + finally: + os.close(dirfd) + class TestPosixUnicode(BasePosixUnicodeOrAscii): def _get_filename(self): From pypy.commits at gmail.com Wed Mar 23 15:51:32 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 23 Mar 2016 12:51:32 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Use utimensat() in os.utime() if available Message-ID: <56f2f3c4.c856c20a.787a3.7015@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83304:3869fa0518a7 Date: 2016-03-23 16:53 +0000 http://bitbucket.org/pypy/pypy/changeset/3869fa0518a7/ Log: Use utimensat() in os.utime() if available diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1,5 +1,6 @@ import os import sys +from math import modf from rpython.rlib import rposix, rposix_stat from rpython.rlib import objectmodel, rurandom @@ -1137,8 +1138,10 @@ raise wrap_oserror(space, e) return space.wrap(ret) - at unwrap_spec(dir_fd=DirFD(available=False), follow_symlinks=kwonly(bool)) -def utime(space, w_path, w_tuple, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): + + at 
unwrap_spec(w_ns=kwonly(WrappedDefault(None)), + dir_fd=DirFD(rposix.HAVE_UTIMENSAT), follow_symlinks=kwonly(bool)) +def utime(space, w_path, w_times, w_ns, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True): """utime(path, times=None, *, ns=None, dir_fd=None, follow_symlinks=True) Set the access and modified time of path. @@ -1164,7 +1167,28 @@ as an open file descriptor. dir_fd and follow_symlinks may not be available on your platform. If they are unavailable, using them will raise a NotImplementedError.""" - if space.is_w(w_tuple, space.w_None): + if (not space.is_w(w_times, space.w_None) and + not space.is_w(w_ns, space.w_None)): + raise oefmt(space.w_ValueError, + "utime: you may specify either 'times' or 'ns' but not both") + + if rposix.HAVE_UTIMENSAT: + path = space.fsencode_w(w_path) + try: + _utimensat(space, path, w_times, w_ns, dir_fd, follow_symlinks) + return + except OSError, e: + raise wrap_oserror2(space, e, w_path) + + if not follow_symlinks: + raise oefmt( + space.w_NotImplementedError, + "follow_symlinks unavailable on this platform") + + if not space.is_w(w_ns, space.w_None): + raise oefmt(space.w_NotImplementedError, + "utime: 'ns' unsupported on this platform on PyPy") + if space.is_w(w_times, space.w_None): try: dispatch_filename(rposix.utime, 1)(space, w_path, None) return @@ -1172,7 +1196,7 @@ raise wrap_oserror2(space, e, w_path) try: msg = "utime() arg 2 must be a tuple (atime, mtime) or None" - args_w = space.fixedview(w_tuple) + args_w = space.fixedview(w_times) if len(args_w) != 2: raise OperationError(space.w_TypeError, space.wrap(msg)) actime = space.float_w(args_w[0], allow_conversion=False) @@ -1185,6 +1209,51 @@ raise raise OperationError(space.w_TypeError, space.wrap(msg)) + +def _utimensat(space, path, w_times, w_ns, dir_fd, follow_symlinks): + if space.is_w(w_times, space.w_None) and space.is_w(w_ns, space.w_None): + atime_s = mtime_s = 0 + atime_ns = mtime_ns = rposix.UTIME_NOW + elif not space.is_w(w_times, space.w_None): + times_w = space.fixedview(w_times) + if len(times_w) != 2: + raise oefmt(space.w_TypeError, + "utime: 'ns' must be a tuple of two ints") + atime_s, atime_ns = convert_seconds(space, times_w[0]) + mtime_s, mtime_ns = convert_seconds(space, times_w[1]) + else: + args_w = space.fixedview(w_ns) + if len(args_w) != 2: + raise oefmt(space.w_TypeError, + "utime: 'ns' must be a tuple of two ints") + atime_s, atime_ns = convert_ns(space, args_w[0]) + mtime_s, mtime_ns = convert_ns(space, args_w[1]) + + rposix.utimensat( + path, atime_s, atime_ns, mtime_s, mtime_ns, + dir_fd=dir_fd, follow_symlinks=follow_symlinks) + +def convert_seconds(space, w_time): + if space.isinstance_w(w_time, space.w_float): + time = space.float_w(w_time) + intpart, floatpart = modf(time) + if floatpart < 0: + floatpart += 1. + intpart -= 1. 
+ return int(intpart), int(floatpart*1e9) + else: + time = space.int_w(w_time) + return time, 0 + +def convert_ns(space, w_ns_time): + w_billion = space.wrap(1000000000) + w_res = space.divmod(w_ns_time, w_billion) + res_w = space.fixedview(w_res) + time_int = space.int_w(res_w[0]) + time_frac = space.int_w(res_w[1]) + return time_int, time_frac + + def uname(space): """ uname() -> (sysname, nodename, release, version, machine) From pypy.commits at gmail.com Wed Mar 23 15:55:38 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 23 Mar 2016 12:55:38 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: fix an error message Message-ID: <56f2f4ba.465ec20a.c6bc6.7033@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83305:600c526320a0 Date: 2016-03-23 19:54 +0000 http://bitbucket.org/pypy/pypy/changeset/600c526320a0/ Log: fix an error message diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1218,7 +1218,7 @@ times_w = space.fixedview(w_times) if len(times_w) != 2: raise oefmt(space.w_TypeError, - "utime: 'ns' must be a tuple of two ints") + "utime: 'times' must be either a tuple of two ints or None") atime_s, atime_ns = convert_seconds(space, times_w[0]) mtime_s, mtime_ns = convert_seconds(space, times_w[1]) else: From pypy.commits at gmail.com Wed Mar 23 16:20:54 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 23 Mar 2016 13:20:54 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Use faccessat() in os.access() if available Message-ID: <56f2faa6.10921c0a.930d1.ffff9bd0@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83306:53f2a4c897c4 Date: 2016-03-23 20:19 +0000 http://bitbucket.org/pypy/pypy/changeset/53f2a4c897c4/ Log: Use faccessat() in os.access() if available diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -136,6 +136,11 @@ def DirFD(available=False): return _DirFD if available else _DirFD_Unavailable + at specialize.arg(1, 2) +def argument_unavailable(space, funcname, arg): + return oefmt( + space.w_NotImplementedError, + "%s: %s unavailable on this platform" % (funcname, arg)) @unwrap_spec(flags=c_int, mode=c_int, dir_fd=DirFD(rposix.HAVE_OPENAT)) def open(space, w_path, flags, mode=0777, dir_fd=DEFAULT_DIR_FD): @@ -445,7 +450,7 @@ raise wrap_oserror(space, e) @unwrap_spec(mode=c_int, - dir_fd=DirFD(available=False), effective_ids=kwonly(bool), follow_symlinks=kwonly(bool)) + dir_fd=DirFD(rposix.HAVE_FACCESSAT), effective_ids=kwonly(bool), follow_symlinks=kwonly(bool)) def access(space, w_path, mode, dir_fd=DEFAULT_DIR_FD, effective_ids=True, follow_symlinks=True): """\ @@ -470,9 +475,20 @@ has the specified access to the path. 
The mode argument can be F_OK to test existence, or the inclusive-OR of R_OK, W_OK, and X_OK.""" + if not rposix.HAVE_FACCESSAT: + if not follow_symlinks: + raise argument_unavailable("access", "follow_symlinks") + if effective_ids: + raise argument_unavailable("access", "effective_ids") + try: - ok = dispatch_filename(rposix.access)(space, w_path, mode) - except OSError, e: + if dir_fd == DEFAULT_DIR_FD and follow_symlinks and not effective_ids: + ok = dispatch_filename(rposix.access)(space, w_path, mode) + else: + path = space.fsencode_w(w_path) + ok = rposix.faccessat(path, mode, + dir_fd, effective_ids, follow_symlinks) + except OSError as e: raise wrap_oserror2(space, e, w_path) else: return space.wrap(ok) @@ -1181,9 +1197,7 @@ raise wrap_oserror2(space, e, w_path) if not follow_symlinks: - raise oefmt( - space.w_NotImplementedError, - "follow_symlinks unavailable on this platform") + raise argument_unavailable("utime", "follow_symlinks") if not space.is_w(w_ns, space.w_None): raise oefmt(space.w_NotImplementedError, From pypy.commits at gmail.com Wed Mar 23 16:37:59 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 23 Mar 2016 13:37:59 -0700 (PDT) Subject: [pypy-commit] cffi default: Update the one-sentence catch phrase describing cffi Message-ID: <56f2fea7.6672c20a.4facb.ffff8128@mx.google.com> Author: Armin Rigo Branch: Changeset: r2651:885c2831c05c Date: 2016-03-23 21:38 +0100 http://bitbucket.org/cffi/cffi/changeset/885c2831c05c/ Log: Update the one-sentence catch phrase describing cffi diff --git a/doc/source/index.rst b/doc/source/index.rst --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -2,9 +2,9 @@ CFFI documentation ================================ -C Foreign Function Interface for Python. The goal is to provide a -convenient and reliable way to call compiled C code from Python using -interface declarations written in C. +C Foreign Function Interface for Python. Interact with almost any C +code from Python, based on C-like declarations that you can often +copy-paste from header files or documentation. * Goals_ From pypy.commits at gmail.com Wed Mar 23 17:45:17 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 23 Mar 2016 14:45:17 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: proposal for ScipyUS2016 Message-ID: <56f30e6d.2968c20a.c179e.ffff9241@mx.google.com> Author: Matti Picus Branch: extradoc Changeset: r5614:5f22d5e547b6 Date: 2016-03-23 23:44 +0200 http://bitbucket.org/pypy/extradoc/changeset/5f22d5e547b6/ Log: proposal for ScipyUS2016 diff --git a/scipyUS2016/proposal.rst b/scipyUS2016/proposal.rst new file mode 100644 --- /dev/null +++ b/scipyUS2016/proposal.rst @@ -0,0 +1,35 @@ +PyPy and the Numpy/Scipy Stack +============================== + +Abstract (500 words or less) +____________________________ + +PyPy is used in the "other" python world of web servers and text processing. +What can it do for data crunching? How can it possibly work with Numpy and the +rest of the Scientific Python data stack? In this talk I will briefly survey +what is PyPy, our two approaches to compatibility with Numpy, and what that +means for those who are looking for a drop-in solution to their processing +challenges. + +Longer Description +__________________ + +PyPy is maturing as a drop-in replacement for python 2.7. In the "other" world +of web servers and text processing, PyPy's speed on long-running processes +and compatibility with pure python packages makes it a good fit for mature +technologies looking for a quick speed increase. 
+ +What about the world of number crunching, can PyPy possibly contribute +anything to the crowded field of ahead-of-time solutions like cython or +other just-in-time solutions like Numba? The PyPy team feels very strongly +that we can, and in this talk I will try to outline PyPy's approach to +intgrating numpy and the SciPy stack over this alternative interpreter. + +We have been working for a number of years on an alternative +implementation of the ndarray, tightly integrated to the PyPy machinary. +Reimplementing numpy completely comes with costs, like a constant need to +replicate updates when a new version of numpy is released. +We've been recently experimenting with reusing +more of C numpy through the C API and we have ideas how to merge both +approaches to have the best of both worlds. + From pypy.commits at gmail.com Wed Mar 23 17:48:53 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 23 Mar 2016 14:48:53 -0700 (PDT) Subject: [pypy-commit] pypy default: Complain explicitly if we see a getarrayitem or setarrayitem on a Message-ID: <56f30f45.e6ebc20a.ba8ae.ffff9702@mx.google.com> Author: Armin Rigo Branch: Changeset: r83307:85f1a32d4766 Date: 2016-03-23 22:48 +0100 http://bitbucket.org/pypy/pypy/changeset/85f1a32d4766/ Log: Complain explicitly if we see a getarrayitem or setarrayitem on a FixedSizeArray in the codewriter diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -688,6 +688,10 @@ ARRAY = op.args[0].concretetype.TO if self._array_of_voids(ARRAY): return [] + if isinstance(ARRAY, lltype.FixedSizeArray): + raise NotImplementedError( + "%r uses %r, which is not supported by the JIT codewriter" + % (self.graph, ARRAY)) if op.args[0] in self.vable_array_vars: # for virtualizables vars = self.vable_array_vars[op.args[0]] (v_base, arrayfielddescr, arraydescr) = vars @@ -718,6 +722,10 @@ ARRAY = op.args[0].concretetype.TO if self._array_of_voids(ARRAY): return [] + if isinstance(ARRAY, lltype.FixedSizeArray): + raise NotImplementedError( + "%r uses %r, which is not supported by the JIT codewriter" + % (self.graph, ARRAY)) if op.args[0] in self.vable_array_vars: # for virtualizables vars = self.vable_array_vars[op.args[0]] (v_base, arrayfielddescr, arraydescr) = vars diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -1316,6 +1316,21 @@ tr = Transformer(None, None) py.test.raises(NotImplementedError, tr.rewrite_operation, op) +def test_no_fixedsizearray(): + A = lltype.FixedSizeArray(lltype.Signed, 5) + v_x = varoftype(lltype.Ptr(A)) + op = SpaceOperation('getarrayitem', [v_x, Constant(0, lltype.Signed)], + varoftype(lltype.Signed)) + tr = Transformer(None, None) + tr.graph = 'demo' + py.test.raises(NotImplementedError, tr.rewrite_operation, op) + op = SpaceOperation('setarrayitem', [v_x, Constant(0, lltype.Signed), + Constant(42, lltype.Signed)], + varoftype(lltype.Void)) + e = py.test.raises(NotImplementedError, tr.rewrite_operation, op) + assert str(e.value) == ( + "'demo' uses %r, which is not supported by the JIT codewriter" % (A,)) + def _test_threadlocalref_get(loop_inv): from rpython.rlib.rthread import ThreadLocalField tlfield = ThreadLocalField(lltype.Signed, 'foobar_test_', From pypy.commits at gmail.com Wed Mar 23 17:59:22 2016 From: pypy.commits at gmail.com (mattip) Date: 
Wed, 23 Mar 2016 14:59:22 -0700 (PDT) Subject: [pypy-commit] pypy default: replace FixedSizeArray with CArrayPtr and scoped_alloc Message-ID: <56f311ba.cf0b1c0a.20fd6.ffffb303@mx.google.com> Author: mattip Branch: Changeset: r83308:3d5dcf2b84e1 Date: 2016-03-23 23:54 +0200 http://bitbucket.org/pypy/pypy/changeset/3d5dcf2b84e1/ Log: replace FixedSizeArray with CArrayPtr and scoped_alloc diff --git a/rpython/rlib/rtime.py b/rpython/rlib/rtime.py --- a/rpython/rlib/rtime.py +++ b/rpython/rlib/rtime.py @@ -148,13 +148,12 @@ if _WIN32: # hacking to avoid LARGE_INTEGER which is a union... - A = lltype.FixedSizeArray(lltype.SignedLongLong, 1) QueryPerformanceCounter = external( - 'QueryPerformanceCounter', [lltype.Ptr(A)], lltype.Void, - releasegil=False) + 'QueryPerformanceCounter', [rffi.CArrayPtr(lltype.SignedLongLong)], + lltype.Void, releasegil=False) QueryPerformanceFrequency = external( - 'QueryPerformanceFrequency', [lltype.Ptr(A)], rffi.INT, - releasegil=False) + 'QueryPerformanceFrequency', [rffi.CArrayPtr(lltype.SignedLongLong)], + rffi.INT, releasegil=False) class State(object): divisor = 0.0 counter_start = 0 @@ -178,15 +177,14 @@ releasegil=False) def win_perf_counter(): - a = lltype.malloc(A, flavor='raw') - if state.divisor == 0.0: + with lltype.scoped_alloc(rffi.CArray(rffi.lltype.SignedLongLong), 1) as a: + if state.divisor == 0.0: + QueryPerformanceCounter(a) + state.counter_start = a[0] + QueryPerformanceFrequency(a) + state.divisor = float(a[0]) QueryPerformanceCounter(a) - state.counter_start = a[0] - QueryPerformanceFrequency(a) - state.divisor = float(a[0]) - QueryPerformanceCounter(a) - diff = a[0] - state.counter_start - lltype.free(a, flavor='raw') + diff = a[0] - state.counter_start return float(diff) / state.divisor @replace_time_function('clock') From pypy.commits at gmail.com Wed Mar 23 18:13:24 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 23 Mar 2016 15:13:24 -0700 (PDT) Subject: [pypy-commit] pypy default: document merged branch Message-ID: <56f31504.03321c0a.66922.6d34@mx.google.com> Author: mattip Branch: Changeset: r83309:1f231677b3a3 Date: 2016-03-24 00:12 +0200 http://bitbucket.org/pypy/pypy/changeset/1f231677b3a3/ Log: document merged branch diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -23,3 +23,7 @@ Implement yet another strange numpy indexing compatibility; indexing by a scalar returns a scalar + +.. branch: fix_transpose_for_list_v3 + +Allow arguments to transpose to be sequences From pypy.commits at gmail.com Wed Mar 23 18:25:51 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 23 Mar 2016 15:25:51 -0700 (PDT) Subject: [pypy-commit] extradoc extradoc: rephrasing Message-ID: <56f317ef.8d571c0a.c0af1.ffffbe0e@mx.google.com> Author: Matti Picus Branch: extradoc Changeset: r5615:ec4f1768be95 Date: 2016-03-24 00:25 +0200 http://bitbucket.org/pypy/extradoc/changeset/ec4f1768be95/ Log: rephrasing diff --git a/scipyUS2016/proposal.rst b/scipyUS2016/proposal.rst --- a/scipyUS2016/proposal.rst +++ b/scipyUS2016/proposal.rst @@ -4,7 +4,7 @@ Abstract (500 words or less) ____________________________ -PyPy is used in the "other" python world of web servers and text processing. +PyPy is used successfully in the world of web servers and text processing. What can it do for data crunching? How can it possibly work with Numpy and the rest of the Scientific Python data stack? 
In this talk I will briefly survey what is PyPy, our two approaches to compatibility with Numpy, and what that @@ -14,7 +14,7 @@ Longer Description __________________ -PyPy is maturing as a drop-in replacement for python 2.7. In the "other" world +PyPy is maturing as a drop-in replacement for python 2.7. In the world of web servers and text processing, PyPy's speed on long-running processes and compatibility with pure python packages makes it a good fit for mature technologies looking for a quick speed increase. @@ -23,7 +23,8 @@ anything to the crowded field of ahead-of-time solutions like cython or other just-in-time solutions like Numba? The PyPy team feels very strongly that we can, and in this talk I will try to outline PyPy's approach to -intgrating numpy and the SciPy stack over this alternative interpreter. +intgrating numpy and the SciPy stack on top of this alternative interpreter, +after a brief introduction of the RPython toolchain and the PyPy interpreter. We have been working for a number of years on an alternative implementation of the ndarray, tightly integrated to the PyPy machinary. @@ -31,5 +32,5 @@ replicate updates when a new version of numpy is released. We've been recently experimenting with reusing more of C numpy through the C API and we have ideas how to merge both -approaches to have the best of both worlds. +approaches to have the best of both worlds. From pypy.commits at gmail.com Wed Mar 23 21:52:07 2016 From: pypy.commits at gmail.com (pjenvey) Date: Wed, 23 Mar 2016 18:52:07 -0700 (PDT) Subject: [pypy-commit] pypy stdlib-2.7.11: workaround hasattr masking stack overflow RuntimeErrors expected by Message-ID: <56f34847.a2afc20a.e60d4.ffffbfea@mx.google.com> Author: Philip Jenvey Branch: stdlib-2.7.11 Changeset: r83310:2dca7754d180 Date: 2016-03-23 18:51 -0700 http://bitbucket.org/pypy/pypy/changeset/2dca7754d180/ Log: workaround hasattr masking stack overflow RuntimeErrors expected by recursion tests, sigh. only test_recursive_dict_subclass_key seemed to trigger this diff --git a/lib-python/2.7/pickle.py b/lib-python/2.7/pickle.py --- a/lib-python/2.7/pickle.py +++ b/lib-python/2.7/pickle.py @@ -351,7 +351,9 @@ raise PicklingError("args from reduce() should be a tuple") # Assert that func is callable - if not hasattr(func, '__call__'): + try: + func.__call__ + except AttributeError: raise PicklingError("func from reduce should be callable") save = self.save From pypy.commits at gmail.com Thu Mar 24 00:31:09 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 23 Mar 2016 21:31:09 -0700 (PDT) Subject: [pypy-commit] pypy follow_symlinks: Test and fix convert_seconds() Message-ID: <56f36d8d.865a1c0a.651dd.fffff6e9@mx.google.com> Author: Ronan Lamy Branch: follow_symlinks Changeset: r83311:8d4f69eaa804 Date: 2016-03-24 04:30 +0000 http://bitbucket.org/pypy/pypy/changeset/8d4f69eaa804/ Log: Test and fix convert_seconds() diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py --- a/pypy/module/posix/interp_posix.py +++ b/pypy/module/posix/interp_posix.py @@ -1250,11 +1250,11 @@ def convert_seconds(space, w_time): if space.isinstance_w(w_time, space.w_float): time = space.float_w(w_time) - intpart, floatpart = modf(time) - if floatpart < 0: - floatpart += 1. + fracpart, intpart = modf(time) + if fracpart < 0: + fracpart += 1. intpart -= 1. 
- return int(intpart), int(floatpart*1e9) + return int(intpart), int(fracpart*1e9) else: time = space.int_w(w_time) return time, 0 diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py --- a/pypy/module/posix/test/test_posix2.py +++ b/pypy/module/posix/test/test_posix2.py @@ -2,16 +2,22 @@ # -*- coding: utf-8 -*- from __future__ import with_statement +import os +import py +import sys +import signal + from pypy.objspace.std import StdObjSpace from rpython.tool.udir import udir from pypy.tool.pytest.objspace import gettestobjspace from pypy.conftest import pypydir from rpython.translator.c.test.test_extfunc import need_sparse_files from rpython.rlib import rposix -import os -import py -import sys -import signal +from pypy.module.posix.interp_posix import convert_seconds + +from hypothesis import given +from hypothesis.strategies import integers + def setup_module(mod): usemodules = ['binascii', 'posix', 'signal', 'struct', 'time'] @@ -540,6 +546,9 @@ assert os.stat(path).st_atime > t0 os.utime(path, (int(t0), int(t0))) assert int(os.stat(path).st_atime) == int(t0) + t1 = time() + os.utime(path, (int(t1), int(t1))) + assert int(os.stat(path).st_atime) == int(t1) def test_utime_raises(self): os = self.posix @@ -1272,3 +1281,15 @@ if os.name == 'posix': assert os.open in os.supports_dir_fd # openat() +def test_convert_seconds_simple(space): + w_time = space.wrap(123.456) + assert convert_seconds(space, w_time) == (123, 456000000) + + at given(s=integers(min_value=-2**30, max_value=2**30), ns=integers(min_value=0, max_value=10**9)) +def test_convert_seconds_full(space, s, ns): + w_time = space.wrap(s + ns * 1e-9) + sec, nsec = convert_seconds(space, w_time) + assert 0 <= nsec < 1e9 + MAX_ERR = 1e9 / 2**23 + 1 # nsec has 53 - 30 = 23 bits of precisin + err = (sec * 10**9 + nsec) - (s * 10**9 + ns) + assert -MAX_ERR < err < MAX_ERR From pypy.commits at gmail.com Thu Mar 24 02:13:43 2016 From: pypy.commits at gmail.com (rlamy) Date: Wed, 23 Mar 2016 23:13:43 -0700 (PDT) Subject: [pypy-commit] pypy rposix-for-3: Add fchmodat(), fchownat() Message-ID: <56f38597.4412c30a.3f14c.ffffef38@mx.google.com> Author: Ronan Lamy Branch: rposix-for-3 Changeset: r83312:ef94621301a9 Date: 2016-03-24 06:07 +0000 http://bitbucket.org/pypy/pypy/changeset/ef94621301a9/ Log: Add fchmodat(), fchownat() diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -1739,6 +1739,7 @@ AT_SYMLINK_NOFOLLOW = rffi_platform.DefinedConstantInteger('AT_SYMLINK_NOFOLLOW') AT_EACCESS = rffi_platform.DefinedConstantInteger('AT_EACCESS') AT_REMOVEDIR = rffi_platform.DefinedConstantInteger('AT_REMOVEDIR') + AT_EMPTY_PATH = rffi_platform.DefinedConstantInteger('AT_EMPTY_PATH') UTIME_NOW = rffi_platform.DefinedConstantInteger('UTIME_NOW') UTIME_OMIT = rffi_platform.DefinedConstantInteger('UTIME_OMIT') TIMESPEC = rffi_platform.Struct('struct timespec', [ @@ -1772,6 +1773,34 @@ error = c_faccessat(dir_fd, pathname, mode, flags) return error == 0 +if HAVE_FCHMODAT: + c_fchmodat = external('fchmodat', + [rffi.INT, rffi.CCHARP, rffi.INT, rffi.INT], rffi.INT, + save_err=rffi.RFFI_SAVE_ERRNO,) + + def fchmodat(path, mode, dir_fd=AT_FDCWD, follow_symlinks=True): + if follow_symlinks: + flag = 0 + else: + flag = AT_SYMLINK_NOFOLLOW + error = c_fchmodat(dir_fd, path, mode, flag) + handle_posix_error('fchmodat', error) + +if HAVE_FCHOWNAT: + c_fchownat = external('fchownat', + [rffi.INT, rffi.CCHARP, rffi.INT, rffi.INT, rffi.INT], rffi.INT, + 
save_err=rffi.RFFI_SAVE_ERRNO,) + + def fchownat(path, owner, group, dir_fd=AT_FDCWD, + follow_symlinks=True, empty_path=False): + flag = 0 + if not follow_symlinks: + flag |= AT_SYMLINK_NOFOLLOW + if empty_path: + flag |= AT_EMPTY_PATH + error = c_fchownat(dir_fd, path, owner, group, flag) + handle_posix_error('fchownat', error) + if HAVE_LINKAT: c_linkat = external('linkat', [rffi.INT, rffi.CCHARP, rffi.INT, rffi.CCHARP, rffi.INT], rffi.INT) diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -508,6 +508,16 @@ finally: os.close(dirfd) + def test_fchmodat(self): + def f(dirfd): + return rposix.fchmodat('test_open_ascii', 0777, dirfd) + + dirfd = os.open(os.path.dirname(self.ufilename), os.O_RDONLY) + try: + interpret(f, [dirfd]) # does not crash + finally: + os.close(dirfd) + class TestPosixUnicode(BasePosixUnicodeOrAscii): def _get_filename(self): From pypy.commits at gmail.com Thu Mar 24 06:54:42 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 24 Mar 2016 03:54:42 -0700 (PDT) Subject: [pypy-commit] pypy jit-leaner-frontend: fix the trace stiching Message-ID: <56f3c772.83301c0a.2c64.6d40@mx.google.com> Author: fijal Branch: jit-leaner-frontend Changeset: r83313:8e88a9afa548 Date: 2016-03-24 12:53 +0200 http://bitbucket.org/pypy/pypy/changeset/8e88a9afa548/ Log: fix the trace stiching diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py --- a/rpython/jit/metainterp/pyjitpl.py +++ b/rpython/jit/metainterp/pyjitpl.py @@ -1622,8 +1622,8 @@ assert False if opnum == -1: opnum = rop.call_may_force_for_descr(descr) - self.metainterp.vrefs_after_residual_call(self.metainterp._last_op, - opnum, allboxes, descr, cut_pos) + cut_pos = self.metainterp.vrefs_after_residual_call( + self.metainterp._last_op, opnum, allboxes, descr, cut_pos) vablebox = None if assembler_call: vablebox, resbox = self.metainterp.direct_assembler_call( @@ -2790,7 +2790,9 @@ # during this CALL_MAY_FORCE. Mark this fact by # generating a VIRTUAL_REF_FINISH on it and replacing # it by ConstPtr(NULL). 
- self.stop_tracking_virtualref(i, op, opnum, arglist, descr, cut_pos) + return self.stop_tracking_virtualref(i, op, opnum, arglist, + descr, cut_pos) + return cut_pos def vable_after_residual_call(self, funcbox): vinfo = self.jitdriver_sd.virtualizable_info @@ -2821,10 +2823,12 @@ self.history.cut(cut_pos) # pop the CALL self.history.record_nospec(rop.VIRTUAL_REF_FINISH, [vrefbox, virtualbox], None) + cut_pos = self.history.get_trace_position() newop = self.history.record_nospec(opnum, arglist, descr) op.set_position(newop.get_position()) # mark by replacing it with ConstPtr(NULL) self.virtualref_boxes[i+1] = self.cpu.ts.CONST_NULL + return cut_pos def handle_possible_exception(self): if self.last_exc_value: diff --git a/rpython/jit/metainterp/test/test_virtualref.py b/rpython/jit/metainterp/test/test_virtualref.py --- a/rpython/jit/metainterp/test/test_virtualref.py +++ b/rpython/jit/metainterp/test/test_virtualref.py @@ -743,7 +743,8 @@ return execute_frame(ExecutionContext(), 1) assert entry_point() == 1 - self.meta_interp(entry_point, [], inline=True) + r = self.meta_interp(entry_point, [], inline=True) + assert r == 1 class TestLLtype(VRefTests, LLJitMixin): From pypy.commits at gmail.com Thu Mar 24 07:23:13 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 24 Mar 2016 04:23:13 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: update links Message-ID: <56f3ce21.838d1c0a.eeed1.79d0@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r727:136b49943029 Date: 2016-03-24 12:23 +0100 http://bitbucket.org/pypy/pypy.org/changeset/136b49943029/ Log: update links diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -196,7 +196,7 @@ uncompressed, they run in-place. For now you can uncompress them either somewhere in your home directory or, say, in /opt, and if you want, put a symlink from somewhere like -/usr/local/bin/pypy to /path/to/pypy-5.0.0/bin/pypy. Do +/usr/local/bin/pypy to /path/to/pypy-5.0.1/bin/pypy. Do not move or copy the executable pypy outside the tree – put a symlink to it, otherwise it will not find its libraries.

    @@ -241,7 +241,7 @@
  • Get the source code. The following packages contain the source at the same revision as the above binaries:

    Or you can checkout the current trunk using Mercurial (the trunk usually works and is of course more up-to-date):

    diff --git a/source/download.txt b/source/download.txt --- a/source/download.txt +++ b/source/download.txt @@ -201,7 +201,7 @@ uncompressed, they run in-place. For now you can uncompress them either somewhere in your home directory or, say, in ``/opt``, and if you want, put a symlink from somewhere like -``/usr/local/bin/pypy`` to ``/path/to/pypy-5.0.0/bin/pypy``. Do +``/usr/local/bin/pypy`` to ``/path/to/pypy-5.0.1/bin/pypy``. Do not move or copy the executable ``pypy`` outside the tree --- put a symlink to it, otherwise it will not find its libraries. @@ -261,9 +261,9 @@ 1. Get the source code. The following packages contain the source at the same revision as the above binaries: - * `pypy-5.0.0-src.tar.bz2`__ (sources) + * `pypy-5.0.1-src.tar.bz2`__ (sources) - .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.0-src.tar.bz2 + .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-src.tar.bz2 Or you can checkout the current trunk using Mercurial_ (the trunk usually works and is of course more up-to-date):: From pypy.commits at gmail.com Thu Mar 24 07:27:38 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 24 Mar 2016 04:27:38 -0700 (PDT) Subject: [pypy-commit] pypy.org extradoc: Point to the 4.0.0 release for ppc, with a note that there is a bug in the 5.0.0 releases Message-ID: <56f3cf2a.6672c20a.4facb.5e8f@mx.google.com> Author: Armin Rigo Branch: extradoc Changeset: r728:6d6e1e513945 Date: 2016-03-24 12:27 +0100 http://bitbucket.org/pypy/pypy.org/changeset/6d6e1e513945/ Log: Point to the 4.0.0 release for ppc, with a note that there is a bug in the 5.0.0 releases diff --git a/download.html b/download.html --- a/download.html +++ b/download.html @@ -125,8 +125,10 @@
  • FreeBSD 9.2 x86 64 bit (hopefully availabe soon) (see [1] below)
  • Windows binary (32bit) (you might need the VS 2008 runtime library installer vcredist_x86.exe.)
  • -
  • PowerPC64 Linux binary (64bit big-endian, Fedora 20) (see [1] below)
  • -
  • PowerPC64le Linux binary (64bit little-endian, Fedora 21) (see [1] below)
  • +
  • PowerPC PPC64 Linux binary (64bit big-endian, Fedora 20) (see [1] below) +(version 4.0.0; the 5.x releases are slightly buggy. you can also translate the trunk)
  • +
  • PowerPC PPC64le Linux binary (64bit little-endian, Fedora 21) (see [1] below) +(version 4.0.0; the 5.x releases are slightly buggy. you can also translate the trunk)
  • Source (tar.bz2); Source (zip). See below for more about the sources.
  • All our downloads, including previous versions. We also have a mirror, but please use only if you have troubles accessing the links above
  • @@ -390,8 +392,6 @@ 798c6e83536a5fa5ed7d6efb4d06db1a pypy-5.0.1-src.tar.bz2 928761075bcc2d01f9f884eeee105bd0 pypy-5.0.1-src.zip 2e53db6766a718084c9327a6059f8ad7 pypy-5.0.1-win32.zip -f243ff399a55f4370b6d1dc0a3650f1d pypy-5.0.0-ppc64.tar.bz2 -51fb75ae0a143faa9a5b39f094965050 pypy-5.0.0-ppc64le.tar.bz2

    pypy3-2.4.0 md5:

    @@ -421,8 +421,6 @@
     e96dad1562c4a91b26612f0fad0e70d0635399ed  pypy-5.0.1-src.tar.bz2
     f7e4cda496244eefc50323704c48c10b568937cf  pypy-5.0.1-src.zip
     f0addc0cc809e3cc3ffe2c2dd643eb6e1c95cb49  pypy-5.0.1-win32.zip
    -5620cead511ad33f9fface224544b70d72d9e4c9  pypy-5.0.0-ppc64.tar.bz2
    -6ee6b0eb574f3d29a5eaf29fdae8745fd9fe3c38  pypy-5.0.0-ppc64le.tar.bz2
     

    pypy-5.0.1 sha256:

    @@ -435,8 +433,6 @@
     1573c9284d3ec236c8e6ef3b954753932dff29462c54b5885b761d1ee68b6e05  pypy-5.0.1-src.tar.bz2
     6e343f24c5e4ea87879bc4fd299b65a2825796286319edc0b69b3681017c145f  pypy-5.0.1-src.zip
     c12254d8b1747322736d26e014744a426c6900d232c1799140fbb43f44319730  pypy-5.0.1-win32.zip
    -334a37e68cb543cf2cbcdd12379b9b770064bb70ba7fd104f1e451cfa10cdda5  pypy-5.0.0-ppc64.tar.bz2
    -e72fe5c094186f79c997000ddbaa01616def652a8d1338b75a27dfa3755eb86c  pypy-5.0.0-ppc64le.tar.bz2
     

    pypy3-2.4.0 sha1:

    diff --git a/source/download.txt b/source/download.txt
    --- a/source/download.txt
    +++ b/source/download.txt
    @@ -85,8 +85,10 @@
     * FreeBSD 9.2 x86 64 bit *(hopefully availabe soon)* (see ``[1]`` below)
     * `Windows binary (32bit)`__ (you might need the VS 2008 runtime library
       installer `vcredist_x86.exe`_.)
    -* `PowerPC64 Linux binary (64bit big-endian, Fedora 20)`__ (see ``[1]`` below)
    -* `PowerPC64le Linux binary (64bit little-endian, Fedora 21)`__ (see ``[1]`` below)
    +* `PowerPC PPC64 Linux binary (64bit big-endian, Fedora 20)`__ (see ``[1]`` below)
    +  (version 4.0.0; the 5.x releases are slightly buggy.  you can also translate the trunk)
    +* `PowerPC PPC64le Linux binary (64bit little-endian, Fedora 21)`__ (see ``[1]`` below)
    +  (version 4.0.0; the 5.x releases are slightly buggy.  you can also translate the trunk)
     * `Source (tar.bz2)`__; `Source (zip)`__.  See below for more about the sources.
     * `All our downloads,`__ including previous versions.  We also have a
       mirror_, but please use only if you have troubles accessing the links above
    @@ -98,8 +100,8 @@
     .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-linux-armel.tar.bz2
     .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-osx64.tar.bz2
     .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-win32.zip
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-ppc64.tar.bz2
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-ppc64le.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-ppc64.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-ppc64le.tar.bz2
     .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-src.tar.bz2
     .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-src.zip
     .. _`vcredist_x86.exe`: http://www.microsoft.com/en-us/download/details.aspx?id=5582
    @@ -426,8 +428,6 @@
         798c6e83536a5fa5ed7d6efb4d06db1a  pypy-5.0.1-src.tar.bz2
         928761075bcc2d01f9f884eeee105bd0  pypy-5.0.1-src.zip
         2e53db6766a718084c9327a6059f8ad7  pypy-5.0.1-win32.zip
    -    f243ff399a55f4370b6d1dc0a3650f1d  pypy-5.0.0-ppc64.tar.bz2
    -    51fb75ae0a143faa9a5b39f094965050  pypy-5.0.0-ppc64le.tar.bz2
     
     pypy3-2.4.0 md5::
     
    @@ -459,8 +459,6 @@
         e96dad1562c4a91b26612f0fad0e70d0635399ed  pypy-5.0.1-src.tar.bz2
         f7e4cda496244eefc50323704c48c10b568937cf  pypy-5.0.1-src.zip
         f0addc0cc809e3cc3ffe2c2dd643eb6e1c95cb49  pypy-5.0.1-win32.zip
    -    5620cead511ad33f9fface224544b70d72d9e4c9  pypy-5.0.0-ppc64.tar.bz2
    -    6ee6b0eb574f3d29a5eaf29fdae8745fd9fe3c38  pypy-5.0.0-ppc64le.tar.bz2
     
     pypy-5.0.1 sha256::
     
    @@ -473,8 +471,6 @@
         1573c9284d3ec236c8e6ef3b954753932dff29462c54b5885b761d1ee68b6e05  pypy-5.0.1-src.tar.bz2
         6e343f24c5e4ea87879bc4fd299b65a2825796286319edc0b69b3681017c145f  pypy-5.0.1-src.zip
         c12254d8b1747322736d26e014744a426c6900d232c1799140fbb43f44319730  pypy-5.0.1-win32.zip
    -    334a37e68cb543cf2cbcdd12379b9b770064bb70ba7fd104f1e451cfa10cdda5  pypy-5.0.0-ppc64.tar.bz2
    -    e72fe5c094186f79c997000ddbaa01616def652a8d1338b75a27dfa3755eb86c  pypy-5.0.0-ppc64le.tar.bz2
     
     pypy3-2.4.0 sha1::
     
    
    From pypy.commits at gmail.com  Thu Mar 24 07:41:21 2016
    From: pypy.commits at gmail.com (krono)
    Date: Thu, 24 Mar 2016 04:41:21 -0700 (PDT)
    Subject: [pypy-commit] pypy default: Fix import in pdbplus.py
    Message-ID: <56f3d261.0357c20a.ee593.5f61@mx.google.com>
    
    Author: Tobias Pape 
    Branch: 
    Changeset: r83314:c16375b8325d
    Date: 2016-03-24 12:40 +0100
    http://bitbucket.org/pypy/pypy/changeset/c16375b8325d/
    
    Log:	Fix import in pdbplus.py
    
    diff --git a/rpython/translator/tool/pdbplus.py b/rpython/translator/tool/pdbplus.py
    --- a/rpython/translator/tool/pdbplus.py
    +++ b/rpython/translator/tool/pdbplus.py
    @@ -172,7 +172,7 @@
     (in which case prefixing with some packages in pypy is tried (see help pypyprefixes)).
     if obj is a function or method, the localized call graph is shown;
     if obj is a class or ClassDef the class definition graph is shown"""
    -        from rpython.annotator.classdef import ClassDef
    +        from rpython.annotator.classdesc import ClassDef
             from rpython.translator.tool import graphpage
             translator = self.translator
             obj = self._getobj(arg)
    
    From pypy.commits at gmail.com  Thu Mar 24 07:52:41 2016
    From: pypy.commits at gmail.com (plan_rich)
    Date: Thu, 24 Mar 2016 04:52:41 -0700 (PDT)
    Subject: [pypy-commit] pypy release-5.x: calculate index value (const scale,
 const offset) before emitting the load for ConstPtrs
    Message-ID: <56f3d509.8fb81c0a.e11e0.ffffa4ef@mx.google.com>
    
    Author: Richard Plangger 
    Branch: release-5.x
    Changeset: r83315:30178fbdc748
    Date: 2016-03-01 08:32 +0100
    http://bitbucket.org/pypy/pypy/changeset/30178fbdc748/
    
Log:	calculate index value (const scale, const offset) before emitting the
    	load for ConstPtrs
    
    diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py
    --- a/rpython/jit/backend/llsupport/gc.py
    +++ b/rpython/jit/backend/llsupport/gc.py
    @@ -164,13 +164,11 @@
                 array_index = moving_obj_tracker.get_array_index(v)
     
                 size, offset, _ = unpack_arraydescr(moving_obj_tracker.ptr_array_descr)
    -            scale = size
    +            array_index = array_index * size + offset
                 args = [moving_obj_tracker.const_ptr_gcref_array,
                         ConstInt(array_index),
    -                    ConstInt(scale),
    -                    ConstInt(offset),
                         ConstInt(size)]
    -            load_op = ResOperation(rop.GC_LOAD_INDEXED_R, args)
    +            load_op = ResOperation(rop.GC_LOAD_R, args)
                 newops.append(load_op)
                 op.setarg(arg_i, load_op)
             #
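
A minimal sketch of the address arithmetic being folded by this change (plain Python with illustrative names, not the JIT code itself): GC_LOAD_INDEXED_R computes base + index*scale + offset from four operands, so when index, scale and offset are all constants they can be collapsed into the single byte offset that GC_LOAD_R expects.

    def gc_load_indexed_addr(base, index, scale, offset):
        # address computed at runtime from four operands
        return base + index * scale + offset

    def gc_load_addr(base, byte_offset):
        # the constant index/scale/offset have been folded beforehand
        return base + byte_offset

    base, index, scale, offset = 0x1000, 3, 8, 16
    assert gc_load_indexed_addr(base, index, scale, offset) == \
           gc_load_addr(base, index * scale + offset)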
    
    From pypy.commits at gmail.com  Thu Mar 24 08:04:41 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Thu, 24 Mar 2016 05:04:41 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: a quick fix that might work,
 but we need to refactor that
    Message-ID: <56f3d7d9.890bc30a.8c36f.6d32@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83316:257b7819ee50
    Date: 2016-03-24 14:03 +0200
    http://bitbucket.org/pypy/pypy/changeset/257b7819ee50/
    
Log:	a quick fix that might work, but we need to refactor that
    
    diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
    --- a/rpython/jit/metainterp/pyjitpl.py
    +++ b/rpython/jit/metainterp/pyjitpl.py
    @@ -2790,8 +2790,8 @@
                     # during this CALL_MAY_FORCE.  Mark this fact by
                     # generating a VIRTUAL_REF_FINISH on it and replacing
                     # it by ConstPtr(NULL).
    -                return self.stop_tracking_virtualref(i, op, opnum, arglist,
    -                                                     descr, cut_pos)
    +                cut_pos = self.stop_tracking_virtualref(i, op, opnum, arglist,
    +                                                        descr, cut_pos)
             return cut_pos
     
         def vable_after_residual_call(self, funcbox):
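
The one-line change above matters because stop_tracking_virtualref() is called from inside the loop over the tracked virtual refs: returning its result immediately skipped the remaining refs. A minimal sketch of the intended control flow (illustrative names, not the real MetaInterp code):

    def vrefs_after_residual_call(vrefs, cut_pos, stop_tracking):
        # handle *every* forced virtualref; each call may advance cut_pos
        for i, vref in enumerate(vrefs):
            if vref.forced:
                cut_pos = stop_tracking(i, cut_pos)  # was: return stop_tracking(...)
        return cut_pos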
    
    From pypy.commits at gmail.com  Thu Mar 24 10:36:55 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Thu, 24 Mar 2016 07:36:55 -0700 (PDT)
    Subject: [pypy-commit] pypy.org extradoc: send out pypy-5.0.1+-ppc64*
    Message-ID: <56f3fb87.a2f2c20a.cffb2.ffffa3a0@mx.google.com>
    
    Author: Armin Rigo 
    Branch: extradoc
    Changeset: r729:1e94b0683a62
    Date: 2016-03-24 15:37 +0100
    http://bitbucket.org/pypy/pypy.org/changeset/1e94b0683a62/
    
    Log:	send out pypy-5.0.1+-ppc64*
    
    diff --git a/download.html b/download.html
    --- a/download.html
    +++ b/download.html
    @@ -125,10 +125,8 @@
     
  • FreeBSD 9.2 x86 64 bit (hopefully availabe soon) (see [1] below)
  • Windows binary (32bit) (you might need the VS 2008 runtime library installer vcredist_x86.exe.)
  • -
  • PowerPC PPC64 Linux binary (64bit big-endian, Fedora 20) (see [1] below) -(version 4.0.0; the 5.x releases are slightly buggy. you can also translate the trunk)
  • -
  • PowerPC PPC64le Linux binary (64bit little-endian, Fedora 21) (see [1] below) -(version 4.0.0; the 5.x releases are slightly buggy. you can also translate the trunk)
  • +
  • PowerPC PPC64 Linux binary (64bit big-endian, Fedora 20) (see [1] below)
  • +
  • PowerPC PPC64le Linux binary (64bit little-endian, Fedora 21) (see [1] below)
  • Source (tar.bz2); Source (zip). See below for more about the sources.
  • All our downloads, including previous versions. We also have a mirror, but please use only if you have troubles accessing the links above
  • @@ -433,6 +431,8 @@ 1573c9284d3ec236c8e6ef3b954753932dff29462c54b5885b761d1ee68b6e05 pypy-5.0.1-src.tar.bz2 6e343f24c5e4ea87879bc4fd299b65a2825796286319edc0b69b3681017c145f pypy-5.0.1-src.zip c12254d8b1747322736d26e014744a426c6900d232c1799140fbb43f44319730 pypy-5.0.1-win32.zip +88979979641c872ffb358ae94b1caf8e1b1bae1e382755e75da354c69283a65e pypy-5.0.1+-ppc64.tar.bz2 +53d742504a78366b833c04bd83740336aa4ddfecffeff6b2fa8728fcd6b4c8af pypy-5.0.1+-ppc64le.tar.bz2

    pypy3-2.4.0 sha1:

    diff --git a/source/download.txt b/source/download.txt
    --- a/source/download.txt
    +++ b/source/download.txt
    @@ -86,9 +86,7 @@
     * `Windows binary (32bit)`__ (you might need the VS 2008 runtime library
       installer `vcredist_x86.exe`_.)
     * `PowerPC PPC64 Linux binary (64bit big-endian, Fedora 20)`__ (see ``[1]`` below)
    -  (version 4.0.0; the 5.x releases are slightly buggy.  you can also translate the trunk)
     * `PowerPC PPC64le Linux binary (64bit little-endian, Fedora 21)`__ (see ``[1]`` below)
    -  (version 4.0.0; the 5.x releases are slightly buggy.  you can also translate the trunk)
     * `Source (tar.bz2)`__; `Source (zip)`__.  See below for more about the sources.
     * `All our downloads,`__ including previous versions.  We also have a
       mirror_, but please use only if you have troubles accessing the links above
    @@ -100,8 +98,8 @@
     .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-linux-armel.tar.bz2
     .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-osx64.tar.bz2
     .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-win32.zip
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-ppc64.tar.bz2
    -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-4.0.0-ppc64le.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1+-ppc64.tar.bz2
    +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1+-ppc64le.tar.bz2
     .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-src.tar.bz2
     .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-src.zip
     .. _`vcredist_x86.exe`: http://www.microsoft.com/en-us/download/details.aspx?id=5582
    @@ -471,6 +469,9 @@
         1573c9284d3ec236c8e6ef3b954753932dff29462c54b5885b761d1ee68b6e05  pypy-5.0.1-src.tar.bz2
         6e343f24c5e4ea87879bc4fd299b65a2825796286319edc0b69b3681017c145f  pypy-5.0.1-src.zip
         c12254d8b1747322736d26e014744a426c6900d232c1799140fbb43f44319730  pypy-5.0.1-win32.zip
    +    88979979641c872ffb358ae94b1caf8e1b1bae1e382755e75da354c69283a65e  pypy-5.0.1+-ppc64.tar.bz2
    +    53d742504a78366b833c04bd83740336aa4ddfecffeff6b2fa8728fcd6b4c8af  pypy-5.0.1+-ppc64le.tar.bz2
    +
     
     pypy3-2.4.0 sha1::
     
    
    From pypy.commits at gmail.com  Thu Mar 24 10:55:23 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Thu, 24 Mar 2016 07:55:23 -0700 (PDT)
    Subject: [pypy-commit] extradoc extradoc: move that to the correct place
    Message-ID: <56f3ffdb.e853c20a.29f98.ffffa922@mx.google.com>
    
    Author: fijal
    Branch: extradoc
    Changeset: r5616:f38e3cf0a59d
    Date: 2016-03-24 16:55 +0200
    http://bitbucket.org/pypy/extradoc/changeset/f38e3cf0a59d/
    
    Log:	move that to the correct place
    
    diff --git a/scipyUS2016/proposal.rst b/talk/scipyUS2016/proposal.rst
    rename from scipyUS2016/proposal.rst
    rename to talk/scipyUS2016/proposal.rst
    
    From pypy.commits at gmail.com  Thu Mar 24 10:59:22 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Thu, 24 Mar 2016 07:59:22 -0700 (PDT)
    Subject: [pypy-commit] extradoc extradoc: work on the last para
    Message-ID: <56f400ca.d33f1c0a.373db.ffffca25@mx.google.com>
    
    Author: fijal
    Branch: extradoc
    Changeset: r5617:afc5d24e8d8a
    Date: 2016-03-24 16:59 +0200
    http://bitbucket.org/pypy/extradoc/changeset/afc5d24e8d8a/
    
    Log:	work on the last para
    
    diff --git a/talk/scipyUS2016/proposal.rst b/talk/scipyUS2016/proposal.rst
    --- a/talk/scipyUS2016/proposal.rst
    +++ b/talk/scipyUS2016/proposal.rst
    @@ -29,8 +29,15 @@
     We have been working for a number of years on an alternative 
     implementation of the ndarray, tightly integrated to the PyPy machinary.
     Reimplementing numpy completely comes with costs, like a constant need to
    -replicate updates when a new version of numpy is released.
    -We've been recently experimenting with reusing
    -more of C numpy through the C API and we have ideas how to merge both 
    -approaches to have the best of both worlds. 
    +replicate updates when a new version of numpy is released and difficulty to
    +integrate with the rest of the numeric stack. We've been recently experimenting
    +with a quite different approach -- to have a good enough implementation of CPython
    +C API to be able to just use numpy, which is in the experimental stage.
    +That comes with a problem of speed of array access which now has to traverse
    +the slow C API. We propose a solution that would merge the two, where we
    +would be able to both use the current numpy as well as reimplement parts
    +that will be performance critical.
     
    +We hope that this approach will fill a gap of using pypy in the numeric
    +world, where jit-level performance can provide middle ground between
    +user-input heavy solutions like cython and the slowness of CPython.
    
    From pypy.commits at gmail.com  Thu Mar 24 11:55:21 2016
    From: pypy.commits at gmail.com (mattip)
    Date: Thu, 24 Mar 2016 08:55:21 -0700 (PDT)
    Subject: [pypy-commit] pypy cpyext-ext: merge default into branch
    Message-ID: <56f40de9.e213c20a.f93e0.ffffc3b8@mx.google.com>
    
    Author: mattip 
    Branch: cpyext-ext
    Changeset: r83317:51207eafb0bd
    Date: 2016-03-24 00:35 +0200
    http://bitbucket.org/pypy/pypy/changeset/51207eafb0bd/
    
    Log:	merge default into branch
    
    diff --git a/.hgtags b/.hgtags
    --- a/.hgtags
    +++ b/.hgtags
    @@ -19,3 +19,4 @@
     850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0
     5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1
     246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0
    +bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1
    diff --git a/pypy/doc/config/translation.gc.txt b/pypy/doc/config/translation.gc.txt
    --- a/pypy/doc/config/translation.gc.txt
    +++ b/pypy/doc/config/translation.gc.txt
    @@ -1,24 +1,26 @@
     Choose the Garbage Collector used by the translated program.
    -The good performing collectors are "hybrid" and "minimark".
    -The default is "minimark".
    +The recommended default is "incminimark".
     
       - "ref": reference counting. Takes very long to translate and the result is
    -    slow.
    +    slow.  Used only for tests.  Don't use it for real RPython programs.
     
    -  - "marksweep": naive mark & sweep.
    +  - "none": no GC.  Leaks everything.  Don't use it for real RPython
    +    programs: the rate of leaking is immense.
     
       - "semispace": a copying semi-space GC.
     
       - "generation": a generational GC using the semi-space GC for the
         older generation.
     
    -  - "boehm": use the Boehm conservative GC.
    -
       - "hybrid": a hybrid collector of "generation" together with a
         mark-n-sweep old space
     
    -  - "markcompact": a slow, but memory-efficient collector,
    -    influenced e.g. by Smalltalk systems.
    +  - "boehm": use the Boehm conservative GC.
     
       - "minimark": a generational mark-n-sweep collector with good
         performance.  Includes page marking for large arrays.
    +
    +  - "incminimark": like minimark, but adds incremental major
    +    collections.  Seems to come with no performance drawback over
    +    "minimark", so it is the default.  A few recent features of PyPy
    +    (like cpyext) are only working with this GC.
    diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst
    --- a/pypy/doc/extradoc.rst
    +++ b/pypy/doc/extradoc.rst
    @@ -80,7 +80,7 @@
     .. _How to *not* write Virtual Machines for Dynamic Languages: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dyla2007/dyla.pdf
     .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf
     .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf
    -.. _Automatic JIT Compiler Generation with Runtime Partial Evaluation: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf
    +.. _Automatic JIT Compiler Generation with Runtime Partial Evaluation: http://stups.hhu.de/mediawiki/images/b/b9/Master_bolz.pdf
     .. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/DynamicLanguages_abstracts.html#AACM-DLS07
     .. _EU Reports: index-report.html
     .. _Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution: http://sabi.net/nriley/pubs/dls6-riley.pdf
    diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
    --- a/pypy/doc/index-of-release-notes.rst
    +++ b/pypy/doc/index-of-release-notes.rst
    @@ -6,6 +6,7 @@
     
     .. toctree::
     
    +   release-5.0.1.rst
        release-5.0.0.rst
        release-4.0.1.rst
        release-4.0.0.rst
    diff --git a/pypy/doc/release-5.0.1.rst b/pypy/doc/release-5.0.1.rst
    new file mode 100644
    --- /dev/null
    +++ b/pypy/doc/release-5.0.1.rst
    @@ -0,0 +1,40 @@
    +==========
    +PyPy 5.0.1
    +==========
    +
    +We have released a bugfix for PyPy 5.0, after reports that the newly released
    +`lxml 3.6.0`_, which now supports PyPy 5.0 +, can `crash on large files`_.
    +Thanks to those who reported the crash. Please update, downloads are available
    +at pypy.org/download.html
    +
    +.. _`lxml 3.6.0`: https://pypi.python.org/pypi/lxml/3.6.0
    +.. _`crash on large files`: https://bitbucket.org/pypy/pypy/issues/2260
    +
    +The changes between PyPy 5.0 and 5.0.1 are only two bug fixes: one in
    +cpyext, which fixes notably (but not only) lxml; and another for a
    +corner case of the JIT.
    +
    +What is PyPy?
    +=============
    +
    +PyPy is a very compliant Python interpreter, almost a drop-in replacement for
    +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
    +due to its integrated tracing JIT compiler.
    +
    +We also welcome developers of other
    +`dynamic languages`_ to see what RPython can do for them.
    +
    +This release supports **x86** machines on most common operating systems
    +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD),
    +newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the
    +big- and little-endian variants of **PPC64** running Linux.
    +
    +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
    +.. _`dynamic languages`: http://pypyjs.org
    +
    +Please update, and continue to help us make PyPy better.
    +
    +Cheers
    +
    +The PyPy Team
    +
    diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
    --- a/pypy/doc/whatsnew-head.rst
    +++ b/pypy/doc/whatsnew-head.rst
    @@ -23,3 +23,7 @@
     
     Implement yet another strange numpy indexing compatibility; indexing by a scalar 
     returns a scalar
    +
    +.. branch: fix_transpose_for_list_v3
    +
    +Allow arguments to transpose to be sequences
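
For context, a small usage sketch of what the fix_transpose_for_list_v3 branch enables in micronumpy, shown here with plain numpy for illustration only:

    import numpy as np

    a = np.zeros((2, 3, 4))
    print(a.transpose(2, 0, 1).shape)    # separate integer arguments -> (4, 2, 3)
    print(a.transpose([2, 0, 1]).shape)  # a sequence argument also works -> (4, 2, 3)
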
    diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
    --- a/pypy/goal/targetpypystandalone.py
    +++ b/pypy/goal/targetpypystandalone.py
    @@ -327,7 +327,7 @@
                 # XXX possibly adapt options using modules
                 failures = create_cffi_import_libraries(exename, options, basedir)
                 # if failures, they were already printed
    -            print  >> sys.stderr, str(exename),'successfully built, but errors while building the above modules will be ignored'
    +            print  >> sys.stderr, str(exename),'successfully built (errors, if any, while building the above modules are ignored)'
             driver.task_build_cffi_imports = types.MethodType(task_build_cffi_imports, driver)
             driver.tasks['build_cffi_imports'] = driver.task_build_cffi_imports, [compile_goal]
             driver.default_goal = 'build_cffi_imports'
    diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
    --- a/pypy/interpreter/baseobjspace.py
    +++ b/pypy/interpreter/baseobjspace.py
    @@ -417,7 +417,10 @@
             self.wait_for_thread_shutdown()
             w_exitfunc = self.sys.getdictvalue(self, 'exitfunc')
             if w_exitfunc is not None:
    -            self.call_function(w_exitfunc)
    +            try:
    +                self.call_function(w_exitfunc)
    +            except OperationError as e:
    +                e.write_unraisable(self, 'sys.exitfunc == ', w_exitfunc)
             from pypy.interpreter.module import Module
             for w_mod in self.builtin_modules.values():
                 if isinstance(w_mod, Module) and w_mod.startup_called:
    diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
    --- a/pypy/interpreter/test/test_objspace.py
    +++ b/pypy/interpreter/test/test_objspace.py
    @@ -416,3 +416,14 @@
                 i -= 1
                 assert i >= 0
                 gc.collect()
    +
    +    def test_exitfunc_catches_exceptions(self):
    +        from pypy.tool.pytest.objspace import maketestobjspace
    +        space = maketestobjspace()
    +        space.appexec([], """():
    +            import sys
    +            sys.exitfunc = lambda: this_is_an_unknown_name
    +        """)
    +        space.finish()
    +        # assert that we reach this point without getting interrupted
    +        # by the OperationError(NameError)
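
A minimal pure-Python sketch of the behaviour the baseobjspace.py hunk above aims for, with hypothetical names (the real code goes through the object space and OperationError):

    def run_exitfunc(exitfunc, report_unraisable):
        # the exit hook may raise; report the error instead of letting it
        # abort the rest of interpreter shutdown
        try:
            exitfunc()
        except Exception as e:
            report_unraisable('sys.exitfunc ==', exitfunc, e)
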
    diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
    --- a/pypy/module/_cffi_backend/ctypeptr.py
    +++ b/pypy/module/_cffi_backend/ctypeptr.py
    @@ -124,7 +124,7 @@
                             s = rffi.charp2str(ptr)
                         else:
                             s = rffi.charp2strn(ptr, length)
    -                    return space.wrap(s)
    +                    return space.wrapbytes(s)
                     #
                     # pointer to a wchar_t: builds and returns a unicode
                     if self.is_unichar_ptr_or_array():
    @@ -372,15 +372,15 @@
             rffi_fclose(self.llf)
     
     
    -def prepare_file_argument(space, fileobj):
    -    fileobj.direct_flush()
    -    if fileobj.cffi_fileobj is None:
    -        fd = fileobj.direct_fileno()
    +def prepare_file_argument(space, w_fileobj):
    +    w_fileobj.direct_flush()
    +    if w_fileobj.cffi_fileobj is None:
    +        fd = w_fileobj.direct_fileno()
             if fd < 0:
                 raise OperationError(space.w_ValueError,
                                      space.wrap("file has no OS file descriptor"))
             try:
    -            fileobj.cffi_fileobj = CffiFileObj(fd, fileobj.mode)
    +            w_fileobj.cffi_fileobj = CffiFileObj(fd, w_fileobj.mode)
             except OSError, e:
                 raise wrap_oserror(space, e)
    -    return rffi.cast(rffi.CCHARP, fileobj.cffi_fileobj.llf)
    +    return rffi.cast(rffi.CCHARP, w_fileobj.cffi_fileobj.llf)
    diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py
    --- a/pypy/module/_vmprof/test/test__vmprof.py
    +++ b/pypy/module/_vmprof/test/test__vmprof.py
    @@ -72,9 +72,9 @@
     
         def test_enable_ovf(self):
             import _vmprof
    -        raises(_vmprof.VMProfError, _vmprof.enable, 999, 0)
    -        raises(_vmprof.VMProfError, _vmprof.enable, 999, -2.5)
    -        raises(_vmprof.VMProfError, _vmprof.enable, 999, 1e300)
    -        raises(_vmprof.VMProfError, _vmprof.enable, 999, 1e300 * 1e300)
    +        raises(_vmprof.VMProfError, _vmprof.enable, 2, 0)
    +        raises(_vmprof.VMProfError, _vmprof.enable, 2, -2.5)
    +        raises(_vmprof.VMProfError, _vmprof.enable, 2, 1e300)
    +        raises(_vmprof.VMProfError, _vmprof.enable, 2, 1e300 * 1e300)
             NaN = (1e300*1e300) / (1e300*1e300)
    -        raises(_vmprof.VMProfError, _vmprof.enable, 999, NaN)
    +        raises(_vmprof.VMProfError, _vmprof.enable, 2, NaN)
    diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
    --- a/pypy/module/cpyext/api.py
    +++ b/pypy/module/cpyext/api.py
    @@ -937,14 +937,14 @@
         modulename = py.path.local(eci.libraries[-1])
     
         def dealloc_trigger():
    -        from pypy.module.cpyext.pyobject import _Py_Dealloc
    +        from pypy.module.cpyext.pyobject import decref
             print 'dealloc_trigger...'
             while True:
                 ob = rawrefcount.next_dead(PyObject)
                 if not ob:
                     break
                 print 'deallocating PyObject', ob
    -            _Py_Dealloc(space, ob)
    +            decref(space, ob)
             print 'dealloc_trigger DONE'
             return "RETRY"
         rawrefcount.init(dealloc_trigger)
    diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
    --- a/pypy/module/cpyext/bytesobject.py
    +++ b/pypy/module/cpyext/bytesobject.py
    @@ -80,7 +80,8 @@
         buflen = length + 1
         py_str.c_ob_size = length
         py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, buflen,
    -                                    flavor='raw', zero=True)
    +                                    flavor='raw', zero=True,
    +                                    add_memory_pressure=True)
         py_str.c_ob_sstate = rffi.cast(rffi.INT, 0) # SSTATE_NOT_INTERNED
         return py_str
     
    diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
    --- a/pypy/module/cpyext/object.py
    +++ b/pypy/module/cpyext/object.py
    @@ -18,7 +18,8 @@
     def PyObject_Malloc(space, size):
         # returns non-zero-initialized memory, like CPython
         return lltype.malloc(rffi.VOIDP.TO, size,
    -                         flavor='raw')
    +                         flavor='raw',
    +                         add_memory_pressure=True)
     
     @cpython_api([rffi.VOIDP], lltype.Void)
     def PyObject_Free(space, ptr):
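
The recurring add_memory_pressure=True argument in these cpyext hunks tells the GC to account for raw (non-GC) allocations tied to GC-managed wrappers, so major collections trigger sooner when many of them accumulate. A minimal sketch of the idiom, using the same lltype/rffi API as the diff:

    from rpython.rtyper.lltypesystem import lltype, rffi

    def alloc_raw_buffer(size):
        # raw malloc'd memory is invisible to the GC without this hint
        return lltype.malloc(rffi.CCHARP.TO, size, flavor='raw', zero=True,
                             add_memory_pressure=True)
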
    diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py
    --- a/pypy/module/cpyext/pyobject.py
    +++ b/pypy/module/cpyext/pyobject.py
    @@ -50,7 +50,8 @@
                 size += itemcount * pytype.c_tp_itemsize
             assert size >= rffi.sizeof(PyObject.TO)
             buf = lltype.malloc(rffi.VOIDP.TO, size,
    -                            flavor='raw', zero=True)
    +                            flavor='raw', zero=True,
    +                            add_memory_pressure=True)
             pyobj = rffi.cast(PyObject, buf)
             pyobj.c_ob_refcnt = 1
             #pyobj.c_ob_pypy_link should get assigned very quickly
    diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py
    --- a/pypy/module/cpyext/state.py
    +++ b/pypy/module/cpyext/state.py
    @@ -147,10 +147,10 @@
         """
     
         def perform(self, executioncontext, frame):
    -        from pypy.module.cpyext.pyobject import PyObject, _Py_Dealloc
    +        from pypy.module.cpyext.pyobject import PyObject, decref
     
             while True:
                 py_obj = rawrefcount.next_dead(PyObject)
                 if not py_obj:
                     break
    -            _Py_Dealloc(self.space, py_obj)
    +            decref(self.space, py_obj)
    diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py
    --- a/pypy/module/cpyext/test/test_unicodeobject.py
    +++ b/pypy/module/cpyext/test/test_unicodeobject.py
    @@ -75,7 +75,6 @@
             assert len(s) == 4
             assert s == u'a\xe9\x00c'
     
    -
         def test_hash(self):
             module = self.import_extension('foo', [
                 ("test_hash", "METH_VARARGS",
    diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py
    --- a/pypy/module/cpyext/tupleobject.py
    +++ b/pypy/module/cpyext/tupleobject.py
    @@ -59,7 +59,8 @@
         py_tup = rffi.cast(PyTupleObject, py_obj)
     
         py_tup.c_ob_item = lltype.malloc(ObjectItems, length,
    -                                     flavor='raw', zero=True)
    +                                     flavor='raw', zero=True,
    +                                     add_memory_pressure=True)
         py_tup.c_ob_size = length
         return py_tup
     
    @@ -70,7 +71,8 @@
         """
         items_w = space.fixedview(w_obj)
         l = len(items_w)
    -    p = lltype.malloc(ObjectItems, l, flavor='raw')
    +    p = lltype.malloc(ObjectItems, l, flavor='raw',
    +                      add_memory_pressure=True)
         i = 0
         try:
             while i < l:
    @@ -177,7 +179,8 @@
         ref = rffi.cast(PyTupleObject, ref)
         oldsize = ref.c_ob_size
         oldp = ref.c_ob_item
    -    newp = lltype.malloc(ObjectItems, newsize, zero=True, flavor='raw')
    +    newp = lltype.malloc(ObjectItems, newsize, zero=True, flavor='raw',
    +                         add_memory_pressure=True)
         try:
             if oldsize < newsize:
                 to_cp = oldsize
    diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
    --- a/pypy/module/cpyext/typeobject.py
    +++ b/pypy/module/cpyext/typeobject.py
    @@ -502,7 +502,8 @@
                 Py_DecRef(space, w_metatype)
     
         heaptype = lltype.malloc(PyHeapTypeObject.TO,
    -                             flavor='raw', zero=True)
    +                             flavor='raw', zero=True,
    +                             add_memory_pressure=True)
         pto = heaptype.c_ht_type
         pto.c_ob_refcnt = 1
         pto.c_ob_pypy_link = 0
    diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py
    --- a/pypy/module/cpyext/unicodeobject.py
    +++ b/pypy/module/cpyext/unicodeobject.py
    @@ -22,7 +22,7 @@
     PyUnicodeObjectStruct = lltype.ForwardReference()
     PyUnicodeObject = lltype.Ptr(PyUnicodeObjectStruct)
     PyUnicodeObjectFields = (PyObjectFields +
    -    (("str", rffi.CWCHARP), ("length", Py_ssize_t),
    +    (("str", rffi.CWCHARP), ("size", Py_ssize_t),
          ("hash", rffi.LONG), ("defenc", PyObject)))
     cpython_struct("PyUnicodeObject", PyUnicodeObjectFields, PyUnicodeObjectStruct)
     
    @@ -43,31 +43,6 @@
     
     Py_UNICODE = lltype.UniChar
     
    -def unicode_alloc(space, w_type, length):
    -    '''
    -    see comments with string_alloc in stringobject.py
    -    '''
    -    XXX
    -    from pypy.module.cpyext.typeobjectdefs import PyTypeObjectPtr
    -    pytype = as_pyobj(space, w_type)
    -    pytype = rffi.cast(PyTypeObjectPtr, pytype)
    -    assert pytype
    -    size = pytype.c_tp_basicsize
    -    buf = lltype.malloc(rffi.VOIDP.TO, size,
    -                        flavor='raw', zero=True)
    -    py_uni = rffi.cast(PyUnicodeObject, buf)
    -    py_uni.c_ob_refcnt = 1
    -    py_uni.c_ob_type = pytype
    -    if length > 0:
    -        py_uni.c_str = lltype.malloc(rffi.CWCHARP.TO, length+1,
    -                                        flavor='raw', zero=True)
    -        py_uni.c_length = length
    -        s = rffi.wcharpsize2unicode(py_uni.c_str, py_uni.c_length)
    -        w_obj = space.wrap(s)
    -        py_uni.c_hash = space.hash_w(w_obj)
    -        track_reference(space, rffi.cast(PyObject, py_uni), w_obj)
    -    return rffi.cast(PyObject, py_uni)
    -
     def new_empty_unicode(space, length):
         """
         Allocate a PyUnicodeObject and its buffer, but without a corresponding
    @@ -79,9 +54,10 @@
         py_uni = rffi.cast(PyUnicodeObject, py_obj)
     
         buflen = length + 1
    -    py_uni.c_length = length
    +    py_uni.c_size = length
         py_uni.c_str = lltype.malloc(rffi.CWCHARP.TO, buflen,
    -                                    flavor='raw', zero=True)
    +                                 flavor='raw', zero=True,
    +                                 add_memory_pressure=True)
         py_uni.c_hash = -1
         py_uni.c_defenc = lltype.nullptr(PyObject.TO)
         return py_uni
    @@ -89,7 +65,7 @@
     def unicode_attach(space, py_obj, w_obj):
         "Fills a newly allocated PyUnicodeObject with a unicode string"
         py_unicode = rffi.cast(PyUnicodeObject, py_obj)
    -    py_unicode.c_length = len(space.unicode_w(w_obj))
    +    py_unicode.c_size = len(space.unicode_w(w_obj))
         py_unicode.c_str = lltype.nullptr(rffi.CWCHARP.TO)
         py_unicode.c_hash = space.hash_w(w_obj)
         py_unicode.c_defenc = lltype.nullptr(PyObject.TO)
    @@ -100,7 +76,7 @@
         be modified after this call.
         """
         py_uni = rffi.cast(PyUnicodeObject, py_obj)
    -    s = rffi.wcharpsize2unicode(py_uni.c_str, py_uni.c_length)
    +    s = rffi.wcharpsize2unicode(py_uni.c_str, py_uni.c_size)
         w_obj = space.wrap(s)
         py_uni.c_hash = space.hash_w(w_obj)
         track_reference(space, py_obj, w_obj)
    @@ -259,7 +235,7 @@
     def PyUnicode_GetSize(space, ref):
         if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_unicode:
             ref = rffi.cast(PyUnicodeObject, ref)
    -        return ref.c_length
    +        return ref.c_size
         else:
             w_obj = from_ref(space, ref)
             return space.len_w(w_obj)
    @@ -274,11 +250,11 @@
         to make sure that the wchar_t string is 0-terminated in case this is
         required by the application."""
         c_str = PyUnicode_AS_UNICODE(space, rffi.cast(PyObject, ref))
    -    c_length = ref.c_length
    +    c_size = ref.c_size
     
         # If possible, try to copy the 0-termination as well
    -    if size > c_length:
    -        size = c_length + 1
    +    if size > c_size:
    +        size = c_size + 1
     
     
         i = 0
    @@ -286,8 +262,8 @@
             buf[i] = c_str[i]
             i += 1
     
    -    if size > c_length:
    -        return c_length
    +    if size > c_size:
    +        return c_size
         else:
             return size
     
    @@ -493,7 +469,7 @@
             ref[0] = lltype.nullptr(PyObject.TO)
             raise
         to_cp = newsize
    -    oldsize = py_uni.c_length
    +    oldsize = py_uni.c_size
         if oldsize < newsize:
             to_cp = oldsize
         for i in range(to_cp):
    diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py
    --- a/pypy/module/imp/test/test_import.py
    +++ b/pypy/module/imp/test/test_import.py
    @@ -109,7 +109,7 @@
             import marshal, stat, struct, os, imp
             code = py.code.Source(p.join("x.py").read()).compile()
             s3 = marshal.dumps(code)
     -        s2 = struct.pack("i", os.stat(str(p.join("x.py")))[stat.ST_MTIME])
     +        s2 = struct.pack("<i", os.stat(str(p.join("x.py")))[stat.ST_MTIME])
     diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py
     --- a/pypy/module/micronumpy/ndarray.py
     +++ b/pypy/module/micronumpy/ndarray.py
     -                if axis < 0 or axis >= self.ndims():
    -                    raise oefmt(space.w_ValueError, "invalid axis for this array")
    -                if axes_seen[axis] is True:
    -                    raise oefmt(space.w_ValueError, "repeated axis in transpose")
    -                axes.append(axis)
    -                axes_seen[axis] = True
    -            return self.descr_get_transpose(space, axes)
    +            if len(args_w) > 1:
    +                axes = args_w
    +            else:  # Iterable in the only argument (len(arg_w) == 1 and arg_w[0] is not None)
    +                axes = space.fixedview(args_w[0])
     
    +        axes = self._checked_axes(axes, space)
    +        return self.descr_get_transpose(space, axes)
    +
    +    def _checked_axes(self, axes_raw, space):
    +        if len(axes_raw) != self.ndims():
    +            raise oefmt(space.w_ValueError, "axes don't match array")
    +        axes = []
    +        axes_seen = [False] * self.ndims()
    +        for elem in axes_raw:
    +            try:
    +                axis = support.index_w(space, elem)
    +            except OperationError:
    +                raise oefmt(space.w_TypeError, "an integer is required")
    +            if axis < 0 or axis >= self.ndims():
    +                raise oefmt(space.w_ValueError, "invalid axis for this array")
    +            if axes_seen[axis] is True:
    +                raise oefmt(space.w_ValueError, "repeated axis in transpose")
    +            axes.append(axis)
    +            axes_seen[axis] = True
    +        return axes
     
         @unwrap_spec(axis1=int, axis2=int)
         def descr_swapaxes(self, space, axis1, axis2):
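     The refactoring above routes both calling conventions of transpose() through the new
     _checked_axes() helper; at application level the accepted spellings look like this
     (mirroring the tests added below):
     
         import numpy as np
     
         a = np.arange(24).reshape(2, 3, 4)
         # separate integers, a tuple, or a list are all accepted
         assert a.transpose(1, 2, 0).shape == (3, 4, 2)
         assert a.transpose((1, 2, 0)).shape == (3, 4, 2)
         assert a.transpose([1, 2, 0]).shape == (3, 4, 2)
     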
    diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py
    --- a/pypy/module/micronumpy/test/test_ndarray.py
    +++ b/pypy/module/micronumpy/test/test_ndarray.py
    @@ -2960,6 +2960,36 @@
             assert (a.transpose() == b).all()
             assert (a.transpose(None) == b).all()
     
    +    def test_transpose_arg_tuple(self):
    +        import numpy as np
    +        a = np.arange(24).reshape(2, 3, 4)
    +        transpose_args = a.transpose(1, 2, 0)
    +
    +        transpose_test = a.transpose((1, 2, 0))
    +
    +        assert transpose_test.shape == (3, 4, 2)
    +        assert (transpose_args == transpose_test).all()
    +
    +    def test_transpose_arg_list(self):
    +        import numpy as np
    +        a = np.arange(24).reshape(2, 3, 4)
    +        transpose_args = a.transpose(1, 2, 0)
    +
    +        transpose_test = a.transpose([1, 2, 0])
    +
    +        assert transpose_test.shape == (3, 4, 2)
    +        assert (transpose_args == transpose_test).all()
    +
    +    def test_transpose_arg_array(self):
    +        import numpy as np
    +        a = np.arange(24).reshape(2, 3, 4)
    +        transpose_args = a.transpose(1, 2, 0)
    +
    +        transpose_test = a.transpose(np.array([1, 2, 0]))
    +
    +        assert transpose_test.shape == (3, 4, 2)
    +        assert (transpose_args == transpose_test).all()
    +
         def test_transpose_error(self):
             import numpy as np
             a = np.arange(24).reshape(2, 3, 4)
    @@ -2968,6 +2998,11 @@
             raises(ValueError, a.transpose, 1, 0, 1)
             raises(TypeError, a.transpose, 1, 0, '2')
     
    +    def test_transpose_unexpected_argument(self):
    +        import numpy as np
    +        a = np.array([[1, 2], [3, 4], [5, 6]])
    +        raises(TypeError, 'a.transpose(axes=(1,2,0))')
    +
         def test_flatiter(self):
             from numpy import array, flatiter, arange, zeros
             a = array([[10, 30], [40, 60]])
    @@ -3439,7 +3474,7 @@
     
         def test_index_int(self):
             import numpy as np
    -        a = np.array([10, 20, 30])
    +        a = np.array([10, 20, 30], dtype='int64')
             res = a[np.int64(1)]
             assert isinstance(res, np.int64)
             assert res == 20
    diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py
    --- a/pypy/module/select/test/test_select.py
    +++ b/pypy/module/select/test/test_select.py
    @@ -287,8 +287,7 @@
                 t = thread.start_new_thread(pollster.poll, ())
                 try:
                     time.sleep(0.3)
    -                # TODO restore print '', if this is not the reason
    -                for i in range(5): print 'release gil select'  # to release GIL untranslated
    +                for i in range(5): print '',  # to release GIL untranslated
                     # trigger ufds array reallocation
                     for fd in rfds:
                         pollster.unregister(fd)
    diff --git a/pypy/module/thread/test/test_lock.py b/pypy/module/thread/test/test_lock.py
    --- a/pypy/module/thread/test/test_lock.py
    +++ b/pypy/module/thread/test/test_lock.py
    @@ -3,7 +3,7 @@
     import sys, os
     from pypy.module.thread.test.support import GenericTestThread
     from rpython.translator.c.test.test_genc import compile
    -import platform
    +from platform import machine
     
     
     class AppTestLock(GenericTestThread):
    @@ -64,8 +64,7 @@
             else:
                 assert self.runappdirect, "missing lock._py3k_acquire()"
     
    -    @py.test.mark.xfail(platform.machine() == 's390x',
    -                        reason='may fail this test under heavy load')
    +    @py.test.mark.xfail(machine()=='s390x', reason='may fail under heavy load')
         def test_ping_pong(self):
             # The purpose of this test is that doing a large number of ping-pongs
             # between two threads, using locks, should complete in a reasonable
    diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
    --- a/pypy/objspace/std/mapdict.py
    +++ b/pypy/objspace/std/mapdict.py
    @@ -144,14 +144,6 @@
                 cache[name, index] = attr
             return attr
     
    -    @jit.elidable
    -    def _get_cache_attr(self, name, index):
    -        key = name, index
    -        # this method is not actually elidable, but it's fine anyway
    -        if self.cache_attrs is not None:
    -            return self.cache_attrs.get(key, None)
    -        return None
    -
         def add_attr(self, obj, name, index, w_value):
             self._reorder_and_add(obj, name, index, w_value)
             if not jit.we_are_jitted():
    diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh
    --- a/pypy/tool/release/repackage.sh
    +++ b/pypy/tool/release/repackage.sh
    @@ -1,9 +1,9 @@
     # Edit these appropriately before running this script
     maj=5
     min=0
    -rev=0
    +rev=1
     branchname=release-$maj.x  # ==OR== release-$maj.$min.x
    -tagname=release-$maj.$min  # ==OR== release-$maj.$min.$rev
    +tagname=release-$maj.$min.$rev
     # This script will download latest builds from the buildmaster, rename the top
     # level directory, and repackage ready to be uploaded to bitbucket. It will also
     # download source, assuming a tag for the release already exists, and repackage them.
    diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py
    --- a/rpython/jit/backend/detect_cpu.py
    +++ b/rpython/jit/backend/detect_cpu.py
    @@ -66,6 +66,7 @@
                 'x86_64': MODEL_X86,
                 'amd64': MODEL_X86,    # freebsd
                 'AMD64': MODEL_X86,    # win64
    +            'armv8l': MODEL_ARM,   # 32-bit ARMv8
                 'armv7l': MODEL_ARM,
                 'armv6l': MODEL_ARM,
                 'arm': MODEL_ARM,      # freebsd
    diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py
    --- a/rpython/jit/codewriter/jtransform.py
    +++ b/rpython/jit/codewriter/jtransform.py
    @@ -688,6 +688,10 @@
             ARRAY = op.args[0].concretetype.TO
             if self._array_of_voids(ARRAY):
                 return []
    +        if isinstance(ARRAY, lltype.FixedSizeArray):
    +            raise NotImplementedError(
    +                "%r uses %r, which is not supported by the JIT codewriter"
    +                % (self.graph, ARRAY))
             if op.args[0] in self.vable_array_vars:     # for virtualizables
                 vars = self.vable_array_vars[op.args[0]]
                 (v_base, arrayfielddescr, arraydescr) = vars
    @@ -718,6 +722,10 @@
             ARRAY = op.args[0].concretetype.TO
             if self._array_of_voids(ARRAY):
                 return []
    +        if isinstance(ARRAY, lltype.FixedSizeArray):
    +            raise NotImplementedError(
    +                "%r uses %r, which is not supported by the JIT codewriter"
    +                % (self.graph, ARRAY))
             if op.args[0] in self.vable_array_vars:     # for virtualizables
                 vars = self.vable_array_vars[op.args[0]]
                 (v_base, arrayfielddescr, arraydescr) = vars
    @@ -784,11 +792,13 @@
                 return []
             # check for _immutable_fields_ hints
             immut = v_inst.concretetype.TO._immutable_field(c_fieldname.value)
    +        need_live = False
             if immut:
                 if (self.callcontrol is not None and
                     self.callcontrol.could_be_green_field(v_inst.concretetype.TO,
                                                           c_fieldname.value)):
                     pure = '_greenfield'
    +                need_live = True
                 else:
                     pure = '_pure'
             else:
    @@ -815,10 +825,12 @@
                 descr1 = self.cpu.fielddescrof(
                     v_inst.concretetype.TO,
                     quasiimmut.get_mutate_field_name(c_fieldname.value))
    -            op1 = [SpaceOperation('-live-', [], None),
    +            return [SpaceOperation('-live-', [], None),
                        SpaceOperation('record_quasiimmut_field',
                                       [v_inst, descr, descr1], None),
                        op1]
    +        if need_live:
    +            return [SpaceOperation('-live-', [], None), op1]
             return op1
     
         def rewrite_op_setfield(self, op, override_type=None):
    diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py
    --- a/rpython/jit/codewriter/test/test_jtransform.py
    +++ b/rpython/jit/codewriter/test/test_jtransform.py
    @@ -1012,7 +1012,8 @@
         v1 = varoftype(lltype.Ptr(S))
         v2 = varoftype(lltype.Char)
         op = SpaceOperation('getfield', [v1, Constant('x', lltype.Void)], v2)
    -    op1 = Transformer(FakeCPU(), FakeCC()).rewrite_operation(op)
    +    op0, op1 = Transformer(FakeCPU(), FakeCC()).rewrite_operation(op)
    +    assert op0.opname == '-live-'
         assert op1.opname == 'getfield_gc_i_greenfield'
         assert op1.args == [v1, ('fielddescr', S, 'x')]
         assert op1.result == v2
    @@ -1315,6 +1316,21 @@
         tr = Transformer(None, None)
         py.test.raises(NotImplementedError, tr.rewrite_operation, op)
     
    +def test_no_fixedsizearray():
    +    A = lltype.FixedSizeArray(lltype.Signed, 5)
    +    v_x = varoftype(lltype.Ptr(A))
    +    op = SpaceOperation('getarrayitem', [v_x, Constant(0, lltype.Signed)],
    +                        varoftype(lltype.Signed))
    +    tr = Transformer(None, None)
    +    tr.graph = 'demo'
    +    py.test.raises(NotImplementedError, tr.rewrite_operation, op)
    +    op = SpaceOperation('setarrayitem', [v_x, Constant(0, lltype.Signed),
    +                                              Constant(42, lltype.Signed)],
    +                        varoftype(lltype.Void))
    +    e = py.test.raises(NotImplementedError, tr.rewrite_operation, op)
    +    assert str(e.value) == (
    +        "'demo' uses %r, which is not supported by the JIT codewriter" % (A,))
    +
     def _test_threadlocalref_get(loop_inv):
         from rpython.rlib.rthread import ThreadLocalField
         tlfield = ThreadLocalField(lltype.Signed, 'foobar_test_',
    diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
    --- a/rpython/memory/gc/incminimark.py
    +++ b/rpython/memory/gc/incminimark.py
    @@ -2929,10 +2929,19 @@
                 ll_assert(rc < int(REFCNT_FROM_PYPY_LIGHT * 0.99),
                           "refcount underflow from REFCNT_FROM_PYPY_LIGHT?")
                 rc -= REFCNT_FROM_PYPY
    -            self._pyobj(pyobject).ob_refcnt = rc
                 self._pyobj(pyobject).ob_pypy_link = 0
                 if rc == 0:
                     self.rrc_dealloc_pending.append(pyobject)
    +                # an object with refcnt == 0 cannot stay around waiting
    +                # for its deallocator to be called.  Some code (lxml)
    +                # expects that tp_dealloc is called immediately when
    +                # the refcnt drops to 0.  If it isn't, we get some
    +                # uncleared raw pointer that can still be used to access
    +                # the object; but (PyObject *)raw_pointer is then bogus
    +                # because after a Py_INCREF()/Py_DECREF() on it, its
    +                # tp_dealloc is also called!
    +                rc = 1
    +            self._pyobj(pyobject).ob_refcnt = rc
         _rrc_free._always_inline_ = True
     
         def rrc_major_collection_trace(self):
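     A plain-Python sketch of the "park dead pyobjs at refcnt 1" idea described in the
     comment above; the class, constant and list below are illustrative stand-ins, not the
     real GC structures:
     
         REFCNT_FROM_PYPY = 1 << 40          # stand-in for the real marker constant
     
         class FakePyObj(object):
             def __init__(self, refcnt):
                 self.ob_refcnt = refcnt
                 self.ob_pypy_link = 1
     
         dealloc_pending = []
     
         def rrc_free_sketch(obj):
             rc = obj.ob_refcnt - REFCNT_FROM_PYPY
             obj.ob_pypy_link = 0
             if rc == 0:
                 # keep the object at refcnt 1 until its deallocator really runs, so a
                 # stray Py_INCREF()/Py_DECREF() pair cannot trigger a second dealloc
                 dealloc_pending.append(obj)
                 rc = 1
             obj.ob_refcnt = rc
     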
    diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py
    --- a/rpython/memory/gc/test/test_rawrefcount.py
    +++ b/rpython/memory/gc/test/test_rawrefcount.py
    @@ -174,7 +174,7 @@
             p1 = check_alive(0)
             self._collect(major=True, expected_trigger=1)
             py.test.raises(RuntimeError, "p1.x")            # dead
    -        assert r1.ob_refcnt == 0
    +        assert r1.ob_refcnt == 1       # in the pending list
             assert r1.ob_pypy_link == 0
             assert self.gc.rawrefcount_next_dead() == r1addr
             assert self.gc.rawrefcount_next_dead() == llmemory.NULL
    @@ -197,7 +197,7 @@
             assert p1.x == 42
             self._collect(major=True, expected_trigger=1)
             py.test.raises(RuntimeError, "p1.x")            # dead
    -        assert r1.ob_refcnt == 0
    +        assert r1.ob_refcnt == 1
             assert r1.ob_pypy_link == 0
             assert self.gc.rawrefcount_next_dead() == r1addr
             self.gc.check_no_more_rawrefcount_state()
    @@ -214,7 +214,7 @@
             else:
                 self._collect(major=False, expected_trigger=1)
             py.test.raises(RuntimeError, "p1.x")            # dead
    -        assert r1.ob_refcnt == 0
    +        assert r1.ob_refcnt == 1
             assert r1.ob_pypy_link == 0
             assert self.gc.rawrefcount_next_dead() == r1addr
             self.gc.check_no_more_rawrefcount_state()
    @@ -252,7 +252,7 @@
                 self._collect(major=True, expected_trigger=1)
             else:
                 self._collect(major=False, expected_trigger=1)
    -        assert r1.ob_refcnt == 0     # refcnt dropped to 0
    +        assert r1.ob_refcnt == 1     # refcnt 1, in the pending list
             assert r1.ob_pypy_link == 0  # detached
             assert self.gc.rawrefcount_next_dead() == r1addr
             self.gc.check_no_more_rawrefcount_state()
    @@ -277,7 +277,7 @@
             assert self.trigger == []
             self._collect(major=True, expected_trigger=1)
             py.test.raises(RuntimeError, "p1.x")            # dead
    -        assert r1.ob_refcnt == 0
    +        assert r1.ob_refcnt == 1
             assert r1.ob_pypy_link == 0
             assert self.gc.rawrefcount_next_dead() == r1addr
             self.gc.check_no_more_rawrefcount_state()
    diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py
    --- a/rpython/rlib/jit.py
    +++ b/rpython/rlib/jit.py
    @@ -1059,6 +1059,14 @@
         of JIT running like JIT loops compiled, aborts etc.
         An instance of this class will be available as policy.jithookiface.
         """
    +    # WARNING: You should make a single prebuilt instance of a subclass
    +    # of this class.  You can, before translation, initialize some
    +    # attributes on this instance, and then read or change these
    +    # attributes inside the methods of the subclass.  But this prebuilt
    +    # instance *must not* be seen during the normal annotation/rtyping
    +    # of the program!  A line like ``pypy_hooks.foo = ...`` must not
    +    # appear inside your interpreter's RPython code.
    +
         def on_abort(self, reason, jitdriver, greenkey, greenkey_repr, logops, operations):
             """ A hook called each time a loop is aborted with jitdriver and
             greenkey where it started, reason is a string why it got aborted
    diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py
    --- a/rpython/rlib/rawrefcount.py
    +++ b/rpython/rlib/rawrefcount.py
    @@ -77,6 +77,12 @@
         return p
     
     def next_dead(OB_PTR_TYPE):
    +    """NOT_RPYTHON.  When the GC runs, it finds some pyobjs to be dead
    +    but cannot immediately dispose of them (it doesn't know how to call
    +    e.g. tp_dealloc(), and anyway calling it immediately would cause all
    +    sorts of bugs).  So instead, it stores them in an internal list,
    +    initially with refcnt == 1.  This pops the next item off this list.
    +    """
         if len(_d_list) == 0:
             return lltype.nullptr(OB_PTR_TYPE.TO)
         ob = _d_list.pop()
    @@ -141,6 +147,7 @@
                     ob.c_ob_refcnt -= REFCNT_FROM_PYPY
                     ob.c_ob_pypy_link = 0
                     if ob.c_ob_refcnt == 0:
    +                    ob.c_ob_refcnt = 1
                         _d_list.append(ob)
                 return None
     
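     The docstring added above describes the producer side; a hedged sketch of the
     (untranslated) consumer side that drains the pending list, where call_tp_dealloc is a
     placeholder for whatever actually invokes tp_dealloc:
     
         from rpython.rlib import rawrefcount
     
         def flush_dead_objects(OB_PTR_TYPE, call_tp_dealloc):
             while True:
                 ob = rawrefcount.next_dead(OB_PTR_TYPE)
                 if not ob:
                     break
                 # ob arrives here with ob_refcnt == 1, as explained in the docstring
                 call_tp_dealloc(ob)
     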
    diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py
    --- a/rpython/rlib/rposix.py
    +++ b/rpython/rlib/rposix.py
    @@ -22,6 +22,22 @@
         from rpython.rlib import rwin32
         from rpython.rlib.rwin32file import make_win32_traits
     
    +class CConfig:
    +    _compilation_info_ = ExternalCompilationInfo(
    +        includes=['sys/stat.h',
    +                  'unistd.h',
    +                  'fcntl.h'],
    +    )
    +    for _name in """fchdir fchmod fchmodat fchown fchownat fexecve fdopendir
    +                    fpathconf fstat fstatat fstatvfs ftruncate futimens futimes
    +                    futimesat linkat lchflags lchmod lchown lstat lutimes
    +                    mkdirat mkfifoat mknodat openat readlinkat renameat
    +                    symlinkat unlinkat utimensat""".split():
    +        locals()['HAVE_%s' % _name.upper()] = rffi_platform.Has(_name)
    +cConfig = rffi_platform.configure(CConfig)
    +globals().update(cConfig)
    +
    +
     class CConstantErrno(CConstant):
         # these accessors are used when calling get_errno() or set_errno()
         # on top of CPython
    @@ -1024,6 +1040,13 @@
             if not win32traits.MoveFile(path1, path2):
                 raise rwin32.lastSavedWindowsError()
     
    + at specialize.argtype(0, 1)
    +def replace(path1, path2):
    +    if os.name == 'nt':
    +        raise NotImplementedError(
    +            'On windows, os.replace() should overwrite the destination')
    +    return rename(path1, path2)
    +
     #___________________________________________________________________
     
     c_mkfifo = external('mkfifo', [rffi.CCHARP, rffi.MODE_T], rffi.INT,
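     A short usage sketch for the replace() helper added above (on POSIX it simply forwards
     to rename(), which already overwrites the destination), plus a reminder that the HAVE_*
     booleans computed by the CConfig block end up as module-level constants via
     globals().update(cConfig); both functions below are illustrative, not part of rposix:
     
         from rpython.rlib import rposix
     
         def atomic_overwrite(tmp_path, final_path):
             # both str and unicode paths work thanks to @specialize.argtype(0, 1)
             rposix.replace(tmp_path, final_path)
     
         def have_fstatat():
             # False on platforms where the configure step did not find fstatat()
             return rposix.HAVE_FSTATAT
     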
    diff --git a/rpython/rlib/rtime.py b/rpython/rlib/rtime.py
    --- a/rpython/rlib/rtime.py
    +++ b/rpython/rlib/rtime.py
    @@ -9,7 +9,6 @@
     from rpython.rtyper.tool import rffi_platform
     from rpython.rtyper.lltypesystem import rffi, lltype
     from rpython.rlib.objectmodel import register_replacement_for
    -from rpython.rlib import jit
     from rpython.rlib.rarithmetic import intmask, UINT_MAX
     from rpython.rlib import rposix
     
    @@ -149,13 +148,12 @@
     
     if _WIN32:
         # hacking to avoid LARGE_INTEGER which is a union...
    -    A = lltype.FixedSizeArray(lltype.SignedLongLong, 1)
         QueryPerformanceCounter = external(
    -        'QueryPerformanceCounter', [lltype.Ptr(A)], lltype.Void,
    -        releasegil=False)
    +        'QueryPerformanceCounter', [rffi.CArrayPtr(lltype.SignedLongLong)],
    +         lltype.Void, releasegil=False)
         QueryPerformanceFrequency = external(
    -        'QueryPerformanceFrequency', [lltype.Ptr(A)], rffi.INT,
    -        releasegil=False)
    +        'QueryPerformanceFrequency', [rffi.CArrayPtr(lltype.SignedLongLong)], 
    +        rffi.INT, releasegil=False)
         class State(object):
             divisor = 0.0
             counter_start = 0
    @@ -170,19 +168,16 @@
                                    [lltype.Signed, lltype.Ptr(TIMESPEC)],
                                    rffi.INT, releasegil=False,
                                    compilation_info=eci_with_lrt)
    -else:
    +if need_rusage:
         RUSAGE = RUSAGE
         RUSAGE_SELF = RUSAGE_SELF or 0
         c_getrusage = external('getrusage',
                                [rffi.INT, lltype.Ptr(RUSAGE)],
    -                           lltype.Void,
    +                           rffi.INT,
                                releasegil=False)
     
    - at replace_time_function('clock')
    - at jit.dont_look_inside  # the JIT doesn't like FixedSizeArray
    -def clock():
    -    if _WIN32:
    -        a = lltype.malloc(A, flavor='raw')
    +def win_perf_counter():
    +    with lltype.scoped_alloc(rffi.CArray(rffi.lltype.SignedLongLong), 1) as a:
             if state.divisor == 0.0:
                 QueryPerformanceCounter(a)
                 state.counter_start = a[0]
    @@ -190,8 +185,12 @@
                 state.divisor = float(a[0])
             QueryPerformanceCounter(a)
             diff = a[0] - state.counter_start
    -        lltype.free(a, flavor='raw')
    -        return float(diff) / state.divisor
    +    return float(diff) / state.divisor
    +
    + at replace_time_function('clock')
    +def clock():
    +    if _WIN32:
    +        return win_perf_counter()
         elif CLOCK_PROCESS_CPUTIME_ID is not None:
             with lltype.scoped_alloc(TIMESPEC) as a:
                 c_clock_gettime(CLOCK_PROCESS_CPUTIME_ID, a)
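     A plain-Python sketch of the lazy initialisation performed by win_perf_counter()
     above; query_counter and query_frequency are hypothetical stand-ins for
     QueryPerformanceCounter and QueryPerformanceFrequency:
     
         class _State(object):
             divisor = 0.0
             counter_start = 0
     
         _state = _State()
     
         def perf_clock(query_counter, query_frequency):
             if _state.divisor == 0.0:
                 # first call: remember the start value and the ticks-per-second divisor
                 _state.counter_start = query_counter()
                 _state.divisor = float(query_frequency())
             diff = query_counter() - _state.counter_start
             return float(diff) / _state.divisor
     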
    diff --git a/rpython/rlib/rvmprof/src/vmprof_common.h b/rpython/rlib/rvmprof/src/vmprof_common.h
    --- a/rpython/rlib/rvmprof/src/vmprof_common.h
    +++ b/rpython/rlib/rvmprof/src/vmprof_common.h
    @@ -24,14 +24,14 @@
         char padding[sizeof(long) - 1];
         char marker;
         long count, depth;
    -    void *stack[];
    +    intptr_t stack[];
     } prof_stacktrace_s;
     
     
     RPY_EXTERN
     char *vmprof_init(int fd, double interval, char *interp_name)
     {
    -    if (interval < 1e-6 || interval >= 1.0)
    +    if (!(interval >= 1e-6 && interval < 1.0))   /* also if it is NaN */
             return "bad value for 'interval'";
         prepare_interval_usec = (int)(interval * 1000000.0);
     
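     The rewritten guard above also rejects NaN, because every comparison with NaN is
     false; the same logic expressed in Python:
     
         def interval_is_bad(interval):
             # equivalent of !(interval >= 1e-6 && interval < 1.0)
             return not (1e-6 <= interval < 1.0)
     
         assert interval_is_bad(float("nan"))      # NaN never satisfies the range check
         assert interval_is_bad(0.0) and interval_is_bad(1.0)
         assert not interval_is_bad(0.01)          # a sane profiling interval passes
     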
    diff --git a/rpython/rlib/rvmprof/src/vmprof_config.h b/rpython/rlib/rvmprof/src/vmprof_config.h
    --- a/rpython/rlib/rvmprof/src/vmprof_config.h
    +++ b/rpython/rlib/rvmprof/src/vmprof_config.h
    @@ -1,6 +1,10 @@
     #define HAVE_SYS_UCONTEXT_H
    -#if defined(__FreeBSD__)
    -#define PC_FROM_UCONTEXT uc_mcontext.mc_rip
    +#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    +  #ifdef __i386__
    +    #define PC_FROM_UCONTEXT uc_mcontext.mc_eip
    +  #else
    +    #define PC_FROM_UCONTEXT uc_mcontext.mc_rip
    +  #endif
     #elif defined( __APPLE__)
       #if ((ULONG_MAX) == (UINT_MAX))
         #define PC_FROM_UCONTEXT uc_mcontext->__ss.__eip
    @@ -8,10 +12,10 @@
         #define PC_FROM_UCONTEXT uc_mcontext->__ss.__rip
       #endif
     #elif defined(__arm__)
    -#define PC_FROM_UCONTEXT uc_mcontext.arm_ip
    +  #define PC_FROM_UCONTEXT uc_mcontext.arm_ip
     #elif defined(__linux) && defined(__i386) && defined(__GNUC__)
    -#define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_EIP]
    +  #define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_EIP]
     #else
    -/* linux, gnuc */
    -#define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_RIP]
    +  /* linux, gnuc */
    +  #define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_RIP]
     #endif
    diff --git a/rpython/rlib/rvmprof/src/vmprof_main_win32.h b/rpython/rlib/rvmprof/src/vmprof_main_win32.h
    --- a/rpython/rlib/rvmprof/src/vmprof_main_win32.h
    +++ b/rpython/rlib/rvmprof/src/vmprof_main_win32.h
    @@ -101,7 +101,7 @@
         depth = get_stack_trace(p->vmprof_tl_stack,
                          stack->stack, MAX_STACK_DEPTH-2, ctx.Eip);
         stack->depth = depth;
    -    stack->stack[depth++] = (void*)p->thread_ident;
    +    stack->stack[depth++] = p->thread_ident;
         stack->count = 1;
         stack->marker = MARKER_STACKTRACE;
         ResumeThread(hThread);
    diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py
    --- a/rpython/rlib/test/test_rawrefcount.py
    +++ b/rpython/rlib/test/test_rawrefcount.py
    @@ -116,7 +116,7 @@
             assert rawrefcount.next_dead(PyObject) == lltype.nullptr(PyObjectS)
             assert rawrefcount._o_list == []
             assert wr_p() is None
    -        assert ob.c_ob_refcnt == 0
    +        assert ob.c_ob_refcnt == 1       # from the pending list
             assert ob.c_ob_pypy_link == 0
             lltype.free(ob, flavor='raw')
     
    @@ -173,7 +173,7 @@
             assert rawrefcount._d_list == [ob]
             assert rawrefcount._p_list == []
             assert wr_p() is None
    -        assert ob.c_ob_refcnt == 0
    +        assert ob.c_ob_refcnt == 1       # from _d_list
             assert ob.c_ob_pypy_link == 0
             lltype.free(ob, flavor='raw')
     
    diff --git a/rpython/tool/runsubprocess.py b/rpython/tool/runsubprocess.py
    --- a/rpython/tool/runsubprocess.py
    +++ b/rpython/tool/runsubprocess.py
    @@ -20,6 +20,8 @@
     def _run(executable, args, env, cwd):
         # note that this function can be *overridden* below
         # in some cases!
    +    if sys.platform == 'win32':
    +        executable = executable.replace('/','\\')
         if isinstance(args, str):
             args = str(executable) + ' ' + args
             shell = True
    diff --git a/rpython/translator/c/src/thread.h b/rpython/translator/c/src/thread.h
    --- a/rpython/translator/c/src/thread.h
    +++ b/rpython/translator/c/src/thread.h
    @@ -42,13 +42,13 @@
     RPY_EXTERN long rpy_fastgil;
     
     static inline void _RPyGilAcquire(void) {
    -    long old_fastgil = lock_test_and_set(&rpy_fastgil, 1);
    +    long old_fastgil = pypy_lock_test_and_set(&rpy_fastgil, 1);
         if (old_fastgil != 0)
             RPyGilAcquireSlowPath(old_fastgil);
     }
     static inline void _RPyGilRelease(void) {
         assert(RPY_FASTGIL_LOCKED(rpy_fastgil));
    -    lock_release(&rpy_fastgil);
    +    pypy_lock_release(&rpy_fastgil);
     }
     static inline long *_RPyFetchFastGil(void) {
         return &rpy_fastgil;
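     A minimal Python sketch of the fast-path protocol behind the renamed macros; the
     test-and-set below is faked with a plain global and is not atomic, so it is
     illustrative only:
     
         rpy_fastgil = 0
     
         def fake_lock_test_and_set(new_value):
             # stands in for pypy_lock_test_and_set(); the real one is atomic
             global rpy_fastgil
             old, rpy_fastgil = rpy_fastgil, new_value
             return old
     
         def gil_acquire(slow_path):
             old = fake_lock_test_and_set(1)
             if old != 0:                 # somebody held the GIL: take the slow path
                 slow_path(old)
     
         def gil_release():
             global rpy_fastgil
             rpy_fastgil = 0              # plain store, like pypy_lock_release()
     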
    diff --git a/rpython/translator/c/src/thread_gil.c b/rpython/translator/c/src/thread_gil.c
    --- a/rpython/translator/c/src/thread_gil.c
    +++ b/rpython/translator/c/src/thread_gil.c
    @@ -70,7 +70,7 @@
     {
         /* Acquires the GIL.  This assumes that we already did:
     
    -          old_fastgil = lock_test_and_set(&rpy_fastgil, 1);
    +          old_fastgil = pypy_lock_test_and_set(&rpy_fastgil, 1);
          */
         if (!RPY_FASTGIL_LOCKED(old_fastgil)) {
             /* The fastgil was not previously locked: success.
    @@ -122,7 +122,7 @@
                    released.
                 */
                 if (!RPY_FASTGIL_LOCKED(rpy_fastgil)) {
    -                old_fastgil = lock_test_and_set(&rpy_fastgil, 1);
    +                old_fastgil = pypy_lock_test_and_set(&rpy_fastgil, 1);
                     if (!RPY_FASTGIL_LOCKED(old_fastgil))
                         /* yes, got a non-held value!  Now we hold it. */
                         break;
    diff --git a/rpython/translator/c/src/thread_nt.c b/rpython/translator/c/src/thread_nt.c
    --- a/rpython/translator/c/src/thread_nt.c
    +++ b/rpython/translator/c/src/thread_nt.c
    @@ -255,7 +255,7 @@
         LeaveCriticalSection(mutex);
     }
     
    -//#define lock_test_and_set(ptr, value)  see thread_nt.h
    +//#define pypy_lock_test_and_set(ptr, value)  see thread_nt.h
     #define atomic_increment(ptr)          InterlockedIncrement(ptr)
     #define atomic_decrement(ptr)          InterlockedDecrement(ptr)
     
    diff --git a/rpython/translator/c/src/thread_nt.h b/rpython/translator/c/src/thread_nt.h
    --- a/rpython/translator/c/src/thread_nt.h
    +++ b/rpython/translator/c/src/thread_nt.h
    @@ -36,8 +36,8 @@
     
     #ifdef _M_IA64
     /* On Itanium, use 'acquire' memory ordering semantics */
    -#define lock_test_and_set(ptr, value)  InterlockedExchangeAcquire(ptr, value)
    +#define pypy_lock_test_and_set(ptr, value) InterlockedExchangeAcquire(ptr,value)
     #else
    -#define lock_test_and_set(ptr, value)  InterlockedExchange(ptr, value)
    +#define pypy_lock_test_and_set(ptr, value) InterlockedExchange(ptr, value)
     #endif
    -#define lock_release(ptr)              (*((volatile long *)ptr) = 0)
    +#define pypy_lock_release(ptr)             (*((volatile long *)ptr) = 0)
    diff --git a/rpython/translator/c/src/thread_pthread.c b/rpython/translator/c/src/thread_pthread.c
    --- a/rpython/translator/c/src/thread_pthread.c
    +++ b/rpython/translator/c/src/thread_pthread.c
    @@ -37,7 +37,7 @@
     #  define THREAD_STACK_SIZE   0   /* use default stack size */
     # endif
     
    -# if (defined(__APPLE__) || defined(__FreeBSD__)) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
    +# if (defined(__APPLE__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
        /* The default stack size for new threads on OSX is small enough that
         * we'll get hard crashes instead of 'maximum recursion depth exceeded'
         * exceptions.
    @@ -85,7 +85,7 @@
     	if (tss != 0)
     		pthread_attr_setstacksize(&attrs, tss);
     #endif
    -#if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED) && !defined(__FreeBSD__)
    +#if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED) && !(defined(__FreeBSD__) || defined(__FreeBSD_kernel__))
             pthread_attr_setscope(&attrs, PTHREAD_SCOPE_SYSTEM);
     #endif
     
    @@ -551,7 +551,7 @@
         return result;
     }
     
    -//#define lock_test_and_set(ptr, value)  see thread_pthread.h
    +//#define pypy_lock_test_and_set(ptr, value)  see thread_pthread.h
     #define atomic_increment(ptr)          __sync_fetch_and_add(ptr, 1)
     #define atomic_decrement(ptr)          __sync_fetch_and_sub(ptr, 1)
     #define HAVE_PTHREAD_ATFORK            1
    diff --git a/rpython/translator/c/src/thread_pthread.h b/rpython/translator/c/src/thread_pthread.h
    --- a/rpython/translator/c/src/thread_pthread.h
    +++ b/rpython/translator/c/src/thread_pthread.h
    @@ -82,5 +82,5 @@
     void RPyThreadAfterFork(void);
     
     
    -#define lock_test_and_set(ptr, value)  __sync_lock_test_and_set(ptr, value)
    -#define lock_release(ptr)              __sync_lock_release(ptr)
    +#define pypy_lock_test_and_set(ptr, value)  __sync_lock_test_and_set(ptr, value)
    +#define pypy_lock_release(ptr)              __sync_lock_release(ptr)
    diff --git a/rpython/translator/c/src/threadlocal.c b/rpython/translator/c/src/threadlocal.c
    --- a/rpython/translator/c/src/threadlocal.c
    +++ b/rpython/translator/c/src/threadlocal.c
    @@ -15,14 +15,14 @@
     static int check_valid(void);
     
     void _RPython_ThreadLocals_Acquire(void) {
    -    while (!lock_test_and_set(&pypy_threadlocal_lock, 1)) {
    +    while (!pypy_lock_test_and_set(&pypy_threadlocal_lock, 1)) {
             /* busy loop */
         }
         assert(check_valid());
     }
     void _RPython_ThreadLocals_Release(void) {
         assert(check_valid());
    -    lock_release(&pypy_threadlocal_lock);
    +    pypy_lock_release(&pypy_threadlocal_lock);
     }
     
     
    
    From pypy.commits at gmail.com  Thu Mar 24 11:55:24 2016
    From: pypy.commits at gmail.com (mattip)
    Date: Thu, 24 Mar 2016 08:55:24 -0700 (PDT)
    Subject: [pypy-commit] pypy cpyext-ext: fix merge
    Message-ID: <56f40dec.0a301c0a.49916.407d@mx.google.com>
    
    Author: mattip 
    Branch: cpyext-ext
    Changeset: r83318:158eb5d4ebd0
    Date: 2016-03-24 00:50 +0200
    http://bitbucket.org/pypy/pypy/changeset/158eb5d4ebd0/
    
    Log:	fix merge
    
    diff --git a/pypy/module/cpyext/include/unicodeobject.h b/pypy/module/cpyext/include/unicodeobject.h
    --- a/pypy/module/cpyext/include/unicodeobject.h
    +++ b/pypy/module/cpyext/include/unicodeobject.h
    @@ -21,7 +21,7 @@
     typedef struct {
         PyObject_HEAD
         Py_UNICODE *str;
    -    Py_ssize_t length;
    +    Py_ssize_t size;
         long hash;                  /* Hash value; -1 if not set */
         PyObject *defenc;           /* (Default) Encoded version as Python
                                        string, or NULL; this is used for
    
    From pypy.commits at gmail.com  Thu Mar 24 11:55:25 2016
    From: pypy.commits at gmail.com (mattip)
    Date: Thu, 24 Mar 2016 08:55:25 -0700 (PDT)
    Subject: [pypy-commit] pypy cpyext-ext: fixes for win32
    Message-ID: <56f40ded.04371c0a.ca88e.ffffe071@mx.google.com>
    
    Author: mattip 
    Branch: cpyext-ext
    Changeset: r83319:ca63eb4f0726
    Date: 2016-03-24 01:42 +0200
    http://bitbucket.org/pypy/pypy/changeset/ca63eb4f0726/
    
    Log:	fixes for win32
    
    diff --git a/pypy/module/cpyext/include/floatobject.h b/pypy/module/cpyext/include/floatobject.h
    --- a/pypy/module/cpyext/include/floatobject.h
    +++ b/pypy/module/cpyext/include/floatobject.h
    @@ -3,6 +3,13 @@
     
     #ifndef Py_FLOATOBJECT_H
     #define Py_FLOATOBJECT_H
    +
    +#ifdef _MSC_VER
     +#include <math.h>
     +#include <float.h>
    +#define copysign _copysign
    +#endif
    +
     #ifdef __cplusplus
     extern "C" {
     #endif
    diff --git a/pypy/module/cpyext/test/foo3.c b/pypy/module/cpyext/test/foo3.c
    --- a/pypy/module/cpyext/test/foo3.c
    +++ b/pypy/module/cpyext/test/foo3.c
    @@ -3,8 +3,9 @@
     
     PyObject* foo3type_tp_new(PyTypeObject* metatype, PyObject* args, PyObject* kwds)
     {
    +    PyObject* newType;
         printf("in foo3type_tp_new, preprocessing...\n");
    -    PyObject* newType = PyType_Type.tp_new(metatype, args, kwds);
    +    newType = PyType_Type.tp_new(metatype, args, kwds);
         printf("in foo3type_tp_new, postprocessing...\n");
         return newType;
     }
    @@ -62,8 +63,6 @@
     /* Initialize this module. */
     #ifdef __GNUC__
     extern __attribute__((visibility("default")))
    -#else
    -extern __declspec(dllexport)
     #endif
     
     PyMODINIT_FUNC
    diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py
    --- a/pypy/module/cpyext/test/test_bytesobject.py
    +++ b/pypy/module/cpyext/test/test_bytesobject.py
    @@ -25,14 +25,15 @@
                  """
                      PyObject* s = PyString_FromString("Hello world");
                      int result = 0;
    +                 size_t expected_size;
     
                      if(PyString_Size(s) == 11) {
                          result = 1;
                      }
                      #ifdef PYPY_VERSION
    -                    size_t expected_size = sizeof(void*)*7;
    +                    expected_size = sizeof(void*)*7;
                      #else
    -                    size_t expected_size = 37;
    +                    expected_size = 37;
                      #endif
                      if(s->ob_type->tp_basicsize != expected_size)
                      {
    diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py
    --- a/pypy/module/cpyext/test/test_pyerrors.py
    +++ b/pypy/module/cpyext/test/test_pyerrors.py
    @@ -1,3 +1,4 @@
    +import pytest
     import sys
     import StringIO
     
    @@ -101,6 +102,7 @@
             instance = space.call_function(space.w_ValueError)
             assert api.PyExceptionInstance_Class(instance) is space.w_ValueError
     
    +    @pytest.mark.skipif("sys.platform == 'win32'")
         def test_interrupt_occurred(self, space, api):
             assert not api.PyOS_InterruptOccurred()
             import signal, os
    diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py
    --- a/pypy/module/cpyext/test/test_unicodeobject.py
    +++ b/pypy/module/cpyext/test/test_unicodeobject.py
    @@ -190,14 +190,14 @@
             ar[0] = rffi.cast(PyObject, py_uni)
             api.PyUnicode_Resize(ar, 3)
             py_uni = rffi.cast(PyUnicodeObject, ar[0])
    -        assert py_uni.c_length == 3
    +        assert py_uni.c_size == 3
             assert py_uni.c_str[1] == u'b'
             assert py_uni.c_str[3] == u'\x00'
             # the same for growing
             ar[0] = rffi.cast(PyObject, py_uni)
             api.PyUnicode_Resize(ar, 10)
             py_uni = rffi.cast(PyUnicodeObject, ar[0])
    -        assert py_uni.c_length == 10
    +        assert py_uni.c_size == 10
             assert py_uni.c_str[1] == 'b'
             assert py_uni.c_str[10] == '\x00'
             Py_DecRef(space, ar[0])
    
    From pypy.commits at gmail.com  Thu Mar 24 12:07:55 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Thu, 24 Mar 2016 09:07:55 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: since we never allocate
      resops in the frontend, try a bit harder to have fewer copies
    Message-ID: <56f410db.85371c0a.8f590.ffffa988@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83320:26aeb872ebe0
    Date: 2016-03-24 18:06 +0200
    http://bitbucket.org/pypy/pypy/changeset/26aeb872ebe0/
    
    Log:	since we never allocate resops in the frontend, try a bit harder to
     	have fewer copies
    
    diff --git a/rpython/jit/metainterp/optimizeopt/info.py b/rpython/jit/metainterp/optimizeopt/info.py
    --- a/rpython/jit/metainterp/optimizeopt/info.py
    +++ b/rpython/jit/metainterp/optimizeopt/info.py
    @@ -144,7 +144,8 @@
                 op.set_forwarded(None)
                 optforce.emit_operation(op)
                 newop = optforce.getlastop()
    -            op.set_forwarded(newop)
    +            if newop is not op:
    +                op.set_forwarded(newop)
                 newop.set_forwarded(self)
                 descr = self.descr
                 self._is_virtual = False
    diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py
    --- a/rpython/jit/metainterp/optimizeopt/optimizer.py
    +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py
    @@ -563,7 +563,7 @@
                 return # can happen e.g. if we postpone the operation that becomes
                 # constant
             # XXX kill, requires thinking
    -        op = self.replace_op_with(op, op.opnum)
    +        #op = self.replace_op_with(op, op.opnum)
             for i in range(op.numargs()):
                 arg = self.force_box(op.getarg(i))
                 op.setarg(i, arg)
    @@ -592,7 +592,7 @@
             self._newoperations.append(op)
     
         def emit_guard_operation(self, op, pendingfields):
    -        guard_op = self.replace_op_with(op, op.getopnum())
    +        guard_op = op # self.replace_op_with(op, op.getopnum())
             opnum = guard_op.getopnum()
             # If guard_(no)_exception is merged with another previous guard, then
             # it *should* be in "some_call;guard_not_forced;guard_(no)_exception".
    
    From pypy.commits at gmail.com  Thu Mar 24 15:28:56 2016
    From: pypy.commits at gmail.com (cfbolz)
    Date: Thu, 24 Mar 2016 12:28:56 -0700 (PDT)
    Subject: [pypy-commit] pypy guard-compatible: at least print nullness of
     const ptrs
    Message-ID: <56f43ff8.455e1c0a.57a4f.fffff080@mx.google.com>
    
    Author: Carl Friedrich Bolz 
    Branch: guard-compatible
    Changeset: r83321:bcbe3b068a65
    Date: 2016-03-23 15:32 +0100
    http://bitbucket.org/pypy/pypy/changeset/bcbe3b068a65/
    
    Log:	at least print nullness of const ptrs
    
    diff --git a/rpython/jit/metainterp/compatible.py b/rpython/jit/metainterp/compatible.py
    --- a/rpython/jit/metainterp/compatible.py
    +++ b/rpython/jit/metainterp/compatible.py
    @@ -109,15 +109,18 @@
         def repr(self):
             return ""
     
    -    @staticmethod
    -    def _repr_const(arg):
    +    def _repr_const(self, arg):
             from rpython.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr
             if isinstance(arg, ConstInt):
                 return str(arg.value)
             elif isinstance(arg, ConstPtr):
    -            return ""
    +            if arg.value:
    +                return ""
    +            else:
    +                return "None"
             elif isinstance(arg, ConstFloat):
                 return str(arg.getfloat())
    +        return ""
     
     class PureCallCondition(Condition):
         def __init__(self, op, metainterp_sd):
    diff --git a/rpython/jit/metainterp/test/test_compatible.py b/rpython/jit/metainterp/test/test_compatible.py
    --- a/rpython/jit/metainterp/test/test_compatible.py
    +++ b/rpython/jit/metainterp/test/test_compatible.py
    @@ -30,10 +30,10 @@
                 while n > 0:
                     driver.can_enter_jit(n=n, x=x)
                     driver.jit_merge_point(n=n, x=x)
    -                n -= g(x, 7)
    +                n -= g(x, "abc")
     
             def main():
    -            g(p1, 9) # make annotator not make argument constant
    +            g(p1, "def") # make annotator not make argument constant
                 f(100, p1)
                 f(100, p2)
                 f(100, p3)
    
    From pypy.commits at gmail.com  Thu Mar 24 15:28:58 2016
    From: pypy.commits at gmail.com (cfbolz)
    Date: Thu, 24 Mar 2016 12:28:58 -0700 (PDT)
    Subject: [pypy-commit] pypy guard-compatible: inform valgrind of the patching
    Message-ID: <56f43ffa.2179c20a.f41bb.12cc@mx.google.com>
    
    Author: Carl Friedrich Bolz 
    Branch: guard-compatible
    Changeset: r83322:9ebf2fc754a2
    Date: 2016-03-24 14:01 +0100
    http://bitbucket.org/pypy/pypy/changeset/9ebf2fc754a2/
    
    Log:	inform valgrind of the patching
    
    diff --git a/rpython/jit/backend/x86/guard_compat.py b/rpython/jit/backend/x86/guard_compat.py
    --- a/rpython/jit/backend/x86/guard_compat.py
    +++ b/rpython/jit/backend/x86/guard_compat.py
    @@ -2,7 +2,7 @@
     from rpython.rlib.objectmodel import we_are_translated
     from rpython.rtyper.lltypesystem import lltype, rffi
     from rpython.jit.backend.x86.arch import WORD, IS_X86_32, IS_X86_64
    -from rpython.jit.backend.x86 import rx86, codebuf
    +from rpython.jit.backend.x86 import rx86, codebuf, valgrind
     from rpython.jit.backend.x86.regloc import X86_64_SCRATCH_REG, imm, eax, edx
     from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper
     from rpython.jit.metainterp.compile import GuardCompatibleDescr
    @@ -136,6 +136,7 @@
         # the old 'compatinfo' is not used any more, but will only be freed
         # when the looptoken is freed
         compatinfop[0] = rffi.cast(rffi.VOIDP, newcompatinfo)
    +    valgrind.discard_translations(rffi.cast(lltype.Signed, compatinfop), WORD)
     
         # the machine code is not updated here.  We leave it to the actual
         # guard_compatible to update it if needed.
    
    From pypy.commits at gmail.com  Thu Mar 24 15:29:00 2016
    From: pypy.commits at gmail.com (cfbolz)
    Date: Thu, 24 Mar 2016 12:29:00 -0700 (PDT)
    Subject: [pypy-commit] pypy guard-compatible: print the string values into
     the guard_compatible conditions
    Message-ID: <56f43ffc.657bc20a.64339.115e@mx.google.com>
    
    Author: Carl Friedrich Bolz 
    Branch: guard-compatible
    Changeset: r83323:c98ecc5ad0e4
    Date: 2016-03-24 20:28 +0100
    http://bitbucket.org/pypy/pypy/changeset/c98ecc5ad0e4/
    
    Log:	print the string values into the guard_compatible conditions
    
    diff --git a/rpython/jit/metainterp/compatible.py b/rpython/jit/metainterp/compatible.py
    --- a/rpython/jit/metainterp/compatible.py
    +++ b/rpython/jit/metainterp/compatible.py
    @@ -1,3 +1,4 @@
    +from rpython.rlib.objectmodel import we_are_translated
     from rpython.jit.metainterp.history import newconst
     from rpython.jit.codewriter import longlong
     from rpython.jit.metainterp.resoperation import rop
    @@ -111,10 +112,19 @@
     
         def _repr_const(self, arg):
             from rpython.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr
    +        from rpython.rtyper.annlowlevel import llstr, hlstr
    +        from rpython.rtyper.lltypesystem import llmemory, rstr, rffi, lltype
    +
             if isinstance(arg, ConstInt):
                 return str(arg.value)
             elif isinstance(arg, ConstPtr):
                 if arg.value:
    +                # through all the layers and back
    +                if we_are_translated():
    +                    tid = self.metainterp_sd.cpu.get_actual_typeid(arg.getref_base())
    +                    sid = self.metainterp_sd.cpu.get_actual_typeid(rffi.cast(llmemory.GCREF, llstr("abc")))
    +                    if sid == tid:
    +                        return hlstr(rffi.cast(lltype.Ptr(rstr.STR), arg.getref_base()))
                     return ""
                 else:
                     return "None"
    
    From pypy.commits at gmail.com  Thu Mar 24 15:31:29 2016
    From: pypy.commits at gmail.com (mattip)
    Date: Thu, 24 Mar 2016 12:31:29 -0700 (PDT)
    Subject: [pypy-commit] pypy cpyext-ext: test, add half of PyObject_Realloc
    Message-ID: <56f44091.6672c20a.4facb.1429@mx.google.com>
    
    Author: mattip 
    Branch: cpyext-ext
    Changeset: r83324:ed8ef8873c00
    Date: 2016-03-24 21:29 +0200
    http://bitbucket.org/pypy/pypy/changeset/ed8ef8873c00/
    
    Log:	test, add half of PyObject_Realloc
    
    diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h
    --- a/pypy/module/cpyext/include/object.h
    +++ b/pypy/module/cpyext/include/object.h
    @@ -570,7 +570,7 @@
     PyAPI_FUNC(int) PyObject_CheckReadBuffer(PyObject *);
     
     #define PyObject_MALLOC         PyObject_Malloc
    -/* #define PyObject_REALLOC        PyObject_Realloc  NotImplemented */
    +#define PyObject_REALLOC        PyObject_Realloc
     #define PyObject_FREE           PyObject_Free
     #define PyObject_Del            PyObject_Free
     #define PyObject_DEL            PyObject_Free
    diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
    --- a/pypy/module/cpyext/object.py
    +++ b/pypy/module/cpyext/object.py
    @@ -21,6 +21,15 @@
                              flavor='raw',
                              add_memory_pressure=True)
     
    + at cpython_api([rffi.VOIDP, Py_ssize_t], rffi.VOIDP)
    +def PyObject_Realloc(space, ptr, size):
    +    if not lltype.cast_ptr_to_int(ptr):
    +        return lltype.malloc(rffi.VOIDP.TO, size,
    +                         flavor='raw',
    +                         add_memory_pressure=True)
    +    # XXX FIXME
    +    return lltype.nullptr(rffi.CCHARP.TO)
    +
     @cpython_api([rffi.VOIDP], lltype.Void)
     def PyObject_Free(space, ptr):
         lltype.free(ptr, flavor='raw')
    diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py
    --- a/pypy/module/cpyext/test/test_cpyext.py
    +++ b/pypy/module/cpyext/test/test_cpyext.py
    @@ -106,6 +106,7 @@
         """
         if sys.platform == 'win32':
             kwds["compile_extra"] = ["/we4013"]
    +        kwds["link_extra"] = ["/LIBPATH:" + os.path.join(sys.exec_prefix, 'libs')]
         elif sys.platform == 'darwin':
             pass
         elif sys.platform.startswith('linux'):
    diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py
    --- a/pypy/module/cpyext/test/test_object.py
    +++ b/pypy/module/cpyext/test/test_object.py
    @@ -231,6 +231,24 @@
             assert type(x) is int
             assert x == -424344
     
    +    def test_object_realloc(self):
    +        module = self.import_extension('foo', [
    +            ("realloctest", "METH_NOARGS",
    +             """
    +                 PyObject * ret;
    +                 char *copy, *orig = PyObject_MALLOC(12);
    +                 memcpy(orig, "hello world", 12);
    +                 copy = PyObject_REALLOC(orig, 15);
    +                 if (copy == NULL)
    +                     Py_RETURN_NONE;
    +                 ret = PyString_FromString(copy, 12);
    +                 PyObject_Free(orig);
    +                 PyObject_Free(copy);
    +                 return ret;  
    +             """)])
    +        x = module.realloctest()
    +        assert x == 'hello world'
    +
         def test_TypeCheck(self):
             module = self.import_extension('foo', [
                 ("typecheck", "METH_VARARGS",
    
    From pypy.commits at gmail.com  Thu Mar 24 20:35:11 2016
    From: pypy.commits at gmail.com (rlamy)
    Date: Thu, 24 Mar 2016 17:35:11 -0700 (PDT)
    Subject: [pypy-commit] pypy rposix-for-3: Test and fix symlinkat()
    Message-ID: <56f487bf.a3abc20a.9c261.6b2c@mx.google.com>
    
    Author: Ronan Lamy 
    Branch: rposix-for-3
    Changeset: r83325:756915d867d0
    Date: 2016-03-25 00:34 +0000
    http://bitbucket.org/pypy/pypy/changeset/756915d867d0/
    
    Log:	Test and fix symlinkat()
    
    diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py
    --- a/rpython/rlib/rposix.py
    +++ b/rpython/rlib/rposix.py
    @@ -1905,11 +1905,11 @@
     
     if HAVE_SYMLINKAT:
         c_symlinkat = external('symlinkat',
    -        [rffi.CCHARP, rffi.CCHARP, rffi.INT], rffi.INT,
    +        [rffi.CCHARP, rffi.INT, rffi.CCHARP], rffi.INT,
             save_err=rffi.RFFI_SAVE_ERRNO)
     
         def symlinkat(src, dst, dir_fd=AT_FDCWD):
    -        error = c_symlinkat(src, dst, dir_fd)
    +        error = c_symlinkat(src, dir_fd, dst)
             handle_posix_error('symlinkat', error)
     
     if HAVE_OPENAT:
    diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py
    --- a/rpython/rlib/test/test_rposix.py
    +++ b/rpython/rlib/test/test_rposix.py
    @@ -536,3 +536,12 @@
                 os.open('/tmp/t', 0, 0)
                 os.open(u'/tmp/t', 0, 0)
             compile(f, ())
    +
    +def test_symlinkat(tmpdir):
    +    tmpdir.join('file').write('text')
    +    dirfd = os.open(str(tmpdir), os.O_RDONLY)
    +    try:
    +        rposix.symlinkat('file', 'link', dir_fd=dirfd)
    +        assert os.readlink(str(tmpdir.join('link'))) == 'file'
    +    finally:
    +        os.close(dirfd)
    
    From pypy.commits at gmail.com  Thu Mar 24 20:49:52 2016
    From: pypy.commits at gmail.com (rlamy)
    Date: Thu, 24 Mar 2016 17:49:52 -0700 (PDT)
    Subject: [pypy-commit] pypy follow_symlinks: hg merge rposix-for-3
    Message-ID: <56f48b30.c856c20a.787a3.702e@mx.google.com>
    
    Author: Ronan Lamy 
    Branch: follow_symlinks
    Changeset: r83326:8ecd99407360
    Date: 2016-03-25 00:36 +0000
    http://bitbucket.org/pypy/pypy/changeset/8ecd99407360/
    
    Log:	hg merge rposix-for-3
    
    diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py
    --- a/rpython/rlib/rposix.py
    +++ b/rpython/rlib/rposix.py
    @@ -1739,6 +1739,7 @@
         AT_SYMLINK_NOFOLLOW = rffi_platform.DefinedConstantInteger('AT_SYMLINK_NOFOLLOW')
         AT_EACCESS = rffi_platform.DefinedConstantInteger('AT_EACCESS')
         AT_REMOVEDIR = rffi_platform.DefinedConstantInteger('AT_REMOVEDIR')
    +    AT_EMPTY_PATH = rffi_platform.DefinedConstantInteger('AT_EMPTY_PATH')
         UTIME_NOW = rffi_platform.DefinedConstantInteger('UTIME_NOW')
         UTIME_OMIT = rffi_platform.DefinedConstantInteger('UTIME_OMIT')
         TIMESPEC = rffi_platform.Struct('struct timespec', [
    @@ -1772,6 +1773,34 @@
             error = c_faccessat(dir_fd, pathname, mode, flags)
             return error == 0
     
    +if HAVE_FCHMODAT:
    +    c_fchmodat = external('fchmodat',
    +        [rffi.INT, rffi.CCHARP, rffi.INT, rffi.INT], rffi.INT,
    +        save_err=rffi.RFFI_SAVE_ERRNO,)
    +
    +    def fchmodat(path, mode, dir_fd=AT_FDCWD, follow_symlinks=True):
    +        if follow_symlinks:
    +            flag = 0
    +        else:
    +            flag = AT_SYMLINK_NOFOLLOW
    +        error = c_fchmodat(dir_fd, path, mode, flag)
    +        handle_posix_error('fchmodat', error)
    +
    +if HAVE_FCHOWNAT:
    +    c_fchownat = external('fchownat',
    +        [rffi.INT, rffi.CCHARP, rffi.INT, rffi.INT, rffi.INT], rffi.INT,
    +        save_err=rffi.RFFI_SAVE_ERRNO,)
    +
    +    def fchownat(path, owner, group, dir_fd=AT_FDCWD,
    +            follow_symlinks=True, empty_path=False):
    +        flag = 0
    +        if not follow_symlinks:
    +            flag |= AT_SYMLINK_NOFOLLOW
    +        if empty_path:
    +            flag |= AT_EMPTY_PATH
    +        error = c_fchownat(dir_fd, path, owner, group, flag)
    +        handle_posix_error('fchownat', error)
    +
     if HAVE_LINKAT:
         c_linkat = external('linkat',
             [rffi.INT, rffi.CCHARP, rffi.INT, rffi.CCHARP, rffi.INT], rffi.INT)
    @@ -1876,11 +1905,11 @@
     
     if HAVE_SYMLINKAT:
         c_symlinkat = external('symlinkat',
    -        [rffi.CCHARP, rffi.CCHARP, rffi.INT], rffi.INT,
    +        [rffi.CCHARP, rffi.INT, rffi.CCHARP], rffi.INT,
             save_err=rffi.RFFI_SAVE_ERRNO)
     
         def symlinkat(src, dst, dir_fd=AT_FDCWD):
    -        error = c_symlinkat(src, dst, dir_fd)
    +        error = c_symlinkat(src, dir_fd, dst)
             handle_posix_error('symlinkat', error)
     
     if HAVE_OPENAT:
    diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py
    --- a/rpython/rlib/test/test_rposix.py
    +++ b/rpython/rlib/test/test_rposix.py
    @@ -508,6 +508,16 @@
             finally:
                 os.close(dirfd)
     
    +    def test_fchmodat(self):
    +        def f(dirfd):
    +            return rposix.fchmodat('test_open_ascii', 0777, dirfd)
    +
    +        dirfd = os.open(os.path.dirname(self.ufilename), os.O_RDONLY)
    +        try:
    +            interpret(f, [dirfd])  # does not crash
    +        finally:
    +            os.close(dirfd)
    +
     
     class TestPosixUnicode(BasePosixUnicodeOrAscii):
         def _get_filename(self):
    @@ -526,3 +536,12 @@
                 os.open('/tmp/t', 0, 0)
                 os.open(u'/tmp/t', 0, 0)
             compile(f, ())
    +
    +def test_symlinkat(tmpdir):
    +    tmpdir.join('file').write('text')
    +    dirfd = os.open(str(tmpdir), os.O_RDONLY)
    +    try:
    +        rposix.symlinkat('file', 'link', dir_fd=dirfd)
    +        assert os.readlink(str(tmpdir.join('link'))) == 'file'
    +    finally:
    +        os.close(dirfd)
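
For illustration, a minimal plain-Python sketch of the keyword-to-flag mapping that the fchownat() wrapper above performs; the numeric AT_* values are hard-coded here only for the example, the real code resolves them through rffi_platform:

    AT_SYMLINK_NOFOLLOW = 0x100   # illustrative value; obtained via rffi_platform in rposix
    AT_EMPTY_PATH = 0x1000        # illustrative value; obtained via rffi_platform in rposix

    def fchownat_flags(follow_symlinks=True, empty_path=False):
        # mirrors the follow_symlinks/empty_path -> flag logic of fchownat() above
        flag = 0
        if not follow_symlinks:
            flag |= AT_SYMLINK_NOFOLLOW
        if empty_path:
            flag |= AT_EMPTY_PATH
        return flag

    assert fchownat_flags() == 0
    assert fchownat_flags(follow_symlinks=False) == AT_SYMLINK_NOFOLLOW
    assert fchownat_flags(empty_path=True) == AT_EMPTY_PATH

The symlinkat() change follows the C prototype int symlinkat(const char *target, int newdirfd, const char *linkpath): the directory file descriptor is the middle argument, which is why the call order is swapped.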
    
    From pypy.commits at gmail.com  Thu Mar 24 20:49:54 2016
    From: pypy.commits at gmail.com (rlamy)
    Date: Thu, 24 Mar 2016 17:49:54 -0700 (PDT)
    Subject: [pypy-commit] pypy follow_symlinks: Add test for symlink(..,
     dir_fd=...)
    Message-ID: <56f48b32.d4e01c0a.bb63.5178@mx.google.com>
    
    Author: Ronan Lamy 
    Branch: follow_symlinks
    Changeset: r83327:57c933e9ad24
    Date: 2016-03-25 00:48 +0000
    http://bitbucket.org/pypy/pypy/changeset/57c933e9ad24/
    
    Log:	Add test for symlink(.., dir_fd=...)
    
    diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py
    --- a/pypy/module/posix/test/test_posix2.py
    +++ b/pypy/module/posix/test/test_posix2.py
    @@ -993,6 +993,19 @@
                     data = f.read()
                     assert data == "who cares?"
     
    +        # XXX skip test if dir_fd is unsupported
    +        def test_symlink_fd(self):
    +            posix = self.posix
    +            bytes_dir = self.bytes_dir
    +            f = posix.open(bytes_dir, posix.O_RDONLY)
    +            try:
    +                posix.symlink('somefile', 'somelink', dir_fd=f)
    +                assert (posix.readlink(bytes_dir + '/somelink'.encode()) ==
    +                        'somefile'.encode())
    +            finally:
    +                posix.close(f)
    +                posix.unlink(bytes_dir + '/somelink'.encode())
    +
         if hasattr(os, 'ftruncate'):
             def test_truncate(self):
                 posix = self.posix
    @@ -1281,6 +1294,7 @@
             if os.name == 'posix':
                 assert os.open in os.supports_dir_fd  # openat()
     
    +
     def test_convert_seconds_simple(space):
         w_time = space.wrap(123.456)
         assert convert_seconds(space, w_time) == (123, 456000000)
    
    From pypy.commits at gmail.com  Thu Mar 24 21:09:33 2016
    From: pypy.commits at gmail.com (rlamy)
    Date: Thu, 24 Mar 2016 18:09:33 -0700 (PDT)
    Subject: [pypy-commit] pypy follow_symlinks: Split off interp-level tests
     from test_posix2.py
    Message-ID: <56f48fcd.c52f1c0a.85ced.5261@mx.google.com>
    
    Author: Ronan Lamy 
    Branch: follow_symlinks
    Changeset: r83328:7682a6f35d12
    Date: 2016-03-25 01:08 +0000
    http://bitbucket.org/pypy/pypy/changeset/7682a6f35d12/
    
    Log:	Split off interp-level tests from test_posix2.py
    
    	This prevents -A tests from trying to import hypothesis.
    
    diff --git a/pypy/module/posix/test/test_posix2.py b/pypy/module/posix/test/test_posix2.py
    --- a/pypy/module/posix/test/test_posix2.py
    +++ b/pypy/module/posix/test/test_posix2.py
    @@ -1,22 +1,14 @@
    -
     # -*- coding: utf-8 -*-
     
    -from __future__ import with_statement
     import os
     import py
     import sys
     import signal
     
    -from pypy.objspace.std import StdObjSpace
     from rpython.tool.udir import udir
     from pypy.tool.pytest.objspace import gettestobjspace
    -from pypy.conftest import pypydir
     from rpython.translator.c.test.test_extfunc import need_sparse_files
     from rpython.rlib import rposix
    -from pypy.module.posix.interp_posix import convert_seconds
    -
    -from hypothesis import given
    -from hypothesis.strategies import integers
     
     
     def setup_module(mod):
    @@ -52,7 +44,6 @@
         # space.call_method(space.getbuiltinmodule('sys'), 'getfilesystemencoding')
     
     
    -
     GET_POSIX = "(): import %s as m ; return m" % os.name
     
     
    @@ -1249,38 +1240,6 @@
             assert content == b"test"
     
     
    -class TestPexpect(object):
    -    # XXX replace with AppExpectTest class as soon as possible
    -    def setup_class(cls):
    -        try:
    -            import pexpect
    -        except ImportError:
    -            py.test.skip("pexpect not found")
    -
    -    def _spawn(self, *args, **kwds):
    -        import pexpect
    -        kwds.setdefault('timeout', 600)
    -        print 'SPAWN:', args, kwds
    -        child = pexpect.spawn(*args, maxread=5000, **kwds)
    -        child.logfile = sys.stdout
    -        return child
    -
    -    def spawn(self, argv):
    -        py_py = py.path.local(pypydir).join('bin', 'pyinteractive.py')
    -        return self._spawn(sys.executable, [str(py_py), '-S'] + argv)
    -
    -    def test_ttyname(self):
    -        source = py.code.Source("""
    -        import os, sys
    -        assert os.ttyname(sys.stdin.fileno())
    -        print('ok!')
    -        """)
    -        f = udir.join("test_ttyname.py")
    -        f.write(source)
    -        child = self.spawn([str(f)])
    -        child.expect('ok!')
    -
    -
     class AppTestFdVariants:
         # Tests variant functions which also accept file descriptors,
         # dir_fd and follow_symlinks.
    @@ -1294,16 +1253,3 @@
             if os.name == 'posix':
                 assert os.open in os.supports_dir_fd  # openat()
     
    -
    -def test_convert_seconds_simple(space):
    -    w_time = space.wrap(123.456)
    -    assert convert_seconds(space, w_time) == (123, 456000000)
    -
    - at given(s=integers(min_value=-2**30, max_value=2**30), ns=integers(min_value=0, max_value=10**9))
    -def test_convert_seconds_full(space, s, ns):
    -    w_time = space.wrap(s + ns * 1e-9)
    -    sec, nsec = convert_seconds(space, w_time)
    -    assert 0 <= nsec < 1e9
    -    MAX_ERR = 1e9 / 2**23 + 1  # nsec has 53 - 30 = 23 bits of precisin
    -    err = (sec * 10**9 + nsec) - (s * 10**9 + ns)
    -    assert -MAX_ERR < err < MAX_ERR
    
    From pypy.commits at gmail.com  Fri Mar 25 04:03:55 2016
    From: pypy.commits at gmail.com (cfbolz)
    Date: Fri, 25 Mar 2016 01:03:55 -0700 (PDT)
    Subject: [pypy-commit] pypy default: go via the space to benefit from the
     annspecialcase
    Message-ID: <56f4f0eb.2976c20a.d610c.ffffc66d@mx.google.com>
    
    Author: Carl Friedrich Bolz 
    Branch: 
    Changeset: r83329:336e2da610d5
    Date: 2016-03-25 09:03 +0100
    http://bitbucket.org/pypy/pypy/changeset/336e2da610d5/
    
    Log:	go via the space to benefit from the annspecialcase
    
    diff --git a/pypy/objspace/std/objectobject.py b/pypy/objspace/std/objectobject.py
    --- a/pypy/objspace/std/objectobject.py
    +++ b/pypy/objspace/std/objectobject.py
    @@ -110,7 +110,7 @@
     def descr__init__(space, w_obj, __args__):
         # don't allow arguments unless __new__ is overridden
         w_type = space.type(w_obj)
    -    w_parent_new, _ = w_type.lookup_where('__new__')
    +    w_parent_new, _ = space.lookup_in_type_where(w_type, '__new__')
         if w_parent_new is space.w_object:
             try:
                 __args__.fixedunpack(0)
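
A rough CPython-level analogue of the check descr__init__ performs (a sketch only, not the interp-level code): find the class along the mro that defines __new__, as the interp-level lookup of '__new__' does, and tolerate extra __init__ arguments only when __new__ was overridden.

    def parent_new_is_default(cls):
        # sketch: locate where '__new__' is defined along the mro
        for klass in cls.__mro__:
            if '__new__' in klass.__dict__:
                return klass.__dict__['__new__'] is object.__dict__['__new__']
        return True

    class Plain(object):
        pass

    class WithNew(object):
        def __new__(cls, *args):
            return object.__new__(cls)

    assert parent_new_is_default(Plain)        # plain object.__new__: no extra args allowed
    assert not parent_new_is_default(WithNew)  # overridden __new__: extra args tolerated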
    
    From pypy.commits at gmail.com  Fri Mar 25 05:22:32 2016
    From: pypy.commits at gmail.com (plan_rich)
    Date: Fri, 25 Mar 2016 02:22:32 -0700 (PDT)
Subject: [pypy-commit] pypy new-jit-log: modify the jit log: the length is
 no longer written in the header but always directly before each string
    Message-ID: <56f50358.c9161c0a.689ce.ffffbfb3@mx.google.com>
    
    Author: Richard Plangger 
    Branch: new-jit-log
    Changeset: r83330:0dfb4f8b2119
    Date: 2016-03-24 17:51 +0100
    http://bitbucket.org/pypy/pypy/changeset/0dfb4f8b2119/
    
Log:	modify the jit log: the length is no longer written in the header but
	always directly before each string
    
    diff --git a/rpython/jit/backend/llsupport/asmmemmgr.py b/rpython/jit/backend/llsupport/asmmemmgr.py
    --- a/rpython/jit/backend/llsupport/asmmemmgr.py
    +++ b/rpython/jit/backend/llsupport/asmmemmgr.py
    @@ -287,11 +287,14 @@
                 targetindex -= self.SUBBLOCK_SIZE
             assert not block
     
    -    def copy_core_dump(self, addr, offset=0):
    +    def copy_core_dump(self, addr, offset=0, count=-1):
             HEX = '0123456789ABCDEF'
             dump = []
    -        src = rffi.cast(rffi.CCHARP, addr + offset)
    -        for p in range(self.get_relative_pos()):
    +        src = rffi.cast(rffi.CCHARP, addr)
    +        end = self.get_relative_pos()
    +        if count != -1:
    +            end = offset + count
    +        for p in range(offset, end):
                 o = ord(src[p])
                 dump.append(HEX[o >> 4])
                 dump.append(HEX[o & 15])
    diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py
    --- a/rpython/jit/backend/x86/assembler.py
    +++ b/rpython/jit/backend/x86/assembler.py
    @@ -534,7 +534,7 @@
             looptoken._ll_function_addr = rawstart
             if logger:
                 log = logger.log_trace(MARK_TRACE_ASM, None, self.mc)
    -            log.write(inputargs, operations, None, ops_offset)
    +            log.write(inputargs, operations, None, ops_offset, unique_id=unique_id)
             self.fixup_target_tokens(rawstart)
             self.teardown()
             # oprofile support
    diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py
    --- a/rpython/jit/metainterp/compile.py
    +++ b/rpython/jit/metainterp/compile.py
    @@ -482,7 +482,7 @@
     def do_compile_loop(jd_id, unique_id, metainterp_sd, inputargs, operations,
                         looptoken, log=True, name='', memo=None):
         _log = metainterp_sd.jitlog.log_trace(MARK_TRACE_OPT, metainterp_sd, None)
    -    _log.write(inputargs, operations)
    +    _log.write(inputargs, operations, name=name, unique_id=unique_id)
         # TODO remove old
         metainterp_sd.logger_ops.log_loop(inputargs, operations, -2,
                                           'compiling', None, name, memo)
    diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py
    --- a/rpython/jit/metainterp/jitlog.py
    +++ b/rpython/jit/metainterp/jitlog.py
    @@ -3,6 +3,7 @@
     from rpython.jit.metainterp.history import ConstInt, ConstFloat
     from rpython.rlib.objectmodel import we_are_translated
     from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
    +from rpython.rlib.objectmodel import compute_unique_id
     import sys
     import weakref
     
    @@ -31,21 +32,25 @@
         def __init__(self):
             self.cintf = cintf.setup()
             self.memo = {}
    +        self.is_setup = False
     
         def setup_once(self):
    +        self.is_setup = True
             self.cintf.jitlog_try_init_using_env()
             if self.cintf.jitlog_filter(0x0):
                 return
             count = len(resoperations.opname)
             mark = MARK_RESOP_META
             for opnum, opname in resoperations.opname.items():
    -            line = self.encode_le_16bit(opnum) + opname.lower()
    +            line = self.encode_le_16bit(opnum) + self.encode_str(opname.lower())
                 self.write_marked(mark, line)
     
         def teardown(self):
             self.cintf.jitlog_teardown()
     
         def write_marked(self, mark, line):
    +        if not self.is_setup:
    +            self.setup_once()
             self.cintf.jitlog_write_marked(mark, line, len(line))
     
         def log_trace(self, tag, metainterp_sd, mc, memo=None):
    @@ -65,6 +70,9 @@
             lst = [le_addr, le_len, le_addr]
             self.cintf.jitlog_filter(MARK_ASM_PATCH, ''.join(lst))
     
    +    def encode_str(self, string):
    +        return self.encode_le_32bit(len(string)) + string
    +
         def encode_le_16bit(self, val):
             return chr((val >> 0) & 0xff) + chr((val >> 8) & 0xff)
     
    @@ -92,24 +100,36 @@
         def __init__(self, tag, memo, metainterp_sd, mc, logger):
             self.memo = memo
             self.metainterp_sd = metainterp_sd
    +        self.ts = None
             if self.metainterp_sd is not None:
                 self.ts = metainterp_sd.cpu.ts
             self.tag = tag
             self.mc = mc
             self.logger = logger
     
    -    def write(self, args, ops, faildescr=None, ops_offset={}):
    +    def write(self, args, ops, faildescr=None, ops_offset={},
    +              name=None, unique_id=None):
             log = self.logger
     
    +        if not name:
    +            name = ''
             # write the initial tag
             if faildescr is None:
    -            log.write_marked(self.tag, 'loop')
    +            string = self.logger.encode_str('loop') + \
    +                     self.logger.encode_le_addr(unique_id or 0) + \
    +                     self.logger.encode_str(name or '')
    +            log.write_marked(self.tag, string)
             else:
    -            log.write_marked(self.tag, 'bridge')
    +            unique_id = compute_unique_id(faildescr)
    +            string = self.logger.encode_str('bridge') + \
    +                     self.logger.encode_le_addr(unique_id) + \
    +                     self.logger.encode_str(name or '')
    +            log.write_marked(self.tag, string)
     
             # input args
             str_args = [self.var_to_str(arg) for arg in args]
    -        log.write_marked(MARK_INPUT_ARGS, ','.join(str_args))
    +        string = self.logger.encode_str(','.join(str_args))
    +        log.write_marked(MARK_INPUT_ARGS, string)
     
             # assembler address (to not duplicate it in write_code_dump)
             if self.mc is not None:
    @@ -138,12 +158,15 @@
             descr = op.getdescr()
             le_opnum = self.logger.encode_le_16bit(op.getopnum())
             str_res = self.var_to_str(op)
    -        line = le_opnum + ','.join([str_res] + str_args)
    +        line = ','.join([str_res] + str_args)
             if descr:
                 descr_str = descr.repr_of_descr()
    -            return MARK_RESOP_DESCR, line + ',' + descr_str
    +            line = line + ',' + descr_str
    +            string = self.logger.encode_str(line)
    +            return MARK_RESOP_DESCR, le_opnum + string
             else:
    -            return MARK_RESOP, line
    +            string = self.logger.encode_str(line)
    +            return MARK_RESOP, le_opnum + string
     
     
         def write_core_dump(self, operations, i, op, ops_offset):
    @@ -177,9 +200,11 @@
             else:
                 end_offset = ops_offset[op2]
     
    -        dump = self.mc.copy_core_dump(self.mc.absolute_addr(), start_offset)
    +        count = end_offset - start_offset
    +        dump = self.mc.copy_core_dump(self.mc.absolute_addr(), start_offset, count)
             offset = self.logger.encode_le_16bit(start_offset)
    -        self.logger.write_marked(MARK_ASM, offset + dump)
    +        edump = self.logger.encode_str(dump)
    +        self.logger.write_marked(MARK_ASM, offset + edump)
     
         def var_to_str(self, arg):
             try:
    diff --git a/rpython/jit/metainterp/optimizeopt/__init__.py b/rpython/jit/metainterp/optimizeopt/__init__.py
    --- a/rpython/jit/metainterp/optimizeopt/__init__.py
    +++ b/rpython/jit/metainterp/optimizeopt/__init__.py
    @@ -55,8 +55,9 @@
         debug_start("jit-optimize")
         inputargs = compile_data.start_label.getarglist()
         try:
    -        log = metainterp_sd.jitlog.log_trace(MARK_TRACE, metainterp_sd, None)
    -        log.write(inputargs, compile_data.operations)
    +        # TODO missing the unique id
    +        # TODO log = metainterp_sd.jitlog.log_trace(MARK_TRACE, metainterp_sd, None)
    +        # TODO log.write(inputargs, compile_data.operations)
             #
             metainterp_sd.logger_noopt.log_loop(inputargs,
                                                 compile_data.operations,
    diff --git a/rpython/rlib/rvmprof/src/jitlog_main.h b/rpython/rlib/rvmprof/src/jitlog_main.h
    --- a/rpython/rlib/rvmprof/src/jitlog_main.h
    +++ b/rpython/rlib/rvmprof/src/jitlog_main.h
    @@ -68,17 +68,6 @@
     #endif
           }
         }
    -    if (!jitlog_fd) {
    -        jitlog_fd = 2;
    -        // TODO
    -        //if (isatty(2))
    -        //  {
    -        //    debug_start_colors_1 = "\033[1m\033[31m";
    -        //    debug_start_colors_2 = "\033[31m";
    -        //    debug_stop_colors = "\033[0m";
    -        //  }
    -    }
    -
         jitlog_ready = 1;
     }
     
    @@ -87,6 +76,7 @@
     {
         jitlog_fd = fd;
         jitlog_prefix = strdup(prefix);
    +    jitlog_ready = 1;
         return NULL;
     }
     
    @@ -111,13 +101,8 @@
     {
         if (!jitlog_ready) { return; }
     
    -    char header[5];
    +    char header[1];
         header[0] = tag;
    -    // little endian 32 bit singed int
    -    header[1] = length & 0xff;
    -    header[2] = (length >> 8) & 0xff;
    -    header[3] = (length >> 16) & 0xff;
    -    header[4] = (length >> 24) & 0xff;
    -    write(jitlog_fd, (const char*)&header, 5);
    +    write(jitlog_fd, (const char*)&header, 1);
         write(jitlog_fd, text, length);
     }
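
To make the new framing concrete, here is a small plain-Python sketch (not the RPython code; the mark value is made up for the example): the per-entry header is now a single tag byte, and every string embedded in the payload carries its own 32-bit little-endian length prefix.

    import struct
    from StringIO import StringIO

    def encode_str(s):
        # 32-bit little-endian length, then the raw bytes
        return struct.pack('<i', len(s)) + s

    def encode_le_16bit(val):
        return struct.pack('<H', val)

    def write_marked(out, mark, payload):
        out.write(mark)        # 1 tag byte instead of the old 5-byte header
        out.write(payload)

    buf = StringIO()
    MARK_RESOP_META = '\x01'   # made-up tag value for the example
    line = encode_le_16bit(42) + encode_str('int_add')
    write_marked(buf, MARK_RESOP_META, line)
    assert buf.getvalue()[0] == '\x01'
    assert buf.getvalue()[3:7] == struct.pack('<i', len('int_add'))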
    
    From pypy.commits at gmail.com  Fri Mar 25 05:22:34 2016
    From: pypy.commits at gmail.com (plan_rich)
    Date: Fri, 25 Mar 2016 02:22:34 -0700 (PDT)
    Subject: [pypy-commit] pypy new-jit-log: test to generate a jitlog
    Message-ID: <56f5035a.2106c20a.717c5.ffffb11e@mx.google.com>
    
    Author: Richard Plangger 
    Branch: new-jit-log
    Changeset: r83331:a3cbc9a1c5d6
    Date: 2016-03-24 17:53 +0100
    http://bitbucket.org/pypy/pypy/changeset/a3cbc9a1c5d6/
    
    Log:	test to generate a jitlog
    
    diff --git a/rpython/jit/backend/x86/test/test_jitlog.py b/rpython/jit/backend/x86/test/test_jitlog.py
    new file mode 100644
    --- /dev/null
    +++ b/rpython/jit/backend/x86/test/test_jitlog.py
    @@ -0,0 +1,37 @@
    +import re
    +from rpython.rlib import debug
    +from rpython.jit.tool.oparser import pure_parse
    +from rpython.jit.metainterp import logger
    +from rpython.jit.metainterp.typesystem import llhelper
    +from StringIO import StringIO
    +from rpython.jit.metainterp.optimizeopt.util import equaloplists
    +from rpython.jit.metainterp.history import AbstractDescr, JitCellToken, BasicFailDescr, BasicFinalDescr
    +from rpython.jit.backend.model import AbstractCPU
    +from rpython.rlib.jit import JitDriver
    +from rpython.jit.metainterp.test.support import LLJitMixin
    +from rpython.jit.backend.x86.test.test_basic import Jit386Mixin
    +from rpython.rlib.rvmprof import rvmprof
    +import tempfile
    +
    +class TestLogger(Jit386Mixin):
    +
    +    def test_log_loop(self):
    +        myjitdriver = JitDriver(greens = [], reds = ['x', 'y', 'res'])
    +        vmprof = rvmprof.VMProf()
    +        fileno, name = tempfile.mkstemp()
    +        def f(x, y):
    +            res = 0
    +            vmprof.enable(fileno, 0.1)
    +            while y > 0:
    +                myjitdriver.can_enter_jit(x=x, y=y, res=res)
    +                myjitdriver.jit_merge_point(x=x, y=y, res=res)
    +                res += x
    +                if res > 40:
    +                    res += 1
    +                    res -= 2
    +                    res += 1
    +                y -= 1
    +            return res
    +        res = self.meta_interp(f, [6, 20])
    +        self.check_trace_count(2)
    +        print(name)
    
    From pypy.commits at gmail.com  Fri Mar 25 05:24:35 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 02:24:35 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: try to shorten the ranges
     and numbers
    Message-ID: <56f503d3.4577c20a.cd58e.ffffd988@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83332:e625c46190c9
    Date: 2016-03-25 11:23 +0200
    http://bitbucket.org/pypy/pypy/changeset/e625c46190c9/
    
    Log:	try to shorten the ranges and numbers
    
    diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py
    --- a/rpython/jit/metainterp/opencoder.py
    +++ b/rpython/jit/metainterp/opencoder.py
    @@ -76,7 +76,7 @@
                      metainterp_sd=None):
             self.trace = trace
             self.metainterp_sd = metainterp_sd
    -        self._cache = [None] * trace._count
    +        self._cache = [None] * trace._index
             if force_inputargs is not None:
                 # the trace here is cut and we're working from
                 # inputargs that are in the middle, shuffle stuff around a bit
    @@ -92,6 +92,7 @@
             self.start = start
             self.pos = start
             self._count = start
    +        self._index = start
             self.start_index = start
             self.end = end
     
    @@ -152,7 +153,9 @@
                 if rop.is_guard(opnum):
                     self.get_snapshot_iter(descr_index).update_liveranges(
                         index, liveranges)
    -        return index + 1
    +        if opclasses[opnum].type != 'v':
    +            return index + 1
    +        return index
     
         def next(self):
             opnum = self._next()
    @@ -180,16 +183,18 @@
                 assert isinstance(res, GuardResOp)
                 res.rd_resume_position = descr_index
             if res.type != 'v':
    -            self._cache[self._count] = res
    +            self._cache[self._index] = res
    +            self._index += 1
             self._count += 1
             return res
     
     class CutTrace(BaseTrace):
    -    def __init__(self, trace, start, count, inputargs):
    +    def __init__(self, trace, start, count, index, inputargs):
             self.trace = trace
             self.start = start
             self.inputargs = inputargs
             self.count = count
    +        self.index = index
     
         def cut_at(self, cut):
             assert cut[1] > self.count
    @@ -199,7 +204,8 @@
             iter = TraceIterator(self.trace, self.start, self.trace._pos,
                                  self.inputargs, metainterp_sd=metainterp_sd)
             iter._count = self.count
    -        iter.start_index = self.count
    +        iter.start_index = self.index
    +        iter._index = self.index
             return iter
     
     def combine_uint(index1, index2):
    @@ -246,7 +252,8 @@
             self._snapshots = []
             for i, inparg in enumerate(inputargs):
                 inparg.set_position(i)
    -        self._count = len(inputargs)
    +        self._count = len(inputargs) # total count
    +        self._index = len(inputargs) # "position" of resulting resops
             self._start = len(inputargs)
             self._pos = self._start
             self.inputargs = inputargs
    @@ -281,14 +288,15 @@
             return self._pos
     
         def cut_point(self):
    -        return self._pos, self._count
    +        return self._pos, self._count, self._index
     
         def cut_at(self, end):
             self._pos = end[0]
             self._count = end[1]
    +        self._index = end[2]
     
    -    def cut_trace_from(self, (start, count), inputargs):
    -        return CutTrace(self, start, count, inputargs)
    +    def cut_trace_from(self, (start, count, index), inputargs):
    +        return CutTrace(self, start, count, index, inputargs)
     
         def _encode(self, box):
             if isinstance(box, Const):
    @@ -334,7 +342,7 @@
                 assert False, "unreachable code"
     
         def record_op(self, opnum, argboxes, descr=None):
    -        pos = self._count
    +        pos = self._index
             self.append(opnum)
             expected_arity = oparity[opnum]
             if expected_arity == -1:
    @@ -351,6 +359,8 @@
                 else:
                     self.append(self._encode_descr(descr))
             self._count += 1
    +        if opclasses[opnum].type != 'v':
    +            self._index += 1
             return pos
     
         def _encode_descr(self, descr):
    @@ -404,7 +414,7 @@
     
         def get_live_ranges(self, metainterp_sd):
             t = self.get_iter(metainterp_sd)
    -        liveranges = [0] * self._count
    +        liveranges = [0] * self._index
             index = t._count
             while not t.done():
                 index = t.next_element_update_live_range(index, liveranges)
    @@ -427,7 +437,8 @@
                 if self._deadranges[0] == self._count:
                     return self._deadranges[1]
             liveranges = self.get_live_ranges(metainterp_sd)
    -        deadranges = [0] * (self._count + 1)
    +        deadranges = [0] * (self._index + 2)
    +        assert len(deadranges) == len(liveranges) + 2
             for i in range(self._start, len(liveranges)):
                 elem = liveranges[i]
                 if elem:
    diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py
    --- a/rpython/jit/metainterp/optimizeopt/optimizer.py
    +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py
    @@ -520,7 +520,8 @@
                     break
                 self.first_optimization.propagate_forward(op)
                 trace.kill_cache_at(deadranges[i + trace.start_index])
    -            i += 1
    +            if op.type != 'v':
    +                i += 1
             # accumulate counters
             if flush:
                 self.flush()
    diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py
    --- a/rpython/jit/metainterp/test/test_opencoder.py
    +++ b/rpython/jit/metainterp/test/test_opencoder.py
    @@ -187,7 +187,7 @@
             p0 = FakeOp(t.record_op(rop.NEW_WITH_VTABLE, [], descr=SomeDescr()))
             t.record_op(rop.GUARD_TRUE, [i0])
             resume.capture_resumedata([], [i1, i2, p0], [p0, i1], t)
    -        assert t.get_live_ranges(metainterp_sd) == [4, 4, 4, 4, 0]
    +        assert t.get_live_ranges(metainterp_sd) == [4, 4, 4, 4]
     
         def test_deadranges(self):
             i0, i1, i2 = IntFrontendOp(0), IntFrontendOp(0), IntFrontendOp(0)
    @@ -203,4 +203,4 @@
             t.record_op(rop.ESCAPE_N, [ConstInt(3)])
             t.record_op(rop.ESCAPE_N, [ConstInt(3)])
             t.record_op(rop.FINISH, [i4])
    -        assert t.get_dead_ranges(metainterp_sd) == [0, 0, 0, 0, 0, 3, 0, 5, 0, 0, 0, 0, 0, 6]
    +        assert t.get_dead_ranges(metainterp_sd) == [0, 0, 0, 0, 0, 3, 4, 5]
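
The new _index counter can be summarized with a toy sketch (illustration only, not the real Trace class): _count still advances for every recorded operation, while _index advances only for operations that produce a value (type != 'v'), so caches and range arrays indexed by result position stay dense.

    class ToyTrace(object):
        def __init__(self):
            self._count = 0   # every recorded op
            self._index = 0   # only value-producing ops

        def record_op(self, produces_value):
            pos = self._index
            self._count += 1
            if produces_value:
                self._index += 1
            return pos

    t = ToyTrace()
    assert t.record_op(True) == 0    # e.g. int_add: occupies result position 0
    t.record_op(False)               # e.g. guard_true: no result position
    assert t.record_op(True) == 1    # next value-producing op gets position 1
    assert t._count == 3 and t._index == 2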
    
    From pypy.commits at gmail.com  Fri Mar 25 05:29:58 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 02:29:58 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: fix rpython
    Message-ID: <56f50516.0357c20a.ee593.ffffd723@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83333:1becdcc6a2b2
    Date: 2016-03-25 11:29 +0200
    http://bitbucket.org/pypy/pypy/changeset/1becdcc6a2b2/
    
    Log:	fix rpython
    
    diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
    --- a/rpython/jit/metainterp/pyjitpl.py
    +++ b/rpython/jit/metainterp/pyjitpl.py
    @@ -2328,7 +2328,7 @@
     
         def _compile_and_run_once(self, original_boxes):
             self.initialize_state_from_start(original_boxes)
    -        self.current_merge_points = [(original_boxes, (0, 0))]
    +        self.current_merge_points = [(original_boxes, (0, 0, 0))]
             num_green_args = self.jitdriver_sd.num_green_args
             original_greenkey = original_boxes[:num_green_args]
             self.resumekey = compile.ResumeFromInterpDescr(original_greenkey)
    
    From pypy.commits at gmail.com  Fri Mar 25 05:37:16 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 02:37:16 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: one more
    Message-ID: <56f506cc.07b71c0a.81a57.ffffb8b9@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83334:dfe36bc75fec
    Date: 2016-03-25 11:36 +0200
    http://bitbucket.org/pypy/pypy/changeset/dfe36bc75fec/
    
    Log:	one more
    
    diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py
    --- a/rpython/jit/metainterp/compile.py
    +++ b/rpython/jit/metainterp/compile.py
    @@ -261,7 +261,7 @@
         jitcell_token = make_jitcell_token(jitdriver_sd)
         cut_at = history.get_trace_position()
         history.record(rop.JUMP, jumpargs, None, descr=jitcell_token)
    -    if start != (0, 0):
    +    if start != (0, 0, 0):
             trace = trace.cut_trace_from(start, inputargs)
         if 'unroll' not in enable_opts or not metainterp.cpu.supports_guard_gc_type:
             return compile_simple_loop(metainterp, greenkey, trace, jumpargs,
    
    From pypy.commits at gmail.com  Fri Mar 25 05:42:45 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 02:42:45 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: grrr rpython
    Message-ID: <56f50815.cf0b1c0a.b6639.ffffc91f@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83335:bb8c353b9254
    Date: 2016-03-25 11:41 +0200
    http://bitbucket.org/pypy/pypy/changeset/bb8c353b9254/
    
    Log:	grrr rpython
    
    diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
    --- a/rpython/jit/metainterp/pyjitpl.py
    +++ b/rpython/jit/metainterp/pyjitpl.py
    @@ -1915,7 +1915,7 @@
             self.last_exc_value = lltype.nullptr(rclass.OBJECT)
             self.forced_virtualizable = None
             self.partial_trace = None
    -        self.retracing_from = (-1, -1)
    +        self.retracing_from = (-1, -1, -1)
             self.call_pure_results = args_dict()
             self.heapcache = HeapCache()
     
    
    From pypy.commits at gmail.com  Fri Mar 25 05:45:47 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 02:45:47 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: make sure we have two
     different names for two different exceptions
    Message-ID: <56f508cb.8673c20a.70fec.ffffde97@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83336:a544bbe29381
    Date: 2016-03-25 11:44 +0200
    http://bitbucket.org/pypy/pypy/changeset/a544bbe29381/
    
    Log:	make sure we have two different names for two different exceptions
    
    diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py
    --- a/rpython/jit/metainterp/opencoder.py
    +++ b/rpython/jit/metainterp/opencoder.py
    @@ -23,7 +23,7 @@
     MIN_SHORT = -2**15 + 1
     MAX_SHORT = 2**15 - 1
     
    -class TagOverflow(Exception):
    +class FrontendTagOverflow(Exception):
         pass
     
     class BaseTrace(object):
    @@ -263,7 +263,7 @@
                 # grow by 2X
                 self._ops = self._ops + [rffi.cast(rffi.SHORT, -15)] * len(self._ops)
             if not MIN_SHORT < v < MAX_SHORT:
    -            raise TagOverflow
    +            raise FrontendTagOverflow
             self._ops[self._pos] = rffi.cast(rffi.SHORT, v)
             self._pos += 1
     
    
    From pypy.commits at gmail.com  Fri Mar 25 06:15:20 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 03:15:20 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: use a much more lightweight
     approach (We don't care about the order of iterations of snapshots here)
    Message-ID: <56f50fb8.8fb81c0a.e67a6.ffffc4ae@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83337:2e1402dc3f14
    Date: 2016-03-25 12:14 +0200
    http://bitbucket.org/pypy/pypy/changeset/2e1402dc3f14/
    
    Log:	use a much more lightweight approach (We don't care about the order
    	of iterations of snapshots here)
    
    diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py
    --- a/rpython/jit/metainterp/opencoder.py
    +++ b/rpython/jit/metainterp/opencoder.py
    @@ -50,20 +50,6 @@
         def get(self, index):
             return self.main_iter._untag(index)
     
    -    def _update_liverange(self, item, index, liveranges):
    -        tag, v = untag(item)
    -        if tag == TAGBOX:
    -            liveranges[v] = index
    -
    -    def update_liveranges(self, index, liveranges):
    -        for item in self.vable_array:
    -            self._update_liverange(item, index, liveranges)
    -        for item in self.vref_array:
    -            self._update_liverange(item, index, liveranges)
    -        for frame in self.framestack:
    -            for item in frame.box_array:
    -                self._update_liverange(item, index, liveranges)
    -
         def unpack_jitcode_pc(self, snapshot):
             return unpack_uint(snapshot.packed_jitcode_pc)
     
    @@ -71,6 +57,22 @@
             # NOT_RPYTHON
             return [self.get(i) for i in arr]
     
    +def _update_liverange(item, index, liveranges):
    +    tag, v = untag(item)
    +    if tag == TAGBOX:
    +        liveranges[v] = index
    +
    +def update_liveranges(snapshot, index, liveranges):
    +    assert isinstance(snapshot, TopSnapshot)
    +    for item in snapshot.vable_array:
    +        _update_liverange(item, index, liveranges)
    +    for item in snapshot.vref_array:
    +        _update_liverange(item, index, liveranges)
    +    while snapshot:
    +        for item in snapshot.box_array:
    +            _update_liverange(item, index, liveranges)
    +        snapshot = snapshot.prev
    +
     class TraceIterator(BaseTrace):
         def __init__(self, trace, start, end, force_inputargs=None,
                      metainterp_sd=None):
    @@ -151,8 +153,8 @@
             if opwithdescr[opnum]:
                 descr_index = self._next()
                 if rop.is_guard(opnum):
    -                self.get_snapshot_iter(descr_index).update_liveranges(
    -                    index, liveranges)
    +                update_liveranges(self.trace._snapshots[descr_index], index, 
    +                                  liveranges)
             if opclasses[opnum].type != 'v':
                 return index + 1
             return index
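
As a toy illustration of what the liveranges array holds (a sketch that assumes box payloads are plain indexes): each referenced box position is overwritten with the index of the latest operation whose snapshot still mentions it, and the order in which the snapshot chain is walked does not matter because every entry of one snapshot gets the same index.

    def mark_live(box_positions, op_index, liveranges):
        # same effect as _update_liverange() for TAGBOX items
        for v in box_positions:
            liveranges[v] = op_index

    liveranges = [0] * 4
    mark_live([0, 2], 3, liveranges)   # snapshot at op 3 references boxes 0 and 2
    mark_live([2, 3], 5, liveranges)   # snapshot at op 5 references boxes 2 and 3
    assert liveranges == [3, 0, 5, 5]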
    
    From pypy.commits at gmail.com  Fri Mar 25 06:16:19 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 03:16:19 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: fix test_compile
    Message-ID: <56f50ff3.88c8c20a.575f5.ffffe534@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83338:b9528bcd43d0
    Date: 2016-03-25 12:15 +0200
    http://bitbucket.org/pypy/pypy/changeset/b9528bcd43d0/
    
    Log:	fix test_compile
    
    diff --git a/rpython/jit/metainterp/test/test_compile.py b/rpython/jit/metainterp/test/test_compile.py
    --- a/rpython/jit/metainterp/test/test_compile.py
    +++ b/rpython/jit/metainterp/test/test_compile.py
    @@ -99,7 +99,7 @@
         metainterp.history.trace = t
         #
         greenkey = 'faked'
    -    target_token = compile_loop(metainterp, greenkey, (0, 0),
    +    target_token = compile_loop(metainterp, greenkey, (0, 0, 0),
                                     t.inputargs,
                                     [t._mapping[x] for x in loop.operations[-1].getarglist()],
                                     None)
    
    From pypy.commits at gmail.com  Fri Mar 25 06:58:18 2016
    From: pypy.commits at gmail.com (mattip)
    Date: Fri, 25 Mar 2016 03:58:18 -0700 (PDT)
    Subject: [pypy-commit] pypy cpyext-ext: size -> length in unicode for
     cpython compatibility
    Message-ID: <56f519ca.07b71c0a.81a57.ffffd443@mx.google.com>
    
    Author: mattip 
    Branch: cpyext-ext
    Changeset: r83340:2af4fbd6f8e1
    Date: 2016-03-25 13:56 +0300
    http://bitbucket.org/pypy/pypy/changeset/2af4fbd6f8e1/
    
    Log:	size -> length in unicode for cpython compatibility
    
    diff --git a/pypy/module/cpyext/include/unicodeobject.h b/pypy/module/cpyext/include/unicodeobject.h
    --- a/pypy/module/cpyext/include/unicodeobject.h
    +++ b/pypy/module/cpyext/include/unicodeobject.h
    @@ -21,7 +21,7 @@
     typedef struct {
         PyObject_HEAD
         Py_UNICODE *str;
    -    Py_ssize_t size;
    +    Py_ssize_t length;
         long hash;                  /* Hash value; -1 if not set */
         PyObject *defenc;           /* (Default) Encoded version as Python
                                        string, or NULL; this is used for
    diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py
    --- a/pypy/module/cpyext/test/test_unicodeobject.py
    +++ b/pypy/module/cpyext/test/test_unicodeobject.py
    @@ -190,14 +190,14 @@
             ar[0] = rffi.cast(PyObject, py_uni)
             api.PyUnicode_Resize(ar, 3)
             py_uni = rffi.cast(PyUnicodeObject, ar[0])
    -        assert py_uni.c_size == 3
    +        assert py_uni.c_length == 3
             assert py_uni.c_str[1] == u'b'
             assert py_uni.c_str[3] == u'\x00'
             # the same for growing
             ar[0] = rffi.cast(PyObject, py_uni)
             api.PyUnicode_Resize(ar, 10)
             py_uni = rffi.cast(PyUnicodeObject, ar[0])
    -        assert py_uni.c_size == 10
    +        assert py_uni.c_length == 10
             assert py_uni.c_str[1] == 'b'
             assert py_uni.c_str[10] == '\x00'
             Py_DecRef(space, ar[0])
    diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py
    --- a/pypy/module/cpyext/unicodeobject.py
    +++ b/pypy/module/cpyext/unicodeobject.py
    @@ -22,7 +22,7 @@
     PyUnicodeObjectStruct = lltype.ForwardReference()
     PyUnicodeObject = lltype.Ptr(PyUnicodeObjectStruct)
     PyUnicodeObjectFields = (PyObjectFields +
    -    (("str", rffi.CWCHARP), ("size", Py_ssize_t),
    +    (("str", rffi.CWCHARP), ("length", Py_ssize_t),
          ("hash", rffi.LONG), ("defenc", PyObject)))
     cpython_struct("PyUnicodeObject", PyUnicodeObjectFields, PyUnicodeObjectStruct)
     
    @@ -54,7 +54,7 @@
         py_uni = rffi.cast(PyUnicodeObject, py_obj)
     
         buflen = length + 1
    -    py_uni.c_size = length
    +    py_uni.c_length = length
         py_uni.c_str = lltype.malloc(rffi.CWCHARP.TO, buflen,
                                      flavor='raw', zero=True,
                                      add_memory_pressure=True)
    @@ -65,7 +65,7 @@
     def unicode_attach(space, py_obj, w_obj):
         "Fills a newly allocated PyUnicodeObject with a unicode string"
         py_unicode = rffi.cast(PyUnicodeObject, py_obj)
    -    py_unicode.c_size = len(space.unicode_w(w_obj))
    +    py_unicode.c_length = len(space.unicode_w(w_obj))
         py_unicode.c_str = lltype.nullptr(rffi.CWCHARP.TO)
         py_unicode.c_hash = space.hash_w(w_obj)
         py_unicode.c_defenc = lltype.nullptr(PyObject.TO)
    @@ -76,7 +76,7 @@
         be modified after this call.
         """
         py_uni = rffi.cast(PyUnicodeObject, py_obj)
    -    s = rffi.wcharpsize2unicode(py_uni.c_str, py_uni.c_size)
    +    s = rffi.wcharpsize2unicode(py_uni.c_str, py_uni.c_length)
         w_obj = space.wrap(s)
         py_uni.c_hash = space.hash_w(w_obj)
         track_reference(space, py_obj, w_obj)
    @@ -235,7 +235,7 @@
     def PyUnicode_GetSize(space, ref):
         if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_unicode:
             ref = rffi.cast(PyUnicodeObject, ref)
    -        return ref.c_size
    +        return ref.c_length
         else:
             w_obj = from_ref(space, ref)
             return space.len_w(w_obj)
    @@ -250,11 +250,11 @@
         to make sure that the wchar_t string is 0-terminated in case this is
         required by the application."""
         c_str = PyUnicode_AS_UNICODE(space, rffi.cast(PyObject, ref))
    -    c_size = ref.c_size
    +    c_length = ref.c_length
     
         # If possible, try to copy the 0-termination as well
    -    if size > c_size:
    -        size = c_size + 1
    +    if size > c_length:
    +        size = c_length + 1
     
     
         i = 0
    @@ -262,8 +262,8 @@
             buf[i] = c_str[i]
             i += 1
     
    -    if size > c_size:
    -        return c_size
    +    if size > c_length:
    +        return c_length
         else:
             return size
     
    @@ -469,7 +469,7 @@
             ref[0] = lltype.nullptr(PyObject.TO)
             raise
         to_cp = newsize
    -    oldsize = py_uni.c_size
    +    oldsize = py_uni.c_length
         if oldsize < newsize:
             to_cp = oldsize
         for i in range(to_cp):
    
    From pypy.commits at gmail.com  Fri Mar 25 06:58:16 2016
    From: pypy.commits at gmail.com (mattip)
    Date: Fri, 25 Mar 2016 03:58:16 -0700 (PDT)
    Subject: [pypy-commit] pypy cpyext-ext: fix
    Message-ID: <56f519c8.41d91c0a.cad3c.ffffc27d@mx.google.com>
    
    Author: mattip 
    Branch: cpyext-ext
    Changeset: r83339:ee6bf67aebc7
    Date: 2016-03-25 11:14 +0300
    http://bitbucket.org/pypy/pypy/changeset/ee6bf67aebc7/
    
    Log:	fix
    
    diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
    --- a/pypy/module/cpyext/object.py
    +++ b/pypy/module/cpyext/object.py
    @@ -28,7 +28,7 @@
                              flavor='raw',
                              add_memory_pressure=True)
         # XXX FIXME
    -    return lltype.nullptr(rffi.CCHARP.TO)
    +    return lltype.nullptr(rffi.VOIDP.TO)
     
     @cpython_api([rffi.VOIDP], lltype.Void)
     def PyObject_Free(space, ptr):
    
    From pypy.commits at gmail.com  Fri Mar 25 07:09:18 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 04:09:18 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: whack a bit where we store
     metainterp_sd. Additionally use USHORT as opposed to SHORT
    Message-ID: <56f51c5e.c5301c0a.a6f32.ffffd8a1@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83341:3136716f1017
    Date: 2016-03-25 13:08 +0200
    http://bitbucket.org/pypy/pypy/changeset/3136716f1017/
    
    Log:	whack a bit where we store metainterp_sd. Additionally use USHORT as
    	opposed to SHORT
    
    diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py
    --- a/rpython/jit/metainterp/compile.py
    +++ b/rpython/jit/metainterp/compile.py
    @@ -76,7 +76,7 @@
     
             #assert not unroll
             opt = Optimizer(metainterp_sd, jitdriver_sd, optimizations)
    -        return opt.propagate_all_forward(self.trace.get_iter(metainterp_sd),
    +        return opt.propagate_all_forward(self.trace.get_iter(),
                 self.call_pure_results)
     
     class BridgeCompileData(CompileData):
    diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py
    --- a/rpython/jit/metainterp/history.py
    +++ b/rpython/jit/metainterp/history.py
    @@ -674,10 +674,10 @@
             self.consts = []
             self._cache = []
     
    -    def set_inputargs(self, inpargs):
    +    def set_inputargs(self, inpargs, metainterp_sd):
             from rpython.jit.metainterp.opencoder import Trace
     
    -        self.trace = Trace(inpargs)
    +        self.trace = Trace(inpargs, metainterp_sd)
             self.inputargs = inpargs
             if self._cache:
                 # hack to record the ops *after* we know our inputargs
    @@ -860,7 +860,7 @@
     
         def check_history(self, expected=None, **check):
             insns = {}
    -        t = self.history.trace.get_iter(self.metainterp_sd)
    +        t = self.history.trace.get_iter()
             while not t.done():
                 op = t.next()
                 opname = op.getopname()
    diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py
    --- a/rpython/jit/metainterp/logger.py
    +++ b/rpython/jit/metainterp/logger.py
    @@ -20,7 +20,7 @@
     
         def _unpack_trace(self, trace):
             ops = []
    -        i = trace.get_iter(self.metainterp_sd)
    +        i = trace.get_iter()
             while not i.done():
                 ops.append(i.next())
             return i.inputargs, ops
    diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py
    --- a/rpython/jit/metainterp/opencoder.py
    +++ b/rpython/jit/metainterp/opencoder.py
    @@ -18,10 +18,13 @@
     TAGINT, TAGCONSTPTR, TAGCONSTOTHER, TAGBOX = range(4)
     TAGMASK = 0x3
     TAGSHIFT = 2
    -SMALL_INT_STOP  = 2 ** (15 - TAGSHIFT)
    -SMALL_INT_START = -SMALL_INT_STOP
    -MIN_SHORT = -2**15 + 1
    -MAX_SHORT = 2**15 - 1
    +
    +STORAGE_TP = rffi.USHORT
    +MAX_SIZE = 2**16-1
    +SMALL_INT_STOP  = (2 ** (15 - TAGSHIFT)) - 1
    +SMALL_INT_START = -SMALL_INT_STOP # we might want to distribute them uneven
    +MIN_SHORT = 0
    +MAX_SHORT = 2**16 - 1
     
     class FrontendTagOverflow(Exception):
         pass
    @@ -78,6 +81,7 @@
                      metainterp_sd=None):
             self.trace = trace
             self.metainterp_sd = metainterp_sd
    +        self.all_descr_len = len(metainterp_sd.all_descrs)
             self._cache = [None] * trace._index
             if force_inputargs is not None:
                 # the trace here is cut and we're working from
    @@ -98,8 +102,8 @@
             self.start_index = start
             self.end = end
     
    -    def get_dead_ranges(self, metainterp_sd=None):
    -        return self.trace.get_dead_ranges(self.metainterp_sd)
    +    def get_dead_ranges(self):
    +        return self.trace.get_dead_ranges()
     
         def kill_cache_at(self, pos):
             if pos:
    @@ -123,7 +127,7 @@
             if tag == TAGBOX:
                 return self._get(v)
             elif tag == TAGINT:
    -            return ConstInt(v)
    +            return ConstInt(v + SMALL_INT_START)
             elif tag == TAGCONSTPTR:
                 return ConstPtr(self.trace._refs[v])
             elif tag == TAGCONSTOTHER:
    @@ -174,10 +178,10 @@
                 if descr_index == 0 or rop.is_guard(opnum):
                     descr = None
                 else:
    -                if descr_index < 0:
    -                    descr = self.metainterp_sd.all_descrs[-descr_index-1]
    +                if descr_index < self.all_descr_len + 1:
    +                    descr = self.metainterp_sd.all_descrs[descr_index - 1]
                     else:
    -                    descr = self.trace._descrs[descr_index]
    +                    descr = self.trace._descrs[descr_index - self.all_descr_len - 1]
             else:
                 descr = None
             res = ResOperation(opnum, args, descr=descr)
    @@ -202,9 +206,10 @@
             assert cut[1] > self.count
             self.trace.cut_at(cut)
     
    -    def get_iter(self, metainterp_sd=None):
    +    def get_iter(self):
             iter = TraceIterator(self.trace, self.start, self.trace._pos,
    -                             self.inputargs, metainterp_sd=metainterp_sd)
    +                             self.inputargs,
    +                             metainterp_sd=self.trace.metainterp_sd)
             iter._count = self.count
             iter.start_index = self.index
             iter._index = self.index
    @@ -237,8 +242,8 @@
     class Trace(BaseTrace):
         _deadranges = (-1, None)
     
    -    def __init__(self, inputargs):
    -        self._ops = [rffi.cast(rffi.SHORT, -15)] * 30000
    +    def __init__(self, inputargs, metainterp_sd):
    +        self._ops = [rffi.cast(STORAGE_TP, 0)] * MAX_SIZE
             self._pos = 0
             self._consts_bigint = 0
             self._consts_float = 0
    @@ -259,14 +264,15 @@
             self._start = len(inputargs)
             self._pos = self._start
             self.inputargs = inputargs
    +        self.metainterp_sd = metainterp_sd
     
         def append(self, v):
             if self._pos >= len(self._ops):
                 # grow by 2X
    -            self._ops = self._ops + [rffi.cast(rffi.SHORT, -15)] * len(self._ops)
    -        if not MIN_SHORT < v < MAX_SHORT:
    +            self._ops = self._ops + [rffi.cast(STORAGE_TP, 0)] * len(self._ops)
    +        if not MIN_SHORT <= v <= MAX_SHORT:
                 raise FrontendTagOverflow
    -        self._ops[self._pos] = rffi.cast(rffi.SHORT, v)
    +        self._ops[self._pos] = rffi.cast(STORAGE_TP, v)
             self._pos += 1
     
         def done(self):
    @@ -305,7 +311,7 @@
                 if (isinstance(box, ConstInt) and
                     isinstance(box.getint(), int) and # symbolics
                     SMALL_INT_START <= box.getint() < SMALL_INT_STOP):
    -                return tag(TAGINT, box.getint())
    +                return tag(TAGINT, box.getint() - SMALL_INT_START)
                 elif isinstance(box, ConstInt):
                     self._consts_bigint += 1
                     if not isinstance(box.getint(), int):
    @@ -367,18 +373,18 @@
     
         def _encode_descr(self, descr):
             if descr.descr_index != -1:
    -            return -descr.descr_index-1
    +            return descr.descr_index + 1
             self._descrs.append(descr)
    -        return len(self._descrs) - 1
    +        return len(self._descrs) - 1 + len(self.metainterp_sd.all_descrs) + 1
     
         def _list_of_boxes(self, boxes):
    -        array = [rffi.cast(rffi.SHORT, 0)] * len(boxes)
    +        array = [rffi.cast(STORAGE_TP, 0)] * len(boxes)
             for i in range(len(boxes)):
                 array[i] = self._encode(boxes[i])
             return array
     
         def new_array(self, lgt):
    -        return [rffi.cast(rffi.SHORT, 0)] * lgt
    +        return [rffi.cast(STORAGE_TP, 0)] * lgt
     
         def create_top_snapshot(self, jitcode, pc, frame, flag, vable_boxes, vref_boxes):
             self._total_snapshots += 1
    @@ -390,7 +396,7 @@
             assert rffi.cast(lltype.Signed, self._ops[self._pos - 1]) == 0
             # guards have no descr
             self._snapshots.append(s)
    -        self._ops[self._pos - 1] = rffi.cast(rffi.SHORT, len(self._snapshots) - 1)
    +        self._ops[self._pos - 1] = rffi.cast(STORAGE_TP, len(self._snapshots) - 1)
             return s
     
         def create_empty_top_snapshot(self, vable_boxes, vref_boxes):
    @@ -402,7 +408,7 @@
             assert rffi.cast(lltype.Signed, self._ops[self._pos - 1]) == 0
             # guards have no descr
             self._snapshots.append(s)
    -        self._ops[self._pos - 1] = rffi.cast(rffi.SHORT, len(self._snapshots) - 1)
    +        self._ops[self._pos - 1] = rffi.cast(STORAGE_TP, len(self._snapshots) - 1)
             return s
     
         def create_snapshot(self, jitcode, pc, frame, flag):
    @@ -410,19 +416,19 @@
             array = frame.get_list_of_active_boxes(flag, self.new_array, self._encode)
             return Snapshot(combine_uint(jitcode.index, pc), array)
     
    -    def get_iter(self, metainterp_sd=None):
    -        assert metainterp_sd
    -        return TraceIterator(self, self._start, self._pos, metainterp_sd=metainterp_sd)
    +    def get_iter(self):
    +        return TraceIterator(self, self._start, self._pos,
    +                             metainterp_sd=self.metainterp_sd)
     
    -    def get_live_ranges(self, metainterp_sd):
    -        t = self.get_iter(metainterp_sd)
    +    def get_live_ranges(self):
    +        t = self.get_iter()
             liveranges = [0] * self._index
             index = t._count
             while not t.done():
                 index = t.next_element_update_live_range(index, liveranges)
             return liveranges
     
    -    def get_dead_ranges(self, metainterp_sd=None):
    +    def get_dead_ranges(self):
             """ Same as get_live_ranges, but returns a list of "dying" indexes,
             such as for each index x, the number found there is for sure dead
             before x
    @@ -438,7 +444,7 @@
             if self._deadranges != (-1, None):
                 if self._deadranges[0] == self._count:
                     return self._deadranges[1]
    -        liveranges = self.get_live_ranges(metainterp_sd)
    +        liveranges = self.get_live_ranges()
             deadranges = [0] * (self._index + 2)
             assert len(deadranges) == len(liveranges) + 2
             for i in range(self._start, len(liveranges)):
    @@ -448,8 +454,8 @@
             self._deadranges = (self._count, deadranges)
             return deadranges
     
    -    def unpack(self, metainterp_sd):
    -        iter = self.get_iter(metainterp_sd)
    +    def unpack(self):
    +        iter = self.get_iter()
             ops = []
             while not iter.done():
                 ops.append(iter.next())
    diff --git a/rpython/jit/metainterp/optimizeopt/unroll.py b/rpython/jit/metainterp/optimizeopt/unroll.py
    --- a/rpython/jit/metainterp/optimizeopt/unroll.py
    +++ b/rpython/jit/metainterp/optimizeopt/unroll.py
    @@ -123,8 +123,7 @@
         
         def optimize_preamble(self, trace, runtime_boxes, call_pure_results, memo):
             info, newops = self.optimizer.propagate_all_forward(
    -            trace.get_iter(self.optimizer.metainterp_sd),
    -            call_pure_results, flush=False)
    +            trace.get_iter(), call_pure_results, flush=False)
             exported_state = self.export_state(info.jump_op.getarglist(),
                                                info.inputargs,
                                                runtime_boxes, memo)
    @@ -136,7 +135,7 @@
     
         def optimize_peeled_loop(self, trace, celltoken, state,
                                  call_pure_results, inline_short_preamble=True):
    -        trace = trace.get_iter(self.optimizer.metainterp_sd)
    +        trace = trace.get_iter()
             try:
                 label_args = self.import_state(trace.inputargs, state)
             except VirtualStatesCantMatch:
    @@ -227,7 +226,7 @@
     
         def optimize_bridge(self, trace, runtime_boxes, call_pure_results,
                             inline_short_preamble, box_names_memo):
    -        trace = trace.get_iter(self.optimizer.metainterp_sd)
    +        trace = trace.get_iter()
             self._check_no_forwarding([trace.inputargs])
             info, ops = self.optimizer.propagate_all_forward(trace,
                 call_pure_results, False)
    diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
    --- a/rpython/jit/metainterp/pyjitpl.py
    +++ b/rpython/jit/metainterp/pyjitpl.py
    @@ -2332,7 +2332,8 @@
             num_green_args = self.jitdriver_sd.num_green_args
             original_greenkey = original_boxes[:num_green_args]
             self.resumekey = compile.ResumeFromInterpDescr(original_greenkey)
    -        self.history.set_inputargs(original_boxes[num_green_args:])
    +        self.history.set_inputargs(original_boxes[num_green_args:],
    +                                   self.staticdata)
             self.seen_loop_header_for_jdindex = -1
             try:
                 self.interpret()
    @@ -2552,7 +2553,7 @@
                 except ChangeFrame:
                     pass
             else:
    -            self.history.set_inputargs(inputargs)
    +            self.history.set_inputargs(inputargs, self.staticdata)
                 assert not exception
     
         def get_procedure_token(self, greenkey, with_compiled_targets=False):
    diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py
    --- a/rpython/jit/metainterp/test/test_opencoder.py
    +++ b/rpython/jit/metainterp/test/test_opencoder.py
    @@ -17,7 +17,7 @@
         pass
     
     class metainterp_sd(object):
    -    pass
    +    all_descrs = []
     
     class FakeOp(AbstractResOp):
         def __init__(self, pos):
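
The changes above all follow one pattern: metainterp_sd is handed to the trace once, when it is created, so accessors such as get_iter(), unpack() and get_live_ranges() no longer take it as an argument. A minimal sketch of that refactoring, using hypothetical stand-in classes rather than the real Trace:

    # Sketch only: MiniTrace and FakeStaticData are stand-ins, not the real API.
    class FakeStaticData(object):
        all_descrs = []                      # tests only need the attribute to exist

    class MiniTrace(object):
        def __init__(self, inputargs, metainterp_sd):
            self.metainterp_sd = metainterp_sd   # stored once at construction
            self.inputargs = inputargs
            self._ops = []

        def get_iter(self):
            # no metainterp_sd parameter any more -- it is already on self
            return iter(self._ops)

        def unpack(self):
            return self.inputargs, list(self.get_iter())

    trace = MiniTrace(['i0', 'i1'], FakeStaticData())
    inputargs, ops = trace.unpack()          # callers drop the extra argument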
    
    From pypy.commits at gmail.com  Fri Mar 25 07:12:04 2016
    From: pypy.commits at gmail.com (plan_rich)
    Date: Fri, 25 Mar 2016 04:12:04 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: adapting the test interface
     for test_vecopt
    Message-ID: <56f51d04.07b71c0a.81a57.ffffd8f5@mx.google.com>
    
    Author: Richard Plangger 
    Branch: jit-leaner-frontend
    Changeset: r83342:5e609847041f
    Date: 2016-03-22 17:09 +0100
    http://bitbucket.org/pypy/pypy/changeset/5e609847041f/
    
    Log:	adapting the test interface for test_vecopt
    
    diff --git a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py
    --- a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py
    +++ b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py
    @@ -81,7 +81,9 @@
         jitdriver_sd = FakeJitDriverStaticData()
     
         def assert_vectorize(self, loop, expected_loop, call_pure_results=None):
    -        trace = convert_loop_to_trace(loop)
    +        jump = ResOperation(rop.LABEL, loop.jump.getarglist(), loop.jump.getdescr())
    +        trace = Trace(loop.label, jump, loop.operations)
    +        trace = self.convert_loop_to_packed(loop)
             compile_data = compile.LoopCompileData(trace, loop.jump.getarglist())
             state = self._do_optimize_loop(compile_data)
             loop.label = state[0].label_op
    
    From pypy.commits at gmail.com  Fri Mar 25 07:12:06 2016
    From: pypy.commits at gmail.com (plan_rich)
    Date: Fri, 25 Mar 2016 04:12:06 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: remove descr after
     parse_loop in assert_vectorize,
 this is needed to successfully convert the loop to a trace object
    Message-ID: <56f51d06.83301c0a.13a68.ffffe739@mx.google.com>
    
    Author: Richard Plangger 
    Branch: jit-leaner-frontend
    Changeset: r83343:2bbf906956f4
    Date: 2016-03-25 11:08 +0100
    http://bitbucket.org/pypy/pypy/changeset/2bbf906956f4/
    
Log:	remove descr after parse_loop in assert_vectorize; this is needed to
	successfully convert the loop to a trace object
    
    diff --git a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py
    --- a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py
    +++ b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py
    @@ -82,12 +82,17 @@
     
         def assert_vectorize(self, loop, expected_loop, call_pure_results=None):
             jump = ResOperation(rop.LABEL, loop.jump.getarglist(), loop.jump.getdescr())
    -        trace = Trace(loop.label, jump, loop.operations)
    -        trace = self.convert_loop_to_packed(loop)
    +        # convert_loop_to_trace assumes that there are no descriptors
    +        # but because this optimization pass is after the normal optimization pass
    +        # parse_loop already set artificial resume descr!
    +        for op in loop.operations:
    +            if op.is_guard():
    +                op.setdescr(None)
    +        trace = convert_loop_to_trace(loop)
             compile_data = compile.LoopCompileData(trace, loop.jump.getarglist())
             state = self._do_optimize_loop(compile_data)
             loop.label = state[0].label_op
    -        loop.opererations = state[1]
    +        loop.operations = state[1]
             self.assert_equal(loop, expected_loop)
     
         def vectoroptimizer(self, loop):
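
The cleanup loop added here exists because convert_loop_to_trace expects guards without descrs, while parse_loop has already attached artificial resume descrs by the time the vectorizer tests run. A self-contained sketch of that step, with a hypothetical Op stand-in for the parsed operations:

    # Sketch only: Op is a stand-in for the parsed ResOperation objects.
    class Op(object):
        def __init__(self, guard, descr=None):
            self._guard = guard
            self._descr = descr
        def is_guard(self):
            return self._guard
        def setdescr(self, descr):
            self._descr = descr
        def getdescr(self):
            return self._descr

    operations = [Op(True, descr='artificial resume descr'), Op(False)]
    for op in operations:
        if op.is_guard():
            op.setdescr(None)                # strip what parse_loop attached
    assert all(op.getdescr() is None for op in operations)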
    
    From pypy.commits at gmail.com  Fri Mar 25 07:12:08 2016
    From: pypy.commits at gmail.com (plan_rich)
    Date: Fri, 25 Mar 2016 04:12:08 -0700 (PDT)
Subject: [pypy-commit] pypy jit-leaner-frontend: call optimize_vector
 instead of _do_optimize_loop; the additional optimizations are not necessary
    Message-ID: <56f51d08.a3f6c20a.2d20d.05e1@mx.google.com>
    
    Author: Richard Plangger 
    Branch: jit-leaner-frontend
    Changeset: r83344:e5c868861aa2
    Date: 2016-03-25 12:09 +0100
    http://bitbucket.org/pypy/pypy/changeset/e5c868861aa2/
    
Log:	call optimize_vector instead of _do_optimize_loop; the additional
	optimizations are not necessary
    
    diff --git a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py
    --- a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py
    +++ b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py
    @@ -14,9 +14,10 @@
     from rpython.jit.metainterp.optimizeopt.vector import (VectorizingOptimizer,
             MemoryRef, isomorphic, Pair, NotAVectorizeableLoop, VectorLoop,
             NotAProfitableLoop, GuardStrengthenOpt, CostModel, X86_CostModel,
    -        PackSet)
    +        PackSet, optimize_vector)
     from rpython.jit.metainterp.optimizeopt.schedule import (Scheduler,
             SchedulerState, VecScheduleState, Pack)
    +from rpython.jit.metainterp.optimizeopt.optimizer import BasicLoopInfo
     from rpython.jit.metainterp.optimize import InvalidLoop
     from rpython.jit.metainterp import compile
     from rpython.jit.metainterp.resoperation import rop, ResOperation
    @@ -74,6 +75,12 @@
     
     ARCH_VEC_REG_SIZE = 16
     
    +class FakeWarmState(object):
    +    vec_all = False
    +    vec_cost = 0
    +
    +
    +
     class VecTestHelper(DependencyBaseTest):
     
         enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap"
    @@ -81,18 +88,17 @@
         jitdriver_sd = FakeJitDriverStaticData()
     
         def assert_vectorize(self, loop, expected_loop, call_pure_results=None):
    -        jump = ResOperation(rop.LABEL, loop.jump.getarglist(), loop.jump.getdescr())
    -        # convert_loop_to_trace assumes that there are no descriptors
    -        # but because this optimization pass is after the normal optimization pass
    -        # parse_loop already set artificial resume descr!
    -        for op in loop.operations:
    -            if op.is_guard():
    -                op.setdescr(None)
    -        trace = convert_loop_to_trace(loop)
    -        compile_data = compile.LoopCompileData(trace, loop.jump.getarglist())
    -        state = self._do_optimize_loop(compile_data)
    -        loop.label = state[0].label_op
    -        loop.operations = state[1]
    +        jump = ResOperation(rop.JUMP, loop.jump.getarglist(), loop.jump.getdescr())
    +        metainterp_sd = FakeMetaInterpStaticData(self.cpu)
    +        warmstate = FakeWarmState()
    +        loop.operations += [loop.jump]
    +        loop_info = BasicLoopInfo(loop.jump.getarglist(), None, jump)
    +        loop_info.label_op = ResOperation(rop.LABEL, loop.jump.getarglist(), loop.jump.getdescr())
    +        optimize_vector(None, metainterp_sd, self.jitdriver_sd, warmstate,
    +                        loop_info, loop.operations)
    +        loop.operations = loop.operations[:-1]
    +        #loop.label = state[0].label_op
    +        #loop.operations = state[1]
             self.assert_equal(loop, expected_loop)
     
         def vectoroptimizer(self, loop):
    diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py
    --- a/rpython/jit/metainterp/optimizeopt/vector.py
    +++ b/rpython/jit/metainterp/optimizeopt/vector.py
    @@ -117,7 +117,7 @@
         user_code = not jitdriver_sd.vec and warmstate.vec_all
         e = len(loop_ops)-1
         assert e > 0
    -    assert loop_ops[e].is_final()
    +    assert rop.is_final(loop_ops[e].getopnum())
         loop = VectorLoop(loop_info.label_op, loop_ops[:e], loop_ops[-1])
         if user_code and user_loop_bail_fast_path(loop, warmstate):
             return loop_info, loop_ops
    
    From pypy.commits at gmail.com  Fri Mar 25 07:16:54 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 04:16:54 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: fix more tests
    Message-ID: <56f51e26.465ec20a.c6bc6.022e@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83345:40b5c0262fc4
    Date: 2016-03-25 13:12 +0200
    http://bitbucket.org/pypy/pypy/changeset/40b5c0262fc4/
    
    Log:	fix more tests
    
    diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py
    --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py
    +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebasic.py
    @@ -29,7 +29,7 @@
             exp = parse(optops, namespace=self.namespace.copy())
             expected = convert_old_style_to_targets(exp, jump=True)
             call_pure_results = self._convert_call_pure_results(call_pure_results)
    -        trace = convert_loop_to_trace(loop)
    +        trace = convert_loop_to_trace(loop, FakeMetaInterpStaticData(self.cpu))
             compile_data = compile.SimpleCompileData(trace,
                                                      call_pure_results)
             info, ops = self._do_optimize_loop(compile_data)
    diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizebridge.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizebridge.py
    --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizebridge.py
    +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizebridge.py
    @@ -1,6 +1,6 @@
     
     from rpython.jit.metainterp.optimizeopt.test.test_util import BaseTest,\
    -     LLtypeMixin, convert_old_style_to_targets
    +     LLtypeMixin, convert_old_style_to_targets, FakeMetaInterpStaticData
     from rpython.jit.metainterp import compile
     from rpython.jit.tool import oparser
     from rpython.jit.metainterp.resoperation import ResOperation, rop
    @@ -28,7 +28,7 @@
             bridge = self.parse(bridge_ops)
             bridge.operations[-1].setdescr(jitcell_token)
             self.add_guard_future_condition(bridge)
    -        trace = oparser.convert_loop_to_trace(bridge)
    +        trace = oparser.convert_loop_to_trace(bridge, FakeMetaInterpStaticData(self.cpu))
             data = compile.BridgeCompileData(trace, self.convert_values(bridge.operations[-1].getarglist(), bridge_values),
                                              enable_opts=self.enable_opts,
                                 inline_short_preamble=inline_short_preamble)
    diff --git a/rpython/jit/metainterp/optimizeopt/test/test_unroll.py b/rpython/jit/metainterp/optimizeopt/test/test_unroll.py
    --- a/rpython/jit/metainterp/optimizeopt/test/test_unroll.py
    +++ b/rpython/jit/metainterp/optimizeopt/test/test_unroll.py
    @@ -5,7 +5,7 @@
     import py
     
     from rpython.jit.metainterp.optimizeopt.test.test_util import BaseTest,\
    -     LLtypeMixin
    +     LLtypeMixin, FakeMetaInterpStaticData
     from rpython.jit.metainterp.optimizeopt.util import equaloplists
     from rpython.jit.metainterp.history import (TreeLoop, ConstInt,
                                                 JitCellToken, TargetToken)
    @@ -53,9 +53,7 @@
             preamble = TreeLoop('preamble')
     
             token = JitCellToken()
    -        start_label = ResOperation(rop.LABEL, inputargs, descr=TargetToken(token))
    -        stop_label = ResOperation(rop.LABEL, jump_args, descr=token)
    -        trace = oparser.convert_loop_to_trace(loop)
    +        trace = oparser.convert_loop_to_trace(loop, FakeMetaInterpStaticData(self.cpu))
             compile_data = LoopCompileData(trace, inputargs)
             start_state, newops = self._do_optimize_loop(compile_data)
             preamble.operations = newops
    diff --git a/rpython/jit/metainterp/optimizeopt/test/test_util.py b/rpython/jit/metainterp/optimizeopt/test/test_util.py
    --- a/rpython/jit/metainterp/optimizeopt/test/test_util.py
    +++ b/rpython/jit/metainterp/optimizeopt/test/test_util.py
    @@ -441,6 +441,7 @@
         vec = False
     
     class FakeMetaInterpStaticData(object):
    +    all_descrs = []
     
         def __init__(self, cpu):
             self.cpu = cpu
    @@ -574,7 +575,7 @@
             #                           descr=jump_op.getdescr())
             #end_label = jump_op.copy_and_change(opnum=rop.LABEL)
             call_pure_results = self._convert_call_pure_results(call_pure_results)
    -        t = convert_loop_to_trace(loop)
    +        t = convert_loop_to_trace(loop, FakeMetaInterpStaticData(self.cpu))
             preamble_data = compile.LoopCompileData(t, runtime_boxes,
                                                     call_pure_results)
             start_state, preamble_ops = self._do_optimize_loop(preamble_data)
    diff --git a/rpython/jit/metainterp/test/test_compile.py b/rpython/jit/metainterp/test/test_compile.py
    --- a/rpython/jit/metainterp/test/test_compile.py
    +++ b/rpython/jit/metainterp/test/test_compile.py
    @@ -94,7 +94,7 @@
         metainterp.staticdata = staticdata
         metainterp.cpu = cpu
         metainterp.history = History()
    -    t = convert_loop_to_trace(loop)
    +    t = convert_loop_to_trace(loop, staticdata)
         metainterp.history.inputargs = t.inputargs
         metainterp.history.trace = t
         #
    diff --git a/rpython/jit/tool/oparser.py b/rpython/jit/tool/oparser.py
    --- a/rpython/jit/tool/oparser.py
    +++ b/rpython/jit/tool/oparser.py
    @@ -424,7 +424,7 @@
             assert inp.type == 'f'
             return history.FloatFrontendOp
     
    -def convert_loop_to_trace(loop, skip_last=False):
    +def convert_loop_to_trace(loop, metainterp_sd, skip_last=False):
         from rpython.jit.metainterp.opencoder import Trace
         from rpython.jit.metainterp.test.test_opencoder import FakeFrame
         from rpython.jit.metainterp import history, resume
    @@ -442,7 +442,7 @@
         mapping = {}
         for one, two in zip(loop.inputargs, inputargs):
             mapping[one] = two
    -    trace = Trace(inputargs)
    +    trace = Trace(inputargs, metainterp_sd)
         ops = loop.operations
         if skip_last:
             ops = ops[:-1]
    
    From pypy.commits at gmail.com  Fri Mar 25 07:16:55 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 04:16:55 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: merge
    Message-ID: <56f51e27.07b71c0a.81a57.ffffda73@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83346:1c4479dff8cd
    Date: 2016-03-25 13:15 +0200
    http://bitbucket.org/pypy/pypy/changeset/1c4479dff8cd/
    
    Log:	merge
    
    diff --git a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py
    --- a/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py
    +++ b/rpython/jit/metainterp/optimizeopt/test/test_vecopt.py
    @@ -14,9 +14,10 @@
     from rpython.jit.metainterp.optimizeopt.vector import (VectorizingOptimizer,
             MemoryRef, isomorphic, Pair, NotAVectorizeableLoop, VectorLoop,
             NotAProfitableLoop, GuardStrengthenOpt, CostModel, X86_CostModel,
    -        PackSet)
    +        PackSet, optimize_vector)
     from rpython.jit.metainterp.optimizeopt.schedule import (Scheduler,
             SchedulerState, VecScheduleState, Pack)
    +from rpython.jit.metainterp.optimizeopt.optimizer import BasicLoopInfo
     from rpython.jit.metainterp.optimize import InvalidLoop
     from rpython.jit.metainterp import compile
     from rpython.jit.metainterp.resoperation import rop, ResOperation
    @@ -74,6 +75,12 @@
     
     ARCH_VEC_REG_SIZE = 16
     
    +class FakeWarmState(object):
    +    vec_all = False
    +    vec_cost = 0
    +
    +
    +
     class VecTestHelper(DependencyBaseTest):
     
         enable_opts = "intbounds:rewrite:virtualize:string:earlyforce:pure:heap"
    @@ -81,11 +88,17 @@
         jitdriver_sd = FakeJitDriverStaticData()
     
         def assert_vectorize(self, loop, expected_loop, call_pure_results=None):
    -        trace = convert_loop_to_trace(loop)
    -        compile_data = compile.LoopCompileData(trace, loop.jump.getarglist())
    -        state = self._do_optimize_loop(compile_data)
    -        loop.label = state[0].label_op
    -        loop.opererations = state[1]
    +        jump = ResOperation(rop.JUMP, loop.jump.getarglist(), loop.jump.getdescr())
    +        metainterp_sd = FakeMetaInterpStaticData(self.cpu)
    +        warmstate = FakeWarmState()
    +        loop.operations += [loop.jump]
    +        loop_info = BasicLoopInfo(loop.jump.getarglist(), None, jump)
    +        loop_info.label_op = ResOperation(rop.LABEL, loop.jump.getarglist(), loop.jump.getdescr())
    +        optimize_vector(None, metainterp_sd, self.jitdriver_sd, warmstate,
    +                        loop_info, loop.operations)
    +        loop.operations = loop.operations[:-1]
    +        #loop.label = state[0].label_op
    +        #loop.operations = state[1]
             self.assert_equal(loop, expected_loop)
     
         def vectoroptimizer(self, loop):
    diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py
    --- a/rpython/jit/metainterp/optimizeopt/vector.py
    +++ b/rpython/jit/metainterp/optimizeopt/vector.py
    @@ -117,7 +117,7 @@
         user_code = not jitdriver_sd.vec and warmstate.vec_all
         e = len(loop_ops)-1
         assert e > 0
    -    assert loop_ops[e].is_final()
    +    assert rop.is_final(loop_ops[e].getopnum())
         loop = VectorLoop(loop_info.label_op, loop_ops[:e], loop_ops[-1])
         if user_code and user_loop_bail_fast_path(loop, warmstate):
             return loop_info, loop_ops
    
    From pypy.commits at gmail.com  Fri Mar 25 07:22:07 2016
    From: pypy.commits at gmail.com (plan_rich)
    Date: Fri, 25 Mar 2016 04:22:07 -0700 (PDT)
Subject: [pypy-commit] pypy jit-leaner-frontend: missing argument;
 now all optimizeopt tests pass
    Message-ID: <56f51f5f.552d1c0a.2d357.ffffe3e7@mx.google.com>
    
    Author: Richard Plangger 
    Branch: jit-leaner-frontend
    Changeset: r83347:c8d47f78736b
    Date: 2016-03-25 12:21 +0100
    http://bitbucket.org/pypy/pypy/changeset/c8d47f78736b/
    
Log:	missing argument; now all optimizeopt tests pass
    
    diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py
    --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py
    +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py
    @@ -828,7 +828,7 @@
             if hasattr(self, 'callinfocollection'):
                 metainterp_sd.callinfocollection = self.callinfocollection
             #
    -        trace = oparser.convert_loop_to_trace(bridge)
    +        trace = oparser.convert_loop_to_trace(bridge, metainterp_sd)
     
             runtime_boxes = self.convert_values(bridge.operations[-1].getarglist(),
                                                 values)
    
    From pypy.commits at gmail.com  Fri Mar 25 07:38:50 2016
    From: pypy.commits at gmail.com (plan_rich)
    Date: Fri, 25 Mar 2016 04:38:50 -0700 (PDT)
Subject: [pypy-commit] pypy jit-leaner-frontend: test_vector passing again;
 mostly issues with methods moved from ResOperation to rop
    Message-ID: <56f5234a.6bb8c20a.3a155.0e88@mx.google.com>
    
    Author: Richard Plangger 
    Branch: jit-leaner-frontend
    Changeset: r83348:472192cef9c7
    Date: 2016-03-25 12:38 +0100
    http://bitbucket.org/pypy/pypy/changeset/472192cef9c7/
    
Log:	test_vector passing again; mostly issues with methods moved from
	ResOperation to rop
    
    diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py
    --- a/rpython/jit/metainterp/compile.py
    +++ b/rpython/jit/metainterp/compile.py
    @@ -295,7 +295,6 @@
             return None
     
         if ((warmstate.vec and jitdriver_sd.vec) or warmstate.vec_all):
    -        assert False
             from rpython.jit.metainterp.optimizeopt.vector import optimize_vector
             loop_info, loop_ops = optimize_vector(trace, metainterp_sd,
                                                   jitdriver_sd, warmstate,
    diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py
    --- a/rpython/jit/metainterp/optimizeopt/vector.py
    +++ b/rpython/jit/metainterp/optimizeopt/vector.py
    @@ -177,7 +177,7 @@
         guard_count = 0
         at_least_one_array_access = True
         for i,op in enumerate(loop.operations):
    -        if op.is_jit_debug():
    +        if rop.is_jit_debug(op):
                 continue
     
             if op.vector >= 0 and not op.is_guard():
    @@ -190,7 +190,7 @@
     
             if warmstate.vec_ratio > 0.0:
                 # blacklist
    -            if op.is_call() or rop.is_call_assembler(op):
    +            if rop.is_call(op) or rop.is_call_assembler(op):
                     return True
     
             if op.is_guard():
    
    From pypy.commits at gmail.com  Fri Mar 25 07:48:41 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 04:48:41 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: improve casts
    Message-ID: <56f52599.918e1c0a.c3038.fffff6a1@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83349:3fe35633374e
    Date: 2016-03-25 13:47 +0200
    http://bitbucket.org/pypy/pypy/changeset/3fe35633374e/
    
    Log:	improve casts
    
    diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py
    --- a/rpython/jit/metainterp/opencoder.py
    +++ b/rpython/jit/metainterp/opencoder.py
    @@ -26,6 +26,11 @@
     MIN_SHORT = 0
     MAX_SHORT = 2**16 - 1
     
    +def expand_sizes_to_signed():
    +    """ This function will make sure we can use sizes all the
    +    way up to lltype.Signed for indexes everywhere
    +    """
    +
     class FrontendTagOverflow(Exception):
         pass
     
    @@ -464,7 +469,7 @@
     def tag(kind, pos):
         #if not SMALL_INT_START <= pos < SMALL_INT_STOP:
         #    raise some error
    -    return (pos << TAGSHIFT) | kind
    +    return rffi.cast(STORAGE_TP, (pos << TAGSHIFT) | kind)
     
     def untag(tagged):
         return intmask(tagged) & TAGMASK, intmask(tagged) >> TAGSHIFT
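
The cast added here wraps the result of tag(), which packs a (kind, position) pair into one storage cell: the low TAGSHIFT bits carry the kind, the remaining bits the position. The round trip, shown in plain Python with the TAGSHIFT = 2 and TAGMASK = 0x3 values used on this branch (and without the rffi cast to the storage type):

    # Plain-Python sketch of the encoding; the real code casts to STORAGE_TP.
    TAGSHIFT = 2
    TAGMASK = 0x3

    def tag(kind, pos):
        return (pos << TAGSHIFT) | kind      # kind must fit in the low two bits

    def untag(tagged):
        return tagged & TAGMASK, tagged >> TAGSHIFT

    assert untag(tag(1, 42)) == (1, 42)
    assert untag(tag(3, 0)) == (3, 0)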
    
    From pypy.commits at gmail.com  Fri Mar 25 07:48:43 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 04:48:43 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: merge
    Message-ID: <56f5259b.c52f1c0a.85ced.ffffe6fa@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83350:3de6e9371d12
    Date: 2016-03-25 13:47 +0200
    http://bitbucket.org/pypy/pypy/changeset/3de6e9371d12/
    
    Log:	merge
    
    diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py
    --- a/rpython/jit/metainterp/compile.py
    +++ b/rpython/jit/metainterp/compile.py
    @@ -295,7 +295,6 @@
             return None
     
         if ((warmstate.vec and jitdriver_sd.vec) or warmstate.vec_all):
    -        assert False
             from rpython.jit.metainterp.optimizeopt.vector import optimize_vector
             loop_info, loop_ops = optimize_vector(trace, metainterp_sd,
                                                   jitdriver_sd, warmstate,
    diff --git a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py
    --- a/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py
    +++ b/rpython/jit/metainterp/optimizeopt/test/test_virtualstate.py
    @@ -828,7 +828,7 @@
             if hasattr(self, 'callinfocollection'):
                 metainterp_sd.callinfocollection = self.callinfocollection
             #
    -        trace = oparser.convert_loop_to_trace(bridge)
    +        trace = oparser.convert_loop_to_trace(bridge, metainterp_sd)
     
             runtime_boxes = self.convert_values(bridge.operations[-1].getarglist(),
                                                 values)
    diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py
    --- a/rpython/jit/metainterp/optimizeopt/vector.py
    +++ b/rpython/jit/metainterp/optimizeopt/vector.py
    @@ -177,7 +177,7 @@
         guard_count = 0
         at_least_one_array_access = True
         for i,op in enumerate(loop.operations):
    -        if op.is_jit_debug():
    +        if rop.is_jit_debug(op):
                 continue
     
             if op.vector >= 0 and not op.is_guard():
    @@ -190,7 +190,7 @@
     
             if warmstate.vec_ratio > 0.0:
                 # blacklist
    -            if op.is_call() or rop.is_call_assembler(op):
    +            if rop.is_call(op) or rop.is_call_assembler(op):
                     return True
     
             if op.is_guard():
    
    From pypy.commits at gmail.com  Fri Mar 25 07:56:11 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 04:56:11 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: more fight
    Message-ID: <56f5275b.d4df1c0a.368ce.ffffefb6@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83351:8f6875e3ff4a
    Date: 2016-03-25 13:55 +0200
    http://bitbucket.org/pypy/pypy/changeset/8f6875e3ff4a/
    
    Log:	more fight
    
    diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py
    --- a/rpython/jit/metainterp/opencoder.py
    +++ b/rpython/jit/metainterp/opencoder.py
    @@ -391,9 +391,12 @@
         def new_array(self, lgt):
             return [rffi.cast(STORAGE_TP, 0)] * lgt
     
    +    def _encode_cast(self, i):
    +        return rffi.cast(STORAGE_TP, self._encode(i))
    +
         def create_top_snapshot(self, jitcode, pc, frame, flag, vable_boxes, vref_boxes):
             self._total_snapshots += 1
    -        array = frame.get_list_of_active_boxes(flag, self.new_array, self._encode)
    +        array = frame.get_list_of_active_boxes(flag, self.new_array, self._encode_cast)
             vable_array = self._list_of_boxes(vable_boxes)
             vref_array = self._list_of_boxes(vref_boxes)
             s = TopSnapshot(combine_uint(jitcode.index, pc), array, vable_array,
    @@ -418,7 +421,7 @@
     
         def create_snapshot(self, jitcode, pc, frame, flag):
             self._total_snapshots += 1
    -        array = frame.get_list_of_active_boxes(flag, self.new_array, self._encode)
    +        array = frame.get_list_of_active_boxes(flag, self.new_array, self._encode_cast)
             return Snapshot(combine_uint(jitcode.index, pc), array)
     
         def get_iter(self):
    @@ -469,7 +472,7 @@
     def tag(kind, pos):
         #if not SMALL_INT_START <= pos < SMALL_INT_STOP:
         #    raise some error
    -    return rffi.cast(STORAGE_TP, (pos << TAGSHIFT) | kind)
    +    return (pos << TAGSHIFT) | kind
     
     def untag(tagged):
         return intmask(tagged) & TAGMASK, intmask(tagged) >> TAGSHIFT
    
    From pypy.commits at gmail.com  Fri Mar 25 08:05:32 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 05:05:32 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: more casting fun
    Message-ID: <56f5298c.02931c0a.b329.fffff441@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83352:bd8cbbe9d553
    Date: 2016-03-25 14:04 +0200
    http://bitbucket.org/pypy/pypy/changeset/bd8cbbe9d553/
    
    Log:	more casting fun
    
    diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py
    --- a/rpython/jit/metainterp/opencoder.py
    +++ b/rpython/jit/metainterp/opencoder.py
    @@ -385,7 +385,7 @@
         def _list_of_boxes(self, boxes):
             array = [rffi.cast(STORAGE_TP, 0)] * len(boxes)
             for i in range(len(boxes)):
    -            array[i] = self._encode(boxes[i])
    +            array[i] = self._encode_cast(boxes[i])
             return array
     
         def new_array(self, lgt):
    
    From pypy.commits at gmail.com  Fri Mar 25 08:24:53 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 05:24:53 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: specialize untag
    Message-ID: <56f52e15.4a811c0a.d2eff.ffffff94@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83353:cd2f966ad0ed
    Date: 2016-03-25 14:24 +0200
    http://bitbucket.org/pypy/pypy/changeset/cd2f966ad0ed/
    
    Log:	specialize untag
    
    diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py
    --- a/rpython/jit/metainterp/opencoder.py
    +++ b/rpython/jit/metainterp/opencoder.py
    @@ -11,7 +11,7 @@
         ResOperation, oparity, rop, opwithdescr, GuardResOp, IntOp, FloatOp, RefOp,\
         opclasses
     from rpython.rlib.rarithmetic import intmask, r_uint
    -from rpython.rlib.objectmodel import we_are_translated
    +from rpython.rlib.objectmodel import we_are_translated, specialize
     from rpython.rtyper.lltypesystem import rffi, lltype, llmemory
     from rpython.jit.metainterp.typesystem import llhelper
     
    @@ -474,5 +474,6 @@
         #    raise some error
         return (pos << TAGSHIFT) | kind
     
    + at specialize.ll()
     def untag(tagged):
         return intmask(tagged) & TAGMASK, intmask(tagged) >> TAGSHIFT
    
    From pypy.commits at gmail.com  Fri Mar 25 08:32:11 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 05:32:11 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: one more cast
    Message-ID: <56f52fcb.d3921c0a.412ff.fffff1ab@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83354:93c65e378da3
    Date: 2016-03-25 14:31 +0200
    http://bitbucket.org/pypy/pypy/changeset/93c65e378da3/
    
    Log:	one more cast
    
    diff --git a/rpython/jit/metainterp/resume.py b/rpython/jit/metainterp/resume.py
    --- a/rpython/jit/metainterp/resume.py
    +++ b/rpython/jit/metainterp/resume.py
    @@ -232,7 +232,7 @@
             v = state.v
             liveboxes = state.liveboxes
             for item in arr:
    -            box = iter.get(item)
    +            box = iter.get(rffi.cast(lltype.Signed, item))
                 box = optimizer.get_box_replacement(box)
     
                 if isinstance(box, Const):
    
    From pypy.commits at gmail.com  Fri Mar 25 08:39:39 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 05:39:39 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: fix some untested
     vectorization
    Message-ID: <56f5318b.c85b1c0a.667c3.20e0@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83355:1c9965d436b0
    Date: 2016-03-25 14:38 +0200
    http://bitbucket.org/pypy/pypy/changeset/1c9965d436b0/
    
    Log:	fix some untested vectorization
    
    diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py
    --- a/rpython/jit/metainterp/optimizeopt/vector.py
    +++ b/rpython/jit/metainterp/optimizeopt/vector.py
    @@ -177,23 +177,23 @@
         guard_count = 0
         at_least_one_array_access = True
         for i,op in enumerate(loop.operations):
    -        if rop.is_jit_debug(op):
    +        if rop.is_jit_debug(op.opnum):
                 continue
     
    -        if op.vector >= 0 and not op.is_guard():
    +        if op.vector >= 0 and not rop.is_guard(op.opnum):
                 vector_instr += 1
     
             resop_count += 1
     
    -        if op.is_primitive_array_access():
    +        if rop.is_primitive_array_access(op.opnum):
                 at_least_one_array_access = True
     
             if warmstate.vec_ratio > 0.0:
                 # blacklist
    -            if rop.is_call(op) or rop.is_call_assembler(op):
    +            if rop.is_call(op.opnum) or rop.is_call_assembler(op.opnum):
                     return True
     
    -        if op.is_guard():
    +        if rop.is_guard(op.opnum):
                 guard_count += 1
     
         if not at_least_one_array_access:
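
The repeated edits in this changeset all move predicates from methods on the operation object to functions in the rop module that take an opnum, so the checks no longer need a full ResOperation instance. A toy illustration of the pattern, with made-up opnum values rather than the real tables:

    # Toy sketch only: the opnum values and sets below are invented.
    GUARD_TRUE, JIT_DEBUG, INT_ADD = 1, 2, 3

    class rop(object):
        _guards = frozenset([GUARD_TRUE])
        _jit_debug = frozenset([JIT_DEBUG])

        @staticmethod
        def is_guard(opnum):
            return opnum in rop._guards

        @staticmethod
        def is_jit_debug(opnum):
            return opnum in rop._jit_debug

    class Op(object):
        def __init__(self, opnum):
            self.opnum = opnum

    ops = [Op(JIT_DEBUG), Op(INT_ADD), Op(GUARD_TRUE)]
    # the loop now consults the rop module instead of calling op.is_guard()
    assert sum(rop.is_guard(op.opnum) for op in ops) == 1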
    
    From pypy.commits at gmail.com  Fri Mar 25 08:45:08 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 05:45:08 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: too eager :-)
    Message-ID: <56f532d4.10921c0a.a2cf3.054f@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83356:acd5207f951c
    Date: 2016-03-25 14:44 +0200
    http://bitbucket.org/pypy/pypy/changeset/acd5207f951c/
    
    Log:	too eager :-)
    
    diff --git a/rpython/jit/metainterp/optimizeopt/vector.py b/rpython/jit/metainterp/optimizeopt/vector.py
    --- a/rpython/jit/metainterp/optimizeopt/vector.py
    +++ b/rpython/jit/metainterp/optimizeopt/vector.py
    @@ -185,7 +185,7 @@
     
             resop_count += 1
     
    -        if rop.is_primitive_array_access(op.opnum):
    +        if op.is_primitive_array_access():
                 at_least_one_array_access = True
     
             if warmstate.vec_ratio > 0.0:
    
    From pypy.commits at gmail.com  Fri Mar 25 08:52:38 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 05:52:38 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: one more thing
    Message-ID: <56f53496.02931c0a.b329.03fc@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83357:03de28875dfc
    Date: 2016-03-25 14:51 +0200
    http://bitbucket.org/pypy/pypy/changeset/03de28875dfc/
    
    Log:	one more thing
    
    diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
    --- a/rpython/jit/metainterp/pyjitpl.py
    +++ b/rpython/jit/metainterp/pyjitpl.py
    @@ -2543,7 +2543,7 @@
                 op2 = self.history.record(rop.SAVE_EXCEPTION, [], exception)
                 self.history._cache = self.history._cache[i:] + self.history._cache[:i]
                 self.history.record(rop.RESTORE_EXCEPTION, [op1, op2], None)
    -            self.history.set_inputargs(inputargs)
    +            self.history.set_inputargs(inputargs, self.staticdata)
                 if exception_obj:
                     self.execute_ll_raised(exception_obj)
                 else:
    
    From pypy.commits at gmail.com  Fri Mar 25 09:26:17 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 06:26:17 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: whack until tests pass
    Message-ID: <56f53c79.c85b1c0a.667c3.30b0@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83358:b29b22264d8f
    Date: 2016-03-25 15:25 +0200
    http://bitbucket.org/pypy/pypy/changeset/b29b22264d8f/
    
    Log:	whack until tests pass
    
    diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py
    --- a/rpython/jit/metainterp/opencoder.py
    +++ b/rpython/jit/metainterp/opencoder.py
    @@ -30,6 +30,10 @@
         """ This function will make sure we can use sizes all the
         way up to lltype.Signed for indexes everywhere
         """
    +    globals()['STORAGE_TP'] = lltype.Signed
    +    globals()['MAX_SIZE'] = 2**31-1
    +    globals()['MIN_SHORT'] = -2**31
    +    globals()['MAX_SHORT'] = 2**31 - 1
     
     class FrontendTagOverflow(Exception):
         pass
    diff --git a/rpython/jit/metainterp/test/test_compile.py b/rpython/jit/metainterp/test/test_compile.py
    --- a/rpython/jit/metainterp/test/test_compile.py
    +++ b/rpython/jit/metainterp/test/test_compile.py
    @@ -54,7 +54,7 @@
         loopnumbering = 0
     
     class FakeMetaInterpStaticData(object):
    -
    +    all_descrs = []
         logger_noopt = FakeLogger()
         logger_ops = FakeLogger()
         config = get_combined_translation_config(translating=True)
    diff --git a/rpython/jit/metainterp/test/test_opencoder.py b/rpython/jit/metainterp/test/test_opencoder.py
    --- a/rpython/jit/metainterp/test/test_opencoder.py
    +++ b/rpython/jit/metainterp/test/test_opencoder.py
    @@ -54,7 +54,7 @@
     
     class TestOpencoder(object):
         def unpack(self, t):
    -        iter = t.get_iter(metainterp_sd)
    +        iter = t.get_iter()
             l = []
             while not iter.done():
                 op = iter.next()
    @@ -65,7 +65,7 @@
     
         def test_simple_iterator(self):
             i0, i1 = IntFrontendOp(0), IntFrontendOp(0)
    -        t = Trace([i0, i1])
    +        t = Trace([i0, i1], metainterp_sd)
             add = FakeOp(t.record_op(rop.INT_ADD, [i0, i1]))
             t.record_op(rop.INT_ADD, [add, ConstInt(1)])
             (i0, i1), l, _ = self.unpack(t)
    @@ -79,7 +79,7 @@
     
         def test_rd_snapshot(self):
             i0, i1 = IntFrontendOp(0), IntFrontendOp(0)
    -        t = Trace([i0, i1])
    +        t = Trace([i0, i1], metainterp_sd)
             add = FakeOp(t.record_op(rop.INT_ADD, [i0, i1]))
             t.record_op(rop.GUARD_FALSE, [add])
             # now we write rd_snapshot and friends
    @@ -103,7 +103,7 @@
     
         def test_read_snapshot_interface(self):
             i0, i1, i2 = IntFrontendOp(0), IntFrontendOp(0), IntFrontendOp(0)
    -        t = Trace([i0, i1, i2])
    +        t = Trace([i0, i1, i2], metainterp_sd)
             t.record_op(rop.GUARD_TRUE, [i1])
             frame0 = FakeFrame(1, JitCode(2), [i0, i1])
             frame1 = FakeFrame(3, JitCode(4), [i2, i2])
    @@ -138,7 +138,7 @@
         @given(lists_of_operations())
         def xxx_test_random_snapshot(self, lst):
             inputargs, ops = lst
    -        t = Trace(inputargs)
    +        t = Trace(inputargs, metainterp_sd)
             for op in ops:
                 newop = FakeOp(t.record_op(op.getopnum(), op.getarglist()))
                 newop.orig_op = op
    @@ -157,7 +157,7 @@
     
         def test_cut_trace_from(self):
             i0, i1, i2 = IntFrontendOp(0), IntFrontendOp(0), IntFrontendOp(0)
    -        t = Trace([i0, i1, i2])
    +        t = Trace([i0, i1, i2], metainterp_sd)
             add1 = FakeOp(t.record_op(rop.INT_ADD, [i0, i1]))
             cut_point = t.cut_point()
             add2 = FakeOp(t.record_op(rop.INT_ADD, [add1, i1]))
    @@ -172,7 +172,7 @@
     
         def test_virtualizable_virtualref(self):
             i0, i1, i2 = IntFrontendOp(0), IntFrontendOp(0), IntFrontendOp(0)
    -        t = Trace([i0, i1, i2])
    +        t = Trace([i0, i1, i2], metainterp_sd)
             p0 = FakeOp(t.record_op(rop.NEW_WITH_VTABLE, [], descr=SomeDescr()))
             t.record_op(rop.GUARD_TRUE, [i0])
             resume.capture_resumedata([], [i1, i2, p0], [p0, i1], t)
    @@ -183,15 +183,15 @@
     
         def test_liveranges(self):
             i0, i1, i2 = IntFrontendOp(0), IntFrontendOp(0), IntFrontendOp(0)
    -        t = Trace([i0, i1, i2])
    +        t = Trace([i0, i1, i2], metainterp_sd)
             p0 = FakeOp(t.record_op(rop.NEW_WITH_VTABLE, [], descr=SomeDescr()))
             t.record_op(rop.GUARD_TRUE, [i0])
             resume.capture_resumedata([], [i1, i2, p0], [p0, i1], t)
    -        assert t.get_live_ranges(metainterp_sd) == [4, 4, 4, 4]
    +        assert t.get_live_ranges() == [4, 4, 4, 4]
     
         def test_deadranges(self):
             i0, i1, i2 = IntFrontendOp(0), IntFrontendOp(0), IntFrontendOp(0)
    -        t = Trace([i0, i1, i2])
    +        t = Trace([i0, i1, i2], metainterp_sd)
             p0 = FakeOp(t.record_op(rop.NEW_WITH_VTABLE, [], descr=SomeDescr()))
             t.record_op(rop.GUARD_TRUE, [i0])
             resume.capture_resumedata([], [i1, i2, p0], [p0, i1], t)
    @@ -203,4 +203,4 @@
             t.record_op(rop.ESCAPE_N, [ConstInt(3)])
             t.record_op(rop.ESCAPE_N, [ConstInt(3)])
             t.record_op(rop.FINISH, [i4])
    -        assert t.get_dead_ranges(metainterp_sd) == [0, 0, 0, 0, 0, 3, 4, 5]
    +        assert t.get_dead_ranges() == [0, 0, 0, 0, 0, 3, 4, 5]
    diff --git a/rpython/jit/metainterp/test/test_pyjitpl.py b/rpython/jit/metainterp/test/test_pyjitpl.py
    --- a/rpython/jit/metainterp/test/test_pyjitpl.py
    +++ b/rpython/jit/metainterp/test/test_pyjitpl.py
    @@ -74,6 +74,7 @@
     def test_remove_consts_and_duplicates():
         class FakeStaticData:
             cpu = None
    +        all_descrs = []
             warmrunnerdesc = None
         def is_another_box_like(box, referencebox):
             assert box is not referencebox
    @@ -89,13 +90,13 @@
         c3 = ConstInt(3)
         boxes = [b1, b2, b1, c3]
         dup = {}
    -    metainterp.history.set_inputargs([b1, b2])
    +    metainterp.history.set_inputargs([b1, b2], FakeStaticData())
         metainterp.remove_consts_and_duplicates(boxes, 4, dup)
         assert boxes[0] is b1
         assert boxes[1] is b2
         assert is_another_box_like(boxes[2], b1)
         assert is_another_box_like(boxes[3], c3)
    -    inp, operations = metainterp.history.trace.unpack(metainterp.staticdata)
    +    inp, operations = metainterp.history.trace.unpack()
         remap = dict(zip([b1, b2], inp))
         assert equaloplists(operations, [
             ResOperation(rop.SAME_AS_I, [b1]),
    diff --git a/rpython/jit/metainterp/test/test_resume.py b/rpython/jit/metainterp/test/test_resume.py
    --- a/rpython/jit/metainterp/test/test_resume.py
    +++ b/rpython/jit/metainterp/test/test_resume.py
    @@ -536,6 +536,7 @@
     
     class FakeMetaInterpStaticData:
         cpu = LLtypeMixin.cpu
    +    all_descrs = []
     
         class options:
             failargs_limit = 100
    @@ -830,7 +831,8 @@
         c1, c2, c3, c4 = [ConstInt(1), ConstInt(2), ConstInt(3), ConstInt(4)]    
     
         env = [b1, c1, b2, b1, c2]
    -    t = Trace([b1, b2, b3, b4, b5])
    +    metainterp_sd = FakeMetaInterpStaticData()
    +    t = Trace([b1, b2, b3, b4, b5], metainterp_sd)
         snap = t.create_snapshot(FakeJitCode("jitcode", 0), 0, Frame(env), False)
         env1 = [c3, b3, b1, c1]
         t.append(0) # descr index
    @@ -842,11 +844,10 @@
         env3 = [c3, b3, b1, c3]
         env4 = [c3, b4, b1, c3]
         env5 = [b1, b4, b5]
    -    metainterp_sd = FakeMetaInterpStaticData()
     
         memo = ResumeDataLoopMemo(metainterp_sd)
     
    -    iter = t.get_iter(metainterp_sd)
    +    iter = t.get_iter()
         b1, b2, b3, b4, b5 = iter.inputargs
         numb, liveboxes, v = memo.number(FakeOptimizer(), 0, iter)
         assert v == 0
    @@ -934,10 +935,10 @@
            min_size=1))
     def test_ResumeDataLoopMemo_random(lst):
         inpargs = [box for box in lst if not isinstance(box, Const)]
    -    t = Trace(inpargs)
    +    metainterp_sd = FakeMetaInterpStaticData()
    +    t = Trace(inpargs, metainterp_sd)
         t.append(0)
    -    metainterp_sd = FakeMetaInterpStaticData()
    -    i = t.get_iter(metainterp_sd)
    +    i = t.get_iter()
         t.create_top_snapshot(FakeJitCode("", 0), 0, Frame(lst), False, [], [])
         memo = ResumeDataLoopMemo(metainterp_sd)
         num, liveboxes, v = memo.number(FakeOptimizer(), 0, i)
    @@ -1036,7 +1037,8 @@
         return newboxes
     
     def make_storage(b1, b2, b3):
    -    t = Trace([box for box in [b1, b2, b3] if not isinstance(box, Const)])
    +    t = Trace([box for box in [b1, b2, b3] if not isinstance(box, Const)],
    +              FakeMetaInterpStaticData())
         t.append(0)
         storage = Storage()
         snap1 = t.create_snapshot(FakeJitCode("code3", 41), 42,
    @@ -1055,7 +1057,7 @@
         storage, t = make_storage(b1s, b2s, b3s)
         metainterp_sd = FakeMetaInterpStaticData()
         memo = ResumeDataLoopMemo(metainterp_sd)  
    -    i = t.get_iter(metainterp_sd)  
    +    i = t.get_iter()
         modifier = ResumeDataVirtualAdder(FakeOptimizer(i), storage, storage, i, memo)
         liveboxes = modifier.finish(FakeOptimizer(i))
         cpu = MyCPU([])
    @@ -1073,7 +1075,7 @@
         storage, t = make_storage(b1s, b2s, b3s)
         metainterp_sd = FakeMetaInterpStaticData()
         memo = ResumeDataLoopMemo(metainterp_sd)
    -    i = t.get_iter(metainterp_sd)
    +    i = t.get_iter()
         modifier = ResumeDataVirtualAdder(FakeOptimizer(i), storage, storage, i, memo)
         modifier.finish(FakeOptimizer(i))
         assert len(memo.consts) == 2
    @@ -1081,7 +1083,7 @@
     
         b1s, b2s, b3s = [ConstInt(sys.maxint), ConstInt(2**17), ConstInt(-65)]
         storage2, t = make_storage(b1s, b2s, b3s)
    -    i = t.get_iter(metainterp_sd)
    +    i = t.get_iter()
         modifier2 = ResumeDataVirtualAdder(FakeOptimizer(i), storage2, storage2,
                                            i, memo)
         modifier2.finish(FakeOptimizer(i))
    
    From pypy.commits at gmail.com  Fri Mar 25 09:28:38 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 06:28:38 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: fix test_random
    Message-ID: <56f53d06.e6ebc20a.ba8ae.31a3@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83359:0b0fac585895
    Date: 2016-03-25 15:27 +0200
    http://bitbucket.org/pypy/pypy/changeset/0b0fac585895/
    
    Log:	fix test_random
    
    diff --git a/rpython/jit/backend/test/test_random.py b/rpython/jit/backend/test/test_random.py
    --- a/rpython/jit/backend/test/test_random.py
    +++ b/rpython/jit/backend/test/test_random.py
    @@ -718,7 +718,7 @@
             assert not hasattr(loop, '_targettoken')
             for i in range(position):
                 op = loop.operations[i]
    -            if (not op.has_no_side_effect()
    +            if (not rop.has_no_side_effect(op.opnum)
                         or op.type not in (INT, FLOAT)):
                     position = i
                     break       # cannot move the LABEL later
    
    From pypy.commits at gmail.com  Fri Mar 25 10:06:24 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 07:06:24 -0700 (PDT)
Subject: [pypy-commit] pypy jit-leaner-frontend: add an option for two
 different storage models, in case one wants really long traces
    Message-ID: <56f545e0.4577c20a.cd58e.3993@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83360:f364f082cc14
    Date: 2016-03-25 16:05 +0200
    http://bitbucket.org/pypy/pypy/changeset/f364f082cc14/
    
Log:	add an option for two different storage models, in case one wants
	really long traces
    
    diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py
    --- a/rpython/config/translationoption.py
    +++ b/rpython/config/translationoption.py
    @@ -126,6 +126,9 @@
         ChoiceOption("jit_profiler", "integrate profiler support into the JIT",
                      ["off", "oprofile"],
                      default="off"),
    +    ChoiceOption("jit_opencoder_model", "the model limits the maximal length"
    +                 " of traces. Use big if you want to go bigger than "
    +                 "the default", ["big", "normal"], default="normal"),
         BoolOption("check_str_without_nul",
                    "Forbid NUL chars in strings in some external function calls",
                    default=False, cmdline=None),
    diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py
    --- a/rpython/jit/metainterp/history.py
    +++ b/rpython/jit/metainterp/history.py
    @@ -4,6 +4,7 @@
     from rpython.rlib.objectmodel import compute_unique_id, specialize
     from rpython.rlib.rarithmetic import r_int64, is_valid_int
     from rpython.rlib.rarithmetic import LONG_BIT, intmask, r_uint
    +from rpython.rlib.jit import Counters
     
     from rpython.conftest import option
     
    @@ -12,6 +13,7 @@
         opclasses
     from rpython.jit.codewriter import heaptracker, longlong
     import weakref
    +from rpython.jit.metainterp import jitexc
     
     # ____________________________________________________________
     
    @@ -25,6 +27,15 @@
     
     FAILARGS_LIMIT = 1000
     
    +class SwitchToBlackhole(jitexc.JitException):
    +    def __init__(self, reason, raising_exception=False):
    +        self.reason = reason
    +        self.raising_exception = raising_exception
    +        # ^^^ must be set to True if the SwitchToBlackhole is raised at a
    +        #     point where the exception on metainterp.last_exc_value
    +        #     is supposed to be raised.  The default False means that it
    +        #     should just be copied into the blackhole interp, but not raised.
    +
     def getkind(TYPE, supports_floats=True,
                       supports_longlong=True,
                       supports_singlefloats=True):
    @@ -712,12 +723,23 @@
                 assert lltype.typeOf(value) == llmemory.GCREF
                 op.setref_base(value)
     
    +    def _record_op(self, opnum, argboxes, descr=None):
    +        from rpython.jit.metainterp.opencoder import FrontendTagOverflow
    +
    +        try:
    +            return self.trace.record_op(opnum, argboxes, descr)
    +        except FrontendTagOverflow:
    +            # note that with the default settings this one should not
    +            # happen - however if we hit that case, we don't get
    +            # anything disabled
    +            raise SwitchToBlackhole(Counters.ABORT_TOO_LONG)
    +
         @specialize.argtype(3)
         def record(self, opnum, argboxes, value, descr=None):
             if self.trace is None:
                 pos = 2**14 - 1
             else:
    -            pos = self.trace.record_op(opnum, argboxes, descr)
    +            pos = self._record_op(opnum, argboxes, descr)
             if value is None:
                 op = FrontendOp(pos)
             elif isinstance(value, bool):
    @@ -735,7 +757,7 @@
     
         def record_nospec(self, opnum, argboxes, descr=None):
             tp = opclasses[opnum].type
    -        pos = self.trace.record_op(opnum, argboxes, descr)
    +        pos = self._record_op(opnum, argboxes, descr)
             if tp == 'v':
                 return FrontendOp(pos)
             elif tp == 'i':
    diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py
    --- a/rpython/jit/metainterp/opencoder.py
    +++ b/rpython/jit/metainterp/opencoder.py
    @@ -19,21 +19,32 @@
     TAGMASK = 0x3
     TAGSHIFT = 2
     
    -STORAGE_TP = rffi.USHORT
    -MAX_SIZE = 2**16-1
    +class Model:
    +    STORAGE_TP = rffi.USHORT
    +    # this is the initial size of the trace - note that we probably
    +    # want something that would fit the inital "max_trace_length"
    +    INIT_SIZE = 30000
    +    MIN_SHORT = 0
    +    MAX_SHORT = 2**16 - 1
    +    check_range = True
    +
    +class BigModel:
    +    INIT_SIZE = 30000
    +    STORAGE_TP = lltype.Signed
    +    check_range = False
    +    # we can move SMALL ints here, if necessary
    +
    + at specialize.memo()
    +def get_model(self):
    +    return getattr(self.metainterp_sd, 'opencoder_model', Model)
    +
     SMALL_INT_STOP  = (2 ** (15 - TAGSHIFT)) - 1
     SMALL_INT_START = -SMALL_INT_STOP # we might want to distribute them uneven
    -MIN_SHORT = 0
    -MAX_SHORT = 2**16 - 1
     
     def expand_sizes_to_signed():
         """ This function will make sure we can use sizes all the
         way up to lltype.Signed for indexes everywhere
         """
    -    globals()['STORAGE_TP'] = lltype.Signed
    -    globals()['MAX_SIZE'] = 2**31-1
    -    globals()['MIN_SHORT'] = -2**31
    -    globals()['MAX_SHORT'] = 2**31 - 1
     
     class FrontendTagOverflow(Exception):
         pass
    @@ -252,7 +263,8 @@
         _deadranges = (-1, None)
     
         def __init__(self, inputargs, metainterp_sd):
    -        self._ops = [rffi.cast(STORAGE_TP, 0)] * MAX_SIZE
    +        self.metainterp_sd = metainterp_sd
    +        self._ops = [rffi.cast(get_model(self).STORAGE_TP, 0)] * get_model(self).INIT_SIZE
             self._pos = 0
             self._consts_bigint = 0
             self._consts_float = 0
    @@ -273,15 +285,16 @@
             self._start = len(inputargs)
             self._pos = self._start
             self.inputargs = inputargs
    -        self.metainterp_sd = metainterp_sd
     
         def append(self, v):
    +        model = get_model(self)
             if self._pos >= len(self._ops):
                 # grow by 2X
    -            self._ops = self._ops + [rffi.cast(STORAGE_TP, 0)] * len(self._ops)
    -        if not MIN_SHORT <= v <= MAX_SHORT:
    -            raise FrontendTagOverflow
    -        self._ops[self._pos] = rffi.cast(STORAGE_TP, v)
    +            self._ops = self._ops + [rffi.cast(model.STORAGE_TP, 0)] * len(self._ops)
    +        if model.check_range:
    +            if not model.MIN_SHORT <= v <= model.MAX_SHORT:
    +                raise FrontendTagOverflow
    +        self._ops[self._pos] = rffi.cast(model.STORAGE_TP, v)
             self._pos += 1
     
         def done(self):
    @@ -387,16 +400,16 @@
             return len(self._descrs) - 1 + len(self.metainterp_sd.all_descrs) + 1
     
         def _list_of_boxes(self, boxes):
    -        array = [rffi.cast(STORAGE_TP, 0)] * len(boxes)
    +        array = [rffi.cast(get_model(self).STORAGE_TP, 0)] * len(boxes)
             for i in range(len(boxes)):
                 array[i] = self._encode_cast(boxes[i])
             return array
     
         def new_array(self, lgt):
    -        return [rffi.cast(STORAGE_TP, 0)] * lgt
    +        return [rffi.cast(get_model(self).STORAGE_TP, 0)] * lgt
     
         def _encode_cast(self, i):
    -        return rffi.cast(STORAGE_TP, self._encode(i))
    +        return rffi.cast(get_model(self).STORAGE_TP, self._encode(i))
     
         def create_top_snapshot(self, jitcode, pc, frame, flag, vable_boxes, vref_boxes):
             self._total_snapshots += 1
    @@ -408,7 +421,7 @@
             assert rffi.cast(lltype.Signed, self._ops[self._pos - 1]) == 0
             # guards have no descr
             self._snapshots.append(s)
    -        self._ops[self._pos - 1] = rffi.cast(STORAGE_TP, len(self._snapshots) - 1)
    +        self._ops[self._pos - 1] = rffi.cast(get_model(self).STORAGE_TP, len(self._snapshots) - 1)
             return s
     
         def create_empty_top_snapshot(self, vable_boxes, vref_boxes):
    @@ -420,7 +433,7 @@
             assert rffi.cast(lltype.Signed, self._ops[self._pos - 1]) == 0
             # guards have no descr
             self._snapshots.append(s)
    -        self._ops[self._pos - 1] = rffi.cast(STORAGE_TP, len(self._snapshots) - 1)
    +        self._ops[self._pos - 1] = rffi.cast(get_model(self).STORAGE_TP, len(self._snapshots) - 1)
             return s
     
         def create_snapshot(self, jitcode, pc, frame, flag):
    @@ -474,8 +487,6 @@
             return iter.inputargs, ops
     
     def tag(kind, pos):
    -    #if not SMALL_INT_START <= pos < SMALL_INT_STOP:
    -    #    raise some error
         return (pos << TAGSHIFT) | kind
     
     @specialize.ll()
    diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
    --- a/rpython/jit/metainterp/pyjitpl.py
    +++ b/rpython/jit/metainterp/pyjitpl.py
    @@ -8,7 +8,7 @@
     from rpython.jit.metainterp import history, compile, resume, executor, jitexc
     from rpython.jit.metainterp.heapcache import HeapCache
     from rpython.jit.metainterp.history import (Const, ConstInt, ConstPtr,
    -    ConstFloat, TargetToken, MissingValue)
    +    ConstFloat, TargetToken, MissingValue, SwitchToBlackhole)
     from rpython.jit.metainterp.jitprof import EmptyProfiler
     from rpython.jit.metainterp.logger import Logger
     from rpython.jit.metainterp.optimizeopt.util import args_dict
    @@ -3187,15 +3187,6 @@
         """Raised after we mutated metainterp.framestack, in order to force
         it to reload the current top-of-stack frame that gets interpreted."""
     
    -class SwitchToBlackhole(jitexc.JitException):
    -    def __init__(self, reason, raising_exception=False):
    -        self.reason = reason
    -        self.raising_exception = raising_exception
    -        # ^^^ must be set to True if the SwitchToBlackhole is raised at a
    -        #     point where the exception on metainterp.last_exc_value
    -        #     is supposed to be raised.  The default False means that it
    -        #     should just be copied into the blackhole interp, but not raised.
    -
     NOT_HANDLED = history.CONST_FALSE
     
     # ____________________________________________________________
    diff --git a/rpython/jit/metainterp/warmspot.py b/rpython/jit/metainterp/warmspot.py
    --- a/rpython/jit/metainterp/warmspot.py
    +++ b/rpython/jit/metainterp/warmspot.py
    @@ -239,7 +239,8 @@
             elif self.opt.listops:
                 self.prejit_optimizations_minimal_inline(policy, graphs)
     
    -        self.build_meta_interp(ProfilerClass)
    +        self.build_meta_interp(ProfilerClass,
    +                             translator.config.translation.jit_opencoder_model)
             self.make_args_specifications()
             #
             from rpython.jit.metainterp.virtualref import VirtualRefInfo
    @@ -478,11 +479,16 @@
                 cpu.supports_singlefloats = False
             self.cpu = cpu
     
    -    def build_meta_interp(self, ProfilerClass):
    +    def build_meta_interp(self, ProfilerClass, opencoder_model):
    +        from rpython.jit.metainterp.opencoder import Model, BigModel
             self.metainterp_sd = MetaInterpStaticData(self.cpu,
                                                       self.opt,
                                                       ProfilerClass=ProfilerClass,
                                                       warmrunnerdesc=self)
    +        if opencoder_model == 'big':
    +            self.metainterp_sd.opencoder_model = BigModel
    +        else:
    +            self.metainterp_sd.opencoder_model = Model            
             self.stats.metainterp_sd = self.metainterp_sd
     
         def make_virtualizable_infos(self):
    
    From pypy.commits at gmail.com  Fri Mar 25 10:17:45 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 07:17:45 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: be a bit more specific
     about problems
    Message-ID: <56f54889.e213c20a.f93e0.417c@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83361:dc43144bc017
    Date: 2016-03-25 16:16 +0200
    http://bitbucket.org/pypy/pypy/changeset/dc43144bc017/
    
    Log:	be a bit more specific about problems
    
    diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py
    --- a/rpython/rlib/jit.py
    +++ b/rpython/rlib/jit.py
    @@ -782,6 +782,12 @@
         """Reset one of the tunable JIT parameters to its default value."""
         _set_param(driver, name, None)
     
    +class TraceLimitTooHigh(Exception):
    +    """ This is raised when the trace limit is too high for the chosen
    +    opencoder model, recompile your interpreter with 'big' as
    +    jit_opencoder_model
    +    """
    +
     def set_user_param(driver, text):
         """Set the tunable JIT parameters from a user-supplied string
         following the format 'param=value,param=value', or 'off' to
    @@ -809,6 +815,8 @@
                 for name1, _ in unroll_parameters:
                     if name1 == name and name1 != 'enable_opts':
                         try:
    +                        if name1 == 'trace_limit' and int(value) > 2**14:
    +                            raise TraceLimitTooHigh
                             set_param(driver, name1, int(value))
                         except ValueError:
                             raise
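
In practice the new guard means that requesting a trace_limit above 2**14 now fails loudly instead of overflowing the normal opencoder model later. A minimal sketch of the added behaviour; the driver/parameter plumbing of set_user_param is elided and a local stand-in is used for the exception:

class TraceLimitTooHigh(Exception):
    """Stand-in for the exception added to rpython/rlib/jit.py above."""

TRACE_LIMIT_CAP = 2**14   # anything above this needs the 'big' opencoder model

def parse_trace_limit(value):
    # mirrors the guard inserted into set_user_param()
    if int(value) > TRACE_LIMIT_CAP:
        raise TraceLimitTooHigh
    return int(value)

# parse_trace_limit("10000")  -> 10000
# parse_trace_limit("20000")  -> raises TraceLimitTooHigh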
    
    From pypy.commits at gmail.com  Fri Mar 25 10:22:37 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 07:22:37 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: reenable jit hooks
    Message-ID: <56f549ad.a151c20a.7046d.3dc9@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83362:2e8ef5f3792a
    Date: 2016-03-25 16:21 +0200
    http://bitbucket.org/pypy/pypy/changeset/2e8ef5f3792a/
    
    Log:	reenable jit hooks
    
    diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
    --- a/pypy/goal/targetpypystandalone.py
    +++ b/pypy/goal/targetpypystandalone.py
    @@ -336,7 +336,7 @@
         def jitpolicy(self, driver):
             from pypy.module.pypyjit.policy import PyPyJitPolicy
             from pypy.module.pypyjit.hooks import pypy_hooks
    -        return PyPyJitPolicy() #pypy_hooks)
    +        return PyPyJitPolicy(pypy_hooks)
     
         def get_entry_point(self, config):
             from pypy.tool.lib_pypy import import_from_lib_pypy
    diff --git a/pypy/module/pypyjit/__init__.py b/pypy/module/pypyjit/__init__.py
    --- a/pypy/module/pypyjit/__init__.py
    +++ b/pypy/module/pypyjit/__init__.py
    @@ -12,9 +12,9 @@
             'dont_trace_here': 'interp_jit.dont_trace_here',
             'trace_next_iteration': 'interp_jit.trace_next_iteration',
             'trace_next_iteration_hash': 'interp_jit.trace_next_iteration_hash',
    -        #'set_compile_hook': 'interp_resop.set_compile_hook',
    -        #'set_abort_hook': 'interp_resop.set_abort_hook',
    -        #'set_trace_too_long_hook': 'interp_resop.set_trace_too_long_hook',
    +        'set_compile_hook': 'interp_resop.set_compile_hook',
    +        'set_abort_hook': 'interp_resop.set_abort_hook',
    +        'set_trace_too_long_hook': 'interp_resop.set_trace_too_long_hook',
             'get_stats_snapshot': 'interp_resop.get_stats_snapshot',
             'get_stats_asmmemmgr': 'interp_resop.get_stats_asmmemmgr',
             # those things are disabled because they have bugs, but if
    @@ -23,10 +23,10 @@
             # correct loop_runs if PYPYLOG is correct
             #'enable_debug': 'interp_resop.enable_debug',
             #'disable_debug': 'interp_resop.disable_debug',
    -        #'ResOperation': 'interp_resop.WrappedOp',
    -        #'GuardOp': 'interp_resop.GuardOp',
    -        #'DebugMergePoint': 'interp_resop.DebugMergePoint',
    -        #'JitLoopInfo': 'interp_resop.W_JitLoopInfo',
    +        'ResOperation': 'interp_resop.WrappedOp',
    +        'GuardOp': 'interp_resop.GuardOp',
    +        'DebugMergePoint': 'interp_resop.DebugMergePoint',
    +        'JitLoopInfo': 'interp_resop.W_JitLoopInfo',
             'PARAMETER_DOCS': 'space.wrap(rpython.rlib.jit.PARAMETER_DOCS)',
         }
     
    diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py
    --- a/pypy/module/pypyjit/interp_resop.py
    +++ b/pypy/module/pypyjit/interp_resop.py
    @@ -249,7 +249,7 @@
                     ofs = debug_info.asminfo.ops_offset
                 else:
                     ofs = {}
    -            _, ops = debug_info.trace.unpack()
    +            ops = debug_info.operations
                 self.w_ops = space.newlist(wrap_oplist(space, logops, ops, ofs))
             else:
                 self.w_ops = space.w_None
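
With the hooks exposed again, app-level code running on this PyPy can once more register them through the pypyjit module. A rough usage sketch; only set_compile_hook is shown, and the exact attributes available on the loop-info object (presumably the W_JitLoopInfo wrapper re-enabled above) should be checked against interp_resop.py rather than taken from this example:

import pypyjit

def on_compile(loop_info):
    # called by the JIT after it has compiled a loop or a bridge
    print "JIT compiled something:", loop_info

pypyjit.set_compile_hook(on_compile)
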
    
    From pypy.commits at gmail.com  Fri Mar 25 11:51:14 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 08:51:14 -0700 (PDT)
    Subject: [pypy-commit] pypy remove-frame-forcing-in-executioncontext: remove
     forcing of vref to see what happens
    Message-ID: <56f55e72.6614c20a.5dc36.55ca@mx.google.com>
    
    Author: fijal
    Branch: remove-frame-forcing-in-executioncontext
    Changeset: r83363:811209af057f
    Date: 2016-03-25 17:50 +0200
    http://bitbucket.org/pypy/pypy/changeset/811209af057f/
    
    Log:	remove forcing of vref to see what happens
    
    diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py
    --- a/pypy/interpreter/error.py
    +++ b/pypy/interpreter/error.py
    @@ -278,17 +278,9 @@
     
         def get_traceback(self):
             """Calling this marks the PyTraceback as escaped, i.e. it becomes
    -        accessible and inspectable by app-level Python code.  For the JIT.
    -        Note that this has no effect if there are already several traceback
    -        frames recorded, because in this case they are already marked as
    -        escaping by executioncontext.leave() being called with
    -        got_exception=True.
    +        accessible and inspectable by app-level Python code.
             """
    -        from pypy.interpreter.pytraceback import PyTraceback
    -        tb = self._application_traceback
    -        if tb is not None and isinstance(tb, PyTraceback):
    -            tb.frame.mark_as_escaped()
    -        return tb
    +        return self._application_traceback
     
         def set_traceback(self, traceback):
             """Set the current traceback.  It should either be a traceback
    diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
    --- a/pypy/interpreter/executioncontext.py
    +++ b/pypy/interpreter/executioncontext.py
    @@ -74,15 +74,6 @@
             finally:
                 frame_vref = self.topframeref
                 self.topframeref = frame.f_backref
    -            if frame.escaped or got_exception:
    -                # if this frame escaped to applevel, we must ensure that also
    -                # f_back does
    -                f_back = frame.f_backref()
    -                if f_back:
    -                    f_back.mark_as_escaped()
    -                # force the frame (from the JIT point of view), so that it can
    -                # be accessed also later
    -                frame_vref()
                 jit.virtual_ref_finish(frame_vref, frame)
     
         # ________________________________________________________________
    diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
    --- a/pypy/interpreter/pyframe.py
    +++ b/pypy/interpreter/pyframe.py
    @@ -65,7 +65,6 @@
         last_exception           = None
         f_backref                = jit.vref_None
         
    -    escaped                  = False  # see mark_as_escaped()
         debugdata                = None
     
         pycode = None # code object executed by that frame
    @@ -152,15 +151,6 @@
             assert isinstance(cell, Cell)
             return cell
     
    -    def mark_as_escaped(self):
    -        """
    -        Must be called on frames that are exposed to applevel, e.g. by
    -        sys._getframe().  This ensures that the virtualref holding the frame
    -        is properly forced by ec.leave(), and thus the frame will be still
    -        accessible even after the corresponding C stack died.
    -        """
    -        self.escaped = True
    -
         def append_block(self, block):
             assert block.previous is self.lastblock
             self.lastblock = block
    diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py
    --- a/pypy/module/sys/vm.py
    +++ b/pypy/module/sys/vm.py
    @@ -37,7 +37,6 @@
                 raise OperationError(space.w_ValueError,
                                      space.wrap("call stack is not deep enough"))
             if depth == 0:
    -            f.mark_as_escaped()
                 return space.wrap(f)
             depth -= 1
             f = ec.getnextframe_nohidden(f)
    
    From pypy.commits at gmail.com  Fri Mar 25 14:54:59 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 11:54:59 -0700 (PDT)
    Subject: [pypy-commit] pypy default: (fijal, arigo) Merge jit-leaner-frontend
    Message-ID: <56f58983.8b941c0a.23da3.0686@mx.google.com>
    
    Author: fijal
    Branch: 
    Changeset: r83364:913dd4cf97ff
    Date: 2016-03-25 20:54 +0200
    http://bitbucket.org/pypy/pypy/changeset/913dd4cf97ff/
    
    Log:	(fijal, arigo) Merge jit-leaner-frontend
    
	This branch separates the resoperations used in the optimizer from
	the ones used in the frontend, which now uses a more compact way to
	store traces. Additionally, the heapcache has been reworked to the
	new model. The net effect is a ~20% improvement in tracing speed.
	There is potential for further work that would avoid allocating
	ResOperations that are never fully emitted.
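
The "more compact way to store traces" can be pictured as follows: instead of keeping one ResOperation object per recorded operation, the frontend writes opcodes, argument indices and descr indices into a flat array and only reconstructs operation objects when something (the optimizer, the logger, the jit hooks) iterates over the trace. A self-contained toy version of that idea, with no claim to match the real opencoder layout:

class CompactTrace(object):
    """Toy flat-array trace: ints only, operations rebuilt on demand."""

    def __init__(self):
        self._ops = []                       # opcodes, counts and indices, flattened

    def record(self, opnum, arg_indices, descr_index=-1):
        self._ops.append(opnum)
        self._ops.append(len(arg_indices))
        self._ops.extend(arg_indices)
        self._ops.append(descr_index)

    def unpack(self):
        # rebuild (opnum, args, descr_index) tuples only when needed
        i, ops = 0, []
        while i < len(self._ops):
            opnum = self._ops[i]
            nargs = self._ops[i + 1]
            args = self._ops[i + 2:i + 2 + nargs]
            descr_index = self._ops[i + 2 + nargs]
            ops.append((opnum, args, descr_index))
            i += 3 + nargs
        return ops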
    
    diff too long, truncating to 2000 out of 8331 lines
    
    diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
    --- a/pypy/config/pypyoption.py
    +++ b/pypy/config/pypyoption.py
    @@ -46,7 +46,6 @@
     except detect_cpu.ProcessorAutodetectError:
         pass
     
    -
     translation_modules = default_modules.copy()
     translation_modules.update([
         "fcntl", "time", "select", "signal", "_rawffi", "zlib", "struct", "_md5",
    diff --git a/pypy/tool/gdb_pypy.py b/pypy/tool/gdb_pypy.py
    --- a/pypy/tool/gdb_pypy.py
    +++ b/pypy/tool/gdb_pypy.py
    @@ -288,9 +288,11 @@
                 RPyListPrinter.recursive = True
                 try:
                     itemlist = []
    -                for i in range(length):
    +                for i in range(min(length, MAX_DISPLAY_LENGTH)):
                         item = items[i]
                         itemlist.append(str(item))    # may recurse here
    +                if length > MAX_DISPLAY_LENGTH:
    +                    itemlist.append("...")
                     str_items = ', '.join(itemlist)
                 finally:
                     RPyListPrinter.recursive = False
    diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py
    --- a/rpython/config/translationoption.py
    +++ b/rpython/config/translationoption.py
    @@ -126,6 +126,9 @@
         ChoiceOption("jit_profiler", "integrate profiler support into the JIT",
                      ["off", "oprofile"],
                      default="off"),
    +    ChoiceOption("jit_opencoder_model", "the model limits the maximal length"
    +                 " of traces. Use big if you want to go bigger than "
    +                 "the default", ["big", "normal"], default="normal"),
         BoolOption("check_str_without_nul",
                    "Forbid NUL chars in strings in some external function calls",
                    default=False, cmdline=None),
    diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py
    --- a/rpython/jit/backend/llgraph/runner.py
    +++ b/rpython/jit/backend/llgraph/runner.py
    @@ -455,7 +455,7 @@
                     if box is not frame.current_op:
                         value = frame.env[box]
                     else:
    -                    value = box.getvalue()    # 0 or 0.0 or NULL
    +                    value = 0 # box.getvalue()    # 0 or 0.0 or NULL
                 else:
                     value = None
                 values.append(value)
    @@ -472,6 +472,13 @@
     
         # ------------------------------------------------------------
     
    +    def setup_descrs(self):
    +        all_descrs = []
    +        for k, v in self.descrs.iteritems():
    +            v.descr_index = len(all_descrs)
    +            all_descrs.append(v)
    +        return all_descrs
    +
         def calldescrof(self, FUNC, ARGS, RESULT, effect_info):
             key = ('call', getkind(RESULT),
                    tuple([getkind(A) for A in ARGS]),
    diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py
    --- a/rpython/jit/backend/llsupport/assembler.py
    +++ b/rpython/jit/backend/llsupport/assembler.py
    @@ -331,7 +331,7 @@
             counter = self._register_counter(tp, number, token)
             c_adr = ConstInt(rffi.cast(lltype.Signed, counter))
             operations.append(
    -            ResOperation(rop.INCREMENT_DEBUG_COUNTER, [c_adr], None))
    +            ResOperation(rop.INCREMENT_DEBUG_COUNTER, [c_adr]))
     
         def _register_counter(self, tp, number, token):
             # YYY very minor leak -- we need the counters to stay alive
    diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py
    --- a/rpython/jit/backend/llsupport/descr.py
    +++ b/rpython/jit/backend/llsupport/descr.py
    @@ -21,6 +21,30 @@
             self._cache_call = {}
             self._cache_interiorfield = {}
     
    +    def setup_descrs(self):
    +        all_descrs = []
    +        for k, v in self._cache_size.iteritems():
    +            v.descr_index = len(all_descrs)
    +            all_descrs.append(v)
    +        for k, v in self._cache_field.iteritems():
    +            for k1, v1 in v.iteritems():
    +                v1.descr_index = len(all_descrs)
    +                all_descrs.append(v1)
    +        for k, v in self._cache_array.iteritems():
    +            v.descr_index = len(all_descrs)
    +            all_descrs.append(v)
    +        for k, v in self._cache_arraylen.iteritems():
    +            v.descr_index = len(all_descrs)
    +            all_descrs.append(v)
    +        for k, v in self._cache_call.iteritems():
    +            v.descr_index = len(all_descrs)
    +            all_descrs.append(v)
    +        for k, v in self._cache_interiorfield.iteritems():
    +            v.descr_index = len(all_descrs)
    +            all_descrs.append(v)
    +        assert len(all_descrs) < 2**15
    +        return all_descrs
    +
         def init_size_descr(self, STRUCT, sizedescr):
             pass
     
    diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py
    --- a/rpython/jit/backend/llsupport/llmodel.py
    +++ b/rpython/jit/backend/llsupport/llmodel.py
    @@ -316,6 +316,9 @@
                 return ll_frame
             return execute_token
     
    +    def setup_descrs(self):
    +        return self.gc_ll_descr.setup_descrs()
    +
         # ------------------- helpers and descriptions --------------------
     
         @staticmethod
    diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py
    --- a/rpython/jit/backend/llsupport/regalloc.py
    +++ b/rpython/jit/backend/llsupport/regalloc.py
    @@ -683,7 +683,7 @@
         for i in range(len(operations)-1, -1, -1):
             op = operations[i]
             if op.type != 'v':
    -            if op not in last_used and op.has_no_side_effect():
    +            if op not in last_used and rop.has_no_side_effect(op.opnum):
                     continue
             opnum = op.getopnum()
             for j in range(op.numargs()):
    @@ -695,7 +695,7 @@
                 if opnum != rop.JUMP and opnum != rop.LABEL:
                     if arg not in last_real_usage:
                         last_real_usage[arg] = i
    -        if op.is_guard():
    +        if rop.is_guard(op.opnum):
                 for arg in op.getfailargs():
                     if arg is None: # hole
                         continue
    @@ -732,14 +732,7 @@
         return longevity, last_real_usage
     
     def is_comparison_or_ovf_op(opnum):
    -    from rpython.jit.metainterp.resoperation import opclasses
    -    cls = opclasses[opnum]
    -    # hack hack: in theory they are instance method, but they don't use
    -    # any instance field, we can use a fake object
    -    class Fake(cls):
    -        pass
    -    op = Fake()
    -    return op.is_comparison() or op.is_ovf()
    +    return rop.is_comparison(opnum) or rop.is_ovf(opnum)
     
     def valid_addressing_size(size):
         return size == 1 or size == 2 or size == 4 or size == 8
    diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py
    --- a/rpython/jit/backend/llsupport/rewrite.py
    +++ b/rpython/jit/backend/llsupport/rewrite.py
    @@ -103,7 +103,7 @@
                         orig_op.set_forwarded(op)
                         replaced = True
                     op.setarg(i, arg)
    -        if op.is_guard():
    +        if rop.is_guard(op.opnum):
                 if not replaced:
                     op = op.copy_and_change(op.getopnum())
                     orig_op.set_forwarded(op)
    @@ -212,7 +212,7 @@
             #                self._emit_mul_if_factor_offset_not_supported(v_length, scale, 0)
             #        op.setarg(1, ConstInt(scale))
             #        op.setarg(2, v_length)
    -        if op.is_getarrayitem() or \
    +        if rop.is_getarrayitem(opnum) or \
                opnum in (rop.GETARRAYITEM_RAW_I,
                          rop.GETARRAYITEM_RAW_F):
                 self.handle_getarrayitem(op)
    @@ -324,13 +324,13 @@
                 if self.transform_to_gc_load(op):
                     continue
                 # ---------- turn NEWxxx into CALL_MALLOC_xxx ----------
    -            if op.is_malloc():
    +            if rop.is_malloc(op.opnum):
                     self.handle_malloc_operation(op)
                     continue
    -            if (op.is_guard() or
    +            if (rop.is_guard(op.opnum) or
                         self.could_merge_with_next_guard(op, i, operations)):
                     self.emit_pending_zeros()
    -            elif op.can_malloc():
    +            elif rop.can_malloc(op.opnum):
                     self.emitting_an_operation_that_can_collect()
                 elif op.getopnum() == rop.LABEL:
                     self.emitting_an_operation_that_can_collect()
    @@ -370,8 +370,8 @@
             # return True in cases where the operation and the following guard
             # should likely remain together.  Simplified version of
             # can_merge_with_next_guard() in llsupport/regalloc.py.
    -        if not op.is_comparison():
    -            return op.is_ovf()    # int_xxx_ovf() / guard_no_overflow()
    +        if not rop.is_comparison(op.opnum):
    +            return rop.is_ovf(op.opnum)    # int_xxx_ovf() / guard_no_overflow()
             if i + 1 >= len(operations):
                 return False
             next_op = operations[i + 1]
    @@ -400,7 +400,6 @@
             # it's hard to test all cases).  Rewrite it away.
             value = int(opnum == rop.GUARD_FALSE)
             op1 = ResOperation(rop.SAME_AS_I, [ConstInt(value)])
    -        op1.setint(value)
             self.emit_op(op1)
             lst = op.getfailargs()[:]
             lst[i] = op1
    @@ -633,8 +632,7 @@
                 args = [frame, arglist[jd.index_of_virtualizable]]
             else:
                 args = [frame]
    -        call_asm = ResOperation(op.getopnum(), args,
    -                                op.getdescr())
    +        call_asm = ResOperation(op.getopnum(), args, descr=op.getdescr())
             self.replace_op_with(self.get_box_replacement(op), call_asm)
             self.emit_op(call_asm)
     
    @@ -708,7 +706,7 @@
         def _gen_call_malloc_gc(self, args, v_result, descr):
             """Generate a CALL_MALLOC_GC with the given args."""
             self.emitting_an_operation_that_can_collect()
    -        op = ResOperation(rop.CALL_MALLOC_GC, args, descr)
    +        op = ResOperation(rop.CALL_MALLOC_GC, args, descr=descr)
             self.replace_op_with(v_result, op)
             self.emit_op(op)
             # In general, don't add v_result to write_barrier_applied:
    diff --git a/rpython/jit/backend/test/test_ll_random.py b/rpython/jit/backend/test/test_ll_random.py
    --- a/rpython/jit/backend/test/test_ll_random.py
    +++ b/rpython/jit/backend/test/test_ll_random.py
    @@ -2,6 +2,7 @@
     from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr
     from rpython.rtyper import rclass
     from rpython.jit.backend.test import test_random
    +from rpython.jit.backend.test.test_random import getint, getref_base, getref
     from rpython.jit.metainterp.resoperation import ResOperation, rop, optypes
     from rpython.jit.metainterp.history import ConstInt, ConstPtr, getkind
     from rpython.jit.codewriter import heaptracker
    @@ -169,7 +170,7 @@
             if length == 0:
                 raise test_random.CannotProduceOperation
             v_index = r.choice(self.intvars)
    -        if not (0 <= v_index.getint() < length):
    +        if not (0 <= getint(v_index) < length):
                 v_index = ConstInt(r.random_integer() % length)
             return v_index
     
    @@ -311,7 +312,7 @@
         def field_descr(self, builder, r):
             v, A = builder.get_structptr_var(r, type=lltype.Array,
                                              array_of_structs=True)
    -        array = v.getref(lltype.Ptr(A))
    +        array = getref(lltype.Ptr(A), v)
             v_index = builder.get_index(len(array), r)
             choice = []
             for name in A.OF._names:
    @@ -344,7 +345,7 @@
                     w = ConstInt(r.random_integer())
                 else:
                     w = r.choice(builder.intvars)
    -            value = w.getint()
    +            value = getint(w)
                 if rffi.cast(lltype.Signed, rffi.cast(TYPE, value)) == value:
                     break
             builder.do(self.opnum, [v, w], descr)
    @@ -357,7 +358,7 @@
                     w = ConstInt(r.random_integer())
                 else:
                     w = r.choice(builder.intvars)
    -            value = w.getint()
    +            value = getint(w)
                 if rffi.cast(lltype.Signed, rffi.cast(TYPE, value)) == value:
                     break
             builder.do(self.opnum, [v, v_index, w], descr)
    @@ -389,7 +390,7 @@
     class GetArrayItemOperation(ArrayOperation):
         def field_descr(self, builder, r):
             v, A = builder.get_arrayptr_var(r)
    -        array = v.getref(lltype.Ptr(A))
    +        array = getref(lltype.Ptr(A), v)
             v_index = builder.get_index(len(array), r)
             descr = self.array_descr(builder, A)
             return v, A, v_index, descr
    @@ -411,7 +412,7 @@
                     w = ConstInt(r.random_integer())
                 else:
                     w = r.choice(builder.intvars)
    -            value = w.getint()
    +            value = getint(w)
                 if rffi.cast(lltype.Signed, rffi.cast(A.OF, value)) == value:
                     break
             builder.do(self.opnum, [v, v_index, w], descr)
    @@ -455,7 +456,7 @@
             v_ptr = builder.do(self.opnum, [v_length])
             getattr(builder, self.builder_cache).append(v_ptr)
             # Initialize the string. Is there a better way to do this?
    -        for i in range(v_length.getint()):
    +        for i in range(getint(v_length)):
                 v_index = ConstInt(i)
                 v_char = ConstInt(r.random_integer() % self.max)
                 builder.do(self.set_char, [v_ptr, v_index, v_char])
    @@ -471,9 +472,9 @@
             current = getattr(builder, self.builder_cache)
             if current and r.random() < .8:
                 v_string = r.choice(current)
    -            string = v_string.getref(self.ptr)
    +            string = getref(self.ptr, v_string)
             else:
    -            string = self.alloc(builder.get_index(500, r).getint())
    +            string = self.alloc(getint(builder.get_index(500, r)))
                 v_string = ConstPtr(lltype.cast_opaque_ptr(llmemory.GCREF, string))
                 current.append(v_string)
             for i in range(len(string.chars)):
    @@ -484,7 +485,7 @@
     class AbstractGetItemOperation(AbstractStringOperation):
         def produce_into(self, builder, r):
             v_string = self.get_string(builder, r)
    -        v_index = builder.get_index(len(v_string.getref(self.ptr).chars), r)
    +        v_index = builder.get_index(len(getref(self.ptr, v_string).chars), r)
             builder.do(self.opnum, [v_string, v_index])
     
     class AbstractSetItemOperation(AbstractStringOperation):
    @@ -492,7 +493,7 @@
             v_string = self.get_string(builder, r)
             if isinstance(v_string, ConstPtr):
                 raise test_random.CannotProduceOperation  # setitem(Const, ...)
    -        v_index = builder.get_index(len(v_string.getref(self.ptr).chars), r)
    +        v_index = builder.get_index(len(getref(self.ptr, v_string).chars), r)
             v_target = ConstInt(r.random_integer() % self.max)
             builder.do(self.opnum, [v_string, v_index, v_target])
     
    @@ -505,15 +506,15 @@
         def produce_into(self, builder, r):
             v_srcstring = self.get_string(builder, r)
             v_dststring = self.get_string(builder, r)
    -        src = v_srcstring.getref(self.ptr)
    -        dst = v_dststring.getref(self.ptr)
    +        src = getref(self.ptr, v_srcstring)
    +        dst = getref(self.ptr, v_dststring)
             if src == dst:                                # because it's not a
                 raise test_random.CannotProduceOperation  # memmove(), but memcpy()
             srclen = len(src.chars)
             dstlen = len(dst.chars)
             v_length = builder.get_index(min(srclen, dstlen), r)
    -        v_srcstart = builder.get_index(srclen - v_length.getint() + 1, r)
    -        v_dststart = builder.get_index(dstlen - v_length.getint() + 1, r)
    +        v_srcstart = builder.get_index(srclen - getint(v_length) + 1, r)
    +        v_dststart = builder.get_index(dstlen - getint(v_length) + 1, r)
             builder.do(self.opnum, [v_srcstring, v_dststring,
                                     v_srcstart, v_dststart, v_length])
     
    @@ -585,7 +586,7 @@
             """ % funcargs).compile()
             vtableptr = v._hints['vtable']._as_ptr()
             d = {
    -            'ptr': S.getref_base(),
    +            'ptr': getref_base(S),
                 'vtable' : vtableptr,
                 'LLException' : LLException,
                 }
    diff --git a/rpython/jit/backend/test/test_random.py b/rpython/jit/backend/test/test_random.py
    --- a/rpython/jit/backend/test/test_random.py
    +++ b/rpython/jit/backend/test/test_random.py
    @@ -11,11 +11,9 @@
     from rpython.jit.metainterp.executor import _execute_arglist, wrap_constant
     from rpython.jit.metainterp.resoperation import opname
     from rpython.jit.codewriter import longlong
    -from rpython.rtyper.lltypesystem import lltype, rstr
    +from rpython.rtyper.lltypesystem import lltype, llmemory, rstr
     from rpython.rtyper import rclass
     
    -class PleaseRewriteMe(Exception):
    -    pass
     
     class DummyLoop(object):
         def __init__(self, subops):
    @@ -27,6 +25,41 @@
         def execute_raised(self, exc, constant=False):
             self._got_exc = exc
     
    +
    +def getint(v):
    +    if isinstance(v, (ConstInt, InputArgInt)):
    +        return v.getint()
    +    else:
    +        return v._example_int
    +
    +def getfloatstorage(v):
    +    if isinstance(v, (ConstFloat, InputArgFloat)):
    +        return v.getfloatstorage()
    +    else:
    +        return v._example_float
    +
    +def getfloat(v):
    +    return longlong.getrealfloat(getfloatstorage(v))
    +
    +def getref_base(v):
    +    if isinstance(v, (ConstPtr, InputArgRef)):
    +        return v.getref_base()
    +    else:
    +        return v._example_ref
    +
    +def getref(PTR, v):
    +    return lltype.cast_opaque_ptr(PTR, getref_base(v))
    +
    +def constbox(v):
    +    if v.type == INT:
    +        return ConstInt(getint(v))
    +    if v.type == FLOAT:
    +        return ConstFloat(getfloatstorage(v))
    +    if v.type == REF:
    +        return ConstPtr(getref_base(v))
    +    assert 0, v.type
    +
    +
     class OperationBuilder(object):
         def __init__(self, cpu, loop, vars):
             self.cpu = cpu
    @@ -57,11 +90,21 @@
         def do(self, opnum, argboxes, descr=None):
             self.fakemetainterp._got_exc = None
             op = ResOperation(opnum, argboxes, descr)
    +        argboxes = map(constbox, argboxes)
             result = _execute_arglist(self.cpu, self.fakemetainterp,
                                       opnum, argboxes, descr)
             if result is not None:
    -            c_result = wrap_constant(result)
    -            op.copy_value_from(c_result)
    +            if lltype.typeOf(result) == lltype.Signed:
    +                op._example_int = result
    +            elif isinstance(result, bool):
    +                op._example_int = int(result)
    +            elif lltype.typeOf(result) == longlong.FLOATSTORAGE:
    +                op._example_float = result
    +            elif isinstance(result, float):
    +                op._example_float = longlong.getfloatstorage(result)
    +            else:
    +                assert lltype.typeOf(result) == llmemory.GCREF
    +                op._example_ref = result
             self.loop.operations.append(op)
             return op
     
    @@ -101,7 +144,7 @@
                 if v in names:
                     args.append(names[v])
                 elif isinstance(v, ConstPtr):
    -                assert not v.getref_base() # otherwise should be in the names
    +                assert not getref_base(v) # otherwise should be in the names
                     args.append('ConstPtr(lltype.nullptr(llmemory.GCREF.TO))')
                 elif isinstance(v, ConstFloat):
                     args.append('ConstFloat(longlong.getfloatstorage(%r))'
    @@ -198,10 +241,10 @@
             #
             def writevar(v, nameprefix, init=''):
                 if nameprefix == 'const_ptr':
    -                if not v.getref_base():
    +                if not getref_base(v):
                         return 'lltype.nullptr(llmemory.GCREF.TO)'
    -                TYPE = v.getref_base()._obj.ORIGTYPE
    -                cont = lltype.cast_opaque_ptr(TYPE, v.getref_base())
    +                TYPE = getref_base(v)._obj.ORIGTYPE
    +                cont = lltype.cast_opaque_ptr(TYPE, getref_base(v))
                     if TYPE.TO._is_varsize():
                         if isinstance(TYPE.TO, lltype.GcStruct):
                             lgt = len(cont.chars)
    @@ -252,9 +295,9 @@
                 for i, v in enumerate(self.loop.inputargs):
                     assert not isinstance(v, Const)
                     if v.type == FLOAT:
    -                    vals.append("longlong.getfloatstorage(%r)" % v.getfloat())
    +                    vals.append("longlong.getfloatstorage(%r)" % getfloat(v))
                     else:
    -                    vals.append("%r" % v.getint())
    +                    vals.append("%r" % getint(v))
                 print >>s, '    loop_args = [%s]' % ", ".join(vals)
             print >>s, '    frame = cpu.execute_token(looptoken, *loop_args)'
             if self.should_fail_by is None:
    @@ -264,10 +307,10 @@
             for i, v in enumerate(fail_args):
                 if v.type == FLOAT:
                     print >>s, ('    assert longlong.getrealfloat('
    -                    'cpu.get_float_value(frame, %d)) == %r' % (i, v.getfloatstorage()))
    +                    'cpu.get_float_value(frame, %d)) == %r' % (i, getfloatstorage(v)))
                 else:
                     print >>s, ('    assert cpu.get_int_value(frame, %d) == %d'
    -                            % (i, v.getint()))
    +                            % (i, getint(v)))
             self.names = names
             s.flush()
     
    @@ -295,7 +338,7 @@
                     builder.intvars.append(v_result)
                     boolres = self.boolres
                     if boolres == 'sometimes':
    -                    boolres = v_result.getint() in [0, 1]
    +                    boolres = getint(v_result) in [0, 1]
                     if boolres:
                         builder.boolvars.append(v_result)
                 elif v_result.type == FLOAT:
    @@ -346,10 +389,10 @@
                 v_second = ConstInt((value & self.and_mask) | self.or_mask)
             else:
                 v = r.choice(builder.intvars)
    -            v_value = v.getint()
    +            v_value = getint(v)
                 if (v_value & self.and_mask) != v_value:
                     v = builder.do(rop.INT_AND, [v, ConstInt(self.and_mask)])
    -            v_value = v.getint()
    +            v_value = getint(v)
                 if (v_value | self.or_mask) != v_value:
                     v = builder.do(rop.INT_OR, [v, ConstInt(self.or_mask)])
                 v_second = v
    @@ -395,9 +438,9 @@
                 v_second = ConstFloat(r.random_float_storage())
             else:
                 v_second = r.choice(builder.floatvars)
    -        if abs(v_first.getfloat()) > 1E100 or abs(v_second.getfloat()) > 1E100:
    +        if abs(getfloat(v_first)) > 1E100 or abs(getfloat(v_second)) > 1E100:
                 raise CannotProduceOperation     # avoid infinities
    -        if abs(v_second.getfloat()) < 1E-100:
    +        if abs(getfloat(v_second)) < 1E-100:
                 raise CannotProduceOperation     # e.g. division by zero error
             self.put(builder, [v_first, v_second])
     
    @@ -432,7 +475,7 @@
             if not builder.floatvars:
                 raise CannotProduceOperation
             box = r.choice(builder.floatvars)
    -        if not (-sys.maxint-1 <= box.getfloat() <= sys.maxint):
    +        if not (-sys.maxint-1 <= getfloat(box) <= sys.maxint):
                 raise CannotProduceOperation      # would give an overflow
             self.put(builder, [box])
     
    @@ -440,8 +483,8 @@
         def gen_guard(self, builder, r):
             v = builder.get_bool_var(r)
             op = ResOperation(self.opnum, [v])
    -        passing = ((self.opnum == rop.GUARD_TRUE and v.getint()) or
    -                   (self.opnum == rop.GUARD_FALSE and not v.getint()))
    +        passing = ((self.opnum == rop.GUARD_TRUE and getint(v)) or
    +                   (self.opnum == rop.GUARD_FALSE and not getint(v)))
             return op, passing
     
         def produce_into(self, builder, r):
    @@ -459,8 +502,8 @@
                 raise CannotProduceOperation
             box = r.choice(builder.ptrvars)[0]
             op = ResOperation(self.opnum, [box])
    -        passing = ((self.opnum == rop.GUARD_NONNULL and box.getref_base()) or
    -                   (self.opnum == rop.GUARD_ISNULL and not box.getref_base()))
    +        passing = ((self.opnum == rop.GUARD_NONNULL and getref_base(box)) or
    +                   (self.opnum == rop.GUARD_ISNULL and not getref_base(box)))
             return op, passing
     
     class GuardValueOperation(GuardOperation):
    @@ -470,14 +513,14 @@
                 other = r.choice(builder.intvars)
             else:
                 if r.random() < 0.75:
    -                value = v.getint()
    +                value = getint(v)
                 elif r.random() < 0.5:
    -                value = v.getint() ^ 1
    +                value = getint(v) ^ 1
                 else:
                     value = r.random_integer()
                 other = ConstInt(value)
             op = ResOperation(self.opnum, [v, other])
    -        return op, (v.getint() == other.getint())
    +        return op, (getint(v) == getint(other))
     
     # ____________________________________________________________
     
    @@ -675,7 +718,7 @@
             assert not hasattr(loop, '_targettoken')
             for i in range(position):
                 op = loop.operations[i]
    -            if (not op.has_no_side_effect()
    +            if (not rop.has_no_side_effect(op.opnum)
                         or op.type not in (INT, FLOAT)):
                     position = i
                     break       # cannot move the LABEL later
    @@ -728,9 +771,9 @@
             self.expected = {}
             for v in endvars:
                 if v.type == INT:
    -                self.expected[v] = v.getint()
    +                self.expected[v] = getint(v)
                 elif v.type == FLOAT:
    -                self.expected[v] = v.getfloatstorage()
    +                self.expected[v] = getfloatstorage(v)
                 else:
                     assert 0, v.type
     
    @@ -742,7 +785,7 @@
                 args = []
                 for box in self.startvars:
                     if box not in self.loop.inputargs:
    -                    box = box.constbox()
    +                    box = constbox(box)
                     args.append(box)
                 self.cpu.compile_loop(self.loop.inputargs,
                                       [ResOperation(rop.JUMP, args,
    @@ -760,7 +803,7 @@
     
         def clear_state(self):
             for v, S, fields in self.prebuilt_ptr_consts:
    -            container = v.getref_base()._obj.container
    +            container = getref_base(v)._obj.container
                 for name, value in fields.items():
                     if isinstance(name, str):
                         setattr(container, name, value)
    @@ -781,9 +824,9 @@
             arguments = []
             for box in self.loop.inputargs:
                 if box.type == INT:
    -                arguments.append(box.getint())
    +                arguments.append(getint(box))
                 elif box.type == FLOAT:
    -                arguments.append(box.getfloatstorage())
    +                arguments.append(getfloatstorage(box))
                 else:
                     assert 0, box.type
             deadframe = cpu.execute_token(self.runjitcelltoken(), *arguments)
    @@ -795,7 +838,7 @@
                 if v not in self.expected:
                     assert v.getopnum() == rop.SAME_AS_I   # special case
                     assert isinstance(v.getarg(0), ConstInt)
    -                self.expected[v] = v.getarg(0).getint()
    +                self.expected[v] = getint(v.getarg(0))
                 if v.type == FLOAT:
                     value = cpu.get_float_value(deadframe, i)
                 else:
    @@ -807,7 +850,7 @@
                     )
             exc = cpu.grab_exc_value(deadframe)
             if (self.guard_op is not None and
    -            self.guard_op.is_guard_exception()):
    +            rop.is_guard_exception(self.guard_op.getopnum())):
                 if self.guard_op.getopnum() == rop.GUARD_NO_EXCEPTION:
                     do_assert(exc,
                               "grab_exc_value() should not be %r" % (exc,))
    @@ -840,7 +883,7 @@
             # generate the branch: a sequence of operations that ends in a FINISH
             subloop = DummyLoop([])
             self.subloops.append(subloop)   # keep around for debugging
    -        if guard_op.is_guard_exception():
    +        if rop.is_guard_exception(guard_op.getopnum()):
                 subloop.operations.append(exc_handling(guard_op))
             bridge_builder = self.builder.fork(self.builder.cpu, subloop,
                                                op.getfailargs()[:])
    @@ -876,9 +919,9 @@
                 args = []
                 for x in subset:
                     if x.type == INT:
    -                    args.append(InputArgInt(x.getint()))
    +                    args.append(InputArgInt(getint(x)))
                     elif x.type == FLOAT:
    -                    args.append(InputArgFloat(x.getfloatstorage()))
    +                    args.append(InputArgFloat(getfloatstorage(x)))
                     else:
                         assert 0, x.type
                 rl = RandomLoop(self.builder.cpu, self.builder.fork,
    diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py
    --- a/rpython/jit/backend/x86/regalloc.py
    +++ b/rpython/jit/backend/x86/regalloc.py
    @@ -358,11 +358,11 @@
                 assert self.assembler.mc._frame_size == DEFAULT_FRAME_BYTES
                 self.rm.position = i
                 self.xrm.position = i
    -            if op.has_no_side_effect() and op not in self.longevity:
    +            if rop.has_no_side_effect(op.opnum) and op not in self.longevity:
                     i += 1
                     self.possibly_free_vars_for_op(op)
                     continue
    -            if not we_are_translated() and op.getopnum() == -127:
    +            if not we_are_translated() and op.getopnum() == rop.FORCE_SPILL:
                     self._consider_force_spill(op)
                 else:
                     oplist[op.getopnum()](self, op)
    diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py
    --- a/rpython/jit/metainterp/blackhole.py
    +++ b/rpython/jit/metainterp/blackhole.py
    @@ -1585,7 +1585,6 @@
         def _done_with_this_frame(self):
             # rare case: we only get there if the blackhole interps all returned
             # normally (in general we get a ContinueRunningNormally exception).
    -        sd = self.builder.metainterp_sd
             kind = self._return_type
             if kind == 'v':
                 raise jitexc.DoneWithThisFrameVoid()
    diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py
    --- a/rpython/jit/metainterp/compile.py
    +++ b/rpython/jit/metainterp/compile.py
    @@ -27,12 +27,11 @@
     
     class CompileData(object):
         memo = None
    +    log_noopt = True
         
         def forget_optimization_info(self):
    -        for arg in self.start_label.getarglist():
    +        for arg in self.trace.inputargs:
                 arg.set_forwarded(None)
    -        for op in self.operations:
    -            op.set_forwarded(None)
     
     class LoopCompileData(CompileData):
         """ An object that accumulates all of the necessary info for
    @@ -40,15 +39,13 @@
     
         This is the case of label() ops label()
         """
    -    def __init__(self, start_label, end_label, operations,
    -                 call_pure_results=None, enable_opts=None):
    -        self.start_label = start_label
    -        self.end_label = end_label
    +    def __init__(self, trace, runtime_boxes, call_pure_results=None,
    +                 enable_opts=None):
             self.enable_opts = enable_opts
    -        assert start_label.getopnum() == rop.LABEL
    -        assert end_label.getopnum() == rop.LABEL
    -        self.operations = operations
    +        self.trace = trace
             self.call_pure_results = call_pure_results
    +        assert runtime_boxes is not None
    +        self.runtime_boxes = runtime_boxes
     
         def optimize(self, metainterp_sd, jitdriver_sd, optimizations, unroll):
             from rpython.jit.metainterp.optimizeopt.unroll import (UnrollOptimizer,
    @@ -56,23 +53,21 @@
     
             if unroll:
                 opt = UnrollOptimizer(metainterp_sd, jitdriver_sd, optimizations)
    -            return opt.optimize_preamble(self.start_label, self.end_label,
    -                                         self.operations,
    +            return opt.optimize_preamble(self.trace,
    +                                         self.runtime_boxes,
                                              self.call_pure_results,
                                              self.box_names_memo)
             else:
                 opt = Optimizer(metainterp_sd, jitdriver_sd, optimizations)
    -            return opt.propagate_all_forward(self.start_label.getarglist(),
    -               self.operations, self.call_pure_results)
    +            return opt.propagate_all_forward(self.trace, self.call_pure_results)
     
     class SimpleCompileData(CompileData):
         """ This represents label() ops jump with no extra info associated with
         the label
         """
    -    def __init__(self, start_label, operations, call_pure_results=None,
    +    def __init__(self, trace, call_pure_results=None,
                      enable_opts=None):
    -        self.start_label = start_label
    -        self.operations = operations
    +        self.trace = trace
             self.call_pure_results = call_pure_results
             self.enable_opts = enable_opts
     
    @@ -81,17 +76,17 @@
     
             #assert not unroll
             opt = Optimizer(metainterp_sd, jitdriver_sd, optimizations)
    -        return opt.propagate_all_forward(self.start_label.getarglist(),
    -            self.operations, self.call_pure_results)
    +        return opt.propagate_all_forward(self.trace.get_iter(),
    +            self.call_pure_results)
     
     class BridgeCompileData(CompileData):
         """ This represents ops() with a jump at the end that goes to some
         loop, we need to deal with virtual state and inlining of short preamble
         """
    -    def __init__(self, start_label, operations, call_pure_results=None,
    +    def __init__(self, trace, runtime_boxes, call_pure_results=None,
                      enable_opts=None, inline_short_preamble=False):
    -        self.start_label = start_label
    -        self.operations = operations
    +        self.trace = trace
    +        self.runtime_boxes = runtime_boxes
             self.call_pure_results = call_pure_results
             self.enable_opts = enable_opts
             self.inline_short_preamble = inline_short_preamble
    @@ -100,7 +95,7 @@
             from rpython.jit.metainterp.optimizeopt.unroll import UnrollOptimizer
     
             opt = UnrollOptimizer(metainterp_sd, jitdriver_sd, optimizations)
    -        return opt.optimize_bridge(self.start_label, self.operations,
    +        return opt.optimize_bridge(self.trace, self.runtime_boxes,
                                        self.call_pure_results,
                                        self.inline_short_preamble,
                                        self.box_names_memo)
    @@ -109,12 +104,13 @@
         """ This represents label() ops jump with extra info that's from the
         run of LoopCompileData. Jump goes to the same label
         """
    -    def __init__(self, start_label, end_jump, operations, state,
    +    log_noopt = False
    +
    +    def __init__(self, trace, celltoken, state,
                      call_pure_results=None, enable_opts=None,
                      inline_short_preamble=True):
    -        self.start_label = start_label
    -        self.end_jump = end_jump
    -        self.operations = operations
    +        self.trace = trace
    +        self.celltoken = celltoken
             self.enable_opts = enable_opts
             self.state = state
             self.call_pure_results = call_pure_results
    @@ -125,9 +121,8 @@
     
             assert unroll # we should not be here if it's disabled
             opt = UnrollOptimizer(metainterp_sd, jitdriver_sd, optimizations)
    -        return opt.optimize_peeled_loop(self.start_label, self.end_jump,
    -            self.operations, self.state, self.call_pure_results,
    -            self.inline_short_preamble)
    +        return opt.optimize_peeled_loop(self.trace, self.celltoken, self.state,
    +            self.call_pure_results, self.inline_short_preamble)
     
     def show_procedures(metainterp_sd, procedure=None, error=None):
         # debugging
    @@ -208,23 +203,21 @@
     # ____________________________________________________________
     
     
    -def compile_simple_loop(metainterp, greenkey, start, inputargs, ops, jumpargs,
    -                        enable_opts):
    +def compile_simple_loop(metainterp, greenkey, trace, runtime_args, enable_opts,
    +                        cut_at):
         from rpython.jit.metainterp.optimizeopt import optimize_trace
     
         jitdriver_sd = metainterp.jitdriver_sd
         metainterp_sd = metainterp.staticdata
         jitcell_token = make_jitcell_token(jitdriver_sd)
    -    label = ResOperation(rop.LABEL, inputargs[:], descr=jitcell_token)
    -    jump_op = ResOperation(rop.JUMP, jumpargs[:], descr=jitcell_token)
         call_pure_results = metainterp.call_pure_results
    -    data = SimpleCompileData(label, ops + [jump_op],
    -                                 call_pure_results=call_pure_results,
    -                                 enable_opts=enable_opts)
    +    data = SimpleCompileData(trace, call_pure_results=call_pure_results,
    +                             enable_opts=enable_opts)
         try:
             loop_info, ops = optimize_trace(metainterp_sd, jitdriver_sd,
                                             data, metainterp.box_names_memo)
         except InvalidLoop:
    +        trace.cut_at(cut_at)
             return None
         loop = create_empty_loop(metainterp)
         loop.original_jitcell_token = jitcell_token
    @@ -241,7 +234,7 @@
             loop.check_consistency()
         jitcell_token.target_tokens = [target_token]
         send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, "loop",
    -                         inputargs, metainterp.box_names_memo)
    +                         runtime_args, metainterp.box_names_memo)
         record_loop_or_bridge(metainterp_sd, loop)
         return target_token
     
    @@ -255,6 +248,7 @@
         metainterp_sd = metainterp.staticdata
         jitdriver_sd = metainterp.jitdriver_sd
         history = metainterp.history
    +    trace = history.trace
         warmstate = jitdriver_sd.warmstate
     
         enable_opts = jitdriver_sd.warmstate.enable_opts
    @@ -264,16 +258,16 @@
             enable_opts = enable_opts.copy()
             del enable_opts['unroll']
     
    -    ops = history.operations[start:]
    +    jitcell_token = make_jitcell_token(jitdriver_sd)
    +    cut_at = history.get_trace_position()
    +    history.record(rop.JUMP, jumpargs, None, descr=jitcell_token)
    +    if start != (0, 0, 0):
    +        trace = trace.cut_trace_from(start, inputargs)
         if 'unroll' not in enable_opts or not metainterp.cpu.supports_guard_gc_type:
    -        return compile_simple_loop(metainterp, greenkey, start, inputargs, ops,
    -                                   jumpargs, enable_opts)
    -    jitcell_token = make_jitcell_token(jitdriver_sd)
    -    label = ResOperation(rop.LABEL, inputargs,
    -                         descr=TargetToken(jitcell_token))
    -    end_label = ResOperation(rop.LABEL, jumpargs, descr=jitcell_token)
    +        return compile_simple_loop(metainterp, greenkey, trace, jumpargs,
    +                                   enable_opts, cut_at)
         call_pure_results = metainterp.call_pure_results
    -    preamble_data = LoopCompileData(label, end_label, ops,
    +    preamble_data = LoopCompileData(trace, jumpargs,
                                         call_pure_results=call_pure_results,
                                         enable_opts=enable_opts)
         try:
    @@ -281,17 +275,15 @@
                                                        preamble_data,
                                                        metainterp.box_names_memo)
         except InvalidLoop:
    +        history.cut(cut_at)
             return None
     
         metainterp_sd = metainterp.staticdata
         jitdriver_sd = metainterp.jitdriver_sd
    -    end_label = ResOperation(rop.LABEL, inputargs,
    -                             descr=jitcell_token)
    -    jump_op = ResOperation(rop.JUMP, jumpargs, descr=jitcell_token)
         start_descr = TargetToken(jitcell_token,
                                   original_jitcell_token=jitcell_token)
         jitcell_token.target_tokens = [start_descr]
    -    loop_data = UnrolledLoopData(end_label, jump_op, ops, start_state,
    +    loop_data = UnrolledLoopData(trace, jitcell_token, start_state,
                                      call_pure_results=call_pure_results,
                                      enable_opts=enable_opts)
         try:
    @@ -299,11 +291,12 @@
                                                  loop_data,
                                                  metainterp.box_names_memo)
         except InvalidLoop:
    +        history.cut(cut_at)
             return None
     
         if ((warmstate.vec and jitdriver_sd.vec) or warmstate.vec_all):
             from rpython.jit.metainterp.optimizeopt.vector import optimize_vector
    -        loop_info, loop_ops = optimize_vector(metainterp_sd,
    +        loop_info, loop_ops = optimize_vector(trace, metainterp_sd,
                                                   jitdriver_sd, warmstate,
                                                   loop_info, loop_ops,
                                                   jitcell_token)
    @@ -342,22 +335,20 @@
         to the first operation.
         """
         from rpython.jit.metainterp.optimizeopt import optimize_trace
    -    from rpython.jit.metainterp.optimizeopt.optimizer import BasicLoopInfo
     
    -    history = metainterp.history
    +    trace = metainterp.history.trace.cut_trace_from(start, inputargs)
         metainterp_sd = metainterp.staticdata
         jitdriver_sd = metainterp.jitdriver_sd
    +    history = metainterp.history
     
         loop_jitcell_token = metainterp.get_procedure_token(greenkey)
         assert loop_jitcell_token
     
    -    end_label = ResOperation(rop.LABEL, inputargs[:],
    -                             descr=loop_jitcell_token)
    -    jump_op = ResOperation(rop.JUMP, jumpargs[:], descr=loop_jitcell_token)
    +    cut = history.get_trace_position()
    +    history.record(rop.JUMP, jumpargs[:], None, descr=loop_jitcell_token)
         enable_opts = jitdriver_sd.warmstate.enable_opts
    -    ops = history.operations[start:]
         call_pure_results = metainterp.call_pure_results
    -    loop_data = UnrolledLoopData(end_label, jump_op, ops, start_state,
    +    loop_data = UnrolledLoopData(trace, loop_jitcell_token, start_state,
                                      call_pure_results=call_pure_results,
                                      enable_opts=enable_opts)
         try:
    @@ -366,8 +357,9 @@
                                                  metainterp.box_names_memo)
         except InvalidLoop:
             # Fall back on jumping directly to preamble
    -        jump_op = ResOperation(rop.JUMP, inputargs[:], descr=loop_jitcell_token)
    -        loop_data = UnrolledLoopData(end_label, jump_op, [jump_op], start_state,
    +        history.cut(cut)
    +        history.record(rop.JUMP, jumpargs[:], None, descr=loop_jitcell_token)
    +        loop_data = UnrolledLoopData(trace, loop_jitcell_token, start_state,
                                          call_pure_results=call_pure_results,
                                          enable_opts=enable_opts,
                                          inline_short_preamble=False)
    @@ -376,9 +368,13 @@
                                                      loop_data,
                                                      metainterp.box_names_memo)
             except InvalidLoop:
    +            history.cut(cut)
                 return None
     
    -    label_token = loop_info.label_op.getdescr()
    +    label_op = loop_info.label_op
    +    if label_op is None:
    +        assert False, "unreachable code" # hint for some strange tests
    +    label_token = label_op.getdescr()
         assert isinstance(label_token, TargetToken)
         if label_token.short_preamble:
             metainterp_sd.logger_ops.log_short_preamble([],
    @@ -445,13 +441,13 @@
             box = inputargs[i]
             opnum = OpHelpers.getfield_for_descr(descr)
             emit_op(extra_ops,
    -                ResOperation(opnum, [vable_box], descr))
    +                ResOperation(opnum, [vable_box], descr=descr))
             box.set_forwarded(extra_ops[-1])
             i += 1
         arrayindex = 0
         for descr in vinfo.array_field_descrs:
             arraylen = vinfo.get_array_length(vable, arrayindex)
    -        arrayop = ResOperation(rop.GETFIELD_GC_R, [vable_box], descr)
    +        arrayop = ResOperation(rop.GETFIELD_GC_R, [vable_box], descr=descr)
             emit_op(extra_ops, arrayop)
             arraydescr = vinfo.array_descrs[arrayindex]
             assert i + arraylen <= len(inputargs)
    @@ -1017,9 +1013,9 @@
             metainterp_sd.stats.add_jitcell_token(jitcell_token)
     
     
    -def compile_trace(metainterp, resumekey):
    +def compile_trace(metainterp, resumekey, runtime_boxes):
         """Try to compile a new bridge leading from the beginning of the history
    -    to some existing place.
     +    to some existing place.
         """
     
         from rpython.jit.metainterp.optimizeopt import optimize_trace
    @@ -1037,20 +1033,19 @@
         else:
             inline_short_preamble = True
         inputargs = metainterp.history.inputargs[:]
    -    operations = metainterp.history.operations
    -    label = ResOperation(rop.LABEL, inputargs)
    +    trace = metainterp.history.trace
         jitdriver_sd = metainterp.jitdriver_sd
         enable_opts = jitdriver_sd.warmstate.enable_opts
     
         call_pure_results = metainterp.call_pure_results
     
    -    if operations[-1].getopnum() == rop.JUMP:
    -        data = BridgeCompileData(label, operations[:],
    +    if metainterp.history.ends_with_jump:
    +        data = BridgeCompileData(trace, runtime_boxes,
                                      call_pure_results=call_pure_results,
                                      enable_opts=enable_opts,
                                      inline_short_preamble=inline_short_preamble)
         else:
    -        data = SimpleCompileData(label, operations[:],
    +        data = SimpleCompileData(trace,
                                      call_pure_results=call_pure_results,
                                      enable_opts=enable_opts)
         try:
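
The new path above records the closing JUMP directly into the history trace and remembers a cut point first, so that when the optimizer raises InvalidLoop the speculatively recorded tail can be rolled back with history.cut(). Below is a minimal, pure-Python sketch of that pattern; SketchTrace and compile_with_rollback are illustrative stand-ins, not the real History/opencoder.Trace API.

    # record-then-cut-on-failure, in isolation
    class SketchTrace(object):
        def __init__(self):
            self.ops = []

        def record(self, op):
            self.ops.append(op)

        def get_trace_position(self):
            # remember how much has been recorded so far
            return len(self.ops)

        def cut(self, position):
            # roll back everything recorded after 'position'
            del self.ops[position:]

    def compile_with_rollback(trace, optimize):
        cut_at = trace.get_trace_position()
        trace.record("jump")          # speculatively record the closing JUMP
        try:
            return optimize(trace.ops)
        except ValueError:            # plays the role of InvalidLoop here
            trace.cut(cut_at)         # restore the trace to its previous state
            return None

    def failing_optimizer(ops):
        raise ValueError              # simulate InvalidLoop

    trace = SketchTrace()
    assert compile_with_rollback(trace, failing_optimizer) is None
    assert trace.ops == []            # the speculative JUMP was rolled back
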
    diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py
    --- a/rpython/jit/metainterp/executor.py
    +++ b/rpython/jit/metainterp/executor.py
    @@ -9,7 +9,7 @@
     from rpython.jit.metainterp.history import INT, REF, FLOAT, VOID, AbstractDescr
     from rpython.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr
     from rpython.jit.metainterp import resoperation
    -from rpython.jit.metainterp.resoperation import rop
    +from rpython.jit.metainterp.resoperation import rop, opname
     from rpython.jit.metainterp.blackhole import BlackholeInterpreter, NULL
     from rpython.jit.codewriter import longlong
     
    @@ -314,7 +314,8 @@
     
     def _make_execute_list():
         execute_by_num_args = {}
    -    for key, value in rop.__dict__.items():
    +    for key in opname.values():
    +        value = getattr(rop, key)
             if not key.startswith('_'):
                 if (rop._FINAL_FIRST <= value <= rop._FINAL_LAST or
                     rop._GUARD_FIRST <= value <= rop._GUARD_LAST):
    @@ -384,6 +385,11 @@
                              rop.CALL_MALLOC_NURSERY_VARSIZE_FRAME,
                              rop.NURSERY_PTR_INCREMENT,
                              rop.LABEL,
    +                         rop.ESCAPE_I,
    +                         rop.ESCAPE_N,
    +                         rop.ESCAPE_R,
    +                         rop.ESCAPE_F,
    +                         rop.FORCE_SPILL,
                              rop.SAVE_EXC_CLASS,
                              rop.SAVE_EXCEPTION,
                              rop.RESTORE_EXCEPTION,
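
The switch from rop.__dict__.items() to iterating opname.values() above restricts the scan to actual opcodes, since the class __dict__ also contains private helpers and range markers. A small, pure-Python sketch of the idea; rop_sketch and opname_sketch are hypothetical stand-ins for the real rop namespace and opname table.

    class rop_sketch(object):
        INT_ADD = 1
        LABEL = 2
        _PRIVATE_MARKER = 999          # would wrongly be picked up via __dict__

    opname_sketch = {1: 'INT_ADD', 2: 'LABEL'}

    def make_execute_table(unsupported=('LABEL',)):
        table = {}
        for name in opname_sketch.values():
            value = getattr(rop_sketch, name)
            if name in unsupported:
                continue               # ops that must never be executed directly
            table[value] = 'execute_' + name.lower()
        return table

    assert make_execute_table() == {1: 'execute_int_add'}
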
    diff --git a/rpython/jit/metainterp/graphpage.py b/rpython/jit/metainterp/graphpage.py
    --- a/rpython/jit/metainterp/graphpage.py
    +++ b/rpython/jit/metainterp/graphpage.py
    @@ -170,7 +170,8 @@
             while True:
                 op = operations[opindex]
                 op_repr = op.repr(self.memo, graytext=True)
    -            if op.getopnum() == rop.DEBUG_MERGE_POINT:
    +            if (op.getopnum() == rop.DEBUG_MERGE_POINT and
    +                    self.metainterp_sd is not None):
                     jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()]
                     if jd_sd._get_printable_location_ptr:
                         s = jd_sd.warmstate.get_location_str(op.getarglist()[3:])
    diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py
    --- a/rpython/jit/metainterp/heapcache.py
    +++ b/rpython/jit/metainterp/heapcache.py
    @@ -1,33 +1,59 @@
    -from rpython.jit.metainterp.history import ConstInt
    +from rpython.jit.metainterp.history import Const, ConstInt
    +from rpython.jit.metainterp.history import FrontendOp, RefFrontendOp
     from rpython.jit.metainterp.resoperation import rop, OpHelpers
    +from rpython.jit.metainterp.executor import constant_from_op
    +from rpython.rlib.rarithmetic import r_uint32, r_uint
    +from rpython.rlib.objectmodel import always_inline
     
    -class HeapCacheValue(object):
    -    def __init__(self, box):
    -        self.box = box
    -        self.likely_virtual = False
    -        self.reset_keep_likely_virtual()
    +""" A big note: we don't do heap caches on Consts, because it used
    +to be done with the identity of the Const instance. This gives very wonky
    +results at best, so we decided to not do it at all. Can be fixed with
    +interning of Consts (already done on trace anyway)
    +"""
     
    -    def reset_keep_likely_virtual(self):
    -        self.known_class = False
    -        self.known_nullity = False
    -        # did we see the allocation during tracing?
    -        self.seen_allocation = False
    -        self.is_unescaped = False
    -        self.nonstandard_virtualizable = False
    -        self.length = None
    -        self.dependencies = None
    +# RefFrontendOp._heapc_flags:
    +HF_LIKELY_VIRTUAL  = 0x01
    +HF_KNOWN_CLASS     = 0x02
    +HF_KNOWN_NULLITY   = 0x04
    +HF_SEEN_ALLOCATION = 0x08   # did we see the allocation during tracing?
    +HF_IS_UNESCAPED    = 0x10
    +HF_NONSTD_VABLE    = 0x20
     
    -    def __repr__(self):
    -        return 'HeapCacheValue(%s)' % (self.box, )
    +_HF_VERSION_INC    = 0x40   # must be last
    +_HF_VERSION_MAX    = r_uint(2 ** 32 - _HF_VERSION_INC)
    +
    + at always_inline
    +def add_flags(ref_frontend_op, flags):
    +    f = ref_frontend_op._get_heapc_flags()
    +    f |= r_uint(flags)
    +    ref_frontend_op._set_heapc_flags(f)
    +
    + at always_inline
    +def remove_flags(ref_frontend_op, flags):
    +    f = ref_frontend_op._get_heapc_flags()
    +    f &= r_uint(~flags)
    +    ref_frontend_op._set_heapc_flags(f)
    +
    + at always_inline
    +def test_flags(ref_frontend_op, flags):
    +    f = ref_frontend_op._get_heapc_flags()
    +    return bool(f & r_uint(flags))
    +
    +def maybe_replace_with_const(box):
    +    if not isinstance(box, Const) and box.is_replaced_with_const():
    +        return constant_from_op(box)
    +    else:
    +        return box
     
     
     class CacheEntry(object):
    -    def __init__(self):
    -        # both are {from_value: to_value} dicts
    +    def __init__(self, heapcache):
    +        # both are {from_ref_box: to_field_box} dicts
             # the first is for boxes where we did not see the allocation, the
             # second for anything else. the reason that distinction makes sense is
             # because if we saw the allocation, we know it cannot alias with
             # anything else where we saw the allocation.
    +        self.heapcache = heapcache
             self.cache_anything = {}
             self.cache_seen_allocation = {}
     
    @@ -36,112 +62,137 @@
                 self.cache_seen_allocation.clear()
             self.cache_anything.clear()
     
    -    def _getdict(self, value):
    -        if value.seen_allocation:
    +    def _seen_alloc(self, ref_box):
    +        if not isinstance(ref_box, RefFrontendOp):
    +            return False
    +        return self.heapcache._check_flag(ref_box, HF_SEEN_ALLOCATION)
    +
    +    def _getdict(self, seen_alloc):
    +        if seen_alloc:
                 return self.cache_seen_allocation
             else:
                 return self.cache_anything
     
    -    def do_write_with_aliasing(self, value, fieldvalue):
    -        self._clear_cache_on_write(value.seen_allocation)
    -        self._getdict(value)[value] = fieldvalue
    +    def do_write_with_aliasing(self, ref_box, fieldbox):
    +        seen_alloc = self._seen_alloc(ref_box)
    +        self._clear_cache_on_write(seen_alloc)
    +        self._getdict(seen_alloc)[ref_box] = fieldbox
     
    -    def read(self, value):
    -        return self._getdict(value).get(value, None)
    +    def read(self, ref_box):
    +        dict = self._getdict(self._seen_alloc(ref_box))
    +        try:
    +            res_box = dict[ref_box]
    +        except KeyError:
    +            return None
    +        return maybe_replace_with_const(res_box)
     
    -    def read_now_known(self, value, fieldvalue):
    -        self._getdict(value)[value] = fieldvalue
    +    def read_now_known(self, ref_box, fieldbox):
    +        self._getdict(self._seen_alloc(ref_box))[ref_box] = fieldbox
     
         def invalidate_unescaped(self):
             self._invalidate_unescaped(self.cache_anything)
             self._invalidate_unescaped(self.cache_seen_allocation)
     
         def _invalidate_unescaped(self, d):
    -        for value in d.keys():
    -            if not value.is_unescaped:
    -                del d[value]
    +        for ref_box in d.keys():
    +            if not self.heapcache.is_unescaped(ref_box):
    +                del d[ref_box]
     
     
     class FieldUpdater(object):
    -    def __init__(self, heapcache, value, cache, fieldvalue):
    -        self.heapcache = heapcache
    -        self.value = value
    +    def __init__(self, ref_box, cache, fieldbox):
    +        self.ref_box = ref_box
             self.cache = cache
    -        if fieldvalue is not None:
    -            self.currfieldbox = fieldvalue.box
    -        else:
    -            self.currfieldbox = None
    +        self.currfieldbox = fieldbox     # <= read directly from pyjitpl.py
     
         def getfield_now_known(self, fieldbox):
    -        fieldvalue = self.heapcache.getvalue(fieldbox)
    -        self.cache.read_now_known(self.value, fieldvalue)
    +        self.cache.read_now_known(self.ref_box, fieldbox)
     
         def setfield(self, fieldbox):
    -        fieldvalue = self.heapcache.getvalue(fieldbox)
    -        self.cache.do_write_with_aliasing(self.value, fieldvalue)
    +        self.cache.do_write_with_aliasing(self.ref_box, fieldbox)
    +
    +class DummyFieldUpdater(FieldUpdater):
    +    def __init__(self):
    +        self.currfieldbox = None
    +
    +    def getfield_now_known(self, fieldbox):
    +        pass
    +
    +    def setfield(self, fieldbox):
    +        pass
    +
    +dummy_field_updater = DummyFieldUpdater()
     
     
     class HeapCache(object):
         def __init__(self):
    +        # Works with flags stored on RefFrontendOp._heapc_flags.
    +        # There are two ways to do a global resetting of these flags:
    +        # reset() and reset_keep_likely_virtual().  The basic idea is
    +        # to use a version number in each RefFrontendOp, and in order
    +        # to reset the flags globally, we increment the global version
    +        # number in this class.  Then when we read '_heapc_flags' we
    +        # also check if the associated version number is up-to-date
    +        # or not.  More precisely, we have two global version numbers
    +        # here: 'head_version' and 'likely_virtual_version'.  Normally
    +        # we use 'head_version'.  For is_likely_virtual() though, we
    +        # use the other, older version number.
    +        self.head_version = r_uint(0)
    +        self.likely_virtual_version = r_uint(0)
             self.reset()
     
         def reset(self):
    -        # maps boxes to values
    -        self.values = {}
    -        # store the boxes that contain newly allocated objects, this maps the
    -        # boxes to a bool, the bool indicates whether or not the object has
    -        # escaped the trace or not (True means the box never escaped, False
    -        # means it did escape), its presences in the mapping shows that it was
    -        # allocated inside the trace
    -        #if trace_branch:
    -            #self.new_boxes = {}
    -        #    pass
    -        #else:
    -            #for box in self.new_boxes:
    -            #    self.new_boxes[box] = False
    -        #    pass
    -        #if reset_virtuals:
    -        #    self.likely_virtuals = {}      # only for jit.isvirtual()
    -        # Tracks which boxes should be marked as escaped when the key box
    -        # escapes.
    -        #self.dependencies = {}
    -
    +        # Global reset of all flags.  Update both version numbers so
    +        # that any access to '_heapc_flags' will be marked as outdated.
    +        assert self.head_version < _HF_VERSION_MAX
    +        self.head_version += _HF_VERSION_INC
    +        self.likely_virtual_version = self.head_version
    +        #
             # heap cache
             # maps descrs to CacheEntry
             self.heap_cache = {}
             # heap array cache
    -        # maps descrs to {index: {from_value: to_value}} dicts
    +        # maps descrs to {index: CacheEntry} dicts
             self.heap_array_cache = {}
     
         def reset_keep_likely_virtuals(self):
    -        for value in self.values.itervalues():
    -            value.reset_keep_likely_virtual()
    +        # Update only 'head_version', but 'likely_virtual_version' remains
    +        # at its older value.
    +        assert self.head_version < _HF_VERSION_MAX
    +        self.head_version += _HF_VERSION_INC
             self.heap_cache = {}
             self.heap_array_cache = {}
     
    -    def getvalue(self, box, create=True):
    -        value = self.values.get(box, None)
    -        if not value and create:
    -            value = self.values[box] = HeapCacheValue(box)
    -        return value
    +    @always_inline
    +    def test_head_version(self, ref_frontend_op):
    +        return ref_frontend_op._get_heapc_flags() >= self.head_version
     
    -    def getvalues(self, boxes):
    -        return [self.getvalue(box) for box in boxes]
    +    @always_inline
    +    def test_likely_virtual_version(self, ref_frontend_op):
    +        return ref_frontend_op._get_heapc_flags() >= self.likely_virtual_version
    +
    +    def update_version(self, ref_frontend_op):
    +        """Ensure the version of 'ref_frontend_op' is current.  If not,
    +        it will update 'ref_frontend_op' (removing most flags currently set).
    +        """
    +        if not self.test_head_version(ref_frontend_op):
    +            f = self.head_version
    +            if (self.test_likely_virtual_version(ref_frontend_op) and
    +                test_flags(ref_frontend_op, HF_LIKELY_VIRTUAL)):
    +                f |= HF_LIKELY_VIRTUAL
    +            ref_frontend_op._set_heapc_flags(f)
    +            ref_frontend_op._heapc_deps = None
     
         def invalidate_caches(self, opnum, descr, argboxes):
             self.mark_escaped(opnum, descr, argboxes)
             self.clear_caches(opnum, descr, argboxes)
     
         def _escape_from_write(self, box, fieldbox):
    -        value = self.getvalue(box, create=False)
    -        fieldvalue = self.getvalue(fieldbox, create=False)
    -        if (value is not None and value.is_unescaped and
    -                fieldvalue is not None and fieldvalue.is_unescaped):
    -            if value.dependencies is None:
    -                value.dependencies = []
    -            value.dependencies.append(fieldvalue)
    -        elif fieldvalue is not None:
    -            self._escape(fieldvalue)
    +        if self.is_unescaped(box) and self.is_unescaped(fieldbox):
    +            deps = self._get_deps(box)
    +            deps.append(fieldbox)
    +        elif fieldbox is not None:
    +            self._escape_box(fieldbox)
     
         def mark_escaped(self, opnum, descr, argboxes):
             if opnum == rop.SETFIELD_GC:
    @@ -176,19 +227,20 @@
                     self._escape_box(box)
     
         def _escape_box(self, box):
    -        value = self.getvalue(box, create=False)
    -        if not value:
    -            return
    -        self._escape(value)
    -
    -    def _escape(self, value):
    -        value.is_unescaped = False
    -        value.likely_virtual = False
    -        deps = value.dependencies
    -        value.dependencies = None
    -        if deps is not None:
    -            for dep in deps:
    -                self._escape(dep)
    +        if isinstance(box, RefFrontendOp):
    +            remove_flags(box, HF_LIKELY_VIRTUAL | HF_IS_UNESCAPED)
    +            deps = box._heapc_deps
    +            if deps is not None:
    +                if not self.test_head_version(box):
    +                    box._heapc_deps = None
    +                else:
    +                    # 'deps[0]' is abused to store the array length, keep it
    +                    if deps[0] is None:
    +                        box._heapc_deps = None
    +                    else:
    +                        box._heapc_deps = [deps[0]]
    +                    for i in range(1, len(deps)):
    +                        self._escape_box(deps[i])
     
         def clear_caches(self, opnum, descr, argboxes):
             if (opnum == rop.SETFIELD_GC or
    @@ -241,7 +293,8 @@
             self.reset_keep_likely_virtuals()
     
         def _clear_caches_arraycopy(self, opnum, desrc, argboxes, effectinfo):
    -        seen_allocation_of_target = self.getvalue(argboxes[2]).seen_allocation
    +        seen_allocation_of_target = self._check_flag(
    +                                            argboxes[2], HF_SEEN_ALLOCATION)
             if (
                 isinstance(argboxes[3], ConstInt) and
                 isinstance(argboxes[4], ConstInt) and
    @@ -285,74 +338,82 @@
                 return
             self.reset_keep_likely_virtuals()
     
    +    def _get_deps(self, box):
    +        if not isinstance(box, RefFrontendOp):
    +            return None
    +        self.update_version(box)
    +        if box._heapc_deps is None:
    +            box._heapc_deps = [None]
    +        return box._heapc_deps
    +
    +    def _check_flag(self, box, flag):
    +        return (isinstance(box, RefFrontendOp) and
    +                    self.test_head_version(box) and
    +                    test_flags(box, flag))
    +
    +    def _set_flag(self, box, flag):
    +        assert isinstance(box, RefFrontendOp)
    +        self.update_version(box)
    +        add_flags(box, flag)
    +
         def is_class_known(self, box):
    -        value = self.getvalue(box, create=False)
    -        if value:
    -            return value.known_class
    -        return False
    +        return self._check_flag(box, HF_KNOWN_CLASS)
     
         def class_now_known(self, box):
    -        self.getvalue(box).known_class = True
    +        if isinstance(box, Const):
    +            return
    +        self._set_flag(box, HF_KNOWN_CLASS)
     
         def is_nullity_known(self, box):
    -        value = self.getvalue(box, create=False)
    -        if value:
    -            return value.known_nullity
    -        return False
    +        if isinstance(box, Const):
    +            return bool(box.getref_base())
    +        return self._check_flag(box, HF_KNOWN_NULLITY)
     
         def nullity_now_known(self, box):
    -        self.getvalue(box).known_nullity = True
    +        if isinstance(box, Const):
    +            return
    +        self._set_flag(box, HF_KNOWN_NULLITY)
     
         def is_nonstandard_virtualizable(self, box):
    -        value = self.getvalue(box, create=False)
    -        if value:
    -            return value.nonstandard_virtualizable
    -        return False
    +        return self._check_flag(box, HF_NONSTD_VABLE)
     
         def nonstandard_virtualizables_now_known(self, box):
    -        self.getvalue(box).nonstandard_virtualizable = True
    +        self._set_flag(box, HF_NONSTD_VABLE)
     
         def is_unescaped(self, box):
    -        value = self.getvalue(box, create=False)
    -        if value:
    -            return value.is_unescaped
    -        return False
    +        return self._check_flag(box, HF_IS_UNESCAPED)
     
         def is_likely_virtual(self, box):
    -        value = self.getvalue(box, create=False)
    -        if value:
    -            return value.likely_virtual
    -        return False
    +        # note: this is different from _check_flag()
    +        return (isinstance(box, RefFrontendOp) and
    +                self.test_likely_virtual_version(box) and
    +                test_flags(box, HF_LIKELY_VIRTUAL))
     
         def new(self, box):
    -        value = self.getvalue(box)
    -        value.is_unescaped = True
    -        value.likely_virtual = True
    -        value.seen_allocation = True
    +        assert isinstance(box, RefFrontendOp)
    +        self.update_version(box)
    +        add_flags(box, HF_LIKELY_VIRTUAL | HF_SEEN_ALLOCATION | HF_IS_UNESCAPED)
     
         def new_array(self, box, lengthbox):
             self.new(box)
             self.arraylen_now_known(box, lengthbox)
     
         def getfield(self, box, descr):
    -        value = self.getvalue(box, create=False)
    -        if value:
    -            cache = self.heap_cache.get(descr, None)
    -            if cache:
    -                tovalue = cache.read(value)
    -                if tovalue:
    -                    return tovalue.box
    +        cache = self.heap_cache.get(descr, None)
    +        if cache:
    +            return cache.read(box)
             return None
     
         def get_field_updater(self, box, descr):
    -        value = self.getvalue(box)
    +        if not isinstance(box, RefFrontendOp):
    +            return dummy_field_updater
             cache = self.heap_cache.get(descr, None)
             if cache is None:
    -            cache = self.heap_cache[descr] = CacheEntry()
    -            fieldvalue = None
    +            cache = self.heap_cache[descr] = CacheEntry(self)
    +            fieldbox = None
             else:
    -            fieldvalue = cache.read(value)
    -        return FieldUpdater(self, value, cache, fieldvalue)
    +            fieldbox = cache.read(box)
    +        return FieldUpdater(box, cache, fieldbox)
     
         def getfield_now_known(self, box, descr, fieldbox):
             upd = self.get_field_updater(box, descr)
    @@ -365,17 +426,12 @@
         def getarrayitem(self, box, indexbox, descr):
             if not isinstance(indexbox, ConstInt):
                 return None
    -        value = self.getvalue(box, create=False)
    -        if value is None:
    -            return None
             index = indexbox.getint()
             cache = self.heap_array_cache.get(descr, None)
             if cache:
                 indexcache = cache.get(index, None)
                 if indexcache is not None:
    -                resvalue = indexcache.read(value)
    -                if resvalue:
    -                    return resvalue.box
    +                return indexcache.read(box)
             return None
     
         def _get_or_make_array_cache_entry(self, indexbox, descr):
    @@ -385,16 +441,14 @@
             cache = self.heap_array_cache.setdefault(descr, {})
             indexcache = cache.get(index, None)
             if indexcache is None:
    -            cache[index] = indexcache = CacheEntry()
    +            cache[index] = indexcache = CacheEntry(self)
             return indexcache
     
     
         def getarrayitem_now_known(self, box, indexbox, fieldbox, descr):
    -        value = self.getvalue(box)
    -        fieldvalue = self.getvalue(fieldbox)
             indexcache = self._get_or_make_array_cache_entry(indexbox, descr)
             if indexcache:
    -            indexcache.read_now_known(value, fieldvalue)
    +            indexcache.read_now_known(box, fieldbox)
     
         def setarrayitem(self, box, indexbox, fieldbox, descr):
             if not isinstance(indexbox, ConstInt):
    @@ -402,25 +456,31 @@
                 if cache is not None:
                     cache.clear()
                 return
    -        value = self.getvalue(box)
    -        fieldvalue = self.getvalue(fieldbox)
             indexcache = self._get_or_make_array_cache_entry(indexbox, descr)
             if indexcache:
    -            indexcache.do_write_with_aliasing(value, fieldvalue)
    +            indexcache.do_write_with_aliasing(box, fieldbox)
     
         def arraylen(self, box):
    -        value = self.getvalue(box, create=False)
    -        if value and value.length:
    -            return value.length.box
    +        if (isinstance(box, RefFrontendOp) and
    +            self.test_head_version(box) and
    +            box._heapc_deps is not None):
    +            res_box = box._heapc_deps[0]
    +            if res_box is not None:
    +                return maybe_replace_with_const(res_box)
             return None
     
         def arraylen_now_known(self, box, lengthbox):
    -        value = self.getvalue(box)
    -        value.length = self.getvalue(lengthbox)
    +        # we store in '_heapc_deps' a list of boxes: the *first* box is
    +        # the known length or None, and the remaining boxes are the
    +        # regular dependencies.
    +        if isinstance(box, Const):
    +            return
    +        deps = self._get_deps(box)
    +        assert deps is not None
    +        deps[0] = lengthbox
     
         def replace_box(self, oldbox, newbox):
    -        value = self.getvalue(oldbox, create=False)
    -        if value is None:
    -            return
    -        value.box = newbox
    -        self.values[newbox] = value
    +        # here, only for replacing a box with a const
    +        if isinstance(oldbox, FrontendOp) and isinstance(newbox, Const):
    +            assert newbox.same_constant(constant_from_op(oldbox))
    +            oldbox.set_replaced_with_const()
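
The version-number trick described in the new HeapCache comment above can be shown in isolation: each flag word carries the version at which it was written, so a global reset only bumps the version counter and every stale flag word then reads as empty. The sketch below is pure Python with illustrative names, not the real RefFrontendOp/HeapCache API.

    VERSION_INC    = 0x40      # flag bits live below this value
    HF_KNOWN_CLASS = 0x02

    class BoxSketch(object):
        flags = 0

    class CacheSketch(object):
        def __init__(self):
            self.head_version = 0

        def reset(self):
            # O(1) global reset: any flag word carrying an older version
            # is now considered outdated, without touching the boxes
            self.head_version += VERSION_INC

        def set_flag(self, box, flag):
            if box.flags < self.head_version:
                box.flags = self.head_version   # drop the outdated flags
            box.flags |= flag

        def test_flag(self, box, flag):
            return box.flags >= self.head_version and bool(box.flags & flag)

    box, cache = BoxSketch(), CacheSketch()
    cache.set_flag(box, HF_KNOWN_CLASS)
    assert cache.test_flag(box, HF_KNOWN_CLASS)
    cache.reset()
    assert not cache.test_flag(box, HF_KNOWN_CLASS)   # cleared in O(1)
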
    diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py
    --- a/rpython/jit/metainterp/history.py
    +++ b/rpython/jit/metainterp/history.py
    @@ -3,12 +3,17 @@
     from rpython.rlib.objectmodel import we_are_translated, Symbolic
     from rpython.rlib.objectmodel import compute_unique_id, specialize
     from rpython.rlib.rarithmetic import r_int64, is_valid_int
    +from rpython.rlib.rarithmetic import LONG_BIT, intmask, r_uint
    +from rpython.rlib.jit import Counters
     
     from rpython.conftest import option
     
    -from rpython.jit.metainterp.resoperation import ResOperation, rop, AbstractValue
    +from rpython.jit.metainterp.resoperation import ResOperation, rop,\
    +    AbstractValue, oparity, AbstractResOp, IntOp, RefOp, FloatOp,\
    +    opclasses
     from rpython.jit.codewriter import heaptracker, longlong
     import weakref
    +from rpython.jit.metainterp import jitexc
     
     # ____________________________________________________________
     
    @@ -22,6 +27,15 @@
     
     FAILARGS_LIMIT = 1000
     
    +class SwitchToBlackhole(jitexc.JitException):
    +    def __init__(self, reason, raising_exception=False):
    +        self.reason = reason
    +        self.raising_exception = raising_exception
    +        # ^^^ must be set to True if the SwitchToBlackhole is raised at a
    +        #     point where the exception on metainterp.last_exc_value
    +        #     is supposed to be raised.  The default False means that it
    +        #     should just be copied into the blackhole interp, but not raised.
    +
     def getkind(TYPE, supports_floats=True,
                       supports_longlong=True,
                       supports_singlefloats=True):
    @@ -72,57 +86,10 @@
                             ) #compute_unique_id(box))
     
     
    -class XxxAbstractValue(object):
    -    __slots__ = ()
    -
    -    def getint(self):
    -        raise NotImplementedError
    -
    -    def getfloatstorage(self):
    -        raise NotImplementedError
    -
    -    def getfloat(self):
    -        return longlong.getrealfloat(self.getfloatstorage())
    -
    -    def getref_base(self):
    -        raise NotImplementedError
    -
    -    def getref(self, TYPE):
    -        raise NotImplementedError
    -    getref._annspecialcase_ = 'specialize:arg(1)'
    -
    -    def constbox(self):
    -        raise NotImplementedError
    -
    -    def getaddr(self):
    -        "Only for raw addresses (BoxInt & ConstInt), not for GC addresses"
    -        raise NotImplementedError
    -
    -    def sort_key(self):
    -        raise NotImplementedError
    -
    -    def nonnull(self):
    -        raise NotImplementedError
    -
    -    def repr_rpython(self):
    -        return '%s' % self
    -
    -    def _get_str(self):
    -        raise NotImplementedError
    -
    -    def same_box(self, other):
    -        return self is other
    -
    -    def same_shape(self, other):
    -        # only structured containers can compare their shape (vector box)
    -        return True
    -
    -    def getaccum(self):
    -        return None
    -
     class AbstractDescr(AbstractValue):
    -    __slots__ = ()
    +    __slots__ = ('descr_index',)
         llopaque = True
    +    descr_index = -1
     
         def repr_of_descr(self):
             return '%r' % (self,)
    @@ -204,7 +171,7 @@
     
     
     class Const(AbstractValue):
    -    __slots__ = ()
    +    _attrs_ = ()
     
         @staticmethod
         def _new(x):
    @@ -638,43 +605,174 @@
     # ____________________________________________________________
     
     
    +FO_REPLACED_WITH_CONST = r_uint(1)
    +FO_POSITION_SHIFT      = 1
    +FO_POSITION_MASK       = r_uint(0xFFFFFFFE)
    +
    +
    +class FrontendOp(AbstractResOp):
    +    type = 'v'
    +    _attrs_ = ('position_and_flags',)
    +
    +    def __init__(self, pos):
    +        # p is the 32-bit position shifted left by one (might be negative,
    +        # but casted to the 32-bit UINT type)
    +        p = rffi.cast(rffi.UINT, pos << FO_POSITION_SHIFT)
    +        self.position_and_flags = r_uint(p)    # zero-extended to a full word
    +
    +    def get_position(self):
    +        # p is the signed 32-bit position, from self.position_and_flags
    +        p = rffi.cast(rffi.INT, self.position_and_flags)
    +        return intmask(p) >> FO_POSITION_SHIFT
    +
    +    def set_position(self, new_pos):
    +        assert new_pos >= 0
    +        self.position_and_flags &= ~FO_POSITION_MASK
    +        self.position_and_flags |= r_uint(new_pos << FO_POSITION_SHIFT)
    +
    +    def is_replaced_with_const(self):
    +        return bool(self.position_and_flags & FO_REPLACED_WITH_CONST)
    +
    +    def set_replaced_with_const(self):
    +        self.position_and_flags |= FO_REPLACED_WITH_CONST
    +
    +    def __repr__(self):
    +        return '%s(0x%x)' % (self.__class__.__name__, self.position_and_flags)
    +
    +class IntFrontendOp(IntOp, FrontendOp):
    +    _attrs_ = ('position_and_flags', '_resint')
    +
    +    def copy_value_from(self, other):
    +        self._resint = other.getint()
    +
    +class FloatFrontendOp(FloatOp, FrontendOp):
    +    _attrs_ = ('position_and_flags', '_resfloat')
    +
    +    def copy_value_from(self, other):
    +        self._resfloat = other.getfloatstorage()
    +
    +class RefFrontendOp(RefOp, FrontendOp):
    +    _attrs_ = ('position_and_flags', '_resref', '_heapc_deps')
    +    if LONG_BIT == 32:
    +        _attrs_ += ('_heapc_flags',)   # on 64 bit, this gets stored into the
    +        _heapc_flags = r_uint(0)       # high 32 bits of 'position_and_flags'
    +    _heapc_deps = None
    +
    +    def copy_value_from(self, other):
    +        self._resref = other.getref_base()
    +
    +    if LONG_BIT == 32:
    +        def _get_heapc_flags(self):
    +            return self._heapc_flags
    +        def _set_heapc_flags(self, value):
    +            self._heapc_flags = value
    +    else:
    +        def _get_heapc_flags(self):
    +            return self.position_and_flags >> 32
    +        def _set_heapc_flags(self, value):
    +            self.position_and_flags = (
    +                (self.position_and_flags & 0xFFFFFFFF) |
    +                (value << 32))
    +
    +
     class History(object):
    +    ends_with_jump = False
    +    trace = None
    +
         def __init__(self):
    -        self.inputargs = None
    -        self.operations = []
    +        self.descr_cache = {}
    +        self.descrs = {}
    +        self.consts = []
    +        self._cache = []
    +
    +    def set_inputargs(self, inpargs, metainterp_sd):
    +        from rpython.jit.metainterp.opencoder import Trace
    +
    +        self.trace = Trace(inpargs, metainterp_sd)
    +        self.inputargs = inpargs
    +        if self._cache:
    +            # hack to record the ops *after* we know our inputargs
    +            for (opnum, argboxes, op, descr) in self._cache:
    +                pos = self.trace.record_op(opnum, argboxes, descr)
    +                op.set_position(pos)
    +            self._cache = None
    +
    +    def length(self):
    +        return self.trace._count - len(self.trace.inputargs)
    +
    +    def get_trace_position(self):
    +        return self.trace.cut_point()
    +
    +    def cut(self, cut_at):
    +        self.trace.cut_at(cut_at)
    +
    +    def any_operation(self):
    +        return self.trace._count > self.trace._start
    +
    +    @specialize.argtype(2)
    +    def set_op_value(self, op, value):
    +        if value is None:
    +            return        
    +        elif isinstance(value, bool):
    +            op.setint(int(value))
    +        elif lltype.typeOf(value) == lltype.Signed:
    +            op.setint(value)
    +        elif lltype.typeOf(value) is longlong.FLOATSTORAGE:
    +            op.setfloatstorage(value)
    +        else:
    +            assert lltype.typeOf(value) == llmemory.GCREF
    +            op.setref_base(value)
    +
    +    def _record_op(self, opnum, argboxes, descr=None):
    +        from rpython.jit.metainterp.opencoder import FrontendTagOverflow
    +
    +        try:
    +            return self.trace.record_op(opnum, argboxes, descr)
    +        except FrontendTagOverflow:
    +            # note that with the default settings this one should not
    +            # happen - however if we hit that case, we don't get
    +            # anything disabled
    +            raise SwitchToBlackhole(Counters.ABORT_TOO_LONG)
     
         @specialize.argtype(3)
         def record(self, opnum, argboxes, value, descr=None):
    -        op = ResOperation(opnum, argboxes, descr)
    +        if self.trace is None:
    +            pos = 2**14 - 1
    +        else:
    +            pos = self._record_op(opnum, argboxes, descr)
             if value is None:
    -            assert op.type == 'v'
    +            op = FrontendOp(pos)
             elif isinstance(value, bool):
    -            assert op.type == 'i'
    -            op.setint(int(value))
    +            op = IntFrontendOp(pos)
             elif lltype.typeOf(value) == lltype.Signed:
    -            assert op.type == 'i'
    -            op.setint(value)
    +            op = IntFrontendOp(pos)
             elif lltype.typeOf(value) is longlong.FLOATSTORAGE:
    -            assert op.type == 'f'
    -            op.setfloatstorage(value)
    +            op = FloatFrontendOp(pos)
             else:
    -            assert lltype.typeOf(value) == llmemory.GCREF
    -            assert op.type == 'r'
    -            op.setref_base(value)
    -        self.operations.append(op)
    +            op = RefFrontendOp(pos)
    +        if self.trace is None:
    +            self._cache.append((opnum, argboxes, op, descr))
    +        self.set_op_value(op, value)
             return op
     
    +    def record_nospec(self, opnum, argboxes, descr=None):
    +        tp = opclasses[opnum].type
    +        pos = self._record_op(opnum, argboxes, descr)
    +        if tp == 'v':
    +            return FrontendOp(pos)
    +        elif tp == 'i':
    +            return IntFrontendOp(pos)
    +        elif tp == 'f':
    +            return FloatFrontendOp(pos)
    +        assert tp == 'r'
    +        return RefFrontendOp(pos)
    +
         def record_default_val(self, opnum, argboxes, descr=None):
    -        op = ResOperation(opnum, argboxes, descr)
    -        assert op.is_same_as()
    +        assert rop.is_same_as(opnum)
    +        op = self.record_nospec(opnum, argboxes, descr)
             op.copy_value_from(argboxes[0])
    -        self.operations.append(op)
             return op
     
    -    def substitute_operation(self, position, opnum, argboxes, descr=None):
    -        resbox = self.operations[position].result
    -        op = ResOperation(opnum, argboxes, resbox, descr)
    -        self.operations[position] = op
     
     # ____________________________________________________________
     
    @@ -720,15 +818,15 @@
         compiled_count = 0
         enter_count = 0
         aborted_count = 0
    -    operations = None
     
    -    def __init__(self):
    +    def __init__(self, metainterp_sd):
             self.loops = []
             self.locations = []
             self.aborted_keys = []
             self.invalidated_token_numbers = set()    # <- not RPython
             self.jitcell_token_wrefs = []
             self.jitcell_dicts = []                   # <- not RPython
    +        self.metainterp_sd = metainterp_sd
     
         def clear(self):
             del self.loops[:]
    @@ -747,7 +845,7 @@
             self.jitcell_token_wrefs.append(weakref.ref(token))
     
         def set_history(self, history):
    -        self.operations = history.operations
    +        self.history = history
     
         def aborted(self):
             self.aborted_count += 1
    @@ -784,7 +882,9 @@
     
         def check_history(self, expected=None, **check):
             insns = {}
    -        for op in self.operations:
    +        t = self.history.trace.get_iter()
    +        while not t.done():
    +            op = t.next()
                 opname = op.getopname()
                 insns[opname] = insns.get(opname, 0) + 1
             if expected is not None:
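
FrontendOp above packs the trace position and a "replaced with const" bit into a single word: the position is shifted left by one and the low bit holds the flag (on 64-bit, the upper 32 bits additionally store the heap-cache flags). The following pure-Python sketch covers only the position/flag packing and ignores the negative positions and rffi casts handled by the real class.

    FO_REPLACED_WITH_CONST = 0x1
    FO_POSITION_SHIFT      = 1

    class FrontendOpSketch(object):
        def __init__(self, pos):
            self.position_and_flags = pos << FO_POSITION_SHIFT

        def get_position(self):
            return self.position_and_flags >> FO_POSITION_SHIFT

        def is_replaced_with_const(self):
            return bool(self.position_and_flags & FO_REPLACED_WITH_CONST)

        def set_replaced_with_const(self):
            self.position_and_flags |= FO_REPLACED_WITH_CONST

    op = FrontendOpSketch(42)
    op.set_replaced_with_const()
    assert op.get_position() == 42
    assert op.is_replaced_with_const()
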
    diff --git a/rpython/jit/metainterp/logger.py b/rpython/jit/metainterp/logger.py
    --- a/rpython/jit/metainterp/logger.py
    +++ b/rpython/jit/metainterp/logger.py
    @@ -12,6 +12,19 @@
             self.metainterp_sd = metainterp_sd
             self.guard_number = guard_number
     
    +    def log_loop_from_trace(self, trace, memo):
    +        if not have_debug_prints():
    +            return
    +        inputargs, ops = self._unpack_trace(trace)
    +        self.log_loop(inputargs, ops, memo=memo)
    +
    +    def _unpack_trace(self, trace):
    +        ops = []
    +        i = trace.get_iter()
    +        while not i.done():
    +            ops.append(i.next())
    +        return i.inputargs, ops
    +
         def log_loop(self, inputargs, operations, number=0, type=None,
                      ops_offset=None, name='', memo=None):
             if type is None:
    @@ -82,8 +95,11 @@
             debug_stop("jit-log-short-preamble")
             return logops
     
    -    def log_abort_loop(self, inputargs, operations, memo=None):
    +    def log_abort_loop(self, trace, memo=None):
             debug_start("jit-abort-log")
    +        if not have_debug_prints():
    +            return
    +        inputargs, operations = self._unpack_trace(trace)
             logops = self._log_operations(inputargs, operations, ops_offset=None,
                                           memo=memo)
             debug_stop("jit-abort-log")
    
    From pypy.commits at gmail.com  Fri Mar 25 14:56:02 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 11:56:02 -0700 (PDT)
    Subject: [pypy-commit] pypy default: fix whatsnew
    Message-ID: <56f589c2.02931c0a.25700.0340@mx.google.com>
    
    Author: fijal
    Branch: 
    Changeset: r83365:857e78c019da
    Date: 2016-03-25 20:55 +0200
    http://bitbucket.org/pypy/pypy/changeset/857e78c019da/
    
    Log:	fix whatsnew
    
    diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
    --- a/pypy/doc/whatsnew-head.rst
    +++ b/pypy/doc/whatsnew-head.rst
    @@ -27,3 +27,8 @@
     .. branch: fix_transpose_for_list_v3
     
     Allow arguments to transpose to be sequences
    +
    +.. branch: jit-leaner-frontend
    +
    +Improve the tracing speed in the frontend as well as heapcache by using a more compact representation
    +of traces
    \ No newline at end of file
    
    From pypy.commits at gmail.com  Fri Mar 25 15:09:57 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 12:09:57 -0700 (PDT)
    Subject: [pypy-commit] pypy jit-leaner-frontend: close merged branch
    Message-ID: <56f58d05.13821c0a.4b41f.0b43@mx.google.com>
    
    Author: fijal
    Branch: jit-leaner-frontend
    Changeset: r83366:5044de96586a
    Date: 2016-03-25 21:08 +0200
    http://bitbucket.org/pypy/pypy/changeset/5044de96586a/
    
    Log:	close merged branch
    
    
    From pypy.commits at gmail.com  Fri Mar 25 15:09:59 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 12:09:59 -0700 (PDT)
    Subject: [pypy-commit] pypy default: remerge jit-leaner-frontend
    Message-ID: <56f58d07.4a811c0a.1db8.0c66@mx.google.com>
    
    Author: fijal
    Branch: 
    Changeset: r83367:2094e6aab3b2
    Date: 2016-03-25 21:09 +0200
    http://bitbucket.org/pypy/pypy/changeset/2094e6aab3b2/
    
    Log:	remerge jit-leaner-frontend
    
    
    From pypy.commits at gmail.com  Fri Mar 25 15:33:57 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 12:33:57 -0700 (PDT)
    Subject: [pypy-commit] pypy faster-traceback: try to get a raw way of
     getting traceback quickly
    Message-ID: <56f592a5.c85b1c0a.19cb6.214c@mx.google.com>
    
    Author: fijal
    Branch: faster-traceback
    Changeset: r83368:5c1292c4398f
    Date: 2016-03-25 21:32 +0200
    http://bitbucket.org/pypy/pypy/changeset/5c1292c4398f/
    
    Log:	try to get a raw way of getting traceback quickly
    
    diff --git a/pypy/module/__pypy__/interp_traceback.py b/pypy/module/__pypy__/interp_traceback.py
    new file mode 100644
    diff --git a/pypy/module/_vmprof/__init__.py b/pypy/module/_vmprof/__init__.py
    --- a/pypy/module/_vmprof/__init__.py
    +++ b/pypy/module/_vmprof/__init__.py
    @@ -13,6 +13,8 @@
             'disable': 'interp_vmprof.disable',
             'write_all_code_objects': 'interp_vmprof.write_all_code_objects',
             'VMProfError': 'space.fromcache(interp_vmprof.Cache).w_VMProfError',
    +        'get_fast_traceback' : 'interp_vmprof.get_fast_traceback',
    +        'code_get_unique_id' : 'interp_vmprof.code_get_unique_id',
         }
     
     
    diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py
    --- a/pypy/module/_vmprof/interp_vmprof.py
    +++ b/pypy/module/_vmprof/interp_vmprof.py
    @@ -5,6 +5,9 @@
     from pypy.interpreter.baseobjspace import W_Root
     from rpython.rlib import rvmprof
     
    +from rpython.rtyper.lltypesystem import lltype
    +
    +
     # ____________________________________________________________
     
     
    @@ -82,3 +85,20 @@
             rvmprof.disable()
         except rvmprof.VMProfError, e:
             raise VMProfError(space, e)
    +
    +
    +def get_fast_traceback(space):
    +    MAX_SIZE = 1000
    +    l = []
    +
    +    with lltype.scoped_alloc(lltype.Signed, MAX_SIZE) as buf:
    +        n = rvmprof._get_vmprof().cintf.get_stack_trace_default(
    +            buf, MAX_SIZE)
    +        for i in range(n):
    +            l.append(buf[i])
    +    return space.newlist_int(l)
    +
    + at unwrap_spec(w_code=PyCode)
    +def code_get_unique_id(space, w_code):
    +    return space.wrap(w_code._vmprof_unique_id)
    +
    diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py
    --- a/rpython/rlib/rvmprof/cintf.py
    +++ b/rpython/rlib/rvmprof/cintf.py
    @@ -55,6 +55,10 @@
                                                 [rffi.INT], lltype.Void,
                                                 compilation_info=eci,
                                                 _nowrapper=True)
    +    vmprof_get_stack_trace_default = rffi.llexternal(
    +        "vmprof_get_stack_trace_default",
    +        [rffi.CArrayPtr(lltype.Signed), rffi.INT],
    +        rffi.INT, compilation_info=eci, threadsafe=False)
         return CInterface(locals())
     
     
    diff --git a/rpython/rlib/rvmprof/src/vmprof_common.h b/rpython/rlib/rvmprof/src/vmprof_common.h
    --- a/rpython/rlib/rvmprof/src/vmprof_common.h
    +++ b/rpython/rlib/rvmprof/src/vmprof_common.h
    @@ -119,3 +119,8 @@
         return 0;
     }
     #endif
    +
    +static int get_stack_trace_default(intptr_t *result, int max_depth)
    +{
    +    return get_stack_trace(get_vmprof_stack(), result, max_depth, 0);
    +}
    
    From pypy.commits at gmail.com  Fri Mar 25 16:20:37 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 13:20:37 -0700 (PDT)
    Subject: [pypy-commit] pypy default: fix
    Message-ID: <56f59d95.13821c0a.4b41f.21b0@mx.google.com>
    
    Author: fijal
    Branch: 
    Changeset: r83370:55bbf61581a3
    Date: 2016-03-25 22:19 +0200
    http://bitbucket.org/pypy/pypy/changeset/55bbf61581a3/
    
    Log:	fix
    
    diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py
    --- a/rpython/jit/metainterp/opencoder.py
    +++ b/rpython/jit/metainterp/opencoder.py
    @@ -34,9 +34,12 @@
         check_range = False
         # we can move SMALL ints here, if necessary
     
    +def get_model(self):
    +    return _get_model(self.metainterp_sd)
    +
     @specialize.memo()
    -def get_model(self):
    -    return getattr(self.metainterp_sd, 'opencoder_model', Model)
    +def _get_model(metainterp_sd):
    +    return getattr(metainterp_sd, 'opencoder_model', Model)
     
     SMALL_INT_STOP  = (2 ** (15 - TAGSHIFT)) - 1
     SMALL_INT_START = -SMALL_INT_STOP # we might want to distribute them uneven
    
    From pypy.commits at gmail.com  Fri Mar 25 16:20:35 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 13:20:35 -0700 (PDT)
    Subject: [pypy-commit] pypy faster-traceback: work work work
    Message-ID: <56f59d93.0a301c0a.4ecc3.2a31@mx.google.com>
    
    Author: fijal
    Branch: faster-traceback
    Changeset: r83369:13d1a1f6b325
    Date: 2016-03-25 22:18 +0200
    http://bitbucket.org/pypy/pypy/changeset/13d1a1f6b325/
    
    Log:	work work work
    
    diff --git a/pypy/module/_vmprof/interp_vmprof.py b/pypy/module/_vmprof/interp_vmprof.py
    --- a/pypy/module/_vmprof/interp_vmprof.py
    +++ b/pypy/module/_vmprof/interp_vmprof.py
    @@ -5,7 +5,7 @@
     from pypy.interpreter.baseobjspace import W_Root
     from rpython.rlib import rvmprof
     
    -from rpython.rtyper.lltypesystem import lltype
    +from rpython.rtyper.lltypesystem import lltype, rffi
     
     
     # ____________________________________________________________
    @@ -91,8 +91,8 @@
         MAX_SIZE = 1000
         l = []
     
    -    with lltype.scoped_alloc(lltype.Signed, MAX_SIZE) as buf:
    -        n = rvmprof._get_vmprof().cintf.get_stack_trace_default(
    +    with lltype.scoped_alloc(rffi.CArray(lltype.Signed), MAX_SIZE) as buf:
    +        n = rvmprof._get_vmprof().cintf.vmprof_get_stack_trace_default(
                 buf, MAX_SIZE)
             for i in range(n):
                 l.append(buf[i])
    diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py
    --- a/rpython/jit/metainterp/opencoder.py
    +++ b/rpython/jit/metainterp/opencoder.py
    @@ -34,9 +34,12 @@
         check_range = False
         # we can move SMALL ints here, if necessary
     
    +def get_model(self):
    +    return _get_model(self.metainterp_sd)
    +
     @specialize.memo()
    -def get_model(self):
    -    return getattr(self.metainterp_sd, 'opencoder_model', Model)
    +def _get_model(metainterp_sd):
    +    return getattr(metainterp_sd, 'opencoder_model', Model)
     
     SMALL_INT_STOP  = (2 ** (15 - TAGSHIFT)) - 1
     SMALL_INT_START = -SMALL_INT_STOP # we might want to distribute them uneven
    diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py
    --- a/rpython/rlib/rvmprof/cintf.py
    +++ b/rpython/rlib/rvmprof/cintf.py
    @@ -58,7 +58,7 @@
         vmprof_get_stack_trace_default = rffi.llexternal(
             "vmprof_get_stack_trace_default",
             [rffi.CArrayPtr(lltype.Signed), rffi.INT],
    -        rffi.INT, compilation_info=eci, threadsafe=False)
    +        rffi.INT, compilation_info=eci, releasegil=False)
         return CInterface(locals())
     
     
    
    From pypy.commits at gmail.com  Fri Mar 25 16:43:02 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Fri, 25 Mar 2016 13:43:02 -0700 (PDT)
    Subject: [pypy-commit] pypy default: pass list of resops here
    Message-ID: <56f5a2d6.6507c20a.adb18.ffffb885@mx.google.com>
    
    Author: fijal
    Branch: 
    Changeset: r83371:09249cf47b13
    Date: 2016-03-25 22:42 +0200
    http://bitbucket.org/pypy/pypy/changeset/09249cf47b13/
    
    Log:	pass list of resops here
    
    diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
    --- a/rpython/jit/metainterp/pyjitpl.py
    +++ b/rpython/jit/metainterp/pyjitpl.py
    @@ -2256,7 +2256,7 @@
                         jd_sd.warmstate.get_location_str(greenkey),
                         self.staticdata.logger_ops._make_log_operations(
                             self.box_names_memo),
    -                    self.history.trace)
    +                    self.history.trace.unpack()[1])
                 if self.aborted_tracing_jitdriver is not None:
                     jd_sd = self.aborted_tracing_jitdriver
                     greenkey = self.aborted_tracing_greenkey
    
    From pypy.commits at gmail.com  Fri Mar 25 16:43:20 2016
    From: pypy.commits at gmail.com (rlamy)
    Date: Fri, 25 Mar 2016 13:43:20 -0700 (PDT)
    Subject: [pypy-commit] pypy follow_symlinks: Implement missing features in
     chmod()
    Message-ID: <56f5a2e8.657bc20a.64339.ffffba67@mx.google.com>
    
    Author: Ronan Lamy 
    Branch: follow_symlinks
    Changeset: r83372:d8c010696602
    Date: 2016-03-25 20:42 +0000
    http://bitbucket.org/pypy/pypy/changeset/d8c010696602/
    
    Log:	Implement missing features in chmod()
    
    diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py
    --- a/pypy/module/posix/interp_posix.py
    +++ b/pypy/module/posix/interp_posix.py
    @@ -1,6 +1,7 @@
     import os
     import sys
     from math import modf
    +from errno import ENOTSUP, EOPNOTSUPP
     
     from rpython.rlib import rposix, rposix_stat
     from rpython.rlib import objectmodel, rurandom
    @@ -113,19 +114,22 @@
         DEFAULT_DIR_FD = -100
     DIR_FD_AVAILABLE = False
     
    -def _unwrap_fd(space, w_value):
    +def unwrap_fd(space, w_value):
    +    return space.c_int_w(w_value)
    +
    +def _unwrap_dirfd(space, w_value):
         if space.is_none(w_value):
             return DEFAULT_DIR_FD
         else:
    -        return space.c_int_w(w_value)
    +        return unwrap_fd(space, w_value)
     
     class _DirFD(Unwrapper):
         def unwrap(self, space, w_value):
    -        return _unwrap_fd(space, w_value)
    +        return _unwrap_dirfd(space, w_value)
     
     class _DirFD_Unavailable(Unwrapper):
         def unwrap(self, space, w_value):
    -        dir_fd = _unwrap_fd(space, w_value)
    +        dir_fd = unwrap_fd(space, w_value)
             if dir_fd == DEFAULT_DIR_FD:
                 return dir_fd
             else:
    @@ -140,7 +144,7 @@
     def argument_unavailable(space, funcname, arg):
         return oefmt(
                 space.w_NotImplementedError,
    -            "%s: %s unavailable on this platform" % (funcname, arg))
    +            "%s: %s unavailable on this platform", funcname, arg)
     
     @unwrap_spec(flags=c_int, mode=c_int, dir_fd=DirFD(rposix.HAVE_OPENAT))
     def open(space, w_path, flags, mode=0777, dir_fd=DEFAULT_DIR_FD):
    @@ -477,9 +481,9 @@
       of R_OK, W_OK, and X_OK."""
         if not rposix.HAVE_FACCESSAT:
             if not follow_symlinks:
    -            raise argument_unavailable("access", "follow_symlinks")
    +            raise argument_unavailable(space, "access", "follow_symlinks")
             if effective_ids:
    -            raise argument_unavailable("access", "effective_ids")
    +            raise argument_unavailable(space, "access", "effective_ids")
     
         try:
             if dir_fd == DEFAULT_DIR_FD and follow_symlinks and not effective_ids:
    @@ -773,7 +777,7 @@
             raise wrap_oserror(space, e)
         return space.newtuple([space.wrap(fd1), space.wrap(fd2)])
     
    - at unwrap_spec(mode=c_int, dir_fd=DirFD(available=False), follow_symlinks=kwonly(bool))
    + at unwrap_spec(mode=c_int, dir_fd=DirFD(rposix.HAVE_FCHMODAT), follow_symlinks=kwonly(bool))
     def chmod(space, w_path, mode, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True):
         """chmod(path, mode, *, dir_fd=None, follow_symlinks=True)
     
    @@ -791,21 +795,55 @@
       an open file descriptor.
     dir_fd and follow_symlinks may not be implemented on your platform.
       If they are unavailable, using them will raise a NotImplementedError."""
    +    if not rposix.HAVE_FCHMODAT:
    +        if not follow_symlinks:
    +            raise argument_unavailable(space, "chmod", "follow_symlinks")
    +        else:
    +            try:
    +                dispatch_filename(rposix.chmod)(space, w_path, mode)
    +                return
    +            except OSError as e:
    +                raise wrap_oserror2(space, e, w_path)
    +
         try:
    -        dispatch_filename(rposix.chmod)(space, w_path, mode)
    -    except OSError, e:
    -        raise wrap_oserror2(space, e, w_path)
    +        path = space.fsencode_w(w_path)
    +    except OperationError as operr:
    +        if not space.isinstance_w(w_path, space.w_int):
    +            raise oefmt(space.w_TypeError,
    +                "argument should be string, bytes or integer, not %T", w_path)
    +        fd = unwrap_fd(space, w_path)
    +        _chmod_fd(space, fd, mode)
    +    else:
    +        try:
    +            _chmod_path(path, mode, dir_fd, follow_symlinks)
    +        except OSError as e:
    +            if not follow_symlinks and e.errno in (ENOTSUP, EOPNOTSUPP):
    +                # fchmodat() doesn't actually implement follow_symlinks=False
    +                # so raise NotImplementedError in this case
    +                raise argument_unavailable(space, "chmod", "follow_symlinks")
    +            else:
    +                raise wrap_oserror2(space, e, w_path)
     
    - at unwrap_spec(mode=c_int)
    -def fchmod(space, w_fd, mode):
    -    """Change the access permissions of the file given by file
    -descriptor fd."""
    -    fd = space.c_filedescriptor_w(w_fd)
    +def _chmod_path(path, mode, dir_fd, follow_symlinks):
    +    if dir_fd != DEFAULT_DIR_FD or not follow_symlinks:
    +        rposix.fchmodat(path, mode, dir_fd, follow_symlinks)
    +    else:
    +        rposix.chmod(path, mode)
    +
    +def _chmod_fd(space, fd, mode):
         try:
             os.fchmod(fd, mode)
    -    except OSError, e:
    +    except OSError as e:
             raise wrap_oserror(space, e)
     
    +
    + at unwrap_spec(fd=c_int, mode=c_int)
    +def fchmod(space, fd, mode):
    +    """\
    +    Change the access permissions of the file given by file descriptor fd.
    +    """
    +    _chmod_fd(space, fd, mode)
    +
     @unwrap_spec(src_dir_fd=DirFD(available=False), dst_dir_fd=DirFD(available=False))
     def rename(space, w_old, w_new,
             src_dir_fd=DEFAULT_DIR_FD, dst_dir_fd=DEFAULT_DIR_FD):
    @@ -1197,7 +1235,7 @@
                 raise wrap_oserror2(space, e, w_path)
     
         if not follow_symlinks:
    -        raise argument_unavailable("utime", "follow_symlinks")
    +        raise argument_unavailable(space, "utime", "follow_symlinks")
     
         if not space.is_w(w_ns, space.w_None):
             raise oefmt(space.w_NotImplementedError,
    
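    For reference, the behaviour these new code paths aim for, seen from
    application level, is roughly the following sketch (hypothetical file
    names; it assumes an interpreter built from this branch, where os.chmod
    grows the keyword-only dir_fd and follow_symlinks arguments):

        import os

        # plain chmod; symlinks are followed by default
        os.chmod("/tmp/some-file", 0o644)

        # operate on the symlink itself; when the platform has no fchmodat()
        # (rposix.HAVE_FCHMODAT is False) this raises NotImplementedError
        try:
            os.chmod("/tmp/some-link", 0o644, follow_symlinks=False)
        except NotImplementedError:
            pass

        # an integer argument is treated as an open file descriptor (fchmod)
        fd = os.open("/tmp/some-file", os.O_RDONLY)
        os.chmod(fd, 0o600)
        os.close(fd)

        # dir_fd resolves the path relative to an open directory descriptor
        dfd = os.open("/tmp", os.O_RDONLY)
        os.chmod("some-file", 0o644, dir_fd=dfd)
        os.close(dfd)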
    From pypy.commits at gmail.com  Fri Mar 25 17:01:54 2016
    From: pypy.commits at gmail.com (rlamy)
    Date: Fri, 25 Mar 2016 14:01:54 -0700 (PDT)
    Subject: [pypy-commit] pypy follow_symlinks: Make 'times' argument in
     os.utime() optional
    Message-ID: <56f5a742.47afc20a.b9240.4e1a@mx.google.com>
    
    Author: Ronan Lamy 
    Branch: follow_symlinks
    Changeset: r83373:8c01214d65d7
    Date: 2016-03-25 21:01 +0000
    http://bitbucket.org/pypy/pypy/changeset/8c01214d65d7/
    
    Log:	Make 'times' argument in os.utime() optional
    
    diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py
    --- a/pypy/module/posix/interp_posix.py
    +++ b/pypy/module/posix/interp_posix.py
    @@ -1193,7 +1193,7 @@
         return space.wrap(ret)
     
     
    - at unwrap_spec(w_ns=kwonly(WrappedDefault(None)),
    + at unwrap_spec(w_times=WrappedDefault(None), w_ns=kwonly(WrappedDefault(None)),
         dir_fd=DirFD(rposix.HAVE_UTIMENSAT), follow_symlinks=kwonly(bool))
     def utime(space, w_path, w_times, w_ns, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True):
         """utime(path, times=None, *, ns=None, dir_fd=None, follow_symlinks=True)
    
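    At application level the change boils down to making the 'times' tuple
    optional (hypothetical file name; before this commit it had to be passed
    explicitly):

        import os

        os.utime("/tmp/some-file")                            # atime/mtime set to "now"
        os.utime("/tmp/some-file", (1458939660, 1458939660))  # explicit times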
    From pypy.commits at gmail.com  Fri Mar 25 17:15:40 2016
    From: pypy.commits at gmail.com (cfbolz)
    Date: Fri, 25 Mar 2016 14:15:40 -0700 (PDT)
    Subject: [pypy-commit] pypy guard-compatible: merge default
    Message-ID: <56f5aa7c.6bb8c20a.3a155.ffffcc0e@mx.google.com>
    
    Author: Carl Friedrich Bolz 
    Branch: guard-compatible
    Changeset: r83375:ba422164355b
    Date: 2016-03-25 09:25 +0100
    http://bitbucket.org/pypy/pypy/changeset/ba422164355b/
    
    Log:	merge default
    
    diff --git a/.hgtags b/.hgtags
    --- a/.hgtags
    +++ b/.hgtags
    @@ -19,3 +19,4 @@
     850edf14b2c75573720f59e95767335fb1affe55 release-4.0.0
     5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1
     246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0
    +bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1
    diff --git a/pypy/doc/config/translation.gc.txt b/pypy/doc/config/translation.gc.txt
    --- a/pypy/doc/config/translation.gc.txt
    +++ b/pypy/doc/config/translation.gc.txt
    @@ -1,24 +1,26 @@
     Choose the Garbage Collector used by the translated program.
    -The good performing collectors are "hybrid" and "minimark".
    -The default is "minimark".
    +The recommended default is "incminimark".
     
       - "ref": reference counting. Takes very long to translate and the result is
    -    slow.
    +    slow.  Used only for tests.  Don't use it for real RPython programs.
     
    -  - "marksweep": naive mark & sweep.
    +  - "none": no GC.  Leaks everything.  Don't use it for real RPython
    +    programs: the rate of leaking is immense.
     
       - "semispace": a copying semi-space GC.
     
       - "generation": a generational GC using the semi-space GC for the
         older generation.
     
    -  - "boehm": use the Boehm conservative GC.
    -
       - "hybrid": a hybrid collector of "generation" together with a
         mark-n-sweep old space
     
    -  - "markcompact": a slow, but memory-efficient collector,
    -    influenced e.g. by Smalltalk systems.
    +  - "boehm": use the Boehm conservative GC.
     
       - "minimark": a generational mark-n-sweep collector with good
         performance.  Includes page marking for large arrays.
    +
    +  - "incminimark": like minimark, but adds incremental major
    +    collections.  Seems to come with no performance drawback over
    +    "minimark", so it is the default.  A few recent features of PyPy
    +    (like cpyext) are only working with this GC.
    diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst
    --- a/pypy/doc/extradoc.rst
    +++ b/pypy/doc/extradoc.rst
    @@ -80,7 +80,7 @@
     .. _How to *not* write Virtual Machines for Dynamic Languages: https://bitbucket.org/pypy/extradoc/raw/tip/talk/dyla2007/dyla.pdf
     .. _`Tracing the Meta-Level: PyPy's Tracing JIT Compiler`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009/bolz-tracing-jit.pdf
     .. _`Faster than C#: Efficient Implementation of Dynamic Languages on .NET`: https://bitbucket.org/pypy/extradoc/raw/tip/talk/icooolps2009-dotnet/cli-jit.pdf
    -.. _Automatic JIT Compiler Generation with Runtime Partial Evaluation: http://wwwold.cobra.cs.uni-duesseldorf.de/thesis/final-master.pdf
    +.. _Automatic JIT Compiler Generation with Runtime Partial Evaluation: http://stups.hhu.de/mediawiki/images/b/b9/Master_bolz.pdf
     .. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/DynamicLanguages_abstracts.html#AACM-DLS07
     .. _EU Reports: index-report.html
     .. _Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution: http://sabi.net/nriley/pubs/dls6-riley.pdf
    diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
    --- a/pypy/doc/index-of-release-notes.rst
    +++ b/pypy/doc/index-of-release-notes.rst
    @@ -6,6 +6,7 @@
     
     .. toctree::
     
    +   release-5.0.1.rst
        release-5.0.0.rst
        release-4.0.1.rst
        release-4.0.0.rst
    diff --git a/pypy/doc/release-5.0.1.rst b/pypy/doc/release-5.0.1.rst
    new file mode 100644
    --- /dev/null
    +++ b/pypy/doc/release-5.0.1.rst
    @@ -0,0 +1,40 @@
    +==========
    +PyPy 5.0.1
    +==========
    +
    +We have released a bugfix for PyPy 5.0, after reports that the newly released
    +`lxml 3.6.0`_, which now supports PyPy 5.0 +, can `crash on large files`_.
    +Thanks to those who reported the crash. Please update, downloads are available
    +at pypy.org/download.html
    +
    +.. _`lxml 3.6.0`: https://pypi.python.org/pypi/lxml/3.6.0
    +.. _`crash on large files`: https://bitbucket.org/pypy/pypy/issues/2260
    +
    +The changes between PyPy 5.0 and 5.0.1 are only two bug fixes: one in
    +cpyext, which fixes notably (but not only) lxml; and another for a
    +corner case of the JIT.
    +
    +What is PyPy?
    +=============
    +
    +PyPy is a very compliant Python interpreter, almost a drop-in replacement for
    +CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
    +due to its integrated tracing JIT compiler.
    +
    +We also welcome developers of other
    +`dynamic languages`_ to see what RPython can do for them.
    +
    +This release supports **x86** machines on most common operating systems
    +(Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD),
    +newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux, and the
    +big- and little-endian variants of **PPC64** running Linux.
    +
    +.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
    +.. _`dynamic languages`: http://pypyjs.org
    +
    +Please update, and continue to help us make PyPy better.
    +
    +Cheers
    +
    +The PyPy Team
    +
    diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
    --- a/pypy/goal/targetpypystandalone.py
    +++ b/pypy/goal/targetpypystandalone.py
    @@ -327,7 +327,7 @@
                 # XXX possibly adapt options using modules
                 failures = create_cffi_import_libraries(exename, options, basedir)
                 # if failures, they were already printed
    -            print  >> sys.stderr, str(exename),'successfully built, but errors while building the above modules will be ignored'
    +            print  >> sys.stderr, str(exename),'successfully built (errors, if any, while building the above modules are ignored)'
             driver.task_build_cffi_imports = types.MethodType(task_build_cffi_imports, driver)
             driver.tasks['build_cffi_imports'] = driver.task_build_cffi_imports, [compile_goal]
             driver.default_goal = 'build_cffi_imports'
    diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
    --- a/pypy/interpreter/baseobjspace.py
    +++ b/pypy/interpreter/baseobjspace.py
    @@ -419,7 +419,10 @@
             self.wait_for_thread_shutdown()
             w_exitfunc = self.sys.getdictvalue(self, 'exitfunc')
             if w_exitfunc is not None:
    -            self.call_function(w_exitfunc)
    +            try:
    +                self.call_function(w_exitfunc)
    +            except OperationError as e:
    +                e.write_unraisable(self, 'sys.exitfunc == ', w_exitfunc)
             from pypy.interpreter.module import Module
             for w_mod in self.builtin_modules.values():
                 if isinstance(w_mod, Module) and w_mod.startup_called:
    diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
    --- a/pypy/interpreter/test/test_objspace.py
    +++ b/pypy/interpreter/test/test_objspace.py
    @@ -416,3 +416,14 @@
                 i -= 1
                 assert i >= 0
                 gc.collect()
    +
    +    def test_exitfunc_catches_exceptions(self):
    +        from pypy.tool.pytest.objspace import maketestobjspace
    +        space = maketestobjspace()
    +        space.appexec([], """():
    +            import sys
    +            sys.exitfunc = lambda: this_is_an_unknown_name
    +        """)
    +        space.finish()
    +        # assert that we reach this point without getting interrupted
    +        # by the OperationError(NameError)
    diff --git a/pypy/module/_cffi_backend/ctypeptr.py b/pypy/module/_cffi_backend/ctypeptr.py
    --- a/pypy/module/_cffi_backend/ctypeptr.py
    +++ b/pypy/module/_cffi_backend/ctypeptr.py
    @@ -124,7 +124,7 @@
                             s = rffi.charp2str(ptr)
                         else:
                             s = rffi.charp2strn(ptr, length)
    -                    return space.wrap(s)
    +                    return space.wrapbytes(s)
                     #
                     # pointer to a wchar_t: builds and returns a unicode
                     if self.is_unichar_ptr_or_array():
    @@ -372,15 +372,15 @@
             rffi_fclose(self.llf)
     
     
    -def prepare_file_argument(space, fileobj):
    -    fileobj.direct_flush()
    -    if fileobj.cffi_fileobj is None:
    -        fd = fileobj.direct_fileno()
    +def prepare_file_argument(space, w_fileobj):
    +    w_fileobj.direct_flush()
    +    if w_fileobj.cffi_fileobj is None:
    +        fd = w_fileobj.direct_fileno()
             if fd < 0:
                 raise OperationError(space.w_ValueError,
                                      space.wrap("file has no OS file descriptor"))
             try:
    -            fileobj.cffi_fileobj = CffiFileObj(fd, fileobj.mode)
    +            w_fileobj.cffi_fileobj = CffiFileObj(fd, w_fileobj.mode)
             except OSError, e:
                 raise wrap_oserror(space, e)
    -    return rffi.cast(rffi.CCHARP, fileobj.cffi_fileobj.llf)
    +    return rffi.cast(rffi.CCHARP, w_fileobj.cffi_fileobj.llf)
    diff --git a/pypy/module/_vmprof/test/test__vmprof.py b/pypy/module/_vmprof/test/test__vmprof.py
    --- a/pypy/module/_vmprof/test/test__vmprof.py
    +++ b/pypy/module/_vmprof/test/test__vmprof.py
    @@ -72,9 +72,9 @@
     
         def test_enable_ovf(self):
             import _vmprof
    -        raises(_vmprof.VMProfError, _vmprof.enable, 999, 0)
    -        raises(_vmprof.VMProfError, _vmprof.enable, 999, -2.5)
    -        raises(_vmprof.VMProfError, _vmprof.enable, 999, 1e300)
    -        raises(_vmprof.VMProfError, _vmprof.enable, 999, 1e300 * 1e300)
    +        raises(_vmprof.VMProfError, _vmprof.enable, 2, 0)
    +        raises(_vmprof.VMProfError, _vmprof.enable, 2, -2.5)
    +        raises(_vmprof.VMProfError, _vmprof.enable, 2, 1e300)
    +        raises(_vmprof.VMProfError, _vmprof.enable, 2, 1e300 * 1e300)
             NaN = (1e300*1e300) / (1e300*1e300)
    -        raises(_vmprof.VMProfError, _vmprof.enable, 999, NaN)
    +        raises(_vmprof.VMProfError, _vmprof.enable, 2, NaN)
    diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
    --- a/pypy/module/cpyext/api.py
    +++ b/pypy/module/cpyext/api.py
    @@ -833,14 +833,14 @@
         modulename = py.path.local(eci.libraries[-1])
     
         def dealloc_trigger():
    -        from pypy.module.cpyext.pyobject import _Py_Dealloc
    +        from pypy.module.cpyext.pyobject import decref
             print 'dealloc_trigger...'
             while True:
                 ob = rawrefcount.next_dead(PyObject)
                 if not ob:
                     break
                 print ob
    -            _Py_Dealloc(space, ob)
    +            decref(space, ob)
             print 'dealloc_trigger DONE'
             return "RETRY"
         rawrefcount.init(dealloc_trigger)
    diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py
    --- a/pypy/module/cpyext/bytesobject.py
    +++ b/pypy/module/cpyext/bytesobject.py
    @@ -1,4 +1,4 @@
    -from pypy.interpreter.error import OperationError
    +from pypy.interpreter.error import OperationError, oefmt
     from rpython.rtyper.lltypesystem import rffi, lltype
     from pypy.module.cpyext.api import (
         cpython_api, cpython_struct, bootstrap_function, build_type_checkers,
    @@ -80,7 +80,8 @@
         buflen = length + 1
         py_str.c_size = length
         py_str.c_buffer = lltype.malloc(rffi.CCHARP.TO, buflen,
    -                                    flavor='raw', zero=True)
    +                                    flavor='raw', zero=True,
    +                                    add_memory_pressure=True)
         return py_str
     
     def string_attach(space, py_obj, w_obj):
    @@ -133,8 +134,14 @@
         if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str:
             pass    # typecheck returned "ok" without forcing 'ref' at all
         elif not PyString_Check(space, ref):   # otherwise, use the alternate way
    -        raise OperationError(space.w_TypeError, space.wrap(
    -            "PyString_AsString only support strings"))
    +        from pypy.module.cpyext.unicodeobject import (
    +            PyUnicode_Check, _PyUnicode_AsDefaultEncodedString)
    +        if PyUnicode_Check(space, ref):
    +            ref = _PyUnicode_AsDefaultEncodedString(space, ref, lltype.nullptr(rffi.CCHARP.TO))
    +        else:
    +            raise oefmt(space.w_TypeError,
    +                        "expected string or Unicode object, %T found",
    +                        from_ref(space, ref))
         ref_str = rffi.cast(PyStringObject, ref)
         if not ref_str.c_buffer:
             # copy string buffer
    @@ -146,8 +153,14 @@
     @cpython_api([PyObject, rffi.CCHARPP, rffi.CArrayPtr(Py_ssize_t)], rffi.INT_real, error=-1)
     def PyString_AsStringAndSize(space, ref, buffer, length):
         if not PyString_Check(space, ref):
    -        raise OperationError(space.w_TypeError, space.wrap(
    -            "PyString_AsStringAndSize only support strings"))
    +        from pypy.module.cpyext.unicodeobject import (
    +            PyUnicode_Check, _PyUnicode_AsDefaultEncodedString)
    +        if PyUnicode_Check(space, ref):
    +            ref = _PyUnicode_AsDefaultEncodedString(space, ref, lltype.nullptr(rffi.CCHARP.TO))
    +        else:
    +            raise oefmt(space.w_TypeError,
    +                        "expected string or Unicode object, %T found",
    +                        from_ref(space, ref))
         ref_str = rffi.cast(PyStringObject, ref)
         if not ref_str.c_buffer:
             # copy string buffer
    diff --git a/pypy/module/cpyext/include/unicodeobject.h b/pypy/module/cpyext/include/unicodeobject.h
    --- a/pypy/module/cpyext/include/unicodeobject.h
    +++ b/pypy/module/cpyext/include/unicodeobject.h
    @@ -20,8 +20,12 @@
     
     typedef struct {
         PyObject_HEAD
    -    Py_UNICODE *buffer;
    +    Py_UNICODE *str;
         Py_ssize_t size;
    +    long hash;                  /* Hash value; -1 if not set */
    +    PyObject *defenc;           /* (Default) Encoded version as Python
    +                                   string, or NULL; this is used for
    +                                   implementing the buffer protocol */
     } PyUnicodeObject;
     
     
    diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
    --- a/pypy/module/cpyext/object.py
    +++ b/pypy/module/cpyext/object.py
    @@ -17,7 +17,8 @@
     @cpython_api([Py_ssize_t], rffi.VOIDP)
     def PyObject_MALLOC(space, size):
         return lltype.malloc(rffi.VOIDP.TO, size,
    -                         flavor='raw', zero=True)
    +                         flavor='raw', zero=True,
    +                         add_memory_pressure=True)
     
     @cpython_api([rffi.VOIDP], lltype.Void)
     def PyObject_FREE(space, ptr):
    diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py
    --- a/pypy/module/cpyext/pyobject.py
    +++ b/pypy/module/cpyext/pyobject.py
    @@ -50,7 +50,8 @@
                 size += itemcount * pytype.c_tp_itemsize
             assert size >= rffi.sizeof(PyObject.TO)
             buf = lltype.malloc(rffi.VOIDP.TO, size,
    -                            flavor='raw', zero=True)
    +                            flavor='raw', zero=True,
    +                            add_memory_pressure=True)
             pyobj = rffi.cast(PyObject, buf)
             pyobj.c_ob_refcnt = 1
             pyobj.c_ob_type = pytype
    diff --git a/pypy/module/cpyext/state.py b/pypy/module/cpyext/state.py
    --- a/pypy/module/cpyext/state.py
    +++ b/pypy/module/cpyext/state.py
    @@ -147,10 +147,10 @@
         """
     
         def perform(self, executioncontext, frame):
    -        from pypy.module.cpyext.pyobject import PyObject, _Py_Dealloc
    +        from pypy.module.cpyext.pyobject import PyObject, decref
     
             while True:
                 py_obj = rawrefcount.next_dead(PyObject)
                 if not py_obj:
                     break
    -            _Py_Dealloc(self.space, py_obj)
    +            decref(self.space, py_obj)
    diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py
    --- a/pypy/module/cpyext/test/test_bytesobject.py
    +++ b/pypy/module/cpyext/test/test_bytesobject.py
    @@ -139,6 +139,44 @@
                 ])
             module.getstring()
     
    +    def test_py_string_as_string_Unicode(self):
    +        module = self.import_extension('foo', [
    +            ("getstring_unicode", "METH_NOARGS",
    +             """
    +                 Py_UNICODE chars[] = {'t', 'e', 's', 't'};
    +                 PyObject* u1 = PyUnicode_FromUnicode(chars, 4);
    +                 char *buf;
    +                 buf = PyString_AsString(u1);
    +                 if (buf == NULL)
    +                     return NULL;
    +                 if (buf[3] != 't') {
    +                     PyErr_SetString(PyExc_AssertionError, "Bad conversion");
    +                     return NULL;
    +                 }
    +                 Py_DECREF(u1);
    +                 Py_INCREF(Py_None);
    +                 return Py_None;
    +             """),
    +            ("getstringandsize_unicode", "METH_NOARGS",
    +             """
    +                 Py_UNICODE chars[] = {'t', 'e', 's', 't'};
    +                 PyObject* u1 = PyUnicode_FromUnicode(chars, 4);
    +                 char *buf;
    +                 Py_ssize_t len;
    +                 if (PyString_AsStringAndSize(u1, &buf, &len) < 0)
    +                     return NULL;
    +                 if (len != 4) {
    +                     PyErr_SetString(PyExc_AssertionError, "Bad Length");
    +                     return NULL;
    +                 }
    +                 Py_DECREF(u1);
    +                 Py_INCREF(Py_None);
    +                 return Py_None;
    +             """),
    +            ])
    +        module.getstring_unicode()
    +        module.getstringandsize_unicode()
    +
         def test_format_v(self):
             module = self.import_extension('foo', [
                 ("test_string_format_v", "METH_VARARGS",
    diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py
    --- a/pypy/module/cpyext/test/test_unicodeobject.py
    +++ b/pypy/module/cpyext/test/test_unicodeobject.py
    @@ -24,7 +24,7 @@
                      if(PyUnicode_GetSize(s) == 11) {
                          result = 1;
                      }
    -                 if(s->ob_type->tp_basicsize != sizeof(void*)*5)
    +                 if(s->ob_type->tp_basicsize != sizeof(void*)*7)
                          result = 0;
                      Py_DECREF(s);
                      return PyBool_FromLong(result);
    @@ -66,6 +66,7 @@
                      c = PyUnicode_AsUnicode(s);
                      c[0] = 'a';
                      c[1] = 0xe9;
    +                 c[2] = 0x00;
                      c[3] = 'c';
                      return s;
                  """),
    @@ -74,7 +75,35 @@
             assert len(s) == 4
             assert s == u'a\xe9\x00c'
     
    +    def test_hash(self):
    +        module = self.import_extension('foo', [
    +            ("test_hash", "METH_VARARGS",
    +             '''
    +                PyObject* obj = (PyTuple_GetItem(args, 0));
    +                long hash = ((PyUnicodeObject*)obj)->hash;
    +                return PyLong_FromLong(hash);  
    +             '''
    +             ),
    +            ])
    +        res = module.test_hash(u"xyz")
    +        assert res == hash(u'xyz')
     
    +    def test_default_encoded_string(self):
    +        module = self.import_extension('foo', [
    +            ("test_default_encoded_string", "METH_O",
    +             '''
    +                PyObject* result = _PyUnicode_AsDefaultEncodedString(args, "replace");
    +                Py_INCREF(result);
    +                return result;
    +             '''
    +             ),
    +            ])
    +        res = module.test_default_encoded_string(u"xyz")
    +        assert isinstance(res, str)
    +        assert res == 'xyz'
    +        res = module.test_default_encoded_string(u"caf\xe9")
    +        assert isinstance(res, str)
    +        assert res == 'caf?'
     
     class TestUnicode(BaseApiTest):
         def test_unicodeobject(self, space, api):
    @@ -155,22 +184,22 @@
         def test_unicode_resize(self, space, api):
             py_uni = new_empty_unicode(space, 10)
             ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw')
    -        py_uni.c_buffer[0] = u'a'
    -        py_uni.c_buffer[1] = u'b'
    -        py_uni.c_buffer[2] = u'c'
    +        py_uni.c_str[0] = u'a'
    +        py_uni.c_str[1] = u'b'
    +        py_uni.c_str[2] = u'c'
             ar[0] = rffi.cast(PyObject, py_uni)
             api.PyUnicode_Resize(ar, 3)
             py_uni = rffi.cast(PyUnicodeObject, ar[0])
             assert py_uni.c_size == 3
    -        assert py_uni.c_buffer[1] == u'b'
    -        assert py_uni.c_buffer[3] == u'\x00'
    +        assert py_uni.c_str[1] == u'b'
    +        assert py_uni.c_str[3] == u'\x00'
             # the same for growing
             ar[0] = rffi.cast(PyObject, py_uni)
             api.PyUnicode_Resize(ar, 10)
             py_uni = rffi.cast(PyUnicodeObject, ar[0])
             assert py_uni.c_size == 10
    -        assert py_uni.c_buffer[1] == 'b'
    -        assert py_uni.c_buffer[10] == '\x00'
    +        assert py_uni.c_str[1] == 'b'
    +        assert py_uni.c_str[10] == '\x00'
             Py_DecRef(space, ar[0])
             lltype.free(ar, flavor='raw')
     
    diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py
    --- a/pypy/module/cpyext/tupleobject.py
    +++ b/pypy/module/cpyext/tupleobject.py
    @@ -59,7 +59,8 @@
         py_tup = rffi.cast(PyTupleObject, py_obj)
     
         py_tup.c_ob_item = lltype.malloc(ObjectItems, length,
    -                                     flavor='raw', zero=True)
    +                                     flavor='raw', zero=True,
    +                                     add_memory_pressure=True)
         py_tup.c_ob_size = length
         return py_tup
     
    @@ -70,7 +71,8 @@
         """
         items_w = space.fixedview(w_obj)
         l = len(items_w)
    -    p = lltype.malloc(ObjectItems, l, flavor='raw')
    +    p = lltype.malloc(ObjectItems, l, flavor='raw',
    +                      add_memory_pressure=True)
         i = 0
         try:
             while i < l:
    @@ -177,7 +179,8 @@
         ref = rffi.cast(PyTupleObject, ref)
         oldsize = ref.c_ob_size
         oldp = ref.c_ob_item
    -    newp = lltype.malloc(ObjectItems, newsize, zero=True, flavor='raw')
    +    newp = lltype.malloc(ObjectItems, newsize, zero=True, flavor='raw',
    +                         add_memory_pressure=True)
         try:
             if oldsize < newsize:
                 to_cp = oldsize
    diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
    --- a/pypy/module/cpyext/typeobject.py
    +++ b/pypy/module/cpyext/typeobject.py
    @@ -421,7 +421,8 @@
                 Py_DecRef(space, w_metatype)
     
         heaptype = lltype.malloc(PyHeapTypeObject.TO,
    -                             flavor='raw', zero=True)
    +                             flavor='raw', zero=True,
    +                             add_memory_pressure=True)
         pto = heaptype.c_ht_type
         pto.c_ob_refcnt = 1
         pto.c_ob_type = metatype
    diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py
    --- a/pypy/module/cpyext/unicodeobject.py
    +++ b/pypy/module/cpyext/unicodeobject.py
    @@ -22,7 +22,8 @@
     PyUnicodeObjectStruct = lltype.ForwardReference()
     PyUnicodeObject = lltype.Ptr(PyUnicodeObjectStruct)
     PyUnicodeObjectFields = (PyObjectFields +
    -    (("buffer", rffi.CWCHARP), ("size", Py_ssize_t)))
    +    (("str", rffi.CWCHARP), ("size", Py_ssize_t),
    +     ("hash", rffi.LONG), ("defenc", PyObject)))
     cpython_struct("PyUnicodeObject", PyUnicodeObjectFields, PyUnicodeObjectStruct)
     
     @bootstrap_function
    @@ -54,15 +55,20 @@
     
         buflen = length + 1
         py_uni.c_size = length
    -    py_uni.c_buffer = lltype.malloc(rffi.CWCHARP.TO, buflen,
    -                                    flavor='raw', zero=True)
    +    py_uni.c_str = lltype.malloc(rffi.CWCHARP.TO, buflen,
    +                                 flavor='raw', zero=True,
    +                                 add_memory_pressure=True)
    +    py_uni.c_hash = -1
    +    py_uni.c_defenc = lltype.nullptr(PyObject.TO)
         return py_uni
     
     def unicode_attach(space, py_obj, w_obj):
         "Fills a newly allocated PyUnicodeObject with a unicode string"
         py_unicode = rffi.cast(PyUnicodeObject, py_obj)
         py_unicode.c_size = len(space.unicode_w(w_obj))
    -    py_unicode.c_buffer = lltype.nullptr(rffi.CWCHARP.TO)
    +    py_unicode.c_str = lltype.nullptr(rffi.CWCHARP.TO)
    +    py_unicode.c_hash = space.hash_w(w_obj)
    +    py_unicode.c_defenc = lltype.nullptr(PyObject.TO)
     
     def unicode_realize(space, py_obj):
         """
    @@ -70,17 +76,20 @@
         be modified after this call.
         """
         py_uni = rffi.cast(PyUnicodeObject, py_obj)
    -    s = rffi.wcharpsize2unicode(py_uni.c_buffer, py_uni.c_size)
    +    s = rffi.wcharpsize2unicode(py_uni.c_str, py_uni.c_size)
         w_obj = space.wrap(s)
    +    py_uni.c_hash = space.hash_w(w_obj)
         track_reference(space, py_obj, w_obj)
         return w_obj
     
     @cpython_api([PyObject], lltype.Void, header=None)
     def unicode_dealloc(space, py_obj):
         py_unicode = rffi.cast(PyUnicodeObject, py_obj)
    -    if py_unicode.c_buffer:
    -        lltype.free(py_unicode.c_buffer, flavor="raw")
    +    if py_unicode.c_str:
    +        lltype.free(py_unicode.c_str, flavor="raw")
         from pypy.module.cpyext.object import PyObject_dealloc
    +    if py_unicode.c_defenc:
    +        PyObject_dealloc(space, py_unicode.c_defenc)
         PyObject_dealloc(space, py_obj)
     
     @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL)
    @@ -204,12 +213,12 @@
         """Return a pointer to the internal Py_UNICODE buffer of the object.  ref
         has to be a PyUnicodeObject (not checked)."""
         ref_unicode = rffi.cast(PyUnicodeObject, ref)
    -    if not ref_unicode.c_buffer:
    +    if not ref_unicode.c_str:
             # Copy unicode buffer
             w_unicode = from_ref(space, ref)
             u = space.unicode_w(w_unicode)
    -        ref_unicode.c_buffer = rffi.unicode2wcharp(u)
    -    return ref_unicode.c_buffer
    +        ref_unicode.c_str = rffi.unicode2wcharp(u)
    +    return ref_unicode.c_str
     
     @cpython_api([PyObject], rffi.CWCHARP)
     def PyUnicode_AsUnicode(space, ref):
    @@ -240,7 +249,7 @@
         string may or may not be 0-terminated.  It is the responsibility of the caller
         to make sure that the wchar_t string is 0-terminated in case this is
         required by the application."""
    -    c_buffer = PyUnicode_AS_UNICODE(space, rffi.cast(PyObject, ref))
    +    c_str = PyUnicode_AS_UNICODE(space, rffi.cast(PyObject, ref))
         c_size = ref.c_size
     
         # If possible, try to copy the 0-termination as well
    @@ -250,7 +259,7 @@
     
         i = 0
         while i < size:
    -        buf[i] = c_buffer[i]
    +        buf[i] = c_str[i]
             i += 1
     
         if size > c_size:
    @@ -342,8 +351,15 @@
         return PyUnicode_FromUnicode(space, wchar_p, length)
     
     @cpython_api([PyObject, CONST_STRING], PyObject)
    -def _PyUnicode_AsDefaultEncodedString(space, w_unicode, errors):
    -    return PyUnicode_AsEncodedString(space, w_unicode, lltype.nullptr(rffi.CCHARP.TO), errors)
    +def _PyUnicode_AsDefaultEncodedString(space, ref, errors):
    +    # Returns a borrowed reference.
    +    py_uni = rffi.cast(PyUnicodeObject, ref)
    +    if not py_uni.c_defenc:
    +        py_uni.c_defenc = make_ref(
    +            space, PyUnicode_AsEncodedString(
    +                space, ref,
    +                lltype.nullptr(rffi.CCHARP.TO), errors))
    +    return py_uni.c_defenc
     
     @cpython_api([CONST_STRING, Py_ssize_t, CONST_STRING, CONST_STRING], PyObject)
     def PyUnicode_Decode(space, s, size, encoding, errors):
    @@ -443,7 +459,7 @@
     def PyUnicode_Resize(space, ref, newsize):
         # XXX always create a new string so far
         py_uni = rffi.cast(PyUnicodeObject, ref[0])
    -    if not py_uni.c_buffer:
    +    if not py_uni.c_str:
             raise OperationError(space.w_SystemError, space.wrap(
                 "PyUnicode_Resize called on already created string"))
         try:
    @@ -457,7 +473,7 @@
         if oldsize < newsize:
             to_cp = oldsize
         for i in range(to_cp):
    -        py_newuni.c_buffer[i] = py_uni.c_buffer[i]
    +        py_newuni.c_str[i] = py_uni.c_str[i]
         Py_DecRef(space, ref[0])
         ref[0] = rffi.cast(PyObject, py_newuni)
         return 0
    diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py
    --- a/pypy/module/imp/test/test_import.py
    +++ b/pypy/module/imp/test/test_import.py
    @@ -109,7 +109,7 @@
             import marshal, stat, struct, os, imp
             code = py.code.Source(p.join("x.py").read()).compile()
             s3 = marshal.dumps(code)
    -        s2 = struct.pack("i", os.stat(str(p.join("x.py")))[stat.ST_MTIME])
    +        s2 = struct.pack("<i", os.stat(str(p.join("x.py")))[stat.ST_MTIME])
    diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py
    --- a/pypy/module/micronumpy/ndarray.py
    +++ b/pypy/module/micronumpy/ndarray.py
    -                if axis < 0 or axis >= self.ndims():
    -                    raise oefmt(space.w_ValueError, "invalid axis for this array")
    -                if axes_seen[axis] is True:
    -                    raise oefmt(space.w_ValueError, "repeated axis in transpose")
    -                axes.append(axis)
    -                axes_seen[axis] = True
    -            return self.descr_get_transpose(space, axes)
    +            if len(args_w) > 1:
    +                axes = args_w
    +            else:  # Iterable in the only argument (len(arg_w) == 1 and arg_w[0] is not None)
    +                axes = space.fixedview(args_w[0])
     
    +        axes = self._checked_axes(axes, space)
    +        return self.descr_get_transpose(space, axes)
    +
    +    def _checked_axes(self, axes_raw, space):
    +        if len(axes_raw) != self.ndims():
    +            raise oefmt(space.w_ValueError, "axes don't match array")
    +        axes = []
    +        axes_seen = [False] * self.ndims()
    +        for elem in axes_raw:
    +            try:
    +                axis = support.index_w(space, elem)
    +            except OperationError:
    +                raise oefmt(space.w_TypeError, "an integer is required")
    +            if axis < 0 or axis >= self.ndims():
    +                raise oefmt(space.w_ValueError, "invalid axis for this array")
    +            if axes_seen[axis] is True:
    +                raise oefmt(space.w_ValueError, "repeated axis in transpose")
    +            axes.append(axis)
    +            axes_seen[axis] = True
    +        return axes
     
         @unwrap_spec(axis1=int, axis2=int)
         def descr_swapaxes(self, space, axis1, axis2):
    diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py
    --- a/pypy/module/micronumpy/test/test_ndarray.py
    +++ b/pypy/module/micronumpy/test/test_ndarray.py
    @@ -2960,6 +2960,36 @@
             assert (a.transpose() == b).all()
             assert (a.transpose(None) == b).all()
     
    +    def test_transpose_arg_tuple(self):
    +        import numpy as np
    +        a = np.arange(24).reshape(2, 3, 4)
    +        transpose_args = a.transpose(1, 2, 0)
    +
    +        transpose_test = a.transpose((1, 2, 0))
    +
    +        assert transpose_test.shape == (3, 4, 2)
    +        assert (transpose_args == transpose_test).all()
    +
    +    def test_transpose_arg_list(self):
    +        import numpy as np
    +        a = np.arange(24).reshape(2, 3, 4)
    +        transpose_args = a.transpose(1, 2, 0)
    +
    +        transpose_test = a.transpose([1, 2, 0])
    +
    +        assert transpose_test.shape == (3, 4, 2)
    +        assert (transpose_args == transpose_test).all()
    +
    +    def test_transpose_arg_array(self):
    +        import numpy as np
    +        a = np.arange(24).reshape(2, 3, 4)
    +        transpose_args = a.transpose(1, 2, 0)
    +
    +        transpose_test = a.transpose(np.array([1, 2, 0]))
    +
    +        assert transpose_test.shape == (3, 4, 2)
    +        assert (transpose_args == transpose_test).all()
    +
         def test_transpose_error(self):
             import numpy as np
             a = np.arange(24).reshape(2, 3, 4)
    @@ -2968,6 +2998,11 @@
             raises(ValueError, a.transpose, 1, 0, 1)
             raises(TypeError, a.transpose, 1, 0, '2')
     
    +    def test_transpose_unexpected_argument(self):
    +        import numpy as np
    +        a = np.array([[1, 2], [3, 4], [5, 6]])
    +        raises(TypeError, 'a.transpose(axes=(1,2,0))')
    +
         def test_flatiter(self):
             from numpy import array, flatiter, arange, zeros
             a = array([[10, 30], [40, 60]])
    diff --git a/pypy/module/select/test/test_select.py b/pypy/module/select/test/test_select.py
    --- a/pypy/module/select/test/test_select.py
    +++ b/pypy/module/select/test/test_select.py
    @@ -287,8 +287,7 @@
                 t = thread.start_new_thread(pollster.poll, ())
                 try:
                     time.sleep(0.3)
    -                # TODO restore print '', if this is not the reason
    -                for i in range(5): print 'release gil select'  # to release GIL untranslated
    +                for i in range(5): print '',  # to release GIL untranslated
                     # trigger ufds array reallocation
                     for fd in rfds:
                         pollster.unregister(fd)
    diff --git a/pypy/module/thread/test/test_lock.py b/pypy/module/thread/test/test_lock.py
    --- a/pypy/module/thread/test/test_lock.py
    +++ b/pypy/module/thread/test/test_lock.py
    @@ -3,7 +3,7 @@
     import sys, os
     from pypy.module.thread.test.support import GenericTestThread
     from rpython.translator.c.test.test_genc import compile
    -import platform
    +from platform import machine
     
     
     class AppTestLock(GenericTestThread):
    @@ -64,8 +64,7 @@
             else:
                 assert self.runappdirect, "missing lock._py3k_acquire()"
     
    -    @py.test.mark.xfail(platform.machine() == 's390x',
    -                        reason='may fail this test under heavy load')
    +    @py.test.mark.xfail(machine()=='s390x', reason='may fail under heavy load')
         def test_ping_pong(self):
             # The purpose of this test is that doing a large number of ping-pongs
             # between two threads, using locks, should complete in a reasonable
    diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
    --- a/pypy/objspace/std/mapdict.py
    +++ b/pypy/objspace/std/mapdict.py
    @@ -185,14 +185,6 @@
                 cache[name, index] = attr
             return attr
     
    -    @jit.elidable
    -    def _get_cache_attr(self, name, index):
    -        key = name, index
    -        # this method is not actually elidable, but it's fine anyway
    -        if self.cache_attrs is not None:
    -            return self.cache_attrs.get(key, None)
    -        return None
    -
         def add_attr(self, obj, name, index, w_value):
             self._reorder_and_add(obj, name, index, w_value)
             if not jit.we_are_jitted():
    diff --git a/pypy/tool/release/repackage.sh b/pypy/tool/release/repackage.sh
    --- a/pypy/tool/release/repackage.sh
    +++ b/pypy/tool/release/repackage.sh
    @@ -1,9 +1,9 @@
     # Edit these appropriately before running this script
     maj=5
     min=0
    -rev=0
    +rev=1
     branchname=release-$maj.x  # ==OR== release-$maj.$min.x
    -tagname=release-$maj.$min  # ==OR== release-$maj.$min.$rev
    +tagname=release-$maj.$min.$rev
     # This script will download latest builds from the buildmaster, rename the top
     # level directory, and repackage ready to be uploaded to bitbucket. It will also
     # download source, assuming a tag for the release already exists, and repackage them.
    diff --git a/rpython/jit/backend/detect_cpu.py b/rpython/jit/backend/detect_cpu.py
    --- a/rpython/jit/backend/detect_cpu.py
    +++ b/rpython/jit/backend/detect_cpu.py
    @@ -66,6 +66,7 @@
                 'x86_64': MODEL_X86,
                 'amd64': MODEL_X86,    # freebsd
                 'AMD64': MODEL_X86,    # win64
    +            'armv8l': MODEL_ARM,   # 32-bit ARMv8
                 'armv7l': MODEL_ARM,
                 'armv6l': MODEL_ARM,
                 'arm': MODEL_ARM,      # freebsd
    diff --git a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py
    --- a/rpython/jit/codewriter/jtransform.py
    +++ b/rpython/jit/codewriter/jtransform.py
    @@ -792,11 +792,13 @@
                 return []
             # check for _immutable_fields_ hints
             immut = v_inst.concretetype.TO._immutable_field(c_fieldname.value)
    +        need_live = False
             if immut:
                 if (self.callcontrol is not None and
                     self.callcontrol.could_be_green_field(v_inst.concretetype.TO,
                                                           c_fieldname.value)):
                     pure = '_greenfield'
    +                need_live = True
                 else:
                     pure = '_pure'
             else:
    @@ -823,10 +825,12 @@
                 descr1 = self.cpu.fielddescrof(
                     v_inst.concretetype.TO,
                     quasiimmut.get_mutate_field_name(c_fieldname.value))
    -            op1 = [SpaceOperation('-live-', [], None),
    +            return [SpaceOperation('-live-', [], None),
                        SpaceOperation('record_quasiimmut_field',
                                       [v_inst, descr, descr1], None),
                        op1]
    +        if need_live:
    +            return [SpaceOperation('-live-', [], None), op1]
             return op1
     
         def rewrite_op_setfield(self, op, override_type=None):
    diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py
    --- a/rpython/jit/codewriter/test/test_jtransform.py
    +++ b/rpython/jit/codewriter/test/test_jtransform.py
    @@ -1029,7 +1029,8 @@
         v1 = varoftype(lltype.Ptr(S))
         v2 = varoftype(lltype.Char)
         op = SpaceOperation('getfield', [v1, Constant('x', lltype.Void)], v2)
    -    op1 = Transformer(FakeCPU(), FakeCC()).rewrite_operation(op)
    +    op0, op1 = Transformer(FakeCPU(), FakeCC()).rewrite_operation(op)
    +    assert op0.opname == '-live-'
         assert op1.opname == 'getfield_gc_i_greenfield'
         assert op1.args == [v1, ('fielddescr', S, 'x')]
         assert op1.result == v2
    diff --git a/rpython/memory/gc/incminimark.py b/rpython/memory/gc/incminimark.py
    --- a/rpython/memory/gc/incminimark.py
    +++ b/rpython/memory/gc/incminimark.py
    @@ -2929,10 +2929,19 @@
                 ll_assert(rc < int(REFCNT_FROM_PYPY_LIGHT * 0.99),
                           "refcount underflow from REFCNT_FROM_PYPY_LIGHT?")
                 rc -= REFCNT_FROM_PYPY
    -            self._pyobj(pyobject).ob_refcnt = rc
                 self._pyobj(pyobject).ob_pypy_link = 0
                 if rc == 0:
                     self.rrc_dealloc_pending.append(pyobject)
    +                # an object with refcnt == 0 cannot stay around waiting
    +                # for its deallocator to be called.  Some code (lxml)
    +                # expects that tp_dealloc is called immediately when
    +                # the refcnt drops to 0.  If it isn't, we get some
    +                # uncleared raw pointer that can still be used to access
    +                # the object; but (PyObject *)raw_pointer is then bogus
    +                # because after a Py_INCREF()/Py_DECREF() on it, its
    +                # tp_dealloc is also called!
    +                rc = 1
    +            self._pyobj(pyobject).ob_refcnt = rc
         _rrc_free._always_inline_ = True
     
         def rrc_major_collection_trace(self):
    diff --git a/rpython/memory/gc/test/test_rawrefcount.py b/rpython/memory/gc/test/test_rawrefcount.py
    --- a/rpython/memory/gc/test/test_rawrefcount.py
    +++ b/rpython/memory/gc/test/test_rawrefcount.py
    @@ -174,7 +174,7 @@
             p1 = check_alive(0)
             self._collect(major=True, expected_trigger=1)
             py.test.raises(RuntimeError, "p1.x")            # dead
    -        assert r1.ob_refcnt == 0
    +        assert r1.ob_refcnt == 1       # in the pending list
             assert r1.ob_pypy_link == 0
             assert self.gc.rawrefcount_next_dead() == r1addr
             assert self.gc.rawrefcount_next_dead() == llmemory.NULL
    @@ -197,7 +197,7 @@
             assert p1.x == 42
             self._collect(major=True, expected_trigger=1)
             py.test.raises(RuntimeError, "p1.x")            # dead
    -        assert r1.ob_refcnt == 0
    +        assert r1.ob_refcnt == 1
             assert r1.ob_pypy_link == 0
             assert self.gc.rawrefcount_next_dead() == r1addr
             self.gc.check_no_more_rawrefcount_state()
    @@ -214,7 +214,7 @@
             else:
                 self._collect(major=False, expected_trigger=1)
             py.test.raises(RuntimeError, "p1.x")            # dead
    -        assert r1.ob_refcnt == 0
    +        assert r1.ob_refcnt == 1
             assert r1.ob_pypy_link == 0
             assert self.gc.rawrefcount_next_dead() == r1addr
             self.gc.check_no_more_rawrefcount_state()
    @@ -252,7 +252,7 @@
                 self._collect(major=True, expected_trigger=1)
             else:
                 self._collect(major=False, expected_trigger=1)
    -        assert r1.ob_refcnt == 0     # refcnt dropped to 0
    +        assert r1.ob_refcnt == 1     # refcnt 1, in the pending list
             assert r1.ob_pypy_link == 0  # detached
             assert self.gc.rawrefcount_next_dead() == r1addr
             self.gc.check_no_more_rawrefcount_state()
    @@ -277,7 +277,7 @@
             assert self.trigger == []
             self._collect(major=True, expected_trigger=1)
             py.test.raises(RuntimeError, "p1.x")            # dead
    -        assert r1.ob_refcnt == 0
    +        assert r1.ob_refcnt == 1
             assert r1.ob_pypy_link == 0
             assert self.gc.rawrefcount_next_dead() == r1addr
             self.gc.check_no_more_rawrefcount_state()
    diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py
    --- a/rpython/rlib/jit.py
    +++ b/rpython/rlib/jit.py
    @@ -1099,6 +1099,14 @@
         of JIT running like JIT loops compiled, aborts etc.
         An instance of this class will be available as policy.jithookiface.
         """
    +    # WARNING: You should make a single prebuilt instance of a subclass
    +    # of this class.  You can, before translation, initialize some
    +    # attributes on this instance, and then read or change these
    +    # attributes inside the methods of the subclass.  But this prebuilt
    +    # instance *must not* be seen during the normal annotation/rtyping
    +    # of the program!  A line like ``pypy_hooks.foo = ...`` must not
    +    # appear inside your interpreter's RPython code.
    +
         def on_abort(self, reason, jitdriver, greenkey, greenkey_repr, logops, operations):
             """ A hook called each time a loop is aborted with jitdriver and
             greenkey where it started, reason is a string why it got aborted
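    A minimal sketch of the pattern this warning describes (the subclass name
    and the 'aborts_seen' attribute are made up for illustration): a single
    prebuilt instance of a JitHookInterface subclass, whose attributes are
    initialized once before translation and afterwards only read or updated
    from inside the subclass's methods.

        from rpython.rlib.jit import JitHookInterface

        class MyJitHooks(JitHookInterface):
            def on_abort(self, reason, jitdriver, greenkey, greenkey_repr,
                         logops, operations):
                # reads/updates an attribute of the prebuilt instance; no
                # ``my_jit_hooks.foo = ...`` line appears in interpreter code
                self.aborts_seen += 1

        my_jit_hooks = MyJitHooks()    # the single prebuilt instance
        my_jit_hooks.aborts_seen = 0   # initialized before translation only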
    diff --git a/rpython/rlib/rawrefcount.py b/rpython/rlib/rawrefcount.py
    --- a/rpython/rlib/rawrefcount.py
    +++ b/rpython/rlib/rawrefcount.py
    @@ -72,6 +72,12 @@
         return p
     
     def next_dead(OB_PTR_TYPE):
    +    """NOT_RPYTHON.  When the GC runs, it finds some pyobjs to be dead
    +    but cannot immediately dispose of them (it doesn't know how to call
    +    e.g. tp_dealloc(), and anyway calling it immediately would cause all
    +    sorts of bugs).  So instead, it stores them in an internal list,
    +    initially with refcnt == 1.  This pops the next item off this list.
    +    """
         if len(_d_list) == 0:
             return lltype.nullptr(OB_PTR_TYPE.TO)
         ob = _d_list.pop()
    @@ -136,6 +142,7 @@
                     ob.c_ob_refcnt -= REFCNT_FROM_PYPY
                     ob.c_ob_pypy_link = 0
                     if ob.c_ob_refcnt == 0:
    +                    ob.c_ob_refcnt = 1
                         _d_list.append(ob)
                 return None
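    To make the new next_dead() docstring concrete, here is a small sketch of
    a caller draining the pending list; it mirrors the dealloc_trigger() code
    in cpyext earlier in this batch, with 'decref' standing in for whatever
    callback knows how to run the actual tp_dealloc:

        from rpython.rlib import rawrefcount

        def drain_dead_pyobjs(space, PyObject, decref):
            # pops the dead pyobjects (stored with refcnt == 1, as explained
            # above) one by one until the internal list is empty
            while True:
                ob = rawrefcount.next_dead(PyObject)
                if not ob:
                    break
                decref(space, ob)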
     
    diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py
    --- a/rpython/rlib/rposix.py
    +++ b/rpython/rlib/rposix.py
    @@ -22,6 +22,22 @@
         from rpython.rlib import rwin32
         from rpython.rlib.rwin32file import make_win32_traits
     
    +class CConfig:
    +    _compilation_info_ = ExternalCompilationInfo(
    +        includes=['sys/stat.h',
    +                  'unistd.h',
    +                  'fcntl.h'],
    +    )
    +    for _name in """fchdir fchmod fchmodat fchown fchownat fexecve fdopendir
    +                    fpathconf fstat fstatat fstatvfs ftruncate futimens futimes
    +                    futimesat linkat lchflags lchmod lchown lstat lutimes
    +                    mkdirat mkfifoat mknodat openat readlinkat renameat
    +                    symlinkat unlinkat utimensat""".split():
    +        locals()['HAVE_%s' % _name.upper()] = rffi_platform.Has(_name)
    +cConfig = rffi_platform.configure(CConfig)
    +globals().update(cConfig)
    +
    +
     class CConstantErrno(CConstant):
         # these accessors are used when calling get_errno() or set_errno()
         # on top of CPython
    @@ -1024,6 +1040,13 @@
             if not win32traits.MoveFile(path1, path2):
                 raise rwin32.lastSavedWindowsError()
     
    + at specialize.argtype(0, 1)
    +def replace(path1, path2):
    +    if os.name == 'nt':
    +        raise NotImplementedError(
    +            'On windows, os.replace() should overwrite the destination')
    +    return rename(path1, path2)
    +
     #___________________________________________________________________
     
     c_mkfifo = external('mkfifo', [rffi.CCHARP, rffi.MODE_T], rffi.INT,
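    The platform probes added above follow a reusable pattern:
    rffi_platform.Has() becomes a plain boolean after configure(), so callers
    can simply test e.g. rposix.HAVE_FCHMODAT at translation time (as
    interp_posix does earlier in this batch).  A stripped-down sketch of the
    same pattern, probing a single function (names chosen for illustration):

        from rpython.rtyper.tool import rffi_platform
        from rpython.translator.tool.cbuild import ExternalCompilationInfo

        class _Probe:
            _compilation_info_ = ExternalCompilationInfo(
                includes=['sys/stat.h', 'unistd.h', 'fcntl.h'])
            HAVE_FCHMODAT = rffi_platform.Has('fchmodat')

        HAVE_FCHMODAT = rffi_platform.configure(_Probe)['HAVE_FCHMODAT']

        if HAVE_FCHMODAT:
            # declare and call the C function here, as rposix.py does
            pass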
    diff --git a/rpython/rlib/rtime.py b/rpython/rlib/rtime.py
    --- a/rpython/rlib/rtime.py
    +++ b/rpython/rlib/rtime.py
    @@ -9,7 +9,6 @@
     from rpython.rtyper.tool import rffi_platform
     from rpython.rtyper.lltypesystem import rffi, lltype
     from rpython.rlib.objectmodel import register_replacement_for
    -from rpython.rlib import jit
     from rpython.rlib.rarithmetic import intmask, UINT_MAX
     from rpython.rlib import rposix
     
    @@ -170,28 +169,30 @@
                                    [lltype.Signed, lltype.Ptr(TIMESPEC)],
                                    rffi.INT, releasegil=False,
                                    compilation_info=eci_with_lrt)
    -else:
    +if need_rusage:
         RUSAGE = RUSAGE
         RUSAGE_SELF = RUSAGE_SELF or 0
         c_getrusage = external('getrusage',
                                [rffi.INT, lltype.Ptr(RUSAGE)],
    -                           lltype.Void,
    +                           rffi.INT,
                                releasegil=False)
     
    +def win_perf_counter():
    +    a = lltype.malloc(A, flavor='raw')
    +    if state.divisor == 0.0:
    +        QueryPerformanceCounter(a)
    +        state.counter_start = a[0]
    +        QueryPerformanceFrequency(a)
    +        state.divisor = float(a[0])
    +    QueryPerformanceCounter(a)
    +    diff = a[0] - state.counter_start
    +    lltype.free(a, flavor='raw')
    +    return float(diff) / state.divisor
    +
     @replace_time_function('clock')
    - at jit.dont_look_inside  # the JIT doesn't like FixedSizeArray
     def clock():
         if _WIN32:
    -        a = lltype.malloc(A, flavor='raw')
    -        if state.divisor == 0.0:
    -            QueryPerformanceCounter(a)
    -            state.counter_start = a[0]
    -            QueryPerformanceFrequency(a)
    -            state.divisor = float(a[0])
    -        QueryPerformanceCounter(a)
    -        diff = a[0] - state.counter_start
    -        lltype.free(a, flavor='raw')
    -        return float(diff) / state.divisor
    +        return win_perf_counter()
         elif CLOCK_PROCESS_CPUTIME_ID is not None:
             with lltype.scoped_alloc(TIMESPEC) as a:
                 c_clock_gettime(CLOCK_PROCESS_CPUTIME_ID, a)
    diff --git a/rpython/rlib/rvmprof/src/vmprof_common.h b/rpython/rlib/rvmprof/src/vmprof_common.h
    --- a/rpython/rlib/rvmprof/src/vmprof_common.h
    +++ b/rpython/rlib/rvmprof/src/vmprof_common.h
    @@ -24,14 +24,14 @@
         char padding[sizeof(long) - 1];
         char marker;
         long count, depth;
    -    void *stack[];
    +    intptr_t stack[];
     } prof_stacktrace_s;
     
     
     RPY_EXTERN
     char *vmprof_init(int fd, double interval, char *interp_name)
     {
    -    if (interval < 1e-6 || interval >= 1.0)
    +    if (!(interval >= 1e-6 && interval < 1.0))   /* also if it is NaN */
             return "bad value for 'interval'";
         prepare_interval_usec = (int)(interval * 1000000.0);
     
    diff --git a/rpython/rlib/rvmprof/src/vmprof_config.h b/rpython/rlib/rvmprof/src/vmprof_config.h
    --- a/rpython/rlib/rvmprof/src/vmprof_config.h
    +++ b/rpython/rlib/rvmprof/src/vmprof_config.h
    @@ -1,6 +1,10 @@
     #define HAVE_SYS_UCONTEXT_H
    -#if defined(__FreeBSD__)
    -#define PC_FROM_UCONTEXT uc_mcontext.mc_rip
    +#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    +  #ifdef __i386__
    +    #define PC_FROM_UCONTEXT uc_mcontext.mc_eip
    +  #else
    +    #define PC_FROM_UCONTEXT uc_mcontext.mc_rip
    +  #endif
     #elif defined( __APPLE__)
       #if ((ULONG_MAX) == (UINT_MAX))
         #define PC_FROM_UCONTEXT uc_mcontext->__ss.__eip
    @@ -8,10 +12,10 @@
         #define PC_FROM_UCONTEXT uc_mcontext->__ss.__rip
       #endif
     #elif defined(__arm__)
    -#define PC_FROM_UCONTEXT uc_mcontext.arm_ip
    +  #define PC_FROM_UCONTEXT uc_mcontext.arm_ip
     #elif defined(__linux) && defined(__i386) && defined(__GNUC__)
    -#define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_EIP]
    +  #define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_EIP]
     #else
    -/* linux, gnuc */
    -#define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_RIP]
    +  /* linux, gnuc */
    +  #define PC_FROM_UCONTEXT uc_mcontext.gregs[REG_RIP]
     #endif
    diff --git a/rpython/rlib/rvmprof/src/vmprof_main_win32.h b/rpython/rlib/rvmprof/src/vmprof_main_win32.h
    --- a/rpython/rlib/rvmprof/src/vmprof_main_win32.h
    +++ b/rpython/rlib/rvmprof/src/vmprof_main_win32.h
    @@ -101,7 +101,7 @@
         depth = get_stack_trace(p->vmprof_tl_stack,
                          stack->stack, MAX_STACK_DEPTH-2, ctx.Eip);
         stack->depth = depth;
    -    stack->stack[depth++] = (void*)p->thread_ident;
    +    stack->stack[depth++] = p->thread_ident;
         stack->count = 1;
         stack->marker = MARKER_STACKTRACE;
         ResumeThread(hThread);
    diff --git a/rpython/rlib/test/test_rawrefcount.py b/rpython/rlib/test/test_rawrefcount.py
    --- a/rpython/rlib/test/test_rawrefcount.py
    +++ b/rpython/rlib/test/test_rawrefcount.py
    @@ -116,7 +116,7 @@
             assert rawrefcount.next_dead(PyObject) == lltype.nullptr(PyObjectS)
             assert rawrefcount._o_list == []
             assert wr_p() is None
    -        assert ob.c_ob_refcnt == 0
    +        assert ob.c_ob_refcnt == 1       # from the pending list
             assert ob.c_ob_pypy_link == 0
             lltype.free(ob, flavor='raw')
     
    @@ -173,7 +173,7 @@
             assert rawrefcount._d_list == [ob]
             assert rawrefcount._p_list == []
             assert wr_p() is None
    -        assert ob.c_ob_refcnt == 0
    +        assert ob.c_ob_refcnt == 1       # from _d_list
             assert ob.c_ob_pypy_link == 0
             lltype.free(ob, flavor='raw')
     
    diff --git a/rpython/tool/runsubprocess.py b/rpython/tool/runsubprocess.py
    --- a/rpython/tool/runsubprocess.py
    +++ b/rpython/tool/runsubprocess.py
    @@ -20,6 +20,8 @@
     def _run(executable, args, env, cwd):
         # note that this function can be *overridden* below
         # in some cases!
    +    if sys.platform == 'win32':
    +        executable = executable.replace('/','\\')
         if isinstance(args, str):
             args = str(executable) + ' ' + args
             shell = True
    diff --git a/rpython/translator/c/src/thread_pthread.c b/rpython/translator/c/src/thread_pthread.c
    --- a/rpython/translator/c/src/thread_pthread.c
    +++ b/rpython/translator/c/src/thread_pthread.c
    @@ -37,7 +37,7 @@
     #  define THREAD_STACK_SIZE   0   /* use default stack size */
     # endif
     
    -# if (defined(__APPLE__) || defined(__FreeBSD__)) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
    +# if (defined(__APPLE__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
        /* The default stack size for new threads on OSX is small enough that
         * we'll get hard crashes instead of 'maximum recursion depth exceeded'
         * exceptions.
    @@ -84,7 +84,7 @@
     	if (tss != 0)
     		pthread_attr_setstacksize(&attrs, tss);
     #endif
    -#if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED) && !defined(__FreeBSD__)
    +#if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED) && !(defined(__FreeBSD__) || defined(__FreeBSD_kernel__))
             pthread_attr_setscope(&attrs, PTHREAD_SCOPE_SYSTEM);
     #endif
     
    
    From pypy.commits at gmail.com  Fri Mar 25 17:15:43 2016
    From: pypy.commits at gmail.com (cfbolz)
    Date: Fri, 25 Mar 2016 14:15:43 -0700 (PDT)
    Subject: [pypy-commit] pypy guard-compatible: grumble
    Message-ID: <56f5aa7f.2976c20a.d610c.ffffcc27@mx.google.com>
    
    Author: Carl Friedrich Bolz 
    Branch: guard-compatible
    Changeset: r83377:dd33eb1a6551
    Date: 2016-03-25 16:39 +0100
    http://bitbucket.org/pypy/pypy/changeset/dd33eb1a6551/
    
    Log:	grumble
    
    diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
    --- a/rpython/jit/metainterp/pyjitpl.py
    +++ b/rpython/jit/metainterp/pyjitpl.py
    @@ -2218,7 +2218,7 @@
     
     
         def attach_debug_info(self, op):
    -        if (op is not None and self.framestack is not None):
    +        if (op is not None and self.framestack):
                 if not we_are_translated():
                     op.pc = self.framestack[-1].pc
                 op.rpyfunc = self.framestack[-1].jitcode.name
    
    From pypy.commits at gmail.com  Fri Mar 25 17:15:38 2016
    From: pypy.commits at gmail.com (cfbolz)
    Date: Fri, 25 Mar 2016 14:15:38 -0700 (PDT)
    Subject: [pypy-commit] pypy guard-compatible: do annspecialcase for lookup
     after the mapdict stuff
    Message-ID: <56f5aa7a.c856c20a.787a3.ffffc825@mx.google.com>
    
    Author: Carl Friedrich Bolz 
    Branch: guard-compatible
    Changeset: r83374:a5db7ce2b186
    Date: 2016-03-25 09:23 +0100
    http://bitbucket.org/pypy/pypy/changeset/a5db7ce2b186/
    
    Log:	do annspecialcase for lookup after the mapdict stuff
    
    diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
    --- a/pypy/objspace/std/mapdict.py
    +++ b/pypy/objspace/std/mapdict.py
    @@ -1145,11 +1145,11 @@
     # ____________________________________________________________
     # various functions that replace objspace implementations
     
    + at objectmodel.specialize.arg_or_var(2)
     def mapdict_lookup(space, w_obj, name):
         if we_are_jitted():
             map = w_obj._get_mapdict_map_no_promote()
             if map is not None:
                 return map._type_lookup(name)
    -    w_type = space.type(w_obj)
    -    return w_type.lookup(name)
    +    return space._lookup(w_obj, name)
     
    diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
    --- a/pypy/objspace/std/objspace.py
    +++ b/pypy/objspace/std/objspace.py
    @@ -323,10 +323,15 @@
             jit.promote(w_obj.__class__)
             return w_obj.getclass(self)
     
    +    @specialize.arg_or_var(2)
         def lookup(self, w_obj, name):
             if self.config.objspace.std.withmapdict:
                 from pypy.objspace.std.mapdict import mapdict_lookup
                 return mapdict_lookup(self, w_obj, name)
    +        # an indirection for the benefit of mapdict
    +        return self._lookup(w_obj, name)
    +
    +    def _lookup(self, w_obj, name):
             w_type = self.type(w_obj)
             return w_type.lookup(name)
         lookup._annspecialcase_ = 'specialize:lookup'
    
    From pypy.commits at gmail.com  Fri Mar 25 17:15:45 2016
    From: pypy.commits at gmail.com (cfbolz)
    Date: Fri, 25 Mar 2016 14:15:45 -0700 (PDT)
    Subject: [pypy-commit] pypy guard-compatible: oops,
     move the _annspecialcase_ to the new function
    Message-ID: <56f5aa81.d4b61c0a.69316.2b0d@mx.google.com>
    
    Author: Carl Friedrich Bolz 
    Branch: guard-compatible
    Changeset: r83378:d5f8ea5bac55
    Date: 2016-03-25 22:14 +0100
    http://bitbucket.org/pypy/pypy/changeset/d5f8ea5bac55/
    
    Log:	oops, move the _annspecialcase_ to the new function
    
    diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
    --- a/pypy/objspace/std/objspace.py
    +++ b/pypy/objspace/std/objspace.py
    @@ -334,7 +334,7 @@
         def _lookup(self, w_obj, name):
             w_type = self.type(w_obj)
             return w_type.lookup(name)
    -    lookup._annspecialcase_ = 'specialize:lookup'
    +    _lookup._annspecialcase_ = 'specialize:lookup'
     
         def lookup_in_type_where(self, w_type, name):
             return w_type.lookup_where(name)
    
    From pypy.commits at gmail.com  Fri Mar 25 17:15:47 2016
    From: pypy.commits at gmail.com (cfbolz)
    Date: Fri, 25 Mar 2016 14:15:47 -0700 (PDT)
    Subject: [pypy-commit] pypy guard-compatible: print a bit less
    Message-ID: <56f5aa83.890bc30a.8c36f.ffffc11e@mx.google.com>
    
    Author: Carl Friedrich Bolz 
    Branch: guard-compatible
    Changeset: r83379:9c4f6b7c5e3e
    Date: 2016-03-25 22:14 +0100
    http://bitbucket.org/pypy/pypy/changeset/9c4f6b7c5e3e/
    
    Log:	print a bit less
    
    diff --git a/rpython/jit/metainterp/compatible.py b/rpython/jit/metainterp/compatible.py
    --- a/rpython/jit/metainterp/compatible.py
    +++ b/rpython/jit/metainterp/compatible.py
    @@ -36,6 +36,8 @@
                 if oldcond.same_cond(cond, res):
                     return
             cond.activate(res, optimizer)
    +        if self.conditions and self.conditions[-1].debug_mp_str == cond.debug_mp_str:
    +            cond.debug_mp_str = ''
             self.conditions.append(cond)
     
         def register_quasi_immut_field(self, op):
    
    From pypy.commits at gmail.com  Fri Mar 25 17:15:42 2016
    From: pypy.commits at gmail.com (cfbolz)
    Date: Fri, 25 Mar 2016 14:15:42 -0700 (PDT)
    Subject: [pypy-commit] pypy guard-compatible: store more debug info on the
     Conditions, as it can be very hard to figure out
    Message-ID: <56f5aa7e.e7bec20a.64c87.ffffc982@mx.google.com>
    
    Author: Carl Friedrich Bolz 
    Branch: guard-compatible
    Changeset: r83376:90e798a6ec1f
    Date: 2016-03-25 14:37 +0100
    http://bitbucket.org/pypy/pypy/changeset/90e798a6ec1f/
    
    Log:	store more debug info on the Conditions, as it can be very hard to
    	figure out where they come from
    
    diff --git a/rpython/jit/metainterp/compatible.py b/rpython/jit/metainterp/compatible.py
    --- a/rpython/jit/metainterp/compatible.py
    +++ b/rpython/jit/metainterp/compatible.py
    @@ -55,11 +55,11 @@
             copied_op = op.copy()
             copied_op.setarg(1, self.known_valid)
             if op.numargs() == 2:
    -            return copied_op, PureCallCondition(op, optimizer.metainterp_sd)
    +            return copied_op, PureCallCondition(op, optimizer)
             arg2 = copied_op.getarg(2)
             if arg2.is_constant():
                 # already a constant, can just use PureCallCondition
    -            return copied_op, PureCallCondition(op, optimizer.metainterp_sd)
    +            return copied_op, PureCallCondition(op, optimizer)
     
             # really simple-minded pattern matching
             # the order of things is like this:
    @@ -85,15 +85,25 @@
             copied_op.setarg(2, qmutdescr.constantfieldbox)
             self.last_quasi_immut_field_op = None
             return copied_op, QuasiimmutGetfieldAndPureCallCondition(
    -                op, qmutdescr, optimizer.metainterp_sd)
    +                op, qmutdescr, optimizer)
     
         def repr_of_conditions(self, argrepr="?"):
             return "\n".join([cond.repr(argrepr) for cond in self.conditions])
     
     
     class Condition(object):
    -    def __init__(self, metainterp_sd):
    -        self.metainterp_sd = metainterp_sd
    +    def __init__(self, optimizer):
    +        self.metainterp_sd = optimizer.metainterp_sd
    +        # XXX maybe too expensive
    +        op = optimizer._last_debug_merge_point
    +        if op:
    +            jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()]
    +            s = jd_sd.warmstate.get_location_str(op.getarglist()[3:])
    +            s = s.replace(',', '.') # we use comma for argument splitting
    +        else:
    +            s = ''
    +        self.debug_mp_str = s
    +        self.rpyfunc = None
     
         def check(self, cpu, ref):
             raise NotImplementedError
    @@ -133,12 +143,13 @@
             return ""
     
     class PureCallCondition(Condition):
    -    def __init__(self, op, metainterp_sd):
    -        Condition.__init__(self, metainterp_sd)
    +    def __init__(self, op, optimizer):
    +        Condition.__init__(self, optimizer)
             args = op.getarglist()[:]
             args[1] = None
             self.args = args
             self.descr = op.getdescr()
    +        self.rpyfunc = op.rpyfunc
     
         def check(self, cpu, ref):
             from rpython.rlib.debug import debug_print, debug_start, debug_stop
    @@ -186,17 +197,23 @@
                 extra = ''
             else:
                 extra = ', ' + ', '.join([self._repr_const(arg) for arg in self.args[2:]])
    -        return "compatible if %s == %s(%s%s)" % (result, funcname, argrepr, extra)
    +        res = "compatible if %s == %s(%s%s)" % (result, funcname, argrepr, extra)
    +        if self.rpyfunc:
    +            res = "%s: %s" % (self.rpyfunc, res)
    +        if self.debug_mp_str:
    +            res = self.debug_mp_str + "\n" + res
    +        return res
     
     
     class QuasiimmutGetfieldAndPureCallCondition(PureCallCondition):
    -    def __init__(self, op, qmutdescr, metainterp_sd):
    -        Condition.__init__(self, metainterp_sd)
    +    def __init__(self, op, qmutdescr, optimizer):
    +        Condition.__init__(self, optimizer)
             args = op.getarglist()[:]
             args[1] = None
             args[2] = None
             self.args = args
             self.descr = op.getdescr()
    +        self.rpyfunc = op.rpyfunc
             self.qmut = qmutdescr.qmut
             self.mutatefielddescr = qmutdescr.mutatefielddescr
             self.fielddescr = qmutdescr.fielddescr
    @@ -266,5 +283,10 @@
             else:
                 extra = ', ' + ', '.join([self._repr_const(arg) for arg in self.args[3:]])
             attrname = self.fielddescr.repr_of_descr()
    -        return "compatible if %s == %s(%s, %s.%s%s)" % (
    +        res = "compatible if %s == %s(%s, %s.%s%s)" % (
                     result, funcname, argrepr, argrepr, attrname, extra)
    +        if self.rpyfunc:
    +            res = "%s: %s" % (self.rpyfunc, res)
    +        if self.debug_mp_str:
    +            res = self.debug_mp_str + "\n" + res
    +        return res
    diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py
    --- a/rpython/jit/metainterp/optimizeopt/optimizer.py
    +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py
    @@ -264,6 +264,7 @@
             self.optunroll = None
     
             self._last_guard_op = None
    +        self._last_debug_merge_point = None
     
             self.set_optimizations(optimizations)
             self.setup()
    @@ -886,6 +887,7 @@
         # FIXME: Is this still needed?
     
         def optimize_DEBUG_MERGE_POINT(self, op):
    +        self._last_debug_merge_point = op
             self.emit_operation(op)
     
         def optimize_JIT_DEBUG(self, op):
    diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
    --- a/rpython/jit/metainterp/pyjitpl.py
    +++ b/rpython/jit/metainterp/pyjitpl.py
    @@ -1903,6 +1903,7 @@
         cancel_count = 0
         exported_state = None
         last_exc_box = None
    +    framestack = None
     
         def __init__(self, staticdata, jitdriver_sd):
             self.staticdata = staticdata
    @@ -2217,10 +2218,10 @@
     
     
         def attach_debug_info(self, op):
    -        if (not we_are_translated() and op is not None
    -            and getattr(self, 'framestack', None)):
    -            op.pc = self.framestack[-1].pc
    -            op.name = self.framestack[-1].jitcode.name
    +        if (op is not None and self.framestack is not None):
    +            if not we_are_translated():
    +                op.pc = self.framestack[-1].pc
    +            op.rpyfunc = self.framestack[-1].jitcode.name
     
         def execute_raised(self, exception, constant=False):
             if isinstance(exception, jitexc.JitException):
    diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py
    --- a/rpython/jit/metainterp/resoperation.py
    +++ b/rpython/jit/metainterp/resoperation.py
    @@ -249,10 +249,10 @@
     class AbstractResOp(AbstractResOpOrInputArg):
         """The central ResOperation class, representing one operation."""
     
    -    _attrs_ = ()
    +    _attrs_ = ('rpyfunc', )
     
         # debug
    -    name = ""
    +    rpyfunc = ""
         pc = 0
         opnum = 0
         _cls_has_bool_result = False
    @@ -331,6 +331,7 @@
             if descr is DONT_CHANGE:
                 descr = None
             newop = ResOperation(opnum, args, descr)
    +        newop.rpyfunc = self.rpyfunc
             if self.type != 'v':
                 newop.copy_value_from(self)
             return newop
    @@ -351,8 +352,8 @@
             #    sres = '%s = ' % (self.result,)
             else:
                 sres = ''
    -        if self.name:
    -            prefix = "%s:%s   " % (self.name, self.pc)
    +        if self.rpyfunc:
    +            prefix = "%s:%s   " % (self.rpyfunc, self.pc)
                 if graytext:
                     prefix = "\f%s\f" % prefix
             else:
    
    From pypy.commits at gmail.com  Fri Mar 25 22:25:39 2016
    From: pypy.commits at gmail.com (stefanor)
    Date: Fri, 25 Mar 2016 19:25:39 -0700 (PDT)
    Subject: [pypy-commit] pypy default: Only execute NEON instructions on CPUs
     supporting NEON
    Message-ID: <56f5f323.85371c0a.f740a.686f@mx.google.com>
    
    Author: Stefano Rivera 
    Branch: 
    Changeset: r83380:fc95c9347679
    Date: 2016-03-25 21:24 -0500
    http://bitbucket.org/pypy/pypy/changeset/fc95c9347679/
    
    Log:	Only execute NEON instructions on CPUs supporting NEON
    
    	On Debian armhf, NEON is not mandated. In fact Debian's buildds
    	don't support it.
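
    (A rough CPython sketch of the same HWCAP check, assuming a Linux/glibc
    system where getauxval() can be reached through ctypes; AT_HWCAP and
    HWCAP_NEON mirror the constants used in the patch below.)

        import ctypes, ctypes.util

        AT_HWCAP = 16          # auxv key for the hardware-capability bitmask
        HWCAP_NEON = 1 << 12   # NEON bit on 32-bit ARM

        libc = ctypes.CDLL(ctypes.util.find_library("c"))
        libc.getauxval.restype = ctypes.c_ulong
        libc.getauxval.argtypes = [ctypes.c_ulong]

        def detect_neon():
            # true only if the running CPU advertises NEON support
            return bool(libc.getauxval(AT_HWCAP) & HWCAP_NEON)

        print(detect_neon())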
    
    diff --git a/rpython/jit/backend/arm/detect.py b/rpython/jit/backend/arm/detect.py
    --- a/rpython/jit/backend/arm/detect.py
    +++ b/rpython/jit/backend/arm/detect.py
    @@ -1,6 +1,7 @@
     import os
     
     from rpython.translator.tool.cbuild import ExternalCompilationInfo
    +from rpython.rtyper.lltypesystem import lltype, rffi
     from rpython.rtyper.tool import rffi_platform
     from rpython.rlib.clibffi import FFI_DEFAULT_ABI, FFI_SYSV, FFI_VFP
     from rpython.translator.platform import CompilationError
    @@ -15,6 +16,7 @@
         asm volatile("VMOV s0, s1");
     }
         """])
    +getauxval = rffi.llexternal("getauxval", [lltype.Unsigned], lltype.Unsigned)
     
     def detect_hardfloat():
         return FFI_DEFAULT_ABI == FFI_VFP
    @@ -63,3 +65,10 @@
                         "falling back to", "ARMv%d" % n)
         debug_stop("jit-backend-arch")
         return n
    +
    +
    +def detect_neon():
    +    AT_HWCAP = 16
    +    HWCAP_NEON = 1 << 12
    +    hwcap = getauxval(AT_HWCAP)
    +    return bool(hwcap & HWCAP_NEON)
    diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py
    --- a/rpython/jit/backend/arm/opassembler.py
    +++ b/rpython/jit/backend/arm/opassembler.py
    @@ -1092,8 +1092,8 @@
             self.mc.VCVT_int_to_float(res.value, r.svfp_ip.value)
             return fcond
     
    -    # the following five instructions are only ARMv7;
    -    # regalloc.py won't call them at all on ARMv6
    +    # the following five instructions are only ARMv7 with NEON;
    +    # regalloc.py won't call them at all in other cases
         emit_opx_llong_add = gen_emit_float_op('llong_add', 'VADD_i64')
         emit_opx_llong_sub = gen_emit_float_op('llong_sub', 'VSUB_i64')
         emit_opx_llong_and = gen_emit_float_op('llong_and', 'VAND_i64')
    diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py
    --- a/rpython/jit/backend/arm/regalloc.py
    +++ b/rpython/jit/backend/arm/regalloc.py
    @@ -530,7 +530,7 @@
                                 EffectInfo.OS_LLONG_AND,
                                 EffectInfo.OS_LLONG_OR,
                                 EffectInfo.OS_LLONG_XOR):
    -                if self.cpu.cpuinfo.arch_version >= 7:
    +                if self.cpu.cpuinfo.neon:
                         args = self._prepare_llong_binop_xx(op, fcond)
                         self.perform_extra(op, args, fcond)
                         return
    diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py
    --- a/rpython/jit/backend/arm/runner.py
    +++ b/rpython/jit/backend/arm/runner.py
    @@ -7,13 +7,14 @@
     from rpython.rlib.jit_hooks import LOOP_RUN_CONTAINER
     from rpython.rtyper.lltypesystem import lltype, llmemory
     from rpython.jit.backend.arm.detect import detect_hardfloat
    -from rpython.jit.backend.arm.detect import detect_arch_version
    +from rpython.jit.backend.arm.detect import detect_arch_version, detect_neon
     
     jitframe.STATICSIZE = JITFRAME_FIXED_SIZE
     
     class CPUInfo(object):
         hf_abi = False
         arch_version = 6
    +    neon = False
     
     class AbstractARMCPU(AbstractLLCPU):
     
    @@ -48,6 +49,7 @@
         def setup_once(self):
             self.cpuinfo.arch_version = detect_arch_version()
             self.cpuinfo.hf_abi = detect_hardfloat()
    +        self.cpuinfo.neon = detect_neon()
             #self.codemap.setup()
             self.assembler.setup_once()
     
    
    From pypy.commits at gmail.com  Fri Mar 25 22:44:11 2016
    From: pypy.commits at gmail.com (rlamy)
    Date: Fri, 25 Mar 2016 19:44:11 -0700 (PDT)
    Subject: [pypy-commit] pypy follow_symlinks: Implement missing features in
     chown()
    Message-ID: <56f5f77b.8bdd1c0a.141b4.776b@mx.google.com>
    
    Author: Ronan Lamy 
    Branch: follow_symlinks
    Changeset: r83381:10b3f4bf59cd
    Date: 2016-03-26 02:43 +0000
    http://bitbucket.org/pypy/pypy/changeset/10b3f4bf59cd/
    
    Log:	Implement missing features in chown()
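
    (For context, a hedged sketch of the CPython-level calls being matched here;
    "somefile" is just a placeholder and dir_fd/follow_symlinks support varies
    by platform.)

        import os

        dirfd = os.open("/tmp", os.O_RDONLY)
        try:
            # path relative to dirfd, without following a final symlink:
            # this is the combination served by the rposix.fchownat() branch below
            os.chown("somefile", os.getuid(), os.getgid(),
                     dir_fd=dirfd, follow_symlinks=False)
        finally:
            os.close(dirfd)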
    
    diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py
    --- a/pypy/module/posix/interp_posix.py
    +++ b/pypy/module/posix/interp_posix.py
    @@ -1698,9 +1698,9 @@
         return space.wrap(res)
     
     @unwrap_spec(
    -    path='fsencode', uid=c_uid_t, gid=c_gid_t,
    -    dir_fd=DirFD(available=False), follow_symlinks=kwonly(bool))
    -def chown(space, path, uid, gid, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True):
    +    uid=c_uid_t, gid=c_gid_t,
    +    dir_fd=DirFD(rposix.HAVE_FCHOWNAT), follow_symlinks=kwonly(bool))
    +def chown(space, w_path, uid, gid, dir_fd=DEFAULT_DIR_FD, follow_symlinks=True):
         """chown(path, uid, gid, *, dir_fd=None, follow_symlinks=True)
     
     Change the owner and group id of path to the numeric uid and gid.
    @@ -1719,10 +1719,43 @@
       If they are unavailable, using them will raise a NotImplementedError."""
         check_uid_range(space, uid)
         check_uid_range(space, gid)
    +    if not (rposix.HAVE_LCHOWN or rposix.HAVE_FCHOWNAT):
    +        if not follow_symlinks:
    +            raise argument_unavailable(space, 'chown', 'follow_symlinks')
         try:
    -        os.chown(path, uid, gid)
    -    except OSError, e:
    -        raise wrap_oserror(space, e, path)
    +        path = space.fsencode_w(w_path)
    +    except OperationError:
    +        if not space.isinstance_w(w_path, space.w_int):
    +            raise oefmt(space.w_TypeError,
    +                "argument should be string, bytes or integer, not %T", w_path)
    +        # File descriptor case
    +        fd = unwrap_fd(space, w_path)
    +        if dir_fd != DEFAULT_DIR_FD:
    +            raise oefmt(space.w_ValueError,
    +                "chown: can't specify both dir_fd and fd")
    +        if not follow_symlinks:
    +            raise oefmt(space.w_ValueError,
    +                "chown: cannnot use fd and follow_symlinks together")
    +        try:
    +            os.fchown(fd, uid, gid)
    +        except OSError as e:
    +            raise wrap_oserror(space, e)
    +    else:
    +        # String case
    +        try:
    +            if (rposix.HAVE_LCHOWN and
    +                    dir_fd == DEFAULT_DIR_FD and not follow_symlinks):
    +                os.lchown(path, uid, gid)
    +            elif rposix.HAVE_FCHOWNAT and (
    +                    not follow_symlinks or dir_fd != DEFAULT_DIR_FD):
    +                rposix.fchownat(path, uid, gid, dir_fd, follow_symlinks)
    +            else:
    +                assert follow_symlinks
    +                assert dir_fd == DEFAULT_DIR_FD
    +                os.chown(path, uid, gid)
    +        except OSError as e:
    +            raise wrap_oserror2(space, e, w_path)
    +
     
     @unwrap_spec(path='fsencode', uid=c_uid_t, gid=c_gid_t)
     def lchown(space, path, uid, gid):
    
    From pypy.commits at gmail.com  Fri Mar 25 23:06:38 2016
    From: pypy.commits at gmail.com (rlamy)
    Date: Fri, 25 Mar 2016 20:06:38 -0700 (PDT)
    Subject: [pypy-commit] pypy follow_symlinks: workaround for missing
     errno.ENOTSUP (e.g. on the buildbot's PyPy)
    Message-ID: <56f5fcbe.4c181c0a.fe661.72af@mx.google.com>
    
    Author: Ronan Lamy 
    Branch: follow_symlinks
    Changeset: r83382:fb2b32a805d3
    Date: 2016-03-26 03:05 +0000
    http://bitbucket.org/pypy/pypy/changeset/fb2b32a805d3/
    
    Log:	workaround for missing errno.ENOTSUP (e.g. on the buildbot's PyPy)
    
    diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py
    --- a/pypy/module/posix/interp_posix.py
    +++ b/pypy/module/posix/interp_posix.py
    @@ -1,7 +1,12 @@
     import os
     import sys
     from math import modf
    -from errno import ENOTSUP, EOPNOTSUPP
    +from errno import EOPNOTSUPP
    +try:
    +    from errno import ENOTSUP
    +except ImportError:
    +    # some Pythons don't have errno.ENOTSUP
    +    ENOTSUP = 0
     
     from rpython.rlib import rposix, rposix_stat
     from rpython.rlib import objectmodel, rurandom
    
    From pypy.commits at gmail.com  Fri Mar 25 23:30:49 2016
    From: pypy.commits at gmail.com (rlamy)
    Date: Fri, 25 Mar 2016 20:30:49 -0700 (PDT)
    Subject: [pypy-commit] pypy follow_symlinks: Support *_dir_fd arguments in
     posix.link()
    Message-ID: <56f60269.034cc20a.d21a9.1269@mx.google.com>
    
    Author: Ronan Lamy 
    Branch: follow_symlinks
    Changeset: r83383:3da1fa9db8d2
    Date: 2016-03-26 03:29 +0000
    http://bitbucket.org/pypy/pypy/changeset/3da1fa9db8d2/
    
    Log:	Support *_dir_fd arguments in posix.link()
    
    diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py
    --- a/pypy/module/posix/interp_posix.py
    +++ b/pypy/module/posix/interp_posix.py
    @@ -964,7 +964,7 @@
     
     @unwrap_spec(
         src='fsencode', dst='fsencode',
    -    src_dir_fd=DirFD(available=False), dst_dir_fd=DirFD(available=False),
    +    src_dir_fd=DirFD(rposix.HAVE_LINKAT), dst_dir_fd=DirFD(rposix.HAVE_LINKAT),
         follow_symlinks=kwonly(bool))
     def link(
             space, src, dst,
    @@ -985,8 +985,12 @@
       platform.  If they are unavailable, using them will raise a
       NotImplementedError."""
         try:
    -        os.link(src, dst)
    -    except OSError, e:
    +        if (src_dir_fd != DEFAULT_DIR_FD or dst_dir_fd != DEFAULT_DIR_FD
    +                or not follow_symlinks):
    +            rposix.linkat(src, dst, src_dir_fd, dst_dir_fd, follow_symlinks)
    +        else:
    +            rposix.link(src, dst)
    +    except OSError as e:
             raise wrap_oserror(space, e)
     
     
    
    From pypy.commits at gmail.com  Sat Mar 26 00:10:37 2016
    From: pypy.commits at gmail.com (rlamy)
    Date: Fri, 25 Mar 2016 21:10:37 -0700 (PDT)
    Subject: [pypy-commit] pypy rposix-for-3: Add renameat()
    Message-ID: <56f60bbd.2106c20a.717c5.ffffeecf@mx.google.com>
    
    Author: Ronan Lamy 
    Branch: rposix-for-3
    Changeset: r83384:57f52c369d21
    Date: 2016-03-26 04:09 +0000
    http://bitbucket.org/pypy/pypy/changeset/57f52c369d21/
    
    Log:	Add renameat()
    
    diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py
    --- a/rpython/rlib/rposix.py
    +++ b/rpython/rlib/rposix.py
    @@ -1903,6 +1903,17 @@
             lltype.free(buf, flavor='raw')
             return result
     
    +if HAVE_RENAMEAT:
    +    c_renameat = external(
    +        'renameat',
    +        [rffi.INT, rffi.CCHARP, rffi.INT, rffi.CCHARP], rffi.INT,
    +        save_err=rffi.RFFI_SAVE_ERRNO)
    +
    +    def renameat(src, dst, src_dir_fd=AT_FDCWD, dst_dir_fd=AT_FDCWD):
    +        error = c_renameat(src_dir_fd, src, dst_dir_fd, dst)
    +        handle_posix_error('renameat', error)
    +
    +
     if HAVE_SYMLINKAT:
         c_symlinkat = external('symlinkat',
             [rffi.CCHARP, rffi.INT, rffi.CCHARP], rffi.INT,
    diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py
    --- a/rpython/rlib/test/test_rposix.py
    +++ b/rpython/rlib/test/test_rposix.py
    @@ -545,3 +545,14 @@
             assert os.readlink(str(tmpdir.join('link'))) == 'file'
         finally:
             os.close(dirfd)
    +
    +
    +def test_renameat(tmpdir):
    +    tmpdir.join('file').write('text')
    +    dirfd = os.open(str(tmpdir), os.O_RDONLY)
    +    try:
    +        rposix.renameat('file', 'file2', src_dir_fd=dirfd, dst_dir_fd=dirfd)
    +    finally:
    +        os.close(dirfd)
    +    assert tmpdir.join('file').check(exists=False)
    +    assert tmpdir.join('file2').check(exists=True)
    
    From pypy.commits at gmail.com  Sat Mar 26 00:15:39 2016
    From: pypy.commits at gmail.com (rlamy)
    Date: Fri, 25 Mar 2016 21:15:39 -0700 (PDT)
    Subject: [pypy-commit] pypy follow_symlinks: Support *_dir_fd arguments in
     rename() and replace()
    Message-ID: <56f60ceb.29cec20a.3df9a.23a6@mx.google.com>
    
    Author: Ronan Lamy 
    Branch: follow_symlinks
    Changeset: r83386:b7d85ab453c9
    Date: 2016-03-26 04:14 +0000
    http://bitbucket.org/pypy/pypy/changeset/b7d85ab453c9/
    
    Log:	Support *_dir_fd arguments in rename() and replace()
    
    diff --git a/pypy/module/posix/interp_posix.py b/pypy/module/posix/interp_posix.py
    --- a/pypy/module/posix/interp_posix.py
    +++ b/pypy/module/posix/interp_posix.py
    @@ -849,8 +849,9 @@
         """
         _chmod_fd(space, fd, mode)
     
    - at unwrap_spec(src_dir_fd=DirFD(available=False), dst_dir_fd=DirFD(available=False))
    -def rename(space, w_old, w_new,
    + at unwrap_spec(src_dir_fd=DirFD(rposix.HAVE_RENAMEAT),
    +        dst_dir_fd=DirFD(rposix.HAVE_RENAMEAT))
    +def rename(space, w_src, w_dst,
             src_dir_fd=DEFAULT_DIR_FD, dst_dir_fd=DEFAULT_DIR_FD):
         """rename(src, dst, *, src_dir_fd=None, dst_dir_fd=None)
     
    @@ -862,12 +863,18 @@
     src_dir_fd and dst_dir_fd, may not be implemented on your platform.
       If they are unavailable, using them will raise a NotImplementedError."""
         try:
    -        dispatch_filename_2(rposix.rename)(space, w_old, w_new)
    +        if (src_dir_fd != DEFAULT_DIR_FD or dst_dir_fd != DEFAULT_DIR_FD):
    +            src = space.fsencode_w(w_src)
    +            dst = space.fsencode_w(w_dst)
    +            rposix.renameat(src, dst, src_dir_fd, dst_dir_fd)
    +        else:
    +            dispatch_filename_2(rposix.rename)(space, w_src, w_dst)
         except OSError, e:
             raise wrap_oserror(space, e)
     
    - at unwrap_spec(src_dir_fd=DirFD(available=False), dst_dir_fd=DirFD(available=False))
    -def replace(space, w_old, w_new,
    + at unwrap_spec(src_dir_fd=DirFD(rposix.HAVE_RENAMEAT),
    +        dst_dir_fd=DirFD(rposix.HAVE_RENAMEAT))
    +def replace(space, w_src, w_dst,
             src_dir_fd=DEFAULT_DIR_FD, dst_dir_fd=DEFAULT_DIR_FD):
         """replace(src, dst, *, src_dir_fd=None, dst_dir_fd=None)
     
    @@ -879,8 +886,13 @@
     src_dir_fd and dst_dir_fd, may not be implemented on your platform.
       If they are unavailable, using them will raise a NotImplementedError."""
         try:
    -        dispatch_filename_2(rposix.replace)(space, w_old, w_new)
    -    except OSError, e:
    +        if (src_dir_fd != DEFAULT_DIR_FD or dst_dir_fd != DEFAULT_DIR_FD):
    +            src = space.fsencode_w(w_src)
    +            dst = space.fsencode_w(w_dst)
    +            rposix.renameat(src, dst, src_dir_fd, dst_dir_fd)
    +        else:
    +            dispatch_filename_2(rposix.replace)(space, w_src, w_dst)
    +    except OSError as e:
             raise wrap_oserror(space, e)
     
     @unwrap_spec(mode=c_int, dir_fd=DirFD(rposix.HAVE_MKFIFOAT))
    
    From pypy.commits at gmail.com  Sat Mar 26 00:15:37 2016
    From: pypy.commits at gmail.com (rlamy)
    Date: Fri, 25 Mar 2016 21:15:37 -0700 (PDT)
    Subject: [pypy-commit] pypy follow_symlinks: hg merge rposix-for-3
    Message-ID: <56f60ce9.8d571c0a.1a9e0.ffff842a@mx.google.com>
    
    Author: Ronan Lamy 
    Branch: follow_symlinks
    Changeset: r83385:b61310248d8f
    Date: 2016-03-26 04:11 +0000
    http://bitbucket.org/pypy/pypy/changeset/b61310248d8f/
    
    Log:	hg merge rposix-for-3
    
    diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py
    --- a/rpython/rlib/rposix.py
    +++ b/rpython/rlib/rposix.py
    @@ -1903,6 +1903,17 @@
             lltype.free(buf, flavor='raw')
             return result
     
    +if HAVE_RENAMEAT:
    +    c_renameat = external(
    +        'renameat',
    +        [rffi.INT, rffi.CCHARP, rffi.INT, rffi.CCHARP], rffi.INT,
    +        save_err=rffi.RFFI_SAVE_ERRNO)
    +
    +    def renameat(src, dst, src_dir_fd=AT_FDCWD, dst_dir_fd=AT_FDCWD):
    +        error = c_renameat(src_dir_fd, src, dst_dir_fd, dst)
    +        handle_posix_error('renameat', error)
    +
    +
     if HAVE_SYMLINKAT:
         c_symlinkat = external('symlinkat',
             [rffi.CCHARP, rffi.INT, rffi.CCHARP], rffi.INT,
    diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py
    --- a/rpython/rlib/test/test_rposix.py
    +++ b/rpython/rlib/test/test_rposix.py
    @@ -545,3 +545,14 @@
             assert os.readlink(str(tmpdir.join('link'))) == 'file'
         finally:
             os.close(dirfd)
    +
    +
    +def test_renameat(tmpdir):
    +    tmpdir.join('file').write('text')
    +    dirfd = os.open(str(tmpdir), os.O_RDONLY)
    +    try:
    +        rposix.renameat('file', 'file2', src_dir_fd=dirfd, dst_dir_fd=dirfd)
    +    finally:
    +        os.close(dirfd)
    +    assert tmpdir.join('file').check(exists=False)
    +    assert tmpdir.join('file2').check(exists=True)
    
    From pypy.commits at gmail.com  Sat Mar 26 14:43:55 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Sat, 26 Mar 2016 11:43:55 -0700 (PDT)
    Subject: [pypy-commit] pypy faster-traceback: pass list of resops here
    Message-ID: <56f6d86b.d4e01c0a.6dce3.fffff966@mx.google.com>
    
    Author: fijal
    Branch: faster-traceback
    Changeset: r83387:8349cbe273c6
    Date: 2016-03-25 22:42 +0200
    http://bitbucket.org/pypy/pypy/changeset/8349cbe273c6/
    
    Log:	pass list of resops here
    
    diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
    --- a/rpython/jit/metainterp/pyjitpl.py
    +++ b/rpython/jit/metainterp/pyjitpl.py
    @@ -2256,7 +2256,7 @@
                         jd_sd.warmstate.get_location_str(greenkey),
                         self.staticdata.logger_ops._make_log_operations(
                             self.box_names_memo),
    -                    self.history.trace)
    +                    self.history.trace.unpack()[1])
                 if self.aborted_tracing_jitdriver is not None:
                     jd_sd = self.aborted_tracing_jitdriver
                     greenkey = self.aborted_tracing_greenkey
    
    From pypy.commits at gmail.com  Sat Mar 26 14:43:57 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Sat, 26 Mar 2016 11:43:57 -0700 (PDT)
    Subject: [pypy-commit] pypy default: fix the most obvious 32bit problem
    Message-ID: <56f6d86d.657bc20a.64339.15db@mx.google.com>
    
    Author: fijal
    Branch: 
    Changeset: r83388:7baba70b412c
    Date: 2016-03-26 20:43 +0200
    http://bitbucket.org/pypy/pypy/changeset/7baba70b412c/
    
    Log:	fix the most obvious 32bit problem
    
    diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py
    --- a/rpython/jit/metainterp/opencoder.py
    +++ b/rpython/jit/metainterp/opencoder.py
    @@ -354,7 +354,7 @@
                     # don't intern float constants
                     self._consts_float += 1
                     v = (len(self._floats) << 1) | 1
    -                self._floats.append(box.getfloat())
    +                self._floats.append(box.getfloatstorage())
                     return tag(TAGCONSTOTHER, v)
                 else:
                     self._consts_ptr += 1
    
    From pypy.commits at gmail.com  Sat Mar 26 14:46:48 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Sat, 26 Mar 2016 11:46:48 -0700 (PDT)
    Subject: [pypy-commit] pypy default: the most obvious fix on arm
    Message-ID: <56f6d918.04371c0a.e0a36.fffffdfe@mx.google.com>
    
    Author: fijal
    Branch: 
    Changeset: r83389:67480c50c1ab
    Date: 2016-03-26 20:44 +0200
    http://bitbucket.org/pypy/pypy/changeset/67480c50c1ab/
    
    Log:	the most obvious fix on arm
    
    diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py
    --- a/rpython/jit/backend/arm/assembler.py
    +++ b/rpython/jit/backend/arm/assembler.py
    @@ -939,9 +939,9 @@
                 op = operations[i]
                 self.mc.mark_op(op)
                 opnum = op.getopnum()
    -            if op.has_no_side_effect() and op not in regalloc.longevity:
    +            if rop.has_no_side_effect(opnum) and op not in regalloc.longevity:
                     regalloc.possibly_free_vars_for_op(op)
    -            elif not we_are_translated() and op.getopnum() == -127:
    +            elif not we_are_translated() and op.getopnum() == rop.FORCE_SPILL:
                     regalloc.prepare_force_spill(op, fcond)
                 else:
                     arglocs = regalloc_operations[opnum](regalloc, op, fcond)
    @@ -949,7 +949,7 @@
                         fcond = asm_operations[opnum](self, op, arglocs,
                                                             regalloc, fcond)
                         assert fcond is not None
    -            if op.is_guard():
    +            if rop.is_guard(opnum):
                     regalloc.possibly_free_vars(op.getfailargs())
                 if op.type != 'v':
                     regalloc.possibly_free_var(op)
    
    From pypy.commits at gmail.com  Sat Mar 26 14:46:50 2016
    From: pypy.commits at gmail.com (fijal)
    Date: Sat, 26 Mar 2016 11:46:50 -0700 (PDT)
    Subject: [pypy-commit] pypy default: fix more copy-pastes of the same
     function
    Message-ID: <56f6d91a.657bc20a.64339.16fa@mx.google.com>
    
    Author: fijal
    Branch: 
    Changeset: r83390:3c26d7439762
    Date: 2016-03-26 20:46 +0200
    http://bitbucket.org/pypy/pypy/changeset/3c26d7439762/
    
    Log:	fix more copy-pastes of the same function
    
    diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py
    --- a/rpython/jit/backend/ppc/regalloc.py
    +++ b/rpython/jit/backend/ppc/regalloc.py
    @@ -286,7 +286,8 @@
                 self.assembler.mc.mark_op(op)
                 self.rm.position = i
                 self.fprm.position = i
    -            if op.has_no_side_effect() and op not in self.longevity:
    +            opnum = op.opnum
    +            if rop.has_no_side_effect(opnum) and op not in self.longevity:
                     i += 1
                     self.possibly_free_vars_for_op(op)
                     continue
    @@ -298,8 +299,7 @@
                     else:
                         self.fprm.temp_boxes.append(box)
                 #
    -            opnum = op.getopnum()
    -            if not we_are_translated() and opnum == -127:
    +            if not we_are_translated() and opnum == rop.FORCE_SPILL:
                     self._consider_force_spill(op)
                 else:
                     arglocs = oplist[opnum](self, op)
    diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py
    --- a/rpython/jit/backend/zarch/regalloc.py
    +++ b/rpython/jit/backend/zarch/regalloc.py
    @@ -476,7 +476,8 @@
                 self.assembler.mc.mark_op(op)
                 self.rm.position = i
                 self.fprm.position = i
    -            if op.has_no_side_effect() and op not in self.longevity:
    +            opnum = op.getopnum()
    +            if rop.has_no_side_effect(opnum) and op not in self.longevity:
                     i += 1
                     self.possibly_free_vars_for_op(op)
                     continue
    @@ -488,8 +489,7 @@
                     else:
                         self.fprm.temp_boxes.append(box)
                 #
    -            opnum = op.getopnum()
    -            if not we_are_translated() and opnum == -127:
    +            if not we_are_translated() and opnum == rop.FORCE_SPILL:
                     self._consider_force_spill(op)
                 else:
                     arglocs = prepare_oplist[opnum](self, op)
    
    From pypy.commits at gmail.com  Sun Mar 27 11:09:14 2016
    From: pypy.commits at gmail.com (mattip)
    Date: Sun, 27 Mar 2016 08:09:14 -0700 (PDT)
    Subject: [pypy-commit] pypy cpyext-ext: test,
     implement PyFile_FromFile (mostly), PyFile_AsFile
    Message-ID: <56f7f79a.6bb8c20a.3a155.664f@mx.google.com>
    
    Author: mattip 
    Branch: cpyext-ext
    Changeset: r83391:a280f13c5a2f
    Date: 2016-03-27 18:08 +0300
    http://bitbucket.org/pypy/pypy/changeset/a280f13c5a2f/
    
    Log:	test, implement PyFile_FromFile (mostly), PyFile_AsFile
    
    diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
    --- a/pypy/module/cpyext/api.py
    +++ b/pypy/module/cpyext/api.py
    @@ -93,6 +93,8 @@
         fileno = rffi.llexternal('fileno', [FILEP], rffi.INT)
     
     fopen = rffi.llexternal('fopen', [CONST_STRING, CONST_STRING], FILEP)
    +fdopen = rffi.llexternal('fdopen', [rffi.INT, CONST_STRING], FILEP,
    +                         save_err=rffi.RFFI_SAVE_ERRNO)
     
     _fclose = rffi.llexternal('fclose', [FILEP], rffi.INT)
     def fclose(fp):
    diff --git a/pypy/module/cpyext/pyfile.py b/pypy/module/cpyext/pyfile.py
    --- a/pypy/module/cpyext/pyfile.py
    +++ b/pypy/module/cpyext/pyfile.py
    @@ -1,9 +1,10 @@
     from rpython.rtyper.lltypesystem import rffi, lltype
     from pypy.module.cpyext.api import (
    -    cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, build_type_checkers)
    +    cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, build_type_checkers, fdopen,
    +    fileno)
     from pypy.module.cpyext.pyobject import PyObject
     from pypy.module.cpyext.object import Py_PRINT_RAW
    -from pypy.interpreter.error import OperationError
    +from pypy.interpreter.error import OperationError, oefmt
     from pypy.module._file.interp_file import W_File
     
     PyFile_Check, PyFile_CheckExact = build_type_checkers("File", W_File)
    @@ -22,9 +23,8 @@
         try:
             w_readline = space.getattr(w_obj, space.wrap('readline'))
         except OperationError:
    -        raise OperationError(
    -            space.w_TypeError, space.wrap(
    -            "argument must be a file, or have a readline() method."))
    +        raise oefmt(space.w_TypeError, 
    +            "argument must be a file, or have a readline() method.")
     
         n = rffi.cast(lltype.Signed, n)
         if space.is_true(space.gt(space.wrap(n), space.wrap(0))):
    @@ -52,14 +52,23 @@
         If the caller will ever use the returned FILE* object while
         the GIL is released it must also call the PyFile_IncUseCount() and
         PyFile_DecUseCount() functions as appropriate."""
    -    raise NotImplementedError
    +    assert isinstance(w_p, W_File)
    +    return fdopen(space.int_w(space.call_method(w_p, 'fileno')), 
    +                     w_p.mode)
     
     @cpython_api([FILEP, CONST_STRING, CONST_STRING, rffi.VOIDP], PyObject)
     def PyFile_FromFile(space, fp, name, mode, close):
         """Create a new PyFileObject from the already-open standard C file
         pointer, fp.  The function close will be called when the file should be
         closed.  Return NULL on failure."""
    -    raise NotImplementedError
    +    if close:
    +        raise oefmt(space.w_NotImplementedError, 
    +            'PyFile_FromFile(..., close) with close function not implemented')
    +    w_ret = space.allocate_instance(W_File, space.gettypefor(W_File))
    +    w_ret.w_name = space.wrap(rffi.charp2str(name))
    +    w_ret.check_mode_ok(rffi.charp2str(mode))
    +    w_ret.fp = fp
    +    return w_ret
     
     @cpython_api([PyObject, rffi.INT_real], lltype.Void)
     def PyFile_SetBufSize(space, w_file, n):
    diff --git a/pypy/module/cpyext/test/test_pyfile.py b/pypy/module/cpyext/test/test_pyfile.py
    --- a/pypy/module/cpyext/test/test_pyfile.py
    +++ b/pypy/module/cpyext/test/test_pyfile.py
    @@ -61,7 +61,16 @@
             assert space.str_w(api.PyFile_Name(w_file)) == name
     
         def test_file_fromfile(self, space, api):
    -        api.PyFile_FromFile()
    +        name = str(udir / "_test_file")
    +        with rffi.scoped_str2charp(name) as filename:
    +            with rffi.scoped_str2charp("wb") as mode:
    +                w_file = api.PyFile_FromString(filename, mode)
    +                fp = api.PyFile_AsFile(w_file)
    +                assert fp is not None
    +                w_file2 = api.PyFile_FromFile(fp, filename, mode, None)
    +        assert w_file2 is not None
    +        assert api.PyFile_Check(w_file2)
    +        assert space.str_w(api.PyFile_Name(w_file2)) == name
     
         @pytest.mark.xfail
         def test_file_setbufsize(self, space, api):
    
    From pypy.commits at gmail.com  Sun Mar 27 11:16:29 2016
    From: pypy.commits at gmail.com (plan_rich)
    Date: Sun, 27 Mar 2016 08:16:29 -0700 (PDT)
    Subject: [pypy-commit] pypy release-5.x: macros (e.g. WCOREDUMP) got
     parameter type Signed; on little endian this does not make a difference,
     but it does on big endian. Changed to rffi.INT
    Message-ID: <56f7f94d.06b01c0a.acd46.469e@mx.google.com>
    
    Author: Richard Plangger 
    Branch: release-5.x
    Changeset: r83393:78860f97739e
    Date: 2016-01-22 14:56 +0100
    http://bitbucket.org/pypy/pypy/changeset/78860f97739e/
    
    Log:	macros (e.g. WCOREDUMP) got parameter type Signed; on little endian
    	this does not make a difference, but it does on big endian. Changed
    	to rffi.INT
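
    (A small Python illustration of the underlying problem, not part of the
    commit: if the C macro reads only the low 4 bytes of a 64-bit argument,
    the little-endian layout happens to yield the right value while the
    big-endian layout yields the high half instead.)

        import struct

        status = 0x1234                            # a small wait status

        le_bytes = struct.pack('<q', status)       # 64-bit little endian
        be_bytes = struct.pack('>q', status)       # 64-bit big endian

        # a macro expecting a plain int sees only the first 4 bytes
        print(struct.unpack('<i', le_bytes[:4])[0])   # 4660 -- right by accident
        print(struct.unpack('>i', be_bytes[:4])[0])   # 0    -- wrong half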
    
    diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py
    --- a/rpython/rlib/rposix.py
    +++ b/rpython/rlib/rposix.py
    @@ -827,7 +827,7 @@
             lltype.free(status_p, flavor='raw')
     
     def _make_waitmacro(name):
    -    c_func = external(name, [lltype.Signed], lltype.Signed,
    +    c_func = external(name, [rffi.INT], lltype.Signed,
                           macro=_MACRO_ON_POSIX)
         returning_int = name in ('WEXITSTATUS', 'WSTOPSIG', 'WTERMSIG')
     
    
    From pypy.commits at gmail.com  Sun Mar 27 11:16:28 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Sun, 27 Mar 2016 08:16:28 -0700 (PDT)
    Subject: [pypy-commit] pypy default: The exact same issue occurs on ppc64
     big-endian
    Message-ID: <56f7f94c.83301c0a.af8bd.47ba@mx.google.com>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r83392:98b19c56182b
    Date: 2016-03-27 17:14 +0200
    http://bitbucket.org/pypy/pypy/changeset/98b19c56182b/
    
    Log:	The exact same issue occurs on ppc64 big-endian
    
    diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py
    --- a/rpython/rlib/rposix.py
    +++ b/rpython/rlib/rposix.py
    @@ -849,7 +849,7 @@
         # reason: legacy code required a union wait. see
         # https://sourceware.org/bugzilla/show_bug.cgi?id=19613
         # for more details. If this get's fixed we can use lltype.Signed
    -    # again.
    +    # again.  (The exact same issue occurs on ppc64 big-endian.)
         c_func = external(name, [rffi.INT], lltype.Signed,
                           macro=_MACRO_ON_POSIX)
         returning_int = name in ('WEXITSTATUS', 'WSTOPSIG', 'WTERMSIG')
    
    From pypy.commits at gmail.com  Sun Mar 27 11:16:32 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Sun, 27 Mar 2016 08:16:32 -0700 (PDT)
    Subject: [pypy-commit] pypy default: merge heads
    Message-ID: <56f7f950.03321c0a.e5066.39ca@mx.google.com>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r83394:060cbab9d669
    Date: 2016-03-27 17:15 +0200
    http://bitbucket.org/pypy/pypy/changeset/060cbab9d669/
    
    Log:	merge heads
    
    diff too long, truncating to 2000 out of 8524 lines
    
    diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
    --- a/pypy/config/pypyoption.py
    +++ b/pypy/config/pypyoption.py
    @@ -46,7 +46,6 @@
     except detect_cpu.ProcessorAutodetectError:
         pass
     
    -
     translation_modules = default_modules.copy()
     translation_modules.update([
         "fcntl", "time", "select", "signal", "_rawffi", "zlib", "struct", "_md5",
    diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
    --- a/pypy/doc/whatsnew-head.rst
    +++ b/pypy/doc/whatsnew-head.rst
    @@ -27,3 +27,8 @@
     .. branch: fix_transpose_for_list_v3
     
     Allow arguments to transpose to be sequences
    +
    +.. branch: jit-leaner-frontend
    +
    +Improve the tracing speed in the frontend as well as heapcache by using a more compact representation
    +of traces
    \ No newline at end of file
    diff --git a/pypy/objspace/std/objectobject.py b/pypy/objspace/std/objectobject.py
    --- a/pypy/objspace/std/objectobject.py
    +++ b/pypy/objspace/std/objectobject.py
    @@ -110,7 +110,7 @@
     def descr__init__(space, w_obj, __args__):
         # don't allow arguments unless __new__ is overridden
         w_type = space.type(w_obj)
    -    w_parent_new, _ = w_type.lookup_where('__new__')
    +    w_parent_new, _ = space.lookup_in_type_where(w_type, '__new__')
         if w_parent_new is space.w_object:
             try:
                 __args__.fixedunpack(0)
    diff --git a/pypy/tool/gdb_pypy.py b/pypy/tool/gdb_pypy.py
    --- a/pypy/tool/gdb_pypy.py
    +++ b/pypy/tool/gdb_pypy.py
    @@ -288,9 +288,11 @@
                 RPyListPrinter.recursive = True
                 try:
                     itemlist = []
    -                for i in range(length):
    +                for i in range(min(length, MAX_DISPLAY_LENGTH)):
                         item = items[i]
                         itemlist.append(str(item))    # may recurse here
    +                if length > MAX_DISPLAY_LENGTH:
    +                    itemlist.append("...")
                     str_items = ', '.join(itemlist)
                 finally:
                     RPyListPrinter.recursive = False
    diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py
    --- a/rpython/config/translationoption.py
    +++ b/rpython/config/translationoption.py
    @@ -126,6 +126,9 @@
         ChoiceOption("jit_profiler", "integrate profiler support into the JIT",
                      ["off", "oprofile"],
                      default="off"),
    +    ChoiceOption("jit_opencoder_model", "the model limits the maximal length"
    +                 " of traces. Use big if you want to go bigger than "
    +                 "the default", ["big", "normal"], default="normal"),
         BoolOption("check_str_without_nul",
                    "Forbid NUL chars in strings in some external function calls",
                    default=False, cmdline=None),
    diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py
    --- a/rpython/jit/backend/arm/assembler.py
    +++ b/rpython/jit/backend/arm/assembler.py
    @@ -939,9 +939,9 @@
                 op = operations[i]
                 self.mc.mark_op(op)
                 opnum = op.getopnum()
    -            if op.has_no_side_effect() and op not in regalloc.longevity:
    +            if rop.has_no_side_effect(opnum) and op not in regalloc.longevity:
                     regalloc.possibly_free_vars_for_op(op)
    -            elif not we_are_translated() and op.getopnum() == -127:
    +            elif not we_are_translated() and op.getopnum() == rop.FORCE_SPILL:
                     regalloc.prepare_force_spill(op, fcond)
                 else:
                     arglocs = regalloc_operations[opnum](regalloc, op, fcond)
    @@ -949,7 +949,7 @@
                         fcond = asm_operations[opnum](self, op, arglocs,
                                                             regalloc, fcond)
                         assert fcond is not None
    -            if op.is_guard():
    +            if rop.is_guard(opnum):
                     regalloc.possibly_free_vars(op.getfailargs())
                 if op.type != 'v':
                     regalloc.possibly_free_var(op)
    diff --git a/rpython/jit/backend/arm/detect.py b/rpython/jit/backend/arm/detect.py
    --- a/rpython/jit/backend/arm/detect.py
    +++ b/rpython/jit/backend/arm/detect.py
    @@ -1,6 +1,7 @@
     import os
     
     from rpython.translator.tool.cbuild import ExternalCompilationInfo
    +from rpython.rtyper.lltypesystem import lltype, rffi
     from rpython.rtyper.tool import rffi_platform
     from rpython.rlib.clibffi import FFI_DEFAULT_ABI, FFI_SYSV, FFI_VFP
     from rpython.translator.platform import CompilationError
    @@ -15,6 +16,7 @@
         asm volatile("VMOV s0, s1");
     }
         """])
    +getauxval = rffi.llexternal("getauxval", [lltype.Unsigned], lltype.Unsigned)
     
     def detect_hardfloat():
         return FFI_DEFAULT_ABI == FFI_VFP
    @@ -63,3 +65,10 @@
                         "falling back to", "ARMv%d" % n)
         debug_stop("jit-backend-arch")
         return n
    +
    +
    +def detect_neon():
    +    AT_HWCAP = 16
    +    HWCAP_NEON = 1 << 12
    +    hwcap = getauxval(AT_HWCAP)
    +    return bool(hwcap & HWCAP_NEON)
    diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py
    --- a/rpython/jit/backend/arm/opassembler.py
    +++ b/rpython/jit/backend/arm/opassembler.py
    @@ -1092,8 +1092,8 @@
             self.mc.VCVT_int_to_float(res.value, r.svfp_ip.value)
             return fcond
     
    -    # the following five instructions are only ARMv7;
    -    # regalloc.py won't call them at all on ARMv6
    +    # the following five instructions are only ARMv7 with NEON;
    +    # regalloc.py won't call them at all in other cases
         emit_opx_llong_add = gen_emit_float_op('llong_add', 'VADD_i64')
         emit_opx_llong_sub = gen_emit_float_op('llong_sub', 'VSUB_i64')
         emit_opx_llong_and = gen_emit_float_op('llong_and', 'VAND_i64')
    diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py
    --- a/rpython/jit/backend/arm/regalloc.py
    +++ b/rpython/jit/backend/arm/regalloc.py
    @@ -530,7 +530,7 @@
                                 EffectInfo.OS_LLONG_AND,
                                 EffectInfo.OS_LLONG_OR,
                                 EffectInfo.OS_LLONG_XOR):
    -                if self.cpu.cpuinfo.arch_version >= 7:
    +                if self.cpu.cpuinfo.neon:
                         args = self._prepare_llong_binop_xx(op, fcond)
                         self.perform_extra(op, args, fcond)
                         return
    diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py
    --- a/rpython/jit/backend/arm/runner.py
    +++ b/rpython/jit/backend/arm/runner.py
    @@ -7,13 +7,14 @@
     from rpython.rlib.jit_hooks import LOOP_RUN_CONTAINER
     from rpython.rtyper.lltypesystem import lltype, llmemory
     from rpython.jit.backend.arm.detect import detect_hardfloat
    -from rpython.jit.backend.arm.detect import detect_arch_version
    +from rpython.jit.backend.arm.detect import detect_arch_version, detect_neon
     
     jitframe.STATICSIZE = JITFRAME_FIXED_SIZE
     
     class CPUInfo(object):
         hf_abi = False
         arch_version = 6
    +    neon = False
     
     class AbstractARMCPU(AbstractLLCPU):
     
    @@ -48,6 +49,7 @@
         def setup_once(self):
             self.cpuinfo.arch_version = detect_arch_version()
             self.cpuinfo.hf_abi = detect_hardfloat()
    +        self.cpuinfo.neon = detect_neon()
             #self.codemap.setup()
             self.assembler.setup_once()
     
    diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py
    --- a/rpython/jit/backend/llgraph/runner.py
    +++ b/rpython/jit/backend/llgraph/runner.py
    @@ -455,7 +455,7 @@
                     if box is not frame.current_op:
                         value = frame.env[box]
                     else:
    -                    value = box.getvalue()    # 0 or 0.0 or NULL
    +                    value = 0 # box.getvalue()    # 0 or 0.0 or NULL
                 else:
                     value = None
                 values.append(value)
    @@ -472,6 +472,13 @@
     
         # ------------------------------------------------------------
     
    +    def setup_descrs(self):
    +        all_descrs = []
    +        for k, v in self.descrs.iteritems():
    +            v.descr_index = len(all_descrs)
    +            all_descrs.append(v)
    +        return all_descrs
    +
         def calldescrof(self, FUNC, ARGS, RESULT, effect_info):
             key = ('call', getkind(RESULT),
                    tuple([getkind(A) for A in ARGS]),
    diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py
    --- a/rpython/jit/backend/llsupport/assembler.py
    +++ b/rpython/jit/backend/llsupport/assembler.py
    @@ -331,7 +331,7 @@
             counter = self._register_counter(tp, number, token)
             c_adr = ConstInt(rffi.cast(lltype.Signed, counter))
             operations.append(
    -            ResOperation(rop.INCREMENT_DEBUG_COUNTER, [c_adr], None))
    +            ResOperation(rop.INCREMENT_DEBUG_COUNTER, [c_adr]))
     
         def _register_counter(self, tp, number, token):
             # YYY very minor leak -- we need the counters to stay alive
    diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py
    --- a/rpython/jit/backend/llsupport/descr.py
    +++ b/rpython/jit/backend/llsupport/descr.py
    @@ -21,6 +21,30 @@
             self._cache_call = {}
             self._cache_interiorfield = {}
     
    +    def setup_descrs(self):
    +        all_descrs = []
    +        for k, v in self._cache_size.iteritems():
    +            v.descr_index = len(all_descrs)
    +            all_descrs.append(v)
    +        for k, v in self._cache_field.iteritems():
    +            for k1, v1 in v.iteritems():
    +                v1.descr_index = len(all_descrs)
    +                all_descrs.append(v1)
    +        for k, v in self._cache_array.iteritems():
    +            v.descr_index = len(all_descrs)
    +            all_descrs.append(v)
    +        for k, v in self._cache_arraylen.iteritems():
    +            v.descr_index = len(all_descrs)
    +            all_descrs.append(v)
    +        for k, v in self._cache_call.iteritems():
    +            v.descr_index = len(all_descrs)
    +            all_descrs.append(v)
    +        for k, v in self._cache_interiorfield.iteritems():
    +            v.descr_index = len(all_descrs)
    +            all_descrs.append(v)
    +        assert len(all_descrs) < 2**15
    +        return all_descrs
    +
         def init_size_descr(self, STRUCT, sizedescr):
             pass
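
The new setup_descrs() above applies one pattern to every cache: hand out consecutive descr_index values and collect everything into a flat list, so a descriptor can later be referred to by a small integer; the assert caps the count at 2**15, presumably so an index fits a 16-bit encoding. A toy sketch of that pattern (ToyDescr and setup_descrs_sketch are illustrative names, not part of the patch):

    class ToyDescr(object):
        descr_index = -1       # filled in by the setup pass

    def setup_descrs_sketch(caches):
        # caches: dicts mapping arbitrary keys to descr-like objects
        all_descrs = []
        for cache in caches:
            for descr in cache.itervalues():
                descr.descr_index = len(all_descrs)
                all_descrs.append(descr)
        assert len(all_descrs) < 2**15   # mirrors the assert in the real code
        return all_descrs

    descrs = setup_descrs_sketch([{('size', 1): ToyDescr()},
                                  {('field', 'x'): ToyDescr()}])
    assert [d.descr_index for d in descrs] == [0, 1]
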
     
    diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py
    --- a/rpython/jit/backend/llsupport/llmodel.py
    +++ b/rpython/jit/backend/llsupport/llmodel.py
    @@ -316,6 +316,9 @@
                 return ll_frame
             return execute_token
     
    +    def setup_descrs(self):
    +        return self.gc_ll_descr.setup_descrs()
    +
         # ------------------- helpers and descriptions --------------------
     
         @staticmethod
    diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py
    --- a/rpython/jit/backend/llsupport/regalloc.py
    +++ b/rpython/jit/backend/llsupport/regalloc.py
    @@ -683,7 +683,7 @@
         for i in range(len(operations)-1, -1, -1):
             op = operations[i]
             if op.type != 'v':
    -            if op not in last_used and op.has_no_side_effect():
    +            if op not in last_used and rop.has_no_side_effect(op.opnum):
                     continue
             opnum = op.getopnum()
             for j in range(op.numargs()):
    @@ -695,7 +695,7 @@
                 if opnum != rop.JUMP and opnum != rop.LABEL:
                     if arg not in last_real_usage:
                         last_real_usage[arg] = i
    -        if op.is_guard():
    +        if rop.is_guard(op.opnum):
                 for arg in op.getfailargs():
                     if arg is None: # hole
                         continue
    @@ -732,14 +732,7 @@
         return longevity, last_real_usage
     
     def is_comparison_or_ovf_op(opnum):
    -    from rpython.jit.metainterp.resoperation import opclasses
    -    cls = opclasses[opnum]
    -    # hack hack: in theory they are instance method, but they don't use
    -    # any instance field, we can use a fake object
    -    class Fake(cls):
    -        pass
    -    op = Fake()
    -    return op.is_comparison() or op.is_ovf()
    +    return rop.is_comparison(opnum) or rop.is_ovf(opnum)
     
     def valid_addressing_size(size):
         return size == 1 or size == 2 or size == 4 or size == 8
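
The is_comparison_or_ovf_op() change above is representative of the whole patch: predicates that used to be instance methods on the op classes (hence the old "fake object" hack) are now looked up as static helpers on rop, keyed only by the opcode number. A self-contained sketch of that shape; rop_sketch and its opcode numbers are made up for illustration, and the real helpers test opnum ranges rather than sets:

    class rop_sketch(object):
        _COMPARISON_OPS = frozenset([10, 11, 12])   # stand-ins for INT_LT, INT_LE, INT_EQ
        _OVF_OPS = frozenset([40, 41])              # stand-ins for INT_ADD_OVF, INT_SUB_OVF

        @staticmethod
        def is_comparison(opnum):
            return opnum in rop_sketch._COMPARISON_OPS

        @staticmethod
        def is_ovf(opnum):
            return opnum in rop_sketch._OVF_OPS

    def is_comparison_or_ovf_op(opnum):
        # no instance (fake or otherwise) needed any more
        return rop_sketch.is_comparison(opnum) or rop_sketch.is_ovf(opnum)

    assert is_comparison_or_ovf_op(10)
    assert is_comparison_or_ovf_op(41)
    assert not is_comparison_or_ovf_op(99)
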
    diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py
    --- a/rpython/jit/backend/llsupport/rewrite.py
    +++ b/rpython/jit/backend/llsupport/rewrite.py
    @@ -103,7 +103,7 @@
                         orig_op.set_forwarded(op)
                         replaced = True
                     op.setarg(i, arg)
    -        if op.is_guard():
    +        if rop.is_guard(op.opnum):
                 if not replaced:
                     op = op.copy_and_change(op.getopnum())
                     orig_op.set_forwarded(op)
    @@ -212,7 +212,7 @@
             #                self._emit_mul_if_factor_offset_not_supported(v_length, scale, 0)
             #        op.setarg(1, ConstInt(scale))
             #        op.setarg(2, v_length)
    -        if op.is_getarrayitem() or \
    +        if rop.is_getarrayitem(opnum) or \
                opnum in (rop.GETARRAYITEM_RAW_I,
                          rop.GETARRAYITEM_RAW_F):
                 self.handle_getarrayitem(op)
    @@ -324,13 +324,13 @@
                 if self.transform_to_gc_load(op):
                     continue
                 # ---------- turn NEWxxx into CALL_MALLOC_xxx ----------
    -            if op.is_malloc():
    +            if rop.is_malloc(op.opnum):
                     self.handle_malloc_operation(op)
                     continue
    -            if (op.is_guard() or
    +            if (rop.is_guard(op.opnum) or
                         self.could_merge_with_next_guard(op, i, operations)):
                     self.emit_pending_zeros()
    -            elif op.can_malloc():
    +            elif rop.can_malloc(op.opnum):
                     self.emitting_an_operation_that_can_collect()
                 elif op.getopnum() == rop.LABEL:
                     self.emitting_an_operation_that_can_collect()
    @@ -370,8 +370,8 @@
             # return True in cases where the operation and the following guard
             # should likely remain together.  Simplified version of
             # can_merge_with_next_guard() in llsupport/regalloc.py.
    -        if not op.is_comparison():
    -            return op.is_ovf()    # int_xxx_ovf() / guard_no_overflow()
    +        if not rop.is_comparison(op.opnum):
    +            return rop.is_ovf(op.opnum)    # int_xxx_ovf() / guard_no_overflow()
             if i + 1 >= len(operations):
                 return False
             next_op = operations[i + 1]
    @@ -400,7 +400,6 @@
             # it's hard to test all cases).  Rewrite it away.
             value = int(opnum == rop.GUARD_FALSE)
             op1 = ResOperation(rop.SAME_AS_I, [ConstInt(value)])
    -        op1.setint(value)
             self.emit_op(op1)
             lst = op.getfailargs()[:]
             lst[i] = op1
    @@ -633,8 +632,7 @@
                 args = [frame, arglist[jd.index_of_virtualizable]]
             else:
                 args = [frame]
    -        call_asm = ResOperation(op.getopnum(), args,
    -                                op.getdescr())
    +        call_asm = ResOperation(op.getopnum(), args, descr=op.getdescr())
             self.replace_op_with(self.get_box_replacement(op), call_asm)
             self.emit_op(call_asm)
     
    @@ -708,7 +706,7 @@
         def _gen_call_malloc_gc(self, args, v_result, descr):
             """Generate a CALL_MALLOC_GC with the given args."""
             self.emitting_an_operation_that_can_collect()
    -        op = ResOperation(rop.CALL_MALLOC_GC, args, descr)
    +        op = ResOperation(rop.CALL_MALLOC_GC, args, descr=descr)
             self.replace_op_with(v_result, op)
             self.emit_op(op)
             # In general, don't add v_result to write_barrier_applied:
    diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py
    --- a/rpython/jit/backend/ppc/regalloc.py
    +++ b/rpython/jit/backend/ppc/regalloc.py
    @@ -286,7 +286,8 @@
                 self.assembler.mc.mark_op(op)
                 self.rm.position = i
                 self.fprm.position = i
    -            if op.has_no_side_effect() and op not in self.longevity:
    +            opnum = op.opnum
    +            if rop.has_no_side_effect(opnum) and op not in self.longevity:
                     i += 1
                     self.possibly_free_vars_for_op(op)
                     continue
    @@ -298,8 +299,7 @@
                     else:
                         self.fprm.temp_boxes.append(box)
                 #
    -            opnum = op.getopnum()
    -            if not we_are_translated() and opnum == -127:
    +            if not we_are_translated() and opnum == rop.FORCE_SPILL:
                     self._consider_force_spill(op)
                 else:
                     arglocs = oplist[opnum](self, op)
    diff --git a/rpython/jit/backend/test/test_ll_random.py b/rpython/jit/backend/test/test_ll_random.py
    --- a/rpython/jit/backend/test/test_ll_random.py
    +++ b/rpython/jit/backend/test/test_ll_random.py
    @@ -2,6 +2,7 @@
     from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr
     from rpython.rtyper import rclass
     from rpython.jit.backend.test import test_random
    +from rpython.jit.backend.test.test_random import getint, getref_base, getref
     from rpython.jit.metainterp.resoperation import ResOperation, rop, optypes
     from rpython.jit.metainterp.history import ConstInt, ConstPtr, getkind
     from rpython.jit.codewriter import heaptracker
    @@ -169,7 +170,7 @@
             if length == 0:
                 raise test_random.CannotProduceOperation
             v_index = r.choice(self.intvars)
    -        if not (0 <= v_index.getint() < length):
    +        if not (0 <= getint(v_index) < length):
                 v_index = ConstInt(r.random_integer() % length)
             return v_index
     
    @@ -311,7 +312,7 @@
         def field_descr(self, builder, r):
             v, A = builder.get_structptr_var(r, type=lltype.Array,
                                              array_of_structs=True)
    -        array = v.getref(lltype.Ptr(A))
    +        array = getref(lltype.Ptr(A), v)
             v_index = builder.get_index(len(array), r)
             choice = []
             for name in A.OF._names:
    @@ -344,7 +345,7 @@
                     w = ConstInt(r.random_integer())
                 else:
                     w = r.choice(builder.intvars)
    -            value = w.getint()
    +            value = getint(w)
                 if rffi.cast(lltype.Signed, rffi.cast(TYPE, value)) == value:
                     break
             builder.do(self.opnum, [v, w], descr)
    @@ -357,7 +358,7 @@
                     w = ConstInt(r.random_integer())
                 else:
                     w = r.choice(builder.intvars)
    -            value = w.getint()
    +            value = getint(w)
                 if rffi.cast(lltype.Signed, rffi.cast(TYPE, value)) == value:
                     break
             builder.do(self.opnum, [v, v_index, w], descr)
    @@ -389,7 +390,7 @@
     class GetArrayItemOperation(ArrayOperation):
         def field_descr(self, builder, r):
             v, A = builder.get_arrayptr_var(r)
    -        array = v.getref(lltype.Ptr(A))
    +        array = getref(lltype.Ptr(A), v)
             v_index = builder.get_index(len(array), r)
             descr = self.array_descr(builder, A)
             return v, A, v_index, descr
    @@ -411,7 +412,7 @@
                     w = ConstInt(r.random_integer())
                 else:
                     w = r.choice(builder.intvars)
    -            value = w.getint()
    +            value = getint(w)
                 if rffi.cast(lltype.Signed, rffi.cast(A.OF, value)) == value:
                     break
             builder.do(self.opnum, [v, v_index, w], descr)
    @@ -455,7 +456,7 @@
             v_ptr = builder.do(self.opnum, [v_length])
             getattr(builder, self.builder_cache).append(v_ptr)
             # Initialize the string. Is there a better way to do this?
    -        for i in range(v_length.getint()):
    +        for i in range(getint(v_length)):
                 v_index = ConstInt(i)
                 v_char = ConstInt(r.random_integer() % self.max)
                 builder.do(self.set_char, [v_ptr, v_index, v_char])
    @@ -471,9 +472,9 @@
             current = getattr(builder, self.builder_cache)
             if current and r.random() < .8:
                 v_string = r.choice(current)
    -            string = v_string.getref(self.ptr)
    +            string = getref(self.ptr, v_string)
             else:
    -            string = self.alloc(builder.get_index(500, r).getint())
    +            string = self.alloc(getint(builder.get_index(500, r)))
                 v_string = ConstPtr(lltype.cast_opaque_ptr(llmemory.GCREF, string))
                 current.append(v_string)
             for i in range(len(string.chars)):
    @@ -484,7 +485,7 @@
     class AbstractGetItemOperation(AbstractStringOperation):
         def produce_into(self, builder, r):
             v_string = self.get_string(builder, r)
    -        v_index = builder.get_index(len(v_string.getref(self.ptr).chars), r)
    +        v_index = builder.get_index(len(getref(self.ptr, v_string).chars), r)
             builder.do(self.opnum, [v_string, v_index])
     
     class AbstractSetItemOperation(AbstractStringOperation):
    @@ -492,7 +493,7 @@
             v_string = self.get_string(builder, r)
             if isinstance(v_string, ConstPtr):
                 raise test_random.CannotProduceOperation  # setitem(Const, ...)
    -        v_index = builder.get_index(len(v_string.getref(self.ptr).chars), r)
    +        v_index = builder.get_index(len(getref(self.ptr, v_string).chars), r)
             v_target = ConstInt(r.random_integer() % self.max)
             builder.do(self.opnum, [v_string, v_index, v_target])
     
    @@ -505,15 +506,15 @@
         def produce_into(self, builder, r):
             v_srcstring = self.get_string(builder, r)
             v_dststring = self.get_string(builder, r)
    -        src = v_srcstring.getref(self.ptr)
    -        dst = v_dststring.getref(self.ptr)
    +        src = getref(self.ptr, v_srcstring)
    +        dst = getref(self.ptr, v_dststring)
             if src == dst:                                # because it's not a
                 raise test_random.CannotProduceOperation  # memmove(), but memcpy()
             srclen = len(src.chars)
             dstlen = len(dst.chars)
             v_length = builder.get_index(min(srclen, dstlen), r)
    -        v_srcstart = builder.get_index(srclen - v_length.getint() + 1, r)
    -        v_dststart = builder.get_index(dstlen - v_length.getint() + 1, r)
    +        v_srcstart = builder.get_index(srclen - getint(v_length) + 1, r)
    +        v_dststart = builder.get_index(dstlen - getint(v_length) + 1, r)
             builder.do(self.opnum, [v_srcstring, v_dststring,
                                     v_srcstart, v_dststart, v_length])
     
    @@ -585,7 +586,7 @@
             """ % funcargs).compile()
             vtableptr = v._hints['vtable']._as_ptr()
             d = {
    -            'ptr': S.getref_base(),
    +            'ptr': getref_base(S),
                 'vtable' : vtableptr,
                 'LLException' : LLException,
                 }
    diff --git a/rpython/jit/backend/test/test_random.py b/rpython/jit/backend/test/test_random.py
    --- a/rpython/jit/backend/test/test_random.py
    +++ b/rpython/jit/backend/test/test_random.py
    @@ -11,11 +11,9 @@
     from rpython.jit.metainterp.executor import _execute_arglist, wrap_constant
     from rpython.jit.metainterp.resoperation import opname
     from rpython.jit.codewriter import longlong
    -from rpython.rtyper.lltypesystem import lltype, rstr
    +from rpython.rtyper.lltypesystem import lltype, llmemory, rstr
     from rpython.rtyper import rclass
     
    -class PleaseRewriteMe(Exception):
    -    pass
     
     class DummyLoop(object):
         def __init__(self, subops):
    @@ -27,6 +25,41 @@
         def execute_raised(self, exc, constant=False):
             self._got_exc = exc
     
    +
    +def getint(v):
    +    if isinstance(v, (ConstInt, InputArgInt)):
    +        return v.getint()
    +    else:
    +        return v._example_int
    +
    +def getfloatstorage(v):
    +    if isinstance(v, (ConstFloat, InputArgFloat)):
    +        return v.getfloatstorage()
    +    else:
    +        return v._example_float
    +
    +def getfloat(v):
    +    return longlong.getrealfloat(getfloatstorage(v))
    +
    +def getref_base(v):
    +    if isinstance(v, (ConstPtr, InputArgRef)):
    +        return v.getref_base()
    +    else:
    +        return v._example_ref
    +
    +def getref(PTR, v):
    +    return lltype.cast_opaque_ptr(PTR, getref_base(v))
    +
    +def constbox(v):
    +    if v.type == INT:
    +        return ConstInt(getint(v))
    +    if v.type == FLOAT:
    +        return ConstFloat(getfloatstorage(v))
    +    if v.type == REF:
    +        return ConstPtr(getref_base(v))
    +    assert 0, v.type
    +
    +
     class OperationBuilder(object):
         def __init__(self, cpu, loop, vars):
             self.cpu = cpu
    @@ -57,11 +90,21 @@
         def do(self, opnum, argboxes, descr=None):
             self.fakemetainterp._got_exc = None
             op = ResOperation(opnum, argboxes, descr)
    +        argboxes = map(constbox, argboxes)
             result = _execute_arglist(self.cpu, self.fakemetainterp,
                                       opnum, argboxes, descr)
             if result is not None:
    -            c_result = wrap_constant(result)
    -            op.copy_value_from(c_result)
    +            if lltype.typeOf(result) == lltype.Signed:
    +                op._example_int = result
    +            elif isinstance(result, bool):
    +                op._example_int = int(result)
    +            elif lltype.typeOf(result) == longlong.FLOATSTORAGE:
    +                op._example_float = result
    +            elif isinstance(result, float):
    +                op._example_float = longlong.getfloatstorage(result)
    +            else:
    +                assert lltype.typeOf(result) == llmemory.GCREF
    +                op._example_ref = result
             self.loop.operations.append(op)
             return op
     
    @@ -101,7 +144,7 @@
                 if v in names:
                     args.append(names[v])
                 elif isinstance(v, ConstPtr):
    -                assert not v.getref_base() # otherwise should be in the names
    +                assert not getref_base(v) # otherwise should be in the names
                     args.append('ConstPtr(lltype.nullptr(llmemory.GCREF.TO))')
                 elif isinstance(v, ConstFloat):
                     args.append('ConstFloat(longlong.getfloatstorage(%r))'
    @@ -198,10 +241,10 @@
             #
             def writevar(v, nameprefix, init=''):
                 if nameprefix == 'const_ptr':
    -                if not v.getref_base():
    +                if not getref_base(v):
                         return 'lltype.nullptr(llmemory.GCREF.TO)'
    -                TYPE = v.getref_base()._obj.ORIGTYPE
    -                cont = lltype.cast_opaque_ptr(TYPE, v.getref_base())
    +                TYPE = getref_base(v)._obj.ORIGTYPE
    +                cont = lltype.cast_opaque_ptr(TYPE, getref_base(v))
                     if TYPE.TO._is_varsize():
                         if isinstance(TYPE.TO, lltype.GcStruct):
                             lgt = len(cont.chars)
    @@ -252,9 +295,9 @@
                 for i, v in enumerate(self.loop.inputargs):
                     assert not isinstance(v, Const)
                     if v.type == FLOAT:
    -                    vals.append("longlong.getfloatstorage(%r)" % v.getfloat())
    +                    vals.append("longlong.getfloatstorage(%r)" % getfloat(v))
                     else:
    -                    vals.append("%r" % v.getint())
    +                    vals.append("%r" % getint(v))
                 print >>s, '    loop_args = [%s]' % ", ".join(vals)
             print >>s, '    frame = cpu.execute_token(looptoken, *loop_args)'
             if self.should_fail_by is None:
    @@ -264,10 +307,10 @@
             for i, v in enumerate(fail_args):
                 if v.type == FLOAT:
                     print >>s, ('    assert longlong.getrealfloat('
    -                    'cpu.get_float_value(frame, %d)) == %r' % (i, v.getfloatstorage()))
    +                    'cpu.get_float_value(frame, %d)) == %r' % (i, getfloatstorage(v)))
                 else:
                     print >>s, ('    assert cpu.get_int_value(frame, %d) == %d'
    -                            % (i, v.getint()))
    +                            % (i, getint(v)))
             self.names = names
             s.flush()
     
    @@ -295,7 +338,7 @@
                     builder.intvars.append(v_result)
                     boolres = self.boolres
                     if boolres == 'sometimes':
    -                    boolres = v_result.getint() in [0, 1]
    +                    boolres = getint(v_result) in [0, 1]
                     if boolres:
                         builder.boolvars.append(v_result)
                 elif v_result.type == FLOAT:
    @@ -346,10 +389,10 @@
                 v_second = ConstInt((value & self.and_mask) | self.or_mask)
             else:
                 v = r.choice(builder.intvars)
    -            v_value = v.getint()
    +            v_value = getint(v)
                 if (v_value & self.and_mask) != v_value:
                     v = builder.do(rop.INT_AND, [v, ConstInt(self.and_mask)])
    -            v_value = v.getint()
    +            v_value = getint(v)
                 if (v_value | self.or_mask) != v_value:
                     v = builder.do(rop.INT_OR, [v, ConstInt(self.or_mask)])
                 v_second = v
    @@ -395,9 +438,9 @@
                 v_second = ConstFloat(r.random_float_storage())
             else:
                 v_second = r.choice(builder.floatvars)
    -        if abs(v_first.getfloat()) > 1E100 or abs(v_second.getfloat()) > 1E100:
    +        if abs(getfloat(v_first)) > 1E100 or abs(getfloat(v_second)) > 1E100:
                 raise CannotProduceOperation     # avoid infinities
    -        if abs(v_second.getfloat()) < 1E-100:
    +        if abs(getfloat(v_second)) < 1E-100:
                 raise CannotProduceOperation     # e.g. division by zero error
             self.put(builder, [v_first, v_second])
     
    @@ -432,7 +475,7 @@
             if not builder.floatvars:
                 raise CannotProduceOperation
             box = r.choice(builder.floatvars)
    -        if not (-sys.maxint-1 <= box.getfloat() <= sys.maxint):
    +        if not (-sys.maxint-1 <= getfloat(box) <= sys.maxint):
                 raise CannotProduceOperation      # would give an overflow
             self.put(builder, [box])
     
    @@ -440,8 +483,8 @@
         def gen_guard(self, builder, r):
             v = builder.get_bool_var(r)
             op = ResOperation(self.opnum, [v])
    -        passing = ((self.opnum == rop.GUARD_TRUE and v.getint()) or
    -                   (self.opnum == rop.GUARD_FALSE and not v.getint()))
    +        passing = ((self.opnum == rop.GUARD_TRUE and getint(v)) or
    +                   (self.opnum == rop.GUARD_FALSE and not getint(v)))
             return op, passing
     
         def produce_into(self, builder, r):
    @@ -459,8 +502,8 @@
                 raise CannotProduceOperation
             box = r.choice(builder.ptrvars)[0]
             op = ResOperation(self.opnum, [box])
    -        passing = ((self.opnum == rop.GUARD_NONNULL and box.getref_base()) or
    -                   (self.opnum == rop.GUARD_ISNULL and not box.getref_base()))
    +        passing = ((self.opnum == rop.GUARD_NONNULL and getref_base(box)) or
    +                   (self.opnum == rop.GUARD_ISNULL and not getref_base(box)))
             return op, passing
     
     class GuardValueOperation(GuardOperation):
    @@ -470,14 +513,14 @@
                 other = r.choice(builder.intvars)
             else:
                 if r.random() < 0.75:
    -                value = v.getint()
    +                value = getint(v)
                 elif r.random() < 0.5:
    -                value = v.getint() ^ 1
    +                value = getint(v) ^ 1
                 else:
                     value = r.random_integer()
                 other = ConstInt(value)
             op = ResOperation(self.opnum, [v, other])
    -        return op, (v.getint() == other.getint())
    +        return op, (getint(v) == getint(other))
     
     # ____________________________________________________________
     
    @@ -675,7 +718,7 @@
             assert not hasattr(loop, '_targettoken')
             for i in range(position):
                 op = loop.operations[i]
    -            if (not op.has_no_side_effect()
    +            if (not rop.has_no_side_effect(op.opnum)
                         or op.type not in (INT, FLOAT)):
                     position = i
                     break       # cannot move the LABEL later
    @@ -728,9 +771,9 @@
             self.expected = {}
             for v in endvars:
                 if v.type == INT:
    -                self.expected[v] = v.getint()
    +                self.expected[v] = getint(v)
                 elif v.type == FLOAT:
    -                self.expected[v] = v.getfloatstorage()
    +                self.expected[v] = getfloatstorage(v)
                 else:
                     assert 0, v.type
     
    @@ -742,7 +785,7 @@
                 args = []
                 for box in self.startvars:
                     if box not in self.loop.inputargs:
    -                    box = box.constbox()
    +                    box = constbox(box)
                     args.append(box)
                 self.cpu.compile_loop(self.loop.inputargs,
                                       [ResOperation(rop.JUMP, args,
    @@ -760,7 +803,7 @@
     
         def clear_state(self):
             for v, S, fields in self.prebuilt_ptr_consts:
    -            container = v.getref_base()._obj.container
    +            container = getref_base(v)._obj.container
                 for name, value in fields.items():
                     if isinstance(name, str):
                         setattr(container, name, value)
    @@ -781,9 +824,9 @@
             arguments = []
             for box in self.loop.inputargs:
                 if box.type == INT:
    -                arguments.append(box.getint())
    +                arguments.append(getint(box))
                 elif box.type == FLOAT:
    -                arguments.append(box.getfloatstorage())
    +                arguments.append(getfloatstorage(box))
                 else:
                     assert 0, box.type
             deadframe = cpu.execute_token(self.runjitcelltoken(), *arguments)
    @@ -795,7 +838,7 @@
                 if v not in self.expected:
                     assert v.getopnum() == rop.SAME_AS_I   # special case
                     assert isinstance(v.getarg(0), ConstInt)
    -                self.expected[v] = v.getarg(0).getint()
    +                self.expected[v] = getint(v.getarg(0))
                 if v.type == FLOAT:
                     value = cpu.get_float_value(deadframe, i)
                 else:
    @@ -807,7 +850,7 @@
                     )
             exc = cpu.grab_exc_value(deadframe)
             if (self.guard_op is not None and
    -            self.guard_op.is_guard_exception()):
    +            rop.is_guard_exception(self.guard_op.getopnum())):
                 if self.guard_op.getopnum() == rop.GUARD_NO_EXCEPTION:
                     do_assert(exc,
                               "grab_exc_value() should not be %r" % (exc,))
    @@ -840,7 +883,7 @@
             # generate the branch: a sequence of operations that ends in a FINISH
             subloop = DummyLoop([])
             self.subloops.append(subloop)   # keep around for debugging
    -        if guard_op.is_guard_exception():
    +        if rop.is_guard_exception(guard_op.getopnum()):
                 subloop.operations.append(exc_handling(guard_op))
             bridge_builder = self.builder.fork(self.builder.cpu, subloop,
                                                op.getfailargs()[:])
    @@ -876,9 +919,9 @@
                 args = []
                 for x in subset:
                     if x.type == INT:
    -                    args.append(InputArgInt(x.getint()))
    +                    args.append(InputArgInt(getint(x)))
                     elif x.type == FLOAT:
    -                    args.append(InputArgFloat(x.getfloatstorage()))
    +                    args.append(InputArgFloat(getfloatstorage(x)))
                     else:
                         assert 0, x.type
                 rl = RandomLoop(self.builder.cpu, self.builder.fork,
    diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py
    --- a/rpython/jit/backend/x86/regalloc.py
    +++ b/rpython/jit/backend/x86/regalloc.py
    @@ -358,11 +358,11 @@
                 assert self.assembler.mc._frame_size == DEFAULT_FRAME_BYTES
                 self.rm.position = i
                 self.xrm.position = i
    -            if op.has_no_side_effect() and op not in self.longevity:
    +            if rop.has_no_side_effect(op.opnum) and op not in self.longevity:
                     i += 1
                     self.possibly_free_vars_for_op(op)
                     continue
    -            if not we_are_translated() and op.getopnum() == -127:
    +            if not we_are_translated() and op.getopnum() == rop.FORCE_SPILL:
                     self._consider_force_spill(op)
                 else:
                     oplist[op.getopnum()](self, op)
    diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py
    --- a/rpython/jit/backend/zarch/regalloc.py
    +++ b/rpython/jit/backend/zarch/regalloc.py
    @@ -476,7 +476,8 @@
                 self.assembler.mc.mark_op(op)
                 self.rm.position = i
                 self.fprm.position = i
    -            if op.has_no_side_effect() and op not in self.longevity:
    +            opnum = op.getopnum()
    +            if rop.has_no_side_effect(opnum) and op not in self.longevity:
                     i += 1
                     self.possibly_free_vars_for_op(op)
                     continue
    @@ -488,8 +489,7 @@
                     else:
                         self.fprm.temp_boxes.append(box)
                 #
    -            opnum = op.getopnum()
    -            if not we_are_translated() and opnum == -127:
    +            if not we_are_translated() and opnum == rop.FORCE_SPILL:
                     self._consider_force_spill(op)
                 else:
                     arglocs = prepare_oplist[opnum](self, op)
    diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py
    --- a/rpython/jit/metainterp/blackhole.py
    +++ b/rpython/jit/metainterp/blackhole.py
    @@ -1585,7 +1585,6 @@
         def _done_with_this_frame(self):
             # rare case: we only get there if the blackhole interps all returned
             # normally (in general we get a ContinueRunningNormally exception).
    -        sd = self.builder.metainterp_sd
             kind = self._return_type
             if kind == 'v':
                 raise jitexc.DoneWithThisFrameVoid()
    diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py
    --- a/rpython/jit/metainterp/compile.py
    +++ b/rpython/jit/metainterp/compile.py
    @@ -27,12 +27,11 @@
     
     class CompileData(object):
         memo = None
    +    log_noopt = True
         
         def forget_optimization_info(self):
    -        for arg in self.start_label.getarglist():
    +        for arg in self.trace.inputargs:
                 arg.set_forwarded(None)
    -        for op in self.operations:
    -            op.set_forwarded(None)
     
     class LoopCompileData(CompileData):
         """ An object that accumulates all of the necessary info for
    @@ -40,15 +39,13 @@
     
         This is the case of label() ops label()
         """
    -    def __init__(self, start_label, end_label, operations,
    -                 call_pure_results=None, enable_opts=None):
    -        self.start_label = start_label
    -        self.end_label = end_label
    +    def __init__(self, trace, runtime_boxes, call_pure_results=None,
    +                 enable_opts=None):
             self.enable_opts = enable_opts
    -        assert start_label.getopnum() == rop.LABEL
    -        assert end_label.getopnum() == rop.LABEL
    -        self.operations = operations
    +        self.trace = trace
             self.call_pure_results = call_pure_results
    +        assert runtime_boxes is not None
    +        self.runtime_boxes = runtime_boxes
     
         def optimize(self, metainterp_sd, jitdriver_sd, optimizations, unroll):
             from rpython.jit.metainterp.optimizeopt.unroll import (UnrollOptimizer,
    @@ -56,23 +53,21 @@
     
             if unroll:
                 opt = UnrollOptimizer(metainterp_sd, jitdriver_sd, optimizations)
    -            return opt.optimize_preamble(self.start_label, self.end_label,
    -                                         self.operations,
    +            return opt.optimize_preamble(self.trace,
    +                                         self.runtime_boxes,
                                              self.call_pure_results,
                                              self.box_names_memo)
             else:
                 opt = Optimizer(metainterp_sd, jitdriver_sd, optimizations)
    -            return opt.propagate_all_forward(self.start_label.getarglist(),
    -               self.operations, self.call_pure_results)
    +            return opt.propagate_all_forward(self.trace, self.call_pure_results)
     
     class SimpleCompileData(CompileData):
         """ This represents label() ops jump with no extra info associated with
         the label
         """
    -    def __init__(self, start_label, operations, call_pure_results=None,
    +    def __init__(self, trace, call_pure_results=None,
                      enable_opts=None):
    -        self.start_label = start_label
    -        self.operations = operations
    +        self.trace = trace
             self.call_pure_results = call_pure_results
             self.enable_opts = enable_opts
     
    @@ -81,17 +76,17 @@
     
             #assert not unroll
             opt = Optimizer(metainterp_sd, jitdriver_sd, optimizations)
    -        return opt.propagate_all_forward(self.start_label.getarglist(),
    -            self.operations, self.call_pure_results)
    +        return opt.propagate_all_forward(self.trace.get_iter(),
    +            self.call_pure_results)
     
     class BridgeCompileData(CompileData):
         """ This represents ops() with a jump at the end that goes to some
         loop, we need to deal with virtual state and inlining of short preamble
         """
    -    def __init__(self, start_label, operations, call_pure_results=None,
    +    def __init__(self, trace, runtime_boxes, call_pure_results=None,
                      enable_opts=None, inline_short_preamble=False):
    -        self.start_label = start_label
    -        self.operations = operations
    +        self.trace = trace
    +        self.runtime_boxes = runtime_boxes
             self.call_pure_results = call_pure_results
             self.enable_opts = enable_opts
             self.inline_short_preamble = inline_short_preamble
    @@ -100,7 +95,7 @@
             from rpython.jit.metainterp.optimizeopt.unroll import UnrollOptimizer
     
             opt = UnrollOptimizer(metainterp_sd, jitdriver_sd, optimizations)
    -        return opt.optimize_bridge(self.start_label, self.operations,
    +        return opt.optimize_bridge(self.trace, self.runtime_boxes,
                                        self.call_pure_results,
                                        self.inline_short_preamble,
                                        self.box_names_memo)
    @@ -109,12 +104,13 @@
         """ This represents label() ops jump with extra info that's from the
         run of LoopCompileData. Jump goes to the same label
         """
    -    def __init__(self, start_label, end_jump, operations, state,
    +    log_noopt = False
    +
    +    def __init__(self, trace, celltoken, state,
                      call_pure_results=None, enable_opts=None,
                      inline_short_preamble=True):
    -        self.start_label = start_label
    -        self.end_jump = end_jump
    -        self.operations = operations
    +        self.trace = trace
    +        self.celltoken = celltoken
             self.enable_opts = enable_opts
             self.state = state
             self.call_pure_results = call_pure_results
    @@ -125,9 +121,8 @@
     
             assert unroll # we should not be here if it's disabled
             opt = UnrollOptimizer(metainterp_sd, jitdriver_sd, optimizations)
    -        return opt.optimize_peeled_loop(self.start_label, self.end_jump,
    -            self.operations, self.state, self.call_pure_results,
    -            self.inline_short_preamble)
    +        return opt.optimize_peeled_loop(self.trace, self.celltoken, self.state,
    +            self.call_pure_results, self.inline_short_preamble)
     
     def show_procedures(metainterp_sd, procedure=None, error=None):
         # debugging
    @@ -208,23 +203,21 @@
     # ____________________________________________________________
     
     
    -def compile_simple_loop(metainterp, greenkey, start, inputargs, ops, jumpargs,
    -                        enable_opts):
    +def compile_simple_loop(metainterp, greenkey, trace, runtime_args, enable_opts,
    +                        cut_at):
         from rpython.jit.metainterp.optimizeopt import optimize_trace
     
         jitdriver_sd = metainterp.jitdriver_sd
         metainterp_sd = metainterp.staticdata
         jitcell_token = make_jitcell_token(jitdriver_sd)
    -    label = ResOperation(rop.LABEL, inputargs[:], descr=jitcell_token)
    -    jump_op = ResOperation(rop.JUMP, jumpargs[:], descr=jitcell_token)
         call_pure_results = metainterp.call_pure_results
    -    data = SimpleCompileData(label, ops + [jump_op],
    -                                 call_pure_results=call_pure_results,
    -                                 enable_opts=enable_opts)
    +    data = SimpleCompileData(trace, call_pure_results=call_pure_results,
    +                             enable_opts=enable_opts)
         try:
             loop_info, ops = optimize_trace(metainterp_sd, jitdriver_sd,
                                             data, metainterp.box_names_memo)
         except InvalidLoop:
    +        trace.cut_at(cut_at)
             return None
         loop = create_empty_loop(metainterp)
         loop.original_jitcell_token = jitcell_token
    @@ -241,7 +234,7 @@
             loop.check_consistency()
         jitcell_token.target_tokens = [target_token]
         send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, "loop",
    -                         inputargs, metainterp.box_names_memo)
    +                         runtime_args, metainterp.box_names_memo)
         record_loop_or_bridge(metainterp_sd, loop)
         return target_token
     
    @@ -255,6 +248,7 @@
         metainterp_sd = metainterp.staticdata
         jitdriver_sd = metainterp.jitdriver_sd
         history = metainterp.history
    +    trace = history.trace
         warmstate = jitdriver_sd.warmstate
     
         enable_opts = jitdriver_sd.warmstate.enable_opts
    @@ -264,16 +258,16 @@
             enable_opts = enable_opts.copy()
             del enable_opts['unroll']
     
    -    ops = history.operations[start:]
    +    jitcell_token = make_jitcell_token(jitdriver_sd)
    +    cut_at = history.get_trace_position()
    +    history.record(rop.JUMP, jumpargs, None, descr=jitcell_token)
    +    if start != (0, 0, 0):
    +        trace = trace.cut_trace_from(start, inputargs)
         if 'unroll' not in enable_opts or not metainterp.cpu.supports_guard_gc_type:
    -        return compile_simple_loop(metainterp, greenkey, start, inputargs, ops,
    -                                   jumpargs, enable_opts)
    -    jitcell_token = make_jitcell_token(jitdriver_sd)
    -    label = ResOperation(rop.LABEL, inputargs,
    -                         descr=TargetToken(jitcell_token))
    -    end_label = ResOperation(rop.LABEL, jumpargs, descr=jitcell_token)
    +        return compile_simple_loop(metainterp, greenkey, trace, jumpargs,
    +                                   enable_opts, cut_at)
         call_pure_results = metainterp.call_pure_results
    -    preamble_data = LoopCompileData(label, end_label, ops,
    +    preamble_data = LoopCompileData(trace, jumpargs,
                                         call_pure_results=call_pure_results,
                                         enable_opts=enable_opts)
         try:
    @@ -281,17 +275,15 @@
                                                        preamble_data,
                                                        metainterp.box_names_memo)
         except InvalidLoop:
    +        history.cut(cut_at)
             return None
     
         metainterp_sd = metainterp.staticdata
         jitdriver_sd = metainterp.jitdriver_sd
    -    end_label = ResOperation(rop.LABEL, inputargs,
    -                             descr=jitcell_token)
    -    jump_op = ResOperation(rop.JUMP, jumpargs, descr=jitcell_token)
         start_descr = TargetToken(jitcell_token,
                                   original_jitcell_token=jitcell_token)
         jitcell_token.target_tokens = [start_descr]
    -    loop_data = UnrolledLoopData(end_label, jump_op, ops, start_state,
    +    loop_data = UnrolledLoopData(trace, jitcell_token, start_state,
                                      call_pure_results=call_pure_results,
                                      enable_opts=enable_opts)
         try:
    @@ -299,11 +291,12 @@
                                                  loop_data,
                                                  metainterp.box_names_memo)
         except InvalidLoop:
    +        history.cut(cut_at)
             return None
     
         if ((warmstate.vec and jitdriver_sd.vec) or warmstate.vec_all):
             from rpython.jit.metainterp.optimizeopt.vector import optimize_vector
    -        loop_info, loop_ops = optimize_vector(metainterp_sd,
    +        loop_info, loop_ops = optimize_vector(trace, metainterp_sd,
                                                   jitdriver_sd, warmstate,
                                                   loop_info, loop_ops,
                                                   jitcell_token)
    @@ -342,22 +335,20 @@
         to the first operation.
         """
         from rpython.jit.metainterp.optimizeopt import optimize_trace
    -    from rpython.jit.metainterp.optimizeopt.optimizer import BasicLoopInfo
     
    -    history = metainterp.history
    +    trace = metainterp.history.trace.cut_trace_from(start, inputargs)
         metainterp_sd = metainterp.staticdata
         jitdriver_sd = metainterp.jitdriver_sd
    +    history = metainterp.history
     
         loop_jitcell_token = metainterp.get_procedure_token(greenkey)
         assert loop_jitcell_token
     
    -    end_label = ResOperation(rop.LABEL, inputargs[:],
    -                             descr=loop_jitcell_token)
    -    jump_op = ResOperation(rop.JUMP, jumpargs[:], descr=loop_jitcell_token)
    +    cut = history.get_trace_position()
    +    history.record(rop.JUMP, jumpargs[:], None, descr=loop_jitcell_token)
         enable_opts = jitdriver_sd.warmstate.enable_opts
    -    ops = history.operations[start:]
         call_pure_results = metainterp.call_pure_results
    -    loop_data = UnrolledLoopData(end_label, jump_op, ops, start_state,
    +    loop_data = UnrolledLoopData(trace, loop_jitcell_token, start_state,
                                      call_pure_results=call_pure_results,
                                      enable_opts=enable_opts)
         try:
    @@ -366,8 +357,9 @@
                                                  metainterp.box_names_memo)
         except InvalidLoop:
             # Fall back on jumping directly to preamble
    -        jump_op = ResOperation(rop.JUMP, inputargs[:], descr=loop_jitcell_token)
    -        loop_data = UnrolledLoopData(end_label, jump_op, [jump_op], start_state,
    +        history.cut(cut)
    +        history.record(rop.JUMP, jumpargs[:], None, descr=loop_jitcell_token)
    +        loop_data = UnrolledLoopData(trace, loop_jitcell_token, start_state,
                                          call_pure_results=call_pure_results,
                                          enable_opts=enable_opts,
                                          inline_short_preamble=False)
    @@ -376,9 +368,13 @@
                                                      loop_data,
                                                      metainterp.box_names_memo)
             except InvalidLoop:
    +            history.cut(cut)
                 return None
     
    -    label_token = loop_info.label_op.getdescr()
    +    label_op = loop_info.label_op
    +    if label_op is None:
    +        assert False, "unreachable code" # hint for some strange tests
    +    label_token = label_op.getdescr()
         assert isinstance(label_token, TargetToken)
         if label_token.short_preamble:
             metainterp_sd.logger_ops.log_short_preamble([],
    @@ -445,13 +441,13 @@
             box = inputargs[i]
             opnum = OpHelpers.getfield_for_descr(descr)
             emit_op(extra_ops,
    -                ResOperation(opnum, [vable_box], descr))
    +                ResOperation(opnum, [vable_box], descr=descr))
             box.set_forwarded(extra_ops[-1])
             i += 1
         arrayindex = 0
         for descr in vinfo.array_field_descrs:
             arraylen = vinfo.get_array_length(vable, arrayindex)
    -        arrayop = ResOperation(rop.GETFIELD_GC_R, [vable_box], descr)
    +        arrayop = ResOperation(rop.GETFIELD_GC_R, [vable_box], descr=descr)
             emit_op(extra_ops, arrayop)
             arraydescr = vinfo.array_descrs[arrayindex]
             assert i + arraylen <= len(inputargs)
    @@ -1017,9 +1013,9 @@
             metainterp_sd.stats.add_jitcell_token(jitcell_token)
     
     
    -def compile_trace(metainterp, resumekey):
    +def compile_trace(metainterp, resumekey, runtime_boxes):
         """Try to compile a new bridge leading from the beginning of the history
    -    to some existing place.
     +    to some existing place.
         """
     
         from rpython.jit.metainterp.optimizeopt import optimize_trace
    @@ -1037,20 +1033,19 @@
         else:
             inline_short_preamble = True
         inputargs = metainterp.history.inputargs[:]
    -    operations = metainterp.history.operations
    -    label = ResOperation(rop.LABEL, inputargs)
    +    trace = metainterp.history.trace
         jitdriver_sd = metainterp.jitdriver_sd
         enable_opts = jitdriver_sd.warmstate.enable_opts
     
         call_pure_results = metainterp.call_pure_results
     
    -    if operations[-1].getopnum() == rop.JUMP:
    -        data = BridgeCompileData(label, operations[:],
    +    if metainterp.history.ends_with_jump:
    +        data = BridgeCompileData(trace, runtime_boxes,
                                      call_pure_results=call_pure_results,
                                      enable_opts=enable_opts,
                                      inline_short_preamble=inline_short_preamble)
         else:
    -        data = SimpleCompileData(label, operations[:],
    +        data = SimpleCompileData(trace,
                                      call_pure_results=call_pure_results,
                                      enable_opts=enable_opts)
         try:
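
Several call sites in compile.py now share the same record-and-rollback pattern: remember the current trace position, record a tentative JUMP, and cut the trace back to the saved position whenever the optimizer raises InvalidLoop. A toy sketch of that control flow, with ToyHistory standing in for the real History/Trace objects (whose actual API lives in metainterp/history.py and opencoder.py):

    class InvalidLoop(Exception):
        pass

    class ToyHistory(object):
        def __init__(self):
            self.ops = []
        def get_trace_position(self):
            return len(self.ops)
        def record(self, opname, args):
            self.ops.append((opname, args))
        def cut(self, position):
            del self.ops[position:]

    def try_compile(history, optimize):
        cut_at = history.get_trace_position()
        history.record('JUMP', [1, 2, 3])      # tentative closing jump
        try:
            return optimize(history.ops)
        except InvalidLoop:
            history.cut(cut_at)                # leave the trace as it was
            return None

    history = ToyHistory()
    assert try_compile(history, lambda ops: "token") == "token"
    def reject(ops):
        raise InvalidLoop
    assert try_compile(history, reject) is None
    assert history.ops == [('JUMP', [1, 2, 3])]   # only the first, accepted jump
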
    diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py
    --- a/rpython/jit/metainterp/executor.py
    +++ b/rpython/jit/metainterp/executor.py
    @@ -9,7 +9,7 @@
     from rpython.jit.metainterp.history import INT, REF, FLOAT, VOID, AbstractDescr
     from rpython.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr
     from rpython.jit.metainterp import resoperation
    -from rpython.jit.metainterp.resoperation import rop
    +from rpython.jit.metainterp.resoperation import rop, opname
     from rpython.jit.metainterp.blackhole import BlackholeInterpreter, NULL
     from rpython.jit.codewriter import longlong
     
    @@ -314,7 +314,8 @@
     
     def _make_execute_list():
         execute_by_num_args = {}
    -    for key, value in rop.__dict__.items():
    +    for key in opname.values():
    +        value = getattr(rop, key)
             if not key.startswith('_'):
                 if (rop._FINAL_FIRST <= value <= rop._FINAL_LAST or
                     rop._GUARD_FIRST <= value <= rop._GUARD_LAST):
    @@ -384,6 +385,11 @@
                              rop.CALL_MALLOC_NURSERY_VARSIZE_FRAME,
                              rop.NURSERY_PTR_INCREMENT,
                              rop.LABEL,
    +                         rop.ESCAPE_I,
    +                         rop.ESCAPE_N,
    +                         rop.ESCAPE_R,
    +                         rop.ESCAPE_F,
    +                         rop.FORCE_SPILL,
                              rop.SAVE_EXC_CLASS,
                              rop.SAVE_EXCEPTION,
                              rop.RESTORE_EXCEPTION,
    diff --git a/rpython/jit/metainterp/graphpage.py b/rpython/jit/metainterp/graphpage.py
    --- a/rpython/jit/metainterp/graphpage.py
    +++ b/rpython/jit/metainterp/graphpage.py
    @@ -170,7 +170,8 @@
             while True:
                 op = operations[opindex]
                 op_repr = op.repr(self.memo, graytext=True)
    -            if op.getopnum() == rop.DEBUG_MERGE_POINT:
    +            if (op.getopnum() == rop.DEBUG_MERGE_POINT and
    +                    self.metainterp_sd is not None):
                     jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()]
                     if jd_sd._get_printable_location_ptr:
                         s = jd_sd.warmstate.get_location_str(op.getarglist()[3:])
    diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py
    --- a/rpython/jit/metainterp/heapcache.py
    +++ b/rpython/jit/metainterp/heapcache.py
    @@ -1,33 +1,59 @@
    -from rpython.jit.metainterp.history import ConstInt
    +from rpython.jit.metainterp.history import Const, ConstInt
    +from rpython.jit.metainterp.history import FrontendOp, RefFrontendOp
     from rpython.jit.metainterp.resoperation import rop, OpHelpers
    +from rpython.jit.metainterp.executor import constant_from_op
    +from rpython.rlib.rarithmetic import r_uint32, r_uint
    +from rpython.rlib.objectmodel import always_inline
     
    -class HeapCacheValue(object):
    -    def __init__(self, box):
    -        self.box = box
    -        self.likely_virtual = False
    -        self.reset_keep_likely_virtual()
    +""" A big note: we don't do heap caches on Consts, because it used
    +to be done with the identity of the Const instance. This gives very wonky
    +results at best, so we decided to not do it at all. Can be fixed with
    +interning of Consts (already done on trace anyway)
    +"""
     
    -    def reset_keep_likely_virtual(self):
    -        self.known_class = False
    -        self.known_nullity = False
    -        # did we see the allocation during tracing?
    -        self.seen_allocation = False
    -        self.is_unescaped = False
    -        self.nonstandard_virtualizable = False
    -        self.length = None
    -        self.dependencies = None
    +# RefFrontendOp._heapc_flags:
    +HF_LIKELY_VIRTUAL  = 0x01
    +HF_KNOWN_CLASS     = 0x02
    +HF_KNOWN_NULLITY   = 0x04
    +HF_SEEN_ALLOCATION = 0x08   # did we see the allocation during tracing?
    +HF_IS_UNESCAPED    = 0x10
    +HF_NONSTD_VABLE    = 0x20
     
    -    def __repr__(self):
    -        return 'HeapCacheValue(%s)' % (self.box, )
    +_HF_VERSION_INC    = 0x40   # must be last
    +_HF_VERSION_MAX    = r_uint(2 ** 32 - _HF_VERSION_INC)
    +
    + at always_inline
    +def add_flags(ref_frontend_op, flags):
    +    f = ref_frontend_op._get_heapc_flags()
    +    f |= r_uint(flags)
    +    ref_frontend_op._set_heapc_flags(f)
    +
    + at always_inline
    +def remove_flags(ref_frontend_op, flags):
    +    f = ref_frontend_op._get_heapc_flags()
    +    f &= r_uint(~flags)
    +    ref_frontend_op._set_heapc_flags(f)
    +
    + at always_inline
    +def test_flags(ref_frontend_op, flags):
    +    f = ref_frontend_op._get_heapc_flags()
    +    return bool(f & r_uint(flags))
    +
    +def maybe_replace_with_const(box):
    +    if not isinstance(box, Const) and box.is_replaced_with_const():
    +        return constant_from_op(box)
    +    else:
    +        return box
     
     
     class CacheEntry(object):
    -    def __init__(self):
    -        # both are {from_value: to_value} dicts
    +    def __init__(self, heapcache):
    +        # both are {from_ref_box: to_field_box} dicts
             # the first is for boxes where we did not see the allocation, the
             # second for anything else. the reason that distinction makes sense is
             # because if we saw the allocation, we know it cannot alias with
             # anything else where we saw the allocation.
    +        self.heapcache = heapcache
             self.cache_anything = {}
             self.cache_seen_allocation = {}
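
The HF_* bits above, together with the version scheme described further down in HeapCache.__init__, replace the per-box HeapCacheValue objects: each RefFrontendOp carries one unsigned word holding its flags plus the version under which they were written, and bumping a single global counter invalidates every stale flag at once. A deliberately simplified toy of that idea (ToyRefOp and ToyHeapCache are illustrative; the real code packs the version above _HF_VERSION_INC and keeps a second, older counter for is_likely_virtual()):

    HF_KNOWN_CLASS  = 0x02
    _HF_VERSION_INC = 0x40        # flag bits live below this value

    class ToyRefOp(object):
        def __init__(self):
            self._heapc_flags = 0     # flags | version, packed in one integer

    class ToyHeapCache(object):
        def __init__(self):
            self.head_version = 0

        def _get_flags(self, op):
            f = op._heapc_flags
            if f < self.head_version:     # written under an older version:
                return 0                  # every flag reads back as cleared
            return f

        def add_flag(self, op, flag):
            op._heapc_flags = self._get_flags(op) | self.head_version | flag

        def test_flag(self, op, flag):
            return bool(self._get_flags(op) & flag)

        def reset(self):
            # one increment resets the flags of every op ever seen
            self.head_version += _HF_VERSION_INC

    op = ToyRefOp()
    cache = ToyHeapCache()
    cache.add_flag(op, HF_KNOWN_CLASS)
    assert cache.test_flag(op, HF_KNOWN_CLASS)
    cache.reset()
    assert not cache.test_flag(op, HF_KNOWN_CLASS)   # cleared by the version bump
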
     
    @@ -36,112 +62,137 @@
                 self.cache_seen_allocation.clear()
             self.cache_anything.clear()
     
    -    def _getdict(self, value):
    -        if value.seen_allocation:
    +    def _seen_alloc(self, ref_box):
    +        if not isinstance(ref_box, RefFrontendOp):
    +            return False
    +        return self.heapcache._check_flag(ref_box, HF_SEEN_ALLOCATION)
    +
    +    def _getdict(self, seen_alloc):
    +        if seen_alloc:
                 return self.cache_seen_allocation
             else:
                 return self.cache_anything
     
    -    def do_write_with_aliasing(self, value, fieldvalue):
    -        self._clear_cache_on_write(value.seen_allocation)
    -        self._getdict(value)[value] = fieldvalue
    +    def do_write_with_aliasing(self, ref_box, fieldbox):
    +        seen_alloc = self._seen_alloc(ref_box)
    +        self._clear_cache_on_write(seen_alloc)
    +        self._getdict(seen_alloc)[ref_box] = fieldbox
     
    -    def read(self, value):
    -        return self._getdict(value).get(value, None)
    +    def read(self, ref_box):
    +        dict = self._getdict(self._seen_alloc(ref_box))
    +        try:
    +            res_box = dict[ref_box]
    +        except KeyError:
    +            return None
    +        return maybe_replace_with_const(res_box)
     
    -    def read_now_known(self, value, fieldvalue):
    -        self._getdict(value)[value] = fieldvalue
    +    def read_now_known(self, ref_box, fieldbox):
    +        self._getdict(self._seen_alloc(ref_box))[ref_box] = fieldbox
     
         def invalidate_unescaped(self):
             self._invalidate_unescaped(self.cache_anything)
             self._invalidate_unescaped(self.cache_seen_allocation)
     
         def _invalidate_unescaped(self, d):
    -        for value in d.keys():
    -            if not value.is_unescaped:
    -                del d[value]
    +        for ref_box in d.keys():
    +            if not self.heapcache.is_unescaped(ref_box):
    +                del d[ref_box]
     
     
     class FieldUpdater(object):
    -    def __init__(self, heapcache, value, cache, fieldvalue):
    -        self.heapcache = heapcache
    -        self.value = value
    +    def __init__(self, ref_box, cache, fieldbox):
    +        self.ref_box = ref_box
             self.cache = cache
    -        if fieldvalue is not None:
    -            self.currfieldbox = fieldvalue.box
    -        else:
    -            self.currfieldbox = None
    +        self.currfieldbox = fieldbox     # <= read directly from pyjitpl.py
     
         def getfield_now_known(self, fieldbox):
    -        fieldvalue = self.heapcache.getvalue(fieldbox)
    -        self.cache.read_now_known(self.value, fieldvalue)
    +        self.cache.read_now_known(self.ref_box, fieldbox)
     
         def setfield(self, fieldbox):
    -        fieldvalue = self.heapcache.getvalue(fieldbox)
    -        self.cache.do_write_with_aliasing(self.value, fieldvalue)
    +        self.cache.do_write_with_aliasing(self.ref_box, fieldbox)
    +
    +class DummyFieldUpdater(FieldUpdater):
    +    def __init__(self):
    +        self.currfieldbox = None
    +
    +    def getfield_now_known(self, fieldbox):
    +        pass
    +
    +    def setfield(self, fieldbox):
    +        pass
    +
    +dummy_field_updater = DummyFieldUpdater()
     
     
     class HeapCache(object):
         def __init__(self):
    +        # Works with flags stored on RefFrontendOp._heapc_flags.
    +        # There are two ways to do a global resetting of these flags:
    +        # reset() and reset_keep_likely_virtual().  The basic idea is
    +        # to use a version number in each RefFrontendOp, and in order
    +        # to reset the flags globally, we increment the global version
    +        # number in this class.  Then when we read '_heapc_flags' we
    +        # also check if the associated version number is up-to-date
    +        # or not.  More precisely, we have two global version numbers
    +        # here: 'head_version' and 'likely_virtual_version'.  Normally
    +        # we use 'head_version'.  For is_likely_virtual() though, we
    +        # use the other, older version number.
    +        self.head_version = r_uint(0)
    +        self.likely_virtual_version = r_uint(0)
             self.reset()
     
         def reset(self):
    -        # maps boxes to values
    -        self.values = {}
    -        # store the boxes that contain newly allocated objects, this maps the
    -        # boxes to a bool, the bool indicates whether or not the object has
    -        # escaped the trace or not (True means the box never escaped, False
    -        # means it did escape), its presences in the mapping shows that it was
    -        # allocated inside the trace
    -        #if trace_branch:
    -            #self.new_boxes = {}
    -        #    pass
    -        #else:
    -            #for box in self.new_boxes:
    -            #    self.new_boxes[box] = False
    -        #    pass
    -        #if reset_virtuals:
    -        #    self.likely_virtuals = {}      # only for jit.isvirtual()
    -        # Tracks which boxes should be marked as escaped when the key box
    -        # escapes.
    -        #self.dependencies = {}
    -
    +        # Global reset of all flags.  Update both version numbers so
    +        # that any access to '_heapc_flags' will be marked as outdated.
    +        assert self.head_version < _HF_VERSION_MAX
    +        self.head_version += _HF_VERSION_INC
    +        self.likely_virtual_version = self.head_version
    +        #
             # heap cache
             # maps descrs to CacheEntry
             self.heap_cache = {}
             # heap array cache
    -        # maps descrs to {index: {from_value: to_value}} dicts
    +        # maps descrs to {index: CacheEntry} dicts
             self.heap_array_cache = {}
     
         def reset_keep_likely_virtuals(self):
    -        for value in self.values.itervalues():
    -            value.reset_keep_likely_virtual()
    +        # Update only 'head_version', but 'likely_virtual_version' remains
    +        # at its older value.
    +        assert self.head_version < _HF_VERSION_MAX
    +        self.head_version += _HF_VERSION_INC
             self.heap_cache = {}
             self.heap_array_cache = {}
     
    -    def getvalue(self, box, create=True):
    -        value = self.values.get(box, None)
    -        if not value and create:
    -            value = self.values[box] = HeapCacheValue(box)
    -        return value
    +    @always_inline
    +    def test_head_version(self, ref_frontend_op):
    +        return ref_frontend_op._get_heapc_flags() >= self.head_version
     
    -    def getvalues(self, boxes):
    -        return [self.getvalue(box) for box in boxes]
    +    @always_inline
    +    def test_likely_virtual_version(self, ref_frontend_op):
    +        return ref_frontend_op._get_heapc_flags() >= self.likely_virtual_version
    +
    +    def update_version(self, ref_frontend_op):
    +        """Ensure the version of 'ref_frontend_op' is current.  If not,
    +        it will update 'ref_frontend_op' (removing most flags currently set).
    +        """
    +        if not self.test_head_version(ref_frontend_op):
    +            f = self.head_version
    +            if (self.test_likely_virtual_version(ref_frontend_op) and
    +                test_flags(ref_frontend_op, HF_LIKELY_VIRTUAL)):
    +                f |= HF_LIKELY_VIRTUAL
    +            ref_frontend_op._set_heapc_flags(f)
    +            ref_frontend_op._heapc_deps = None
     
         def invalidate_caches(self, opnum, descr, argboxes):
             self.mark_escaped(opnum, descr, argboxes)
             self.clear_caches(opnum, descr, argboxes)
     
         def _escape_from_write(self, box, fieldbox):
    -        value = self.getvalue(box, create=False)
    -        fieldvalue = self.getvalue(fieldbox, create=False)
    -        if (value is not None and value.is_unescaped and
    -                fieldvalue is not None and fieldvalue.is_unescaped):
    -            if value.dependencies is None:
    -                value.dependencies = []
    -            value.dependencies.append(fieldvalue)
    -        elif fieldvalue is not None:
    -            self._escape(fieldvalue)
    +        if self.is_unescaped(box) and self.is_unescaped(fieldbox):
    +            deps = self._get_deps(box)
    +            deps.append(fieldbox)
    +        elif fieldbox is not None:
    +            self._escape_box(fieldbox)
     
         def mark_escaped(self, opnum, descr, argboxes):
             if opnum == rop.SETFIELD_GC:
    @@ -176,19 +227,20 @@
                     self._escape_box(box)
     
         def _escape_box(self, box):
    -        value = self.getvalue(box, create=False)
    -        if not value:
    -            return
    -        self._escape(value)
    -
    -    def _escape(self, value):
    -        value.is_unescaped = False
    -        value.likely_virtual = False
    -        deps = value.dependencies
    -        value.dependencies = None
    -        if deps is not None:
    -            for dep in deps:
    -                self._escape(dep)
    +        if isinstance(box, RefFrontendOp):
    +            remove_flags(box, HF_LIKELY_VIRTUAL | HF_IS_UNESCAPED)
    +            deps = box._heapc_deps
    +            if deps is not None:
    +                if not self.test_head_version(box):
    +                    box._heapc_deps = None
    +                else:
    +                    # 'deps[0]' is abused to store the array length, keep it
    +                    if deps[0] is None:
    +                        box._heapc_deps = None
    +                    else:
    +                        box._heapc_deps = [deps[0]]
    +                    for i in range(1, len(deps)):
    +                        self._escape_box(deps[i])
     
         def clear_caches(self, opnum, descr, argboxes):
             if (opnum == rop.SETFIELD_GC or
    @@ -241,7 +293,8 @@
             self.reset_keep_likely_virtuals()
     
         def _clear_caches_arraycopy(self, opnum, desrc, argboxes, effectinfo):
    -        seen_allocation_of_target = self.getvalue(argboxes[2]).seen_allocation
    +        seen_allocation_of_target = self._check_flag(
    +                                            argboxes[2], HF_SEEN_ALLOCATION)
             if (
                 isinstance(argboxes[3], ConstInt) and
                 isinstance(argboxes[4], ConstInt) and
    @@ -285,74 +338,82 @@
                 return
             self.reset_keep_likely_virtuals()
     
    +    def _get_deps(self, box):
    +        if not isinstance(box, RefFrontendOp):
    +            return None
    +        self.update_version(box)
    +        if box._heapc_deps is None:
    +            box._heapc_deps = [None]
    +        return box._heapc_deps
    +
    +    def _check_flag(self, box, flag):
    +        return (isinstance(box, RefFrontendOp) and
    +                    self.test_head_version(box) and
    +                    test_flags(box, flag))
    +
    +    def _set_flag(self, box, flag):
    +        assert isinstance(box, RefFrontendOp)
    +        self.update_version(box)
    +        add_flags(box, flag)
    +
         def is_class_known(self, box):
    -        value = self.getvalue(box, create=False)
    -        if value:
    -            return value.known_class
    -        return False
    +        return self._check_flag(box, HF_KNOWN_CLASS)
     
         def class_now_known(self, box):
    -        self.getvalue(box).known_class = True
    +        if isinstance(box, Const):
    +            return
    +        self._set_flag(box, HF_KNOWN_CLASS)
     
         def is_nullity_known(self, box):
    -        value = self.getvalue(box, create=False)
    -        if value:
    -            return value.known_nullity
    -        return False
    +        if isinstance(box, Const):
    +            return bool(box.getref_base())
    +        return self._check_flag(box, HF_KNOWN_NULLITY)
     
         def nullity_now_known(self, box):
    -        self.getvalue(box).known_nullity = True
    +        if isinstance(box, Const):
    +            return
    +        self._set_flag(box, HF_KNOWN_NULLITY)
     
         def is_nonstandard_virtualizable(self, box):
    -        value = self.getvalue(box, create=False)
    -        if value:
    -            return value.nonstandard_virtualizable
    -        return False
    +        return self._check_flag(box, HF_NONSTD_VABLE)
     
         def nonstandard_virtualizables_now_known(self, box):
    -        self.getvalue(box).nonstandard_virtualizable = True
    +        self._set_flag(box, HF_NONSTD_VABLE)
     
         def is_unescaped(self, box):
    -        value = self.getvalue(box, create=False)
    -        if value:
    -            return value.is_unescaped
    -        return False
    +        return self._check_flag(box, HF_IS_UNESCAPED)
     
         def is_likely_virtual(self, box):
    -        value = self.getvalue(box, create=False)
    -        if value:
    -            return value.likely_virtual
    -        return False
    +        # note: this is different from _check_flag()
    +        return (isinstance(box, RefFrontendOp) and
    +                self.test_likely_virtual_version(box) and
    +                test_flags(box, HF_LIKELY_VIRTUAL))
     
         def new(self, box):
    -        value = self.getvalue(box)
    -        value.is_unescaped = True
    -        value.likely_virtual = True
    -        value.seen_allocation = True
    +        assert isinstance(box, RefFrontendOp)
    +        self.update_version(box)
    +        add_flags(box, HF_LIKELY_VIRTUAL | HF_SEEN_ALLOCATION | HF_IS_UNESCAPED)
     
         def new_array(self, box, lengthbox):
             self.new(box)
             self.arraylen_now_known(box, lengthbox)
     
         def getfield(self, box, descr):
    -        value = self.getvalue(box, create=False)
    -        if value:
    -            cache = self.heap_cache.get(descr, None)
    -            if cache:
    -                tovalue = cache.read(value)
    -                if tovalue:
    -                    return tovalue.box
    +        cache = self.heap_cache.get(descr, None)
    +        if cache:
    +            return cache.read(box)
             return None
     
         def get_field_updater(self, box, descr):
    -        value = self.getvalue(box)
    +        if not isinstance(box, RefFrontendOp):
    +            return dummy_field_updater
             cache = self.heap_cache.get(descr, None)
             if cache is None:
    -            cache = self.heap_cache[descr] = CacheEntry()
    -            fieldvalue = None
    +            cache = self.heap_cache[descr] = CacheEntry(self)
    +            fieldbox = None
             else:
    -            fieldvalue = cache.read(value)
    -        return FieldUpdater(self, value, cache, fieldvalue)
    +            fieldbox = cache.read(box)
    +        return FieldUpdater(box, cache, fieldbox)
     
         def getfield_now_known(self, box, descr, fieldbox):
             upd = self.get_field_updater(box, descr)
    @@ -365,17 +426,12 @@
         def getarrayitem(self, box, indexbox, descr):
             if not isinstance(indexbox, ConstInt):
                 return None
    -        value = self.getvalue(box, create=False)
    -        if value is None:
    -            return None
             index = indexbox.getint()
             cache = self.heap_array_cache.get(descr, None)
             if cache:
                 indexcache = cache.get(index, None)
                 if indexcache is not None:
    -                resvalue = indexcache.read(value)
    -                if resvalue:
    -                    return resvalue.box
    +                return indexcache.read(box)
             return None
     
         def _get_or_make_array_cache_entry(self, indexbox, descr):
    @@ -385,16 +441,14 @@
             cache = self.heap_array_cache.setdefault(descr, {})
             indexcache = cache.get(index, None)
             if indexcache is None:
    -            cache[index] = indexcache = CacheEntry()
    +            cache[index] = indexcache = CacheEntry(self)
             return indexcache
     
     
         def getarrayitem_now_known(self, box, indexbox, fieldbox, descr):
    -        value = self.getvalue(box)
    -        fieldvalue = self.getvalue(fieldbox)
             indexcache = self._get_or_make_array_cache_entry(indexbox, descr)
             if indexcache:
    -            indexcache.read_now_known(value, fieldvalue)
    +            indexcache.read_now_known(box, fieldbox)
     
         def setarrayitem(self, box, indexbox, fieldbox, descr):
             if not isinstance(indexbox, ConstInt):
    @@ -402,25 +456,31 @@
                 if cache is not None:
                     cache.clear()
                 return
    -        value = self.getvalue(box)
    -        fieldvalue = self.getvalue(fieldbox)
             indexcache = self._get_or_make_array_cache_entry(indexbox, descr)
             if indexcache:
    -            indexcache.do_write_with_aliasing(value, fieldvalue)
    +            indexcache.do_write_with_aliasing(box, fieldbox)
     
         def arraylen(self, box):
    -        value = self.getvalue(box, create=False)
    -        if value and value.length:
    -            return value.length.box
    +        if (isinstance(box, RefFrontendOp) and
    +            self.test_head_version(box) and
    +            box._heapc_deps is not None):
    +            res_box = box._heapc_deps[0]
    +            if res_box is not None:
    +                return maybe_replace_with_const(res_box)
             return None
     
         def arraylen_now_known(self, box, lengthbox):
    -        value = self.getvalue(box)
    -        value.length = self.getvalue(lengthbox)
    +        # we store in '_heapc_deps' a list of boxes: the *first* box is
    +        # the known length or None, and the remaining boxes are the
    +        # regular dependencies.
    +        if isinstance(box, Const):
    +            return
    +        deps = self._get_deps(box)
    +        assert deps is not None
    +        deps[0] = lengthbox
     
         def replace_box(self, oldbox, newbox):
    -        value = self.getvalue(oldbox, create=False)
    -        if value is None:
    -            return
    -        value.box = newbox
    -        self.values[newbox] = value
    +        # here, only for replacing a box with a const
    +        if isinstance(oldbox, FrontendOp) and isinstance(newbox, Const):
    +            assert newbox.same_constant(constant_from_op(oldbox))
    +            oldbox.set_replaced_with_const()
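
The version trick described in the HeapCache.__init__ comment above can be shown in isolation: bumping a global counter invalidates every flag word at once, and any word older than the counter is treated as empty.  A minimal plain-Python sketch (the names here are ad-hoc illustrations, not the RPython classes from this diff):

    FLAG_A = 0x01
    FLAG_B = 0x02
    VERSION_INC = 0x04            # must be larger than every flag bit

    class Box(object):
        flags_word = 0            # plays the role of _heapc_flags

    class Cache(object):
        def __init__(self):
            self.head_version = 0

        def reset_all(self):
            # O(1): every existing flags_word becomes outdated
            self.head_version += VERSION_INC

        def _is_current(self, box):
            return box.flags_word >= self.head_version

        def set_flag(self, box, flag):
            if not self._is_current(box):
                box.flags_word = self.head_version   # drop stale flags
            box.flags_word |= flag

        def test_flag(self, box, flag):
            return self._is_current(box) and bool(box.flags_word & flag)

    cache = Cache()
    b = Box()
    cache.set_flag(b, FLAG_A)
    assert cache.test_flag(b, FLAG_A)
    cache.reset_all()                        # no per-box work
    assert not cache.test_flag(b, FLAG_A)
    cache.set_flag(b, FLAG_B)                # first write re-stamps the version
    assert cache.test_flag(b, FLAG_B) and not cache.test_flag(b, FLAG_A)

reset_keep_likely_virtuals() above is the same idea with a second, older counter, so that HF_LIKELY_VIRTUAL can survive the cheaper reset.
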
    diff --git a/rpython/jit/metainterp/history.py b/rpython/jit/metainterp/history.py
    --- a/rpython/jit/metainterp/history.py
    +++ b/rpython/jit/metainterp/history.py
    @@ -3,12 +3,17 @@
     from rpython.rlib.objectmodel import we_are_translated, Symbolic
     from rpython.rlib.objectmodel import compute_unique_id, specialize
     from rpython.rlib.rarithmetic import r_int64, is_valid_int
    +from rpython.rlib.rarithmetic import LONG_BIT, intmask, r_uint
    +from rpython.rlib.jit import Counters
     
     from rpython.conftest import option
     
    -from rpython.jit.metainterp.resoperation import ResOperation, rop, AbstractValue
    +from rpython.jit.metainterp.resoperation import ResOperation, rop,\
    +    AbstractValue, oparity, AbstractResOp, IntOp, RefOp, FloatOp,\
    +    opclasses
     from rpython.jit.codewriter import heaptracker, longlong
     import weakref
    +from rpython.jit.metainterp import jitexc
     
     # ____________________________________________________________
     
    @@ -22,6 +27,15 @@
     
     FAILARGS_LIMIT = 1000
     
    +class SwitchToBlackhole(jitexc.JitException):
    +    def __init__(self, reason, raising_exception=False):
    +        self.reason = reason
    +        self.raising_exception = raising_exception
    +        # ^^^ must be set to True if the SwitchToBlackhole is raised at a
    +        #     point where the exception on metainterp.last_exc_value
    +        #     is supposed to be raised.  The default False means that it
    +        #     should just be copied into the blackhole interp, but not raised.
    +
     def getkind(TYPE, supports_floats=True,
                       supports_longlong=True,
                       supports_singlefloats=True):
    @@ -72,57 +86,10 @@
                             ) #compute_unique_id(box))
     
     
    -class XxxAbstractValue(object):
    -    __slots__ = ()
    -
    -    def getint(self):
    -        raise NotImplementedError
    -
    -    def getfloatstorage(self):
    -        raise NotImplementedError
    -
    -    def getfloat(self):
    -        return longlong.getrealfloat(self.getfloatstorage())
    -
    -    def getref_base(self):
    -        raise NotImplementedError
    -
    -    def getref(self, TYPE):
    -        raise NotImplementedError
    -    getref._annspecialcase_ = 'specialize:arg(1)'
    -
    -    def constbox(self):
    -        raise NotImplementedError
    -
    -    def getaddr(self):
    -        "Only for raw addresses (BoxInt & ConstInt), not for GC addresses"
    -        raise NotImplementedError
    -
    -    def sort_key(self):
    -        raise NotImplementedError
    -
    -    def nonnull(self):
    -        raise NotImplementedError
    -
    -    def repr_rpython(self):
    -        return '%s' % self
    -
    -    def _get_str(self):
    -        raise NotImplementedError
    -
    -    def same_box(self, other):
    -        return self is other
    -
    -    def same_shape(self, other):
    -        # only structured containers can compare their shape (vector box)
    -        return True
    -
    -    def getaccum(self):
    -        return None
    -
     class AbstractDescr(AbstractValue):
    -    __slots__ = ()
    +    __slots__ = ('descr_index',)
         llopaque = True
    +    descr_index = -1
     
         def repr_of_descr(self):
             return '%r' % (self,)
    @@ -204,7 +171,7 @@
     
     
     class Const(AbstractValue):
    -    __slots__ = ()
    +    _attrs_ = ()
     
         @staticmethod
         def _new(x):
    @@ -638,43 +605,174 @@
     # ____________________________________________________________
     
     
    +FO_REPLACED_WITH_CONST = r_uint(1)
    +FO_POSITION_SHIFT      = 1
    +FO_POSITION_MASK       = r_uint(0xFFFFFFFE)
    +
    +
    +class FrontendOp(AbstractResOp):
    +    type = 'v'
    +    _attrs_ = ('position_and_flags',)
    +
    +    def __init__(self, pos):
    +        # p is the 32-bit position shifted left by one (might be negative,
    +        # but casted to the 32-bit UINT type)
    +        p = rffi.cast(rffi.UINT, pos << FO_POSITION_SHIFT)
    +        self.position_and_flags = r_uint(p)    # zero-extended to a full word
    +
    +    def get_position(self):
    +        # p is the signed 32-bit position, from self.position_and_flags
    +        p = rffi.cast(rffi.INT, self.position_and_flags)
    +        return intmask(p) >> FO_POSITION_SHIFT
    +
    +    def set_position(self, new_pos):
    +        assert new_pos >= 0
    +        self.position_and_flags &= ~FO_POSITION_MASK
    +        self.position_and_flags |= r_uint(new_pos << FO_POSITION_SHIFT)
    +
    +    def is_replaced_with_const(self):
    +        return bool(self.position_and_flags & FO_REPLACED_WITH_CONST)
    +
    +    def set_replaced_with_const(self):
    +        self.position_and_flags |= FO_REPLACED_WITH_CONST
    +
    +    def __repr__(self):
    +        return '%s(0x%x)' % (self.__class__.__name__, self.position_and_flags)
    +
    +class IntFrontendOp(IntOp, FrontendOp):
    +    _attrs_ = ('position_and_flags', '_resint')
    +
    +    def copy_value_from(self, other):
    +        self._resint = other.getint()
    +
    +class FloatFrontendOp(FloatOp, FrontendOp):
    +    _attrs_ = ('position_and_flags', '_resfloat')
    +
    +    def copy_value_from(self, other):
    +        self._resfloat = other.getfloatstorage()
    +
    +class RefFrontendOp(RefOp, FrontendOp):
    +    _attrs_ = ('position_and_flags', '_resref', '_heapc_deps')
    +    if LONG_BIT == 32:
    +        _attrs_ += ('_heapc_flags',)   # on 64 bit, this gets stored into the
    +        _heapc_flags = r_uint(0)       # high 32 bits of 'position_and_flags'
    +    _heapc_deps = None
    +
    +    def copy_value_from(self, other):
    +        self._resref = other.getref_base()
    +
    +    if LONG_BIT == 32:
    +        def _get_heapc_flags(self):
    +            return self._heapc_flags
    +        def _set_heapc_flags(self, value):
    +            self._heapc_flags = value
    +    else:
    +        def _get_heapc_flags(self):
    +            return self.position_and_flags >> 32
    +        def _set_heapc_flags(self, value):
    +            self.position_and_flags = (
    +                (self.position_and_flags & 0xFFFFFFFF) |
    +                (value << 32))
    +
    +
     class History(object):
    +    ends_with_jump = False
    +    trace = None
    +
         def __init__(self):
    -        self.inputargs = None
    -        self.operations = []
    +        self.descr_cache = {}
    +        self.descrs = {}
    +        self.consts = []
    +        self._cache = []
    +
    +    def set_inputargs(self, inpargs, metainterp_sd):
    +        from rpython.jit.metainterp.opencoder import Trace
    +
    +        self.trace = Trace(inpargs, metainterp_sd)
    +        self.inputargs = inpargs
    +        if self._cache:
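
As a side note on the FrontendOp classes above: position_and_flags packs the FO_REPLACED_WITH_CONST bit, a signed 32-bit trace position, and (on 64-bit builds) the heapcache flags into a single word.  A rough pure-Python sketch of that layout, using made-up helper names and plain integers instead of the rffi casts in the real class:

    REPLACED_WITH_CONST = 0x1
    POSITION_SHIFT = 1

    def pack_position(pos):
        # signed position stored in the low 32 bits, shifted left by one
        return (pos << POSITION_SHIFT) & 0xFFFFFFFF

    def unpack_position(word):
        p = word & 0xFFFFFFFF
        if p & 0x80000000:            # sign-extend the 32-bit field
            p -= 0x100000000
        return p >> POSITION_SHIFT

    def set_heapc_flags(word, flags):
        # 64-bit layout: heapcache flags live in the high 32 bits
        return (word & 0xFFFFFFFF) | (flags << 32)

    def get_heapc_flags(word):
        return word >> 32

    w = pack_position(123) | REPLACED_WITH_CONST
    w = set_heapc_flags(w, 0x08)              # e.g. HF_SEEN_ALLOCATION
    assert unpack_position(w) == 123
    assert bool(w & REPLACED_WITH_CONST)
    assert get_heapc_flags(w) == 0x08
    assert unpack_position(pack_position(-1)) == -1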
    
    From pypy.commits at gmail.com  Sun Mar 27 13:08:21 2016
    From: pypy.commits at gmail.com (stefanor)
    Date: Sun, 27 Mar 2016 10:08:21 -0700 (PDT)
    Subject: [pypy-commit] pypy default: Implement our own getauxval() so we can
     support glibc < 2.16
    Message-ID: <56f81385.a151c20a.7046d.ffff8577@mx.google.com>
    
    Author: Stefano Rivera 
    Branch: 
    Changeset: r83395:968b2dd3d289
    Date: 2016-03-27 13:07 -0400
    http://bitbucket.org/pypy/pypy/changeset/968b2dd3d289/
    
    Log:	Implement our own getauxval() so we can support glibc < 2.16
    
    diff --git a/rpython/jit/backend/arm/detect.py b/rpython/jit/backend/arm/detect.py
    --- a/rpython/jit/backend/arm/detect.py
    +++ b/rpython/jit/backend/arm/detect.py
    @@ -1,7 +1,6 @@
     import os
     
     from rpython.translator.tool.cbuild import ExternalCompilationInfo
    -from rpython.rtyper.lltypesystem import lltype, rffi
     from rpython.rtyper.tool import rffi_platform
     from rpython.rlib.clibffi import FFI_DEFAULT_ABI, FFI_SYSV, FFI_VFP
     from rpython.translator.platform import CompilationError
    @@ -16,7 +15,6 @@
         asm volatile("VMOV s0, s1");
     }
         """])
    -getauxval = rffi.llexternal("getauxval", [lltype.Unsigned], lltype.Unsigned)
     
     def detect_hardfloat():
         return FFI_DEFAULT_ABI == FFI_VFP
    @@ -67,6 +65,43 @@
         return n
     
     
    +# Once we can rely on the availability of glibc >= 2.16, replace this with:
    +# from rpython.rtyper.lltypesystem import lltype, rffi
    +# getauxval = rffi.llexternal("getauxval", [lltype.Unsigned], lltype.Unsigned)
    +def getauxval(type_, filename='/proc/self/auxv'):
    +    fd = os.open(filename, os.O_RDONLY, 0644)
    +
    +    buf_size = 2048
    +    struct_size = 8  # 2x uint32
    +    try:
    +        buf = os.read(fd, buf_size)
    +    finally:
    +        os.close(fd)
    +
    +    i = 0
+    while i <= len(buf) - struct_size:
    +        if buf[i] == 0:
    +            i += 1
    +            continue
    +
    +        # We only support little-endian ARM
    +        a_type = (ord(buf[i]) |
    +                  (ord(buf[i+1]) << 8) |
    +                  (ord(buf[i+2]) << 16) |
    +                  (ord(buf[i+3]) << 24))
    +
+        if a_type != type_:
+            i += struct_size
+            continue
    +
    +        a_val = (ord(buf[i+4]) |
    +                 (ord(buf[i+5]) << 8) |
    +                 (ord(buf[i+6]) << 16) |
    +                 (ord(buf[i+7]) << 24))
    +        return a_val
    +
    +    raise KeyError('failed to find auxval type: %i' % type_)
    +
    +
     def detect_neon():
         AT_HWCAP = 16
         HWCAP_NEON = 1 << 12
    diff --git a/rpython/jit/backend/arm/test/test_detect.py b/rpython/jit/backend/arm/test/test_detect.py
    --- a/rpython/jit/backend/arm/test/test_detect.py
    +++ b/rpython/jit/backend/arm/test/test_detect.py
    @@ -1,6 +1,6 @@
     import py
     from rpython.tool.udir import udir
    -from rpython.jit.backend.arm.detect import detect_arch_version
    +from rpython.jit.backend.arm.detect import detect_arch_version, getauxval
     
     cpuinfo = "Processor : ARMv%d-compatible processor rev 7 (v6l)"""
     cpuinfo2 = """processor       : 0
    @@ -29,6 +29,19 @@
     address sizes   : 36 bits physical, 48 bits virtual
     power management:
     """
    +# From a Marvell Armada 370/XP
    +auxv = (
    +    '\x10\x00\x00\x00\xd7\xa8\x1e\x00\x06\x00\x00\x00\x00\x10\x00\x00\x11\x00'
    +    '\x00\x00d\x00\x00\x00\x03\x00\x00\x004\x00\x01\x00\x04\x00\x00\x00 \x00'
    +    '\x00\x00\x05\x00\x00\x00\t\x00\x00\x00\x07\x00\x00\x00\x00\xe0\xf3\xb6'
    +    '\x08\x00\x00\x00\x00\x00\x00\x00\t\x00\x00\x00t\xcf\x04\x00\x0b\x00\x00'
    +    '\x000\x0c\x00\x00\x0c\x00\x00\x000\x0c\x00\x00\r\x00\x00\x000\x0c\x00\x00'
    +    '\x0e\x00\x00\x000\x0c\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x19\x00\x00'
    +    '\x00\x8a\xf3\x87\xbe\x1a\x00\x00\x00\x00\x00\x00\x00\x1f\x00\x00\x00\xec'
    +    '\xff\x87\xbe\x0f\x00\x00\x00\x9a\xf3\x87\xbe\x00\x00\x00\x00\x00\x00\x00'
    +    '\x00'
    +)
    +
     
     def write_cpuinfo(info):
         filepath = udir.join('get_arch_version')
    @@ -46,3 +59,10 @@
         py.test.raises(ValueError,
                 'detect_arch_version(write_cpuinfo(cpuinfo % 5))')
         assert detect_arch_version(write_cpuinfo(cpuinfo2)) == 6
    +
    +
    +def test_getauxval_no_neon():
    +    path = udir.join('auxv')
    +    path.write(auxv, 'wb')
    +    AT_HWCAP = 16
    +    assert getauxval(AT_HWCAP, filename=str(path)) == 2009303
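
For context on the parser above: /proc/self/auxv is a flat sequence of (a_type, a_val) pairs of machine words terminated by an AT_NULL entry, so on the 32-bit little-endian ARM targets this backend supports each pair is 8 bytes.  A small struct-based sketch of the same decoding, for illustration only (not part of the changeset):

    import struct

    AT_NULL = 0
    AT_HWCAP = 16

    def parse_auxv_le32(buf):
        # decode 32-bit little-endian (a_type, a_val) pairs
        entries = {}
        for off in range(0, len(buf) - 7, 8):
            a_type, a_val = struct.unpack_from('<II', buf, off)
            if a_type == AT_NULL:         # terminator
                break
            entries[a_type] = a_val
        return entries

    # synthetic two-entry vector: AT_HWCAP=2009303, then AT_NULL
    sample = struct.pack('<IIII', AT_HWCAP, 2009303, AT_NULL, 0)
    assert parse_auxv_le32(sample)[AT_HWCAP] == 2009303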
    
    From pypy.commits at gmail.com  Sun Mar 27 13:27:28 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Sun, 27 Mar 2016 10:27:28 -0700 (PDT)
    Subject: [pypy-commit] cffi default: ffi.list_types()
    Message-ID: <56f81800.02931c0a.109a4.6f7d@mx.google.com>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r2652:efcb421203b3
    Date: 2016-03-27 19:27 +0200
    http://bitbucket.org/cffi/cffi/changeset/efcb421203b3/
    
    Log:	ffi.list_types()
    
    diff --git a/c/ffi_obj.c b/c/ffi_obj.c
    --- a/c/ffi_obj.c
    +++ b/c/ffi_obj.c
    @@ -862,6 +862,57 @@
         return x;
     }
     
    +PyDoc_STRVAR(ffi_list_types_doc,
    +"Build and return a list of all user type names known in this FFI instance.\n"
    +"\n"
    +"Contains typedef names (sorted in alphabetical order), followed by the\n"
    +"'struct xxx' (sorted) and finally the 'union xxx' (sorted as well).");
    +
    +static PyObject *ffi_list_types(FFIObject *self, PyObject *noargs)
    +{
    +    int is_union, look_for_union;
    +    Py_ssize_t i, n1 = self->types_builder.ctx.num_typenames;
    +    Py_ssize_t n23 = self->types_builder.ctx.num_struct_unions;
    +    PyObject *o, *result = PyList_New(n1);
    +    if (result == NULL)
    +        return NULL;
    +
    +    for (i = 0; i < n1; i++) {
    +        o = PyText_FromString(self->types_builder.ctx.typenames[i].name);
    +        if (o == NULL)
    +            goto error;
    +        PyList_SET_ITEM(result, i, o);
    +    }
    +
    +    for (look_for_union = 0; look_for_union < 2; look_for_union++) {
    +        for (i = 0; i < n23; i++) {
    +            const struct _cffi_struct_union_s *s;
    +            int err;
    +
    +            s = &self->types_builder.ctx.struct_unions[i];
    +            if (s->name[0] == '$')
    +                continue;
    +
    +            is_union = (s->flags & _CFFI_F_UNION) != 0;
    +            if (is_union != look_for_union)
    +                continue;
    +
    +            o = PyText_FromFormat(is_union ? "union %s" : "struct %s", s->name);
    +            if (o == NULL)
    +                goto error;
    +            err = PyList_Append(result, o);
    +            Py_DECREF(o);
    +            if (err < 0)
    +                goto error;
    +        }
    +    }
    +    return result;
    +
    + error:
    +    Py_DECREF(result);
    +    return NULL;
    +}
    +
     PyDoc_STRVAR(ffi_memmove_doc,
     "ffi.memmove(dest, src, n) copies n bytes of memory from src to dest.\n"
     "\n"
    @@ -1030,6 +1081,7 @@
     #endif
      {"init_once",  (PyCFunction)ffi_init_once,  METH_VKW,     ffi_init_once_doc},
      {"integer_const",(PyCFunction)ffi_int_const,METH_VKW,     ffi_int_const_doc},
    + {"list_types", (PyCFunction)ffi_list_types, METH_NOARGS,  ffi_list_types_doc},
      {"memmove",    (PyCFunction)ffi_memmove,    METH_VKW,     ffi_memmove_doc},
      {"new",        (PyCFunction)ffi_new,        METH_VKW,     ffi_new_doc},
     {"new_allocator",(PyCFunction)ffi_new_allocator,METH_VKW,ffi_new_allocator_doc},
    diff --git a/cffi/api.py b/cffi/api.py
    --- a/cffi/api.py
    +++ b/cffi/api.py
    @@ -721,6 +721,27 @@
             raise ValueError("ffi.def_extern() is only available on API-mode FFI "
                              "objects")
     
    +    def list_types(self):
    +        """Build and return a list of all user type names known in this FFI
    +        instance.  Contains typedef names (sorted in alphabetical order),
    +        followed by the 'struct xxx' (sorted) and finally the 'union xxx'
    +        (sorted as well).
    +        """
    +        typedefs = []
    +        structs = []
    +        unions = []
    +        for key in self._parser._declarations:
    +            if key.startswith('typedef '):
    +                typedefs.append(key[8:])
    +            elif key.startswith('struct '):
    +                structs.append(key)
    +            elif key.startswith('union '):
    +                unions.append(key)
    +        typedefs.sort()
    +        structs.sort()
    +        unions.sort()
    +        return typedefs + structs + unions
    +
     
     def _load_backend_lib(backend, name, flags):
         if name is None:
    diff --git a/cffi/recompiler.py b/cffi/recompiler.py
    --- a/cffi/recompiler.py
    +++ b/cffi/recompiler.py
    @@ -1484,4 +1484,7 @@
         def typeof_disabled(*args, **kwds):
             raise NotImplementedError
         ffi._typeof = typeof_disabled
    +    for name in dir(ffi):
    +        if not name.startswith('_') and not hasattr(module.ffi, name):
    +            setattr(ffi, name, NotImplemented)
         return module.lib
    diff --git a/testing/cffi0/test_ffi_backend.py b/testing/cffi0/test_ffi_backend.py
    --- a/testing/cffi0/test_ffi_backend.py
    +++ b/testing/cffi0/test_ffi_backend.py
    @@ -423,3 +423,51 @@
         def test_ffi_def_extern(self):
             ffi = FFI()
             py.test.raises(ValueError, ffi.def_extern)
    +
    +    def test_introspect_typedef(self):
    +        ffi = FFI()
    +        ffi.cdef("typedef int foo_t;")
    +        assert ffi.list_types() == ['foo_t']
    +        assert ffi.typeof('foo_t').kind == 'primitive'
    +        assert ffi.typeof('foo_t').cname == 'int'
    +        #
    +        ffi.cdef("typedef signed char a_t, c_t, g_t, b_t;")
    +        assert ffi.list_types() == ['a_t', 'b_t', 'c_t', 'foo_t', 'g_t']
    +
    +    def test_introspect_struct(self):
    +        ffi = FFI()
    +        ffi.cdef("struct foo_s { int a; };")
    +        assert ffi.list_types() == ['struct foo_s']
    +        assert ffi.typeof('struct foo_s').kind == 'struct'
    +        assert ffi.typeof('struct foo_s').cname == 'struct foo_s'
    +
    +    def test_introspect_union(self):
    +        ffi = FFI()
    +        ffi.cdef("union foo_s { int a; };")
    +        assert ffi.list_types() == ['union foo_s']
    +        assert ffi.typeof('union foo_s').kind == 'union'
    +        assert ffi.typeof('union foo_s').cname == 'union foo_s'
    +
    +    def test_introspect_struct_and_typedef(self):
    +        ffi = FFI()
    +        ffi.cdef("typedef struct { int a; } foo_t;")
    +        assert ffi.list_types() == ['foo_t']
    +        assert ffi.typeof('foo_t').kind == 'struct'
    +        assert ffi.typeof('foo_t').cname == 'foo_t'
    +
    +    def test_introspect_included_type(self):
    +        ffi1 = FFI()
    +        ffi2 = FFI()
    +        ffi1.cdef("typedef signed char schar_t; struct sint_t { int x; };")
    +        ffi2.include(ffi1)
    +        assert ffi1.list_types() == sorted(ffi2.list_types()) == [
    +            'schar_t', 'struct sint_t']
    +
    +    def test_introspect_order(self):
    +        ffi = FFI()
    +        ffi.cdef("union aaa { int a; }; typedef struct ccc { int a; } b;")
    +        ffi.cdef("union g   { int a; }; typedef struct cc  { int a; } bbb;")
    +        ffi.cdef("union aa  { int a; }; typedef struct a   { int a; } bb;")
    +        assert ffi.list_types() == ['b', 'bb', 'bbb',
    +                                    'struct a', 'struct cc', 'struct ccc',
    +                                    'union aa', 'union aaa', 'union g']
    diff --git a/testing/cffi1/test_recompiler.py b/testing/cffi1/test_recompiler.py
    --- a/testing/cffi1/test_recompiler.py
    +++ b/testing/cffi1/test_recompiler.py
    @@ -1743,3 +1743,125 @@
         lib.mycb1 = lib.foo
         assert lib.mycb1(200) == 242
         assert lib.indirect_call(300) == 342
    +
    +def test_introspect_function():
    +    ffi = FFI()
    +    ffi.cdef("float f1(double);")
    +    lib = verify(ffi, 'test_introspect_function', """
    +        float f1(double x) { return x; }
    +    """)
    +    assert dir(lib) == ['f1']
    +    FUNC = ffi.typeof(lib.f1)
    +    assert FUNC.kind == 'function'
    +    assert FUNC.args[0].cname == 'double'
    +    assert FUNC.result.cname == 'float'
    +    assert ffi.typeof(ffi.addressof(lib, 'f1')) is FUNC
    +
    +def test_introspect_global_var():
    +    ffi = FFI()
    +    ffi.cdef("float g1;")
    +    lib = verify(ffi, 'test_introspect_global_var', """
    +        float g1;
    +    """)
    +    assert dir(lib) == ['g1']
    +    FLOATPTR = ffi.typeof(ffi.addressof(lib, 'g1'))
    +    assert FLOATPTR.kind == 'pointer'
    +    assert FLOATPTR.item.cname == 'float'
    +
    +def test_introspect_global_var_array():
    +    ffi = FFI()
    +    ffi.cdef("float g1[100];")
    +    lib = verify(ffi, 'test_introspect_global_var_array', """
    +        float g1[100];
    +    """)
    +    assert dir(lib) == ['g1']
    +    FLOATARRAYPTR = ffi.typeof(ffi.addressof(lib, 'g1'))
    +    assert FLOATARRAYPTR.kind == 'pointer'
    +    assert FLOATARRAYPTR.item.kind == 'array'
    +    assert FLOATARRAYPTR.item.length == 100
    +    assert ffi.typeof(lib.g1) is FLOATARRAYPTR.item
    +
    +def test_introspect_integer_const():
    +    ffi = FFI()
    +    ffi.cdef("#define FOO 42")
    +    lib = verify(ffi, 'test_introspect_integer_const', """
    +        #define FOO 42
    +    """)
    +    assert dir(lib) == ['FOO']
    +    assert lib.FOO == ffi.integer_const('FOO') == 42
    +
    +def test_introspect_typedef():
    +    ffi = FFI()
    +    ffi.cdef("typedef int foo_t;")
    +    lib = verify(ffi, 'test_introspect_typedef', """
    +        typedef int foo_t;
    +    """)
    +    assert ffi.list_types() == ['foo_t']
    +    assert ffi.typeof('foo_t').kind == 'primitive'
    +    assert ffi.typeof('foo_t').cname == 'int'
    +
    +def test_introspect_typedef_multiple():
    +    ffi = FFI()
    +    ffi.cdef("typedef signed char a_t, c_t, g_t, b_t;")
    +    lib = verify(ffi, 'test_introspect_typedef_multiple', """
    +        typedef signed char a_t, c_t, g_t, b_t;
    +    """)
    +    assert ffi.list_types() == ['a_t', 'b_t', 'c_t', 'g_t']
    +
    +def test_introspect_struct():
    +    ffi = FFI()
    +    ffi.cdef("struct foo_s { int a; };")
    +    lib = verify(ffi, 'test_introspect_struct', """
    +        struct foo_s { int a; };
    +    """)
    +    assert ffi.list_types() == ['struct foo_s']
    +    assert ffi.typeof('struct foo_s').kind == 'struct'
    +    assert ffi.typeof('struct foo_s').cname == 'struct foo_s'
    +
    +def test_introspect_union():
    +    ffi = FFI()
    +    ffi.cdef("union foo_s { int a; };")
    +    lib = verify(ffi, 'test_introspect_union', """
    +        union foo_s { int a; };
    +    """)
    +    assert ffi.list_types() == ['union foo_s']
    +    assert ffi.typeof('union foo_s').kind == 'union'
    +    assert ffi.typeof('union foo_s').cname == 'union foo_s'
    +
    +def test_introspect_struct_and_typedef():
    +    ffi = FFI()
    +    ffi.cdef("typedef struct { int a; } foo_t;")
    +    lib = verify(ffi, 'test_introspect_struct_and_typedef', """
    +        typedef struct { int a; } foo_t;
    +    """)
    +    assert ffi.list_types() == ['foo_t']
    +    assert ffi.typeof('foo_t').kind == 'struct'
    +    assert ffi.typeof('foo_t').cname == 'foo_t'
    +
    +def test_introspect_included_type():
    +    SOURCE = """
    +        typedef signed char schar_t;
    +        struct sint_t { int x; };
    +    """
    +    ffi1 = FFI()
    +    ffi1.cdef(SOURCE)
    +    ffi2 = FFI()
    +    ffi2.include(ffi1)
    +    verify(ffi1, "test_introspect_included_type_parent", SOURCE)
    +    verify(ffi2, "test_introspect_included_type", SOURCE)
    +    assert ffi1.list_types() == ffi2.list_types() == [
    +        'schar_t', 'struct sint_t']
    +
    +def test_introspect_order():
    +    ffi = FFI()
    +    ffi.cdef("union aaa { int a; }; typedef struct ccc { int a; } b;")
    +    ffi.cdef("union g   { int a; }; typedef struct cc  { int a; } bbb;")
    +    ffi.cdef("union aa  { int a; }; typedef struct a   { int a; } bb;")
    +    verify(ffi, "test_introspect_order", """
    +        union aaa { int a; }; typedef struct ccc { int a; } b;
    +        union g   { int a; }; typedef struct cc  { int a; } bbb;
    +        union aa  { int a; }; typedef struct a   { int a; } bb;
    +    """)
    +    assert ffi.list_types() == ['b', 'bb', 'bbb',
    +                                'struct a', 'struct cc', 'struct ccc',
    +                                'union aa', 'union aaa', 'union g']
    diff --git a/testing/cffi1/test_verify1.py b/testing/cffi1/test_verify1.py
    --- a/testing/cffi1/test_verify1.py
    +++ b/testing/cffi1/test_verify1.py
    @@ -694,25 +694,14 @@
         assert ffi.string(ffi.cast('enum ee', 11)) == "EE2"
         assert ffi.string(ffi.cast('enum ee', -10)) == "EE3"
         #
    -    # try again
    -    ffi.verify("enum ee { EE1=10, EE2, EE3=-10, EE4 };")
    -    assert ffi.string(ffi.cast('enum ee', 11)) == "EE2"
    -    #
         assert ffi.typeof("enum ee").relements == {'EE1': 10, 'EE2': 11, 'EE3': -10}
         assert ffi.typeof("enum ee").elements == {10: 'EE1', 11: 'EE2', -10: 'EE3'}
     
     def test_full_enum():
         ffi = FFI()
         ffi.cdef("enum ee { EE1, EE2, EE3 };")
    -    ffi.verify("enum ee { EE1, EE2, EE3 };")
    -    py.test.raises(VerificationError, ffi.verify, "enum ee { EE1, EE2 };")
    -    # disabled: for now, we always accept and fix transparently constant values
    -    #e = py.test.raises(VerificationError, ffi.verify,
    -    #                   "enum ee { EE1, EE3, EE2 };")
    -    #assert str(e.value) == 'enum ee: EE2 has the real value 2, not 1'
    -    # extra items cannot be seen and have no bad consequence anyway
    -    lib = ffi.verify("enum ee { EE1, EE2, EE3, EE4 };")
    -    assert lib.EE3 == 2
    +    lib = ffi.verify("enum ee { EE1, EE2, EE3 };")
    +    assert [lib.EE1, lib.EE2, lib.EE3] == [0, 1, 2]
     
     def test_enum_usage():
         ffi = FFI()
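
A quick usage sketch of the new method on an in-line FFI, assuming a cffi build that includes this changeset; it matches the behaviour exercised by the tests above (a single flat list: typedef names, then 'struct xxx', then 'union xxx', each group sorted):

    import cffi

    ffi = cffi.FFI()
    ffi.cdef("""
        typedef int foo_t;
        struct bar_s { int a; };
        union baz_u  { int b; };
    """)
    # with this changeset, list_types() returns one flat list
    print(ffi.list_types())     # ['foo_t', 'struct bar_s', 'union baz_u']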
    
    From pypy.commits at gmail.com  Sun Mar 27 16:43:11 2016
    From: pypy.commits at gmail.com (plan_rich)
    Date: Sun, 27 Mar 2016 13:43:11 -0700 (PDT)
    Subject: [pypy-commit] pypy release-5.x: half way through the ndarray tests
     (endian issues)
    Message-ID: <56f845df.88bc1c0a.cfa45.ffff9e57@mx.google.com>
    
    Author: Richard Plangger 
    Branch: release-5.x
    Changeset: r83397:a2399bd7c38d
    Date: 2016-01-22 10:56 +0100
    http://bitbucket.org/pypy/pypy/changeset/a2399bd7c38d/
    
    Log:	half way through the ndarray tests (endian issues) (grafted from
    	d1a60e57594660bb283bb0bcbbabdfc3780b832e)
    
    diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py
    --- a/pypy/module/micronumpy/test/test_ndarray.py
    +++ b/pypy/module/micronumpy/test/test_ndarray.py
    @@ -1791,6 +1791,7 @@
     
         def test_scalar_view(self):
             from numpy import array
    +        import sys
             a = array(3, dtype='int32')
             b = a.view(dtype='float32')
             assert b.shape == ()
    @@ -1799,17 +1800,27 @@
             assert exc.value[0] == "new type not compatible with array."
             exc = raises(TypeError, a.view, 'string')
             assert exc.value[0] == "data-type must not be 0-sized"
    -        assert a.view('S4') == '\x03'
    +        if sys.byteorder == 'big':
    +            assert a.view('S4') == '\x00\x00\x00\x03'
    +        else:
    +            assert a.view('S4') == '\x03'
             a = array('abc1', dtype='c')
             assert (a == ['a', 'b', 'c', '1']).all()
             assert a.view('S4') == 'abc1'
             b = a.view([('a', 'i2'), ('b', 'i2')])
             assert b.shape == (1,)
    -        assert b[0][0] == 25185
    -        assert b[0][1] == 12643
    +        if sys.byteorder == 'big':
    +            assert b[0][0] == 0x6162
    +            assert b[0][1] == 0x6331
    +        else:
    +            assert b[0][0] == 25185
    +            assert b[0][1] == 12643
             a = array([(1, 2)], dtype=[('a', 'int64'), ('b', 'int64')])[0]
             assert a.shape == ()
    -        assert a.view('S16') == '\x01' + '\x00' * 7 + '\x02'
    +        if sys.byteorder == 'big':
    +            assert a.view('S16') == '\x00' * 7 + '\x01' + '\x00' * 7 + '\x02'
    +        else:
    +            assert a.view('S16') == '\x01' + '\x00' * 7 + '\x02'
             a = array(2, dtype='
    
    Author: Richard Plangger 
    Branch: release-5.x
    Changeset: r83396:9d2afadd0b72
    Date: 2016-01-21 20:37 +0100
    http://bitbucket.org/pypy/pypy/changeset/9d2afadd0b72/
    
Log:	advance the pointer so that the value written by libffi is
	returned correctly. This is not needed on e.g. ppc big-endian 64
	bit because libffi on ppc casts the return value to the requested
	type (s390x does not, and we have discussed this enough already)
	(grafted from 08606f22af4a38ecf9d4b5cb2207c5f0b3b32766)
    
    diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py
    --- a/pypy/module/_rawffi/interp_rawffi.py
    +++ b/pypy/module/_rawffi/interp_rawffi.py
    @@ -1,8 +1,10 @@
    +import sys
     from pypy.interpreter.baseobjspace import W_Root
     from pypy.interpreter.error import OperationError, oefmt, wrap_oserror
     from pypy.interpreter.gateway import interp2app, unwrap_spec
     from pypy.interpreter.typedef import TypeDef, GetSetProperty
     
    +from rpython.jit.backend.llsupport.symbolic import WORD
     from rpython.rlib.clibffi import *
     from rpython.rtyper.lltypesystem import lltype, rffi
     from rpython.rtyper.tool import rffi_platform
    @@ -19,6 +21,8 @@
     from pypy.module._rawffi.buffer import RawFFIBuffer
     from pypy.module._rawffi.tracker import tracker
     
    +BIGENDIAN = sys.byteorder == 'big'
    +
     TYPEMAP = {
         # XXX A mess with unsigned/signed/normal chars :-/
         'c' : ffi_type_uchar,
    @@ -331,10 +335,14 @@
                 if tracker.DO_TRACING:
                     ll_buf = rffi.cast(lltype.Signed, self.ll_buffer)
                     tracker.trace_allocation(ll_buf, self)
    +        self._ll_buffer = self.ll_buffer
     
         def getbuffer(self, space):
             return space.wrap(rffi.cast(lltype.Unsigned, self.ll_buffer))
     
    +    def buffer_advance(self, n):
    +        self.ll_buffer = rffi.ptradd(self.ll_buffer, n)
    +
         def byptr(self, space):
             from pypy.module._rawffi.array import ARRAY_OF_PTRS
             array = ARRAY_OF_PTRS.allocate(space, 1)
    @@ -342,16 +350,17 @@
             return space.wrap(array)
     
         def free(self, space):
    -        if not self.ll_buffer:
    +        if not self._ll_buffer:
                 raise segfault_exception(space, "freeing NULL pointer")
             self._free()
     
         def _free(self):
             if tracker.DO_TRACING:
    -            ll_buf = rffi.cast(lltype.Signed, self.ll_buffer)
    +            ll_buf = rffi.cast(lltype.Signed, self._ll_buffer)
                 tracker.trace_free(ll_buf)
    -        lltype.free(self.ll_buffer, flavor='raw')
    +        lltype.free(self._ll_buffer, flavor='raw')
             self.ll_buffer = lltype.nullptr(rffi.VOIDP.TO)
    +        self._ll_buffer = self.ll_buffer
     
         def buffer_w(self, space, flags):
             return RawFFIBuffer(self)
    @@ -497,6 +506,11 @@
                     result = self.resshape.allocate(space, 1, autofree=True)
                     # adjust_return_size() was used here on result.ll_buffer
                     self.ptr.call(args_ll, result.ll_buffer)
    +                if BIGENDIAN and result.shape.size < WORD:
+                    # we get an 8-byte value in big endian
    +                    n = WORD - result.shape.size
    +                    result.buffer_advance(n)
    +
                     return space.wrap(result)
                 else:
                     self.ptr.call(args_ll, lltype.nullptr(rffi.VOIDP.TO))
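
The problem fixed above can be reproduced without libffi: the callee writes its result into a full machine-word-sized slot, and on a big-endian 64-bit target a result narrower than a word ends up in the *last* bytes of that slot, hence the buffer_advance(WORD - size).  A small sketch of that layout (WORD and SIZE are picked here for illustration, e.g. s390x returning an int):

    import struct

    WORD = 8          # machine word on a 64-bit big-endian target
    SIZE = 4          # sizeof(int), the declared return type

    # emulate the memory a big-endian callee leaves behind for 0x12345678
    slot = struct.pack('>q', 0x12345678)

    # reading at offset 0 picks up the zero padding, not the value
    assert struct.unpack('>i', slot[0:SIZE])[0] == 0
    # advancing by WORD - SIZE (what buffer_advance() does) is correct
    off = WORD - SIZE
    assert struct.unpack('>i', slot[off:off + SIZE])[0] == 0x12345678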
    
    From pypy.commits at gmail.com  Sun Mar 27 16:43:12 2016
    From: pypy.commits at gmail.com (plan_rich)
    Date: Sun, 27 Mar 2016 13:43:12 -0700 (PDT)
    Subject: [pypy-commit] pypy release-5.x: and the other part of the bigendian
     issues (micronumpy tests)
    Message-ID: <56f845e0.cc811c0a.d468f.ffffa618@mx.google.com>
    
    Author: Richard Plangger 
    Branch: release-5.x
    Changeset: r83398:d48bab9286fe
    Date: 2016-01-22 12:00 +0100
    http://bitbucket.org/pypy/pypy/changeset/d48bab9286fe/
    
    Log:	and the other part of the bigendian issues (micronumpy tests)
    	(grafted from 47a85e21bb1b8eaa8e6688244187e7ff493c214c)
    
    diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py
    --- a/pypy/module/micronumpy/test/test_ndarray.py
    +++ b/pypy/module/micronumpy/test/test_ndarray.py
    @@ -3537,7 +3537,11 @@
             BaseNumpyAppTest.setup_class.im_func(cls)
             cls.w_data = cls.space.wrap(struct.pack('dddd', 1, 2, 3, 4))
             cls.w_fdata = cls.space.wrap(struct.pack('f', 2.3))
    -        cls.w_float16val = cls.space.wrap('\x00E') # 5.0 in float16
    +        import sys
    +        if sys.byteorder == 'big':
    +            cls.w_float16val = cls.space.wrap('E\x00') # 5.0 in float16
    +        else:
    +            cls.w_float16val = cls.space.wrap('\x00E') # 5.0 in float16
             cls.w_float32val = cls.space.wrap(struct.pack('f', 5.2))
             cls.w_float64val = cls.space.wrap(struct.pack('d', 300.4))
             cls.w_ulongval = cls.space.wrap(struct.pack('L', 12))
    @@ -3645,9 +3649,15 @@
             assert (t == []).all()
             u = fromstring("\x01\x00\x00\x00\x00\x00\x00\x00", dtype=int)
             if sys.maxint > 2 ** 31 - 1:
    -            assert (u == [1]).all()
    +            if sys.byteorder == 'big':
    +                assert (u == [0x0100000000000000]).all()
    +            else:
    +                assert (u == [1]).all()
             else:
    -            assert (u == [1, 0]).all()
    +            if sys.byteorder == 'big':
    +                assert (u == [0x01000000, 0]).all()
    +            else:
    +                assert (u == [1, 0]).all()
             v = fromstring("abcd", dtype="|S2")
             assert v[0] == "ab"
             assert v[1] == "cd"
    @@ -3704,9 +3714,15 @@
             k = fromstring(self.float16val, dtype='float16')
             assert k[0] == dtype('float16').type(5.)
             dt =  array([5], dtype='longfloat').dtype
    +        print(dt.itemsize)
             if dt.itemsize == 8:
    -            m = fromstring('\x00\x00\x00\x00\x00\x00\x14@',
    -                           dtype='float64')
    +            import sys
    +            if sys.byteorder == 'big':
    +                m = fromstring('@\x14\x00\x00\x00\x00\x00\x00',
    +                               dtype='float64')
    +            else:
    +                m = fromstring('\x00\x00\x00\x00\x00\x00\x14@',
    +                               dtype='float64')
             elif dt.itemsize == 12:
                 m = fromstring('\x00\x00\x00\x00\x00\x00\x00\xa0\x01@\x00\x00',
                                dtype='float96')
    @@ -3728,8 +3744,13 @@
     
         def test_tostring(self):
             from numpy import array
    -        assert array([1, 2, 3], 'i2').tostring() == '\x01\x00\x02\x00\x03\x00'
    -        assert array([1, 2, 3], 'i2')[::2].tostring() == '\x01\x00\x03\x00'
    +        import sys
    +        if sys.byteorder == 'big':
    +            assert array([1, 2, 3], 'i2').tostring() == '\x00\x01\x00\x02\x00\x03'
    +            assert array([1, 2, 3], 'i2')[::2].tostring() == '\x00\x01\x00\x03'
    +        else:
    +            assert array([1, 2, 3], 'i2').tostring() == '\x01\x00\x02\x00\x03\x00'
    +            assert array([1, 2, 3], 'i2')[::2].tostring() == '\x01\x00\x03\x00'
         assert array([1, 2, 3], '<i2')[::2].tostring() == '\x01\x00\x03\x00'
         assert array([1, 2, 3], '>i2')[::2].tostring() == '\x00\x01\x00\x03'
             assert array(0, dtype='i2').tostring() == '\x00\x00'
    @@ -4225,7 +4246,10 @@
             v = a.view(('float32', 4))
             assert v.dtype == np.dtype('float32')
             assert v.shape == (10, 4)
    -        assert v[0][-1] == 2.53125
    +        if sys.byteorder == 'big':
    +            assert v[0][-2] == 2.53125
    +        else:
    +            assert v[0][-1] == 2.53125
             exc = raises(ValueError, "a.view(('float32', 2))")
             assert exc.value[0] == 'new type not compatible with array.'
     
    diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py
    --- a/pypy/module/micronumpy/test/test_scalar.py
    +++ b/pypy/module/micronumpy/test/test_scalar.py
    @@ -109,6 +109,7 @@
     
         def test_pickle(self):
             from numpy import dtype, zeros
    +        import sys
             try:
                 from numpy.core.multiarray import scalar
             except ImportError:
    @@ -119,9 +120,11 @@
             f = dtype('float64').type(13.37)
             c = dtype('complex128').type(13 + 37.j)
     
    -        assert i.__reduce__() == (scalar, (dtype('int32'), '9\x05\x00\x00'))
    -        assert f.__reduce__() == (scalar, (dtype('float64'), '=\n\xd7\xa3p\xbd*@'))
    -        assert c.__reduce__() == (scalar, (dtype('complex128'), '\x00\x00\x00\x00\x00\x00*@\x00\x00\x00\x00\x00\x80B@'))
    +        swap = lambda s: (''.join(reversed(s))) if sys.byteorder == 'big' else s
    +        assert i.__reduce__() == (scalar, (dtype('int32'), swap('9\x05\x00\x00')))
    +        assert f.__reduce__() == (scalar, (dtype('float64'), swap('=\n\xd7\xa3p\xbd*@')))
    +        assert c.__reduce__() == (scalar, (dtype('complex128'), swap('\x00\x00\x00\x00\x00\x00*@') + \
    +                                                                swap('\x00\x00\x00\x00\x00\x80B@')))
     
             assert loads(dumps(i)) == i
             assert loads(dumps(f)) == f
    @@ -256,13 +259,20 @@
             assert t < 7e-323
             t = s.view('complex64')
             assert type(t) is np.complex64
    -        assert 0 < t.real < 1
    -        assert t.imag == 0
    +        if sys.byteorder == 'big':
    +            assert 0 < t.imag < 1
    +            assert t.real == 0
    +        else:
    +            assert 0 < t.real < 1
    +            assert t.imag == 0
             exc = raises(TypeError, s.view, 'string')
             assert exc.value[0] == "data-type must not be 0-sized"
             t = s.view('S8')
             assert type(t) is np.string_
    -        assert t == '\x0c'
    +        if sys.byteorder == 'big':
    +            assert t == '\x00' * 7 + '\x0c'
    +        else:
    +            assert t == '\x0c'
             s = np.dtype('string').type('abc1')
             assert s.view('S4') == 'abc1'
             if '__pypy__' in sys.builtin_module_names:
    diff --git a/pypy/module/micronumpy/test/test_selection.py b/pypy/module/micronumpy/test/test_selection.py
    --- a/pypy/module/micronumpy/test/test_selection.py
    +++ b/pypy/module/micronumpy/test/test_selection.py
    @@ -327,10 +327,15 @@
     # tests from numpy/core/tests/test_regression.py
         def test_sort_bigendian(self):
             from numpy import array, dtype
    -        a = array(range(11), dtype='float64')
    -        c = a.astype(dtype(''
             D.__module__ = 'mod'
             mod = new.module('mod')
             mod.D = D
    @@ -510,7 +511,7 @@
                 tp9
                 Rp10
                 (I3
    -            S'<'
    +            S'{E}'
                 p11
                 NNNI-1
                 I-1
    @@ -520,7 +521,7 @@
                 S'\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@'
                 p13
                 tp14
    -            b.'''.replace('            ','')
    +            b.'''.replace('            ','').format(E=E)
             for ss,sn in zip(s.split('\n')[1:],s_from_numpy.split('\n')[1:]):
                 if len(ss)>10:
                     # ignore binary data, it will be checked later
    
    From pypy.commits at gmail.com  Sun Mar 27 16:43:14 2016
    From: pypy.commits at gmail.com (plan_rich)
    Date: Sun, 27 Mar 2016 13:43:14 -0700 (PDT)
    Subject: [pypy-commit] pypy release-5.x: fixed callsite of clibffi with the
     same big endian issues as found yesterday evening
    Message-ID: <56f845e2.2976c20a.d610c.ffffd046@mx.google.com>
    
    Author: Richard Plangger 
    Branch: release-5.x
    Changeset: r83399:c8ccc2e362ae
    Date: 2016-01-22 12:51 +0100
    http://bitbucket.org/pypy/pypy/changeset/c8ccc2e362ae/
    
    Log:	fixed callsite of clibffi with the same big endian issues as found
    	yesterday evening (grafted from
    	6840459f9b22b6e1071bf3e1aa37cb0cca978e68)
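
     The underlying issue: for integer results libffi fills a return slot that is
     at least one machine word wide, so on a big-endian target a narrow value ends
     up in the most significant bytes of that slot, and the read pointer has to be
     advanced before casting, which is what the rffi.ptradd() call in the patch
     below does. A minimal byte-level sketch in plain Python (struct stands in for
     rffi; the 8-byte word and 4-byte result size are assumptions of the example):

         import struct
         import sys

         WORD = 8           # assumed width of the return slot (64-bit word)
         RESULT_SIZE = 4    # assumed size of the declared C result, e.g. "int"

         # the call stored the widened value 42 into the word-sized slot
         slot = struct.pack('=q', 42)

         offset = 0
         if sys.byteorder == 'big':
             # the meaningful bytes sit at the end of the slot
             offset = WORD - RESULT_SIZE
         assert struct.unpack_from('=i', slot, offset)[0] == 42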
    
    diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py
    --- a/pypy/module/micronumpy/test/test_ndarray.py
    +++ b/pypy/module/micronumpy/test/test_ndarray.py
    @@ -4246,6 +4246,7 @@
             v = a.view(('float32', 4))
             assert v.dtype == np.dtype('float32')
             assert v.shape == (10, 4)
    +        import sys
             if sys.byteorder == 'big':
                 assert v[0][-2] == 2.53125
             else:
    diff --git a/rpython/rlib/clibffi.py b/rpython/rlib/clibffi.py
    --- a/rpython/rlib/clibffi.py
    +++ b/rpython/rlib/clibffi.py
    @@ -597,6 +597,9 @@
                 size = adjust_return_size(intmask(restype.c_size))
                 self.ll_result = lltype.malloc(rffi.VOIDP.TO, size,
                                                flavor='raw')
    +            self.restype_size = intmask(restype.c_size)
    +        else:
    +            self.restype_size = -1
     
         def push_arg(self, value):
             #if self.pushed_args == self.argnum:
    @@ -633,7 +636,12 @@
                                 rffi.cast(VOIDPP, self.ll_args))
             if RES_TP is not lltype.Void:
                 TP = lltype.Ptr(rffi.CArray(RES_TP))
    -            res = rffi.cast(TP, self.ll_result)[0]
    +            ptr = self.ll_result
    +            if _BIG_ENDIAN and self.restype_size != -1:
    +                # we get a 8 byte value in big endian
    +                n = rffi.sizeof(lltype.Signed) - self.restype_size
    +                ptr = rffi.ptradd(ptr, n)
    +            res = rffi.cast(TP, ptr)[0]
             else:
                 res = None
             self._clean_args()
    diff --git a/rpython/rlib/rstruct/test/test_runpack.py b/rpython/rlib/rstruct/test/test_runpack.py
    --- a/rpython/rlib/rstruct/test/test_runpack.py
    +++ b/rpython/rlib/rstruct/test/test_runpack.py
    @@ -6,11 +6,13 @@
     
     class TestRStruct(BaseRtypingTest):
         def test_unpack(self):
    +        import sys
             pad = '\x00' * (LONG_BIT//8-1)    # 3 or 7 null bytes
             def fn():
                 return runpack('sll', 'a'+pad+'\x03'+pad+'\x04'+pad)[1]
    -        assert fn() == 3
    -        assert self.interpret(fn, []) == 3
    +        result = 3 if sys.byteorder == 'little' else 3 << (LONG_BIT-8)
    +        assert fn() == result
    +        assert self.interpret(fn, []) == result
     
         def test_unpack_2(self):
             data = struct.pack('iiii', 0, 1, 2, 4)
    diff --git a/rpython/rlib/test/test_clibffi.py b/rpython/rlib/test/test_clibffi.py
    --- a/rpython/rlib/test/test_clibffi.py
    +++ b/rpython/rlib/test/test_clibffi.py
    @@ -181,11 +181,12 @@
                 p_a2 = rffi.cast(rffi.VOIDPP, ll_args[1])[0]
                 a1 = rffi.cast(rffi.INTP, p_a1)[0]
                 a2 = rffi.cast(rffi.INTP, p_a2)[0]
    -            res = rffi.cast(rffi.INTP, ll_res)
    +            res = rffi.cast(rffi.SIGNEDP, ll_res)
    +            # must store a full ffi arg!
                 if a1 > a2:
    -                res[0] = rffi.cast(rffi.INT, 1)
    +                res[0] = 1
                 else:
    -                res[0] = rffi.cast(rffi.INT, -1)
    +                res[0] = -1
     
             ptr = CallbackFuncPtr([ffi_type_pointer, ffi_type_pointer],
                                   ffi_type_sint, callback)
    
    From pypy.commits at gmail.com  Sun Mar 27 16:43:16 2016
    From: pypy.commits at gmail.com (plan_rich)
    Date: Sun, 27 Mar 2016 13:43:16 -0700 (PDT)
    Subject: [pypy-commit] pypy release-5.x: call int should provide rffi.INT
     instead of rffi.SIGNED, this works on little endian, but not big
    Message-ID: <56f845e4.890bc30a.8c36f.ffffc2ce@mx.google.com>
    
    Author: Richard Plangger 
    Branch: release-5.x
    Changeset: r83400:403d1e816438
    Date: 2016-02-05 20:46 +0100
    http://bitbucket.org/pypy/pypy/changeset/403d1e816438/
    
    Log:	call int should provide rffi.INT instead of rffi.SIGNED, this works
    	on little endian, but not big (grafted from
    	ca18f251c6bfc448ba2d8a46ef5f982f1fc35874)
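
     The reason the width matters: if the callee stored only a 4-byte int at the
     start of a word-sized result buffer, reading it back through an 8-byte signed
     type happens to give the right answer on little-endian but a value shifted
     into the high bytes on big-endian. A short illustration in plain Python
     (struct instead of the rffi casts; the buffer layout is an assumed setup for
     the example, not what libffi guarantees):

         import struct
         import sys

         # assume a 4-byte int (42) was stored at offset 0 and the rest of the
         # 8-byte result buffer was left zeroed
         buf = struct.pack('=ii', 42, 0)

         narrow = struct.unpack_from('=i', buf, 0)[0]   # read 4 bytes
         wide = struct.unpack_from('=q', buf, 0)[0]     # read the whole word

         assert narrow == 42
         if sys.byteorder == 'little':
             assert wide == 42           # accidentally correct
         else:
             assert wide == 42 << 32     # wrong: the int sits in the high bytes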
    
    diff --git a/rpython/rlib/libffi.py b/rpython/rlib/libffi.py
    --- a/rpython/rlib/libffi.py
    +++ b/rpython/rlib/libffi.py
    @@ -326,7 +326,7 @@
         #@jit.oopspec('libffi_call_int(self, funcsym, ll_args)')
         @jit.dont_look_inside
         def _do_call_int(self, funcsym, ll_args):
    -        return self._do_call(funcsym, ll_args, rffi.SIGNED)
    +        return self._do_call(funcsym, ll_args, rffi.INT)
     
         #@jit.oopspec('libffi_call_float(self, funcsym, ll_args)')
         @jit.dont_look_inside
    
    From pypy.commits at gmail.com  Sun Mar 27 16:43:22 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Sun, 27 Mar 2016 13:43:22 -0700 (PDT)
    Subject: [pypy-commit] pypy default: Untranslated,
     we can't reliably call c_open()
    Message-ID: <56f845ea.85b01c0a.ed107.ffff9e72@mx.google.com>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r83404:76d5c1f7ff72
    Date: 2016-03-27 22:41 +0200
    http://bitbucket.org/pypy/pypy/changeset/76d5c1f7ff72/
    
    Log:	Untranslated, we can't reliably call c_open() because its precise
    	signature is (char*, int, ...) but we're pretending it is (char*,
    	int, mode_t). Usually it makes no difference, but on some platforms
    	it does.
    
    diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py
    --- a/rpython/rlib/rposix.py
    +++ b/rpython/rlib/rposix.py
    @@ -10,7 +10,8 @@
         _CYGWIN, _MACRO_ON_POSIX, UNDERSCORE_ON_WIN32, _WIN32,
         _prefer_unicode, _preferred_traits)
     from rpython.rlib.objectmodel import (
    -    specialize, enforceargs, register_replacement_for, NOT_CONSTANT)
    +    specialize, enforceargs, register_replacement_for, NOT_CONSTANT,
    +    we_are_translated)
     from rpython.rlib.rarithmetic import intmask, widen
     from rpython.rlib.signature import signature
     from rpython.tool.sourcetools import func_renamer
    @@ -391,8 +392,15 @@
     def open(path, flags, mode):
         if _prefer_unicode(path):
             fd = c_wopen(_as_unicode0(path), flags, mode)
    +    elif we_are_translated():
    +        fd = c_open(_as_bytes0(path), flags, mode)
         else:
    -        fd = c_open(_as_bytes0(path), flags, mode)
    +        # Untranslated, we can't reliably call c_open()
    +        # because its precise signature is (char*, int, ...)
    +        # but we're pretending it is (char*, int, mode_t).
    +        # Usually it makes no difference, but on some
    +        # platforms it does.
    +        fd = os.open(_as_bytes0(path), flags, mode)
         return handle_posix_error('open', fd)
     
     c_read = external(UNDERSCORE_ON_WIN32 + 'read',
    
    From pypy.commits at gmail.com  Sun Mar 27 16:43:17 2016
    From: pypy.commits at gmail.com (plan_rich)
    Date: Sun, 27 Mar 2016 13:43:17 -0700 (PDT)
    Subject: [pypy-commit] pypy release-5.x: ffi call fixed in deprecated api
     that is still used (fix before that was not sufficient),
     fixed legacy tests test_libffi
    Message-ID: <56f845e5.a151c20a.7046d.ffffc8f5@mx.google.com>
    
    Author: Richard Plangger 
    Branch: release-5.x
    Changeset: r83401:d1470cfc1db9
    Date: 2016-02-05 22:37 +0100
    http://bitbucket.org/pypy/pypy/changeset/d1470cfc1db9/
    
    Log:	ffi call fixed in deprecated api that is still used (fix before that
    	was not sufficient), fixed legacy tests test_libffi (grafted from
    	d40d932f8349ce3ab308e5ddf6483a8936ba3eaf)
    
    diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py
    --- a/pypy/module/_rawffi/interp_rawffi.py
    +++ b/pypy/module/_rawffi/interp_rawffi.py
    @@ -4,7 +4,6 @@
     from pypy.interpreter.gateway import interp2app, unwrap_spec
     from pypy.interpreter.typedef import TypeDef, GetSetProperty
     
    -from rpython.jit.backend.llsupport.symbolic import WORD
     from rpython.rlib.clibffi import *
     from rpython.rtyper.lltypesystem import lltype, rffi
     from rpython.rtyper.tool import rffi_platform
    @@ -447,6 +446,9 @@
             self.ptr = ptr
             self.argshapes = argshapes
             self.resshape = resshape
    +        self.narrow_integer = False
    +        if resshape is not None:
    +            self.narrow_integer = resshape.itemcode.lower() in ('c','h','i')
     
         def getbuffer(self, space):
             return space.wrap(rffi.cast(lltype.Unsigned, self.ptr.funcsym))
    @@ -506,9 +508,9 @@
                     result = self.resshape.allocate(space, 1, autofree=True)
                     # adjust_return_size() was used here on result.ll_buffer
                     self.ptr.call(args_ll, result.ll_buffer)
    -                if BIGENDIAN and result.shape.size < WORD:
    +                if BIGENDIAN and self.narrow_integer:
                         # we get a 8 byte value in big endian
    -                    n = WORD - result.shape.size
    +                    n = rffi.sizeof(lltype.Signed) - result.shape.size
                         result.buffer_advance(n)
     
                     return space.wrap(result)
    diff --git a/rpython/rlib/clibffi.py b/rpython/rlib/clibffi.py
    --- a/rpython/rlib/clibffi.py
    +++ b/rpython/rlib/clibffi.py
    @@ -594,10 +594,10 @@
                                                 intmask(argtypes[i].c_size),
                                                 flavor='raw')
             if restype != ffi_type_void:
    -            size = adjust_return_size(intmask(restype.c_size))
    +            self.restype_size = intmask(restype.c_size)
    +            size = adjust_return_size(self.restype_size)
                 self.ll_result = lltype.malloc(rffi.VOIDP.TO, size,
                                                flavor='raw')
    -            self.restype_size = intmask(restype.c_size)
             else:
                 self.restype_size = -1
     
    @@ -637,7 +637,7 @@
             if RES_TP is not lltype.Void:
                 TP = lltype.Ptr(rffi.CArray(RES_TP))
                 ptr = self.ll_result
    -            if _BIG_ENDIAN and self.restype_size != -1:
    +            if _BIG_ENDIAN and RES_TP in TYPE_MAP_INT:
                     # we get a 8 byte value in big endian
                     n = rffi.sizeof(lltype.Signed) - self.restype_size
                     ptr = rffi.ptradd(ptr, n)
    diff --git a/rpython/rlib/libffi.py b/rpython/rlib/libffi.py
    --- a/rpython/rlib/libffi.py
    +++ b/rpython/rlib/libffi.py
    @@ -4,6 +4,7 @@
     from __future__ import with_statement
     
     from rpython.rtyper.lltypesystem import rffi, lltype
    +from rpython.rlib.unroll import unrolling_iterable
     from rpython.rlib.objectmodel import specialize, enforceargs
     from rpython.rlib.rarithmetic import intmask, r_uint, r_singlefloat, r_longlong
     from rpython.rlib import jit
    @@ -15,6 +16,9 @@
     from rpython.rlib.rdynload import DLLHANDLE
     
     import os
    +import sys
    +
    +_BIG_ENDIAN = sys.byteorder == 'big'
     
     class types(object):
         """
    @@ -211,6 +215,8 @@
     
     # ======================================================================
     
    +NARROW_INTEGER_TYPES = unrolling_iterable([rffi.CHAR,
    +    rffi.UCHAR, rffi.SHORT, rffi.USHORT, rffi.INT, rffi.UINT])
     
     class Func(AbstractFuncPtr):
     
    @@ -263,7 +269,12 @@
                 res = self._do_call_raw(self.funcsym, ll_args)
             elif _fits_into_signed(RESULT):
                 assert not types.is_struct(self.restype)
    -            res = self._do_call_int(self.funcsym, ll_args)
    +            for res in NARROW_INTEGER_TYPES:
    +                if RESULT is res:
    +                    res = self._do_call_int(self.funcsym, ll_args, rffi.CHAR)
    +                    break
    +            else:
    +                res = self._do_call_int(self.funcsym, ll_args, rffi.SIGNED)
             elif RESULT is rffi.DOUBLE:
                 return self._do_call_float(self.funcsym, ll_args)
             elif RESULT is rffi.FLOAT:
    @@ -325,8 +336,9 @@
     
         #@jit.oopspec('libffi_call_int(self, funcsym, ll_args)')
         @jit.dont_look_inside
    -    def _do_call_int(self, funcsym, ll_args):
    -        return self._do_call(funcsym, ll_args, rffi.INT)
    +    @specialize.arg(3)
    +    def _do_call_int(self, funcsym, ll_args, TP):
    +        return self._do_call(funcsym, ll_args, TP)
     
         #@jit.oopspec('libffi_call_float(self, funcsym, ll_args)')
         @jit.dont_look_inside
    @@ -368,10 +380,10 @@
         @specialize.arg(3)
         def _do_call(self, funcsym, ll_args, RESULT):
             # XXX: check len(args)?
    -        ll_result = lltype.nullptr(rffi.CCHARP.TO)
    +        ll_result = lltype.nullptr(rffi.VOIDP.TO)
             if self.restype != types.void:
                 size = adjust_return_size(intmask(self.restype.c_size))
    -            ll_result = lltype.malloc(rffi.CCHARP.TO, size,
    +            ll_result = lltype.malloc(rffi.VOIDP.TO, size,
                                           flavor='raw')
             ffires = c_ffi_call(self.ll_cif,
                                 self.funcsym,
    @@ -379,14 +391,20 @@
                                 rffi.cast(rffi.VOIDPP, ll_args))
             if RESULT is not lltype.Void:
                 TP = lltype.Ptr(rffi.CArray(RESULT))
    -            buf = rffi.cast(TP, ll_result)
                 if types.is_struct(self.restype):
                     assert RESULT == rffi.SIGNED
                     # for structs, we directly return the buffer and transfer the
                     # ownership
    +                buf = rffi.cast(TP, ll_result)
                     res = rffi.cast(RESULT, buf)
                 else:
    -                res = buf[0]
    +                if _BIG_ENDIAN and types.getkind(self.restype) in ('i','u'):
    +                    ptr = ll_result
    +                    n = rffi.sizeof(lltype.Signed) - self.restype.c_size
    +                    ptr = rffi.ptradd(ptr, n)
    +                    res = rffi.cast(TP, ptr)[0]
    +                else:
    +                    res = rffi.cast(TP, ll_result)[0]
             else:
                 res = None
             self._free_buffers(ll_result, ll_args)
    diff --git a/rpython/rlib/test/test_libffi.py b/rpython/rlib/test/test_libffi.py
    --- a/rpython/rlib/test/test_libffi.py
    +++ b/rpython/rlib/test/test_libffi.py
    @@ -274,7 +274,7 @@
             """
             libfoo = self.get_libfoo()
             func = (libfoo, 'diff_xy', [types.sint, types.signed], types.sint)
    -        res = self.call(func, [50, 8], lltype.Signed)
    +        res = self.call(func, [50, 8], rffi.INT)
             assert res == 42
     
         def test_simple(self):
    @@ -287,7 +287,7 @@
             """
             libfoo = self.get_libfoo()
             func = (libfoo, 'sum_xy', [types.sint, types.double], types.sint)
    -        res = self.call(func, [38, 4.2], lltype.Signed, jitif=["floats"])
    +        res = self.call(func, [38, 4.2], rffi.INT, jitif=["floats"])
             assert res == 42
     
         def test_float_result(self):
    @@ -319,7 +319,7 @@
             """
             libfoo = self.get_libfoo()
             func = (libfoo, 'many_args', [types.uchar, types.sint], types.sint)
    -        res = self.call(func, [chr(20), 22], rffi.SIGNED)
    +        res = self.call(func, [chr(20), 22], rffi.INT)
             assert res == 42
     
         def test_char_args(self):
    @@ -418,12 +418,12 @@
             set_dummy = (libfoo, 'set_dummy', [types.sint], types.void)
             get_dummy = (libfoo, 'get_dummy', [], types.sint)
             #
    -        initval = self.call(get_dummy, [], rffi.SIGNED)
    +        initval = self.call(get_dummy, [], rffi.INT)
             #
             res = self.call(set_dummy, [initval+1], lltype.Void)
             assert res is None
             #
    -        res = self.call(get_dummy, [], rffi.SIGNED)
    +        res = self.call(get_dummy, [], rffi.INT)
             assert res == initval+1
     
         def test_single_float_args(self):
    
    From pypy.commits at gmail.com  Sun Mar 27 16:43:19 2016
    From: pypy.commits at gmail.com (plan_rich)
    Date: Sun, 27 Mar 2016 13:43:19 -0700 (PDT)
    Subject: [pypy-commit] pypy release-5.x: _rawffi bigendian issue in
     callbacks,
     callback writes narrow integer directly to MSB of 64 bit value on s390x
     (wrong when value is passed along)
    Message-ID: <56f845e7.41d91c0a.353e8.ffff9d3c@mx.google.com>
    
    Author: Richard Plangger 
    Branch: release-5.x
    Changeset: r83402:3624c8c2c3be
    Date: 2016-02-10 18:07 +0100
    http://bitbucket.org/pypy/pypy/changeset/3624c8c2c3be/
    
    Log:	_rawffi bigendian issue in callbacks, callback writes narrow integer
    	directly to MSB of 64 bit value on s390x (wrong when value is passed
    	along) (grafted from 74ebd8669f961c3ba5835f052356a9000298af98)
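
     The callback side has the mirror-image problem: the result slot handed to the
     callback is one machine word, and storing only the narrow integer at offset 0
     puts it into the most significant bytes as seen by a big-endian caller.
     Writing at the right offset, as the rffi.ptradd() in the patch below does (or
     widening the store to a full word), fixes it. A plain-Python sketch, assuming
     an 8-byte slot and a 4-byte declared result type:

         import struct
         import sys

         SLOT = 8     # assumed ffi result-slot width
         SIZE = 4     # assumed size of the declared integer result

         slot = bytearray(SLOT)
         offset = SLOT - SIZE if sys.byteorder == 'big' else 0
         # store the narrow value where a full-word read expects to find it
         struct.pack_into('=i', slot, offset, 42)

         assert struct.unpack('=q', bytes(slot))[0] == 42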
    
    diff --git a/pypy/module/_rawffi/callback.py b/pypy/module/_rawffi/callback.py
    --- a/pypy/module/_rawffi/callback.py
    +++ b/pypy/module/_rawffi/callback.py
    @@ -1,17 +1,23 @@
    -
    +import sys
     from pypy.interpreter.gateway import interp2app, unwrap_spec
     from pypy.interpreter.typedef import TypeDef, GetSetProperty
     from rpython.rtyper.lltypesystem import lltype, rffi
     from pypy.module._rawffi.interp_rawffi import write_ptr
     from pypy.module._rawffi.structure import W_Structure
     from pypy.module._rawffi.interp_rawffi import (W_DataInstance, letter2tp,
    -     unwrap_value, unpack_argshapes, got_libffi_error)
    +     unwrap_value, unpack_argshapes, got_libffi_error, is_narrow_integer_type,
    +     LL_TYPEMAP, NARROW_INTEGER_TYPES)
     from rpython.rlib.clibffi import USERDATA_P, CallbackFuncPtr, FUNCFLAG_CDECL
     from rpython.rlib.clibffi import ffi_type_void, LibFFIError
     from rpython.rlib import rweakref
     from pypy.module._rawffi.tracker import tracker
     from pypy.interpreter.error import OperationError
     from pypy.interpreter import gateway
    +from rpython.rlib.unroll import unrolling_iterable
    +
    +BIGENDIAN = sys.byteorder == 'big'
    +
    +unroll_narrow_integer_types = unrolling_iterable(NARROW_INTEGER_TYPES)
     
     app = gateway.applevel('''
         def tbprint(tb, err):
    @@ -42,8 +48,17 @@
                     args_w[i] = space.wrap(rffi.cast(rffi.ULONG, ll_args[i]))
             w_res = space.call(w_callable, space.newtuple(args_w))
             if callback_ptr.result is not None: # don't return void
    -            unwrap_value(space, write_ptr, ll_res, 0,
    -                         callback_ptr.result, w_res)
    +            ptr = ll_res
    +            letter = callback_ptr.result
    +            if BIGENDIAN:
    +                # take care of narrow integers!
    +                for int_type in unroll_narrow_integer_types:
    +                    if int_type == letter:
    +                        T = LL_TYPEMAP[int_type]
    +                        n = rffi.sizeof(lltype.Signed) - rffi.sizeof(T)
    +                        ptr = rffi.ptradd(ptr, n)
    +                        break
    +            unwrap_value(space, write_ptr, ptr, 0, letter, w_res)
         except OperationError, e:
             tbprint(space, space.wrap(e.get_traceback()),
                     space.wrap(e.errorstr(space)))
    diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py
    --- a/pypy/module/_rawffi/interp_rawffi.py
    +++ b/pypy/module/_rawffi/interp_rawffi.py
    @@ -440,6 +440,10 @@
                              space.wrap("cannot directly read value"))
     wrap_value._annspecialcase_ = 'specialize:arg(1)'
     
    +NARROW_INTEGER_TYPES = 'cbhiBIH?'
    +
    +def is_narrow_integer_type(letter):
    +    return letter in NARROW_INTEGER_TYPES
     
     class W_FuncPtr(W_Root):
         def __init__(self, space, ptr, argshapes, resshape):
    @@ -448,7 +452,7 @@
             self.resshape = resshape
             self.narrow_integer = False
             if resshape is not None:
    -            self.narrow_integer = resshape.itemcode.lower() in ('c','h','i')
    +            self.narrow_integer = is_narrow_integer_type(resshape.itemcode.lower())
     
         def getbuffer(self, space):
             return space.wrap(rffi.cast(lltype.Unsigned, self.ptr.funcsym))
    @@ -512,7 +516,6 @@
                         # we get a 8 byte value in big endian
                         n = rffi.sizeof(lltype.Signed) - result.shape.size
                         result.buffer_advance(n)
    -
                     return space.wrap(result)
                 else:
                     self.ptr.call(args_ll, lltype.nullptr(rffi.VOIDP.TO))
    
    From pypy.commits at gmail.com  Sun Mar 27 16:43:21 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Sun, 27 Mar 2016 13:43:21 -0700 (PDT)
    Subject: [pypy-commit] pypy release-5.x: Untranslated,
     we can't reliably call c_open()
    Message-ID: <56f845e9.654fc20a.8b9ed.ffffc43f@mx.google.com>
    
    Author: Armin Rigo 
    Branch: release-5.x
    Changeset: r83403:c71f672e9491
    Date: 2016-03-27 22:41 +0200
    http://bitbucket.org/pypy/pypy/changeset/c71f672e9491/
    
    Log:	Untranslated, we can't reliably call c_open() because its precise
    	signature is (char*, int, ...) but we're pretending it is (char*,
    	int, mode_t). Usually it makes no difference, but on some platforms
    	it does.
    
    diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py
    --- a/rpython/rlib/rposix.py
    +++ b/rpython/rlib/rposix.py
    @@ -10,7 +10,8 @@
         _CYGWIN, _MACRO_ON_POSIX, UNDERSCORE_ON_WIN32, _WIN32,
         _prefer_unicode, _preferred_traits)
     from rpython.rlib.objectmodel import (
    -    specialize, enforceargs, register_replacement_for, NOT_CONSTANT)
    +    specialize, enforceargs, register_replacement_for, NOT_CONSTANT,
    +    we_are_translated)
     from rpython.rlib.rarithmetic import intmask, widen
     from rpython.rlib.signature import signature
     from rpython.tool.sourcetools import func_renamer
    @@ -375,8 +376,15 @@
     def open(path, flags, mode):
         if _prefer_unicode(path):
             fd = c_wopen(_as_unicode0(path), flags, mode)
    +    elif we_are_translated():
    +        fd = c_open(_as_bytes0(path), flags, mode)
         else:
    -        fd = c_open(_as_bytes0(path), flags, mode)
    +        # Untranslated, we can't reliably call c_open()
    +        # because its precise signature is (char*, int, ...)
    +        # but we're pretending it is (char*, int, mode_t).
    +        # Usually it makes no difference, but on some
    +        # platforms it does.
    +        fd = os.open(_as_bytes0(path), flags, mode)
         return handle_posix_error('open', fd)
     
     c_read = external(UNDERSCORE_ON_WIN32 + 'read',
    
    From pypy.commits at gmail.com  Sun Mar 27 16:49:49 2016
    From: pypy.commits at gmail.com (stefanor)
    Date: Sun, 27 Mar 2016 13:49:49 -0700 (PDT)
    Subject: [pypy-commit] pypy default: It's a byte, not an integer
    Message-ID: <56f8476d.06b01c0a.acd46.ffffab74@mx.google.com>
    
    Author: Stefano Rivera 
    Branch: 
    Changeset: r83405:3b76d0c3f842
    Date: 2016-03-27 16:49 -0400
    http://bitbucket.org/pypy/pypy/changeset/3b76d0c3f842/
    
    Log:	It's a byte, not an integer
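
     Context for the one-character change: buf is a byte string, so buf[i] is a
     one-character string, and comparing it with the integer 0 is always false in
     Python; the check only works against the byte '\x00'. A tiny illustration:

         buf = '\x00\x1a\x2b'        # a raw byte buffer

         assert not (buf[0] == 0)    # a 1-char string never equals an int
         assert buf[0] == '\x00'     # this is the intended comparison
         assert ord(buf[0]) == 0     # or convert to an integer explicitly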
    
    diff --git a/rpython/jit/backend/arm/detect.py b/rpython/jit/backend/arm/detect.py
    --- a/rpython/jit/backend/arm/detect.py
    +++ b/rpython/jit/backend/arm/detect.py
    @@ -80,7 +80,7 @@
     
         i = 0
         while i <= buf_size - struct_size:
    -        if buf[i] == 0:
    +        if buf[i] == '\x00':
                 i += 1
                 continue
     
    
    From pypy.commits at gmail.com  Sun Mar 27 16:59:42 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Sun, 27 Mar 2016 13:59:42 -0700 (PDT)
    Subject: [pypy-commit] pypy default: Backed out changeset 76d5c1f7ff72
    Message-ID: <56f849be.4412c30a.3f14c.ffffc3fd@mx.google.com>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r83406:4349150d3ab1
    Date: 2016-03-27 22:58 +0200
    http://bitbucket.org/pypy/pypy/changeset/4349150d3ab1/
    
    Log:	Backed out changeset 76d5c1f7ff72
    
    diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py
    --- a/rpython/rlib/rposix.py
    +++ b/rpython/rlib/rposix.py
    @@ -10,8 +10,7 @@
         _CYGWIN, _MACRO_ON_POSIX, UNDERSCORE_ON_WIN32, _WIN32,
         _prefer_unicode, _preferred_traits)
     from rpython.rlib.objectmodel import (
    -    specialize, enforceargs, register_replacement_for, NOT_CONSTANT,
    -    we_are_translated)
    +    specialize, enforceargs, register_replacement_for, NOT_CONSTANT)
     from rpython.rlib.rarithmetic import intmask, widen
     from rpython.rlib.signature import signature
     from rpython.tool.sourcetools import func_renamer
    @@ -392,15 +391,8 @@
     def open(path, flags, mode):
         if _prefer_unicode(path):
             fd = c_wopen(_as_unicode0(path), flags, mode)
    -    elif we_are_translated():
    +    else:
             fd = c_open(_as_bytes0(path), flags, mode)
    -    else:
    -        # Untranslated, we can't reliably call c_open()
    -        # because its precise signature is (char*, int, ...)
    -        # but we're pretending it is (char*, int, mode_t).
    -        # Usually it makes no difference, but on some
    -        # platforms it does.
    -        fd = os.open(_as_bytes0(path), flags, mode)
         return handle_posix_error('open', fd)
     
     c_read = external(UNDERSCORE_ON_WIN32 + 'read',
    
    From pypy.commits at gmail.com  Sun Mar 27 16:59:44 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Sun, 27 Mar 2016 13:59:44 -0700 (PDT)
    Subject: [pypy-commit] pypy release-5.x: Backed out changeset c71f672e9491
    Message-ID: <56f849c0.e7bec20a.64c87.ffffccde@mx.google.com>
    
    Author: Armin Rigo 
    Branch: release-5.x
    Changeset: r83407:dec2ab0ea59c
    Date: 2016-03-27 22:58 +0200
    http://bitbucket.org/pypy/pypy/changeset/dec2ab0ea59c/
    
    Log:	Backed out changeset c71f672e9491
    
    diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py
    --- a/rpython/rlib/rposix.py
    +++ b/rpython/rlib/rposix.py
    @@ -10,8 +10,7 @@
         _CYGWIN, _MACRO_ON_POSIX, UNDERSCORE_ON_WIN32, _WIN32,
         _prefer_unicode, _preferred_traits)
     from rpython.rlib.objectmodel import (
    -    specialize, enforceargs, register_replacement_for, NOT_CONSTANT,
    -    we_are_translated)
    +    specialize, enforceargs, register_replacement_for, NOT_CONSTANT)
     from rpython.rlib.rarithmetic import intmask, widen
     from rpython.rlib.signature import signature
     from rpython.tool.sourcetools import func_renamer
    @@ -376,15 +375,8 @@
     def open(path, flags, mode):
         if _prefer_unicode(path):
             fd = c_wopen(_as_unicode0(path), flags, mode)
    -    elif we_are_translated():
    +    else:
             fd = c_open(_as_bytes0(path), flags, mode)
    -    else:
    -        # Untranslated, we can't reliably call c_open()
    -        # because its precise signature is (char*, int, ...)
    -        # but we're pretending it is (char*, int, mode_t).
    -        # Usually it makes no difference, but on some
    -        # platforms it does.
    -        fd = os.open(_as_bytes0(path), flags, mode)
         return handle_posix_error('open', fd)
     
     c_read = external(UNDERSCORE_ON_WIN32 + 'read',
    
    From pypy.commits at gmail.com  Sun Mar 27 16:59:46 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Sun, 27 Mar 2016 13:59:46 -0700 (PDT)
    Subject: [pypy-commit] pypy default: merge heads
    Message-ID: <56f849c2.6bb8c20a.3a155.ffffd062@mx.google.com>
    
    Author: Armin Rigo 
    Branch: 
    Changeset: r83408:9a18329069c4
    Date: 2016-03-27 22:59 +0200
    http://bitbucket.org/pypy/pypy/changeset/9a18329069c4/
    
    Log:	merge heads
    
    diff --git a/rpython/jit/backend/arm/detect.py b/rpython/jit/backend/arm/detect.py
    --- a/rpython/jit/backend/arm/detect.py
    +++ b/rpython/jit/backend/arm/detect.py
    @@ -80,7 +80,7 @@
     
         i = 0
         while i <= buf_size - struct_size:
    -        if buf[i] == 0:
    +        if buf[i] == '\x00':
                 i += 1
                 continue
     
    
    From pypy.commits at gmail.com  Sun Mar 27 21:31:10 2016
    From: pypy.commits at gmail.com (stefanor)
    Date: Sun, 27 Mar 2016 18:31:10 -0700 (PDT)
    Subject: [pypy-commit] pypy default: Only %d is supported in rpython
    Message-ID: <56f8895e.e6ebc20a.ba8ae.0ed6@mx.google.com>
    
    Author: Stefano Rivera 
    Branch: 
    Changeset: r83409:37a69ad9f918
    Date: 2016-03-27 21:29 -0400
    http://bitbucket.org/pypy/pypy/changeset/37a69ad9f918/
    
    Log:	Only %d is supported in rpython
    
    diff --git a/rpython/jit/backend/arm/detect.py b/rpython/jit/backend/arm/detect.py
    --- a/rpython/jit/backend/arm/detect.py
    +++ b/rpython/jit/backend/arm/detect.py
    @@ -99,7 +99,7 @@
                      (ord(buf[i+7]) << 24))
             return a_val
     
    -    raise KeyError('failed to find auxval type: %i' % type_)
    +    raise KeyError('failed to find auxval type: %d' % type_)
     
     
     def detect_neon():
    
    From pypy.commits at gmail.com  Sun Mar 27 22:30:50 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Sun, 27 Mar 2016 19:30:50 -0700 (PDT)
    Subject: [pypy-commit] pypy release-5.x: hg revert to 78860f97739e. All
     commits that come after that are backed
    Message-ID: <56f8975a.82561c0a.bf9a3.fffff947@mx.google.com>
    
    Author: Armin Rigo 
    Branch: release-5.x
    Changeset: r83410:b5d52a043b5e
    Date: 2016-03-28 00:35 +0200
    http://bitbucket.org/pypy/pypy/changeset/b5d52a043b5e/
    
    Log:	hg revert to 78860f97739e. All commits that come after that are
    	backed out (all grafts). A pypy at 78860f97739e translates on
    	ppc64be and the result itself is able to run a further translation;
    	this is not the case after the grafts.
    
     	To investigate on default.
    
    diff --git a/pypy/module/_rawffi/callback.py b/pypy/module/_rawffi/callback.py
    --- a/pypy/module/_rawffi/callback.py
    +++ b/pypy/module/_rawffi/callback.py
    @@ -1,23 +1,17 @@
    -import sys
    +
     from pypy.interpreter.gateway import interp2app, unwrap_spec
     from pypy.interpreter.typedef import TypeDef, GetSetProperty
     from rpython.rtyper.lltypesystem import lltype, rffi
     from pypy.module._rawffi.interp_rawffi import write_ptr
     from pypy.module._rawffi.structure import W_Structure
     from pypy.module._rawffi.interp_rawffi import (W_DataInstance, letter2tp,
    -     unwrap_value, unpack_argshapes, got_libffi_error, is_narrow_integer_type,
    -     LL_TYPEMAP, NARROW_INTEGER_TYPES)
    +     unwrap_value, unpack_argshapes, got_libffi_error)
     from rpython.rlib.clibffi import USERDATA_P, CallbackFuncPtr, FUNCFLAG_CDECL
     from rpython.rlib.clibffi import ffi_type_void, LibFFIError
     from rpython.rlib import rweakref
     from pypy.module._rawffi.tracker import tracker
     from pypy.interpreter.error import OperationError
     from pypy.interpreter import gateway
    -from rpython.rlib.unroll import unrolling_iterable
    -
    -BIGENDIAN = sys.byteorder == 'big'
    -
    -unroll_narrow_integer_types = unrolling_iterable(NARROW_INTEGER_TYPES)
     
     app = gateway.applevel('''
         def tbprint(tb, err):
    @@ -48,17 +42,8 @@
                     args_w[i] = space.wrap(rffi.cast(rffi.ULONG, ll_args[i]))
             w_res = space.call(w_callable, space.newtuple(args_w))
             if callback_ptr.result is not None: # don't return void
    -            ptr = ll_res
    -            letter = callback_ptr.result
    -            if BIGENDIAN:
    -                # take care of narrow integers!
    -                for int_type in unroll_narrow_integer_types:
    -                    if int_type == letter:
    -                        T = LL_TYPEMAP[int_type]
    -                        n = rffi.sizeof(lltype.Signed) - rffi.sizeof(T)
    -                        ptr = rffi.ptradd(ptr, n)
    -                        break
    -            unwrap_value(space, write_ptr, ptr, 0, letter, w_res)
    +            unwrap_value(space, write_ptr, ll_res, 0,
    +                         callback_ptr.result, w_res)
         except OperationError, e:
             tbprint(space, space.wrap(e.get_traceback()),
                     space.wrap(e.errorstr(space)))
    diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py
    --- a/pypy/module/_rawffi/interp_rawffi.py
    +++ b/pypy/module/_rawffi/interp_rawffi.py
    @@ -1,4 +1,3 @@
    -import sys
     from pypy.interpreter.baseobjspace import W_Root
     from pypy.interpreter.error import OperationError, oefmt, wrap_oserror
     from pypy.interpreter.gateway import interp2app, unwrap_spec
    @@ -20,8 +19,6 @@
     from pypy.module._rawffi.buffer import RawFFIBuffer
     from pypy.module._rawffi.tracker import tracker
     
    -BIGENDIAN = sys.byteorder == 'big'
    -
     TYPEMAP = {
         # XXX A mess with unsigned/signed/normal chars :-/
         'c' : ffi_type_uchar,
    @@ -334,14 +331,10 @@
                 if tracker.DO_TRACING:
                     ll_buf = rffi.cast(lltype.Signed, self.ll_buffer)
                     tracker.trace_allocation(ll_buf, self)
    -        self._ll_buffer = self.ll_buffer
     
         def getbuffer(self, space):
             return space.wrap(rffi.cast(lltype.Unsigned, self.ll_buffer))
     
    -    def buffer_advance(self, n):
    -        self.ll_buffer = rffi.ptradd(self.ll_buffer, n)
    -
         def byptr(self, space):
             from pypy.module._rawffi.array import ARRAY_OF_PTRS
             array = ARRAY_OF_PTRS.allocate(space, 1)
    @@ -349,17 +342,16 @@
             return space.wrap(array)
     
         def free(self, space):
    -        if not self._ll_buffer:
    +        if not self.ll_buffer:
                 raise segfault_exception(space, "freeing NULL pointer")
             self._free()
     
         def _free(self):
             if tracker.DO_TRACING:
    -            ll_buf = rffi.cast(lltype.Signed, self._ll_buffer)
    +            ll_buf = rffi.cast(lltype.Signed, self.ll_buffer)
                 tracker.trace_free(ll_buf)
    -        lltype.free(self._ll_buffer, flavor='raw')
    +        lltype.free(self.ll_buffer, flavor='raw')
             self.ll_buffer = lltype.nullptr(rffi.VOIDP.TO)
    -        self._ll_buffer = self.ll_buffer
     
         def buffer_w(self, space, flags):
             return RawFFIBuffer(self)
    @@ -440,19 +432,12 @@
                              space.wrap("cannot directly read value"))
     wrap_value._annspecialcase_ = 'specialize:arg(1)'
     
    -NARROW_INTEGER_TYPES = 'cbhiBIH?'
    -
    -def is_narrow_integer_type(letter):
    -    return letter in NARROW_INTEGER_TYPES
     
     class W_FuncPtr(W_Root):
         def __init__(self, space, ptr, argshapes, resshape):
             self.ptr = ptr
             self.argshapes = argshapes
             self.resshape = resshape
    -        self.narrow_integer = False
    -        if resshape is not None:
    -            self.narrow_integer = is_narrow_integer_type(resshape.itemcode.lower())
     
         def getbuffer(self, space):
             return space.wrap(rffi.cast(lltype.Unsigned, self.ptr.funcsym))
    @@ -512,10 +497,6 @@
                     result = self.resshape.allocate(space, 1, autofree=True)
                     # adjust_return_size() was used here on result.ll_buffer
                     self.ptr.call(args_ll, result.ll_buffer)
    -                if BIGENDIAN and self.narrow_integer:
    -                    # we get a 8 byte value in big endian
    -                    n = rffi.sizeof(lltype.Signed) - result.shape.size
    -                    result.buffer_advance(n)
                     return space.wrap(result)
                 else:
                     self.ptr.call(args_ll, lltype.nullptr(rffi.VOIDP.TO))
    diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py
    --- a/pypy/module/micronumpy/test/test_ndarray.py
    +++ b/pypy/module/micronumpy/test/test_ndarray.py
    @@ -1791,7 +1791,6 @@
     
         def test_scalar_view(self):
             from numpy import array
    -        import sys
             a = array(3, dtype='int32')
             b = a.view(dtype='float32')
             assert b.shape == ()
    @@ -1800,27 +1799,17 @@
             assert exc.value[0] == "new type not compatible with array."
             exc = raises(TypeError, a.view, 'string')
             assert exc.value[0] == "data-type must not be 0-sized"
    -        if sys.byteorder == 'big':
    -            assert a.view('S4') == '\x00\x00\x00\x03'
    -        else:
    -            assert a.view('S4') == '\x03'
    +        assert a.view('S4') == '\x03'
             a = array('abc1', dtype='c')
             assert (a == ['a', 'b', 'c', '1']).all()
             assert a.view('S4') == 'abc1'
             b = a.view([('a', 'i2'), ('b', 'i2')])
             assert b.shape == (1,)
    -        if sys.byteorder == 'big':
    -            assert b[0][0] == 0x6162
    -            assert b[0][1] == 0x6331
    -        else:
    -            assert b[0][0] == 25185
    -            assert b[0][1] == 12643
    +        assert b[0][0] == 25185
    +        assert b[0][1] == 12643
             a = array([(1, 2)], dtype=[('a', 'int64'), ('b', 'int64')])[0]
             assert a.shape == ()
    -        if sys.byteorder == 'big':
    -            assert a.view('S16') == '\x00' * 7 + '\x01' + '\x00' * 7 + '\x02'
    -        else:
    -            assert a.view('S16') == '\x01' + '\x00' * 7 + '\x02'
    +        assert a.view('S16') == '\x01' + '\x00' * 7 + '\x02'
             a = array(2, dtype=' 2 ** 31 - 1:
    -            if sys.byteorder == 'big':
    -                assert (u == [0x0100000000000000]).all()
    -            else:
    -                assert (u == [1]).all()
    +            assert (u == [1]).all()
             else:
    -            if sys.byteorder == 'big':
    -                assert (u == [0x01000000, 0]).all()
    -            else:
    -                assert (u == [1, 0]).all()
    +            assert (u == [1, 0]).all()
             v = fromstring("abcd", dtype="|S2")
             assert v[0] == "ab"
             assert v[1] == "cd"
    @@ -3714,15 +3668,9 @@
             k = fromstring(self.float16val, dtype='float16')
             assert k[0] == dtype('float16').type(5.)
             dt =  array([5], dtype='longfloat').dtype
    -        print(dt.itemsize)
             if dt.itemsize == 8:
    -            import sys
    -            if sys.byteorder == 'big':
    -                m = fromstring('@\x14\x00\x00\x00\x00\x00\x00',
    -                               dtype='float64')
    -            else:
    -                m = fromstring('\x00\x00\x00\x00\x00\x00\x14@',
    -                               dtype='float64')
    +            m = fromstring('\x00\x00\x00\x00\x00\x00\x14@',
    +                           dtype='float64')
             elif dt.itemsize == 12:
                 m = fromstring('\x00\x00\x00\x00\x00\x00\x00\xa0\x01@\x00\x00',
                                dtype='float96')
    @@ -3744,13 +3692,8 @@
     
         def test_tostring(self):
             from numpy import array
    -        import sys
    -        if sys.byteorder == 'big':
    -            assert array([1, 2, 3], 'i2').tostring() == '\x00\x01\x00\x02\x00\x03'
    -            assert array([1, 2, 3], 'i2')[::2].tostring() == '\x00\x01\x00\x03'
    -        else:
    -            assert array([1, 2, 3], 'i2').tostring() == '\x01\x00\x02\x00\x03\x00'
    -            assert array([1, 2, 3], 'i2')[::2].tostring() == '\x01\x00\x03\x00'
    +        assert array([1, 2, 3], 'i2').tostring() == '\x01\x00\x02\x00\x03\x00'
    +        assert array([1, 2, 3], 'i2')[::2].tostring() == '\x01\x00\x03\x00'
             assert array([1, 2, 3], 'i2')[::2].tostring() == '\x00\x01\x00\x03'
             assert array(0, dtype='i2').tostring() == '\x00\x00'
    @@ -4246,11 +4189,7 @@
             v = a.view(('float32', 4))
             assert v.dtype == np.dtype('float32')
             assert v.shape == (10, 4)
    -        import sys
    -        if sys.byteorder == 'big':
    -            assert v[0][-2] == 2.53125
    -        else:
    -            assert v[0][-1] == 2.53125
    +        assert v[0][-1] == 2.53125
             exc = raises(ValueError, "a.view(('float32', 2))")
             assert exc.value[0] == 'new type not compatible with array.'
     
    diff --git a/pypy/module/micronumpy/test/test_scalar.py b/pypy/module/micronumpy/test/test_scalar.py
    --- a/pypy/module/micronumpy/test/test_scalar.py
    +++ b/pypy/module/micronumpy/test/test_scalar.py
    @@ -109,7 +109,6 @@
     
         def test_pickle(self):
             from numpy import dtype, zeros
    -        import sys
             try:
                 from numpy.core.multiarray import scalar
             except ImportError:
    @@ -120,11 +119,9 @@
             f = dtype('float64').type(13.37)
             c = dtype('complex128').type(13 + 37.j)
     
    -        swap = lambda s: (''.join(reversed(s))) if sys.byteorder == 'big' else s
    -        assert i.__reduce__() == (scalar, (dtype('int32'), swap('9\x05\x00\x00')))
    -        assert f.__reduce__() == (scalar, (dtype('float64'), swap('=\n\xd7\xa3p\xbd*@')))
    -        assert c.__reduce__() == (scalar, (dtype('complex128'), swap('\x00\x00\x00\x00\x00\x00*@') + \
    -                                                                swap('\x00\x00\x00\x00\x00\x80B@')))
    +        assert i.__reduce__() == (scalar, (dtype('int32'), '9\x05\x00\x00'))
    +        assert f.__reduce__() == (scalar, (dtype('float64'), '=\n\xd7\xa3p\xbd*@'))
    +        assert c.__reduce__() == (scalar, (dtype('complex128'), '\x00\x00\x00\x00\x00\x00*@\x00\x00\x00\x00\x00\x80B@'))
     
             assert loads(dumps(i)) == i
             assert loads(dumps(f)) == f
    @@ -259,20 +256,13 @@
             assert t < 7e-323
             t = s.view('complex64')
             assert type(t) is np.complex64
    -        if sys.byteorder == 'big':
    -            assert 0 < t.imag < 1
    -            assert t.real == 0
    -        else:
    -            assert 0 < t.real < 1
    -            assert t.imag == 0
    +        assert 0 < t.real < 1
    +        assert t.imag == 0
             exc = raises(TypeError, s.view, 'string')
             assert exc.value[0] == "data-type must not be 0-sized"
             t = s.view('S8')
             assert type(t) is np.string_
    -        if sys.byteorder == 'big':
    -            assert t == '\x00' * 7 + '\x0c'
    -        else:
    -            assert t == '\x0c'
    +        assert t == '\x0c'
             s = np.dtype('string').type('abc1')
             assert s.view('S4') == 'abc1'
             if '__pypy__' in sys.builtin_module_names:
    diff --git a/pypy/module/micronumpy/test/test_selection.py b/pypy/module/micronumpy/test/test_selection.py
    --- a/pypy/module/micronumpy/test/test_selection.py
    +++ b/pypy/module/micronumpy/test/test_selection.py
    @@ -327,15 +327,10 @@
     # tests from numpy/core/tests/test_regression.py
         def test_sort_bigendian(self):
             from numpy import array, dtype
    -        import sys
    -
    -        # little endian sorting for big endian machine
    -        # is not yet supported! IMPL ME
    -        if sys.byteorder == 'little':
    -            a = array(range(11), dtype='float64')
    -            c = a.astype(dtype(''
             D.__module__ = 'mod'
             mod = new.module('mod')
             mod.D = D
    @@ -511,7 +510,7 @@
                 tp9
                 Rp10
                 (I3
    -            S'{E}'
    +            S'<'
                 p11
                 NNNI-1
                 I-1
    @@ -521,7 +520,7 @@
                 S'\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@'
                 p13
                 tp14
    -            b.'''.replace('            ','').format(E=E)
    +            b.'''.replace('            ','')
             for ss,sn in zip(s.split('\n')[1:],s_from_numpy.split('\n')[1:]):
                 if len(ss)>10:
                     # ignore binary data, it will be checked later
    diff --git a/rpython/rlib/clibffi.py b/rpython/rlib/clibffi.py
    --- a/rpython/rlib/clibffi.py
    +++ b/rpython/rlib/clibffi.py
    @@ -594,12 +594,9 @@
                                                 intmask(argtypes[i].c_size),
                                                 flavor='raw')
             if restype != ffi_type_void:
    -            self.restype_size = intmask(restype.c_size)
    -            size = adjust_return_size(self.restype_size)
    +            size = adjust_return_size(intmask(restype.c_size))
                 self.ll_result = lltype.malloc(rffi.VOIDP.TO, size,
                                                flavor='raw')
    -        else:
    -            self.restype_size = -1
     
         def push_arg(self, value):
             #if self.pushed_args == self.argnum:
    @@ -636,12 +633,7 @@
                                 rffi.cast(VOIDPP, self.ll_args))
             if RES_TP is not lltype.Void:
                 TP = lltype.Ptr(rffi.CArray(RES_TP))
    -            ptr = self.ll_result
    -            if _BIG_ENDIAN and RES_TP in TYPE_MAP_INT:
    -                # we get a 8 byte value in big endian
    -                n = rffi.sizeof(lltype.Signed) - self.restype_size
    -                ptr = rffi.ptradd(ptr, n)
    -            res = rffi.cast(TP, ptr)[0]
    +            res = rffi.cast(TP, self.ll_result)[0]
             else:
                 res = None
             self._clean_args()
    diff --git a/rpython/rlib/libffi.py b/rpython/rlib/libffi.py
    --- a/rpython/rlib/libffi.py
    +++ b/rpython/rlib/libffi.py
    @@ -4,7 +4,6 @@
     from __future__ import with_statement
     
     from rpython.rtyper.lltypesystem import rffi, lltype
    -from rpython.rlib.unroll import unrolling_iterable
     from rpython.rlib.objectmodel import specialize, enforceargs
     from rpython.rlib.rarithmetic import intmask, r_uint, r_singlefloat, r_longlong
     from rpython.rlib import jit
    @@ -16,9 +15,6 @@
     from rpython.rlib.rdynload import DLLHANDLE
     
     import os
    -import sys
    -
    -_BIG_ENDIAN = sys.byteorder == 'big'
     
     class types(object):
         """
    @@ -215,8 +211,6 @@
     
     # ======================================================================
     
    -NARROW_INTEGER_TYPES = unrolling_iterable([rffi.CHAR,
    -    rffi.UCHAR, rffi.SHORT, rffi.USHORT, rffi.INT, rffi.UINT])
     
     class Func(AbstractFuncPtr):
     
    @@ -269,12 +263,7 @@
                 res = self._do_call_raw(self.funcsym, ll_args)
             elif _fits_into_signed(RESULT):
                 assert not types.is_struct(self.restype)
    -            for res in NARROW_INTEGER_TYPES:
    -                if RESULT is res:
    -                    res = self._do_call_int(self.funcsym, ll_args, rffi.CHAR)
    -                    break
    -            else:
    -                res = self._do_call_int(self.funcsym, ll_args, rffi.SIGNED)
    +            res = self._do_call_int(self.funcsym, ll_args)
             elif RESULT is rffi.DOUBLE:
                 return self._do_call_float(self.funcsym, ll_args)
             elif RESULT is rffi.FLOAT:
    @@ -336,9 +325,8 @@
     
         #@jit.oopspec('libffi_call_int(self, funcsym, ll_args)')
         @jit.dont_look_inside
    -    @specialize.arg(3)
    -    def _do_call_int(self, funcsym, ll_args, TP):
    -        return self._do_call(funcsym, ll_args, TP)
    +    def _do_call_int(self, funcsym, ll_args):
    +        return self._do_call(funcsym, ll_args, rffi.SIGNED)
     
         #@jit.oopspec('libffi_call_float(self, funcsym, ll_args)')
         @jit.dont_look_inside
    @@ -380,10 +368,10 @@
         @specialize.arg(3)
         def _do_call(self, funcsym, ll_args, RESULT):
             # XXX: check len(args)?
    -        ll_result = lltype.nullptr(rffi.VOIDP.TO)
    +        ll_result = lltype.nullptr(rffi.CCHARP.TO)
             if self.restype != types.void:
                 size = adjust_return_size(intmask(self.restype.c_size))
    -            ll_result = lltype.malloc(rffi.VOIDP.TO, size,
    +            ll_result = lltype.malloc(rffi.CCHARP.TO, size,
                                           flavor='raw')
             ffires = c_ffi_call(self.ll_cif,
                                 self.funcsym,
    @@ -391,20 +379,14 @@
                                 rffi.cast(rffi.VOIDPP, ll_args))
             if RESULT is not lltype.Void:
                 TP = lltype.Ptr(rffi.CArray(RESULT))
    +            buf = rffi.cast(TP, ll_result)
                 if types.is_struct(self.restype):
                     assert RESULT == rffi.SIGNED
                     # for structs, we directly return the buffer and transfer the
                     # ownership
    -                buf = rffi.cast(TP, ll_result)
                     res = rffi.cast(RESULT, buf)
                 else:
    -                if _BIG_ENDIAN and types.getkind(self.restype) in ('i','u'):
    -                    ptr = ll_result
    -                    n = rffi.sizeof(lltype.Signed) - self.restype.c_size
    -                    ptr = rffi.ptradd(ptr, n)
    -                    res = rffi.cast(TP, ptr)[0]
    -                else:
    -                    res = rffi.cast(TP, ll_result)[0]
    +                res = buf[0]
             else:
                 res = None
             self._free_buffers(ll_result, ll_args)
    diff --git a/rpython/rlib/rstruct/test/test_runpack.py b/rpython/rlib/rstruct/test/test_runpack.py
    --- a/rpython/rlib/rstruct/test/test_runpack.py
    +++ b/rpython/rlib/rstruct/test/test_runpack.py
    @@ -6,13 +6,11 @@
     
     class TestRStruct(BaseRtypingTest):
         def test_unpack(self):
    -        import sys
             pad = '\x00' * (LONG_BIT//8-1)    # 3 or 7 null bytes
             def fn():
                 return runpack('sll', 'a'+pad+'\x03'+pad+'\x04'+pad)[1]
    -        result = 3 if sys.byteorder == 'little' else 3 << (LONG_BIT-8)
    -        assert fn() == result
    -        assert self.interpret(fn, []) == result
    +        assert fn() == 3
    +        assert self.interpret(fn, []) == 3
     
         def test_unpack_2(self):
             data = struct.pack('iiii', 0, 1, 2, 4)
    diff --git a/rpython/rlib/test/test_clibffi.py b/rpython/rlib/test/test_clibffi.py
    --- a/rpython/rlib/test/test_clibffi.py
    +++ b/rpython/rlib/test/test_clibffi.py
    @@ -181,12 +181,11 @@
                 p_a2 = rffi.cast(rffi.VOIDPP, ll_args[1])[0]
                 a1 = rffi.cast(rffi.INTP, p_a1)[0]
                 a2 = rffi.cast(rffi.INTP, p_a2)[0]
    -            res = rffi.cast(rffi.SIGNEDP, ll_res)
    -            # must store a full ffi arg!
    +            res = rffi.cast(rffi.INTP, ll_res)
                 if a1 > a2:
    -                res[0] = 1
    +                res[0] = rffi.cast(rffi.INT, 1)
                 else:
    -                res[0] = -1
    +                res[0] = rffi.cast(rffi.INT, -1)
     
             ptr = CallbackFuncPtr([ffi_type_pointer, ffi_type_pointer],
                                   ffi_type_sint, callback)
    diff --git a/rpython/rlib/test/test_libffi.py b/rpython/rlib/test/test_libffi.py
    --- a/rpython/rlib/test/test_libffi.py
    +++ b/rpython/rlib/test/test_libffi.py
    @@ -274,7 +274,7 @@
             """
             libfoo = self.get_libfoo()
             func = (libfoo, 'diff_xy', [types.sint, types.signed], types.sint)
    -        res = self.call(func, [50, 8], rffi.INT)
    +        res = self.call(func, [50, 8], lltype.Signed)
             assert res == 42
     
         def test_simple(self):
    @@ -287,7 +287,7 @@
             """
             libfoo = self.get_libfoo()
             func = (libfoo, 'sum_xy', [types.sint, types.double], types.sint)
    -        res = self.call(func, [38, 4.2], rffi.INT, jitif=["floats"])
    +        res = self.call(func, [38, 4.2], lltype.Signed, jitif=["floats"])
             assert res == 42
     
         def test_float_result(self):
    @@ -319,7 +319,7 @@
             """
             libfoo = self.get_libfoo()
             func = (libfoo, 'many_args', [types.uchar, types.sint], types.sint)
    -        res = self.call(func, [chr(20), 22], rffi.INT)
    +        res = self.call(func, [chr(20), 22], rffi.SIGNED)
             assert res == 42
     
         def test_char_args(self):
    @@ -418,12 +418,12 @@
             set_dummy = (libfoo, 'set_dummy', [types.sint], types.void)
             get_dummy = (libfoo, 'get_dummy', [], types.sint)
             #
    -        initval = self.call(get_dummy, [], rffi.INT)
    +        initval = self.call(get_dummy, [], rffi.SIGNED)
             #
             res = self.call(set_dummy, [initval+1], lltype.Void)
             assert res is None
             #
    -        res = self.call(get_dummy, [], rffi.INT)
    +        res = self.call(get_dummy, [], rffi.SIGNED)
             assert res == initval+1
     
         def test_single_float_args(self):
    
    From pypy.commits at gmail.com  Sun Mar 27 22:35:28 2016
    From: pypy.commits at gmail.com (arigo)
    Date: Sun, 27 Mar 2016 19:35:28 -0700 (PDT)
    Subject: [pypy-commit] pypy.org extradoc: Update the release on ppc64
    Message-ID: <56f89870.c856c20a.787a3.177f@mx.google.com>
    
    Author: Armin Rigo 
    Branch: extradoc
    Changeset: r730:98f225afe423
    Date: 2016-03-28 04:35 +0200
    http://bitbucket.org/pypy/pypy.org/changeset/98f225afe423/
    
    Log:	Update the release on ppc64
    
    diff --git a/download.html b/download.html
    --- a/download.html
    +++ b/download.html
    @@ -125,7 +125,7 @@
     
  • FreeBSD 9.2 x86 64 bit (hopefully availabe soon) (see [1] below)
  • Windows binary (32bit) (you might need the VS 2008 runtime library installer vcredist_x86.exe.)
  • - PowerPC PPC64 Linux binary (64bit big-endian, Fedora 20) (see [1] below)
  • + PowerPC PPC64 Linux binary (64bit big-endian, Fedora 20) (see [1] below)
  • PowerPC PPC64le Linux binary (64bit little-endian, Fedora 21) (see [1] below)
  • Source (tar.bz2); Source (zip). See below for more about the sources.
  • All our downloads, including previous versions. We also have a
     @@ -431,7 +431,7 @@
      1573c9284d3ec236c8e6ef3b954753932dff29462c54b5885b761d1ee68b6e05 pypy-5.0.1-src.tar.bz2
      6e343f24c5e4ea87879bc4fd299b65a2825796286319edc0b69b3681017c145f pypy-5.0.1-src.zip
      c12254d8b1747322736d26e014744a426c6900d232c1799140fbb43f44319730 pypy-5.0.1-win32.zip
     -88979979641c872ffb358ae94b1caf8e1b1bae1e382755e75da354c69283a65e pypy-5.0.1+-ppc64.tar.bz2
     +3373b1d51fc610b962e0b535087073f2cc921ab0269ba2896b140ab4a56588fd pypy-5.0.1++-ppc64.tar.bz2
      53d742504a78366b833c04bd83740336aa4ddfecffeff6b2fa8728fcd6b4c8af pypy-5.0.1+-ppc64le.tar.bz2
  • pypy3-2.4.0 sha1:

     diff --git a/source/download.txt b/source/download.txt
     --- a/source/download.txt
     +++ b/source/download.txt
     @@ -98,7 +98,7 @@
      .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-linux-armel.tar.bz2
      .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-osx64.tar.bz2
      .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-win32.zip
     -.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1+-ppc64.tar.bz2
     +.. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1++-ppc64.tar.bz2
      .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1+-ppc64le.tar.bz2
      .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-src.tar.bz2
      .. __: https://bitbucket.org/pypy/pypy/downloads/pypy-5.0.1-src.zip
     @@ -469,7 +469,7 @@
         1573c9284d3ec236c8e6ef3b954753932dff29462c54b5885b761d1ee68b6e05 pypy-5.0.1-src.tar.bz2
         6e343f24c5e4ea87879bc4fd299b65a2825796286319edc0b69b3681017c145f pypy-5.0.1-src.zip
         c12254d8b1747322736d26e014744a426c6900d232c1799140fbb43f44319730 pypy-5.0.1-win32.zip
     -   88979979641c872ffb358ae94b1caf8e1b1bae1e382755e75da354c69283a65e pypy-5.0.1+-ppc64.tar.bz2
     +   3373b1d51fc610b962e0b535087073f2cc921ab0269ba2896b140ab4a56588fd pypy-5.0.1++-ppc64.tar.bz2
         53d742504a78366b833c04bd83740336aa4ddfecffeff6b2fa8728fcd6b4c8af pypy-5.0.1+-ppc64le.tar.bz2

     From pypy.commits at gmail.com  Mon Mar 28 09:20:05 2016
     From: pypy.commits at gmail.com (arigo)
     Date: Mon, 28 Mar 2016 06:20:05 -0700 (PDT)
     Subject: [pypy-commit] pypy default: Fix broken parsing logic
     Message-ID: <56f92f85.03321c0a.e5066.ffffb997@mx.google.com>

     Author: Armin Rigo 
     Branch: 
     Changeset: r83411:f5820433cbf9
     Date: 2016-03-28 15:19 +0200
     http://bitbucket.org/pypy/pypy/changeset/f5820433cbf9/

     Log:	Fix broken parsing logic

     diff --git a/rpython/jit/backend/arm/detect.py b/rpython/jit/backend/arm/detect.py
     --- a/rpython/jit/backend/arm/detect.py
     +++ b/rpython/jit/backend/arm/detect.py
     @@ -78,28 +78,25 @@
          finally:
              os.close(fd)
      
     +    # decode chunks of 8 bytes (a_type, a_val), and
     +    # return the a_val whose a_type corresponds to type_,
     +    # or zero if not found.
          i = 0
          while i <= buf_size - struct_size:
     -        if buf[i] == '\x00':
     -            i += 1
     -            continue
     -
              # We only support little-endian ARM
              a_type = (ord(buf[i]) |
                        (ord(buf[i+1]) << 8) |
                        (ord(buf[i+2]) << 16) |
                        (ord(buf[i+3]) << 24))
     -
     -        if a_type != type_:
     -            i += struct_size
     -
              a_val = (ord(buf[i+4]) |
                       (ord(buf[i+5]) << 8) |
                       (ord(buf[i+6]) << 16) |
                       (ord(buf[i+7]) << 24))
     -        return a_val
     +        i += struct_size
     +        if a_type == type_:
     +            return a_val
      
     -    raise KeyError('failed to find auxval type: %d' % type_)
     +    return 0
      
      
      def detect_neon():

     From pypy.commits at gmail.com  Mon Mar 28 12:15:07 2016
     From: pypy.commits at gmail.com (mattip)
     Date: Mon, 28 Mar 2016 09:15:07 -0700 (PDT)
     Subject: [pypy-commit] pypy win32-lib-name: fix for case where cwd is not pypy/goal
     Message-ID: <56f9588b.d4b61c0a.1fb29.fffff471@mx.google.com>

     Author: mattip 
     Branch: win32-lib-name
     Changeset: r83412:a1bfc9e7f00c
     Date: 2016-03-28 19:14 +0300
     http://bitbucket.org/pypy/pypy/changeset/a1bfc9e7f00c/

     Log:	fix for case where cwd is not pypy/goal

     diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
     --- a/pypy/goal/targetpypystandalone.py
     +++ b/pypy/goal/targetpypystandalone.py
     @@ -240,8 +240,9 @@
                      "when --shared is on (it is by default). "
                      "See issue #1971.")
          if sys.platform == 'win32':
     -        config.translation.libname = '..\\..\\libs\\python27.lib'
     -        thisdir.join('..', '..', 'libs').ensure(dir=1)
     +        libdir = thisdir.join('..', '..', 'libs')
     +        libdir.ensure(dir=1)
     +        config.translation.libname = str(libdir.join('python27.lib'))
      
          if config.translation.thread:
              config.objspace.usemodules.thread = True
     diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py
     --- a/rpython/translator/driver.py
     +++ b/rpython/translator/driver.py
     @@ -490,10 +490,12 @@
                  # for pypy, the import library is renamed and moved to
                  # libs/python27.lib, according to the pragma in pyconfig.h
                  libname = self.config.translation.libname
     -            libname = libname or soname.new(ext='lib').basename
     -            libname = str(newsoname.dirpath().join(libname))
     -            shutil.copyfile(str(soname.new(ext='lib')), libname)
     -            self.log.info("copied: %s" % (libname,))
     +            oldlibname = soname.new(ext='lib')
     +            if not libname:
     +                libname = oldlibname.basename
     +            libname = str(newsoname.dirpath().join(libname))
     +            shutil.copyfile(str(oldlibname), libname)
     +            self.log.info("copied: %s to %s" % (oldlibname, libname,))
              # the pdb file goes in the same place as pypy(w).exe
              ext_to_copy = ['pdb',]
              for ext in ext_to_copy:

     From pypy.commits at gmail.com  Tue Mar 29 02:59:41 2016
     From: pypy.commits at gmail.com (fijal)
     Date: Mon, 28 Mar 2016 23:59:41 -0700 (PDT)
     Subject: [pypy-commit] pypy faster-traceback: fixes
     Message-ID: <56fa27dd.d4b61c0a.1fb29.ffffd0c2@mx.google.com>

     Author: fijal 
     Branch: faster-traceback
     Changeset: r83413:02b7658fdc94
     Date: 2016-03-29 08:58 +0200
     http://bitbucket.org/pypy/pypy/changeset/02b7658fdc94/

     Log:	fixes

     diff --git a/rpython/jit/metainterp/opencoder.py b/rpython/jit/metainterp/opencoder.py
     --- a/rpython/jit/metainterp/opencoder.py
     +++ b/rpython/jit/metainterp/opencoder.py
     @@ -354,7 +354,7 @@
                  # don't intern float constants
                  self._consts_float += 1
                  v = (len(self._floats) << 1) | 1
     -            self._floats.append(box.getfloat())
     +            self._floats.append(box.getfloatstorage())
                  return tag(TAGCONSTOTHER, v)
              else:
                  self._consts_ptr += 1
     diff --git a/rpython/rlib/rvmprof/cintf.py b/rpython/rlib/rvmprof/cintf.py
     --- a/rpython/rlib/rvmprof/cintf.py
     +++ b/rpython/rlib/rvmprof/cintf.py
     @@ -56,7 +56,7 @@
              compilation_info=eci, _nowrapper=True)
      
          vmprof_get_stack_trace_default = rffi.llexternal(
     -        "vmprof_get_stack_trace_default",
     +        "get_stack_trace_default",
              [rffi.CArrayPtr(lltype.Signed), rffi.INT], rffi.INT,
              compilation_info=eci, releasegil=False)
          return CInterface(locals())
     diff --git a/rpython/rlib/rvmprof/src/vmprof_common.h b/rpython/rlib/rvmprof/src/vmprof_common.h
     --- a/rpython/rlib/rvmprof/src/vmprof_common.h
     +++ b/rpython/rlib/rvmprof/src/vmprof_common.h
     @@ -120,7 +120,7 @@
      }
      #endif
      
     -static int get_stack_trace_default(intptr_t *result, int max_depth)
     +int get_stack_trace_default(intptr_t *result, int max_depth)
      {
          return get_stack_trace(get_vmprof_stack(), result, max_depth, 0);
      }

     From pypy.commits at gmail.com  Tue Mar 29 04:51:16 2016
     From: pypy.commits at gmail.com (arigo)
     Date: Tue, 29 Mar 2016 01:51:16 -0700 (PDT)
     Subject: [pypy-commit] pypy default: Test and fix: we could get in obscure cases an AssertionError that
     Message-ID: <56fa4204.85b01c0a.ed107.02fe@mx.google.com>

     Author: Armin Rigo 
     Branch: 
     Changeset: r83414:bd677c5dd9b8
     Date: 2016-03-29 10:50 +0200
     http://bitbucket.org/pypy/pypy/changeset/bd677c5dd9b8/

     Log:	Test and fix: we could get in obscure cases an AssertionError that
     	should really be just an InvalidLoop

     diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py
     --- a/rpython/jit/metainterp/optimizeopt/optimizer.py
     +++ b/rpython/jit/metainterp/optimizeopt/optimizer.py
     @@ -7,6 +7,7 @@
      from rpython.jit.metainterp.resoperation import rop, AbstractResOp, GuardResOp,\
           OpHelpers, ResOperation
      from rpython.jit.metainterp.optimizeopt import info
     +from rpython.jit.metainterp.optimize import InvalidLoop
      from rpython.jit.metainterp.typesystem import llhelper
      from rpython.rlib.objectmodel import specialize, we_are_translated
      from rpython.rlib.debug import debug_print
     @@ -411,11 +412,14 @@
          def make_constant(self, box, constbox):
              assert isinstance(constbox, Const)
              box = self.get_box_replacement(box)
     -        if not we_are_translated():    # safety-check
     -            if (box.get_forwarded() is not None and
     -                isinstance(constbox, ConstInt) and
     -                not isinstance(box.get_forwarded(), info.AbstractRawPtrInfo)):
     -                assert box.get_forwarded().contains(constbox.getint())
     +        # safety-check: if the constant is outside the bounds for the
     +        # box, then it is an invalid loop
     +        if (box.get_forwarded() is not None and
     +            isinstance(constbox, ConstInt) and
     +            not isinstance(box.get_forwarded(), info.AbstractRawPtrInfo)):
     +            if not box.get_forwarded().contains(constbox.getint()):
     +                raise InvalidLoop("a box is turned into constant that is "
     +                                  "outside the range allowed for that box")
              if box.is_constant():
                  return
              if box.type == 'r' and box.get_forwarded() is not None:
     diff --git a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
     --- a/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
     +++ b/rpython/jit/metainterp/optimizeopt/test/test_optimizeopt.py
     @@ -3072,6 +3072,16 @@
              """
              self.raises(InvalidLoop, self.optimize_loop, ops, ops)
      
     +    def test_invalid_guard_value_after_bounds(self):
     +        ops = """
     +        [i0]
     +        i1 = int_gt(i0, 5)
     +        guard_true(i1) []
     +        guard_value(i0, 2) []
     +        jump()
     +        """
     +        self.raises(InvalidLoop, self.optimize_loop, ops, ops)
     +
          def test_guard_class_oois(self):
              ops = """
              [p1]

     From pypy.commits at gmail.com  Tue Mar 29 08:14:39 2016
     From: pypy.commits at gmail.com (arigo)
     Date: Tue, 29 Mar 2016 05:14:39 -0700 (PDT)
     Subject: [pypy-commit] pypy default: Fix warning
     Message-ID: <56fa71af.a3abc20a.9c261.ffffa14a@mx.google.com>

     Author: Armin Rigo 
     Branch: 
     Changeset: r83415:3fc855a83e91
     Date: 2016-03-29 14:14 +0200
     http://bitbucket.org/pypy/pypy/changeset/3fc855a83e91/

     Log:	Fix warning

     diff --git a/rpython/rlib/rvmprof/src/vmprof_main.h b/rpython/rlib/rvmprof/src/vmprof_main.h
     --- a/rpython/rlib/rvmprof/src/vmprof_main.h
     +++ b/rpython/rlib/rvmprof/src/vmprof_main.h
     @@ -290,7 +290,7 @@
      
      static int close_profile(void)
      {
     -    unsigned char marker = MARKER_TRAILER;
     +    char marker = MARKER_TRAILER;
      
          if (_write_all(&marker, 1) < 0)
              return -1;

     From pypy.commits at gmail.com  Tue Mar 29 08:16:37 2016
     From: pypy.commits at gmail.com (fijal)
     Date: Tue, 29 Mar 2016 05:16:37 -0700 (PDT)
     Subject: [pypy-commit] pypy faster-traceback: remove outermost enter/leave frame, see what happens
     Message-ID: <56fa7225.47afc20a.b9240.2398@mx.google.com>

     Author: fijal 
     Branch: faster-traceback
     Changeset: r83416:88009e2e49fb
     Date: 2016-03-29 14:15 +0200
     http://bitbucket.org/pypy/pypy/changeset/88009e2e49fb/

     Log:	remove outermost enter/leave frame, see what happens

     diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
     --- a/rpython/jit/metainterp/pyjitpl.py
     +++ b/rpython/jit/metainterp/pyjitpl.py
     @@ -1945,16 +1945,17 @@
                                       jitcode.jitdriver_sd.jitdriver.is_recursive)
              #return
self.jitdriver_sd is not None and jitcode is self.jitdriver_sd.mainjitcode - def newframe(self, jitcode, greenkey=None): + def newframe(self, jitcode, greenkey=None, enter_portal_frame=True): if jitcode.jitdriver_sd: self.portal_call_depth += 1 self.call_ids.append(self.current_call_id) - unique_id = -1 - if greenkey is not None: - unique_id = jitcode.jitdriver_sd.warmstate.get_unique_id( - greenkey) - jd_no = jitcode.jitdriver_sd.index - self.enter_portal_frame(jd_no, unique_id) + if enter_portal_frame: + unique_id = -1 + if greenkey is not None: + unique_id = jitcode.jitdriver_sd.warmstate.get_unique_id( + greenkey) + jd_no = jitcode.jitdriver_sd.index + self.enter_portal_frame(jd_no, unique_id) self.current_call_id += 1 if greenkey is not None and self.is_main_jitcode(jitcode): self.portal_trace_positions.append( @@ -1995,6 +1996,8 @@ def finishframe(self, resultbox, leave_portal_frame=True): # handle a non-exceptional return from the current frame self.last_exc_value = lltype.nullptr(rclass.OBJECT) + if leave_portal_frame and len(self.framestack) == 1: + leave_portal_frame = False # don't emit for the last one self.popframe(leave_portal_frame=leave_portal_frame) if self.framestack: if resultbox is not None: @@ -2033,7 +2036,8 @@ target = ord(code[position+1]) | (ord(code[position+2])<<8) frame.pc = target raise ChangeFrame - self.popframe() + # emit leave_portal_frame for all but last + self.popframe(leave_portal_frame=bool(self.framestack)) try: self.compile_exit_frame_with_exception(self.last_exc_box) except SwitchToBlackhole, stb: @@ -2714,7 +2718,8 @@ # ----- make a new frame ----- self.portal_call_depth = -1 # always one portal around self.framestack = [] - f = self.newframe(self.jitdriver_sd.mainjitcode) + f = self.newframe(self.jitdriver_sd.mainjitcode, + enter_portal_frame=False) f.setup_call(original_boxes) assert self.portal_call_depth == 0 self.virtualref_boxes = [] From pypy.commits at gmail.com Tue Mar 29 08:18:11 2016 From: pypy.commits at gmail.com (fijal) Date: Tue, 29 Mar 2016 05:18:11 -0700 (PDT) Subject: [pypy-commit] pypy default: fix the test Message-ID: <56fa7283.838d1c0a.7e73a.7d07@mx.google.com> Author: fijal Branch: Changeset: r83417:7abe7ba251e5 Date: 2016-03-29 14:16 +0200 http://bitbucket.org/pypy/pypy/changeset/7abe7ba251e5/ Log: fix the test diff --git a/rpython/jit/metainterp/test/test_jitiface.py b/rpython/jit/metainterp/test/test_jitiface.py --- a/rpython/jit/metainterp/test/test_jitiface.py +++ b/rpython/jit/metainterp/test/test_jitiface.py @@ -18,12 +18,12 @@ reasons = [] class MyJitIface(JitHookInterface): - def on_abort(self, reason, jitdriver, greenkey, greenkey_repr, logops, trace): + def on_abort(self, reason, jitdriver, greenkey, greenkey_repr, logops, ops): assert jitdriver is myjitdriver assert len(greenkey) == 1 reasons.append(reason) assert greenkey_repr == 'blah' - assert trace.length() > 1 + assert len(ops) > 1 iface = MyJitIface() From pypy.commits at gmail.com Tue Mar 29 09:49:47 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 29 Mar 2016 06:49:47 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: promote the result of elidable_compatible if the other arguments are constant Message-ID: <56fa87fb.6bb8c20a.3a155.ffffd28a@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83418:a208c212e736 Date: 2016-03-29 13:11 +0200 http://bitbucket.org/pypy/pypy/changeset/a208c212e736/ Log: promote the result of elidable_compatible if the other arguments are constant diff --git a/rpython/rlib/jit.py 
b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -156,16 +156,28 @@ """ def decorate(func): elidable(func) + def _all_args_const(*args): + if len(args) == 0: + return True + if len(args) == 1: + return isconstant(args[0]) + return isconstant(args[0]) and _all_args_const(*args[1:]) def wrapped_func(x, *args): assert x is not None x = hint(x, promote_compatible=True) if quasi_immut_field_name_for_second_arg is not None: - return func(x, getattr(x, quasi_immut_field_name_for_second_arg), *args) - return func(x, *args) + result = func(x, getattr(x, quasi_immut_field_name_for_second_arg), *args) + else: + result = func(x, *args) + if _all_args_const(*args): + promote(result) # make the tracer treat it as a constant + return result + wrapped_func.func_name = "elidable_compatible_%s" % (func.func_name, ) return wrapped_func return decorate + def dont_look_inside(func): """ Make sure the JIT does not trace inside decorated function (it becomes a call instead) diff --git a/rpython/rlib/test/test_jit.py b/rpython/rlib/test/test_jit.py --- a/rpython/rlib/test/test_jit.py +++ b/rpython/rlib/test/test_jit.py @@ -143,7 +143,7 @@ res = self.interpret(f, [2]) assert res == 5 - def test_elidable_promote(self): + def test_elidable_compatible(self): class A(object): pass a1 = A() @@ -153,16 +153,21 @@ @elidable_compatible() def g(a): return a.x + @elidable_compatible() + def h(a, b, c): + return a.x + b + c def f(x): if x == 1: a = a1 else: a = a2 - return g(a) + return g(a) + h(a, 2, 0) + assert f(1) == 4 + assert f(4) == 6 res = self.interpret(f, [1]) - assert res == 1 + assert res == 4 res = self.interpret(f, [4]) - assert res == 2 + assert res == 6 def test_elidable_promote_args(self): @elidable_promote(promote_args='0') From pypy.commits at gmail.com Tue Mar 29 09:49:49 2016 From: pypy.commits at gmail.com (cfbolz) Date: Tue, 29 Mar 2016 06:49:49 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: fix translation Message-ID: <56fa87fd.41d91c0a.353e8.ffff9523@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83419:7bb835521a2e Date: 2016-03-29 12:18 +0100 http://bitbucket.org/pypy/pypy/changeset/7bb835521a2e/ Log: fix translation diff --git a/rpython/jit/metainterp/compatible.py b/rpython/jit/metainterp/compatible.py --- a/rpython/jit/metainterp/compatible.py +++ b/rpython/jit/metainterp/compatible.py @@ -36,7 +36,7 @@ if oldcond.same_cond(cond, res): return cond.activate(res, optimizer) - if self.conditions and self.conditions.debug_mp_str == cond.debug_mp_str: + if self.conditions and self.conditions[-1].debug_mp_str == cond.debug_mp_str: cond.debug_mp_str = '' self.conditions.append(cond) From pypy.commits at gmail.com Tue Mar 29 11:32:47 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 29 Mar 2016 08:32:47 -0700 (PDT) Subject: [pypy-commit] pypy default: For a "func-in-small-set" attribute, initialize the class vtable to Message-ID: <56faa01f.6774c20a.1ffa0.fffff5b5@mx.google.com> Author: Armin Rigo Branch: Changeset: r83421:1f344b91d8ab Date: 2016-03-29 17:26 +0200 http://bitbucket.org/pypy/pypy/changeset/1f344b91d8ab/ Log: For a "func-in-small-set" attribute, initialize the class vtable to '\xff' if there is no corresponding function, instead of the default of 0 which will map to a random function. 
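To make the failure mode concrete: a "func-in-small-set" attribute is dispatched through a one-byte index into a table of functions, so a vtable slot left at the zeroed default silently dispatches to whatever happens to sit at index 0. The following is a minimal, free-standing Python sketch of that idea only -- it is not the rtyper code, and the names are invented for illustration; the real change is in the rclass.py/rpbc.py diff below:

    MISSING = chr(0xFF)        # sentinel stored in the vtable slot

    def vtable_entry(func_index):
        # func_index is None when the class simply has no such method
        if func_index is None:
            return MISSING
        return chr(func_index)

    def dispatch(table, entry):
        if entry == MISSING:
            raise ValueError("method not defined for this class")
        return table[ord(entry)]

    table = [lambda self: "some unrelated function",
             lambda self: 42,
             lambda self: 63]
    assert dispatch(table, vtable_entry(1))(None) == 42
    assert dispatch(table, vtable_entry(2))(None) == 63
    try:
        dispatch(table, vtable_entry(None))   # base class without the method
    except ValueError:
        pass   # loud failure, instead of a zeroed slot quietly picking table[0]
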
diff --git a/rpython/rtyper/rclass.py b/rpython/rtyper/rclass.py --- a/rpython/rtyper/rclass.py +++ b/rpython/rtyper/rclass.py @@ -310,10 +310,15 @@ # setup class attributes: for each attribute name at the level # of 'r_parentcls', look up its value in the class def assign(mangled_name, value): - if (isinstance(value, Constant) and - isinstance(value.value, staticmethod)): - value = Constant(value.value.__get__(42)) # staticmethod => bare function - llvalue = r.convert_desc_or_const(value) + if value is None: + llvalue = r.special_uninitialized_value() + if llvalue is None: + return + else: + if (isinstance(value, Constant) and + isinstance(value.value, staticmethod)): + value = Constant(value.value.__get__(42)) # staticmethod => bare function + llvalue = r.convert_desc_or_const(value) setattr(vtable, mangled_name, llvalue) for fldname in r_parentcls.clsfields: @@ -321,8 +326,7 @@ if r.lowleveltype is Void: continue value = self.classdef.classdesc.read_attribute(fldname, None) - if value is not None: - assign(mangled_name, value) + assign(mangled_name, value) # extra PBC attributes for (access_set, attr), (mangled_name, r) in r_parentcls.pbcfields.items(): if self.classdef.classdesc not in access_set.descs: @@ -330,8 +334,7 @@ if r.lowleveltype is Void: continue attrvalue = self.classdef.classdesc.read_attribute(attr, None) - if attrvalue is not None: - assign(mangled_name, attrvalue) + assign(mangled_name, attrvalue) def fill_vtable_root(self, vtable): """Initialize the head of the vtable.""" diff --git a/rpython/rtyper/rmodel.py b/rpython/rtyper/rmodel.py --- a/rpython/rtyper/rmodel.py +++ b/rpython/rtyper/rmodel.py @@ -125,6 +125,9 @@ self, value)) return value + def special_uninitialized_value(self): + return None + def get_ll_eq_function(self): """Return an eq(x,y) function to use to compare two low-level values of this Repr. 
diff --git a/rpython/rtyper/rpbc.py b/rpython/rtyper/rpbc.py --- a/rpython/rtyper/rpbc.py +++ b/rpython/rtyper/rpbc.py @@ -431,10 +431,14 @@ if isinstance(value, types.MethodType) and value.im_self is None: value = value.im_func # unbound method -> bare function if value is None: + assert self.descriptions[0] is None return chr(0) funcdesc = self.rtyper.annotator.bookkeeper.getdesc(value) return self.convert_desc(funcdesc) + def special_uninitialized_value(self): + return chr(0xFF) + def dispatcher(self, shape, index, argtypes, resulttype): key = shape, index, tuple(argtypes), resulttype if key in self._dispatch_cache: diff --git a/rpython/rtyper/test/test_rpbc.py b/rpython/rtyper/test/test_rpbc.py --- a/rpython/rtyper/test/test_rpbc.py +++ b/rpython/rtyper/test/test_rpbc.py @@ -1947,6 +1947,30 @@ kwds['config'] = self.config return TestRPBC.interpret(fn, args, **kwds) + def test_class_missing_base_method_should_crash(self): + class Base(object): + pass # no method 'm' here + class A(Base): + def m(self): + return 42 + class B(Base): + def m(self): + return 63 + def g(n): + if n == 1: + return A() + elif n == 2: + return B() + else: + return Base() + def f(n): + return g(n).m() + + assert self.interpret(f, [1]) == 42 + assert self.interpret(f, [2]) == 63 + e = py.test.raises(ValueError, self.interpret, f, [3]) + assert str(e.value).startswith(r"exit case '\xff' not found") + def test_smallfuncsets_basic(): from rpython.translator.translator import TranslationContext, graphof from rpython.config.translationoption import get_combined_translation_config From pypy.commits at gmail.com Tue Mar 29 12:56:24 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 29 Mar 2016 09:56:24 -0700 (PDT) Subject: [pypy-commit] pypy default: On 32-bit we get mini-functions called '__x86.get_pc_thunk.*' at the Message-ID: <56fab3b8.2106c20a.717c5.ffffe796@mx.google.com> Author: Armin Rigo Branch: Changeset: r83422:c98b94183543 Date: 2016-03-29 17:37 +0200 http://bitbucket.org/pypy/pypy/changeset/c98b94183543/ Log: On 32-bit we get mini-functions called '__x86.get_pc_thunk.*' at the end of some assembler files, when compiled *without* optimizations. These functions don't have a '.size' closing them. Skip them. 
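As a rough standalone illustration of the idea (the actual logic is the trackgcroot.py change just below, which uses its own r_functionstart pattern; the regex here is a simplified stand-in): while scanning the assembler for function starts, matches named '__x86.get_pc_thunk.*' are ignored, so the parser never waits for a '.size' directive that these helpers do not have.

    import re

    # simplified stand-in for the tracker's real function-start pattern
    r_functionstart = re.compile(r"^([\w.$]+):\s*$")

    def function_names(asm_lines):
        for line in asm_lines:
            m = r_functionstart.match(line)
            if m is None:
                continue
            name = m.group(1)
            # gcc emits these PIC helpers on 32-bit without a closing
            # ".size", so they are skipped rather than tracked
            if name.startswith("__x86.get_pc_thunk."):
                continue
            yield name

    asm = ["main:", "\tcall __x86.get_pc_thunk.bx",
           "__x86.get_pc_thunk.bx:", "\tret"]
    assert list(function_names(asm)) == ["main"]
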
diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -1507,7 +1507,8 @@ functionlines = [] in_function = False for line in iterlines: - if self.FunctionGcRootTracker.r_functionstart.match(line): + match = self.FunctionGcRootTracker.r_functionstart.match(line) + if match and not match.group(1).startswith('__x86.get_pc_thunk.'): assert not in_function, ( "missed the end of the previous function") yield False, functionlines From pypy.commits at gmail.com Tue Mar 29 12:56:27 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 29 Mar 2016 09:56:27 -0700 (PDT) Subject: [pypy-commit] pypy default: merge heads Message-ID: <56fab3bb.41d91c0a.353e8.ffffe1b9@mx.google.com> Author: Armin Rigo Branch: Changeset: r83424:a7bb9851bb51 Date: 2016-03-29 18:26 +0200 http://bitbucket.org/pypy/pypy/changeset/a7bb9851bb51/ Log: merge heads diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -1123,7 +1123,7 @@ REG2LOC = dict((_reg, LOC_REG | ((_i+1)<<2)) for _i, _reg in enumerate(CALLEE_SAVE_REGISTERS)) OPERAND = r'(?:[-\w$%+.:@"]+(?:[(][\w%,]+[)])?|[(][\w%,]+[)])' - LABEL = r'([a-zA-Z_$.][a-zA-Z0-9_$@.]*)' + LABEL = r'([a-zA-Z_$.][a-zA-Z0-9_$.]*)(?:@[@a-zA-Z0-9_$.]*)?' OFFSET_LABELS = 2**30 TOP_OF_STACK_MINUS_WORD = '-4(%esp)' @@ -1185,7 +1185,7 @@ REG2LOC = dict((_reg, LOC_REG | ((_i+1)<<2)) for _i, _reg in enumerate(CALLEE_SAVE_REGISTERS)) OPERAND = r'(?:[-\w$%+.:@"]+(?:[(][\w%,]+[)])?|[(][\w%,]+[)])' - LABEL = r'([a-zA-Z_$.][a-zA-Z0-9_$@.]*)' + LABEL = r'([a-zA-Z_$.][a-zA-Z0-9_$.]*)(?:@[@a-zA-Z0-9_$.]*)?' OFFSET_LABELS = 2**30 TOP_OF_STACK_MINUS_WORD = '-8(%rsp)' From pypy.commits at gmail.com Tue Mar 29 12:56:26 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 29 Mar 2016 09:56:26 -0700 (PDT) Subject: [pypy-commit] pypy default: Don't capture the '@xxx' part of labels inside the label itself. It means a ".long Message-ID: <56fab3ba.01adc20a.a8fb.071d@mx.google.com> Author: Armin Rigo Branch: Changeset: r83423:7c96ec092272 Date: 2016-03-29 13:31 +0200 http://bitbucket.org/pypy/pypy/changeset/7c96ec092272/ Log: Don't capture the '@xxx' part of labels inside the label itself. It means a ".long .L123 at GOTOFF" fails to match a ".L123:" somewhere else. diff --git a/rpython/translator/c/gcc/trackgcroot.py b/rpython/translator/c/gcc/trackgcroot.py --- a/rpython/translator/c/gcc/trackgcroot.py +++ b/rpython/translator/c/gcc/trackgcroot.py @@ -1123,7 +1123,7 @@ REG2LOC = dict((_reg, LOC_REG | ((_i+1)<<2)) for _i, _reg in enumerate(CALLEE_SAVE_REGISTERS)) OPERAND = r'(?:[-\w$%+.:@"]+(?:[(][\w%,]+[)])?|[(][\w%,]+[)])' - LABEL = r'([a-zA-Z_$.][a-zA-Z0-9_$@.]*)' + LABEL = r'([a-zA-Z_$.][a-zA-Z0-9_$.]*)(?:@[@a-zA-Z0-9_$.]*)?' OFFSET_LABELS = 2**30 TOP_OF_STACK_MINUS_WORD = '-4(%esp)' @@ -1185,7 +1185,7 @@ REG2LOC = dict((_reg, LOC_REG | ((_i+1)<<2)) for _i, _reg in enumerate(CALLEE_SAVE_REGISTERS)) OPERAND = r'(?:[-\w$%+.:@"]+(?:[(][\w%,]+[)])?|[(][\w%,]+[)])' - LABEL = r'([a-zA-Z_$.][a-zA-Z0-9_$@.]*)' + LABEL = r'([a-zA-Z_$.][a-zA-Z0-9_$.]*)(?:@[@a-zA-Z0-9_$.]*)?' 
OFFSET_LABELS = 2**30 TOP_OF_STACK_MINUS_WORD = '-8(%rsp)' From pypy.commits at gmail.com Tue Mar 29 12:56:30 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 29 Mar 2016 09:56:30 -0700 (PDT) Subject: [pypy-commit] pypy default: Fix the C sources to avoid exporting the CPython-like names to Message-ID: <56fab3be.10921c0a.12120.16ea@mx.google.com> Author: Armin Rigo Branch: Changeset: r83425:0ac7c9d76839 Date: 2016-03-29 18:55 +0200 http://bitbucket.org/pypy/pypy/changeset/0ac7c9d76839/ Log: Fix the C sources to avoid exporting the CPython-like names to common_header.h diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h b/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h --- a/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h +++ b/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h @@ -10,6 +10,7 @@ #define _CJKCODECS_H_ #include "src/cjkcodecs/multibytecodec.h" +#include "src/cjkcodecs/fixnames.h" /* a unicode "undefined" codepoint */ diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h b/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h new file mode 100644 --- /dev/null +++ b/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h @@ -0,0 +1,9 @@ + +/* this is only included from the .c files in this directory: rename + these pypymbc-prefixed names to locally define the CPython names */ +typedef pypymbc_ssize_t Py_ssize_t; +#define PY_SSIZE_T_MAX ((Py_ssize_t)(((size_t) -1) >> 1)) +#define Py_UNICODE_SIZE pypymbc_UNICODE_SIZE +typedef pypymbc_wchar_t Py_UNICODE; +typedef pypymbc_ucs4_t ucs4_t; +typedef pypymbc_ucs2_t ucs2_t, DBCHAR; diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c --- a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c +++ b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c @@ -1,6 +1,7 @@ #include #include #include "src/cjkcodecs/multibytecodec.h" +#include "src/cjkcodecs/fixnames.h" struct pypy_cjk_dec_s *pypy_cjk_dec_new(const MultibyteCodec *codec) diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h --- a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h +++ b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h @@ -9,31 +9,28 @@ #include #ifdef _WIN64 -typedef __int64 ssize_t +typedef __int64 pypymbc_ssize_t #elif defined(_WIN32) -typedef int ssize_t; +typedef int pypymbc_ssize_t; #else #include -#endif - -#ifndef Py_UNICODE_SIZE -#ifdef _WIN32 -#define Py_UNICODE_SIZE 2 -#else -#define Py_UNICODE_SIZE 4 -#endif -typedef wchar_t Py_UNICODE; -typedef ssize_t Py_ssize_t; -#define PY_SSIZE_T_MAX ((Py_ssize_t)(((size_t) -1) >> 1)) +typedef ssize_t pypymbc_ssize_t; #endif #ifdef _WIN32 -typedef unsigned int ucs4_t; -typedef unsigned short ucs2_t, DBCHAR; +#define pypymbc_UNICODE_SIZE 2 +#else +#define pypymbc_UNICODE_SIZE 4 +#endif +typedef wchar_t pypymbc_wchar_t; + +#ifdef _WIN32 +typedef unsigned int pypymbc_ucs4_t; +typedef unsigned short pypymbc_ucs2_t; #else #include -typedef uint32_t ucs4_t; -typedef uint16_t ucs2_t, DBCHAR; +typedef uint32_t pypymbc_ucs4_t; +typedef uint16_t pypymbc_ucs2_t; #endif @@ -42,28 +39,28 @@ void *p; int i; unsigned char c[8]; - ucs2_t u2[4]; - ucs4_t u4[2]; + pypymbc_ucs2_t u2[4]; + pypymbc_ucs4_t u4[2]; } MultibyteCodec_State; typedef int (*mbcodec_init)(const void *config); -typedef Py_ssize_t (*mbencode_func)(MultibyteCodec_State *state, +typedef pypymbc_ssize_t 
(*mbencode_func)(MultibyteCodec_State *state, const void *config, - const Py_UNICODE **inbuf, Py_ssize_t inleft, - unsigned char **outbuf, Py_ssize_t outleft, + const pypymbc_wchar_t **inbuf, pypymbc_ssize_t inleft, + unsigned char **outbuf, pypymbc_ssize_t outleft, int flags); typedef int (*mbencodeinit_func)(MultibyteCodec_State *state, const void *config); -typedef Py_ssize_t (*mbencodereset_func)(MultibyteCodec_State *state, +typedef pypymbc_ssize_t (*mbencodereset_func)(MultibyteCodec_State *state, const void *config, - unsigned char **outbuf, Py_ssize_t outleft); -typedef Py_ssize_t (*mbdecode_func)(MultibyteCodec_State *state, + unsigned char **outbuf, pypymbc_ssize_t outleft); +typedef pypymbc_ssize_t (*mbdecode_func)(MultibyteCodec_State *state, const void *config, - const unsigned char **inbuf, Py_ssize_t inleft, - Py_UNICODE **outbuf, Py_ssize_t outleft); + const unsigned char **inbuf, pypymbc_ssize_t inleft, + pypymbc_wchar_t **outbuf, pypymbc_ssize_t outleft); typedef int (*mbdecodeinit_func)(MultibyteCodec_State *state, const void *config); -typedef Py_ssize_t (*mbdecodereset_func)(MultibyteCodec_State *state, +typedef pypymbc_ssize_t (*mbdecodereset_func)(MultibyteCodec_State *state, const void *config); typedef struct MultibyteCodec_s { @@ -94,59 +91,59 @@ const MultibyteCodec *codec; MultibyteCodec_State state; const unsigned char *inbuf_start, *inbuf, *inbuf_end; - Py_UNICODE *outbuf_start, *outbuf, *outbuf_end; + pypymbc_wchar_t *outbuf_start, *outbuf, *outbuf_end; }; RPY_EXTERN struct pypy_cjk_dec_s *pypy_cjk_dec_new(const MultibyteCodec *codec); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_init(struct pypy_cjk_dec_s *d, - char *inbuf, Py_ssize_t inlen); +pypymbc_ssize_t pypy_cjk_dec_init(struct pypy_cjk_dec_s *d, + char *inbuf, pypymbc_ssize_t inlen); RPY_EXTERN void pypy_cjk_dec_free(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_chunk(struct pypy_cjk_dec_s *); +pypymbc_ssize_t pypy_cjk_dec_chunk(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_UNICODE *pypy_cjk_dec_outbuf(struct pypy_cjk_dec_s *); +pypymbc_wchar_t *pypy_cjk_dec_outbuf(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_outlen(struct pypy_cjk_dec_s *); +pypymbc_ssize_t pypy_cjk_dec_outlen(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_inbuf_remaining(struct pypy_cjk_dec_s *d); +pypymbc_ssize_t pypy_cjk_dec_inbuf_remaining(struct pypy_cjk_dec_s *d); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_inbuf_consumed(struct pypy_cjk_dec_s* d); +pypymbc_ssize_t pypy_cjk_dec_inbuf_consumed(struct pypy_cjk_dec_s* d); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_replace_on_error(struct pypy_cjk_dec_s* d, - Py_UNICODE *, Py_ssize_t, Py_ssize_t); +pypymbc_ssize_t pypy_cjk_dec_replace_on_error(struct pypy_cjk_dec_s* d, + pypymbc_wchar_t *, pypymbc_ssize_t, pypymbc_ssize_t); struct pypy_cjk_enc_s { const MultibyteCodec *codec; MultibyteCodec_State state; - const Py_UNICODE *inbuf_start, *inbuf, *inbuf_end; + const pypymbc_wchar_t *inbuf_start, *inbuf, *inbuf_end; unsigned char *outbuf_start, *outbuf, *outbuf_end; }; RPY_EXTERN struct pypy_cjk_enc_s *pypy_cjk_enc_new(const MultibyteCodec *codec); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_init(struct pypy_cjk_enc_s *d, - Py_UNICODE *inbuf, Py_ssize_t inlen); +pypymbc_ssize_t pypy_cjk_enc_init(struct pypy_cjk_enc_s *d, + pypymbc_wchar_t *inbuf, pypymbc_ssize_t inlen); RPY_EXTERN void pypy_cjk_enc_free(struct pypy_cjk_enc_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_chunk(struct pypy_cjk_enc_s *, Py_ssize_t); +pypymbc_ssize_t pypy_cjk_enc_chunk(struct pypy_cjk_enc_s *, 
pypymbc_ssize_t); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_reset(struct pypy_cjk_enc_s *); +pypymbc_ssize_t pypy_cjk_enc_reset(struct pypy_cjk_enc_s *); RPY_EXTERN char *pypy_cjk_enc_outbuf(struct pypy_cjk_enc_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_outlen(struct pypy_cjk_enc_s *); +pypymbc_ssize_t pypy_cjk_enc_outlen(struct pypy_cjk_enc_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_inbuf_remaining(struct pypy_cjk_enc_s *d); +pypymbc_ssize_t pypy_cjk_enc_inbuf_remaining(struct pypy_cjk_enc_s *d); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_inbuf_consumed(struct pypy_cjk_enc_s* d); +pypymbc_ssize_t pypy_cjk_enc_inbuf_consumed(struct pypy_cjk_enc_s* d); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_replace_on_error(struct pypy_cjk_enc_s* d, - char *, Py_ssize_t, Py_ssize_t); +pypymbc_ssize_t pypy_cjk_enc_replace_on_error(struct pypy_cjk_enc_s* d, + char *, pypymbc_ssize_t, pypymbc_ssize_t); RPY_EXTERN const MultibyteCodec *pypy_cjk_enc_getcodec(struct pypy_cjk_enc_s *); @@ -191,5 +188,7 @@ DEFINE_CODEC(big5) DEFINE_CODEC(cp950) +#undef DEFINE_CODEC + #endif From pypy.commits at gmail.com Tue Mar 29 13:13:56 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 29 Mar 2016 10:13:56 -0700 (PDT) Subject: [pypy-commit] pypy default: Hack for "make"ing asmgcc programs in debug mode: this helps on linux32 Message-ID: <56fab7d4.8d571c0a.b4d9.19aa@mx.google.com> Author: Armin Rigo Branch: Changeset: r83426:424023029f67 Date: 2016-03-29 19:13 +0200 http://bitbucket.org/pypy/pypy/changeset/424023029f67/ Log: Hack for "make"ing asmgcc programs in debug mode: this helps on linux32 diff --git a/rpython/translator/c/genc.py b/rpython/translator/c/genc.py --- a/rpython/translator/c/genc.py +++ b/rpython/translator/c/genc.py @@ -414,10 +414,12 @@ if self.config.translation.gcrootfinder == 'asmgcc': if self.translator.platform.name == 'msvc': raise Exception("msvc no longer supports asmgcc") + _extra = '' if self.config.translation.shared: - mk.definition('DEBUGFLAGS', '-O2 -fomit-frame-pointer -g -fPIC') - else: - mk.definition('DEBUGFLAGS', '-O2 -fomit-frame-pointer -g') + _extra = ' -fPIC' + _extra += ' -fdisable-tree-fnsplit' # seems to help + mk.definition('DEBUGFLAGS', + '-O2 -fomit-frame-pointer -g'+ _extra) if self.config.translation.shared: mk.definition('PYPY_MAIN_FUNCTION', "pypy_main_startup") From pypy.commits at gmail.com Tue Mar 29 14:04:17 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 29 Mar 2016 11:04:17 -0700 (PDT) Subject: [pypy-commit] cffi default: With ffi.compile(verbose=True), also print the name of the C (or Python) Message-ID: <56fac3a1.2976c20a.d610c.36f8@mx.google.com> Author: Armin Rigo Branch: Changeset: r2653:f58f14dad850 Date: 2016-03-29 20:04 +0200 http://bitbucket.org/cffi/cffi/changeset/f58f14dad850/ Log: With ffi.compile(verbose=True), also print the name of the C (or Python) file being generated diff --git a/cffi/recompiler.py b/cffi/recompiler.py --- a/cffi/recompiler.py +++ b/cffi/recompiler.py @@ -1319,7 +1319,9 @@ s = s.encode('ascii') super(NativeIO, self).write(s) -def _make_c_or_py_source(ffi, module_name, preamble, target_file): +def _make_c_or_py_source(ffi, module_name, preamble, target_file, verbose): + if verbose: + print("generating %s" % (target_file,)) recompiler = Recompiler(ffi, module_name, target_is_python=(preamble is None)) recompiler.collect_type_table() @@ -1331,6 +1333,8 @@ with open(target_file, 'r') as f1: if f1.read(len(output) + 1) != output: raise IOError + if verbose: + print("(already up-to-date)") return False # already up-to-date except IOError: 
tmp_file = '%s.~%d' % (target_file, os.getpid()) @@ -1343,12 +1347,14 @@ os.rename(tmp_file, target_file) return True -def make_c_source(ffi, module_name, preamble, target_c_file): +def make_c_source(ffi, module_name, preamble, target_c_file, verbose=False): assert preamble is not None - return _make_c_or_py_source(ffi, module_name, preamble, target_c_file) + return _make_c_or_py_source(ffi, module_name, preamble, target_c_file, + verbose) -def make_py_source(ffi, module_name, target_py_file): - return _make_c_or_py_source(ffi, module_name, None, target_py_file) +def make_py_source(ffi, module_name, target_py_file, verbose=False): + return _make_c_or_py_source(ffi, module_name, None, target_py_file, + verbose) def _modname_to_file(outputdir, modname, extension): parts = modname.split('.') @@ -1438,7 +1444,8 @@ target = '*' # ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) - updated = make_c_source(ffi, module_name, preamble, c_file) + updated = make_c_source(ffi, module_name, preamble, c_file, + verbose=compiler_verbose) if call_c_compiler: patchlist = [] cwd = os.getcwd() @@ -1458,7 +1465,8 @@ else: if c_file is None: c_file, _ = _modname_to_file(tmpdir, module_name, '.py') - updated = make_py_source(ffi, module_name, c_file) + updated = make_py_source(ffi, module_name, c_file, + verbose=compiler_verbose) if call_c_compiler: return c_file else: diff --git a/demo/gmp_build.py b/demo/gmp_build.py --- a/demo/gmp_build.py +++ b/demo/gmp_build.py @@ -23,5 +23,4 @@ libraries=['gmp', 'm']) if __name__ == '__main__': - ffi.compile() - + ffi.compile(verbose=True) From pypy.commits at gmail.com Tue Mar 29 14:08:26 2016 From: pypy.commits at gmail.com (arigo) Date: Tue, 29 Mar 2016 11:08:26 -0700 (PDT) Subject: [pypy-commit] cffi default: Document ffi.list_types() Message-ID: <56fac49a.0aecc20a.acfb8.6dda@mx.google.com> Author: Armin Rigo Branch: Changeset: r2654:0e5d809db377 Date: 2016-03-29 20:08 +0200 http://bitbucket.org/cffi/cffi/changeset/0e5d809db377/ Log: Document ffi.list_types() diff --git a/doc/source/using.rst b/doc/source/using.rst --- a/doc/source/using.rst +++ b/doc/source/using.rst @@ -1228,6 +1228,11 @@ .. __: https://bitbucket.org/cffi/cffi/issues/233/ +**ffi.list_types()**: builds and returns a list of all user type names +known in this FFI instance. The list contains typedef names (sorted in +alphabetical order), followed by the 'struct xxx' (sorted) and finally +the 'union xxx' (sorted as well). *New in version 1.6.* + .. 
_`Preparing and Distributing modules`: cdef.html#loading-libraries diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -3,6 +3,12 @@ ====================== +v1.6 +==== + +* ffi.list_types() + + v1.5.2 ====== From pypy.commits at gmail.com Wed Mar 30 03:37:54 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 30 Mar 2016 00:37:54 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: a shorter pre-optimized trace Message-ID: <56fb8252.89941c0a.b1471.fffff09e@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83427:9fee3802b752 Date: 2016-03-29 15:50 +0200 http://bitbucket.org/pypy/pypy/changeset/9fee3802b752/ Log: a shorter pre-optimized trace diff --git a/pypy/objspace/std/callmethod.py b/pypy/objspace/std/callmethod.py --- a/pypy/objspace/std/callmethod.py +++ b/pypy/objspace/std/callmethod.py @@ -42,18 +42,22 @@ w_value = None safe = False + w_descr = None if space.config.objspace.std.withmapdict and jit.we_are_jitted(): # compute safeness without reading the type map = w_obj._get_mapdict_map_no_promote() if map is not None and map._type_safe_to_do_getattr(): safe = True + name = space.str_w(w_name) + w_descr = map._type_lookup_safe(name) else: w_type = space.type(w_obj) safe = w_type.has_object_getattribute() + if safe: + name = space.str_w(w_name) + w_descr = space.lookup(w_obj, name) if safe: - name = space.str_w(w_name) - w_descr = space.lookup(w_obj, name) if w_descr is None: # this handles directly the common case # module.function(args..) @@ -129,11 +133,12 @@ map = w_obj._get_mapdict_map_no_promote() if map is not None and map._type_safe_to_do_getattr(): safe = True + w_descr = map._type_lookup_safe(methname) else: w_type = space.type(w_obj) safe = w_type.has_object_getattribute() + w_descr = space.lookup(w_obj, methname) if safe: - w_descr = space.lookup(w_obj, methname) typ = type(w_descr) if typ is function.Function or typ is function.FunctionWithFixedCode: w_value = w_obj.getdictvalue(space, methname) diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -327,6 +327,9 @@ def _type_lookup(self, name): if not self._type_safe_to_do_getattr(): return self.getclass_from_terminator().lookup(name) + return self._type_lookup_safe(name) + + def _type_lookup_safe(self, name): w_descr = self._type_lookup_pure(name) if isinstance(w_descr, MutableCell): w_descr = w_descr.unwrap_cell(self.space) From pypy.commits at gmail.com Wed Mar 30 03:37:56 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 30 Mar 2016 00:37:56 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: some more improvements of the pre-optimization traces Message-ID: <56fb8254.a3abc20a.a4e35.ffffb872@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83428:5e6b03561580 Date: 2016-03-29 17:08 +0200 http://bitbucket.org/pypy/pypy/changeset/5e6b03561580/ Log: some more improvements of the pre-optimization traces diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -26,6 +26,8 @@ # note: we use "x * NUM_DIGITS_POW2" instead of "x << NUM_DIGITS" because # we want to propagate knowledge that the result cannot be negative +NOATTR = -1 +NOATTR_DEVOLVED_TERMINATOR = -2 class Version(object): pass @@ -55,20 +57,14 @@ def _get_terminator(self): return self.terminator - @jit.elidable_compatible() - def 
_get_terminator_if_devolved(self): - if isinstance(self.terminator, DevolvedDictTerminator): - return self.terminator - return None - def read(self, obj, name, index): storageindex = self.find_map_storageindex(name, index) - if storageindex == -1: + if storageindex == NOATTR: + return None + if storageindex == NOATTR_DEVOLVED_TERMINATOR: # XXX can improve the devolved case - terminator = self._get_terminator_if_devolved() - if terminator is not None: - return terminator._read_terminator(obj, name, index) - return None + terminator = self._get_terminator() + return terminator._read_terminator(obj, name, index) #if ( # XXX in the guard_compatible world the following isconstant may never be true? # jit.isconstant(attr.storageindex) and # jit.isconstant(obj) and @@ -84,7 +80,7 @@ def write(self, obj, name, index, w_value): storageindex = self.find_map_storageindex(name, index) - if storageindex == -1: + if storageindex < 0: return self._get_terminator()._write_terminator(obj, name, index, w_value) obj._mapdict_write_storage(storageindex, w_value) return True @@ -100,9 +96,15 @@ @jit.elidable_compatible() def find_map_storageindex(self, name, index): + """ return an index of the attributes, or a negative number if the + attribute is not there. returns -1 if the attribute does not exist and + the object does *not* have a devolved terminator, and -2 if the + terminator *is* devolved """ attr = self.find_map_attr(name, index) + if isinstance(self.terminator, DevolvedDictTerminator): + return NOATTR_DEVOLVED_TERMINATOR if attr is None: - return -1 + return NOATTR return attr.storageindex @jit.dont_look_inside From pypy.commits at gmail.com Wed Mar 30 03:37:59 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 30 Mar 2016 00:37:59 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: nonsense Message-ID: <56fb8257.86351c0a.637de.ffffb5d0@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83430:20a9a53c287b Date: 2016-03-29 17:53 +0200 http://bitbucket.org/pypy/pypy/changeset/20a9a53c287b/ Log: nonsense diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -101,9 +101,9 @@ the object does *not* have a devolved terminator, and -2 if the terminator *is* devolved """ attr = self.find_map_attr(name, index) - if isinstance(self.terminator, DevolvedDictTerminator): - return NOATTR_DEVOLVED_TERMINATOR if attr is None: + if isinstance(self.terminator, DevolvedDictTerminator): + return NOATTR_DEVOLVED_TERMINATOR return NOATTR return attr.storageindex From pypy.commits at gmail.com Wed Mar 30 03:38:01 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 30 Mar 2016 00:38:01 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: add elidable_compatible version of isinstance checks Message-ID: <56fb8259.0113c20a.5044a.ffffb6ec@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83431:743ac8d23501 Date: 2016-03-30 09:29 +0200 http://bitbucket.org/pypy/pypy/changeset/743ac8d23501/ Log: add elidable_compatible version of isinstance checks diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -521,10 +521,12 @@ return space.get_and_call_function(w_check, w_type, w_sub) def isinstance_allow_override(space, w_inst, w_type): - if space.type(w_inst) is w_type: + if not jit.we_are_jitted() and space.type(w_inst) is w_type: return space.w_True # fast path 
copied from cpython w_check = space.lookup(w_type, "__instancecheck__") if w_check is not None: + if space.type(w_inst) is w_type: + return space.w_True # fast path copied from cpython return space.get_and_call_function(w_check, w_type, w_inst) else: return space.isinstance(w_inst, w_type) diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -345,6 +345,11 @@ name, w_type.version_tag())[1] return w_res + @jit.elidable_compatible(quasi_immut_field_name_for_second_arg="version") + def _type_issubtype(self, version, w_type): + from pypy.objspace.std.typeobject import _issubtype + return _issubtype(self.terminator.w_cls, w_type) + class Terminator(AbstractAttribute): _immutable_fields_ = ['w_cls'] @@ -1150,3 +1155,12 @@ return map._type_lookup(name) return space._lookup(w_obj, name) + +def mapdict_type_isinstance(space, w_obj, w_type): + if we_are_jitted(): + map = w_obj._get_mapdict_map() + if map is not None and map.version is not None: + version_tag = w_type.version_tag() + if version_tag is not None: + return map._type_issubtype(w_type) + return space.type(w_obj).issubtype(w_type) diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -651,6 +651,9 @@ assert w_inst is not None if isinstance(w_inst, cls): return True + if self.config.objspace.std.withmapdict: + from pypy.objspace.std.mapdict import mapdict_type_isinstance + return mapdict_type_isinstance(self, w_inst, w_type) return self.type(w_inst).issubtype(w_type) @specialize.memo() From pypy.commits at gmail.com Wed Mar 30 03:38:03 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 30 Mar 2016 00:38:03 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: can use the cached version inside the elidable_compatible function Message-ID: <56fb825b.47afc20a.4465b.ffffbb99@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83432:7fef35dc1817 Date: 2016-03-30 09:30 +0200 http://bitbucket.org/pypy/pypy/changeset/7fef35dc1817/ Log: can use the cached version inside the elidable_compatible function diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -322,9 +322,7 @@ # own __getattribute__ if version is None: return False - w_type = self.terminator.w_cls - w_descr = self._type_lookup_pure('__getattribute__') - return w_descr is object_getattribute(self.space) + return w_type.has_object_getattribute() def _type_lookup(self, name): if not self._type_safe_to_do_getattr(): From pypy.commits at gmail.com Wed Mar 30 03:38:05 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 30 Mar 2016 00:38:05 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: fix translation Message-ID: <56fb825d.90051c0a.955ea.0d6c@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83433:0b869c7d00db Date: 2016-03-30 08:43 +0100 http://bitbucket.org/pypy/pypy/changeset/0b869c7d00db/ Log: fix translation diff --git a/pypy/objspace/std/callmethod.py b/pypy/objspace/std/callmethod.py --- a/pypy/objspace/std/callmethod.py +++ b/pypy/objspace/std/callmethod.py @@ -43,6 +43,7 @@ safe = False w_descr = None + name = None if space.config.objspace.std.withmapdict and jit.we_are_jitted(): # compute safeness without reading the type map = w_obj._get_mapdict_map_no_promote() diff --git a/pypy/objspace/std/mapdict.py 
b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -322,7 +322,7 @@ # own __getattribute__ if version is None: return False - return w_type.has_object_getattribute() + return self.terminator.w_cls.has_object_getattribute() def _type_lookup(self, name): if not self._type_safe_to_do_getattr(): From pypy.commits at gmail.com Wed Mar 30 03:37:58 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 30 Mar 2016 00:37:58 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: another shortening of pre-optimization traces Message-ID: <56fb8256.4816c20a.5f157.ffffbaf4@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83429:ac6ec0fa91d4 Date: 2016-03-29 17:30 +0200 http://bitbucket.org/pypy/pypy/changeset/ac6ec0fa91d4/ Log: another shortening of pre-optimization traces diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py --- a/pypy/objspace/std/objspace.py +++ b/pypy/objspace/std/objspace.py @@ -541,17 +541,19 @@ map = w_obj._get_mapdict_map_no_promote() if map is not None and map._type_safe_to_do_getattr(): safe = True + name = self.str_w(w_name) + w_descr = map._type_lookup_safe(name) if not safe: w_type = self.type(w_obj) w_descr = w_type.getattribute_if_not_from_object() if w_descr is not None: return self._handle_getattribute(w_descr, w_obj, w_name) + name = self.str_w(w_name) + w_descr = self.lookup(w_obj, name) # fast path: XXX this is duplicating most of the logic # from the default __getattribute__ and the getattr() method... - name = self.str_w(w_name) - w_descr = self.lookup(w_obj, name) e = None if w_descr is not None: w_get = None From pypy.commits at gmail.com Wed Mar 30 07:07:42 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 30 Mar 2016 04:07:42 -0700 (PDT) Subject: [pypy-commit] pypy default: Issue #2266: check that PyTupleObjects do not contain any NULLs at the Message-ID: <56fbb37e.c50b1c0a.78eee.1ede@mx.google.com> Author: Armin Rigo Branch: Changeset: r83434:94bf747cee5d Date: 2016-03-30 13:05 +0200 http://bitbucket.org/pypy/pypy/changeset/94bf747cee5d/ Log: Issue #2266: check that PyTupleObjects do not contain any NULLs at the point of conversion to W_TupleObjects. 
diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py --- a/pypy/module/cpyext/test/test_sequence.py +++ b/pypy/module/cpyext/test/test_sequence.py @@ -90,8 +90,10 @@ self.raises(space, api, IndexError, api.PySequence_SetItem, l, 3, w_value) + t = api.PyTuple_New(1) + api.PyTuple_SetItem(t, 0, l) self.raises(space, api, TypeError, api.PySequence_SetItem, - api.PyTuple_New(1), 0, w_value) + t, 0, w_value) self.raises(space, api, TypeError, api.PySequence_SetItem, space.newdict(), 0, w_value) diff --git a/pypy/module/cpyext/test/test_tupleobject.py b/pypy/module/cpyext/test/test_tupleobject.py --- a/pypy/module/cpyext/test/test_tupleobject.py +++ b/pypy/module/cpyext/test/test_tupleobject.py @@ -5,6 +5,7 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.debug import FatalError class TestTupleObject(BaseApiTest): @@ -18,29 +19,44 @@ #assert api.PyTuple_GET_SIZE(atuple) == 3 --- now a C macro raises(TypeError, api.PyTuple_Size(space.newlist([]))) api.PyErr_Clear() - + + def test_tuple_realize_refuses_nulls(self, space, api): + py_tuple = api.PyTuple_New(1) + py.test.raises(FatalError, from_ref, space, py_tuple) + def test_tuple_resize(self, space, api): w_42 = space.wrap(42) + w_43 = space.wrap(43) + w_44 = space.wrap(44) ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw') py_tuple = api.PyTuple_New(3) # inside py_tuple is an array of "PyObject *" items which each hold # a reference rffi.cast(PyTupleObject, py_tuple).c_ob_item[0] = make_ref(space, w_42) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[1] = make_ref(space, w_43) ar[0] = py_tuple api._PyTuple_Resize(ar, 2) w_tuple = from_ref(space, ar[0]) assert space.int_w(space.len(w_tuple)) == 2 assert space.int_w(space.getitem(w_tuple, space.wrap(0))) == 42 + assert space.int_w(space.getitem(w_tuple, space.wrap(1))) == 43 api.Py_DecRef(ar[0]) py_tuple = api.PyTuple_New(3) rffi.cast(PyTupleObject, py_tuple).c_ob_item[0] = make_ref(space, w_42) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[1] = make_ref(space, w_43) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[2] = make_ref(space, w_44) ar[0] = py_tuple api._PyTuple_Resize(ar, 10) + assert api.PyTuple_Size(ar[0]) == 10 + for i in range(3, 10): + rffi.cast(PyTupleObject, py_tuple).c_ob_item[i] = make_ref( + space, space.wrap(42 + i)) w_tuple = from_ref(space, ar[0]) assert space.int_w(space.len(w_tuple)) == 10 - assert space.int_w(space.getitem(w_tuple, space.wrap(0))) == 42 + for i in range(10): + assert space.int_w(space.getitem(w_tuple, space.wrap(i))) == 42 + i api.Py_DecRef(ar[0]) lltype.free(ar, flavor='raw') diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ b/pypy/module/cpyext/tupleobject.py @@ -1,5 +1,6 @@ from pypy.interpreter.error import OperationError from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.debug import fatalerror_notb from pypy.module.cpyext.api import (cpython_api, Py_ssize_t, CANNOT_FAIL, build_type_checkers, PyObjectFields, cpython_struct, bootstrap_function) @@ -91,14 +92,22 @@ def tuple_realize(space, py_obj): """ Creates the tuple in the interpreter. The PyTupleObject must not - be modified after this call. + be modified after this call. We check that it does not contain + any NULLs at this point (which would correspond to half-broken + W_TupleObjects). 
""" py_tup = rffi.cast(PyTupleObject, py_obj) l = py_tup.c_ob_size p = py_tup.c_ob_item items_w = [None] * l for i in range(l): - items_w[i] = from_ref(space, p[i]) + w_item = from_ref(space, p[i]) + if w_item is None: + fatalerror_notb( + "Fatal error in cpyext, CPython compatibility layer: " + "converting a PyTupleObject into a W_TupleObject, " + "but found NULLs as items") + items_w[i] = w_item w_obj = space.newtuple(items_w) track_reference(space, py_obj, w_obj) return w_obj From pypy.commits at gmail.com Wed Mar 30 09:46:48 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 30 Mar 2016 06:46:48 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: Backed out changeset 7fef35dc1817 Message-ID: <56fbd8c8.4816c20a.5f157.5614@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83435:7aa196510609 Date: 2016-03-30 10:53 +0200 http://bitbucket.org/pypy/pypy/changeset/7aa196510609/ Log: Backed out changeset 7fef35dc1817 can't use the has_object_getattribute, because it has arbitrary effects diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -322,7 +322,9 @@ # own __getattribute__ if version is None: return False - return self.terminator.w_cls.has_object_getattribute() + w_type = self.terminator.w_cls + w_descr = self._type_lookup_pure('__getattribute__') + return w_descr is object_getattribute(self.space) def _type_lookup(self, name): if not self._type_safe_to_do_getattr(): From pypy.commits at gmail.com Wed Mar 30 09:46:50 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 30 Mar 2016 06:46:50 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: do the same trick we also do for object.__getattribute__: Message-ID: <56fbd8ca.c818c20a.d27e2.5809@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83436:a42555cba817 Date: 2016-03-30 14:05 +0200 http://bitbucket.org/pypy/pypy/changeset/a42555cba817/ Log: do the same trick we also do for object.__getattribute__: if the __instancecheck__ method is not overridden, don't go via the method at all. diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py --- a/pypy/objspace/descroperation.py +++ b/pypy/objspace/descroperation.py @@ -58,6 +58,13 @@ return w_iter tuple_iter._annspecialcase_ = 'specialize:memo' +def type_instancecheck(space): + "Utility that returns the app-level descriptor type.__instancecheck__." 
+ w_src, w_instancecheck = space.lookup_in_type_where(space.w_type, + '__instancecheck__') + return w_instancecheck +type_instancecheck._annspecialcase_ = 'specialize:memo' + def raiseattrerror(space, w_obj, name, w_descr=None): if w_descr is None: raise oefmt(space.w_AttributeError, @@ -524,12 +531,12 @@ if not jit.we_are_jitted() and space.type(w_inst) is w_type: return space.w_True # fast path copied from cpython w_check = space.lookup(w_type, "__instancecheck__") - if w_check is not None: + if w_check is None or w_check is type_instancecheck(space): + return space.isinstance(w_inst, w_type) + else: if space.type(w_inst) is w_type: return space.w_True # fast path copied from cpython return space.get_and_call_function(w_check, w_type, w_inst) - else: - return space.isinstance(w_inst, w_type) # helpers From pypy.commits at gmail.com Wed Mar 30 09:46:52 2016 From: pypy.commits at gmail.com (cfbolz) Date: Wed, 30 Mar 2016 06:46:52 -0700 (PDT) Subject: [pypy-commit] pypy guard-compatible: remove promote which killed everything Message-ID: <56fbd8cc.55031c0a.3e0f5.587d@mx.google.com> Author: Carl Friedrich Bolz Branch: guard-compatible Changeset: r83437:83f1ba85ce71 Date: 2016-03-30 14:52 +0100 http://bitbucket.org/pypy/pypy/changeset/83f1ba85ce71/ Log: remove promote which killed everything diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py --- a/pypy/objspace/std/mapdict.py +++ b/pypy/objspace/std/mapdict.py @@ -1158,7 +1158,7 @@ def mapdict_type_isinstance(space, w_obj, w_type): if we_are_jitted(): - map = w_obj._get_mapdict_map() + map = w_obj._get_mapdict_map_no_promote() if map is not None and map.version is not None: version_tag = w_type.version_tag() if version_tag is not None: From pypy.commits at gmail.com Wed Mar 30 11:22:00 2016 From: pypy.commits at gmail.com (arigo) Date: Wed, 30 Mar 2016 08:22:00 -0700 (PDT) Subject: [pypy-commit] cffi default: Change the API of ffi.list_types() Message-ID: <56fbef18.857ac20a.388d.ffff81d5@mx.google.com> Author: Armin Rigo Branch: Changeset: r2655:2022122f5ad3 Date: 2016-03-30 17:22 +0200 http://bitbucket.org/cffi/cffi/changeset/2022122f5ad3/ Log: Change the API of ffi.list_types() diff --git a/c/ffi_obj.c b/c/ffi_obj.c --- a/c/ffi_obj.c +++ b/c/ffi_obj.c @@ -863,54 +863,57 @@ } PyDoc_STRVAR(ffi_list_types_doc, -"Build and return a list of all user type names known in this FFI instance.\n" -"\n" -"Contains typedef names (sorted in alphabetical order), followed by the\n" -"'struct xxx' (sorted) and finally the 'union xxx' (sorted as well)."); +"Returns the user type names known to this FFI instance.\n" +"This returns a tuple containing three lists of names:\n" +"(typedef_names, names_of_structs, names_of_unions)"); static PyObject *ffi_list_types(FFIObject *self, PyObject *noargs) { - int is_union, look_for_union; Py_ssize_t i, n1 = self->types_builder.ctx.num_typenames; Py_ssize_t n23 = self->types_builder.ctx.num_struct_unions; - PyObject *o, *result = PyList_New(n1); - if (result == NULL) - return NULL; + PyObject *o, *lst[3] = {NULL, NULL, NULL}, *result = NULL; + + lst[0] = PyList_New(n1); + if (lst[0] == NULL) + goto error; + lst[1] = PyList_New(0); + if (lst[1] == NULL) + goto error; + lst[2] = PyList_New(0); + if (lst[2] == NULL) + goto error; for (i = 0; i < n1; i++) { o = PyText_FromString(self->types_builder.ctx.typenames[i].name); if (o == NULL) goto error; - PyList_SET_ITEM(result, i, o); + PyList_SET_ITEM(lst[0], i, o); } - for (look_for_union = 0; look_for_union < 2; look_for_union++) { - for (i = 0; i < n23; 
i++) { - const struct _cffi_struct_union_s *s; - int err; + for (i = 0; i < n23; i++) { + const struct _cffi_struct_union_s *s; + int err, index; - s = &self->types_builder.ctx.struct_unions[i]; - if (s->name[0] == '$') - continue; + s = &self->types_builder.ctx.struct_unions[i]; + if (s->name[0] == '$') + continue; - is_union = (s->flags & _CFFI_F_UNION) != 0; - if (is_union != look_for_union) - continue; - - o = PyText_FromFormat(is_union ? "union %s" : "struct %s", s->name); - if (o == NULL) - goto error; - err = PyList_Append(result, o); - Py_DECREF(o); - if (err < 0) - goto error; - } + o = PyText_FromString(s->name); + if (o == NULL) + goto error; + index = (s->flags & _CFFI_F_UNION) ? 2 : 1; + err = PyList_Append(lst[index], o); + Py_DECREF(o); + if (err < 0) + goto error; } + result = PyTuple_Pack(3, lst[0], lst[1], lst[2]); + /* fall-through */ + error: + Py_XDECREF(lst[2]); + Py_XDECREF(lst[1]); + Py_XDECREF(lst[0]); return result; - - error: - Py_DECREF(result); - return NULL; } PyDoc_STRVAR(ffi_memmove_doc, diff --git a/cffi/api.py b/cffi/api.py --- a/cffi/api.py +++ b/cffi/api.py @@ -722,10 +722,9 @@ "objects") def list_types(self): - """Build and return a list of all user type names known in this FFI - instance. Contains typedef names (sorted in alphabetical order), - followed by the 'struct xxx' (sorted) and finally the 'union xxx' - (sorted as well). + """Returns the user type names known to this FFI instance. + This returns a tuple containing three lists of names: + (typedef_names, names_of_structs, names_of_unions) """ typedefs = [] structs = [] @@ -734,13 +733,13 @@ if key.startswith('typedef '): typedefs.append(key[8:]) elif key.startswith('struct '): - structs.append(key) + structs.append(key[7:]) elif key.startswith('union '): - unions.append(key) + unions.append(key[6:]) typedefs.sort() structs.sort() unions.sort() - return typedefs + structs + unions + return (typedefs, structs, unions) def _load_backend_lib(backend, name, flags): diff --git a/doc/source/using.rst b/doc/source/using.rst --- a/doc/source/using.rst +++ b/doc/source/using.rst @@ -1228,10 +1228,10 @@ .. __: https://bitbucket.org/cffi/cffi/issues/233/ -**ffi.list_types()**: builds and returns a list of all user type names -known in this FFI instance. The list contains typedef names (sorted in -alphabetical order), followed by the 'struct xxx' (sorted) and finally -the 'union xxx' (sorted as well). *New in version 1.6.* +**ffi.list_types()**: Returns the user type names known to this FFI +instance. This returns a tuple containing three lists of names: +``(typedef_names, names_of_structs, names_of_unions)``. *New in +version 1.6.* .. 
_`Preparing and Distributing modules`: cdef.html#loading-libraries diff --git a/testing/cffi0/test_ffi_backend.py b/testing/cffi0/test_ffi_backend.py --- a/testing/cffi0/test_ffi_backend.py +++ b/testing/cffi0/test_ffi_backend.py @@ -427,31 +427,32 @@ def test_introspect_typedef(self): ffi = FFI() ffi.cdef("typedef int foo_t;") - assert ffi.list_types() == ['foo_t'] + assert ffi.list_types() == (['foo_t'], [], []) assert ffi.typeof('foo_t').kind == 'primitive' assert ffi.typeof('foo_t').cname == 'int' # ffi.cdef("typedef signed char a_t, c_t, g_t, b_t;") - assert ffi.list_types() == ['a_t', 'b_t', 'c_t', 'foo_t', 'g_t'] + assert ffi.list_types() == (['a_t', 'b_t', 'c_t', 'foo_t', 'g_t'], + [], []) def test_introspect_struct(self): ffi = FFI() ffi.cdef("struct foo_s { int a; };") - assert ffi.list_types() == ['struct foo_s'] + assert ffi.list_types() == ([], ['foo_s'], []) assert ffi.typeof('struct foo_s').kind == 'struct' assert ffi.typeof('struct foo_s').cname == 'struct foo_s' def test_introspect_union(self): ffi = FFI() ffi.cdef("union foo_s { int a; };") - assert ffi.list_types() == ['union foo_s'] + assert ffi.list_types() == ([], [], ['foo_s']) assert ffi.typeof('union foo_s').kind == 'union' assert ffi.typeof('union foo_s').cname == 'union foo_s' def test_introspect_struct_and_typedef(self): ffi = FFI() ffi.cdef("typedef struct { int a; } foo_t;") - assert ffi.list_types() == ['foo_t'] + assert ffi.list_types() == (['foo_t'], [], []) assert ffi.typeof('foo_t').kind == 'struct' assert ffi.typeof('foo_t').cname == 'foo_t' @@ -460,14 +461,14 @@ ffi2 = FFI() ffi1.cdef("typedef signed char schar_t; struct sint_t { int x; };") ffi2.include(ffi1) - assert ffi1.list_types() == sorted(ffi2.list_types()) == [ - 'schar_t', 'struct sint_t'] + assert ffi1.list_types() == ffi2.list_types() == ( + ['schar_t'], ['sint_t'], []) def test_introspect_order(self): ffi = FFI() ffi.cdef("union aaa { int a; }; typedef struct ccc { int a; } b;") ffi.cdef("union g { int a; }; typedef struct cc { int a; } bbb;") ffi.cdef("union aa { int a; }; typedef struct a { int a; } bb;") - assert ffi.list_types() == ['b', 'bb', 'bbb', - 'struct a', 'struct cc', 'struct ccc', - 'union aa', 'union aaa', 'union g'] + assert ffi.list_types() == (['b', 'bb', 'bbb'], + ['a', 'cc', 'ccc'], + ['aa', 'aaa', 'g']) diff --git a/testing/cffi1/test_recompiler.py b/testing/cffi1/test_recompiler.py --- a/testing/cffi1/test_recompiler.py +++ b/testing/cffi1/test_recompiler.py @@ -1796,7 +1796,7 @@ lib = verify(ffi, 'test_introspect_typedef', """ typedef int foo_t; """) - assert ffi.list_types() == ['foo_t'] + assert ffi.list_types() == (['foo_t'], [], []) assert ffi.typeof('foo_t').kind == 'primitive' assert ffi.typeof('foo_t').cname == 'int' @@ -1806,7 +1806,7 @@ lib = verify(ffi, 'test_introspect_typedef_multiple', """ typedef signed char a_t, c_t, g_t, b_t; """) - assert ffi.list_types() == ['a_t', 'b_t', 'c_t', 'g_t'] + assert ffi.list_types() == (['a_t', 'b_t', 'c_t', 'g_t'], [], []) def test_introspect_struct(): ffi = FFI() @@ -1814,7 +1814,7 @@ lib = verify(ffi, 'test_introspect_struct', """ struct foo_s { int a; }; """) - assert ffi.list_types() == ['struct foo_s'] + assert ffi.list_types() == ([], ['foo_s'], []) assert ffi.typeof('struct foo_s').kind == 'struct' assert ffi.typeof('struct foo_s').cname == 'struct foo_s' @@ -1824,7 +1824,7 @@ lib = verify(ffi, 'test_introspect_union', """ union foo_s { int a; }; """) - assert ffi.list_types() == ['union foo_s'] + assert ffi.list_types() == ([], [], ['foo_s']) assert 
ffi.typeof('union foo_s').kind == 'union' assert ffi.typeof('union foo_s').cname == 'union foo_s' @@ -1834,7 +1834,7 @@ lib = verify(ffi, 'test_introspect_struct_and_typedef', """ typedef struct { int a; } foo_t; """) - assert ffi.list_types() == ['foo_t'] + assert ffi.list_types() == (['foo_t'], [], []) assert ffi.typeof('foo_t').kind == 'struct' assert ffi.typeof('foo_t').cname == 'foo_t' @@ -1849,8 +1849,8 @@ ffi2.include(ffi1) verify(ffi1, "test_introspect_included_type_parent", SOURCE) verify(ffi2, "test_introspect_included_type", SOURCE) - assert ffi1.list_types() == ffi2.list_types() == [ - 'schar_t', 'struct sint_t'] + assert ffi1.list_types() == ffi2.list_types() == ( + ['schar_t'], ['sint_t'], []) def test_introspect_order(): ffi = FFI() @@ -1862,6 +1862,6 @@ union g { int a; }; typedef struct cc { int a; } bbb; union aa { int a; }; typedef struct a { int a; } bb; """) - assert ffi.list_types() == ['b', 'bb', 'bbb', - 'struct a', 'struct cc', 'struct ccc', - 'union aa', 'union aaa', 'union g'] + assert ffi.list_types() == (['b', 'bb', 'bbb'], + ['a', 'cc', 'ccc'], + ['aa', 'aaa', 'g']) From pypy.commits at gmail.com Wed Mar 30 13:10:24 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 30 Mar 2016 10:10:24 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: backout dafdc7b5af5e Message-ID: <56fc0880.53371c0a.e56f2.ffff91e0@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83438:92c8bbbed211 Date: 2016-03-29 23:11 +0300 http://bitbucket.org/pypy/pypy/changeset/92c8bbbed211/ Log: backout dafdc7b5af5e diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py --- a/pypy/module/cpyext/api.py +++ b/pypy/module/cpyext/api.py @@ -61,9 +61,8 @@ class CConfig: _compilation_info_ = ExternalCompilationInfo( include_dirs=include_dirs, - includes=['stdarg.h', 'structmember.h'], + includes=['Python.h', 'stdarg.h', 'structmember.h'], compile_extra=['-DPy_BUILD_CORE'], - pre_include_bits = ['#include "Python.h"'], ) class CConfig2: From pypy.commits at gmail.com Wed Mar 30 13:10:26 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 30 Mar 2016 10:10:26 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: merge default into branch Message-ID: <56fc0882.12871c0a.162f5.ffffade7@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83439:b707f5d98670 Date: 2016-03-30 20:07 +0300 http://bitbucket.org/pypy/pypy/changeset/b707f5d98670/ Log: merge default into branch diff too long, truncating to 2000 out of 9177 lines diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -46,7 +46,6 @@ except detect_cpu.ProcessorAutodetectError: pass - translation_modules = default_modules.copy() translation_modules.update([ "fcntl", "time", "select", "signal", "_rawffi", "zlib", "struct", "_md5", diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -27,3 +27,8 @@ .. branch: fix_transpose_for_list_v3 Allow arguments to transpose to be sequences + +.. 
branch: jit-leaner-frontend + +Improve the tracing speed in the frontend as well as heapcache by using a more compact representation +of traces \ No newline at end of file diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h b/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h --- a/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h +++ b/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h @@ -10,6 +10,7 @@ #define _CJKCODECS_H_ #include "src/cjkcodecs/multibytecodec.h" +#include "src/cjkcodecs/fixnames.h" /* a unicode "undefined" codepoint */ diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h b/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h new file mode 100644 --- /dev/null +++ b/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h @@ -0,0 +1,9 @@ + +/* this is only included from the .c files in this directory: rename + these pypymbc-prefixed names to locally define the CPython names */ +typedef pypymbc_ssize_t Py_ssize_t; +#define PY_SSIZE_T_MAX ((Py_ssize_t)(((size_t) -1) >> 1)) +#define Py_UNICODE_SIZE pypymbc_UNICODE_SIZE +typedef pypymbc_wchar_t Py_UNICODE; +typedef pypymbc_ucs4_t ucs4_t; +typedef pypymbc_ucs2_t ucs2_t, DBCHAR; diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c --- a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c +++ b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c @@ -1,6 +1,7 @@ #include #include #include "src/cjkcodecs/multibytecodec.h" +#include "src/cjkcodecs/fixnames.h" struct pypy_cjk_dec_s *pypy_cjk_dec_new(const MultibyteCodec *codec) diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h --- a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h +++ b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h @@ -9,31 +9,28 @@ #include #ifdef _WIN64 -typedef __int64 ssize_t +typedef __int64 pypymbc_ssize_t #elif defined(_WIN32) -typedef int ssize_t; +typedef int pypymbc_ssize_t; #else #include -#endif - -#ifndef Py_UNICODE_SIZE -#ifdef _WIN32 -#define Py_UNICODE_SIZE 2 -#else -#define Py_UNICODE_SIZE 4 -#endif -typedef wchar_t Py_UNICODE; -typedef ssize_t Py_ssize_t; -#define PY_SSIZE_T_MAX ((Py_ssize_t)(((size_t) -1) >> 1)) +typedef ssize_t pypymbc_ssize_t; #endif #ifdef _WIN32 -typedef unsigned int ucs4_t; -typedef unsigned short ucs2_t, DBCHAR; +#define pypymbc_UNICODE_SIZE 2 +#else +#define pypymbc_UNICODE_SIZE 4 +#endif +typedef wchar_t pypymbc_wchar_t; + +#ifdef _WIN32 +typedef unsigned int pypymbc_ucs4_t; +typedef unsigned short pypymbc_ucs2_t; #else #include -typedef uint32_t ucs4_t; -typedef uint16_t ucs2_t, DBCHAR; +typedef uint32_t pypymbc_ucs4_t; +typedef uint16_t pypymbc_ucs2_t; #endif @@ -42,28 +39,28 @@ void *p; int i; unsigned char c[8]; - ucs2_t u2[4]; - ucs4_t u4[2]; + pypymbc_ucs2_t u2[4]; + pypymbc_ucs4_t u4[2]; } MultibyteCodec_State; typedef int (*mbcodec_init)(const void *config); -typedef Py_ssize_t (*mbencode_func)(MultibyteCodec_State *state, +typedef pypymbc_ssize_t (*mbencode_func)(MultibyteCodec_State *state, const void *config, - const Py_UNICODE **inbuf, Py_ssize_t inleft, - unsigned char **outbuf, Py_ssize_t outleft, + const pypymbc_wchar_t **inbuf, pypymbc_ssize_t inleft, + unsigned char **outbuf, pypymbc_ssize_t outleft, int flags); typedef int (*mbencodeinit_func)(MultibyteCodec_State *state, const void *config); -typedef Py_ssize_t (*mbencodereset_func)(MultibyteCodec_State *state, +typedef 
pypymbc_ssize_t (*mbencodereset_func)(MultibyteCodec_State *state, const void *config, - unsigned char **outbuf, Py_ssize_t outleft); -typedef Py_ssize_t (*mbdecode_func)(MultibyteCodec_State *state, + unsigned char **outbuf, pypymbc_ssize_t outleft); +typedef pypymbc_ssize_t (*mbdecode_func)(MultibyteCodec_State *state, const void *config, - const unsigned char **inbuf, Py_ssize_t inleft, - Py_UNICODE **outbuf, Py_ssize_t outleft); + const unsigned char **inbuf, pypymbc_ssize_t inleft, + pypymbc_wchar_t **outbuf, pypymbc_ssize_t outleft); typedef int (*mbdecodeinit_func)(MultibyteCodec_State *state, const void *config); -typedef Py_ssize_t (*mbdecodereset_func)(MultibyteCodec_State *state, +typedef pypymbc_ssize_t (*mbdecodereset_func)(MultibyteCodec_State *state, const void *config); typedef struct MultibyteCodec_s { @@ -94,59 +91,59 @@ const MultibyteCodec *codec; MultibyteCodec_State state; const unsigned char *inbuf_start, *inbuf, *inbuf_end; - Py_UNICODE *outbuf_start, *outbuf, *outbuf_end; + pypymbc_wchar_t *outbuf_start, *outbuf, *outbuf_end; }; RPY_EXTERN struct pypy_cjk_dec_s *pypy_cjk_dec_new(const MultibyteCodec *codec); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_init(struct pypy_cjk_dec_s *d, - char *inbuf, Py_ssize_t inlen); +pypymbc_ssize_t pypy_cjk_dec_init(struct pypy_cjk_dec_s *d, + char *inbuf, pypymbc_ssize_t inlen); RPY_EXTERN void pypy_cjk_dec_free(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_chunk(struct pypy_cjk_dec_s *); +pypymbc_ssize_t pypy_cjk_dec_chunk(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_UNICODE *pypy_cjk_dec_outbuf(struct pypy_cjk_dec_s *); +pypymbc_wchar_t *pypy_cjk_dec_outbuf(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_outlen(struct pypy_cjk_dec_s *); +pypymbc_ssize_t pypy_cjk_dec_outlen(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_inbuf_remaining(struct pypy_cjk_dec_s *d); +pypymbc_ssize_t pypy_cjk_dec_inbuf_remaining(struct pypy_cjk_dec_s *d); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_inbuf_consumed(struct pypy_cjk_dec_s* d); +pypymbc_ssize_t pypy_cjk_dec_inbuf_consumed(struct pypy_cjk_dec_s* d); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_replace_on_error(struct pypy_cjk_dec_s* d, - Py_UNICODE *, Py_ssize_t, Py_ssize_t); +pypymbc_ssize_t pypy_cjk_dec_replace_on_error(struct pypy_cjk_dec_s* d, + pypymbc_wchar_t *, pypymbc_ssize_t, pypymbc_ssize_t); struct pypy_cjk_enc_s { const MultibyteCodec *codec; MultibyteCodec_State state; - const Py_UNICODE *inbuf_start, *inbuf, *inbuf_end; + const pypymbc_wchar_t *inbuf_start, *inbuf, *inbuf_end; unsigned char *outbuf_start, *outbuf, *outbuf_end; }; RPY_EXTERN struct pypy_cjk_enc_s *pypy_cjk_enc_new(const MultibyteCodec *codec); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_init(struct pypy_cjk_enc_s *d, - Py_UNICODE *inbuf, Py_ssize_t inlen); +pypymbc_ssize_t pypy_cjk_enc_init(struct pypy_cjk_enc_s *d, + pypymbc_wchar_t *inbuf, pypymbc_ssize_t inlen); RPY_EXTERN void pypy_cjk_enc_free(struct pypy_cjk_enc_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_chunk(struct pypy_cjk_enc_s *, Py_ssize_t); +pypymbc_ssize_t pypy_cjk_enc_chunk(struct pypy_cjk_enc_s *, pypymbc_ssize_t); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_reset(struct pypy_cjk_enc_s *); +pypymbc_ssize_t pypy_cjk_enc_reset(struct pypy_cjk_enc_s *); RPY_EXTERN char *pypy_cjk_enc_outbuf(struct pypy_cjk_enc_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_outlen(struct pypy_cjk_enc_s *); +pypymbc_ssize_t pypy_cjk_enc_outlen(struct pypy_cjk_enc_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_inbuf_remaining(struct pypy_cjk_enc_s *d); +pypymbc_ssize_t 
pypy_cjk_enc_inbuf_remaining(struct pypy_cjk_enc_s *d); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_inbuf_consumed(struct pypy_cjk_enc_s* d); +pypymbc_ssize_t pypy_cjk_enc_inbuf_consumed(struct pypy_cjk_enc_s* d); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_replace_on_error(struct pypy_cjk_enc_s* d, - char *, Py_ssize_t, Py_ssize_t); +pypymbc_ssize_t pypy_cjk_enc_replace_on_error(struct pypy_cjk_enc_s* d, + char *, pypymbc_ssize_t, pypymbc_ssize_t); RPY_EXTERN const MultibyteCodec *pypy_cjk_enc_getcodec(struct pypy_cjk_enc_s *); @@ -191,5 +188,7 @@ DEFINE_CODEC(big5) DEFINE_CODEC(cp950) +#undef DEFINE_CODEC + #endif diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py --- a/pypy/module/cpyext/test/test_sequence.py +++ b/pypy/module/cpyext/test/test_sequence.py @@ -101,8 +101,10 @@ self.raises(space, api, IndexError, api.PySequence_SetItem, l, 3, w_value) + t = api.PyTuple_New(1) + api.PyTuple_SetItem(t, 0, l) self.raises(space, api, TypeError, api.PySequence_SetItem, - api.PyTuple_New(1), 0, w_value) + t, 0, w_value) self.raises(space, api, TypeError, api.PySequence_SetItem, space.newdict(), 0, w_value) diff --git a/pypy/module/cpyext/test/test_tupleobject.py b/pypy/module/cpyext/test/test_tupleobject.py --- a/pypy/module/cpyext/test/test_tupleobject.py +++ b/pypy/module/cpyext/test/test_tupleobject.py @@ -5,6 +5,7 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.debug import FatalError class TestTupleObject(BaseApiTest): @@ -18,29 +19,44 @@ #assert api.PyTuple_GET_SIZE(atuple) == 3 --- now a C macro raises(TypeError, api.PyTuple_Size(space.newlist([]))) api.PyErr_Clear() - + + def test_tuple_realize_refuses_nulls(self, space, api): + py_tuple = api.PyTuple_New(1) + py.test.raises(FatalError, from_ref, space, py_tuple) + def test_tuple_resize(self, space, api): w_42 = space.wrap(42) + w_43 = space.wrap(43) + w_44 = space.wrap(44) ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw') py_tuple = api.PyTuple_New(3) # inside py_tuple is an array of "PyObject *" items which each hold # a reference rffi.cast(PyTupleObject, py_tuple).c_ob_item[0] = make_ref(space, w_42) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[1] = make_ref(space, w_43) ar[0] = py_tuple api._PyTuple_Resize(ar, 2) w_tuple = from_ref(space, ar[0]) assert space.int_w(space.len(w_tuple)) == 2 assert space.int_w(space.getitem(w_tuple, space.wrap(0))) == 42 + assert space.int_w(space.getitem(w_tuple, space.wrap(1))) == 43 api.Py_DecRef(ar[0]) py_tuple = api.PyTuple_New(3) rffi.cast(PyTupleObject, py_tuple).c_ob_item[0] = make_ref(space, w_42) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[1] = make_ref(space, w_43) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[2] = make_ref(space, w_44) ar[0] = py_tuple api._PyTuple_Resize(ar, 10) + assert api.PyTuple_Size(ar[0]) == 10 + for i in range(3, 10): + rffi.cast(PyTupleObject, py_tuple).c_ob_item[i] = make_ref( + space, space.wrap(42 + i)) w_tuple = from_ref(space, ar[0]) assert space.int_w(space.len(w_tuple)) == 10 - assert space.int_w(space.getitem(w_tuple, space.wrap(0))) == 42 + for i in range(10): + assert space.int_w(space.getitem(w_tuple, space.wrap(i))) == 42 + i api.Py_DecRef(ar[0]) lltype.free(ar, flavor='raw') diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ b/pypy/module/cpyext/tupleobject.py @@ -1,5 +1,6 @@ from 
pypy.interpreter.error import OperationError from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.debug import fatalerror_notb from pypy.module.cpyext.api import (cpython_api, Py_ssize_t, CANNOT_FAIL, build_type_checkers, PyObjectFields, cpython_struct, bootstrap_function) @@ -91,14 +92,22 @@ def tuple_realize(space, py_obj): """ Creates the tuple in the interpreter. The PyTupleObject must not - be modified after this call. + be modified after this call. We check that it does not contain + any NULLs at this point (which would correspond to half-broken + W_TupleObjects). """ py_tup = rffi.cast(PyTupleObject, py_obj) l = py_tup.c_ob_size p = py_tup.c_ob_item items_w = [None] * l for i in range(l): - items_w[i] = from_ref(space, p[i]) + w_item = from_ref(space, p[i]) + if w_item is None: + fatalerror_notb( + "Fatal error in cpyext, CPython compatibility layer: " + "converting a PyTupleObject into a W_TupleObject, " + "but found NULLs as items") + items_w[i] = w_item w_obj = space.newtuple(items_w) track_reference(space, py_obj, w_obj) return w_obj diff --git a/pypy/objspace/std/objectobject.py b/pypy/objspace/std/objectobject.py --- a/pypy/objspace/std/objectobject.py +++ b/pypy/objspace/std/objectobject.py @@ -110,7 +110,7 @@ def descr__init__(space, w_obj, __args__): # don't allow arguments unless __new__ is overridden w_type = space.type(w_obj) - w_parent_new, _ = w_type.lookup_where('__new__') + w_parent_new, _ = space.lookup_in_type_where(w_type, '__new__') if w_parent_new is space.w_object: try: __args__.fixedunpack(0) diff --git a/pypy/tool/gdb_pypy.py b/pypy/tool/gdb_pypy.py --- a/pypy/tool/gdb_pypy.py +++ b/pypy/tool/gdb_pypy.py @@ -288,9 +288,11 @@ RPyListPrinter.recursive = True try: itemlist = [] - for i in range(length): + for i in range(min(length, MAX_DISPLAY_LENGTH)): item = items[i] itemlist.append(str(item)) # may recurse here + if length > MAX_DISPLAY_LENGTH: + itemlist.append("...") str_items = ', '.join(itemlist) finally: RPyListPrinter.recursive = False diff --git a/rpython/config/translationoption.py b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -126,6 +126,9 @@ ChoiceOption("jit_profiler", "integrate profiler support into the JIT", ["off", "oprofile"], default="off"), + ChoiceOption("jit_opencoder_model", "the model limits the maximal length" + " of traces. 
Use big if you want to go bigger than " + "the default", ["big", "normal"], default="normal"), BoolOption("check_str_without_nul", "Forbid NUL chars in strings in some external function calls", default=False, cmdline=None), diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -939,9 +939,9 @@ op = operations[i] self.mc.mark_op(op) opnum = op.getopnum() - if op.has_no_side_effect() and op not in regalloc.longevity: + if rop.has_no_side_effect(opnum) and op not in regalloc.longevity: regalloc.possibly_free_vars_for_op(op) - elif not we_are_translated() and op.getopnum() == -127: + elif not we_are_translated() and op.getopnum() == rop.FORCE_SPILL: regalloc.prepare_force_spill(op, fcond) else: arglocs = regalloc_operations[opnum](regalloc, op, fcond) @@ -949,7 +949,7 @@ fcond = asm_operations[opnum](self, op, arglocs, regalloc, fcond) assert fcond is not None - if op.is_guard(): + if rop.is_guard(opnum): regalloc.possibly_free_vars(op.getfailargs()) if op.type != 'v': regalloc.possibly_free_var(op) diff --git a/rpython/jit/backend/arm/detect.py b/rpython/jit/backend/arm/detect.py --- a/rpython/jit/backend/arm/detect.py +++ b/rpython/jit/backend/arm/detect.py @@ -63,3 +63,44 @@ "falling back to", "ARMv%d" % n) debug_stop("jit-backend-arch") return n + + +# Once we can rely on the availability of glibc >= 2.16, replace this with: +# from rpython.rtyper.lltypesystem import lltype, rffi +# getauxval = rffi.llexternal("getauxval", [lltype.Unsigned], lltype.Unsigned) +def getauxval(type_, filename='/proc/self/auxv'): + fd = os.open(filename, os.O_RDONLY, 0644) + + buf_size = 2048 + struct_size = 8 # 2x uint32 + try: + buf = os.read(fd, buf_size) + finally: + os.close(fd) + + # decode chunks of 8 bytes (a_type, a_val), and + # return the a_val whose a_type corresponds to type_, + # or zero if not found. 
+ i = 0 + while i <= buf_size - struct_size: + # We only support little-endian ARM + a_type = (ord(buf[i]) | + (ord(buf[i+1]) << 8) | + (ord(buf[i+2]) << 16) | + (ord(buf[i+3]) << 24)) + a_val = (ord(buf[i+4]) | + (ord(buf[i+5]) << 8) | + (ord(buf[i+6]) << 16) | + (ord(buf[i+7]) << 24)) + i += struct_size + if a_type == type_: + return a_val + + return 0 + + +def detect_neon(): + AT_HWCAP = 16 + HWCAP_NEON = 1 << 12 + hwcap = getauxval(AT_HWCAP) + return bool(hwcap & HWCAP_NEON) diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -1092,8 +1092,8 @@ self.mc.VCVT_int_to_float(res.value, r.svfp_ip.value) return fcond - # the following five instructions are only ARMv7; - # regalloc.py won't call them at all on ARMv6 + # the following five instructions are only ARMv7 with NEON; + # regalloc.py won't call them at all, in other cases emit_opx_llong_add = gen_emit_float_op('llong_add', 'VADD_i64') emit_opx_llong_sub = gen_emit_float_op('llong_sub', 'VSUB_i64') emit_opx_llong_and = gen_emit_float_op('llong_and', 'VAND_i64') diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -530,7 +530,7 @@ EffectInfo.OS_LLONG_AND, EffectInfo.OS_LLONG_OR, EffectInfo.OS_LLONG_XOR): - if self.cpu.cpuinfo.arch_version >= 7: + if self.cpu.cpuinfo.neon: args = self._prepare_llong_binop_xx(op, fcond) self.perform_extra(op, args, fcond) return diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -7,13 +7,14 @@ from rpython.rlib.jit_hooks import LOOP_RUN_CONTAINER from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.jit.backend.arm.detect import detect_hardfloat -from rpython.jit.backend.arm.detect import detect_arch_version +from rpython.jit.backend.arm.detect import detect_arch_version, detect_neon jitframe.STATICSIZE = JITFRAME_FIXED_SIZE class CPUInfo(object): hf_abi = False arch_version = 6 + neon = False class AbstractARMCPU(AbstractLLCPU): @@ -48,6 +49,7 @@ def setup_once(self): self.cpuinfo.arch_version = detect_arch_version() self.cpuinfo.hf_abi = detect_hardfloat() + self.cpuinfo.neon = detect_neon() #self.codemap.setup() self.assembler.setup_once() diff --git a/rpython/jit/backend/arm/test/test_detect.py b/rpython/jit/backend/arm/test/test_detect.py --- a/rpython/jit/backend/arm/test/test_detect.py +++ b/rpython/jit/backend/arm/test/test_detect.py @@ -1,6 +1,6 @@ import py from rpython.tool.udir import udir -from rpython.jit.backend.arm.detect import detect_arch_version +from rpython.jit.backend.arm.detect import detect_arch_version, getauxval cpuinfo = "Processor : ARMv%d-compatible processor rev 7 (v6l)""" cpuinfo2 = """processor : 0 @@ -29,6 +29,19 @@ address sizes : 36 bits physical, 48 bits virtual power management: """ +# From a Marvell Armada 370/XP +auxv = ( + '\x10\x00\x00\x00\xd7\xa8\x1e\x00\x06\x00\x00\x00\x00\x10\x00\x00\x11\x00' + '\x00\x00d\x00\x00\x00\x03\x00\x00\x004\x00\x01\x00\x04\x00\x00\x00 \x00' + '\x00\x00\x05\x00\x00\x00\t\x00\x00\x00\x07\x00\x00\x00\x00\xe0\xf3\xb6' + '\x08\x00\x00\x00\x00\x00\x00\x00\t\x00\x00\x00t\xcf\x04\x00\x0b\x00\x00' + '\x000\x0c\x00\x00\x0c\x00\x00\x000\x0c\x00\x00\r\x00\x00\x000\x0c\x00\x00' + '\x0e\x00\x00\x000\x0c\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x19\x00\x00' + 
'\x00\x8a\xf3\x87\xbe\x1a\x00\x00\x00\x00\x00\x00\x00\x1f\x00\x00\x00\xec' + '\xff\x87\xbe\x0f\x00\x00\x00\x9a\xf3\x87\xbe\x00\x00\x00\x00\x00\x00\x00' + '\x00' +) + def write_cpuinfo(info): filepath = udir.join('get_arch_version') @@ -46,3 +59,10 @@ py.test.raises(ValueError, 'detect_arch_version(write_cpuinfo(cpuinfo % 5))') assert detect_arch_version(write_cpuinfo(cpuinfo2)) == 6 + + +def test_getauxval_no_neon(): + path = udir.join('auxv') + path.write(auxv, 'wb') + AT_HWCAP = 16 + assert getauxval(AT_HWCAP, filename=str(path)) == 2009303 diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -455,7 +455,7 @@ if box is not frame.current_op: value = frame.env[box] else: - value = box.getvalue() # 0 or 0.0 or NULL + value = 0 # box.getvalue() # 0 or 0.0 or NULL else: value = None values.append(value) @@ -472,6 +472,13 @@ # ------------------------------------------------------------ + def setup_descrs(self): + all_descrs = [] + for k, v in self.descrs.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + return all_descrs + def calldescrof(self, FUNC, ARGS, RESULT, effect_info): key = ('call', getkind(RESULT), tuple([getkind(A) for A in ARGS]), diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -331,7 +331,7 @@ counter = self._register_counter(tp, number, token) c_adr = ConstInt(rffi.cast(lltype.Signed, counter)) operations.append( - ResOperation(rop.INCREMENT_DEBUG_COUNTER, [c_adr], None)) + ResOperation(rop.INCREMENT_DEBUG_COUNTER, [c_adr])) def _register_counter(self, tp, number, token): # YYY very minor leak -- we need the counters to stay alive diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -21,6 +21,30 @@ self._cache_call = {} self._cache_interiorfield = {} + def setup_descrs(self): + all_descrs = [] + for k, v in self._cache_size.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_field.iteritems(): + for k1, v1 in v.iteritems(): + v1.descr_index = len(all_descrs) + all_descrs.append(v1) + for k, v in self._cache_array.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_arraylen.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_call.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_interiorfield.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + assert len(all_descrs) < 2**15 + return all_descrs + def init_size_descr(self, STRUCT, sizedescr): pass diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -316,6 +316,9 @@ return ll_frame return execute_token + def setup_descrs(self): + return self.gc_ll_descr.setup_descrs() + # ------------------- helpers and descriptions -------------------- @staticmethod diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -683,7 +683,7 @@ for i 
in range(len(operations)-1, -1, -1): op = operations[i] if op.type != 'v': - if op not in last_used and op.has_no_side_effect(): + if op not in last_used and rop.has_no_side_effect(op.opnum): continue opnum = op.getopnum() for j in range(op.numargs()): @@ -695,7 +695,7 @@ if opnum != rop.JUMP and opnum != rop.LABEL: if arg not in last_real_usage: last_real_usage[arg] = i - if op.is_guard(): + if rop.is_guard(op.opnum): for arg in op.getfailargs(): if arg is None: # hole continue @@ -732,14 +732,7 @@ return longevity, last_real_usage def is_comparison_or_ovf_op(opnum): - from rpython.jit.metainterp.resoperation import opclasses - cls = opclasses[opnum] - # hack hack: in theory they are instance method, but they don't use - # any instance field, we can use a fake object - class Fake(cls): - pass - op = Fake() - return op.is_comparison() or op.is_ovf() + return rop.is_comparison(opnum) or rop.is_ovf(opnum) def valid_addressing_size(size): return size == 1 or size == 2 or size == 4 or size == 8 diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -103,7 +103,7 @@ orig_op.set_forwarded(op) replaced = True op.setarg(i, arg) - if op.is_guard(): + if rop.is_guard(op.opnum): if not replaced: op = op.copy_and_change(op.getopnum()) orig_op.set_forwarded(op) @@ -212,7 +212,7 @@ # self._emit_mul_if_factor_offset_not_supported(v_length, scale, 0) # op.setarg(1, ConstInt(scale)) # op.setarg(2, v_length) - if op.is_getarrayitem() or \ + if rop.is_getarrayitem(opnum) or \ opnum in (rop.GETARRAYITEM_RAW_I, rop.GETARRAYITEM_RAW_F): self.handle_getarrayitem(op) @@ -324,13 +324,13 @@ if self.transform_to_gc_load(op): continue # ---------- turn NEWxxx into CALL_MALLOC_xxx ---------- - if op.is_malloc(): + if rop.is_malloc(op.opnum): self.handle_malloc_operation(op) continue - if (op.is_guard() or + if (rop.is_guard(op.opnum) or self.could_merge_with_next_guard(op, i, operations)): self.emit_pending_zeros() - elif op.can_malloc(): + elif rop.can_malloc(op.opnum): self.emitting_an_operation_that_can_collect() elif op.getopnum() == rop.LABEL: self.emitting_an_operation_that_can_collect() @@ -370,8 +370,8 @@ # return True in cases where the operation and the following guard # should likely remain together. Simplified version of # can_merge_with_next_guard() in llsupport/regalloc.py. - if not op.is_comparison(): - return op.is_ovf() # int_xxx_ovf() / guard_no_overflow() + if not rop.is_comparison(op.opnum): + return rop.is_ovf(op.opnum) # int_xxx_ovf() / guard_no_overflow() if i + 1 >= len(operations): return False next_op = operations[i + 1] @@ -400,7 +400,6 @@ # it's hard to test all cases). Rewrite it away. 
value = int(opnum == rop.GUARD_FALSE) op1 = ResOperation(rop.SAME_AS_I, [ConstInt(value)]) - op1.setint(value) self.emit_op(op1) lst = op.getfailargs()[:] lst[i] = op1 @@ -633,8 +632,7 @@ args = [frame, arglist[jd.index_of_virtualizable]] else: args = [frame] - call_asm = ResOperation(op.getopnum(), args, - op.getdescr()) + call_asm = ResOperation(op.getopnum(), args, descr=op.getdescr()) self.replace_op_with(self.get_box_replacement(op), call_asm) self.emit_op(call_asm) @@ -708,7 +706,7 @@ def _gen_call_malloc_gc(self, args, v_result, descr): """Generate a CALL_MALLOC_GC with the given args.""" self.emitting_an_operation_that_can_collect() - op = ResOperation(rop.CALL_MALLOC_GC, args, descr) + op = ResOperation(rop.CALL_MALLOC_GC, args, descr=descr) self.replace_op_with(v_result, op) self.emit_op(op) # In general, don't add v_result to write_barrier_applied: diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -286,7 +286,8 @@ self.assembler.mc.mark_op(op) self.rm.position = i self.fprm.position = i - if op.has_no_side_effect() and op not in self.longevity: + opnum = op.opnum + if rop.has_no_side_effect(opnum) and op not in self.longevity: i += 1 self.possibly_free_vars_for_op(op) continue @@ -298,8 +299,7 @@ else: self.fprm.temp_boxes.append(box) # - opnum = op.getopnum() - if not we_are_translated() and opnum == -127: + if not we_are_translated() and opnum == rop.FORCE_SPILL: self._consider_force_spill(op) else: arglocs = oplist[opnum](self, op) diff --git a/rpython/jit/backend/test/test_ll_random.py b/rpython/jit/backend/test/test_ll_random.py --- a/rpython/jit/backend/test/test_ll_random.py +++ b/rpython/jit/backend/test/test_ll_random.py @@ -2,6 +2,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr from rpython.rtyper import rclass from rpython.jit.backend.test import test_random +from rpython.jit.backend.test.test_random import getint, getref_base, getref from rpython.jit.metainterp.resoperation import ResOperation, rop, optypes from rpython.jit.metainterp.history import ConstInt, ConstPtr, getkind from rpython.jit.codewriter import heaptracker @@ -169,7 +170,7 @@ if length == 0: raise test_random.CannotProduceOperation v_index = r.choice(self.intvars) - if not (0 <= v_index.getint() < length): + if not (0 <= getint(v_index) < length): v_index = ConstInt(r.random_integer() % length) return v_index @@ -311,7 +312,7 @@ def field_descr(self, builder, r): v, A = builder.get_structptr_var(r, type=lltype.Array, array_of_structs=True) - array = v.getref(lltype.Ptr(A)) + array = getref(lltype.Ptr(A), v) v_index = builder.get_index(len(array), r) choice = [] for name in A.OF._names: @@ -344,7 +345,7 @@ w = ConstInt(r.random_integer()) else: w = r.choice(builder.intvars) - value = w.getint() + value = getint(w) if rffi.cast(lltype.Signed, rffi.cast(TYPE, value)) == value: break builder.do(self.opnum, [v, w], descr) @@ -357,7 +358,7 @@ w = ConstInt(r.random_integer()) else: w = r.choice(builder.intvars) - value = w.getint() + value = getint(w) if rffi.cast(lltype.Signed, rffi.cast(TYPE, value)) == value: break builder.do(self.opnum, [v, v_index, w], descr) @@ -389,7 +390,7 @@ class GetArrayItemOperation(ArrayOperation): def field_descr(self, builder, r): v, A = builder.get_arrayptr_var(r) - array = v.getref(lltype.Ptr(A)) + array = getref(lltype.Ptr(A), v) v_index = builder.get_index(len(array), r) descr = self.array_descr(builder, A) return v, A, 
v_index, descr @@ -411,7 +412,7 @@ w = ConstInt(r.random_integer()) else: w = r.choice(builder.intvars) - value = w.getint() + value = getint(w) if rffi.cast(lltype.Signed, rffi.cast(A.OF, value)) == value: break builder.do(self.opnum, [v, v_index, w], descr) @@ -455,7 +456,7 @@ v_ptr = builder.do(self.opnum, [v_length]) getattr(builder, self.builder_cache).append(v_ptr) # Initialize the string. Is there a better way to do this? - for i in range(v_length.getint()): + for i in range(getint(v_length)): v_index = ConstInt(i) v_char = ConstInt(r.random_integer() % self.max) builder.do(self.set_char, [v_ptr, v_index, v_char]) @@ -471,9 +472,9 @@ current = getattr(builder, self.builder_cache) if current and r.random() < .8: v_string = r.choice(current) - string = v_string.getref(self.ptr) + string = getref(self.ptr, v_string) else: - string = self.alloc(builder.get_index(500, r).getint()) + string = self.alloc(getint(builder.get_index(500, r))) v_string = ConstPtr(lltype.cast_opaque_ptr(llmemory.GCREF, string)) current.append(v_string) for i in range(len(string.chars)): @@ -484,7 +485,7 @@ class AbstractGetItemOperation(AbstractStringOperation): def produce_into(self, builder, r): v_string = self.get_string(builder, r) - v_index = builder.get_index(len(v_string.getref(self.ptr).chars), r) + v_index = builder.get_index(len(getref(self.ptr, v_string).chars), r) builder.do(self.opnum, [v_string, v_index]) class AbstractSetItemOperation(AbstractStringOperation): @@ -492,7 +493,7 @@ v_string = self.get_string(builder, r) if isinstance(v_string, ConstPtr): raise test_random.CannotProduceOperation # setitem(Const, ...) - v_index = builder.get_index(len(v_string.getref(self.ptr).chars), r) + v_index = builder.get_index(len(getref(self.ptr, v_string).chars), r) v_target = ConstInt(r.random_integer() % self.max) builder.do(self.opnum, [v_string, v_index, v_target]) @@ -505,15 +506,15 @@ def produce_into(self, builder, r): v_srcstring = self.get_string(builder, r) v_dststring = self.get_string(builder, r) - src = v_srcstring.getref(self.ptr) - dst = v_dststring.getref(self.ptr) + src = getref(self.ptr, v_srcstring) + dst = getref(self.ptr, v_dststring) if src == dst: # because it's not a raise test_random.CannotProduceOperation # memmove(), but memcpy() srclen = len(src.chars) dstlen = len(dst.chars) v_length = builder.get_index(min(srclen, dstlen), r) - v_srcstart = builder.get_index(srclen - v_length.getint() + 1, r) - v_dststart = builder.get_index(dstlen - v_length.getint() + 1, r) + v_srcstart = builder.get_index(srclen - getint(v_length) + 1, r) + v_dststart = builder.get_index(dstlen - getint(v_length) + 1, r) builder.do(self.opnum, [v_srcstring, v_dststring, v_srcstart, v_dststart, v_length]) @@ -585,7 +586,7 @@ """ % funcargs).compile() vtableptr = v._hints['vtable']._as_ptr() d = { - 'ptr': S.getref_base(), + 'ptr': getref_base(S), 'vtable' : vtableptr, 'LLException' : LLException, } diff --git a/rpython/jit/backend/test/test_random.py b/rpython/jit/backend/test/test_random.py --- a/rpython/jit/backend/test/test_random.py +++ b/rpython/jit/backend/test/test_random.py @@ -11,11 +11,9 @@ from rpython.jit.metainterp.executor import _execute_arglist, wrap_constant from rpython.jit.metainterp.resoperation import opname from rpython.jit.codewriter import longlong -from rpython.rtyper.lltypesystem import lltype, rstr +from rpython.rtyper.lltypesystem import lltype, llmemory, rstr from rpython.rtyper import rclass -class PleaseRewriteMe(Exception): - pass class DummyLoop(object): def __init__(self, 
subops): @@ -27,6 +25,41 @@ def execute_raised(self, exc, constant=False): self._got_exc = exc + +def getint(v): + if isinstance(v, (ConstInt, InputArgInt)): + return v.getint() + else: + return v._example_int + +def getfloatstorage(v): + if isinstance(v, (ConstFloat, InputArgFloat)): + return v.getfloatstorage() + else: + return v._example_float + +def getfloat(v): + return longlong.getrealfloat(getfloatstorage(v)) + +def getref_base(v): + if isinstance(v, (ConstPtr, InputArgRef)): + return v.getref_base() + else: + return v._example_ref + +def getref(PTR, v): + return lltype.cast_opaque_ptr(PTR, getref_base(v)) + +def constbox(v): + if v.type == INT: + return ConstInt(getint(v)) + if v.type == FLOAT: + return ConstFloat(getfloatstorage(v)) + if v.type == REF: + return ConstPtr(getref_base(v)) + assert 0, v.type + + class OperationBuilder(object): def __init__(self, cpu, loop, vars): self.cpu = cpu @@ -57,11 +90,21 @@ def do(self, opnum, argboxes, descr=None): self.fakemetainterp._got_exc = None op = ResOperation(opnum, argboxes, descr) + argboxes = map(constbox, argboxes) result = _execute_arglist(self.cpu, self.fakemetainterp, opnum, argboxes, descr) if result is not None: - c_result = wrap_constant(result) - op.copy_value_from(c_result) + if lltype.typeOf(result) == lltype.Signed: + op._example_int = result + elif isinstance(result, bool): + op._example_int = int(result) + elif lltype.typeOf(result) == longlong.FLOATSTORAGE: + op._example_float = result + elif isinstance(result, float): + op._example_float = longlong.getfloatstorage(result) + else: + assert lltype.typeOf(result) == llmemory.GCREF + op._example_ref = result self.loop.operations.append(op) return op @@ -101,7 +144,7 @@ if v in names: args.append(names[v]) elif isinstance(v, ConstPtr): - assert not v.getref_base() # otherwise should be in the names + assert not getref_base(v) # otherwise should be in the names args.append('ConstPtr(lltype.nullptr(llmemory.GCREF.TO))') elif isinstance(v, ConstFloat): args.append('ConstFloat(longlong.getfloatstorage(%r))' @@ -198,10 +241,10 @@ # def writevar(v, nameprefix, init=''): if nameprefix == 'const_ptr': - if not v.getref_base(): + if not getref_base(v): return 'lltype.nullptr(llmemory.GCREF.TO)' - TYPE = v.getref_base()._obj.ORIGTYPE - cont = lltype.cast_opaque_ptr(TYPE, v.getref_base()) + TYPE = getref_base(v)._obj.ORIGTYPE + cont = lltype.cast_opaque_ptr(TYPE, getref_base(v)) if TYPE.TO._is_varsize(): if isinstance(TYPE.TO, lltype.GcStruct): lgt = len(cont.chars) @@ -252,9 +295,9 @@ for i, v in enumerate(self.loop.inputargs): assert not isinstance(v, Const) if v.type == FLOAT: - vals.append("longlong.getfloatstorage(%r)" % v.getfloat()) + vals.append("longlong.getfloatstorage(%r)" % getfloat(v)) else: - vals.append("%r" % v.getint()) + vals.append("%r" % getint(v)) print >>s, ' loop_args = [%s]' % ", ".join(vals) print >>s, ' frame = cpu.execute_token(looptoken, *loop_args)' if self.should_fail_by is None: @@ -264,10 +307,10 @@ for i, v in enumerate(fail_args): if v.type == FLOAT: print >>s, (' assert longlong.getrealfloat(' - 'cpu.get_float_value(frame, %d)) == %r' % (i, v.getfloatstorage())) + 'cpu.get_float_value(frame, %d)) == %r' % (i, getfloatstorage(v))) else: print >>s, (' assert cpu.get_int_value(frame, %d) == %d' - % (i, v.getint())) + % (i, getint(v))) self.names = names s.flush() @@ -295,7 +338,7 @@ builder.intvars.append(v_result) boolres = self.boolres if boolres == 'sometimes': - boolres = v_result.getint() in [0, 1] + boolres = getint(v_result) in [0, 1] if 
boolres: builder.boolvars.append(v_result) elif v_result.type == FLOAT: @@ -346,10 +389,10 @@ v_second = ConstInt((value & self.and_mask) | self.or_mask) else: v = r.choice(builder.intvars) - v_value = v.getint() + v_value = getint(v) if (v_value & self.and_mask) != v_value: v = builder.do(rop.INT_AND, [v, ConstInt(self.and_mask)]) - v_value = v.getint() + v_value = getint(v) if (v_value | self.or_mask) != v_value: v = builder.do(rop.INT_OR, [v, ConstInt(self.or_mask)]) v_second = v @@ -395,9 +438,9 @@ v_second = ConstFloat(r.random_float_storage()) else: v_second = r.choice(builder.floatvars) - if abs(v_first.getfloat()) > 1E100 or abs(v_second.getfloat()) > 1E100: + if abs(getfloat(v_first)) > 1E100 or abs(getfloat(v_second)) > 1E100: raise CannotProduceOperation # avoid infinities - if abs(v_second.getfloat()) < 1E-100: + if abs(getfloat(v_second)) < 1E-100: raise CannotProduceOperation # e.g. division by zero error self.put(builder, [v_first, v_second]) @@ -432,7 +475,7 @@ if not builder.floatvars: raise CannotProduceOperation box = r.choice(builder.floatvars) - if not (-sys.maxint-1 <= box.getfloat() <= sys.maxint): + if not (-sys.maxint-1 <= getfloat(box) <= sys.maxint): raise CannotProduceOperation # would give an overflow self.put(builder, [box]) @@ -440,8 +483,8 @@ def gen_guard(self, builder, r): v = builder.get_bool_var(r) op = ResOperation(self.opnum, [v]) - passing = ((self.opnum == rop.GUARD_TRUE and v.getint()) or - (self.opnum == rop.GUARD_FALSE and not v.getint())) + passing = ((self.opnum == rop.GUARD_TRUE and getint(v)) or + (self.opnum == rop.GUARD_FALSE and not getint(v))) return op, passing def produce_into(self, builder, r): @@ -459,8 +502,8 @@ raise CannotProduceOperation box = r.choice(builder.ptrvars)[0] op = ResOperation(self.opnum, [box]) - passing = ((self.opnum == rop.GUARD_NONNULL and box.getref_base()) or - (self.opnum == rop.GUARD_ISNULL and not box.getref_base())) + passing = ((self.opnum == rop.GUARD_NONNULL and getref_base(box)) or + (self.opnum == rop.GUARD_ISNULL and not getref_base(box))) return op, passing class GuardValueOperation(GuardOperation): @@ -470,14 +513,14 @@ other = r.choice(builder.intvars) else: if r.random() < 0.75: - value = v.getint() + value = getint(v) elif r.random() < 0.5: - value = v.getint() ^ 1 + value = getint(v) ^ 1 else: value = r.random_integer() other = ConstInt(value) op = ResOperation(self.opnum, [v, other]) - return op, (v.getint() == other.getint()) + return op, (getint(v) == getint(other)) # ____________________________________________________________ @@ -675,7 +718,7 @@ assert not hasattr(loop, '_targettoken') for i in range(position): op = loop.operations[i] - if (not op.has_no_side_effect() + if (not rop.has_no_side_effect(op.opnum) or op.type not in (INT, FLOAT)): position = i break # cannot move the LABEL later @@ -728,9 +771,9 @@ self.expected = {} for v in endvars: if v.type == INT: - self.expected[v] = v.getint() + self.expected[v] = getint(v) elif v.type == FLOAT: - self.expected[v] = v.getfloatstorage() + self.expected[v] = getfloatstorage(v) else: assert 0, v.type @@ -742,7 +785,7 @@ args = [] for box in self.startvars: if box not in self.loop.inputargs: - box = box.constbox() + box = constbox(box) args.append(box) self.cpu.compile_loop(self.loop.inputargs, [ResOperation(rop.JUMP, args, @@ -760,7 +803,7 @@ def clear_state(self): for v, S, fields in self.prebuilt_ptr_consts: - container = v.getref_base()._obj.container + container = getref_base(v)._obj.container for name, value in fields.items(): if 
isinstance(name, str): setattr(container, name, value) @@ -781,9 +824,9 @@ arguments = [] for box in self.loop.inputargs: if box.type == INT: - arguments.append(box.getint()) + arguments.append(getint(box)) elif box.type == FLOAT: - arguments.append(box.getfloatstorage()) + arguments.append(getfloatstorage(box)) else: assert 0, box.type deadframe = cpu.execute_token(self.runjitcelltoken(), *arguments) @@ -795,7 +838,7 @@ if v not in self.expected: assert v.getopnum() == rop.SAME_AS_I # special case assert isinstance(v.getarg(0), ConstInt) - self.expected[v] = v.getarg(0).getint() + self.expected[v] = getint(v.getarg(0)) if v.type == FLOAT: value = cpu.get_float_value(deadframe, i) else: @@ -807,7 +850,7 @@ ) exc = cpu.grab_exc_value(deadframe) if (self.guard_op is not None and - self.guard_op.is_guard_exception()): + rop.is_guard_exception(self.guard_op.getopnum())): if self.guard_op.getopnum() == rop.GUARD_NO_EXCEPTION: do_assert(exc, "grab_exc_value() should not be %r" % (exc,)) @@ -840,7 +883,7 @@ # generate the branch: a sequence of operations that ends in a FINISH subloop = DummyLoop([]) self.subloops.append(subloop) # keep around for debugging - if guard_op.is_guard_exception(): + if rop.is_guard_exception(guard_op.getopnum()): subloop.operations.append(exc_handling(guard_op)) bridge_builder = self.builder.fork(self.builder.cpu, subloop, op.getfailargs()[:]) @@ -876,9 +919,9 @@ args = [] for x in subset: if x.type == INT: - args.append(InputArgInt(x.getint())) + args.append(InputArgInt(getint(x))) elif x.type == FLOAT: - args.append(InputArgFloat(x.getfloatstorage())) + args.append(InputArgFloat(getfloatstorage(x))) else: assert 0, x.type rl = RandomLoop(self.builder.cpu, self.builder.fork, diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -358,11 +358,11 @@ assert self.assembler.mc._frame_size == DEFAULT_FRAME_BYTES self.rm.position = i self.xrm.position = i - if op.has_no_side_effect() and op not in self.longevity: + if rop.has_no_side_effect(op.opnum) and op not in self.longevity: i += 1 self.possibly_free_vars_for_op(op) continue - if not we_are_translated() and op.getopnum() == -127: + if not we_are_translated() and op.getopnum() == rop.FORCE_SPILL: self._consider_force_spill(op) else: oplist[op.getopnum()](self, op) diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -476,7 +476,8 @@ self.assembler.mc.mark_op(op) self.rm.position = i self.fprm.position = i - if op.has_no_side_effect() and op not in self.longevity: + opnum = op.getopnum() + if rop.has_no_side_effect(opnum) and op not in self.longevity: i += 1 self.possibly_free_vars_for_op(op) continue @@ -488,8 +489,7 @@ else: self.fprm.temp_boxes.append(box) # - opnum = op.getopnum() - if not we_are_translated() and opnum == -127: + if not we_are_translated() and opnum == rop.FORCE_SPILL: self._consider_force_spill(op) else: arglocs = prepare_oplist[opnum](self, op) diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1585,7 +1585,6 @@ def _done_with_this_frame(self): # rare case: we only get there if the blackhole interps all returned # normally (in general we get a ContinueRunningNormally exception). 
- sd = self.builder.metainterp_sd kind = self._return_type if kind == 'v': raise jitexc.DoneWithThisFrameVoid() diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -27,12 +27,11 @@ class CompileData(object): memo = None + log_noopt = True def forget_optimization_info(self): - for arg in self.start_label.getarglist(): + for arg in self.trace.inputargs: arg.set_forwarded(None) - for op in self.operations: - op.set_forwarded(None) class LoopCompileData(CompileData): """ An object that accumulates all of the necessary info for @@ -40,15 +39,13 @@ This is the case of label() ops label() """ - def __init__(self, start_label, end_label, operations, - call_pure_results=None, enable_opts=None): - self.start_label = start_label - self.end_label = end_label + def __init__(self, trace, runtime_boxes, call_pure_results=None, + enable_opts=None): self.enable_opts = enable_opts - assert start_label.getopnum() == rop.LABEL - assert end_label.getopnum() == rop.LABEL - self.operations = operations + self.trace = trace self.call_pure_results = call_pure_results + assert runtime_boxes is not None + self.runtime_boxes = runtime_boxes def optimize(self, metainterp_sd, jitdriver_sd, optimizations, unroll): from rpython.jit.metainterp.optimizeopt.unroll import (UnrollOptimizer, @@ -56,23 +53,21 @@ if unroll: opt = UnrollOptimizer(metainterp_sd, jitdriver_sd, optimizations) - return opt.optimize_preamble(self.start_label, self.end_label, - self.operations, + return opt.optimize_preamble(self.trace, + self.runtime_boxes, self.call_pure_results, self.box_names_memo) else: opt = Optimizer(metainterp_sd, jitdriver_sd, optimizations) - return opt.propagate_all_forward(self.start_label.getarglist(), - self.operations, self.call_pure_results) + return opt.propagate_all_forward(self.trace, self.call_pure_results) class SimpleCompileData(CompileData): """ This represents label() ops jump with no extra info associated with the label """ - def __init__(self, start_label, operations, call_pure_results=None, + def __init__(self, trace, call_pure_results=None, enable_opts=None): - self.start_label = start_label - self.operations = operations + self.trace = trace self.call_pure_results = call_pure_results self.enable_opts = enable_opts @@ -81,17 +76,17 @@ #assert not unroll opt = Optimizer(metainterp_sd, jitdriver_sd, optimizations) - return opt.propagate_all_forward(self.start_label.getarglist(), - self.operations, self.call_pure_results) + return opt.propagate_all_forward(self.trace.get_iter(), + self.call_pure_results) class BridgeCompileData(CompileData): """ This represents ops() with a jump at the end that goes to some loop, we need to deal with virtual state and inlining of short preamble """ - def __init__(self, start_label, operations, call_pure_results=None, + def __init__(self, trace, runtime_boxes, call_pure_results=None, enable_opts=None, inline_short_preamble=False): - self.start_label = start_label - self.operations = operations + self.trace = trace + self.runtime_boxes = runtime_boxes self.call_pure_results = call_pure_results self.enable_opts = enable_opts self.inline_short_preamble = inline_short_preamble @@ -100,7 +95,7 @@ from rpython.jit.metainterp.optimizeopt.unroll import UnrollOptimizer opt = UnrollOptimizer(metainterp_sd, jitdriver_sd, optimizations) - return opt.optimize_bridge(self.start_label, self.operations, + return opt.optimize_bridge(self.trace, self.runtime_boxes, 
self.call_pure_results, self.inline_short_preamble, self.box_names_memo) @@ -109,12 +104,13 @@ """ This represents label() ops jump with extra info that's from the run of LoopCompileData. Jump goes to the same label """ - def __init__(self, start_label, end_jump, operations, state, + log_noopt = False + + def __init__(self, trace, celltoken, state, call_pure_results=None, enable_opts=None, inline_short_preamble=True): - self.start_label = start_label - self.end_jump = end_jump - self.operations = operations + self.trace = trace + self.celltoken = celltoken self.enable_opts = enable_opts self.state = state self.call_pure_results = call_pure_results @@ -125,9 +121,8 @@ assert unroll # we should not be here if it's disabled opt = UnrollOptimizer(metainterp_sd, jitdriver_sd, optimizations) - return opt.optimize_peeled_loop(self.start_label, self.end_jump, - self.operations, self.state, self.call_pure_results, - self.inline_short_preamble) + return opt.optimize_peeled_loop(self.trace, self.celltoken, self.state, + self.call_pure_results, self.inline_short_preamble) def show_procedures(metainterp_sd, procedure=None, error=None): # debugging @@ -208,23 +203,21 @@ # ____________________________________________________________ -def compile_simple_loop(metainterp, greenkey, start, inputargs, ops, jumpargs, - enable_opts): +def compile_simple_loop(metainterp, greenkey, trace, runtime_args, enable_opts, + cut_at): from rpython.jit.metainterp.optimizeopt import optimize_trace jitdriver_sd = metainterp.jitdriver_sd metainterp_sd = metainterp.staticdata jitcell_token = make_jitcell_token(jitdriver_sd) - label = ResOperation(rop.LABEL, inputargs[:], descr=jitcell_token) - jump_op = ResOperation(rop.JUMP, jumpargs[:], descr=jitcell_token) call_pure_results = metainterp.call_pure_results - data = SimpleCompileData(label, ops + [jump_op], - call_pure_results=call_pure_results, - enable_opts=enable_opts) + data = SimpleCompileData(trace, call_pure_results=call_pure_results, + enable_opts=enable_opts) try: loop_info, ops = optimize_trace(metainterp_sd, jitdriver_sd, data, metainterp.box_names_memo) except InvalidLoop: + trace.cut_at(cut_at) return None loop = create_empty_loop(metainterp) loop.original_jitcell_token = jitcell_token @@ -241,7 +234,7 @@ loop.check_consistency() jitcell_token.target_tokens = [target_token] send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, "loop", - inputargs, metainterp.box_names_memo) + runtime_args, metainterp.box_names_memo) record_loop_or_bridge(metainterp_sd, loop) return target_token @@ -255,6 +248,7 @@ metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd history = metainterp.history + trace = history.trace warmstate = jitdriver_sd.warmstate enable_opts = jitdriver_sd.warmstate.enable_opts @@ -264,16 +258,16 @@ enable_opts = enable_opts.copy() del enable_opts['unroll'] - ops = history.operations[start:] + jitcell_token = make_jitcell_token(jitdriver_sd) + cut_at = history.get_trace_position() + history.record(rop.JUMP, jumpargs, None, descr=jitcell_token) + if start != (0, 0, 0): + trace = trace.cut_trace_from(start, inputargs) if 'unroll' not in enable_opts or not metainterp.cpu.supports_guard_gc_type: - return compile_simple_loop(metainterp, greenkey, start, inputargs, ops, - jumpargs, enable_opts) - jitcell_token = make_jitcell_token(jitdriver_sd) - label = ResOperation(rop.LABEL, inputargs, - descr=TargetToken(jitcell_token)) - end_label = ResOperation(rop.LABEL, jumpargs, descr=jitcell_token) + return 
compile_simple_loop(metainterp, greenkey, trace, jumpargs, + enable_opts, cut_at) call_pure_results = metainterp.call_pure_results - preamble_data = LoopCompileData(label, end_label, ops, + preamble_data = LoopCompileData(trace, jumpargs, call_pure_results=call_pure_results, enable_opts=enable_opts) try: @@ -281,17 +275,15 @@ preamble_data, metainterp.box_names_memo) except InvalidLoop: + history.cut(cut_at) return None metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd - end_label = ResOperation(rop.LABEL, inputargs, - descr=jitcell_token) - jump_op = ResOperation(rop.JUMP, jumpargs, descr=jitcell_token) start_descr = TargetToken(jitcell_token, original_jitcell_token=jitcell_token) jitcell_token.target_tokens = [start_descr] - loop_data = UnrolledLoopData(end_label, jump_op, ops, start_state, + loop_data = UnrolledLoopData(trace, jitcell_token, start_state, call_pure_results=call_pure_results, enable_opts=enable_opts) try: @@ -299,11 +291,12 @@ loop_data, metainterp.box_names_memo) except InvalidLoop: + history.cut(cut_at) return None if ((warmstate.vec and jitdriver_sd.vec) or warmstate.vec_all): from rpython.jit.metainterp.optimizeopt.vector import optimize_vector - loop_info, loop_ops = optimize_vector(metainterp_sd, + loop_info, loop_ops = optimize_vector(trace, metainterp_sd, jitdriver_sd, warmstate, loop_info, loop_ops, jitcell_token) @@ -342,22 +335,20 @@ to the first operation. """ from rpython.jit.metainterp.optimizeopt import optimize_trace - from rpython.jit.metainterp.optimizeopt.optimizer import BasicLoopInfo - history = metainterp.history + trace = metainterp.history.trace.cut_trace_from(start, inputargs) metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd + history = metainterp.history loop_jitcell_token = metainterp.get_procedure_token(greenkey) assert loop_jitcell_token - end_label = ResOperation(rop.LABEL, inputargs[:], - descr=loop_jitcell_token) - jump_op = ResOperation(rop.JUMP, jumpargs[:], descr=loop_jitcell_token) + cut = history.get_trace_position() + history.record(rop.JUMP, jumpargs[:], None, descr=loop_jitcell_token) enable_opts = jitdriver_sd.warmstate.enable_opts - ops = history.operations[start:] call_pure_results = metainterp.call_pure_results - loop_data = UnrolledLoopData(end_label, jump_op, ops, start_state, + loop_data = UnrolledLoopData(trace, loop_jitcell_token, start_state, call_pure_results=call_pure_results, enable_opts=enable_opts) try: @@ -366,8 +357,9 @@ metainterp.box_names_memo) except InvalidLoop: # Fall back on jumping directly to preamble - jump_op = ResOperation(rop.JUMP, inputargs[:], descr=loop_jitcell_token) - loop_data = UnrolledLoopData(end_label, jump_op, [jump_op], start_state, + history.cut(cut) + history.record(rop.JUMP, jumpargs[:], None, descr=loop_jitcell_token) + loop_data = UnrolledLoopData(trace, loop_jitcell_token, start_state, call_pure_results=call_pure_results, enable_opts=enable_opts, inline_short_preamble=False) @@ -376,9 +368,13 @@ loop_data, metainterp.box_names_memo) except InvalidLoop: + history.cut(cut) return None - label_token = loop_info.label_op.getdescr() + label_op = loop_info.label_op + if label_op is None: + assert False, "unreachable code" # hint for some strange tests + label_token = label_op.getdescr() assert isinstance(label_token, TargetToken) if label_token.short_preamble: metainterp_sd.logger_ops.log_short_preamble([], @@ -445,13 +441,13 @@ box = inputargs[i] opnum = OpHelpers.getfield_for_descr(descr) emit_op(extra_ops, - ResOperation(opnum, 
[vable_box], descr)) + ResOperation(opnum, [vable_box], descr=descr)) box.set_forwarded(extra_ops[-1]) i += 1 arrayindex = 0 for descr in vinfo.array_field_descrs: arraylen = vinfo.get_array_length(vable, arrayindex) - arrayop = ResOperation(rop.GETFIELD_GC_R, [vable_box], descr) + arrayop = ResOperation(rop.GETFIELD_GC_R, [vable_box], descr=descr) emit_op(extra_ops, arrayop) arraydescr = vinfo.array_descrs[arrayindex] assert i + arraylen <= len(inputargs) @@ -1017,9 +1013,9 @@ metainterp_sd.stats.add_jitcell_token(jitcell_token) -def compile_trace(metainterp, resumekey): +def compile_trace(metainterp, resumekey, runtime_boxes): """Try to compile a new bridge leading from the beginning of the history - to some existing place. + to some existging place. """ from rpython.jit.metainterp.optimizeopt import optimize_trace @@ -1037,20 +1033,19 @@ else: inline_short_preamble = True inputargs = metainterp.history.inputargs[:] - operations = metainterp.history.operations - label = ResOperation(rop.LABEL, inputargs) + trace = metainterp.history.trace jitdriver_sd = metainterp.jitdriver_sd enable_opts = jitdriver_sd.warmstate.enable_opts call_pure_results = metainterp.call_pure_results - if operations[-1].getopnum() == rop.JUMP: - data = BridgeCompileData(label, operations[:], + if metainterp.history.ends_with_jump: + data = BridgeCompileData(trace, runtime_boxes, call_pure_results=call_pure_results, enable_opts=enable_opts, inline_short_preamble=inline_short_preamble) else: - data = SimpleCompileData(label, operations[:], + data = SimpleCompileData(trace, call_pure_results=call_pure_results, enable_opts=enable_opts) try: diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -9,7 +9,7 @@ from rpython.jit.metainterp.history import INT, REF, FLOAT, VOID, AbstractDescr from rpython.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr from rpython.jit.metainterp import resoperation -from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.resoperation import rop, opname from rpython.jit.metainterp.blackhole import BlackholeInterpreter, NULL from rpython.jit.codewriter import longlong @@ -314,7 +314,8 @@ def _make_execute_list(): execute_by_num_args = {} - for key, value in rop.__dict__.items(): + for key in opname.values(): + value = getattr(rop, key) if not key.startswith('_'): if (rop._FINAL_FIRST <= value <= rop._FINAL_LAST or rop._GUARD_FIRST <= value <= rop._GUARD_LAST): @@ -384,6 +385,11 @@ rop.CALL_MALLOC_NURSERY_VARSIZE_FRAME, rop.NURSERY_PTR_INCREMENT, rop.LABEL, + rop.ESCAPE_I, + rop.ESCAPE_N, + rop.ESCAPE_R, + rop.ESCAPE_F, + rop.FORCE_SPILL, rop.SAVE_EXC_CLASS, rop.SAVE_EXCEPTION, rop.RESTORE_EXCEPTION, diff --git a/rpython/jit/metainterp/graphpage.py b/rpython/jit/metainterp/graphpage.py --- a/rpython/jit/metainterp/graphpage.py +++ b/rpython/jit/metainterp/graphpage.py @@ -170,7 +170,8 @@ while True: op = operations[opindex] op_repr = op.repr(self.memo, graytext=True) - if op.getopnum() == rop.DEBUG_MERGE_POINT: + if (op.getopnum() == rop.DEBUG_MERGE_POINT and + self.metainterp_sd is not None): jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()] if jd_sd._get_printable_location_ptr: s = jd_sd.warmstate.get_location_str(op.getarglist()[3:]) diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py --- a/rpython/jit/metainterp/heapcache.py +++ b/rpython/jit/metainterp/heapcache.py @@ -1,33 +1,59 @@ -from 
rpython.jit.metainterp.history import ConstInt +from rpython.jit.metainterp.history import Const, ConstInt +from rpython.jit.metainterp.history import FrontendOp, RefFrontendOp from rpython.jit.metainterp.resoperation import rop, OpHelpers +from rpython.jit.metainterp.executor import constant_from_op +from rpython.rlib.rarithmetic import r_uint32, r_uint +from rpython.rlib.objectmodel import always_inline -class HeapCacheValue(object): - def __init__(self, box): - self.box = box - self.likely_virtual = False - self.reset_keep_likely_virtual() +""" A big note: we don't do heap caches on Consts, because it used +to be done with the identity of the Const instance. This gives very wonky +results at best, so we decided to not do it at all. Can be fixed with +interning of Consts (already done on trace anyway) +""" - def reset_keep_likely_virtual(self): - self.known_class = False - self.known_nullity = False - # did we see the allocation during tracing? - self.seen_allocation = False - self.is_unescaped = False - self.nonstandard_virtualizable = False - self.length = None - self.dependencies = None +# RefFrontendOp._heapc_flags: +HF_LIKELY_VIRTUAL = 0x01 +HF_KNOWN_CLASS = 0x02 +HF_KNOWN_NULLITY = 0x04 +HF_SEEN_ALLOCATION = 0x08 # did we see the allocation during tracing? +HF_IS_UNESCAPED = 0x10 +HF_NONSTD_VABLE = 0x20 - def __repr__(self): - return 'HeapCacheValue(%s)' % (self.box, ) +_HF_VERSION_INC = 0x40 # must be last +_HF_VERSION_MAX = r_uint(2 ** 32 - _HF_VERSION_INC) + + at always_inline +def add_flags(ref_frontend_op, flags): + f = ref_frontend_op._get_heapc_flags() + f |= r_uint(flags) + ref_frontend_op._set_heapc_flags(f) + + at always_inline +def remove_flags(ref_frontend_op, flags): + f = ref_frontend_op._get_heapc_flags() + f &= r_uint(~flags) + ref_frontend_op._set_heapc_flags(f) + + at always_inline +def test_flags(ref_frontend_op, flags): + f = ref_frontend_op._get_heapc_flags() + return bool(f & r_uint(flags)) + +def maybe_replace_with_const(box): + if not isinstance(box, Const) and box.is_replaced_with_const(): + return constant_from_op(box) + else: + return box class CacheEntry(object): - def __init__(self): - # both are {from_value: to_value} dicts + def __init__(self, heapcache): + # both are {from_ref_box: to_field_box} dicts # the first is for boxes where we did not see the allocation, the # second for anything else. the reason that distinction makes sense is # because if we saw the allocation, we know it cannot alias with # anything else where we saw the allocation. 
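
# Illustrative sketch (not part of the changeset), plain Python: the flag
# helpers above pack boolean properties into the low bits of one unsigned
# word per RefFrontendOp, leaving the bits from _HF_VERSION_INC upwards
# free for a version stamp.  All names below are demo-only stand-ins.

HF_DEMO_A = 0x01
HF_DEMO_B = 0x02
_DEMO_VERSION_INC = 0x40      # first bit above the flag bits

class DemoOp(object):
    flags = 0

op = DemoOp()
op.flags |= HF_DEMO_A                     # add_flags()
op.flags |= 3 * _DEMO_VERSION_INC         # stamp version 3 in the high bits
assert op.flags & HF_DEMO_A               # test_flags()
assert not (op.flags & HF_DEMO_B)
assert op.flags >= 3 * _DEMO_VERSION_INC  # version check, as in test_head_version()
op.flags &= ~HF_DEMO_A                    # remove_flags()
assert not (op.flags & HF_DEMO_A)
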
+ self.heapcache = heapcache self.cache_anything = {} self.cache_seen_allocation = {} @@ -36,112 +62,137 @@ self.cache_seen_allocation.clear() self.cache_anything.clear() - def _getdict(self, value): - if value.seen_allocation: + def _seen_alloc(self, ref_box): + if not isinstance(ref_box, RefFrontendOp): + return False + return self.heapcache._check_flag(ref_box, HF_SEEN_ALLOCATION) + + def _getdict(self, seen_alloc): + if seen_alloc: return self.cache_seen_allocation else: return self.cache_anything - def do_write_with_aliasing(self, value, fieldvalue): - self._clear_cache_on_write(value.seen_allocation) - self._getdict(value)[value] = fieldvalue + def do_write_with_aliasing(self, ref_box, fieldbox): + seen_alloc = self._seen_alloc(ref_box) + self._clear_cache_on_write(seen_alloc) + self._getdict(seen_alloc)[ref_box] = fieldbox - def read(self, value): - return self._getdict(value).get(value, None) + def read(self, ref_box): + dict = self._getdict(self._seen_alloc(ref_box)) + try: + res_box = dict[ref_box] + except KeyError: + return None + return maybe_replace_with_const(res_box) - def read_now_known(self, value, fieldvalue): - self._getdict(value)[value] = fieldvalue + def read_now_known(self, ref_box, fieldbox): + self._getdict(self._seen_alloc(ref_box))[ref_box] = fieldbox def invalidate_unescaped(self): self._invalidate_unescaped(self.cache_anything) self._invalidate_unescaped(self.cache_seen_allocation) def _invalidate_unescaped(self, d): - for value in d.keys(): - if not value.is_unescaped: - del d[value] + for ref_box in d.keys(): + if not self.heapcache.is_unescaped(ref_box): + del d[ref_box] class FieldUpdater(object): - def __init__(self, heapcache, value, cache, fieldvalue): - self.heapcache = heapcache - self.value = value + def __init__(self, ref_box, cache, fieldbox): + self.ref_box = ref_box self.cache = cache - if fieldvalue is not None: - self.currfieldbox = fieldvalue.box - else: - self.currfieldbox = None + self.currfieldbox = fieldbox # <= read directly from pyjitpl.py def getfield_now_known(self, fieldbox): - fieldvalue = self.heapcache.getvalue(fieldbox) - self.cache.read_now_known(self.value, fieldvalue) + self.cache.read_now_known(self.ref_box, fieldbox) def setfield(self, fieldbox): - fieldvalue = self.heapcache.getvalue(fieldbox) - self.cache.do_write_with_aliasing(self.value, fieldvalue) + self.cache.do_write_with_aliasing(self.ref_box, fieldbox) + +class DummyFieldUpdater(FieldUpdater): + def __init__(self): + self.currfieldbox = None + + def getfield_now_known(self, fieldbox): + pass + + def setfield(self, fieldbox): + pass + +dummy_field_updater = DummyFieldUpdater() class HeapCache(object): def __init__(self): + # Works with flags stored on RefFrontendOp._heapc_flags. + # There are two ways to do a global resetting of these flags: + # reset() and reset_keep_likely_virtual(). The basic idea is + # to use a version number in each RefFrontendOp, and in order + # to reset the flags globally, we increment the global version + # number in this class. Then when we read '_heapc_flags' we + # also check if the associated version number is up-to-date + # or not. More precisely, we have two global version numbers + # here: 'head_version' and 'likely_virtual_version'. Normally + # we use 'head_version'. For is_likely_virtual() though, we + # use the other, older version number. 
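
# Illustrative sketch (not part of the changeset), plain Python: resetting
# every op's cached flags in O(1) by bumping a global version counter, the
# scheme described in the comment above.  An op whose stored word is older
# than the current version is treated as having no flags set.

class DemoVersions(object):
    VERSION_INC = 0x40                    # flag bits live below this

    def __init__(self):
        self.head_version = 0

    def reset_all(self):
        self.head_version += self.VERSION_INC   # one increment, no walking

    def set_flag(self, op, flag):
        if op.flags < self.head_version:        # stale word: start fresh
            op.flags = self.head_version
        op.flags |= flag

    def test_flag(self, op, flag):
        return op.flags >= self.head_version and bool(op.flags & flag)

class DemoOp(object):
    flags = 0

cache = DemoVersions()
op = DemoOp()
cache.set_flag(op, 0x02)
assert cache.test_flag(op, 0x02)
cache.reset_all()
assert not cache.test_flag(op, 0x02)      # invalidated without touching op
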
+ self.head_version = r_uint(0) + self.likely_virtual_version = r_uint(0) self.reset() def reset(self): - # maps boxes to values - self.values = {} - # store the boxes that contain newly allocated objects, this maps the - # boxes to a bool, the bool indicates whether or not the object has - # escaped the trace or not (True means the box never escaped, False - # means it did escape), its presences in the mapping shows that it was - # allocated inside the trace - #if trace_branch: - #self.new_boxes = {} - # pass - #else: - #for box in self.new_boxes: - # self.new_boxes[box] = False - # pass - #if reset_virtuals: - # self.likely_virtuals = {} # only for jit.isvirtual() - # Tracks which boxes should be marked as escaped when the key box - # escapes. - #self.dependencies = {} - + # Global reset of all flags. Update both version numbers so + # that any access to '_heapc_flags' will be marked as outdated. + assert self.head_version < _HF_VERSION_MAX + self.head_version += _HF_VERSION_INC + self.likely_virtual_version = self.head_version + # # heap cache # maps descrs to CacheEntry self.heap_cache = {} # heap array cache - # maps descrs to {index: {from_value: to_value}} dicts + # maps descrs to {index: CacheEntry} dicts self.heap_array_cache = {} def reset_keep_likely_virtuals(self): - for value in self.values.itervalues(): - value.reset_keep_likely_virtual() + # Update only 'head_version', but 'likely_virtual_version' remains + # at its older value. + assert self.head_version < _HF_VERSION_MAX + self.head_version += _HF_VERSION_INC self.heap_cache = {} self.heap_array_cache = {} - def getvalue(self, box, create=True): - value = self.values.get(box, None) - if not value and create: - value = self.values[box] = HeapCacheValue(box) - return value + @always_inline + def test_head_version(self, ref_frontend_op): + return ref_frontend_op._get_heapc_flags() >= self.head_version - def getvalues(self, boxes): - return [self.getvalue(box) for box in boxes] + @always_inline + def test_likely_virtual_version(self, ref_frontend_op): + return ref_frontend_op._get_heapc_flags() >= self.likely_virtual_version + + def update_version(self, ref_frontend_op): + """Ensure the version of 'ref_frontend_op' is current. If not, + it will update 'ref_frontend_op' (removing most flags currently set). 
+ """ + if not self.test_head_version(ref_frontend_op): + f = self.head_version + if (self.test_likely_virtual_version(ref_frontend_op) and + test_flags(ref_frontend_op, HF_LIKELY_VIRTUAL)): + f |= HF_LIKELY_VIRTUAL + ref_frontend_op._set_heapc_flags(f) + ref_frontend_op._heapc_deps = None def invalidate_caches(self, opnum, descr, argboxes): self.mark_escaped(opnum, descr, argboxes) self.clear_caches(opnum, descr, argboxes) def _escape_from_write(self, box, fieldbox): - value = self.getvalue(box, create=False) - fieldvalue = self.getvalue(fieldbox, create=False) - if (value is not None and value.is_unescaped and - fieldvalue is not None and fieldvalue.is_unescaped): - if value.dependencies is None: - value.dependencies = [] - value.dependencies.append(fieldvalue) - elif fieldvalue is not None: - self._escape(fieldvalue) + if self.is_unescaped(box) and self.is_unescaped(fieldbox): + deps = self._get_deps(box) + deps.append(fieldbox) + elif fieldbox is not None: + self._escape_box(fieldbox) def mark_escaped(self, opnum, descr, argboxes): if opnum == rop.SETFIELD_GC: @@ -176,19 +227,20 @@ self._escape_box(box) def _escape_box(self, box): - value = self.getvalue(box, create=False) - if not value: - return - self._escape(value) - - def _escape(self, value): - value.is_unescaped = False - value.likely_virtual = False - deps = value.dependencies - value.dependencies = None - if deps is not None: - for dep in deps: - self._escape(dep) + if isinstance(box, RefFrontendOp): + remove_flags(box, HF_LIKELY_VIRTUAL | HF_IS_UNESCAPED) + deps = box._heapc_deps + if deps is not None: + if not self.test_head_version(box): + box._heapc_deps = None + else: + # 'deps[0]' is abused to store the array length, keep it + if deps[0] is None: + box._heapc_deps = None + else: + box._heapc_deps = [deps[0]] + for i in range(1, len(deps)): + self._escape_box(deps[i]) def clear_caches(self, opnum, descr, argboxes): if (opnum == rop.SETFIELD_GC or @@ -241,7 +293,8 @@ self.reset_keep_likely_virtuals() def _clear_caches_arraycopy(self, opnum, desrc, argboxes, effectinfo): - seen_allocation_of_target = self.getvalue(argboxes[2]).seen_allocation + seen_allocation_of_target = self._check_flag( + argboxes[2], HF_SEEN_ALLOCATION) if ( isinstance(argboxes[3], ConstInt) and isinstance(argboxes[4], ConstInt) and @@ -285,74 +338,82 @@ return self.reset_keep_likely_virtuals() + def _get_deps(self, box): + if not isinstance(box, RefFrontendOp): + return None + self.update_version(box) + if box._heapc_deps is None: + box._heapc_deps = [None] + return box._heapc_deps + + def _check_flag(self, box, flag): + return (isinstance(box, RefFrontendOp) and + self.test_head_version(box) and + test_flags(box, flag)) + + def _set_flag(self, box, flag): + assert isinstance(box, RefFrontendOp) + self.update_version(box) + add_flags(box, flag) + def is_class_known(self, box): From pypy.commits at gmail.com Wed Mar 30 13:10:29 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 30 Mar 2016 10:10:29 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: test for PyString_Concat with unicode, passes with -A, we return NULL Message-ID: <56fc0885.6718c20a.6cb4b.ffffad69@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83440:250e674e08c8 Date: 2016-03-30 20:08 +0300 http://bitbucket.org/pypy/pypy/changeset/250e674e08c8/ Log: test for PyString_Concat with unicode, passes with -A, we return NULL diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py 
+++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -1,3 +1,4 @@ +# encoding: utf-8 from rpython.rtyper.lltypesystem import rffi, lltype from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase @@ -130,15 +131,29 @@ s = module.getstring() assert s == 'test' - def test_py_string_as_string(self): + def test_manipulations(self): module = self.import_extension('foo', [ ("string_as_string", "METH_VARARGS", ''' return PyString_FromStringAndSize(PyString_AsString( PyTuple_GetItem(args, 0)), 4); ''' - )]) + ), + ("concat", "METH_VARARGS", + """ + PyObject ** v; + PyObject * left = PyTuple_GetItem(args, 0); + v = &left; + PyString_Concat(v, PyTuple_GetItem(args, 1)); + return *v; + """)]) assert module.string_as_string("huheduwe") == "huhe" + ret = module.concat('abc', 'def') + assert ret == 'abcdef' + ret = module.concat('abc', u'def') + assert not isinstance(ret, str) + assert isinstance(ret, unicode) + assert ret == 'abcdef' def test_py_string_as_string_None(self): module = self.import_extension('foo', [ From pypy.commits at gmail.com Wed Mar 30 13:10:31 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 30 Mar 2016 10:10:31 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: cannot raise NotImplemented on a CANNOT_FAIL function that returns a pointer Message-ID: <56fc0887.84c9c20a.f9baa.ffffa7cd@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83441:f52a0d372df2 Date: 2016-03-30 20:09 +0300 http://bitbucket.org/pypy/pypy/changeset/f52a0d372df2/ Log: cannot raise NotImplemented on a CANNOT_FAIL function that returns a pointer diff --git a/pypy/module/cpyext/memoryobject.py b/pypy/module/cpyext/memoryobject.py --- a/pypy/module/cpyext/memoryobject.py +++ b/pypy/module/cpyext/memoryobject.py @@ -1,4 +1,5 @@ -from pypy.module.cpyext.api import cpython_api, Py_buffer, build_type_checkers +from pypy.module.cpyext.api import (cpython_api, Py_buffer, CANNOT_FAIL, + build_type_checkers) from pypy.module.cpyext.pyobject import PyObject from rpython.rtyper.lltypesystem import lltype @@ -13,11 +14,27 @@ # return the obj field of the Py_buffer created by PyMemoryView_GET_BUFFER raise NotImplementedError - at cpython_api([PyObject], lltype.Ptr(Py_buffer)) -def PyMemoryView_GET_BUFFER(space, obj): + at cpython_api([PyObject], lltype.Ptr(Py_buffer), error=CANNOT_FAIL) +def PyMemoryView_GET_BUFFER(space, w_obj): """Return a pointer to the buffer-info structure wrapped by the given object. 
The object must be a memoryview instance; this macro doesn't check its type, you must do it yourself or you will risk crashes.""" - raise NotImplementedError + view = lltype.malloc(Py_buffer, flavor='raw', zero=True) + # TODO - fill in fields + ''' + view.c_buf = buf + view.c_len = length + view.c_obj = obj + Py_IncRef(space, obj) + view.c_itemsize = 1 + rffi.setintfield(view, 'c_readonly', readonly) + rffi.setintfield(view, 'c_ndim', 0) + view.c_format = lltype.nullptr(rffi.CCHARP.TO) + view.c_shape = lltype.nullptr(Py_ssize_tP.TO) + view.c_strides = lltype.nullptr(Py_ssize_tP.TO) + view.c_suboffsets = lltype.nullptr(Py_ssize_tP.TO) + view.c_internal = lltype.nullptr(rffi.VOIDP.TO) + ''' + return view From pypy.commits at gmail.com Wed Mar 30 16:57:33 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 30 Mar 2016 13:57:33 -0700 (PDT) Subject: [pypy-commit] pypy win32-lib-name: close branch to be merged Message-ID: <56fc3dbd.e6ebc20a.8bea2.fffff969@mx.google.com> Author: mattip Branch: win32-lib-name Changeset: r83444:8be9640a38c1 Date: 2016-03-30 23:54 +0300 http://bitbucket.org/pypy/pypy/changeset/8be9640a38c1/ Log: close branch to be merged From pypy.commits at gmail.com Wed Mar 30 16:57:31 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 30 Mar 2016 13:57:31 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: fix PyString_Concat Message-ID: <56fc3dbb.933f1c0a.452a8.fffffa07@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83443:06a3e1333572 Date: 2016-03-30 23:53 +0300 http://bitbucket.org/pypy/pypy/changeset/06a3e1333572/ Log: fix PyString_Concat diff --git a/pypy/module/cpyext/bytesobject.py b/pypy/module/cpyext/bytesobject.py --- a/pypy/module/cpyext/bytesobject.py +++ b/pypy/module/cpyext/bytesobject.py @@ -6,7 +6,7 @@ from pypy.module.cpyext.pyerrors import PyErr_BadArgument from pypy.module.cpyext.pyobject import ( PyObject, PyObjectP, Py_DecRef, make_ref, from_ref, track_reference, - make_typedescr, get_typedescr, as_pyobj) + make_typedescr, get_typedescr, as_pyobj, Py_IncRef) ## ## Implementation of PyStringObject @@ -244,15 +244,16 @@ if not ref[0]: return - if w_newpart is None or not PyString_Check(space, ref[0]) or \ - not PyString_Check(space, w_newpart): + if w_newpart is None or not PyString_Check(space, ref[0]) or not \ + (space.isinstance_w(w_newpart, space.w_str) or + space.isinstance_w(w_newpart, space.w_unicode)): Py_DecRef(space, ref[0]) ref[0] = lltype.nullptr(PyObject.TO) return w_str = from_ref(space, ref[0]) w_newstr = space.add(w_str, w_newpart) - Py_DecRef(space, ref[0]) ref[0] = make_ref(space, w_newstr) + Py_IncRef(space, ref[0]) @cpython_api([PyObjectP, PyObject], lltype.Void) def PyString_ConcatAndDel(space, ref, newpart): From pypy.commits at gmail.com Wed Mar 30 16:57:35 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 30 Mar 2016 13:57:35 -0700 (PDT) Subject: [pypy-commit] pypy default: merge branch which fixes translation when cwd != pypy/goal Message-ID: <56fc3dbf.4a811c0a.ba8c2.3043@mx.google.com> Author: mattip Branch: Changeset: r83445:cfbb442ae368 Date: 2016-03-30 23:56 +0300 http://bitbucket.org/pypy/pypy/changeset/cfbb442ae368/ Log: merge branch which fixes translation when cwd != pypy/goal diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -31,4 +31,7 @@ .. 
branch: jit-leaner-frontend Improve the tracing speed in the frontend as well as heapcache by using a more compact representation -of traces \ No newline at end of file +of traces + +.. branch: win32-lib-name + diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -240,8 +240,9 @@ "when --shared is on (it is by default). " "See issue #1971.") if sys.platform == 'win32': - config.translation.libname = '..\\..\\libs\\python27.lib' - thisdir.join('..', '..', 'libs').ensure(dir=1) + libdir = thisdir.join('..', '..', 'libs') + libdir.ensure(dir=1) + config.translation.libname = str(libdir.join('python27.lib')) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/rpython/translator/driver.py b/rpython/translator/driver.py --- a/rpython/translator/driver.py +++ b/rpython/translator/driver.py @@ -490,10 +490,12 @@ # for pypy, the import library is renamed and moved to # libs/python27.lib, according to the pragma in pyconfig.h libname = self.config.translation.libname - libname = libname or soname.new(ext='lib').basename - libname = str(newsoname.dirpath().join(libname)) - shutil.copyfile(str(soname.new(ext='lib')), libname) - self.log.info("copied: %s" % (libname,)) + oldlibname = soname.new(ext='lib') + if not libname: + libname = oldlibname.basename + libname = str(newsoname.dirpath().join(libname)) + shutil.copyfile(str(oldlibname), libname) + self.log.info("copied: %s to %s" % (oldlibname, libname,)) # the pdb file goes in the same place as pypy(w).exe ext_to_copy = ['pdb',] for ext in ext_to_copy: From pypy.commits at gmail.com Wed Mar 30 16:57:29 2016 From: pypy.commits at gmail.com (mattip) Date: Wed, 30 Mar 2016 13:57:29 -0700 (PDT) Subject: [pypy-commit] pypy cpyext-ext: fix tests for -A Message-ID: <56fc3db9.cf0b1c0a.7ca46.2f1b@mx.google.com> Author: mattip Branch: cpyext-ext Changeset: r83442:425af52a45be Date: 2016-03-30 23:52 +0300 http://bitbucket.org/pypy/pypy/changeset/425af52a45be/ Log: fix tests for -A diff --git a/pypy/module/cpyext/test/test_bytesobject.py b/pypy/module/cpyext/test/test_bytesobject.py --- a/pypy/module/cpyext/test/test_bytesobject.py +++ b/pypy/module/cpyext/test/test_bytesobject.py @@ -33,12 +33,14 @@ } #ifdef PYPY_VERSION expected_size = sizeof(void*)*7; + #elif defined Py_DEBUG + expected_size = 53; #else expected_size = 37; #endif if(s->ob_type->tp_basicsize != expected_size) { - printf("tp_basicsize==%d\\n", s->ob_type->tp_basicsize); + printf("tp_basicsize==%ld\\n", s->ob_type->tp_basicsize); result = 0; } Py_DECREF(s); diff --git a/pypy/module/cpyext/test/test_object.py b/pypy/module/cpyext/test/test_object.py --- a/pypy/module/cpyext/test/test_object.py +++ b/pypy/module/cpyext/test/test_object.py @@ -241,13 +241,14 @@ copy = PyObject_REALLOC(orig, 15); if (copy == NULL) Py_RETURN_NONE; - ret = PyString_FromString(copy, 12); + ret = PyString_FromStringAndSize(copy, 12); + if (copy != orig) + PyObject_Free(copy); PyObject_Free(orig); - PyObject_Free(copy); return ret; """)]) x = module.realloctest() - assert x == 'hello world' + assert x == 'hello world\x00' def test_TypeCheck(self): module = self.import_extension('foo', [ @@ -430,7 +431,7 @@ PyBuffer_Release(&buf); Py_RETURN_NONE; """)]) - raises(ValueError, module.fillinfo) + raises((BufferError, ValueError), module.fillinfo) class AppTestPyBuffer_Release(AppTestCpythonExtensionBase): From pypy.commits at gmail.com Thu Mar 31 04:06:39 2016 From: pypy.commits at 
gmail.com (fijal) Date: Thu, 31 Mar 2016 01:06:39 -0700 (PDT) Subject: [pypy-commit] pypy default: Merge branch that removes escaping logic in the executioncontext. Message-ID: <56fcda8f.47afc20a.4465b.ffff9046@mx.google.com> Author: fijal Branch: Changeset: r83448:80063f19e933 Date: 2016-03-31 10:05 +0200 http://bitbucket.org/pypy/pypy/changeset/80063f19e933/ Log: Merge branch that removes escaping logic in the executioncontext. Since introduction of jitframes, we *can* reconstruct the virtual state much later in the program execution, after the C stack is gone. diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -278,17 +278,9 @@ def get_traceback(self): """Calling this marks the PyTraceback as escaped, i.e. it becomes - accessible and inspectable by app-level Python code. For the JIT. - Note that this has no effect if there are already several traceback - frames recorded, because in this case they are already marked as - escaping by executioncontext.leave() being called with - got_exception=True. + accessible and inspectable by app-level Python code. """ - from pypy.interpreter.pytraceback import PyTraceback - tb = self._application_traceback - if tb is not None and isinstance(tb, PyTraceback): - tb.frame.mark_as_escaped() - return tb + return self._application_traceback def set_traceback(self, traceback): """Set the current traceback. It should either be a traceback diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -74,15 +74,6 @@ finally: frame_vref = self.topframeref self.topframeref = frame.f_backref - if frame.escaped or got_exception: - # if this frame escaped to applevel, we must ensure that also - # f_back does - f_back = frame.f_backref() - if f_back: - f_back.mark_as_escaped() - # force the frame (from the JIT point of view), so that it can - # be accessed also later - frame_vref() jit.virtual_ref_finish(frame_vref, frame) # ________________________________________________________________ diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -65,7 +65,6 @@ last_exception = None f_backref = jit.vref_None - escaped = False # see mark_as_escaped() debugdata = None pycode = None # code object executed by that frame @@ -152,15 +151,6 @@ assert isinstance(cell, Cell) return cell - def mark_as_escaped(self): - """ - Must be called on frames that are exposed to applevel, e.g. by - sys._getframe(). This ensures that the virtualref holding the frame - is properly forced by ec.leave(), and thus the frame will be still - accessible even after the corresponding C stack died. 
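
# Illustrative sketch (not part of the changeset), plain Python: the
# enter/leave discipline around virtual_ref that remains once the
# escaping/forcing logic above is removed.  With jitframes the frame can be
# reconstructed later on demand, so leave() only finishes the vref; a vref
# that was never forced is simply forgotten.

class DemoVRef(object):
    def __init__(self, x):
        self._x = x
        self._state = 'non-forced'
    def __call__(self):                  # forcing, allowed at any time
        if self._state == 'non-forced':
            self._state = 'forced'
        return self._x
    def _finish(self):
        if self._state == 'non-forced':
            self._state = 'forgotten'

class DemoFrame(object):
    f_backref = None

class DemoEC(object):
    def __init__(self):
        self.topframeref = None
    def enter(self, frame):
        frame.f_backref = self.topframeref
        self.topframeref = DemoVRef(frame)
    def leave(self, frame):
        frame_vref = self.topframeref
        self.topframeref = frame.f_backref
        frame_vref._finish()             # no unconditional forcing any more

ec = DemoEC()
f = DemoFrame()
ec.enter(f)
ec.leave(f)
assert ec.topframeref is None
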
- """ - self.escaped = True - def append_block(self, block): assert block.previous is self.lastblock self.lastblock = block diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -37,7 +37,6 @@ raise OperationError(space.w_ValueError, space.wrap("call stack is not deep enough")) if depth == 0: - f.mark_as_escaped() return space.wrap(f) depth -= 1 f = ec.getnextframe_nohidden(f) diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -475,8 +475,6 @@ def __call__(self): if self._state == 'non-forced': self._state = 'forced' - elif self._state == 'invalid': - raise InvalidVirtualRef return self._x @property @@ -487,7 +485,7 @@ def _finish(self): if self._state == 'non-forced': - self._state = 'invalid' + self._state = 'forgotten' class DirectJitVRef(DirectVRef): def __init__(self, x): From pypy.commits at gmail.com Thu Mar 31 04:06:34 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 31 Mar 2016 01:06:34 -0700 (PDT) Subject: [pypy-commit] pypy remove-frame-forcing-in-executioncontext: fix the emulator, I think Message-ID: <56fcda8a.d3921c0a.5ef6b.ffff95ff@mx.google.com> Author: fijal Branch: remove-frame-forcing-in-executioncontext Changeset: r83446:649acd9c24ea Date: 2016-03-31 10:04 +0200 http://bitbucket.org/pypy/pypy/changeset/649acd9c24ea/ Log: fix the emulator, I think diff --git a/rpython/rlib/jit.py b/rpython/rlib/jit.py --- a/rpython/rlib/jit.py +++ b/rpython/rlib/jit.py @@ -475,8 +475,6 @@ def __call__(self): if self._state == 'non-forced': self._state = 'forced' - elif self._state == 'invalid': - raise InvalidVirtualRef return self._x @property @@ -487,7 +485,7 @@ def _finish(self): if self._state == 'non-forced': - self._state = 'invalid' + self._state = 'forgotten' class DirectJitVRef(DirectVRef): def __init__(self, x): From pypy.commits at gmail.com Thu Mar 31 04:06:37 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 31 Mar 2016 01:06:37 -0700 (PDT) Subject: [pypy-commit] pypy remove-frame-forcing-in-executioncontext: close to be merged branch Message-ID: <56fcda8d.8216c20a.6ce2f.ffff903d@mx.google.com> Author: fijal Branch: remove-frame-forcing-in-executioncontext Changeset: r83447:6e04e357200c Date: 2016-03-31 10:04 +0200 http://bitbucket.org/pypy/pypy/changeset/6e04e357200c/ Log: close to be merged branch From pypy.commits at gmail.com Thu Mar 31 04:08:04 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 31 Mar 2016 01:08:04 -0700 (PDT) Subject: [pypy-commit] pypy default: Issue #2230: _anonymous_ fields were tested at the level of classes, but Message-ID: <56fcdae4.10921c0a.12120.ffffc75d@mx.google.com> Author: Armin Rigo Branch: Changeset: r83449:3d53b5735ff2 Date: 2016-03-31 10:07 +0200 http://bitbucket.org/pypy/pypy/changeset/3d53b5735ff2/ Log: Issue #2230: _anonymous_ fields were tested at the level of classes, but didn't work on instances diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -67,7 +67,8 @@ subvalue = subfield.ctype fields[subname] = Field(subname, relpos, subvalue._sizeofinstances(), - subvalue, i, is_bitfield) + subvalue, i, is_bitfield, + inside_anon_field=fields[name]) else: resnames.append(name) names = resnames @@ -77,13 +78,15 @@ class Field(object): - def __init__(self, name, offset, size, ctype, num, is_bitfield): + def __init__(self, name, offset, size, ctype, num, is_bitfield, + inside_anon_field=None): 
self.__dict__['name'] = name self.__dict__['offset'] = offset self.__dict__['size'] = size self.__dict__['ctype'] = ctype self.__dict__['num'] = num self.__dict__['is_bitfield'] = is_bitfield + self.__dict__['inside_anon_field'] = inside_anon_field def __setattr__(self, name, value): raise AttributeError(name) @@ -95,6 +98,8 @@ def __get__(self, obj, cls=None): if obj is None: return self + if self.inside_anon_field is not None: + return getattr(self.inside_anon_field.__get__(obj), self.name) if self.is_bitfield: # bitfield member, use direct access return obj._buffer.__getattr__(self.name) @@ -105,6 +110,9 @@ return fieldtype._CData_output(suba, obj, offset) def __set__(self, obj, value): + if self.inside_anon_field is not None: + setattr(self.inside_anon_field.__get__(obj), self.name, value) + return fieldtype = self.ctype cobj = fieldtype.from_param(value) key = keepalive_key(self.num) diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py b/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py @@ -57,3 +57,32 @@ assert Y.y.offset == sizeof(c_int) * 2 assert Y._names_ == ['x', 'a', 'b', 'y'] + + def test_anonymous_fields_on_instance(self): + # this is about the *instance-level* access of anonymous fields, + # which you'd guess is the most common, but used not to work + # (issue #2230) + + class B(Structure): + _fields_ = [("x", c_int), ("y", c_int), ("z", c_int)] + class A(Structure): + _anonymous_ = ["b"] + _fields_ = [("b", B)] + + a = A() + a.x = 5 + assert a.x == 5 + assert a.b.x == 5 + a.b.x += 1 + assert a.x == 6 + + class C(Structure): + _anonymous_ = ["a"] + _fields_ = [("v", c_int), ("a", A)] + + c = C() + c.v = 3 + c.y = -8 + assert c.v == 3 + assert c.y == c.a.y == c.a.b.y == -8 + assert not hasattr(c, 'b') From pypy.commits at gmail.com Thu Mar 31 04:12:04 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 31 Mar 2016 01:12:04 -0700 (PDT) Subject: [pypy-commit] pypy default: Kill outdated comments Message-ID: <56fcdbd4.455e1c0a.ee09e.ffff985a@mx.google.com> Author: Armin Rigo Branch: Changeset: r83450:2f0cad6d0069 Date: 2016-03-31 10:11 +0200 http://bitbucket.org/pypy/pypy/changeset/2f0cad6d0069/ Log: Kill outdated comments diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -277,19 +277,12 @@ raise NotImplementedError def get_traceback(self): - """Calling this marks the PyTraceback as escaped, i.e. it becomes - accessible and inspectable by app-level Python code. + """Get the PyTraceback object, for app-level Python code. """ return self._application_traceback def set_traceback(self, traceback): - """Set the current traceback. It should either be a traceback - pointing to some already-escaped frame, or a traceback for the - current frame. To support the latter case we do not mark the - frame as escaped. The idea is that it will be marked as escaping - only if the exception really propagates out of this frame, by - executioncontext.leave() being called with got_exception=True. 
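
# Illustrative sketch (not part of the changeset): the user-visible
# behaviour restored by the Field changes above -- attribute access on an
# *instance* reaching through an anonymous sub-structure (issue #2230).
# Uses only the standard ctypes API.

from ctypes import Structure, c_int

class B(Structure):
    _fields_ = [("x", c_int), ("y", c_int)]

class A(Structure):
    _anonymous_ = ["b"]
    _fields_ = [("b", B)]

a = A()
a.x = 5                  # delegated to the anonymous field 'b'
assert a.b.x == 5
a.b.x += 1
assert a.x == 6
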
- """ + """Set the current traceback.""" self._application_traceback = traceback From pypy.commits at gmail.com Thu Mar 31 06:03:09 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 31 Mar 2016 03:03:09 -0700 (PDT) Subject: [pypy-commit] pypy jit-constptr-2: A branch in which to play again with removing the minor collection needed to make ConstPtrs non-movable Message-ID: <56fcf5dd.c818c20a.d27e2.ffffbb07@mx.google.com> Author: Armin Rigo Branch: jit-constptr-2 Changeset: r83451:f350a149824b Date: 2016-03-31 09:48 +0100 http://bitbucket.org/pypy/pypy/changeset/f350a149824b/ Log: A branch in which to play again with removing the minor collection needed to make ConstPtrs non-movable From pypy.commits at gmail.com Thu Mar 31 06:03:11 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 31 Mar 2016 03:03:11 -0700 (PDT) Subject: [pypy-commit] pypy jit-constptr-2: (arigo, fijal around) Message-ID: <56fcf5df.41e11c0a.d0074.ffffeb1e@mx.google.com> Author: Armin Rigo Branch: jit-constptr-2 Changeset: r83452:cc9cdb30a812 Date: 2016-03-31 11:07 +0100 http://bitbucket.org/pypy/pypy/changeset/cc9cdb30a812/ Log: (arigo, fijal around) Add a LOAD_FROM_GC_TABLE operation, emitted by rewrite.py. It should be a replacement for gc.py's _record_constptr. diff --git a/rpython/jit/backend/llsupport/gc.py b/rpython/jit/backend/llsupport/gc.py --- a/rpython/jit/backend/llsupport/gc.py +++ b/rpython/jit/backend/llsupport/gc.py @@ -137,7 +137,7 @@ v = op.getarg(i) if isinstance(v, ConstPtr) and bool(v.value): p = v.value - if rgc._make_sure_does_not_move(p): + if not rgc.can_move(p): gcrefs_output_list.append(p) else: if l is None: @@ -177,7 +177,10 @@ def rewrite_assembler(self, cpu, operations, gcrefs_output_list): rewriter = GcRewriterAssembler(self, cpu) - newops = rewriter.rewrite(operations) + newops = rewriter.rewrite(operations, gcrefs_output_list) + return newops + + XXX # kill the rest # the key is an operation that contains a ConstPtr as an argument and # this ConstPtrs pointer might change as it points to an object that diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -97,6 +97,8 @@ for i in range(op.numargs()): orig_arg = op.getarg(i) arg = self.get_box_replacement(orig_arg) + if isinstance(arg, ConstPtr) and bool(arg.value): + arg = self.remove_constptr(arg) if orig_arg is not arg: if not replaced: op = op.copy_and_change(op.getopnum()) @@ -304,13 +306,15 @@ return False - def rewrite(self, operations): + def rewrite(self, operations, gcrefs_output_list): # we can only remember one malloc since the next malloc can possibly # collect; but we can try to collapse several known-size mallocs into # one, both for performance and to reduce the number of write # barriers. We do this on each "basic block" of operations, which in # this case means between CALLs or unknown-size mallocs. 
# + self.gcrefs_output_list = gcrefs_output_list + self.gcrefs_map = {} operations = self.remove_bridge_exception(operations) self._changed_op = None for i in range(len(operations)): @@ -940,3 +944,29 @@ operations[start+2].getopnum() == rop.RESTORE_EXCEPTION): return operations[:start] + operations[start+3:] return operations + + def _gcref_index(self, gcref): + if we_are_translated(): + # can only use the dictionary after translation + try: + return self.gcrefs_map[gcref] + except KeyError: + pass + index = len(self.gcrefs_output_list) + self.gcrefs_map[gcref] = index + else: + # untranslated: linear scan + for i, gcref1 in enumerate(self.gcrefs_output_list): + if gcref == gcref1: + return i + index = len(self.gcrefs_output_list) + self.gcrefs_output_list.append(gcref) + return index + + def remove_constptr(self, c): + """Remove all ConstPtrs, and replace them with load_from_gc_table. + """ + index = self._gcref_index(c.value) + load_op = ResOperation(rop.LOAD_FROM_GC_TABLE, [ConstInt(index)]) + self._newops.append(load_op) + return load_op diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -10,7 +10,7 @@ from rpython.jit.metainterp.optimizeopt.util import equaloplists from rpython.jit.metainterp.history import JitCellToken, FLOAT from rpython.jit.metainterp.history import AbstractFailDescr -from rpython.rtyper.lltypesystem import lltype, rffi +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.rtyper import rclass from rpython.jit.backend.x86.arch import WORD from rpython.jit.backend.llsupport.symbolic import (WORD, @@ -77,6 +77,8 @@ tdescr = get_size_descr(self.gc_ll_descr, T) tdescr.tid = 5678 tzdescr = get_field_descr(self.gc_ll_descr, T, 'z') + myT = lltype.cast_opaque_ptr(llmemory.GCREF, + lltype.malloc(T, zero=True)) # A = lltype.GcArray(lltype.Signed) adescr = get_array_descr(self.gc_ll_descr, A) @@ -112,6 +114,8 @@ xdescr = get_field_descr(self.gc_ll_descr, R1, 'x') ydescr = get_field_descr(self.gc_ll_descr, R1, 'y') zdescr = get_field_descr(self.gc_ll_descr, R1, 'z') + myR1 = lltype.cast_opaque_ptr(llmemory.GCREF, + lltype.malloc(R1, zero=True)) # E = lltype.GcStruct('Empty') edescr = get_size_descr(self.gc_ll_descr, E) @@ -1281,3 +1285,59 @@ {t} jump() """.format(**locals())) + + def test_load_from_gc_table_1i(self): + self.check_rewrite(""" + [i1] + setfield_gc(ConstPtr(myR1), i1, descr=xdescr) + jump() + """, """ + [i1] + p0 = load_from_gc_table(0) + gc_store(p0, %(xdescr.offset)s, i1, %(xdescr.field_size)s) + jump() + """) + + def test_load_from_gc_table_1p(self): + self.check_rewrite(""" + [p1] + setfield_gc(ConstPtr(myT), p1, descr=tzdescr) + jump() + """, """ + [i1] + p0 = load_from_gc_table(0) + cond_call_gc_wb(p0, descr=wbdescr) + gc_store(p0, %(tzdescr.offset)s, i1, %(tzdescr.field_size)s) + jump() + """) + + def test_load_from_gc_table_2(self): + self.check_rewrite(""" + [i1, f2] + setfield_gc(ConstPtr(myR1), i1, descr=xdescr) + setfield_gc(ConstPtr(myR1), f2, descr=ydescr) + jump() + """, """ + [i1, f2] + p0 = load_from_gc_table(0) + gc_store(p0, %(xdescr.offset)s, i1, %(xdescr.field_size)s) + gc_store(p0, %(ydescr.offset)s, f2, %(ydescr.field_size)s) + jump() + """) + + def test_load_from_gc_table_3(self): + self.check_rewrite(""" + [i1, f2] + setfield_gc(ConstPtr(myR1), i1, descr=xdescr) + label(f2) + setfield_gc(ConstPtr(myR1), f2, descr=ydescr) + jump() + 
""", """ + [i1, f2] + p0 = load_from_gc_table(0) + gc_store(p0, %(xdescr.offset)s, i1, %(xdescr.field_size)s) + label(f2) + p1 = load_from_gc_table(0) + gc_store(p1, %(ydescr.offset)s, f2, %(ydescr.field_size)s) + jump() + """) diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -408,6 +408,7 @@ rop.GC_LOAD_INDEXED_R, rop.GC_STORE, rop.GC_STORE_INDEXED, + rop.LOAD_FROM_GC_TABLE, ): # list of opcodes never executed by pyjitpl continue if rop._VEC_PURE_FIRST <= value <= rop._VEC_PURE_LAST: diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py --- a/rpython/jit/metainterp/resoperation.py +++ b/rpython/jit/metainterp/resoperation.py @@ -1056,6 +1056,8 @@ 'UNICODELEN/1/i', 'UNICODEGETITEM/2/i', # + 'LOAD_FROM_GC_TABLE/1/r', # only emitted by rewrite.py + # '_ALWAYS_PURE_LAST', # ----- end of always_pure operations ----- # parameters GC_LOAD From pypy.commits at gmail.com Thu Mar 31 06:17:38 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 31 Mar 2016 03:17:38 -0700 (PDT) Subject: [pypy-commit] pypy jit-constptr-2: Cache LOAD_FROM_GC_TABLE, passing tests added in cc9cdb30a812 Message-ID: <56fcf942.03dd1c0a.afdc9.ffffb61b@mx.google.com> Author: Armin Rigo Branch: jit-constptr-2 Changeset: r83453:85ae1950770a Date: 2016-03-31 11:21 +0100 http://bitbucket.org/pypy/pypy/changeset/85ae1950770a/ Log: Cache LOAD_FROM_GC_TABLE, passing tests added in cc9cdb30a812 diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -315,6 +315,7 @@ # self.gcrefs_output_list = gcrefs_output_list self.gcrefs_map = {} + self.gcrefs_recently_loaded = {} operations = self.remove_bridge_exception(operations) self._changed_op = None for i in range(len(operations)): @@ -337,8 +338,7 @@ elif rop.can_malloc(op.opnum): self.emitting_an_operation_that_can_collect() elif op.getopnum() == rop.LABEL: - self.emitting_an_operation_that_can_collect() - self._known_lengths.clear() + self.emit_label() # ---------- write barriers ---------- if self.gc_ll_descr.write_barrier_descr is not None: if op.getopnum() == rop.SETFIELD_GC: @@ -945,6 +945,11 @@ return operations[:start] + operations[start+3:] return operations + def emit_label(self): + self.emitting_an_operation_that_can_collect() + self._known_lengths.clear() + self.gcrefs_recently_loaded.clear() + def _gcref_index(self, gcref): if we_are_translated(): # can only use the dictionary after translation @@ -966,7 +971,14 @@ def remove_constptr(self, c): """Remove all ConstPtrs, and replace them with load_from_gc_table. """ + # Note: currently, gcrefs_recently_loaded is only cleared in + # LABELs. We'd like something better, like "don't spill it", + # but that's the wrong level... 
index = self._gcref_index(c.value) - load_op = ResOperation(rop.LOAD_FROM_GC_TABLE, [ConstInt(index)]) - self._newops.append(load_op) + try: + load_op = self.gcrefs_recently_loaded[index] + except KeyError: + load_op = ResOperation(rop.LOAD_FROM_GC_TABLE, [ConstInt(index)]) + self._newops.append(load_op) + self.gcrefs_recently_loaded[index] = load_op return load_op From pypy.commits at gmail.com Thu Mar 31 06:42:31 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 31 Mar 2016 03:42:31 -0700 (PDT) Subject: [pypy-commit] pypy jit-constptr-2: more tests, and store the descr of guards and finish operations Message-ID: <56fcff17.e853c20a.600f.ffffcf16@mx.google.com> Author: Armin Rigo Branch: jit-constptr-2 Changeset: r83454:e7787f9361a4 Date: 2016-03-31 11:47 +0100 http://bitbucket.org/pypy/pypy/changeset/e7787f9361a4/ Log: more tests, and store the descr of guards and finish operations diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -2,6 +2,7 @@ from rpython.rlib.objectmodel import we_are_translated from rpython.rlib.rarithmetic import ovfcheck, highest_bit from rpython.rtyper.lltypesystem import llmemory, lltype, rstr +from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.jit.metainterp import history from rpython.jit.metainterp.history import ConstInt, ConstPtr from rpython.jit.metainterp.resoperation import ResOperation, rop, OpHelpers @@ -94,6 +95,7 @@ op = self.get_box_replacement(op) orig_op = op replaced = False + opnum = op.getopnum() for i in range(op.numargs()): orig_arg = op.getarg(i) arg = self.get_box_replacement(orig_arg) @@ -101,16 +103,19 @@ arg = self.remove_constptr(arg) if orig_arg is not arg: if not replaced: - op = op.copy_and_change(op.getopnum()) + op = op.copy_and_change(opnum) orig_op.set_forwarded(op) replaced = True op.setarg(i, arg) - if rop.is_guard(op.opnum): + if rop.is_guard(opnum): if not replaced: - op = op.copy_and_change(op.getopnum()) + op = op.copy_and_change(opnum) orig_op.set_forwarded(op) op.setfailargs([self.get_box_replacement(a, True) for a in op.getfailargs()]) + if rop.is_guard(opnum) or opnum == rop.FINISH: + llref = cast_instance_to_gcref(op.getdescr()) + self.gcrefs_output_list.append(llref) self._newops.append(op) def replace_op_with(self, op, newop): diff --git a/rpython/jit/backend/llsupport/test/test_rewrite.py b/rpython/jit/backend/llsupport/test/test_rewrite.py --- a/rpython/jit/backend/llsupport/test/test_rewrite.py +++ b/rpython/jit/backend/llsupport/test/test_rewrite.py @@ -79,6 +79,7 @@ tzdescr = get_field_descr(self.gc_ll_descr, T, 'z') myT = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(T, zero=True)) + self.myT = myT # A = lltype.GcArray(lltype.Signed) adescr = get_array_descr(self.gc_ll_descr, A) @@ -116,6 +117,10 @@ zdescr = get_field_descr(self.gc_ll_descr, R1, 'z') myR1 = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.malloc(R1, zero=True)) + myR1b = lltype.cast_opaque_ptr(llmemory.GCREF, + lltype.malloc(R1, zero=True)) + self.myR1 = myR1 + self.myR1b = myR1b # E = lltype.GcStruct('Empty') edescr = get_size_descr(self.gc_ll_descr, E) @@ -178,9 +183,10 @@ ops = parse(frm_operations, namespace=namespace) expected = parse(to_operations % Evaluator(namespace), namespace=namespace) + self.gcrefs = [] operations = self.gc_ll_descr.rewrite_assembler(self.cpu, ops.operations, - []) + self.gcrefs) remap = {} for a, b in zip(ops.inputargs, expected.inputargs): 
remap[b] = a @@ -1297,6 +1303,7 @@ gc_store(p0, %(xdescr.offset)s, i1, %(xdescr.field_size)s) jump() """) + assert self.gcrefs == [self.myR1] def test_load_from_gc_table_1p(self): self.check_rewrite(""" @@ -1310,6 +1317,7 @@ gc_store(p0, %(tzdescr.offset)s, i1, %(tzdescr.field_size)s) jump() """) + assert self.gcrefs == [self.myT] def test_load_from_gc_table_2(self): self.check_rewrite(""" @@ -1324,6 +1332,7 @@ gc_store(p0, %(ydescr.offset)s, f2, %(ydescr.field_size)s) jump() """) + assert self.gcrefs == [self.myR1] def test_load_from_gc_table_3(self): self.check_rewrite(""" @@ -1341,3 +1350,34 @@ gc_store(p1, %(ydescr.offset)s, f2, %(ydescr.field_size)s) jump() """) + assert self.gcrefs == [self.myR1] + + def test_load_from_gc_table_4(self): + self.check_rewrite(""" + [i1, f2] + setfield_gc(ConstPtr(myR1), i1, descr=xdescr) + setfield_gc(ConstPtr(myR1b), f2, descr=ydescr) + jump() + """, """ + [i1, f2] + p0 = load_from_gc_table(0) + gc_store(p0, %(xdescr.offset)s, i1, %(xdescr.field_size)s) + p1 = load_from_gc_table(1) + gc_store(p1, %(ydescr.offset)s, f2, %(ydescr.field_size)s) + jump() + """) + assert self.gcrefs == [self.myR1, self.myR1b] + + def test_guard_in_gcref(self): + self.check_rewrite(""" + [i1, i2] + guard_true(i1) [] + guard_true(i2) [] + jump() + """, """ + [i1, i2] + guard_true(i1) [] + guard_true(i2) [] + jump() + """) + assert len(self.gcrefs) == 2 From pypy.commits at gmail.com Thu Mar 31 08:31:27 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 31 Mar 2016 05:31:27 -0700 (PDT) Subject: [pypy-commit] pypy jit-constptr-2: (fijal, arigo) Message-ID: <56fd189f.41e11c0a.d0074.27c3@mx.google.com> Author: Armin Rigo Branch: jit-constptr-2 Changeset: r83455:17289fc949d4 Date: 2016-03-31 13:20 +0100 http://bitbucket.org/pypy/pypy/changeset/17289fc949d4/ Log: (fijal, arigo) Start on this object diff --git a/rpython/jit/backend/llsupport/gcreftracer.py b/rpython/jit/backend/llsupport/gcreftracer.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/llsupport/gcreftracer.py @@ -0,0 +1,19 @@ +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.jit.backend.llsupport.symbolic import WORD + + +GCREFTRACER = lltype.GcStruct( + 'GCREFTRACER', + ('array_base_addr', lltype.Signed), + ('array_length', lltype.Signed), + rtti=True) + +def gcrefs_trace(gc, obj_addr, callback, arg): + obj = llmemory.cast_adr_to_ptr(obj_addr, lltype.Ptr(GCREFTRACER)) + i = 0 + length = obj.array_length + addr = obj.array_base_addr + while i < length: + gc._trace_callback(callback, arg, addr + i * WORD) + i += 1 +lambda_gcrefs_trace = lambda: gcrefs_trace diff --git a/rpython/jit/backend/llsupport/test/test_gcreftracer.py b/rpython/jit/backend/llsupport/test/test_gcreftracer.py new file mode 100644 --- /dev/null +++ b/rpython/jit/backend/llsupport/test/test_gcreftracer.py @@ -0,0 +1,28 @@ +from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.jit.backend.llsupport.gcreftracer import GCREFTRACER, gcrefs_trace + + +class FakeGC: + def __init__(self): + self.called = [] + def _trace_callback(self, callback, arg, addr): + assert callback == "callback" + assert arg == "arg" + self.called.append(addr) + + +def test_gcreftracer(): + a = lltype.malloc(rffi.CArray(lltype.Signed), 3, flavor='raw') + a[0] = 123 + a[1] = 456 + a[2] = 789 + tr = lltype.malloc(GCREFTRACER) + tr.array_base_addr = base = rffi.cast(lltype.Signed, a) + tr.array_length = 3 + gc = FakeGC() + gcrefs_trace(gc, llmemory.cast_ptr_to_adr(tr), "callback", "arg") + assert len(gc.called) == 3 
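
# Illustrative sketch (not part of the changeset), plain Python: what
# gcrefs_trace() gives the GC -- one callback per word of the raw array at
# array_base_addr, so every constant pointer stored in the table is visited
# even though the array itself lives outside GC-managed memory.  WORD here
# is an assumed word size for the demo only.

WORD = 8

class DemoTracer(object):
    def __init__(self, base_addr, length):
        self.array_base_addr = base_addr
        self.array_length = length

def demo_trace(tracer, callback):
    i = 0
    while i < tracer.array_length:
        callback(tracer.array_base_addr + i * WORD)
        i += 1

visited = []
demo_trace(DemoTracer(0x1000, 3), visited.append)
assert visited == [0x1000, 0x1008, 0x1010]
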
+ WORD = rffi.sizeof(lltype.Signed) + for i in range(3): + assert gc.called[i] == rffi.cast(llmemory.Address, base + i * WORD) + lltype.free(a, flavor='raw') From pypy.commits at gmail.com Thu Mar 31 08:38:37 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 31 Mar 2016 05:38:37 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: minor changed in the emitted format Message-ID: <56fd1a4d.a151c20a.f09b0.002b@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83457:26c9229ea302 Date: 2016-03-31 14:30 +0200 http://bitbucket.org/pypy/pypy/changeset/26c9229ea302/ Log: minor changed in the emitted format diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -583,7 +583,7 @@ self.patch_pending_failure_recoveries(rawstart) # patch the jump from original guard if logger: - logger.log_patch_guard(faildescr.adr_jump_offset, rawstart) + logger.log_patch_guard(descr_number, rawstart) self.patch_jump_for_descr(faildescr, rawstart) ops_offset = self.mc.ops_offset frame_depth = max(self.current_clt.frame_info.jfi_frame_depth, diff --git a/rpython/jit/metainterp/jitlog.py b/rpython/jit/metainterp/jitlog.py --- a/rpython/jit/metainterp/jitlog.py +++ b/rpython/jit/metainterp/jitlog.py @@ -23,7 +23,7 @@ MARK_TRACE_ASM = 0x18 # the machine code was patched (e.g. guard) -MARK_ASM_PATCH = 0x19 +MARK_STITCH_BRIDGE = 0x19 IS_32_BIT = sys.maxint == 2**31-1 @@ -61,14 +61,13 @@ memo = {} return LogTrace(tag, memo, metainterp_sd, mc, self) - def log_patch_guard(self, addr, target_addr): - if self.cintf.jitlog_filter(MARK_ASM_PATCH): + def log_patch_guard(self, descr_number, addr): + if self.cintf.jitlog_filter(MARK_STITCH_BRIDGE): return - le_addr_write = self.encode_le_addr(addr) - le_len = self.encode_le_32bit(8) - le_addr = self.encode_le_addr(target_addr) - lst = [le_addr, le_len, le_addr] - self.cintf.jitlog_filter(MARK_ASM_PATCH, ''.join(lst)) + le_descr_number = self.encode_le_addr(descr_number) + le_addr = self.encode_le_addr(addr) + lst = [le_descr_number, le_addr] + self.write_marked(MARK_STITCH_BRIDGE, ''.join(lst)) def encode_str(self, string): return self.encode_le_32bit(len(string)) + string @@ -163,7 +162,9 @@ descr_str = descr.repr_of_descr() line = line + ',' + descr_str string = self.logger.encode_str(line) - return MARK_RESOP_DESCR, le_opnum + string + descr_number = compute_unique_id(descr) + le_descr_number = self.logger.encode_le_addr(descr_number) + return MARK_RESOP_DESCR, le_opnum + string + le_descr_number else: string = self.logger.encode_str(line) return MARK_RESOP, le_opnum + string From pypy.commits at gmail.com Thu Mar 31 08:38:39 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 31 Mar 2016 05:38:39 -0700 (PDT) Subject: [pypy-commit] pypy default: is_jit_debug method was moved. fixes crashes on s390x Message-ID: <56fd1a4f.6774c20a.3c35d.fffff57d@mx.google.com> Author: Richard Plangger Branch: Changeset: r83458:f1a599da9f67 Date: 2016-03-31 14:37 +0200 http://bitbucket.org/pypy/pypy/changeset/f1a599da9f67/ Log: is_jit_debug method was moved. 
fixes crashes on s390x diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -33,7 +33,7 @@ def ensure_can_hold_constants(self, asm, op): # allocates 8 bytes in memory for pointers, long integers or floats - if op.is_jit_debug(): + if rop.is_jit_debug(op): return for arg in op.getarglist(): From pypy.commits at gmail.com Thu Mar 31 08:31:28 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 31 Mar 2016 05:31:28 -0700 (PDT) Subject: [pypy-commit] pypy jit-constptr-2: (fijal, arigo) Message-ID: <56fd18a0.83301c0a.5137f.2cf3@mx.google.com> Author: Armin Rigo Branch: jit-constptr-2 Changeset: r83456:7262e1468002 Date: 2016-03-31 13:36 +0100 http://bitbucket.org/pypy/pypy/changeset/7262e1468002/ Log: (fijal, arigo) Creating and freeing gcreftracers diff --git a/rpython/jit/backend/llsupport/gcreftracer.py b/rpython/jit/backend/llsupport/gcreftracer.py --- a/rpython/jit/backend/llsupport/gcreftracer.py +++ b/rpython/jit/backend/llsupport/gcreftracer.py @@ -17,3 +17,21 @@ gc._trace_callback(callback, arg, addr + i * WORD) i += 1 lambda_gcrefs_trace = lambda: gcrefs_trace + +def make_gcref_tracer(array_base_addr, gcrefs): + # careful about the order here: the allocation of the GCREFTRACER + # can trigger a GC. So we must write the gcrefs into the raw + # array only afterwards... + tr = lltype.malloc(GCREFTRACER) + tr.array_base_addr = array_base_addr + tr.array_length = 0 # incremented as we populate the array_base_addr + i = 0 + length = len(gcrefs) + while i < length: + p = rffi.cast(rffi.SIGNEDP, array_base_addr + i * WORD) + # --no GC from here-- + p[0] = rffi.cast(lltype.Signed, gcrefs[i]) + tr.array_length += 1 + # --no GC until here-- + i += 1 + return tr diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -246,6 +246,13 @@ def free_loop_and_bridges(self, compiled_loop_token): AbstractCPU.free_loop_and_bridges(self, compiled_loop_token) + # turn off all gcreftracers + tracers = compiled_loop_token.asmmemmgr_gcreftracers + if tracers is not None: + compiled_loop_token.asmmemmgr_gcreftracers = None + for tracer in tracers: + tracer.array_length = 0 + # then free all blocks of code and raw data blocks = compiled_loop_token.asmmemmgr_blocks if blocks is not None: compiled_loop_token.asmmemmgr_blocks = None diff --git a/rpython/jit/backend/llsupport/test/test_gcreftracer.py b/rpython/jit/backend/llsupport/test/test_gcreftracer.py --- a/rpython/jit/backend/llsupport/test/test_gcreftracer.py +++ b/rpython/jit/backend/llsupport/test/test_gcreftracer.py @@ -1,5 +1,6 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi from rpython.jit.backend.llsupport.gcreftracer import GCREFTRACER, gcrefs_trace +from rpython.jit.backend.llsupport.gcreftracer import make_gcref_tracer class FakeGC: @@ -26,3 +27,14 @@ for i in range(3): assert gc.called[i] == rffi.cast(llmemory.Address, base + i * WORD) lltype.free(a, flavor='raw') + +def test_make_gcref_tracer(): + a = lltype.malloc(rffi.CArray(lltype.Signed), 3, flavor='raw') + base = rffi.cast(lltype.Signed, a) + tr = make_gcref_tracer(base, [123, 456, 789]) + assert a[0] == 123 + assert a[1] == 456 + assert a[2] == 789 + assert tr.array_base_addr == base + assert tr.array_length == 3 + lltype.free(a, flavor='raw') diff --git a/rpython/jit/backend/model.py b/rpython/jit/backend/model.py --- 
a/rpython/jit/backend/model.py +++ b/rpython/jit/backend/model.py @@ -285,7 +285,7 @@ class CompiledLoopToken(object): asmmemmgr_blocks = None - asmmemmgr_gcroots = 0 + asmmemmgr_gcreftracers = None def __init__(self, cpu, number): cpu.tracker.total_compiled_loops += 1 From pypy.commits at gmail.com Thu Mar 31 08:44:20 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 31 Mar 2016 05:44:20 -0700 (PDT) Subject: [pypy-commit] pypy new-jit-log: catchup with default Message-ID: <56fd1ba4.857ac20a.388d.fffffe7c@mx.google.com> Author: Richard Plangger Branch: new-jit-log Changeset: r83459:83bff96fb266 Date: 2016-03-31 14:43 +0200 http://bitbucket.org/pypy/pypy/changeset/83bff96fb266/ Log: catchup with default diff too long, truncating to 2000 out of 9743 lines diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py --- a/lib_pypy/_ctypes/structure.py +++ b/lib_pypy/_ctypes/structure.py @@ -67,7 +67,8 @@ subvalue = subfield.ctype fields[subname] = Field(subname, relpos, subvalue._sizeofinstances(), - subvalue, i, is_bitfield) + subvalue, i, is_bitfield, + inside_anon_field=fields[name]) else: resnames.append(name) names = resnames @@ -77,13 +78,15 @@ class Field(object): - def __init__(self, name, offset, size, ctype, num, is_bitfield): + def __init__(self, name, offset, size, ctype, num, is_bitfield, + inside_anon_field=None): self.__dict__['name'] = name self.__dict__['offset'] = offset self.__dict__['size'] = size self.__dict__['ctype'] = ctype self.__dict__['num'] = num self.__dict__['is_bitfield'] = is_bitfield + self.__dict__['inside_anon_field'] = inside_anon_field def __setattr__(self, name, value): raise AttributeError(name) @@ -95,6 +98,8 @@ def __get__(self, obj, cls=None): if obj is None: return self + if self.inside_anon_field is not None: + return getattr(self.inside_anon_field.__get__(obj), self.name) if self.is_bitfield: # bitfield member, use direct access return obj._buffer.__getattr__(self.name) @@ -105,6 +110,9 @@ return fieldtype._CData_output(suba, obj, offset) def __set__(self, obj, value): + if self.inside_anon_field is not None: + setattr(self.inside_anon_field.__get__(obj), self.name, value) + return fieldtype = self.ctype cobj = fieldtype.from_param(value) key = keepalive_key(self.num) diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py --- a/pypy/config/pypyoption.py +++ b/pypy/config/pypyoption.py @@ -46,7 +46,6 @@ except detect_cpu.ProcessorAutodetectError: pass - translation_modules = default_modules.copy() translation_modules.update([ "fcntl", "time", "select", "signal", "_rawffi", "zlib", "struct", "_md5", diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst --- a/pypy/doc/whatsnew-head.rst +++ b/pypy/doc/whatsnew-head.rst @@ -23,3 +23,15 @@ Implement yet another strange numpy indexing compatibility; indexing by a scalar returns a scalar + +.. branch: fix_transpose_for_list_v3 + +Allow arguments to transpose to be sequences + +.. branch: jit-leaner-frontend + +Improve the tracing speed in the frontend as well as heapcache by using a more compact representation +of traces + +.. branch: win32-lib-name + diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py --- a/pypy/goal/targetpypystandalone.py +++ b/pypy/goal/targetpypystandalone.py @@ -240,8 +240,9 @@ "when --shared is on (it is by default). 
" "See issue #1971.") if sys.platform == 'win32': - config.translation.libname = '..\\..\\libs\\python27.lib' - thisdir.join('..', '..', 'libs').ensure(dir=1) + libdir = thisdir.join('..', '..', 'libs') + libdir.ensure(dir=1) + config.translation.libname = str(libdir.join('python27.lib')) if config.translation.thread: config.objspace.usemodules.thread = True diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -277,27 +277,12 @@ raise NotImplementedError def get_traceback(self): - """Calling this marks the PyTraceback as escaped, i.e. it becomes - accessible and inspectable by app-level Python code. For the JIT. - Note that this has no effect if there are already several traceback - frames recorded, because in this case they are already marked as - escaping by executioncontext.leave() being called with - got_exception=True. + """Get the PyTraceback object, for app-level Python code. """ - from pypy.interpreter.pytraceback import PyTraceback - tb = self._application_traceback - if tb is not None and isinstance(tb, PyTraceback): - tb.frame.mark_as_escaped() - return tb + return self._application_traceback def set_traceback(self, traceback): - """Set the current traceback. It should either be a traceback - pointing to some already-escaped frame, or a traceback for the - current frame. To support the latter case we do not mark the - frame as escaped. The idea is that it will be marked as escaping - only if the exception really propagates out of this frame, by - executioncontext.leave() being called with got_exception=True. - """ + """Set the current traceback.""" self._application_traceback = traceback diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py --- a/pypy/interpreter/executioncontext.py +++ b/pypy/interpreter/executioncontext.py @@ -74,15 +74,6 @@ finally: frame_vref = self.topframeref self.topframeref = frame.f_backref - if frame.escaped or got_exception: - # if this frame escaped to applevel, we must ensure that also - # f_back does - f_back = frame.f_backref() - if f_back: - f_back.mark_as_escaped() - # force the frame (from the JIT point of view), so that it can - # be accessed also later - frame_vref() jit.virtual_ref_finish(frame_vref, frame) # ________________________________________________________________ diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py --- a/pypy/interpreter/pyframe.py +++ b/pypy/interpreter/pyframe.py @@ -65,7 +65,6 @@ last_exception = None f_backref = jit.vref_None - escaped = False # see mark_as_escaped() debugdata = None pycode = None # code object executed by that frame @@ -152,15 +151,6 @@ assert isinstance(cell, Cell) return cell - def mark_as_escaped(self): - """ - Must be called on frames that are exposed to applevel, e.g. by - sys._getframe(). This ensures that the virtualref holding the frame - is properly forced by ec.leave(), and thus the frame will be still - accessible even after the corresponding C stack died. 
- """ - self.escaped = True - def append_block(self, block): assert block.previous is self.lastblock self.lastblock = block diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h b/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h --- a/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h +++ b/pypy/module/_multibytecodec/src/cjkcodecs/cjkcodecs.h @@ -10,6 +10,7 @@ #define _CJKCODECS_H_ #include "src/cjkcodecs/multibytecodec.h" +#include "src/cjkcodecs/fixnames.h" /* a unicode "undefined" codepoint */ diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h b/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h new file mode 100644 --- /dev/null +++ b/pypy/module/_multibytecodec/src/cjkcodecs/fixnames.h @@ -0,0 +1,9 @@ + +/* this is only included from the .c files in this directory: rename + these pypymbc-prefixed names to locally define the CPython names */ +typedef pypymbc_ssize_t Py_ssize_t; +#define PY_SSIZE_T_MAX ((Py_ssize_t)(((size_t) -1) >> 1)) +#define Py_UNICODE_SIZE pypymbc_UNICODE_SIZE +typedef pypymbc_wchar_t Py_UNICODE; +typedef pypymbc_ucs4_t ucs4_t; +typedef pypymbc_ucs2_t ucs2_t, DBCHAR; diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c --- a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c +++ b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.c @@ -1,6 +1,7 @@ #include #include #include "src/cjkcodecs/multibytecodec.h" +#include "src/cjkcodecs/fixnames.h" struct pypy_cjk_dec_s *pypy_cjk_dec_new(const MultibyteCodec *codec) diff --git a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h --- a/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h +++ b/pypy/module/_multibytecodec/src/cjkcodecs/multibytecodec.h @@ -9,31 +9,28 @@ #include #ifdef _WIN64 -typedef __int64 ssize_t +typedef __int64 pypymbc_ssize_t #elif defined(_WIN32) -typedef int ssize_t; +typedef int pypymbc_ssize_t; #else #include -#endif - -#ifndef Py_UNICODE_SIZE -#ifdef _WIN32 -#define Py_UNICODE_SIZE 2 -#else -#define Py_UNICODE_SIZE 4 -#endif -typedef wchar_t Py_UNICODE; -typedef ssize_t Py_ssize_t; -#define PY_SSIZE_T_MAX ((Py_ssize_t)(((size_t) -1) >> 1)) +typedef ssize_t pypymbc_ssize_t; #endif #ifdef _WIN32 -typedef unsigned int ucs4_t; -typedef unsigned short ucs2_t, DBCHAR; +#define pypymbc_UNICODE_SIZE 2 +#else +#define pypymbc_UNICODE_SIZE 4 +#endif +typedef wchar_t pypymbc_wchar_t; + +#ifdef _WIN32 +typedef unsigned int pypymbc_ucs4_t; +typedef unsigned short pypymbc_ucs2_t; #else #include -typedef uint32_t ucs4_t; -typedef uint16_t ucs2_t, DBCHAR; +typedef uint32_t pypymbc_ucs4_t; +typedef uint16_t pypymbc_ucs2_t; #endif @@ -42,28 +39,28 @@ void *p; int i; unsigned char c[8]; - ucs2_t u2[4]; - ucs4_t u4[2]; + pypymbc_ucs2_t u2[4]; + pypymbc_ucs4_t u4[2]; } MultibyteCodec_State; typedef int (*mbcodec_init)(const void *config); -typedef Py_ssize_t (*mbencode_func)(MultibyteCodec_State *state, +typedef pypymbc_ssize_t (*mbencode_func)(MultibyteCodec_State *state, const void *config, - const Py_UNICODE **inbuf, Py_ssize_t inleft, - unsigned char **outbuf, Py_ssize_t outleft, + const pypymbc_wchar_t **inbuf, pypymbc_ssize_t inleft, + unsigned char **outbuf, pypymbc_ssize_t outleft, int flags); typedef int (*mbencodeinit_func)(MultibyteCodec_State *state, const void *config); -typedef Py_ssize_t (*mbencodereset_func)(MultibyteCodec_State *state, +typedef pypymbc_ssize_t 
(*mbencodereset_func)(MultibyteCodec_State *state, const void *config, - unsigned char **outbuf, Py_ssize_t outleft); -typedef Py_ssize_t (*mbdecode_func)(MultibyteCodec_State *state, + unsigned char **outbuf, pypymbc_ssize_t outleft); +typedef pypymbc_ssize_t (*mbdecode_func)(MultibyteCodec_State *state, const void *config, - const unsigned char **inbuf, Py_ssize_t inleft, - Py_UNICODE **outbuf, Py_ssize_t outleft); + const unsigned char **inbuf, pypymbc_ssize_t inleft, + pypymbc_wchar_t **outbuf, pypymbc_ssize_t outleft); typedef int (*mbdecodeinit_func)(MultibyteCodec_State *state, const void *config); -typedef Py_ssize_t (*mbdecodereset_func)(MultibyteCodec_State *state, +typedef pypymbc_ssize_t (*mbdecodereset_func)(MultibyteCodec_State *state, const void *config); typedef struct MultibyteCodec_s { @@ -94,59 +91,59 @@ const MultibyteCodec *codec; MultibyteCodec_State state; const unsigned char *inbuf_start, *inbuf, *inbuf_end; - Py_UNICODE *outbuf_start, *outbuf, *outbuf_end; + pypymbc_wchar_t *outbuf_start, *outbuf, *outbuf_end; }; RPY_EXTERN struct pypy_cjk_dec_s *pypy_cjk_dec_new(const MultibyteCodec *codec); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_init(struct pypy_cjk_dec_s *d, - char *inbuf, Py_ssize_t inlen); +pypymbc_ssize_t pypy_cjk_dec_init(struct pypy_cjk_dec_s *d, + char *inbuf, pypymbc_ssize_t inlen); RPY_EXTERN void pypy_cjk_dec_free(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_chunk(struct pypy_cjk_dec_s *); +pypymbc_ssize_t pypy_cjk_dec_chunk(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_UNICODE *pypy_cjk_dec_outbuf(struct pypy_cjk_dec_s *); +pypymbc_wchar_t *pypy_cjk_dec_outbuf(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_outlen(struct pypy_cjk_dec_s *); +pypymbc_ssize_t pypy_cjk_dec_outlen(struct pypy_cjk_dec_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_inbuf_remaining(struct pypy_cjk_dec_s *d); +pypymbc_ssize_t pypy_cjk_dec_inbuf_remaining(struct pypy_cjk_dec_s *d); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_inbuf_consumed(struct pypy_cjk_dec_s* d); +pypymbc_ssize_t pypy_cjk_dec_inbuf_consumed(struct pypy_cjk_dec_s* d); RPY_EXTERN -Py_ssize_t pypy_cjk_dec_replace_on_error(struct pypy_cjk_dec_s* d, - Py_UNICODE *, Py_ssize_t, Py_ssize_t); +pypymbc_ssize_t pypy_cjk_dec_replace_on_error(struct pypy_cjk_dec_s* d, + pypymbc_wchar_t *, pypymbc_ssize_t, pypymbc_ssize_t); struct pypy_cjk_enc_s { const MultibyteCodec *codec; MultibyteCodec_State state; - const Py_UNICODE *inbuf_start, *inbuf, *inbuf_end; + const pypymbc_wchar_t *inbuf_start, *inbuf, *inbuf_end; unsigned char *outbuf_start, *outbuf, *outbuf_end; }; RPY_EXTERN struct pypy_cjk_enc_s *pypy_cjk_enc_new(const MultibyteCodec *codec); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_init(struct pypy_cjk_enc_s *d, - Py_UNICODE *inbuf, Py_ssize_t inlen); +pypymbc_ssize_t pypy_cjk_enc_init(struct pypy_cjk_enc_s *d, + pypymbc_wchar_t *inbuf, pypymbc_ssize_t inlen); RPY_EXTERN void pypy_cjk_enc_free(struct pypy_cjk_enc_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_chunk(struct pypy_cjk_enc_s *, Py_ssize_t); +pypymbc_ssize_t pypy_cjk_enc_chunk(struct pypy_cjk_enc_s *, pypymbc_ssize_t); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_reset(struct pypy_cjk_enc_s *); +pypymbc_ssize_t pypy_cjk_enc_reset(struct pypy_cjk_enc_s *); RPY_EXTERN char *pypy_cjk_enc_outbuf(struct pypy_cjk_enc_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_outlen(struct pypy_cjk_enc_s *); +pypymbc_ssize_t pypy_cjk_enc_outlen(struct pypy_cjk_enc_s *); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_inbuf_remaining(struct pypy_cjk_enc_s *d); +pypymbc_ssize_t 
pypy_cjk_enc_inbuf_remaining(struct pypy_cjk_enc_s *d); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_inbuf_consumed(struct pypy_cjk_enc_s* d); +pypymbc_ssize_t pypy_cjk_enc_inbuf_consumed(struct pypy_cjk_enc_s* d); RPY_EXTERN -Py_ssize_t pypy_cjk_enc_replace_on_error(struct pypy_cjk_enc_s* d, - char *, Py_ssize_t, Py_ssize_t); +pypymbc_ssize_t pypy_cjk_enc_replace_on_error(struct pypy_cjk_enc_s* d, + char *, pypymbc_ssize_t, pypymbc_ssize_t); RPY_EXTERN const MultibyteCodec *pypy_cjk_enc_getcodec(struct pypy_cjk_enc_s *); @@ -191,5 +188,7 @@ DEFINE_CODEC(big5) DEFINE_CODEC(cp950) +#undef DEFINE_CODEC + #endif diff --git a/pypy/module/cpyext/test/test_sequence.py b/pypy/module/cpyext/test/test_sequence.py --- a/pypy/module/cpyext/test/test_sequence.py +++ b/pypy/module/cpyext/test/test_sequence.py @@ -90,8 +90,10 @@ self.raises(space, api, IndexError, api.PySequence_SetItem, l, 3, w_value) + t = api.PyTuple_New(1) + api.PyTuple_SetItem(t, 0, l) self.raises(space, api, TypeError, api.PySequence_SetItem, - api.PyTuple_New(1), 0, w_value) + t, 0, w_value) self.raises(space, api, TypeError, api.PySequence_SetItem, space.newdict(), 0, w_value) diff --git a/pypy/module/cpyext/test/test_tupleobject.py b/pypy/module/cpyext/test/test_tupleobject.py --- a/pypy/module/cpyext/test/test_tupleobject.py +++ b/pypy/module/cpyext/test/test_tupleobject.py @@ -5,6 +5,7 @@ from pypy.module.cpyext.test.test_api import BaseApiTest from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.debug import FatalError class TestTupleObject(BaseApiTest): @@ -18,29 +19,44 @@ #assert api.PyTuple_GET_SIZE(atuple) == 3 --- now a C macro raises(TypeError, api.PyTuple_Size(space.newlist([]))) api.PyErr_Clear() - + + def test_tuple_realize_refuses_nulls(self, space, api): + py_tuple = api.PyTuple_New(1) + py.test.raises(FatalError, from_ref, space, py_tuple) + def test_tuple_resize(self, space, api): w_42 = space.wrap(42) + w_43 = space.wrap(43) + w_44 = space.wrap(44) ar = lltype.malloc(PyObjectP.TO, 1, flavor='raw') py_tuple = api.PyTuple_New(3) # inside py_tuple is an array of "PyObject *" items which each hold # a reference rffi.cast(PyTupleObject, py_tuple).c_ob_item[0] = make_ref(space, w_42) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[1] = make_ref(space, w_43) ar[0] = py_tuple api._PyTuple_Resize(ar, 2) w_tuple = from_ref(space, ar[0]) assert space.int_w(space.len(w_tuple)) == 2 assert space.int_w(space.getitem(w_tuple, space.wrap(0))) == 42 + assert space.int_w(space.getitem(w_tuple, space.wrap(1))) == 43 api.Py_DecRef(ar[0]) py_tuple = api.PyTuple_New(3) rffi.cast(PyTupleObject, py_tuple).c_ob_item[0] = make_ref(space, w_42) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[1] = make_ref(space, w_43) + rffi.cast(PyTupleObject, py_tuple).c_ob_item[2] = make_ref(space, w_44) ar[0] = py_tuple api._PyTuple_Resize(ar, 10) + assert api.PyTuple_Size(ar[0]) == 10 + for i in range(3, 10): + rffi.cast(PyTupleObject, py_tuple).c_ob_item[i] = make_ref( + space, space.wrap(42 + i)) w_tuple = from_ref(space, ar[0]) assert space.int_w(space.len(w_tuple)) == 10 - assert space.int_w(space.getitem(w_tuple, space.wrap(0))) == 42 + for i in range(10): + assert space.int_w(space.getitem(w_tuple, space.wrap(i))) == 42 + i api.Py_DecRef(ar[0]) lltype.free(ar, flavor='raw') diff --git a/pypy/module/cpyext/tupleobject.py b/pypy/module/cpyext/tupleobject.py --- a/pypy/module/cpyext/tupleobject.py +++ b/pypy/module/cpyext/tupleobject.py @@ -1,5 +1,6 @@ from 
pypy.interpreter.error import OperationError from rpython.rtyper.lltypesystem import rffi, lltype +from rpython.rlib.debug import fatalerror_notb from pypy.module.cpyext.api import (cpython_api, Py_ssize_t, CANNOT_FAIL, build_type_checkers, PyObjectFields, cpython_struct, bootstrap_function) @@ -91,14 +92,22 @@ def tuple_realize(space, py_obj): """ Creates the tuple in the interpreter. The PyTupleObject must not - be modified after this call. + be modified after this call. We check that it does not contain + any NULLs at this point (which would correspond to half-broken + W_TupleObjects). """ py_tup = rffi.cast(PyTupleObject, py_obj) l = py_tup.c_ob_size p = py_tup.c_ob_item items_w = [None] * l for i in range(l): - items_w[i] = from_ref(space, p[i]) + w_item = from_ref(space, p[i]) + if w_item is None: + fatalerror_notb( + "Fatal error in cpyext, CPython compatibility layer: " + "converting a PyTupleObject into a W_TupleObject, " + "but found NULLs as items") + items_w[i] = w_item w_obj = space.newtuple(items_w) track_reference(space, py_obj, w_obj) return w_obj diff --git a/pypy/module/micronumpy/ndarray.py b/pypy/module/micronumpy/ndarray.py --- a/pypy/module/micronumpy/ndarray.py +++ b/pypy/module/micronumpy/ndarray.py @@ -502,29 +502,34 @@ return W_NDimArray(self.implementation.transpose(self, axes)) def descr_transpose(self, space, args_w): - if len(args_w) == 1 and space.isinstance_w(args_w[0], space.w_tuple): - args_w = space.fixedview(args_w[0]) - if (len(args_w) == 0 or - len(args_w) == 1 and space.is_none(args_w[0])): + if len(args_w) == 0 or len(args_w) == 1 and space.is_none(args_w[0]): return self.descr_get_transpose(space) else: - if len(args_w) != self.ndims(): - raise oefmt(space.w_ValueError, "axes don't match array") - axes = [] - axes_seen = [False] * self.ndims() - for w_arg in args_w: - try: - axis = support.index_w(space, w_arg) - except OperationError: - raise oefmt(space.w_TypeError, "an integer is required") - if axis < 0 or axis >= self.ndims(): - raise oefmt(space.w_ValueError, "invalid axis for this array") - if axes_seen[axis] is True: - raise oefmt(space.w_ValueError, "repeated axis in transpose") - axes.append(axis) - axes_seen[axis] = True - return self.descr_get_transpose(space, axes) + if len(args_w) > 1: + axes = args_w + else: # Iterable in the only argument (len(arg_w) == 1 and arg_w[0] is not None) + axes = space.fixedview(args_w[0]) + axes = self._checked_axes(axes, space) + return self.descr_get_transpose(space, axes) + + def _checked_axes(self, axes_raw, space): + if len(axes_raw) != self.ndims(): + raise oefmt(space.w_ValueError, "axes don't match array") + axes = [] + axes_seen = [False] * self.ndims() + for elem in axes_raw: + try: + axis = support.index_w(space, elem) + except OperationError: + raise oefmt(space.w_TypeError, "an integer is required") + if axis < 0 or axis >= self.ndims(): + raise oefmt(space.w_ValueError, "invalid axis for this array") + if axes_seen[axis] is True: + raise oefmt(space.w_ValueError, "repeated axis in transpose") + axes.append(axis) + axes_seen[axis] = True + return axes @unwrap_spec(axis1=int, axis2=int) def descr_swapaxes(self, space, axis1, axis2): diff --git a/pypy/module/micronumpy/test/test_ndarray.py b/pypy/module/micronumpy/test/test_ndarray.py --- a/pypy/module/micronumpy/test/test_ndarray.py +++ b/pypy/module/micronumpy/test/test_ndarray.py @@ -2960,6 +2960,36 @@ assert (a.transpose() == b).all() assert (a.transpose(None) == b).all() + def test_transpose_arg_tuple(self): + import numpy as np + a = 
np.arange(24).reshape(2, 3, 4) + transpose_args = a.transpose(1, 2, 0) + + transpose_test = a.transpose((1, 2, 0)) + + assert transpose_test.shape == (3, 4, 2) + assert (transpose_args == transpose_test).all() + + def test_transpose_arg_list(self): + import numpy as np + a = np.arange(24).reshape(2, 3, 4) + transpose_args = a.transpose(1, 2, 0) + + transpose_test = a.transpose([1, 2, 0]) + + assert transpose_test.shape == (3, 4, 2) + assert (transpose_args == transpose_test).all() + + def test_transpose_arg_array(self): + import numpy as np + a = np.arange(24).reshape(2, 3, 4) + transpose_args = a.transpose(1, 2, 0) + + transpose_test = a.transpose(np.array([1, 2, 0])) + + assert transpose_test.shape == (3, 4, 2) + assert (transpose_args == transpose_test).all() + def test_transpose_error(self): import numpy as np a = np.arange(24).reshape(2, 3, 4) @@ -2968,6 +2998,11 @@ raises(ValueError, a.transpose, 1, 0, 1) raises(TypeError, a.transpose, 1, 0, '2') + def test_transpose_unexpected_argument(self): + import numpy as np + a = np.array([[1, 2], [3, 4], [5, 6]]) + raises(TypeError, 'a.transpose(axes=(1,2,0))') + def test_flatiter(self): from numpy import array, flatiter, arange, zeros a = array([[10, 30], [40, 60]]) diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py --- a/pypy/module/sys/vm.py +++ b/pypy/module/sys/vm.py @@ -37,7 +37,6 @@ raise OperationError(space.w_ValueError, space.wrap("call stack is not deep enough")) if depth == 0: - f.mark_as_escaped() return space.wrap(f) depth -= 1 f = ec.getnextframe_nohidden(f) diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py b/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py --- a/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py +++ b/pypy/module/test_lib_pypy/ctypes_tests/test_anon.py @@ -57,3 +57,32 @@ assert Y.y.offset == sizeof(c_int) * 2 assert Y._names_ == ['x', 'a', 'b', 'y'] + + def test_anonymous_fields_on_instance(self): + # this is about the *instance-level* access of anonymous fields, + # which you'd guess is the most common, but used not to work + # (issue #2230) + + class B(Structure): + _fields_ = [("x", c_int), ("y", c_int), ("z", c_int)] + class A(Structure): + _anonymous_ = ["b"] + _fields_ = [("b", B)] + + a = A() + a.x = 5 + assert a.x == 5 + assert a.b.x == 5 + a.b.x += 1 + assert a.x == 6 + + class C(Structure): + _anonymous_ = ["a"] + _fields_ = [("v", c_int), ("a", A)] + + c = C() + c.v = 3 + c.y = -8 + assert c.v == 3 + assert c.y == c.a.y == c.a.b.y == -8 + assert not hasattr(c, 'b') diff --git a/pypy/objspace/std/objectobject.py b/pypy/objspace/std/objectobject.py --- a/pypy/objspace/std/objectobject.py +++ b/pypy/objspace/std/objectobject.py @@ -110,7 +110,7 @@ def descr__init__(space, w_obj, __args__): # don't allow arguments unless __new__ is overridden w_type = space.type(w_obj) - w_parent_new, _ = w_type.lookup_where('__new__') + w_parent_new, _ = space.lookup_in_type_where(w_type, '__new__') if w_parent_new is space.w_object: try: __args__.fixedunpack(0) diff --git a/pypy/tool/gdb_pypy.py b/pypy/tool/gdb_pypy.py --- a/pypy/tool/gdb_pypy.py +++ b/pypy/tool/gdb_pypy.py @@ -288,9 +288,11 @@ RPyListPrinter.recursive = True try: itemlist = [] - for i in range(length): + for i in range(min(length, MAX_DISPLAY_LENGTH)): item = items[i] itemlist.append(str(item)) # may recurse here + if length > MAX_DISPLAY_LENGTH: + itemlist.append("...") str_items = ', '.join(itemlist) finally: RPyListPrinter.recursive = False diff --git a/rpython/config/translationoption.py 
b/rpython/config/translationoption.py --- a/rpython/config/translationoption.py +++ b/rpython/config/translationoption.py @@ -126,6 +126,9 @@ ChoiceOption("jit_profiler", "integrate profiler support into the JIT", ["off", "oprofile"], default="off"), + ChoiceOption("jit_opencoder_model", "the model limits the maximal length" + " of traces. Use big if you want to go bigger than " + "the default", ["big", "normal"], default="normal"), BoolOption("check_str_without_nul", "Forbid NUL chars in strings in some external function calls", default=False, cmdline=None), diff --git a/rpython/jit/backend/arm/assembler.py b/rpython/jit/backend/arm/assembler.py --- a/rpython/jit/backend/arm/assembler.py +++ b/rpython/jit/backend/arm/assembler.py @@ -939,9 +939,9 @@ op = operations[i] self.mc.mark_op(op) opnum = op.getopnum() - if op.has_no_side_effect() and op not in regalloc.longevity: + if rop.has_no_side_effect(opnum) and op not in regalloc.longevity: regalloc.possibly_free_vars_for_op(op) - elif not we_are_translated() and op.getopnum() == -127: + elif not we_are_translated() and op.getopnum() == rop.FORCE_SPILL: regalloc.prepare_force_spill(op, fcond) else: arglocs = regalloc_operations[opnum](regalloc, op, fcond) @@ -949,7 +949,7 @@ fcond = asm_operations[opnum](self, op, arglocs, regalloc, fcond) assert fcond is not None - if op.is_guard(): + if rop.is_guard(opnum): regalloc.possibly_free_vars(op.getfailargs()) if op.type != 'v': regalloc.possibly_free_var(op) diff --git a/rpython/jit/backend/arm/detect.py b/rpython/jit/backend/arm/detect.py --- a/rpython/jit/backend/arm/detect.py +++ b/rpython/jit/backend/arm/detect.py @@ -63,3 +63,44 @@ "falling back to", "ARMv%d" % n) debug_stop("jit-backend-arch") return n + + +# Once we can rely on the availability of glibc >= 2.16, replace this with: +# from rpython.rtyper.lltypesystem import lltype, rffi +# getauxval = rffi.llexternal("getauxval", [lltype.Unsigned], lltype.Unsigned) +def getauxval(type_, filename='/proc/self/auxv'): + fd = os.open(filename, os.O_RDONLY, 0644) + + buf_size = 2048 + struct_size = 8 # 2x uint32 + try: + buf = os.read(fd, buf_size) + finally: + os.close(fd) + + # decode chunks of 8 bytes (a_type, a_val), and + # return the a_val whose a_type corresponds to type_, + # or zero if not found. 
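# (Illustrative aside, not part of the changeset: each auxv record is an
# (a_type, a_val) pair of 32-bit little-endian words on this target. With the
# sample /proc/self/auxv blob used in test_detect.py further down in this
# diff, the first 8 bytes '\x10\x00\x00\x00\xd7\xa8\x1e\x00' decode to
# a_type == 16 (AT_HWCAP) and a_val == 0x001ea8d7 == 2009303, which is what
# the test asserts; since 2009303 & (1 << 12) == 0, the HWCAP_NEON bit is
# clear and detect_neon() would report False for that machine.)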
+ i = 0 + while i <= buf_size - struct_size: + # We only support little-endian ARM + a_type = (ord(buf[i]) | + (ord(buf[i+1]) << 8) | + (ord(buf[i+2]) << 16) | + (ord(buf[i+3]) << 24)) + a_val = (ord(buf[i+4]) | + (ord(buf[i+5]) << 8) | + (ord(buf[i+6]) << 16) | + (ord(buf[i+7]) << 24)) + i += struct_size + if a_type == type_: + return a_val + + return 0 + + +def detect_neon(): + AT_HWCAP = 16 + HWCAP_NEON = 1 << 12 + hwcap = getauxval(AT_HWCAP) + return bool(hwcap & HWCAP_NEON) diff --git a/rpython/jit/backend/arm/opassembler.py b/rpython/jit/backend/arm/opassembler.py --- a/rpython/jit/backend/arm/opassembler.py +++ b/rpython/jit/backend/arm/opassembler.py @@ -1092,8 +1092,8 @@ self.mc.VCVT_int_to_float(res.value, r.svfp_ip.value) return fcond - # the following five instructions are only ARMv7; - # regalloc.py won't call them at all on ARMv6 + # the following five instructions are only ARMv7 with NEON; + # regalloc.py won't call them at all, in other cases emit_opx_llong_add = gen_emit_float_op('llong_add', 'VADD_i64') emit_opx_llong_sub = gen_emit_float_op('llong_sub', 'VSUB_i64') emit_opx_llong_and = gen_emit_float_op('llong_and', 'VAND_i64') diff --git a/rpython/jit/backend/arm/regalloc.py b/rpython/jit/backend/arm/regalloc.py --- a/rpython/jit/backend/arm/regalloc.py +++ b/rpython/jit/backend/arm/regalloc.py @@ -530,7 +530,7 @@ EffectInfo.OS_LLONG_AND, EffectInfo.OS_LLONG_OR, EffectInfo.OS_LLONG_XOR): - if self.cpu.cpuinfo.arch_version >= 7: + if self.cpu.cpuinfo.neon: args = self._prepare_llong_binop_xx(op, fcond) self.perform_extra(op, args, fcond) return diff --git a/rpython/jit/backend/arm/runner.py b/rpython/jit/backend/arm/runner.py --- a/rpython/jit/backend/arm/runner.py +++ b/rpython/jit/backend/arm/runner.py @@ -7,13 +7,14 @@ from rpython.rlib.jit_hooks import LOOP_RUN_CONTAINER from rpython.rtyper.lltypesystem import lltype, llmemory from rpython.jit.backend.arm.detect import detect_hardfloat -from rpython.jit.backend.arm.detect import detect_arch_version +from rpython.jit.backend.arm.detect import detect_arch_version, detect_neon jitframe.STATICSIZE = JITFRAME_FIXED_SIZE class CPUInfo(object): hf_abi = False arch_version = 6 + neon = False class AbstractARMCPU(AbstractLLCPU): @@ -48,6 +49,7 @@ def setup_once(self): self.cpuinfo.arch_version = detect_arch_version() self.cpuinfo.hf_abi = detect_hardfloat() + self.cpuinfo.neon = detect_neon() #self.codemap.setup() self.assembler.setup_once() diff --git a/rpython/jit/backend/arm/test/test_detect.py b/rpython/jit/backend/arm/test/test_detect.py --- a/rpython/jit/backend/arm/test/test_detect.py +++ b/rpython/jit/backend/arm/test/test_detect.py @@ -1,6 +1,6 @@ import py from rpython.tool.udir import udir -from rpython.jit.backend.arm.detect import detect_arch_version +from rpython.jit.backend.arm.detect import detect_arch_version, getauxval cpuinfo = "Processor : ARMv%d-compatible processor rev 7 (v6l)""" cpuinfo2 = """processor : 0 @@ -29,6 +29,19 @@ address sizes : 36 bits physical, 48 bits virtual power management: """ +# From a Marvell Armada 370/XP +auxv = ( + '\x10\x00\x00\x00\xd7\xa8\x1e\x00\x06\x00\x00\x00\x00\x10\x00\x00\x11\x00' + '\x00\x00d\x00\x00\x00\x03\x00\x00\x004\x00\x01\x00\x04\x00\x00\x00 \x00' + '\x00\x00\x05\x00\x00\x00\t\x00\x00\x00\x07\x00\x00\x00\x00\xe0\xf3\xb6' + '\x08\x00\x00\x00\x00\x00\x00\x00\t\x00\x00\x00t\xcf\x04\x00\x0b\x00\x00' + '\x000\x0c\x00\x00\x0c\x00\x00\x000\x0c\x00\x00\r\x00\x00\x000\x0c\x00\x00' + '\x0e\x00\x00\x000\x0c\x00\x00\x17\x00\x00\x00\x00\x00\x00\x00\x19\x00\x00' + 
'\x00\x8a\xf3\x87\xbe\x1a\x00\x00\x00\x00\x00\x00\x00\x1f\x00\x00\x00\xec' + '\xff\x87\xbe\x0f\x00\x00\x00\x9a\xf3\x87\xbe\x00\x00\x00\x00\x00\x00\x00' + '\x00' +) + def write_cpuinfo(info): filepath = udir.join('get_arch_version') @@ -46,3 +59,10 @@ py.test.raises(ValueError, 'detect_arch_version(write_cpuinfo(cpuinfo % 5))') assert detect_arch_version(write_cpuinfo(cpuinfo2)) == 6 + + +def test_getauxval_no_neon(): + path = udir.join('auxv') + path.write(auxv, 'wb') + AT_HWCAP = 16 + assert getauxval(AT_HWCAP, filename=str(path)) == 2009303 diff --git a/rpython/jit/backend/llgraph/runner.py b/rpython/jit/backend/llgraph/runner.py --- a/rpython/jit/backend/llgraph/runner.py +++ b/rpython/jit/backend/llgraph/runner.py @@ -455,7 +455,7 @@ if box is not frame.current_op: value = frame.env[box] else: - value = box.getvalue() # 0 or 0.0 or NULL + value = 0 # box.getvalue() # 0 or 0.0 or NULL else: value = None values.append(value) @@ -472,6 +472,13 @@ # ------------------------------------------------------------ + def setup_descrs(self): + all_descrs = [] + for k, v in self.descrs.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + return all_descrs + def calldescrof(self, FUNC, ARGS, RESULT, effect_info): key = ('call', getkind(RESULT), tuple([getkind(A) for A in ARGS]), diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -331,7 +331,7 @@ counter = self._register_counter(tp, number, token) c_adr = ConstInt(rffi.cast(lltype.Signed, counter)) operations.append( - ResOperation(rop.INCREMENT_DEBUG_COUNTER, [c_adr], None)) + ResOperation(rop.INCREMENT_DEBUG_COUNTER, [c_adr])) def _register_counter(self, tp, number, token): # YYY very minor leak -- we need the counters to stay alive diff --git a/rpython/jit/backend/llsupport/descr.py b/rpython/jit/backend/llsupport/descr.py --- a/rpython/jit/backend/llsupport/descr.py +++ b/rpython/jit/backend/llsupport/descr.py @@ -21,6 +21,30 @@ self._cache_call = {} self._cache_interiorfield = {} + def setup_descrs(self): + all_descrs = [] + for k, v in self._cache_size.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_field.iteritems(): + for k1, v1 in v.iteritems(): + v1.descr_index = len(all_descrs) + all_descrs.append(v1) + for k, v in self._cache_array.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_arraylen.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_call.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + for k, v in self._cache_interiorfield.iteritems(): + v.descr_index = len(all_descrs) + all_descrs.append(v) + assert len(all_descrs) < 2**15 + return all_descrs + def init_size_descr(self, STRUCT, sizedescr): pass diff --git a/rpython/jit/backend/llsupport/llmodel.py b/rpython/jit/backend/llsupport/llmodel.py --- a/rpython/jit/backend/llsupport/llmodel.py +++ b/rpython/jit/backend/llsupport/llmodel.py @@ -316,6 +316,9 @@ return ll_frame return execute_token + def setup_descrs(self): + return self.gc_ll_descr.setup_descrs() + # ------------------- helpers and descriptions -------------------- @staticmethod diff --git a/rpython/jit/backend/llsupport/regalloc.py b/rpython/jit/backend/llsupport/regalloc.py --- a/rpython/jit/backend/llsupport/regalloc.py +++ b/rpython/jit/backend/llsupport/regalloc.py @@ -683,7 +683,7 @@ for i 
in range(len(operations)-1, -1, -1): op = operations[i] if op.type != 'v': - if op not in last_used and op.has_no_side_effect(): + if op not in last_used and rop.has_no_side_effect(op.opnum): continue opnum = op.getopnum() for j in range(op.numargs()): @@ -695,7 +695,7 @@ if opnum != rop.JUMP and opnum != rop.LABEL: if arg not in last_real_usage: last_real_usage[arg] = i - if op.is_guard(): + if rop.is_guard(op.opnum): for arg in op.getfailargs(): if arg is None: # hole continue @@ -732,14 +732,7 @@ return longevity, last_real_usage def is_comparison_or_ovf_op(opnum): - from rpython.jit.metainterp.resoperation import opclasses - cls = opclasses[opnum] - # hack hack: in theory they are instance method, but they don't use - # any instance field, we can use a fake object - class Fake(cls): - pass - op = Fake() - return op.is_comparison() or op.is_ovf() + return rop.is_comparison(opnum) or rop.is_ovf(opnum) def valid_addressing_size(size): return size == 1 or size == 2 or size == 4 or size == 8 diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -103,7 +103,7 @@ orig_op.set_forwarded(op) replaced = True op.setarg(i, arg) - if op.is_guard(): + if rop.is_guard(op.opnum): if not replaced: op = op.copy_and_change(op.getopnum()) orig_op.set_forwarded(op) @@ -212,7 +212,7 @@ # self._emit_mul_if_factor_offset_not_supported(v_length, scale, 0) # op.setarg(1, ConstInt(scale)) # op.setarg(2, v_length) - if op.is_getarrayitem() or \ + if rop.is_getarrayitem(opnum) or \ opnum in (rop.GETARRAYITEM_RAW_I, rop.GETARRAYITEM_RAW_F): self.handle_getarrayitem(op) @@ -324,13 +324,13 @@ if self.transform_to_gc_load(op): continue # ---------- turn NEWxxx into CALL_MALLOC_xxx ---------- - if op.is_malloc(): + if rop.is_malloc(op.opnum): self.handle_malloc_operation(op) continue - if (op.is_guard() or + if (rop.is_guard(op.opnum) or self.could_merge_with_next_guard(op, i, operations)): self.emit_pending_zeros() - elif op.can_malloc(): + elif rop.can_malloc(op.opnum): self.emitting_an_operation_that_can_collect() elif op.getopnum() == rop.LABEL: self.emitting_an_operation_that_can_collect() @@ -370,8 +370,8 @@ # return True in cases where the operation and the following guard # should likely remain together. Simplified version of # can_merge_with_next_guard() in llsupport/regalloc.py. - if not op.is_comparison(): - return op.is_ovf() # int_xxx_ovf() / guard_no_overflow() + if not rop.is_comparison(op.opnum): + return rop.is_ovf(op.opnum) # int_xxx_ovf() / guard_no_overflow() if i + 1 >= len(operations): return False next_op = operations[i + 1] @@ -400,7 +400,6 @@ # it's hard to test all cases). Rewrite it away. 
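# (Illustrative aside on a pattern that recurs throughout this merge, as the
# rewrite.py and regalloc.py hunks above show: predicate checks that used to
# be methods on the operation instance are now module-level helpers on rop
# keyed by the opcode number, e.g.
#     if rop.is_guard(op.opnum):      # instead of: if op.is_guard():
#         ...
# so the check no longer needs a ResOperation instance at all.)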
value = int(opnum == rop.GUARD_FALSE) op1 = ResOperation(rop.SAME_AS_I, [ConstInt(value)]) - op1.setint(value) self.emit_op(op1) lst = op.getfailargs()[:] lst[i] = op1 @@ -633,8 +632,7 @@ args = [frame, arglist[jd.index_of_virtualizable]] else: args = [frame] - call_asm = ResOperation(op.getopnum(), args, - op.getdescr()) + call_asm = ResOperation(op.getopnum(), args, descr=op.getdescr()) self.replace_op_with(self.get_box_replacement(op), call_asm) self.emit_op(call_asm) @@ -708,7 +706,7 @@ def _gen_call_malloc_gc(self, args, v_result, descr): """Generate a CALL_MALLOC_GC with the given args.""" self.emitting_an_operation_that_can_collect() - op = ResOperation(rop.CALL_MALLOC_GC, args, descr) + op = ResOperation(rop.CALL_MALLOC_GC, args, descr=descr) self.replace_op_with(v_result, op) self.emit_op(op) # In general, don't add v_result to write_barrier_applied: diff --git a/rpython/jit/backend/ppc/regalloc.py b/rpython/jit/backend/ppc/regalloc.py --- a/rpython/jit/backend/ppc/regalloc.py +++ b/rpython/jit/backend/ppc/regalloc.py @@ -286,7 +286,8 @@ self.assembler.mc.mark_op(op) self.rm.position = i self.fprm.position = i - if op.has_no_side_effect() and op not in self.longevity: + opnum = op.opnum + if rop.has_no_side_effect(opnum) and op not in self.longevity: i += 1 self.possibly_free_vars_for_op(op) continue @@ -298,8 +299,7 @@ else: self.fprm.temp_boxes.append(box) # - opnum = op.getopnum() - if not we_are_translated() and opnum == -127: + if not we_are_translated() and opnum == rop.FORCE_SPILL: self._consider_force_spill(op) else: arglocs = oplist[opnum](self, op) diff --git a/rpython/jit/backend/test/test_ll_random.py b/rpython/jit/backend/test/test_ll_random.py --- a/rpython/jit/backend/test/test_ll_random.py +++ b/rpython/jit/backend/test/test_ll_random.py @@ -2,6 +2,7 @@ from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr from rpython.rtyper import rclass from rpython.jit.backend.test import test_random +from rpython.jit.backend.test.test_random import getint, getref_base, getref from rpython.jit.metainterp.resoperation import ResOperation, rop, optypes from rpython.jit.metainterp.history import ConstInt, ConstPtr, getkind from rpython.jit.codewriter import heaptracker @@ -169,7 +170,7 @@ if length == 0: raise test_random.CannotProduceOperation v_index = r.choice(self.intvars) - if not (0 <= v_index.getint() < length): + if not (0 <= getint(v_index) < length): v_index = ConstInt(r.random_integer() % length) return v_index @@ -311,7 +312,7 @@ def field_descr(self, builder, r): v, A = builder.get_structptr_var(r, type=lltype.Array, array_of_structs=True) - array = v.getref(lltype.Ptr(A)) + array = getref(lltype.Ptr(A), v) v_index = builder.get_index(len(array), r) choice = [] for name in A.OF._names: @@ -344,7 +345,7 @@ w = ConstInt(r.random_integer()) else: w = r.choice(builder.intvars) - value = w.getint() + value = getint(w) if rffi.cast(lltype.Signed, rffi.cast(TYPE, value)) == value: break builder.do(self.opnum, [v, w], descr) @@ -357,7 +358,7 @@ w = ConstInt(r.random_integer()) else: w = r.choice(builder.intvars) - value = w.getint() + value = getint(w) if rffi.cast(lltype.Signed, rffi.cast(TYPE, value)) == value: break builder.do(self.opnum, [v, v_index, w], descr) @@ -389,7 +390,7 @@ class GetArrayItemOperation(ArrayOperation): def field_descr(self, builder, r): v, A = builder.get_arrayptr_var(r) - array = v.getref(lltype.Ptr(A)) + array = getref(lltype.Ptr(A), v) v_index = builder.get_index(len(array), r) descr = self.array_descr(builder, A) return v, A, 
v_index, descr @@ -411,7 +412,7 @@ w = ConstInt(r.random_integer()) else: w = r.choice(builder.intvars) - value = w.getint() + value = getint(w) if rffi.cast(lltype.Signed, rffi.cast(A.OF, value)) == value: break builder.do(self.opnum, [v, v_index, w], descr) @@ -455,7 +456,7 @@ v_ptr = builder.do(self.opnum, [v_length]) getattr(builder, self.builder_cache).append(v_ptr) # Initialize the string. Is there a better way to do this? - for i in range(v_length.getint()): + for i in range(getint(v_length)): v_index = ConstInt(i) v_char = ConstInt(r.random_integer() % self.max) builder.do(self.set_char, [v_ptr, v_index, v_char]) @@ -471,9 +472,9 @@ current = getattr(builder, self.builder_cache) if current and r.random() < .8: v_string = r.choice(current) - string = v_string.getref(self.ptr) + string = getref(self.ptr, v_string) else: - string = self.alloc(builder.get_index(500, r).getint()) + string = self.alloc(getint(builder.get_index(500, r))) v_string = ConstPtr(lltype.cast_opaque_ptr(llmemory.GCREF, string)) current.append(v_string) for i in range(len(string.chars)): @@ -484,7 +485,7 @@ class AbstractGetItemOperation(AbstractStringOperation): def produce_into(self, builder, r): v_string = self.get_string(builder, r) - v_index = builder.get_index(len(v_string.getref(self.ptr).chars), r) + v_index = builder.get_index(len(getref(self.ptr, v_string).chars), r) builder.do(self.opnum, [v_string, v_index]) class AbstractSetItemOperation(AbstractStringOperation): @@ -492,7 +493,7 @@ v_string = self.get_string(builder, r) if isinstance(v_string, ConstPtr): raise test_random.CannotProduceOperation # setitem(Const, ...) - v_index = builder.get_index(len(v_string.getref(self.ptr).chars), r) + v_index = builder.get_index(len(getref(self.ptr, v_string).chars), r) v_target = ConstInt(r.random_integer() % self.max) builder.do(self.opnum, [v_string, v_index, v_target]) @@ -505,15 +506,15 @@ def produce_into(self, builder, r): v_srcstring = self.get_string(builder, r) v_dststring = self.get_string(builder, r) - src = v_srcstring.getref(self.ptr) - dst = v_dststring.getref(self.ptr) + src = getref(self.ptr, v_srcstring) + dst = getref(self.ptr, v_dststring) if src == dst: # because it's not a raise test_random.CannotProduceOperation # memmove(), but memcpy() srclen = len(src.chars) dstlen = len(dst.chars) v_length = builder.get_index(min(srclen, dstlen), r) - v_srcstart = builder.get_index(srclen - v_length.getint() + 1, r) - v_dststart = builder.get_index(dstlen - v_length.getint() + 1, r) + v_srcstart = builder.get_index(srclen - getint(v_length) + 1, r) + v_dststart = builder.get_index(dstlen - getint(v_length) + 1, r) builder.do(self.opnum, [v_srcstring, v_dststring, v_srcstart, v_dststart, v_length]) @@ -585,7 +586,7 @@ """ % funcargs).compile() vtableptr = v._hints['vtable']._as_ptr() d = { - 'ptr': S.getref_base(), + 'ptr': getref_base(S), 'vtable' : vtableptr, 'LLException' : LLException, } diff --git a/rpython/jit/backend/test/test_random.py b/rpython/jit/backend/test/test_random.py --- a/rpython/jit/backend/test/test_random.py +++ b/rpython/jit/backend/test/test_random.py @@ -11,11 +11,9 @@ from rpython.jit.metainterp.executor import _execute_arglist, wrap_constant from rpython.jit.metainterp.resoperation import opname from rpython.jit.codewriter import longlong -from rpython.rtyper.lltypesystem import lltype, rstr +from rpython.rtyper.lltypesystem import lltype, llmemory, rstr from rpython.rtyper import rclass -class PleaseRewriteMe(Exception): - pass class DummyLoop(object): def __init__(self, 
subops): @@ -27,6 +25,41 @@ def execute_raised(self, exc, constant=False): self._got_exc = exc + +def getint(v): + if isinstance(v, (ConstInt, InputArgInt)): + return v.getint() + else: + return v._example_int + +def getfloatstorage(v): + if isinstance(v, (ConstFloat, InputArgFloat)): + return v.getfloatstorage() + else: + return v._example_float + +def getfloat(v): + return longlong.getrealfloat(getfloatstorage(v)) + +def getref_base(v): + if isinstance(v, (ConstPtr, InputArgRef)): + return v.getref_base() + else: + return v._example_ref + +def getref(PTR, v): + return lltype.cast_opaque_ptr(PTR, getref_base(v)) + +def constbox(v): + if v.type == INT: + return ConstInt(getint(v)) + if v.type == FLOAT: + return ConstFloat(getfloatstorage(v)) + if v.type == REF: + return ConstPtr(getref_base(v)) + assert 0, v.type + + class OperationBuilder(object): def __init__(self, cpu, loop, vars): self.cpu = cpu @@ -57,11 +90,21 @@ def do(self, opnum, argboxes, descr=None): self.fakemetainterp._got_exc = None op = ResOperation(opnum, argboxes, descr) + argboxes = map(constbox, argboxes) result = _execute_arglist(self.cpu, self.fakemetainterp, opnum, argboxes, descr) if result is not None: - c_result = wrap_constant(result) - op.copy_value_from(c_result) + if lltype.typeOf(result) == lltype.Signed: + op._example_int = result + elif isinstance(result, bool): + op._example_int = int(result) + elif lltype.typeOf(result) == longlong.FLOATSTORAGE: + op._example_float = result + elif isinstance(result, float): + op._example_float = longlong.getfloatstorage(result) + else: + assert lltype.typeOf(result) == llmemory.GCREF + op._example_ref = result self.loop.operations.append(op) return op @@ -101,7 +144,7 @@ if v in names: args.append(names[v]) elif isinstance(v, ConstPtr): - assert not v.getref_base() # otherwise should be in the names + assert not getref_base(v) # otherwise should be in the names args.append('ConstPtr(lltype.nullptr(llmemory.GCREF.TO))') elif isinstance(v, ConstFloat): args.append('ConstFloat(longlong.getfloatstorage(%r))' @@ -198,10 +241,10 @@ # def writevar(v, nameprefix, init=''): if nameprefix == 'const_ptr': - if not v.getref_base(): + if not getref_base(v): return 'lltype.nullptr(llmemory.GCREF.TO)' - TYPE = v.getref_base()._obj.ORIGTYPE - cont = lltype.cast_opaque_ptr(TYPE, v.getref_base()) + TYPE = getref_base(v)._obj.ORIGTYPE + cont = lltype.cast_opaque_ptr(TYPE, getref_base(v)) if TYPE.TO._is_varsize(): if isinstance(TYPE.TO, lltype.GcStruct): lgt = len(cont.chars) @@ -252,9 +295,9 @@ for i, v in enumerate(self.loop.inputargs): assert not isinstance(v, Const) if v.type == FLOAT: - vals.append("longlong.getfloatstorage(%r)" % v.getfloat()) + vals.append("longlong.getfloatstorage(%r)" % getfloat(v)) else: - vals.append("%r" % v.getint()) + vals.append("%r" % getint(v)) print >>s, ' loop_args = [%s]' % ", ".join(vals) print >>s, ' frame = cpu.execute_token(looptoken, *loop_args)' if self.should_fail_by is None: @@ -264,10 +307,10 @@ for i, v in enumerate(fail_args): if v.type == FLOAT: print >>s, (' assert longlong.getrealfloat(' - 'cpu.get_float_value(frame, %d)) == %r' % (i, v.getfloatstorage())) + 'cpu.get_float_value(frame, %d)) == %r' % (i, getfloatstorage(v))) else: print >>s, (' assert cpu.get_int_value(frame, %d) == %d' - % (i, v.getint())) + % (i, getint(v))) self.names = names s.flush() @@ -295,7 +338,7 @@ builder.intvars.append(v_result) boolres = self.boolres if boolres == 'sometimes': - boolres = v_result.getint() in [0, 1] + boolres = getint(v_result) in [0, 1] if 
boolres: builder.boolvars.append(v_result) elif v_result.type == FLOAT: @@ -346,10 +389,10 @@ v_second = ConstInt((value & self.and_mask) | self.or_mask) else: v = r.choice(builder.intvars) - v_value = v.getint() + v_value = getint(v) if (v_value & self.and_mask) != v_value: v = builder.do(rop.INT_AND, [v, ConstInt(self.and_mask)]) - v_value = v.getint() + v_value = getint(v) if (v_value | self.or_mask) != v_value: v = builder.do(rop.INT_OR, [v, ConstInt(self.or_mask)]) v_second = v @@ -395,9 +438,9 @@ v_second = ConstFloat(r.random_float_storage()) else: v_second = r.choice(builder.floatvars) - if abs(v_first.getfloat()) > 1E100 or abs(v_second.getfloat()) > 1E100: + if abs(getfloat(v_first)) > 1E100 or abs(getfloat(v_second)) > 1E100: raise CannotProduceOperation # avoid infinities - if abs(v_second.getfloat()) < 1E-100: + if abs(getfloat(v_second)) < 1E-100: raise CannotProduceOperation # e.g. division by zero error self.put(builder, [v_first, v_second]) @@ -432,7 +475,7 @@ if not builder.floatvars: raise CannotProduceOperation box = r.choice(builder.floatvars) - if not (-sys.maxint-1 <= box.getfloat() <= sys.maxint): + if not (-sys.maxint-1 <= getfloat(box) <= sys.maxint): raise CannotProduceOperation # would give an overflow self.put(builder, [box]) @@ -440,8 +483,8 @@ def gen_guard(self, builder, r): v = builder.get_bool_var(r) op = ResOperation(self.opnum, [v]) - passing = ((self.opnum == rop.GUARD_TRUE and v.getint()) or - (self.opnum == rop.GUARD_FALSE and not v.getint())) + passing = ((self.opnum == rop.GUARD_TRUE and getint(v)) or + (self.opnum == rop.GUARD_FALSE and not getint(v))) return op, passing def produce_into(self, builder, r): @@ -459,8 +502,8 @@ raise CannotProduceOperation box = r.choice(builder.ptrvars)[0] op = ResOperation(self.opnum, [box]) - passing = ((self.opnum == rop.GUARD_NONNULL and box.getref_base()) or - (self.opnum == rop.GUARD_ISNULL and not box.getref_base())) + passing = ((self.opnum == rop.GUARD_NONNULL and getref_base(box)) or + (self.opnum == rop.GUARD_ISNULL and not getref_base(box))) return op, passing class GuardValueOperation(GuardOperation): @@ -470,14 +513,14 @@ other = r.choice(builder.intvars) else: if r.random() < 0.75: - value = v.getint() + value = getint(v) elif r.random() < 0.5: - value = v.getint() ^ 1 + value = getint(v) ^ 1 else: value = r.random_integer() other = ConstInt(value) op = ResOperation(self.opnum, [v, other]) - return op, (v.getint() == other.getint()) + return op, (getint(v) == getint(other)) # ____________________________________________________________ @@ -675,7 +718,7 @@ assert not hasattr(loop, '_targettoken') for i in range(position): op = loop.operations[i] - if (not op.has_no_side_effect() + if (not rop.has_no_side_effect(op.opnum) or op.type not in (INT, FLOAT)): position = i break # cannot move the LABEL later @@ -728,9 +771,9 @@ self.expected = {} for v in endvars: if v.type == INT: - self.expected[v] = v.getint() + self.expected[v] = getint(v) elif v.type == FLOAT: - self.expected[v] = v.getfloatstorage() + self.expected[v] = getfloatstorage(v) else: assert 0, v.type @@ -742,7 +785,7 @@ args = [] for box in self.startvars: if box not in self.loop.inputargs: - box = box.constbox() + box = constbox(box) args.append(box) self.cpu.compile_loop(self.loop.inputargs, [ResOperation(rop.JUMP, args, @@ -760,7 +803,7 @@ def clear_state(self): for v, S, fields in self.prebuilt_ptr_consts: - container = v.getref_base()._obj.container + container = getref_base(v)._obj.container for name, value in fields.items(): if 
isinstance(name, str): setattr(container, name, value) @@ -781,9 +824,9 @@ arguments = [] for box in self.loop.inputargs: if box.type == INT: - arguments.append(box.getint()) + arguments.append(getint(box)) elif box.type == FLOAT: - arguments.append(box.getfloatstorage()) + arguments.append(getfloatstorage(box)) else: assert 0, box.type deadframe = cpu.execute_token(self.runjitcelltoken(), *arguments) @@ -795,7 +838,7 @@ if v not in self.expected: assert v.getopnum() == rop.SAME_AS_I # special case assert isinstance(v.getarg(0), ConstInt) - self.expected[v] = v.getarg(0).getint() + self.expected[v] = getint(v.getarg(0)) if v.type == FLOAT: value = cpu.get_float_value(deadframe, i) else: @@ -807,7 +850,7 @@ ) exc = cpu.grab_exc_value(deadframe) if (self.guard_op is not None and - self.guard_op.is_guard_exception()): + rop.is_guard_exception(self.guard_op.getopnum())): if self.guard_op.getopnum() == rop.GUARD_NO_EXCEPTION: do_assert(exc, "grab_exc_value() should not be %r" % (exc,)) @@ -840,7 +883,7 @@ # generate the branch: a sequence of operations that ends in a FINISH subloop = DummyLoop([]) self.subloops.append(subloop) # keep around for debugging - if guard_op.is_guard_exception(): + if rop.is_guard_exception(guard_op.getopnum()): subloop.operations.append(exc_handling(guard_op)) bridge_builder = self.builder.fork(self.builder.cpu, subloop, op.getfailargs()[:]) @@ -876,9 +919,9 @@ args = [] for x in subset: if x.type == INT: - args.append(InputArgInt(x.getint())) + args.append(InputArgInt(getint(x))) elif x.type == FLOAT: - args.append(InputArgFloat(x.getfloatstorage())) + args.append(InputArgFloat(getfloatstorage(x))) else: assert 0, x.type rl = RandomLoop(self.builder.cpu, self.builder.fork, diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -358,11 +358,11 @@ assert self.assembler.mc._frame_size == DEFAULT_FRAME_BYTES self.rm.position = i self.xrm.position = i - if op.has_no_side_effect() and op not in self.longevity: + if rop.has_no_side_effect(op.opnum) and op not in self.longevity: i += 1 self.possibly_free_vars_for_op(op) continue - if not we_are_translated() and op.getopnum() == -127: + if not we_are_translated() and op.getopnum() == rop.FORCE_SPILL: self._consider_force_spill(op) else: oplist[op.getopnum()](self, op) diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -33,7 +33,7 @@ def ensure_can_hold_constants(self, asm, op): # allocates 8 bytes in memory for pointers, long integers or floats - if op.is_jit_debug(): + if rop.is_jit_debug(op): return for arg in op.getarglist(): diff --git a/rpython/jit/backend/zarch/regalloc.py b/rpython/jit/backend/zarch/regalloc.py --- a/rpython/jit/backend/zarch/regalloc.py +++ b/rpython/jit/backend/zarch/regalloc.py @@ -476,7 +476,8 @@ self.assembler.mc.mark_op(op) self.rm.position = i self.fprm.position = i - if op.has_no_side_effect() and op not in self.longevity: + opnum = op.getopnum() + if rop.has_no_side_effect(opnum) and op not in self.longevity: i += 1 self.possibly_free_vars_for_op(op) continue @@ -488,8 +489,7 @@ else: self.fprm.temp_boxes.append(box) # - opnum = op.getopnum() - if not we_are_translated() and opnum == -127: + if not we_are_translated() and opnum == rop.FORCE_SPILL: self._consider_force_spill(op) else: arglocs = prepare_oplist[opnum](self, op) diff --git 
a/rpython/jit/codewriter/jtransform.py b/rpython/jit/codewriter/jtransform.py --- a/rpython/jit/codewriter/jtransform.py +++ b/rpython/jit/codewriter/jtransform.py @@ -688,6 +688,10 @@ ARRAY = op.args[0].concretetype.TO if self._array_of_voids(ARRAY): return [] + if isinstance(ARRAY, lltype.FixedSizeArray): + raise NotImplementedError( + "%r uses %r, which is not supported by the JIT codewriter" + % (self.graph, ARRAY)) if op.args[0] in self.vable_array_vars: # for virtualizables vars = self.vable_array_vars[op.args[0]] (v_base, arrayfielddescr, arraydescr) = vars @@ -718,6 +722,10 @@ ARRAY = op.args[0].concretetype.TO if self._array_of_voids(ARRAY): return [] + if isinstance(ARRAY, lltype.FixedSizeArray): + raise NotImplementedError( + "%r uses %r, which is not supported by the JIT codewriter" + % (self.graph, ARRAY)) if op.args[0] in self.vable_array_vars: # for virtualizables vars = self.vable_array_vars[op.args[0]] (v_base, arrayfielddescr, arraydescr) = vars diff --git a/rpython/jit/codewriter/test/test_jtransform.py b/rpython/jit/codewriter/test/test_jtransform.py --- a/rpython/jit/codewriter/test/test_jtransform.py +++ b/rpython/jit/codewriter/test/test_jtransform.py @@ -1316,6 +1316,21 @@ tr = Transformer(None, None) py.test.raises(NotImplementedError, tr.rewrite_operation, op) +def test_no_fixedsizearray(): + A = lltype.FixedSizeArray(lltype.Signed, 5) + v_x = varoftype(lltype.Ptr(A)) + op = SpaceOperation('getarrayitem', [v_x, Constant(0, lltype.Signed)], + varoftype(lltype.Signed)) + tr = Transformer(None, None) + tr.graph = 'demo' + py.test.raises(NotImplementedError, tr.rewrite_operation, op) + op = SpaceOperation('setarrayitem', [v_x, Constant(0, lltype.Signed), + Constant(42, lltype.Signed)], + varoftype(lltype.Void)) + e = py.test.raises(NotImplementedError, tr.rewrite_operation, op) + assert str(e.value) == ( + "'demo' uses %r, which is not supported by the JIT codewriter" % (A,)) + def _test_threadlocalref_get(loop_inv): from rpython.rlib.rthread import ThreadLocalField tlfield = ThreadLocalField(lltype.Signed, 'foobar_test_', diff --git a/rpython/jit/metainterp/blackhole.py b/rpython/jit/metainterp/blackhole.py --- a/rpython/jit/metainterp/blackhole.py +++ b/rpython/jit/metainterp/blackhole.py @@ -1585,7 +1585,6 @@ def _done_with_this_frame(self): # rare case: we only get there if the blackhole interps all returned # normally (in general we get a ContinueRunningNormally exception). 
- sd = self.builder.metainterp_sd kind = self._return_type if kind == 'v': raise jitexc.DoneWithThisFrameVoid() diff --git a/rpython/jit/metainterp/compile.py b/rpython/jit/metainterp/compile.py --- a/rpython/jit/metainterp/compile.py +++ b/rpython/jit/metainterp/compile.py @@ -28,12 +28,11 @@ class CompileData(object): memo = None + log_noopt = True def forget_optimization_info(self): - for arg in self.start_label.getarglist(): + for arg in self.trace.inputargs: arg.set_forwarded(None) - for op in self.operations: - op.set_forwarded(None) class LoopCompileData(CompileData): """ An object that accumulates all of the necessary info for @@ -41,15 +40,13 @@ This is the case of label() ops label() """ - def __init__(self, start_label, end_label, operations, - call_pure_results=None, enable_opts=None): - self.start_label = start_label - self.end_label = end_label + def __init__(self, trace, runtime_boxes, call_pure_results=None, + enable_opts=None): self.enable_opts = enable_opts - assert start_label.getopnum() == rop.LABEL - assert end_label.getopnum() == rop.LABEL - self.operations = operations + self.trace = trace self.call_pure_results = call_pure_results + assert runtime_boxes is not None + self.runtime_boxes = runtime_boxes def optimize(self, metainterp_sd, jitdriver_sd, optimizations, unroll): from rpython.jit.metainterp.optimizeopt.unroll import (UnrollOptimizer, @@ -57,23 +54,21 @@ if unroll: opt = UnrollOptimizer(metainterp_sd, jitdriver_sd, optimizations) - return opt.optimize_preamble(self.start_label, self.end_label, - self.operations, + return opt.optimize_preamble(self.trace, + self.runtime_boxes, self.call_pure_results, self.box_names_memo) else: opt = Optimizer(metainterp_sd, jitdriver_sd, optimizations) - return opt.propagate_all_forward(self.start_label.getarglist(), - self.operations, self.call_pure_results) + return opt.propagate_all_forward(self.trace, self.call_pure_results) class SimpleCompileData(CompileData): """ This represents label() ops jump with no extra info associated with the label """ - def __init__(self, start_label, operations, call_pure_results=None, + def __init__(self, trace, call_pure_results=None, enable_opts=None): - self.start_label = start_label - self.operations = operations + self.trace = trace self.call_pure_results = call_pure_results self.enable_opts = enable_opts @@ -82,17 +77,17 @@ #assert not unroll opt = Optimizer(metainterp_sd, jitdriver_sd, optimizations) - return opt.propagate_all_forward(self.start_label.getarglist(), - self.operations, self.call_pure_results) + return opt.propagate_all_forward(self.trace.get_iter(), + self.call_pure_results) class BridgeCompileData(CompileData): """ This represents ops() with a jump at the end that goes to some loop, we need to deal with virtual state and inlining of short preamble """ - def __init__(self, start_label, operations, call_pure_results=None, + def __init__(self, trace, runtime_boxes, call_pure_results=None, enable_opts=None, inline_short_preamble=False): - self.start_label = start_label - self.operations = operations + self.trace = trace + self.runtime_boxes = runtime_boxes self.call_pure_results = call_pure_results self.enable_opts = enable_opts self.inline_short_preamble = inline_short_preamble @@ -101,7 +96,7 @@ from rpython.jit.metainterp.optimizeopt.unroll import UnrollOptimizer opt = UnrollOptimizer(metainterp_sd, jitdriver_sd, optimizations) - return opt.optimize_bridge(self.start_label, self.operations, + return opt.optimize_bridge(self.trace, self.runtime_boxes, 
self.call_pure_results, self.inline_short_preamble, self.box_names_memo) @@ -110,12 +105,13 @@ """ This represents label() ops jump with extra info that's from the run of LoopCompileData. Jump goes to the same label """ - def __init__(self, start_label, end_jump, operations, state, + log_noopt = False + + def __init__(self, trace, celltoken, state, call_pure_results=None, enable_opts=None, inline_short_preamble=True): - self.start_label = start_label - self.end_jump = end_jump - self.operations = operations + self.trace = trace + self.celltoken = celltoken self.enable_opts = enable_opts self.state = state self.call_pure_results = call_pure_results @@ -126,9 +122,8 @@ assert unroll # we should not be here if it's disabled opt = UnrollOptimizer(metainterp_sd, jitdriver_sd, optimizations) - return opt.optimize_peeled_loop(self.start_label, self.end_jump, - self.operations, self.state, self.call_pure_results, - self.inline_short_preamble) + return opt.optimize_peeled_loop(self.trace, self.celltoken, self.state, + self.call_pure_results, self.inline_short_preamble) def show_procedures(metainterp_sd, procedure=None, error=None): # debugging @@ -209,23 +204,21 @@ # ____________________________________________________________ -def compile_simple_loop(metainterp, greenkey, start, inputargs, ops, jumpargs, - enable_opts): +def compile_simple_loop(metainterp, greenkey, trace, runtime_args, enable_opts, + cut_at): from rpython.jit.metainterp.optimizeopt import optimize_trace jitdriver_sd = metainterp.jitdriver_sd metainterp_sd = metainterp.staticdata jitcell_token = make_jitcell_token(jitdriver_sd) - label = ResOperation(rop.LABEL, inputargs[:], descr=jitcell_token) - jump_op = ResOperation(rop.JUMP, jumpargs[:], descr=jitcell_token) call_pure_results = metainterp.call_pure_results - data = SimpleCompileData(label, ops + [jump_op], - call_pure_results=call_pure_results, - enable_opts=enable_opts) + data = SimpleCompileData(trace, call_pure_results=call_pure_results, + enable_opts=enable_opts) try: loop_info, ops = optimize_trace(metainterp_sd, jitdriver_sd, data, metainterp.box_names_memo) except InvalidLoop: + trace.cut_at(cut_at) return None loop = create_empty_loop(metainterp) loop.original_jitcell_token = jitcell_token @@ -242,7 +235,7 @@ loop.check_consistency() jitcell_token.target_tokens = [target_token] send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, "loop", - inputargs, metainterp.box_names_memo) + runtime_args, metainterp.box_names_memo) record_loop_or_bridge(metainterp_sd, loop) return target_token @@ -256,6 +249,7 @@ metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd history = metainterp.history + trace = history.trace warmstate = jitdriver_sd.warmstate enable_opts = jitdriver_sd.warmstate.enable_opts @@ -265,16 +259,16 @@ enable_opts = enable_opts.copy() del enable_opts['unroll'] - ops = history.operations[start:] + jitcell_token = make_jitcell_token(jitdriver_sd) + cut_at = history.get_trace_position() + history.record(rop.JUMP, jumpargs, None, descr=jitcell_token) + if start != (0, 0, 0): + trace = trace.cut_trace_from(start, inputargs) if 'unroll' not in enable_opts or not metainterp.cpu.supports_guard_gc_type: - return compile_simple_loop(metainterp, greenkey, start, inputargs, ops, - jumpargs, enable_opts) - jitcell_token = make_jitcell_token(jitdriver_sd) - label = ResOperation(rop.LABEL, inputargs, - descr=TargetToken(jitcell_token)) - end_label = ResOperation(rop.LABEL, jumpargs, descr=jitcell_token) + return 
compile_simple_loop(metainterp, greenkey, trace, jumpargs, + enable_opts, cut_at) call_pure_results = metainterp.call_pure_results - preamble_data = LoopCompileData(label, end_label, ops, + preamble_data = LoopCompileData(trace, jumpargs, call_pure_results=call_pure_results, enable_opts=enable_opts) try: @@ -282,17 +276,15 @@ preamble_data, metainterp.box_names_memo) except InvalidLoop: + history.cut(cut_at) return None metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd - end_label = ResOperation(rop.LABEL, inputargs, - descr=jitcell_token) - jump_op = ResOperation(rop.JUMP, jumpargs, descr=jitcell_token) start_descr = TargetToken(jitcell_token, original_jitcell_token=jitcell_token) jitcell_token.target_tokens = [start_descr] - loop_data = UnrolledLoopData(end_label, jump_op, ops, start_state, + loop_data = UnrolledLoopData(trace, jitcell_token, start_state, call_pure_results=call_pure_results, enable_opts=enable_opts) try: @@ -300,11 +292,12 @@ loop_data, metainterp.box_names_memo) except InvalidLoop: + history.cut(cut_at) return None if ((warmstate.vec and jitdriver_sd.vec) or warmstate.vec_all): from rpython.jit.metainterp.optimizeopt.vector import optimize_vector - loop_info, loop_ops = optimize_vector(metainterp_sd, + loop_info, loop_ops = optimize_vector(trace, metainterp_sd, jitdriver_sd, warmstate, loop_info, loop_ops, jitcell_token) @@ -343,22 +336,20 @@ to the first operation. """ from rpython.jit.metainterp.optimizeopt import optimize_trace - from rpython.jit.metainterp.optimizeopt.optimizer import BasicLoopInfo - history = metainterp.history + trace = metainterp.history.trace.cut_trace_from(start, inputargs) metainterp_sd = metainterp.staticdata jitdriver_sd = metainterp.jitdriver_sd + history = metainterp.history loop_jitcell_token = metainterp.get_procedure_token(greenkey) assert loop_jitcell_token - end_label = ResOperation(rop.LABEL, inputargs[:], - descr=loop_jitcell_token) - jump_op = ResOperation(rop.JUMP, jumpargs[:], descr=loop_jitcell_token) + cut = history.get_trace_position() + history.record(rop.JUMP, jumpargs[:], None, descr=loop_jitcell_token) enable_opts = jitdriver_sd.warmstate.enable_opts - ops = history.operations[start:] call_pure_results = metainterp.call_pure_results - loop_data = UnrolledLoopData(end_label, jump_op, ops, start_state, + loop_data = UnrolledLoopData(trace, loop_jitcell_token, start_state, call_pure_results=call_pure_results, enable_opts=enable_opts) try: @@ -367,8 +358,9 @@ metainterp.box_names_memo) except InvalidLoop: # Fall back on jumping directly to preamble - jump_op = ResOperation(rop.JUMP, inputargs[:], descr=loop_jitcell_token) - loop_data = UnrolledLoopData(end_label, jump_op, [jump_op], start_state, + history.cut(cut) + history.record(rop.JUMP, jumpargs[:], None, descr=loop_jitcell_token) + loop_data = UnrolledLoopData(trace, loop_jitcell_token, start_state, call_pure_results=call_pure_results, enable_opts=enable_opts, inline_short_preamble=False) @@ -377,9 +369,13 @@ loop_data, metainterp.box_names_memo) except InvalidLoop: + history.cut(cut) return None - label_token = loop_info.label_op.getdescr() + label_op = loop_info.label_op + if label_op is None: + assert False, "unreachable code" # hint for some strange tests + label_token = label_op.getdescr() assert isinstance(label_token, TargetToken) if label_token.short_preamble: metainterp_sd.logger_ops.log_short_preamble([], @@ -446,13 +442,13 @@ box = inputargs[i] opnum = OpHelpers.getfield_for_descr(descr) emit_op(extra_ops, - ResOperation(opnum, 
[vable_box], descr)) + ResOperation(opnum, [vable_box], descr=descr)) box.set_forwarded(extra_ops[-1]) i += 1 arrayindex = 0 for descr in vinfo.array_field_descrs: arraylen = vinfo.get_array_length(vable, arrayindex) - arrayop = ResOperation(rop.GETFIELD_GC_R, [vable_box], descr) + arrayop = ResOperation(rop.GETFIELD_GC_R, [vable_box], descr=descr) emit_op(extra_ops, arrayop) arraydescr = vinfo.array_descrs[arrayindex] assert i + arraylen <= len(inputargs) @@ -1024,9 +1020,9 @@ metainterp_sd.stats.add_jitcell_token(jitcell_token) -def compile_trace(metainterp, resumekey): +def compile_trace(metainterp, resumekey, runtime_boxes): """Try to compile a new bridge leading from the beginning of the history - to some existing place. + to some existging place. """ from rpython.jit.metainterp.optimizeopt import optimize_trace @@ -1044,20 +1040,19 @@ else: inline_short_preamble = True inputargs = metainterp.history.inputargs[:] - operations = metainterp.history.operations - label = ResOperation(rop.LABEL, inputargs) + trace = metainterp.history.trace jitdriver_sd = metainterp.jitdriver_sd enable_opts = jitdriver_sd.warmstate.enable_opts call_pure_results = metainterp.call_pure_results - if operations[-1].getopnum() == rop.JUMP: - data = BridgeCompileData(label, operations[:], + if metainterp.history.ends_with_jump: + data = BridgeCompileData(trace, runtime_boxes, call_pure_results=call_pure_results, enable_opts=enable_opts, inline_short_preamble=inline_short_preamble) else: - data = SimpleCompileData(label, operations[:], + data = SimpleCompileData(trace, call_pure_results=call_pure_results, enable_opts=enable_opts) try: diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py --- a/rpython/jit/metainterp/executor.py +++ b/rpython/jit/metainterp/executor.py @@ -9,7 +9,7 @@ from rpython.jit.metainterp.history import INT, REF, FLOAT, VOID, AbstractDescr from rpython.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr from rpython.jit.metainterp import resoperation -from rpython.jit.metainterp.resoperation import rop +from rpython.jit.metainterp.resoperation import rop, opname from rpython.jit.metainterp.blackhole import BlackholeInterpreter, NULL from rpython.jit.codewriter import longlong @@ -314,7 +314,8 @@ def _make_execute_list(): execute_by_num_args = {} - for key, value in rop.__dict__.items(): + for key in opname.values(): + value = getattr(rop, key) if not key.startswith('_'): if (rop._FINAL_FIRST <= value <= rop._FINAL_LAST or rop._GUARD_FIRST <= value <= rop._GUARD_LAST): @@ -384,6 +385,11 @@ rop.CALL_MALLOC_NURSERY_VARSIZE_FRAME, rop.NURSERY_PTR_INCREMENT, rop.LABEL, + rop.ESCAPE_I, From pypy.commits at gmail.com Thu Mar 31 09:22:24 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 31 Mar 2016 06:22:24 -0700 (PDT) Subject: [pypy-commit] pypy default: add a __pypy__._promote Message-ID: <56fd2490.6672c20a.22f0f.18ab@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r83460:422ee5506a5a Date: 2016-03-31 15:21 +0200 http://bitbucket.org/pypy/pypy/changeset/422ee5506a5a/ Log: add a __pypy__._promote diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -90,6 +90,7 @@ 'save_module_content_for_future_reload': 'interp_magic.save_module_content_for_future_reload', 'decode_long' : 'interp_magic.decode_long', + '_promote' : 'interp_magic._promote', } if sys.platform == 'win32': interpleveldefs['get_console_cp'] = 
'interp_magic.get_console_cp' diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -168,3 +168,22 @@ except InvalidEndiannessError: raise oefmt(space.w_ValueError, "invalid byteorder argument") return space.newlong_from_rbigint(result) + +def _promote(space, w_obj): + """ Promote the first argument of the function and return it. Promote is by + value for ints, floats, strs, unicodes (but not subclasses thereof) and by + reference otherwise. + + This function is experimental!""" + from rpython.rlib import jit + if space.is_w(space.type(w_obj), space.w_int): + jit.promote(space.int_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_float): + jit.promote(space.float_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_str): + jit.promote(space.str_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_unicode): + jit.promote(space.unicode_w(w_obj)) + else: + jit.promote(w_obj) + return w_obj diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py --- a/pypy/module/__pypy__/test/test_magic.py +++ b/pypy/module/__pypy__/test/test_magic.py @@ -47,3 +47,16 @@ assert decode_long('\x00\x80', 'little', False) == 32768 assert decode_long('\x00\x80', 'little', True) == -32768 raises(ValueError, decode_long, '', 'foo') + + def test_promote(self): + from __pypy__ import _promote + assert _promote(1) == 1 + assert _promote(1.1) == 1.1 + assert _promote("abc") == "abc" + assert _promote(u"abc") == u"abc" + l = [] + assert _promote(l) is l + class A(object): + pass + a = A() + assert _promote(a) is a From pypy.commits at gmail.com Thu Mar 31 09:38:59 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 31 Mar 2016 06:38:59 -0700 (PDT) Subject: [pypy-commit] pypy jit-constptr-2: Implementation and test of the %rip+offset addressing mode on x86-64 Message-ID: <56fd2873.e7bec20a.9c9d8.1649@mx.google.com> Author: Armin Rigo Branch: jit-constptr-2 Changeset: r83461:ecde1ca1079a Date: 2016-03-31 14:44 +0100 http://bitbucket.org/pypy/pypy/changeset/ecde1ca1079a/ Log: Implementation and test of the %rip+offset addressing mode on x86-64 diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -297,6 +297,20 @@ return encode_abs, argnum, None, None # ____________________________________________________________ +# ***X86_64 only*** +# Emit a mod/rm referencing an address "RIP + immediate_offset". 
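
For context on the encoding implemented just below: in 64-bit mode a ModRM byte with mod=00 and r/m=101 does not select an absolute disp32 operand as it does on x86-32, it selects "RIP + disp32". That is what the 0x05 constant encodes, with the register (or opcode-extension) field OR-ed in via orbyte, and the disp32 that follows is measured from the address of the next instruction, which is why the backend can only patch it once the instruction's position is known. A tiny illustrative sketch, not from the changeset; modrm_rip_relative is a made-up name.

    def modrm_rip_relative(reg):
        # mod = 00 (bits 7..6), reg (bits 5..3), r/m = 101 (bits 2..0)
        assert 0 <= reg <= 7
        return 0x05 | (reg << 3)

    assert modrm_rip_relative(0) == 0x05   # e.g. MOV rax, [rip+disp32]
    assert modrm_rip_relative(3) == 0x1d   # e.g. MOV rbx, [rip+disp32]
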
+ + at specialize.arg(2) +def encode_rip_offset(mc, immediate, _, orbyte): + assert mc.WORD == 8 + mc.writechar(chr(0x05 | orbyte)) + mc.writeimm32(immediate) + return 0 + +def rip_offset(argnum): + return encode_rip_offset, argnum, None, None + +# ____________________________________________________________ # For 64-bits mode: the REX.W, REX.R, REX.X, REG.B prefixes REX_W = 8 @@ -914,6 +928,7 @@ add_insn('m', mem_reg_plus_const(modrm_argnum)) add_insn('a', mem_reg_plus_scaled_reg_plus_const(modrm_argnum)) add_insn('j', abs_(modrm_argnum)) + add_insn('p', rip_offset(modrm_argnum)) # Define a regular MOV, and a variant MOV32 that only uses the low 4 bytes of a # register diff --git a/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py b/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py --- a/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py +++ b/rpython/jit/backend/x86/test/test_rx86_32_auto_encoding.py @@ -279,6 +279,8 @@ if modes: tests = self.get_all_tests() m = modes[0] + if m == 'p' and self.WORD == 4: + return [] lst = tests[m]() random.shuffle(lst) if methname == 'PSRAD_xi' and m == 'i': diff --git a/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py b/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py --- a/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py +++ b/rpython/jit/backend/x86/test/test_rx86_64_auto_encoding.py @@ -51,3 +51,19 @@ def test_extra_MOV_ri64(self): self.imm32_tests = self.imm64_tests # patch on 'self' self.complete_test('MOV_ri') + + def rip_relative_tests(self): + return [-0x80000000, 0x7FFFFFFF, 128, 256, -129, -255, 0, 127] + + def get_all_tests(self): + d = super(TestRx86_64, self).get_all_tests() + d['p'] = self.rip_relative_tests + return d + + def assembler_operand_rip_relative(self, value): + return '%d(%%rip)' % value + + def get_all_assembler_operands(self): + d = super(TestRx86_64, self).get_all_assembler_operands() + d['p'] = self.assembler_operand_rip_relative + return d From pypy.commits at gmail.com Thu Mar 31 09:44:44 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 31 Mar 2016 06:44:44 -0700 (PDT) Subject: [pypy-commit] pypy jit-constptr-2: x86-64 implementation of load_from_gc_table Message-ID: <56fd29cc.05de1c0a.e504d.1954@mx.google.com> Author: Armin Rigo Branch: jit-constptr-2 Changeset: r83462:d84828e7649c Date: 2016-03-31 14:49 +0100 http://bitbucket.org/pypy/pypy/changeset/d84828e7649c/ Log: x86-64 implementation of load_from_gc_table diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -2,7 +2,7 @@ import os import py -from rpython.jit.backend.llsupport import symbolic, jitframe, rewrite +from rpython.jit.backend.llsupport import symbolic, jitframe, rewrite, gcreftracer from rpython.jit.backend.llsupport.assembler import (GuardToken, BaseAssembler, DEBUG_COUNTER, debug_bridge) from rpython.jit.backend.llsupport.asmmemmgr import MachineDataBlockWrapper @@ -489,7 +489,6 @@ frame_info = self.datablockwrapper.malloc_aligned( jitframe.JITFRAMEINFO_SIZE, alignment=WORD) clt.frame_info = rffi.cast(jitframe.JITFRAMEINFOPTR, frame_info) - clt.allgcrefs = [] clt.frame_info.clear() # for now if log: @@ -498,10 +497,13 @@ regalloc = RegAlloc(self, self.cpu.translate_support_code) # + allgcrefs = [] + operations = regalloc.prepare_loop(inputargs, operations, + looptoken, allgcrefs) + self.reserve_gcref_table(allgcrefs) + functionpos = self.mc.get_relative_pos() 
self._call_header_with_stack_check() self._check_frame_depth_debug(self.mc) - operations = regalloc.prepare_loop(inputargs, operations, - looptoken, clt.allgcrefs) looppos = self.mc.get_relative_pos() frame_depth_no_fixed_size = self._assemble(regalloc, inputargs, operations) @@ -512,6 +514,7 @@ full_size = self.mc.get_relative_pos() # rawstart = self.materialize_loop(looptoken) + self.patch_gcref_table(looptoken, allgcrefs, rawstart) self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE, rawstart) looptoken._ll_loop_code = looppos + rawstart @@ -530,7 +533,7 @@ looptoken._x86_rawstart = rawstart looptoken._x86_fullsize = full_size looptoken._x86_ops_offset = ops_offset - looptoken._ll_function_addr = rawstart + looptoken._ll_function_addr = rawstart + functionpos if logger: logger.log_loop(inputargs, operations, 0, "rewritten", name=loopname, ops_offset=ops_offset) @@ -563,11 +566,13 @@ 'b', descr_number) arglocs = self.rebuild_faillocs_from_descr(faildescr, inputargs) regalloc = RegAlloc(self, self.cpu.translate_support_code) - startpos = self.mc.get_relative_pos() + allgcrefs = [] operations = regalloc.prepare_bridge(inputargs, arglocs, operations, - self.current_clt.allgcrefs, + allgcrefs, self.current_clt.frame_info) + self.reserve_gcref_table(allgcrefs) + startpos = self.mc.get_relative_pos() self._check_frame_depth(self.mc, regalloc.get_gcmap()) bridgestartpos = self.mc.get_relative_pos() self._update_at_exit(arglocs, inputargs, faildescr, regalloc) @@ -577,12 +582,13 @@ fullsize = self.mc.get_relative_pos() # rawstart = self.materialize_loop(original_loop_token) + self.patch_gcref_table(original_loop_token, allgcrefs, rawstart) self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE, rawstart) debug_bridge(descr_number, rawstart, codeendpos) self.patch_pending_failure_recoveries(rawstart) # patch the jump from original guard - self.patch_jump_for_descr(faildescr, rawstart) + self.patch_jump_for_descr(faildescr, rawstart + startpos) ops_offset = self.mc.ops_offset frame_depth = max(self.current_clt.frame_info.jfi_frame_depth, frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE) @@ -654,6 +660,22 @@ # update the guard to jump right to this custom piece of assembler self.patch_jump_for_descr(faildescr, rawstart) + def reserve_gcref_table(self, allgcrefs): + assert IS_X86_64, "XXX" + gcref_table_size = len(allgcrefs) * WORD + # align to a multiple of 16 + gcref_table_size = (gcref_table_size + 15) & ~15 + mc = self.mc + assert mc.get_relative_pos() == 0 + for i in range(gcref_table_size): + mc.writechar('\x00') + + def patch_gcref_table(self, looptoken, allgcrefs, rawstart): + assert IS_X86_64, "XXX" + tracer = gcreftracer.make_gcref_tracer(rawstart, allgcrefs) + gcreftracers = self.get_asmmemmgr_gcreftracers(looptoken) + gcreftracers.append(tracer) # keepalive + def write_pending_failure_recoveries(self, regalloc): # for each pending guard, generate the code of the recovery stub # at the end of self.mc. 
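
A quick illustration of the layout set up by reserve_gcref_table() above: the table is one machine word per gcref, placed at the very start of the code buffer and padded to a 16-byte boundary, which is why the real entry point becomes rawstart + functionpos rather than rawstart. The size computation can be sketched as follows; this is editorial, not from the changeset, and WORD is assumed to be 8 as on x86-64.

    WORD = 8   # x86-64

    def gcref_table_size(n_gcrefs):
        size = n_gcrefs * WORD
        return (size + 15) & ~15        # round up to a multiple of 16

    assert gcref_table_size(0) == 0
    assert gcref_table_size(1) == 16
    assert gcref_table_size(3) == 32
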
@@ -777,6 +799,12 @@ clt.asmmemmgr_blocks = [] return clt.asmmemmgr_blocks + def get_asmmemmgr_gcreftracers(self, looptoken): + clt = looptoken.compiled_loop_token + if clt.asmmemmgr_gcreftracers is None: + clt.asmmemmgr_gcreftracers = [] + return clt.asmmemmgr_gcreftracers + def materialize_loop(self, looptoken): self.datablockwrapper.done() # finish using cpu.asmmemmgr self.datablockwrapper = None @@ -1358,6 +1386,17 @@ genop_cast_ptr_to_int = _genop_same_as genop_cast_int_to_ptr = _genop_same_as + def genop_load_from_gc_table(self, op, arglocs, resloc): + [loc] = arglocs + assert isinstance(loc, ImmedLoc) + assert isinstance(resloc, RegLoc) + address_in_buffer = loc.value * WORD # at the start of the buffer + assert IS_X86_64, "XXX" + self.mc.MOV_rp(resloc.value, 0) # %rip-relative + p_location = self.mc.get_relative_pos() + offset = address_in_buffer - p_location + self.mc.overwrite32(p_location-4, offset) + def genop_int_force_ge_zero(self, op, arglocs, resloc): self.mc.TEST(arglocs[0], arglocs[0]) self.mov(imm0, resloc) diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -1140,6 +1140,7 @@ consider_same_as_i = _consider_same_as consider_same_as_r = _consider_same_as consider_same_as_f = _consider_same_as + consider_load_from_gc_table = _consider_same_as def consider_int_force_ge_zero(self, op): argloc = self.make_sure_var_in_reg(op.getarg(0)) From pypy.commits at gmail.com Thu Mar 31 10:01:55 2016 From: pypy.commits at gmail.com (plan_rich) Date: Thu, 31 Mar 2016 07:01:55 -0700 (PDT) Subject: [pypy-commit] pypy default: _patch_jump_for_descr cannot be used to patch any jump (stitch bridge would not work if the offset was too big. thx arigato) Message-ID: <56fd2dd3.2179c20a.7af1c.189a@mx.google.com> Author: Richard Plangger Branch: Changeset: r83463:c5ec7f300d0a Date: 2016-03-31 15:58 +0200 http://bitbucket.org/pypy/pypy/changeset/c5ec7f300d0a/ Log: _patch_jump_for_descr cannot be used to patch any jump (stitch bridge would not work if the offset was too big. thx arigato) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -645,15 +645,28 @@ pass elif gloc is not bloc: self.mov(gloc, bloc) + offset = self.mc.get_relative_pos() self.mc.JMP_l(0) + self.mc.writeimm32(0) self.mc.force_frame_size(DEFAULT_FRAME_BYTES) - offset = self.mc.get_relative_pos() - 4 rawstart = self.materialize_loop(looptoken) - # update the jump to the real trace - self._patch_jump_for_descr(rawstart + offset, asminfo.rawstart) + # update the jump (above) to the real trace + self._patch_jump_to(rawstart + offset, asminfo.rawstart) # update the guard to jump right to this custom piece of assembler self.patch_jump_for_descr(faildescr, rawstart) + def _patch_jump_to(self, adr_jump_offset, adr_new_target): + assert adr_jump_offset != 0 + offset = adr_new_target - (adr_jump_offset + 5) + mc = codebuf.MachineCodeBlockWrapper() + mc.force_frame_size(DEFAULT_FRAME_BYTES) + if rx86.fits_in_32bits(offset): + mc.JMP_l(offset) + else: + mc.MOV_ri(X86_64_SCRATCH_REG.value, adr_new_target) + mc.JMP_r(X86_64_SCRATCH_REG.value) + mc.copy_to_raw_memory(adr_jump_offset) + def write_pending_failure_recoveries(self, regalloc): # for each pending guard, generate the code of the recovery stub # at the end of self.mc. 
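
On the stitch-bridge fix above: _patch_jump_to() distinguishes two cases because a direct JMP rel32 is five bytes long and can only reach targets within a signed 32-bit displacement measured from the end of that instruction; anything further away has to go through a load of the target address into the scratch register followed by an indirect jump. A compact sketch of that decision, editorial only; choose_jump_encoding is a made-up name.

    def fits_in_32bits(x):
        return -2**31 <= x < 2**31

    def choose_jump_encoding(adr_jump_offset, adr_new_target):
        # rel32 is relative to the address just after the 5-byte JMP
        offset = adr_new_target - (adr_jump_offset + 5)
        if fits_in_32bits(offset):
            return 'JMP rel32'
        return 'MOV scratch, imm64; JMP scratch'
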
@@ -791,10 +804,6 @@ def patch_jump_for_descr(self, faildescr, adr_new_target): adr_jump_offset = faildescr.adr_jump_offset - self._patch_jump_for_descr(adr_jump_offset, adr_new_target) - faildescr.adr_jump_offset = 0 # means "patched" - - def _patch_jump_for_descr(self, adr_jump_offset, adr_new_target): assert adr_jump_offset != 0 offset = adr_new_target - (adr_jump_offset + 4) # If the new target fits within a rel32 of the jump, just patch @@ -815,6 +824,7 @@ p = rffi.cast(rffi.INTP, adr_jump_offset) adr_target = adr_jump_offset + 4 + rffi.cast(lltype.Signed, p[0]) mc.copy_to_raw_memory(adr_target) + faildescr.adr_jump_offset = 0 # means "patched" def fixup_target_tokens(self, rawstart): for targettoken in self.target_tokens_currently_compiling: From pypy.commits at gmail.com Thu Mar 31 10:22:35 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 31 Mar 2016 07:22:35 -0700 (PDT) Subject: [pypy-commit] pypy jit-constptr-2: progress for x86-64 Message-ID: <56fd32ab.85371c0a.5b846.2a21@mx.google.com> Author: Armin Rigo Branch: jit-constptr-2 Changeset: r83464:27b8d73fef68 Date: 2016-03-31 15:27 +0100 http://bitbucket.org/pypy/pypy/changeset/27b8d73fef68/ Log: progress for x86-64 diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -23,10 +23,11 @@ class GuardToken(object): def __init__(self, cpu, gcmap, faildescr, failargs, fail_locs, - guard_opnum, frame_depth): + guard_opnum, frame_depth, faildescrindex): assert isinstance(faildescr, AbstractFailDescr) self.cpu = cpu self.faildescr = faildescr + self.faildescrindex = faildescrindex self.failargs = failargs self.fail_locs = fail_locs self.gcmap = self.compute_gcmap(gcmap, failargs, @@ -144,6 +145,21 @@ self.codemap_builder = CodemapBuilder() self._finish_gcmap = lltype.nullptr(jitframe.GCMAP) + def setup_gcrefs_list(self, allgcrefs): + self._allgcrefs = allgcrefs + self._allgcrefs_faildescr_next = 0 + + def teardown_gcrefs_list(self): + self._allgcrefs = None + + def get_gcref_from_faildescr(self, descr): + """This assumes that it is called in order for all faildescrs.""" + search = cast_instance_to_gcref(descr) + while self._allgcrefs[self._allgcrefs_faildescr_next] != search: + self._allgcrefs_faildescr_next += 1 + assert self._allgcrefs_faildescr_next < len(self._allgcrefs) + return self._allgcrefs_faildescr_next + def set_debug(self, v): r = self._debug self._debug = v @@ -186,8 +202,7 @@ break exc = guardtok.must_save_exception() target = self.failure_recovery_code[exc + 2 * withfloats] - fail_descr = cast_instance_to_gcref(guardtok.faildescr) - fail_descr = rffi.cast(lltype.Signed, fail_descr) + faildescrindex = guardtok.faildescrindex base_ofs = self.cpu.get_baseofs_of_frame_field() # # in practice, about 2/3rd of 'positions' lists that we build are @@ -229,7 +244,7 @@ self._previous_rd_locs = positions # write down the positions of locs guardtok.faildescr.rd_locs = positions - return fail_descr, target + return faildescrindex, target def enter_portal_frame(self, op): if self.cpu.HAS_CODEMAP: @@ -288,7 +303,7 @@ gcref = cast_instance_to_gcref(value) if gcref: - rgc._make_sure_does_not_move(gcref) + rgc._make_sure_does_not_move(gcref) # but should be prebuilt value = rffi.cast(lltype.Signed, gcref) je_location = self._call_assembler_check_descr(value, tmploc) # diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- 
a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -514,7 +514,7 @@ full_size = self.mc.get_relative_pos() # rawstart = self.materialize_loop(looptoken) - self.patch_gcref_table(looptoken, allgcrefs, rawstart) + self.patch_gcref_table(looptoken, rawstart) self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE, rawstart) looptoken._ll_loop_code = looppos + rawstart @@ -582,7 +582,7 @@ fullsize = self.mc.get_relative_pos() # rawstart = self.materialize_loop(original_loop_token) - self.patch_gcref_table(original_loop_token, allgcrefs, rawstart) + self.patch_gcref_table(original_loop_token, rawstart) self.patch_stack_checks(frame_depth_no_fixed_size + JITFRAME_FIXED_SIZE, rawstart) debug_bridge(descr_number, rawstart, codeendpos) @@ -669,12 +669,14 @@ assert mc.get_relative_pos() == 0 for i in range(gcref_table_size): mc.writechar('\x00') + self.setup_gcrefs_list(allgcrefs) - def patch_gcref_table(self, looptoken, allgcrefs, rawstart): + def patch_gcref_table(self, looptoken, rawstart): assert IS_X86_64, "XXX" - tracer = gcreftracer.make_gcref_tracer(rawstart, allgcrefs) + tracer = gcreftracer.make_gcref_tracer(rawstart, self._allgcrefs) gcreftracers = self.get_asmmemmgr_gcreftracers(looptoken) gcreftracers.append(tracer) # keepalive + self.teardown_gcrefs_list() def write_pending_failure_recoveries(self, regalloc): # for each pending guard, generate the code of the recovery stub @@ -1386,16 +1388,20 @@ genop_cast_ptr_to_int = _genop_same_as genop_cast_int_to_ptr = _genop_same_as + def _patch_load_from_gc_table(self, index): + # must be called immediately after a "p"-mode instruction + # has been emitted + address_in_buffer = index * WORD # at the start of the buffer + p_location = self.mc.get_relative_pos() + offset = address_in_buffer - p_location + self.mc.overwrite32(p_location-4, offset) + def genop_load_from_gc_table(self, op, arglocs, resloc): [loc] = arglocs assert isinstance(loc, ImmedLoc) assert isinstance(resloc, RegLoc) - address_in_buffer = loc.value * WORD # at the start of the buffer - assert IS_X86_64, "XXX" self.mc.MOV_rp(resloc.value, 0) # %rip-relative - p_location = self.mc.get_relative_pos() - offset = address_in_buffer - p_location - self.mc.overwrite32(p_location-4, offset) + self._patch_load_from_gc_table(loc.value) def genop_int_force_ge_zero(self, op, arglocs, resloc): self.mc.TEST(arglocs[0], arglocs[0]) @@ -1872,8 +1878,9 @@ def implement_guard_recovery(self, guard_opnum, faildescr, failargs, fail_locs, frame_depth): gcmap = allocate_gcmap(self, frame_depth, JITFRAME_FIXED_SIZE) + faildescrindex = self.get_gcref_from_faildescr(faildescr) return GuardToken(self.cpu, gcmap, faildescr, failargs, fail_locs, - guard_opnum, frame_depth) + guard_opnum, frame_depth, faildescrindex) def generate_propagate_error_64(self): assert WORD == 8 @@ -1891,8 +1898,9 @@ self._update_at_exit(guardtok.fail_locs, guardtok.failargs, guardtok.faildescr, regalloc) # - fail_descr, target = self.store_info_on_descr(startpos, guardtok) - self.mc.PUSH(imm(fail_descr)) + faildescrindex, target = self.store_info_on_descr(startpos, guardtok) + self.mc.PUSH_p(0) # %rip-relative + self._patch_load_from_gc_table(faildescrindex) self.push_gcmap(self.mc, guardtok.gcmap, push=True) self.mc.JMP(imm(target)) return startpos @@ -1996,17 +2004,21 @@ def genop_finish(self, op, arglocs, result_loc): base_ofs = self.cpu.get_baseofs_of_frame_field() - if len(arglocs) == 2: - [return_val, fail_descr_loc] = arglocs + if len(arglocs) > 0: + [return_val] = arglocs 
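
The displacement arithmetic behind _patch_load_from_gc_table() above is worth spelling out: entry number index of the gc table lives at buffer offset index * WORD, and p_location is the buffer offset just past the 4-byte displacement field (for these loads that is also the start of the next instruction, i.e. what %rip holds), so the value written back is simply their difference, normally a negative number pointing back to the start of the buffer. A minimal sketch, editorial only; riprel_displacement is a made-up name and WORD is assumed to be 8.

    WORD = 8

    def riprel_displacement(index, p_location):
        address_in_buffer = index * WORD     # the table starts at offset 0 of the buffer
        return address_in_buffer - p_location

    # an instruction whose disp32 field ends at buffer offset 200,
    # loading table entry 3:
    assert riprel_displacement(3, 200) == -176
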
if op.getarg(0).type == FLOAT and not IS_X86_64: size = WORD * 2 else: size = WORD self.save_into_mem(raw_stack(base_ofs), return_val, imm(size)) - else: - [fail_descr_loc] = arglocs ofs = self.cpu.get_ofs_of_frame_field('jf_descr') - self.mov(fail_descr_loc, RawEbpLoc(ofs)) + + descr = op.getdescr() + faildescrindex = self.get_gcref_from_faildescr(descr) + self.mc.MOV_rp(eax.value, 0) + self._patch_load_from_gc_table(faildescrindex) + self.mov(eax, RawEbpLoc(ofs)) + arglist = op.getarglist() if arglist and arglist[0].type == REF: if self._finish_gcmap: @@ -2076,8 +2088,12 @@ guard_op.getopnum() == rop.GUARD_NOT_FORCED_2) faildescr = guard_op.getdescr() ofs = self.cpu.get_ofs_of_frame_field('jf_force_descr') - self.mc.MOV(raw_stack(ofs), imm(rffi.cast(lltype.Signed, - cast_instance_to_gcref(faildescr)))) + + assert IS_X86_64, "XXX uses the scratch reg" + faildescrindex = self.get_gcref_from_faildescr(faildescr) + self.mc.MOV_rp(X86_64_SCRATCH_REG.value, 0) + self._patch_load_from_gc_table(faildescrindex) + self.mc.MOV(raw_stack(ofs), X86_64_SCRATCH_REG) def _find_nearby_operation(self, delta): regalloc = self._regalloc diff --git a/rpython/jit/backend/x86/regalloc.py b/rpython/jit/backend/x86/regalloc.py --- a/rpython/jit/backend/x86/regalloc.py +++ b/rpython/jit/backend/x86/regalloc.py @@ -423,16 +423,11 @@ def consider_finish(self, op): # the frame is in ebp, but we have to point where in the frame is # the potential argument to FINISH - descr = op.getdescr() - fail_descr = cast_instance_to_gcref(descr) - # we know it does not move, but well - rgc._make_sure_does_not_move(fail_descr) - fail_descr = rffi.cast(lltype.Signed, fail_descr) if op.numargs() == 1: loc = self.make_sure_var_in_reg(op.getarg(0)) - locs = [loc, imm(fail_descr)] + locs = [loc] else: - locs = [imm(fail_descr)] + locs = [] self.perform(op, locs, None) def consider_guard_no_exception(self, op): diff --git a/rpython/jit/backend/x86/rx86.py b/rpython/jit/backend/x86/rx86.py --- a/rpython/jit/backend/x86/rx86.py +++ b/rpython/jit/backend/x86/rx86.py @@ -600,6 +600,7 @@ PUS1_r = insn(rex_nw, register(1), '\x50') PUS1_b = insn(rex_nw, '\xFF', orbyte(6<<3), stack_bp(1)) PUS1_m = insn(rex_nw, '\xFF', orbyte(6<<3), mem_reg_plus_const(1)) + PUS1_p = insn(rex_nw, '\xFF', orbyte(6<<3), rip_offset(1)) PUS1_i8 = insn('\x6A', immediate(1, 'b')) PUS1_i32 = insn('\x68', immediate(1, 'i')) @@ -622,6 +623,10 @@ self.PUS1_i32(immed) self.stack_frame_size_delta(+self.WORD) + def PUSH_p(self, rip_offset): + self.PUS1_p(rip_offset) + self.stack_frame_size_delta(+self.WORD) + PO1_r = insn(rex_nw, register(1), '\x58') PO1_b = insn(rex_nw, '\x8F', orbyte(0<<3), stack_bp(1)) From pypy.commits at gmail.com Thu Mar 31 10:50:43 2016 From: pypy.commits at gmail.com (fijal) Date: Thu, 31 Mar 2016 07:50:43 -0700 (PDT) Subject: [pypy-commit] pypy default: fix the test Message-ID: <56fd3943.47afc20a.4465b.32b3@mx.google.com> Author: fijal Branch: Changeset: r83465:643df004248e Date: 2016-03-31 16:49 +0200 http://bitbucket.org/pypy/pypy/changeset/643df004248e/ Log: fix the test diff --git a/rpython/jit/metainterp/test/test_virtualref.py b/rpython/jit/metainterp/test/test_virtualref.py --- a/rpython/jit/metainterp/test/test_virtualref.py +++ b/rpython/jit/metainterp/test/test_virtualref.py @@ -578,7 +578,6 @@ n -= 1 return res # - py.test.raises(InvalidVirtualRef, "fn(10)") py.test.raises(UnknownException, "self.meta_interp(fn, [10])") def test_call_virtualref_already_forced(self): From pypy.commits at gmail.com Thu Mar 31 10:50:45 2016 From: pypy.commits 
at gmail.com (fijal) Date: Thu, 31 Mar 2016 07:50:45 -0700 (PDT) Subject: [pypy-commit] pypy default: merge Message-ID: <56fd3945.838d1c0a.a7662.69e9@mx.google.com> Author: fijal Branch: Changeset: r83466:786c1c380bd8 Date: 2016-03-31 16:49 +0200 http://bitbucket.org/pypy/pypy/changeset/786c1c380bd8/ Log: merge diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py --- a/pypy/interpreter/error.py +++ b/pypy/interpreter/error.py @@ -277,19 +277,12 @@ raise NotImplementedError def get_traceback(self): - """Calling this marks the PyTraceback as escaped, i.e. it becomes - accessible and inspectable by app-level Python code. + """Get the PyTraceback object, for app-level Python code. """ return self._application_traceback def set_traceback(self, traceback): - """Set the current traceback. It should either be a traceback - pointing to some already-escaped frame, or a traceback for the - current frame. To support the latter case we do not mark the - frame as escaped. The idea is that it will be marked as escaping - only if the exception really propagates out of this frame, by - executioncontext.leave() being called with got_exception=True. - """ + """Set the current traceback.""" self._application_traceback = traceback diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py --- a/pypy/module/__pypy__/__init__.py +++ b/pypy/module/__pypy__/__init__.py @@ -90,6 +90,7 @@ 'save_module_content_for_future_reload': 'interp_magic.save_module_content_for_future_reload', 'decode_long' : 'interp_magic.decode_long', + '_promote' : 'interp_magic._promote', } if sys.platform == 'win32': interpleveldefs['get_console_cp'] = 'interp_magic.get_console_cp' diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py --- a/pypy/module/__pypy__/interp_magic.py +++ b/pypy/module/__pypy__/interp_magic.py @@ -168,3 +168,22 @@ except InvalidEndiannessError: raise oefmt(space.w_ValueError, "invalid byteorder argument") return space.newlong_from_rbigint(result) + +def _promote(space, w_obj): + """ Promote the first argument of the function and return it. Promote is by + value for ints, floats, strs, unicodes (but not subclasses thereof) and by + reference otherwise. 
+ + This function is experimental!""" + from rpython.rlib import jit + if space.is_w(space.type(w_obj), space.w_int): + jit.promote(space.int_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_float): + jit.promote(space.float_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_str): + jit.promote(space.str_w(w_obj)) + elif space.is_w(space.type(w_obj), space.w_unicode): + jit.promote(space.unicode_w(w_obj)) + else: + jit.promote(w_obj) + return w_obj diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py --- a/pypy/module/__pypy__/test/test_magic.py +++ b/pypy/module/__pypy__/test/test_magic.py @@ -47,3 +47,16 @@ assert decode_long('\x00\x80', 'little', False) == 32768 assert decode_long('\x00\x80', 'little', True) == -32768 raises(ValueError, decode_long, '', 'foo') + + def test_promote(self): + from __pypy__ import _promote + assert _promote(1) == 1 + assert _promote(1.1) == 1.1 + assert _promote("abc") == "abc" + assert _promote(u"abc") == u"abc" + l = [] + assert _promote(l) is l + class A(object): + pass + a = A() + assert _promote(a) is a diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -645,15 +645,28 @@ pass elif gloc is not bloc: self.mov(gloc, bloc) + offset = self.mc.get_relative_pos() self.mc.JMP_l(0) + self.mc.writeimm32(0) self.mc.force_frame_size(DEFAULT_FRAME_BYTES) - offset = self.mc.get_relative_pos() - 4 rawstart = self.materialize_loop(looptoken) - # update the jump to the real trace - self._patch_jump_for_descr(rawstart + offset, asminfo.rawstart) + # update the jump (above) to the real trace + self._patch_jump_to(rawstart + offset, asminfo.rawstart) # update the guard to jump right to this custom piece of assembler self.patch_jump_for_descr(faildescr, rawstart) + def _patch_jump_to(self, adr_jump_offset, adr_new_target): + assert adr_jump_offset != 0 + offset = adr_new_target - (adr_jump_offset + 5) + mc = codebuf.MachineCodeBlockWrapper() + mc.force_frame_size(DEFAULT_FRAME_BYTES) + if rx86.fits_in_32bits(offset): + mc.JMP_l(offset) + else: + mc.MOV_ri(X86_64_SCRATCH_REG.value, adr_new_target) + mc.JMP_r(X86_64_SCRATCH_REG.value) + mc.copy_to_raw_memory(adr_jump_offset) + def write_pending_failure_recoveries(self, regalloc): # for each pending guard, generate the code of the recovery stub # at the end of self.mc. 
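
Since the __pypy__._promote helper reappears in this merge, here is a rough idea of how application-level code might call it. This is a hypothetical example, not from the changesets; it degrades to a no-op where __pypy__ is unavailable, and lookup is a made-up function.

    try:
        from __pypy__ import _promote
    except ImportError:               # plain CPython, or a PyPy without this helper
        def _promote(x):
            return x

    def lookup(table, key):
        # promoting a value that is almost always the same constant lets
        # the JIT specialize everything that depends on it
        key = _promote(key)
        return table[key]
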
@@ -791,10 +804,6 @@ def patch_jump_for_descr(self, faildescr, adr_new_target): adr_jump_offset = faildescr.adr_jump_offset - self._patch_jump_for_descr(adr_jump_offset, adr_new_target) - faildescr.adr_jump_offset = 0 # means "patched" - - def _patch_jump_for_descr(self, adr_jump_offset, adr_new_target): assert adr_jump_offset != 0 offset = adr_new_target - (adr_jump_offset + 4) # If the new target fits within a rel32 of the jump, just patch @@ -815,6 +824,7 @@ p = rffi.cast(rffi.INTP, adr_jump_offset) adr_target = adr_jump_offset + 4 + rffi.cast(lltype.Signed, p[0]) mc.copy_to_raw_memory(adr_target) + faildescr.adr_jump_offset = 0 # means "patched" def fixup_target_tokens(self, rawstart): for targettoken in self.target_tokens_currently_compiling: diff --git a/rpython/jit/backend/zarch/pool.py b/rpython/jit/backend/zarch/pool.py --- a/rpython/jit/backend/zarch/pool.py +++ b/rpython/jit/backend/zarch/pool.py @@ -33,7 +33,7 @@ def ensure_can_hold_constants(self, asm, op): # allocates 8 bytes in memory for pointers, long integers or floats - if op.is_jit_debug(): + if rop.is_jit_debug(op): return for arg in op.getarglist(): From pypy.commits at gmail.com Thu Mar 31 10:50:56 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 31 Mar 2016 07:50:56 -0700 (PDT) Subject: [pypy-commit] pypy jit-constptr-2: fixes fixes Message-ID: <56fd3950.e853c20a.600f.3433@mx.google.com> Author: Armin Rigo Branch: jit-constptr-2 Changeset: r83467:a6f415a30786 Date: 2016-03-31 15:56 +0100 http://bitbucket.org/pypy/pypy/changeset/a6f415a30786/ Log: fixes fixes diff --git a/rpython/jit/backend/llsupport/assembler.py b/rpython/jit/backend/llsupport/assembler.py --- a/rpython/jit/backend/llsupport/assembler.py +++ b/rpython/jit/backend/llsupport/assembler.py @@ -155,7 +155,8 @@ def get_gcref_from_faildescr(self, descr): """This assumes that it is called in order for all faildescrs.""" search = cast_instance_to_gcref(descr) - while self._allgcrefs[self._allgcrefs_faildescr_next] != search: + while not _safe_eq( + self._allgcrefs[self._allgcrefs_faildescr_next], search): self._allgcrefs_faildescr_next += 1 assert self._allgcrefs_faildescr_next < len(self._allgcrefs) return self._allgcrefs_faildescr_next @@ -466,3 +467,8 @@ r_uint(rawstart + codeendpos))) debug_stop("jit-backend-addr") +def _safe_eq(x, y): + try: + return x == y + except AttributeError: # minor mess + return False diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -1,11 +1,12 @@ from rpython.rlib import rgc -from rpython.rlib.objectmodel import we_are_translated +from rpython.rlib.objectmodel import we_are_translated, r_dict from rpython.rlib.rarithmetic import ovfcheck, highest_bit from rpython.rtyper.lltypesystem import llmemory, lltype, rstr from rpython.rtyper.annlowlevel import cast_instance_to_gcref from rpython.jit.metainterp import history from rpython.jit.metainterp.history import ConstInt, ConstPtr from rpython.jit.metainterp.resoperation import ResOperation, rop, OpHelpers +from rpython.jit.metainterp.typesystem import rd_eq, rd_hash from rpython.jit.codewriter import heaptracker from rpython.jit.backend.llsupport.symbolic import (WORD, get_array_token) @@ -96,10 +97,11 @@ orig_op = op replaced = False opnum = op.getopnum() + keep = (opnum == rop.JIT_DEBUG) for i in range(op.numargs()): orig_arg = op.getarg(i) arg = self.get_box_replacement(orig_arg) - if isinstance(arg, ConstPtr) and 
bool(arg.value): + if isinstance(arg, ConstPtr) and bool(arg.value) and not keep: arg = self.remove_constptr(arg) if orig_arg is not arg: if not replaced: @@ -319,7 +321,7 @@ # this case means between CALLs or unknown-size mallocs. # self.gcrefs_output_list = gcrefs_output_list - self.gcrefs_map = {} + self.gcrefs_map = r_dict(rd_eq, rd_hash) # rdict {gcref: index} self.gcrefs_recently_loaded = {} operations = self.remove_bridge_exception(operations) self._changed_op = None @@ -956,20 +958,12 @@ self.gcrefs_recently_loaded.clear() def _gcref_index(self, gcref): - if we_are_translated(): - # can only use the dictionary after translation - try: - return self.gcrefs_map[gcref] - except KeyError: - pass - index = len(self.gcrefs_output_list) - self.gcrefs_map[gcref] = index - else: - # untranslated: linear scan - for i, gcref1 in enumerate(self.gcrefs_output_list): - if gcref == gcref1: - return i - index = len(self.gcrefs_output_list) + try: + return self.gcrefs_map[gcref] + except KeyError: + pass + index = len(self.gcrefs_output_list) + self.gcrefs_map[gcref] = index self.gcrefs_output_list.append(gcref) return index From pypy.commits at gmail.com Thu Mar 31 11:24:09 2016 From: pypy.commits at gmail.com (cfbolz) Date: Thu, 31 Mar 2016 08:24:09 -0700 (PDT) Subject: [pypy-commit] pypy default: interp_pdb has been renamed Message-ID: <56fd4119.d3921c0a.5ef6b.483e@mx.google.com> Author: Carl Friedrich Bolz Branch: Changeset: r83468:eb3838995550 Date: 2016-03-31 17:20 +0200 http://bitbucket.org/pypy/pypy/changeset/eb3838995550/ Log: interp_pdb has been renamed diff --git a/pypy/doc/__pypy__-module.rst b/pypy/doc/__pypy__-module.rst --- a/pypy/doc/__pypy__-module.rst +++ b/pypy/doc/__pypy__-module.rst @@ -18,6 +18,7 @@ - ``bytebuffer(length)``: return a new read-write buffer of the given length. It works like a simplified array of characters (actually, depending on the configuration the ``array`` module internally uses this). + - ``attach_gdb()``: start a GDB at the interpreter-level (or a PDB before translation). Transparent Proxy Functionality @@ -37,4 +38,3 @@ -------------------------------------------------------- - ``isfake(obj)``: returns True if ``obj`` is faked. - - ``interp_pdb()``: start a pdb at interpreter-level. 
From pypy.commits at gmail.com Thu Mar 31 13:13:29 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 31 Mar 2016 10:13:29 -0700 (PDT) Subject: [pypy-commit] pypy rposix-for-3: Avoid imperative py.test.skip() whenever possible Message-ID: <56fd5ab9.4412c30a.d6cca.6b6c@mx.google.com> Author: Ronan Lamy Branch: rposix-for-3 Changeset: r83469:c3eceb0fc5db Date: 2016-03-31 17:13 +0100 http://bitbucket.org/pypy/pypy/changeset/c3eceb0fc5db/ Log: Avoid imperative py.test.skip() whenever possible diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -11,6 +11,8 @@ return py.test.mark.skipif(not hasattr(rposix, funcname), reason="Requires rposix.%s()" % funcname) +win_only = py.test.mark.skipif("os.name != 'nt'") + class TestPosixFunction: def test_access(self): filename = str(udir.join('test_access.txt')) @@ -33,9 +35,8 @@ for value in times: assert isinstance(value, float) + @py.test.mark.skipif("not hasattr(os, 'getlogin')") def test_getlogin(self): - if not hasattr(os, 'getlogin'): - py.test.skip('posix specific function') try: expected = os.getlogin() except OSError, e: @@ -43,9 +44,8 @@ data = rposix.getlogin() assert data == expected + @win_only def test_utimes(self): - if os.name != 'nt': - py.test.skip('Windows specific feature') # Windows support centiseconds def f(fname, t1): os.utime(fname, (t1, t1)) @@ -55,15 +55,12 @@ t1 = 1159195039.25 compile(f, (str, float))(str(fname), t1) assert t1 == os.stat(str(fname)).st_mtime - if sys.version_info < (2, 7): - py.test.skip('requires Python 2.7') t1 = 5000000000.0 compile(f, (str, float))(str(fname), t1) assert t1 == os.stat(str(fname)).st_mtime + @win_only def test__getfullpathname(self): - if os.name != 'nt': - py.test.skip('nt specific function') posix = __import__(os.name) sysdrv = os.getenv('SystemDrive', 'C:') stuff = sysdrv + 'stuff' @@ -134,10 +131,8 @@ os.unlink(filename) + @py.test.mark.skipif("os.name != 'posix'") def test_execve(self): - if os.name != 'posix': - py.test.skip('posix specific function') - EXECVE_ENV = {"foo": "bar", "baz": "quux"} def run_execve(program, args=None, env=None, do_path_lookup=False): @@ -276,11 +271,8 @@ assert rposix.isatty(-1) is False + at py.test.mark.skipif("not hasattr(os, 'ttyname')") class TestOsExpect(ExpectTest): - def setup_class(cls): - if not hasattr(os, 'ttyname'): - py.test.skip("no ttyname") - def test_ttyname(self): def f(): import os @@ -444,9 +436,8 @@ except Exception: pass + @win_only def test_is_valid_fd(self): - if os.name != 'nt': - py.test.skip('relevant for windows only') assert rposix.is_valid_fd(0) == 1 fid = open(str(udir.join('validate_test.txt')), 'w') fd = fid.fileno() From pypy.commits at gmail.com Thu Mar 31 13:13:31 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 31 Mar 2016 10:13:31 -0700 (PDT) Subject: [pypy-commit] pypy rposix-for-3: Implement rposix.fexecve() Message-ID: <56fd5abb.06b01c0a.d4956.ffffa724@mx.google.com> Author: Ronan Lamy Branch: rposix-for-3 Changeset: r83470:e4260eb4e93c Date: 2016-03-31 17:20 +0100 http://bitbucket.org/pypy/pypy/changeset/e4260eb4e93c/ Log: Implement rposix.fexecve() diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -1801,6 +1801,26 @@ error = c_fchownat(dir_fd, path, owner, group, flag) handle_posix_error('fchownat', error) +if HAVE_FEXECVE: + c_fexecve = external('fexecve', + [rffi.INT, rffi.CCHARPP, rffi.CCHARPP], rffi.INT, + 
save_err=rffi.RFFI_SAVE_ERRNO) + + def fexecve(fd, args, env): + envstrs = [] + for item in env.iteritems(): + envstr = "%s=%s" % item + envstrs.append(envstr) + + # This list conversion already takes care of NUL bytes. + l_args = rffi.ll_liststr2charpp(args) + l_env = rffi.ll_liststr2charpp(envstrs) + c_fexecve(fd, l_args, l_env) + + rffi.free_charpp(l_env) + rffi.free_charpp(l_args) + raise OSError(get_saved_errno(), "execve failed") + if HAVE_LINKAT: c_linkat = external('linkat', [rffi.INT, rffi.CCHARP, rffi.INT, rffi.CCHARP, rffi.INT], rffi.INT) From pypy.commits at gmail.com Thu Mar 31 13:13:33 2016 From: pypy.commits at gmail.com (rlamy) Date: Thu, 31 Mar 2016 10:13:33 -0700 (PDT) Subject: [pypy-commit] pypy rposix-for-3: Add rposix.fdlistdir(): variant of rposix.listdir() using fdopendir() Message-ID: <56fd5abd.02931c0a.d9f45.ffff9c8c@mx.google.com> Author: Ronan Lamy Branch: rposix-for-3 Changeset: r83471:42e156eb8e2c Date: 2016-03-31 18:12 +0100 http://bitbucket.org/pypy/pypy/changeset/42e156eb8e2c/ Log: Add rposix.fdlistdir(): variant of rposix.listdir() using fdopendir() diff --git a/rpython/rlib/rposix.py b/rpython/rlib/rposix.py --- a/rpython/rlib/rposix.py +++ b/rpython/rlib/rposix.py @@ -603,14 +603,44 @@ config = rffi_platform.configure(CConfig) DIRENT = config['DIRENT'] DIRENTP = lltype.Ptr(DIRENT) - c_opendir = external('opendir', [rffi.CCHARP], DIRP, - save_err=rffi.RFFI_SAVE_ERRNO) + c_opendir = external('opendir', + [rffi.CCHARP], DIRP, save_err=rffi.RFFI_SAVE_ERRNO) + c_fdopendir = external('fdopendir', + [rffi.INT], DIRP, save_err=rffi.RFFI_SAVE_ERRNO) # XXX macro=True is hack to make sure we get the correct kind of # dirent struct (which depends on defines) c_readdir = external('readdir', [DIRP], DIRENTP, macro=True, save_err=rffi.RFFI_FULL_ERRNO_ZERO) c_closedir = external('closedir', [DIRP], rffi.INT) +def _listdir(dirp): + result = [] + while True: + direntp = c_readdir(dirp) + if not direntp: + error = get_saved_errno() + break + namep = rffi.cast(rffi.CCHARP, direntp.c_d_name) + name = rffi.charp2str(namep) + if name != '.' and name != '..': + result.append(name) + c_closedir(dirp) + if error: + raise OSError(error, "readdir failed") + return result + +def fdlistdir(dirfd): + """ + Like listdir(), except that the directory is specified as an open + file descriptor. + + Note: fdlistdir() closes the file descriptor. + """ + dirp = c_fdopendir(dirfd) + if not dirp: + raise OSError(get_saved_errno(), "opendir failed") + return _listdir(dirp) + @replace_os_function('listdir') @specialize.argtype(0) def listdir(path): @@ -619,20 +649,7 @@ dirp = c_opendir(path) if not dirp: raise OSError(get_saved_errno(), "opendir failed") - result = [] - while True: - direntp = c_readdir(dirp) - if not direntp: - error = get_saved_errno() - break - namep = rffi.cast(rffi.CCHARP, direntp.c_d_name) - name = rffi.charp2str(namep) - if name != '.' 
and name != '..': - result.append(name) - c_closedir(dirp) - if error: - raise OSError(error, "readdir failed") - return result + return _listdir(dirp) else: # _WIN32 case traits = _preferred_traits(path) win32traits = make_win32_traits(traits) diff --git a/rpython/rlib/test/test_rposix.py b/rpython/rlib/test/test_rposix.py --- a/rpython/rlib/test/test_rposix.py +++ b/rpython/rlib/test/test_rposix.py @@ -528,6 +528,14 @@ os.open(u'/tmp/t', 0, 0) compile(f, ()) + +def test_fdlistdir(tmpdir): + tmpdir.join('file').write('text') + dirfd = os.open(str(tmpdir), os.O_RDONLY) + result = rposix.fdlistdir(dirfd) + # Note: fdlistdir() always closes dirfd + assert result == ['file'] + def test_symlinkat(tmpdir): tmpdir.join('file').write('text') dirfd = os.open(str(tmpdir), os.O_RDONLY) @@ -537,7 +545,6 @@ finally: os.close(dirfd) - def test_renameat(tmpdir): tmpdir.join('file').write('text') dirfd = os.open(str(tmpdir), os.O_RDONLY) From pypy.commits at gmail.com Thu Mar 31 18:09:51 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 31 Mar 2016 15:09:51 -0700 (PDT) Subject: [pypy-commit] pypy jit-constptr-2: more fixes Message-ID: <56fda02f.cc811c0a.d468f.ffffdca0@mx.google.com> Author: Armin Rigo Branch: jit-constptr-2 Changeset: r83472:db567242f29f Date: 2016-04-01 00:01 +0200 http://bitbucket.org/pypy/pypy/changeset/db567242f29f/ Log: more fixes diff --git a/rpython/jit/backend/llsupport/gcreftracer.py b/rpython/jit/backend/llsupport/gcreftracer.py --- a/rpython/jit/backend/llsupport/gcreftracer.py +++ b/rpython/jit/backend/llsupport/gcreftracer.py @@ -1,4 +1,6 @@ +from rpython.rlib import rgc from rpython.rtyper.lltypesystem import lltype, llmemory, rffi +from rpython.rtyper.lltypesystem.lloperation import llop from rpython.jit.backend.llsupport.symbolic import WORD @@ -14,7 +16,8 @@ length = obj.array_length addr = obj.array_base_addr while i < length: - gc._trace_callback(callback, arg, addr + i * WORD) + p = rffi.cast(llmemory.Address, addr + i * WORD) + gc._trace_callback(callback, arg, p) i += 1 lambda_gcrefs_trace = lambda: gcrefs_trace @@ -22,16 +25,17 @@ # careful about the order here: the allocation of the GCREFTRACER # can trigger a GC. So we must write the gcrefs into the raw # array only afterwards... 
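
One practical detail of the rposix.fdlistdir() addition above deserves a call-out: it always closes the descriptor it is given, even on success, so callers must not close it again. A hypothetical RPython-level usage sketch follows; entries_of is a made-up helper and an importable RPython source tree is assumed.

    import os
    from rpython.rlib import rposix

    def entries_of(path):
        dirfd = os.open(path, os.O_RDONLY)
        return rposix.fdlistdir(dirfd)   # dirfd is closed by fdlistdir itself
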
+ rgc.register_custom_trace_hook(GCREFTRACER, lambda_gcrefs_trace) + length = len(gcrefs) tr = lltype.malloc(GCREFTRACER) + # --no GC from here-- tr.array_base_addr = array_base_addr - tr.array_length = 0 # incremented as we populate the array_base_addr + tr.array_length = length i = 0 - length = len(gcrefs) while i < length: p = rffi.cast(rffi.SIGNEDP, array_base_addr + i * WORD) - # --no GC from here-- p[0] = rffi.cast(lltype.Signed, gcrefs[i]) - tr.array_length += 1 - # --no GC until here-- i += 1 + llop.gc_writebarrier(lltype.Void, tr) + # --no GC until here-- return tr diff --git a/rpython/jit/backend/llsupport/test/test_gcreftracer.py b/rpython/jit/backend/llsupport/test/test_gcreftracer.py --- a/rpython/jit/backend/llsupport/test/test_gcreftracer.py +++ b/rpython/jit/backend/llsupport/test/test_gcreftracer.py @@ -9,6 +9,7 @@ def _trace_callback(self, callback, arg, addr): assert callback == "callback" assert arg == "arg" + assert lltype.typeOf(addr) == llmemory.Address self.called.append(addr) diff --git a/rpython/jit/backend/x86/assembler.py b/rpython/jit/backend/x86/assembler.py --- a/rpython/jit/backend/x86/assembler.py +++ b/rpython/jit/backend/x86/assembler.py @@ -524,6 +524,12 @@ r_uint(rawstart + looppos), r_uint(rawstart + size_excluding_failure_stuff), r_uint(rawstart))) + debug_print(" gc table: 0x%x" % r_uint(rawstart)) + debug_print(" function: 0x%x" % r_uint(rawstart + functionpos)) + debug_print(" loop: 0x%x" % r_uint(rawstart + looppos)) + debug_print(" failures: 0x%x" % r_uint(rawstart + + size_excluding_failure_stuff)) + debug_print(" end: 0x%x" % r_uint(rawstart + full_size)) debug_stop("jit-backend-addr") self.patch_pending_failure_recoveries(rawstart) # From pypy.commits at gmail.com Thu Mar 31 18:57:02 2016 From: pypy.commits at gmail.com (arigo) Date: Thu, 31 Mar 2016 15:57:02 -0700 (PDT) Subject: [pypy-commit] pypy jit-constptr-2: translation fix for some tests Message-ID: <56fdab3e.e213c20a.53491.ffffd5d3@mx.google.com> Author: Armin Rigo Branch: jit-constptr-2 Changeset: r83473:647c93899602 Date: 2016-04-01 00:56 +0200 http://bitbucket.org/pypy/pypy/changeset/647c93899602/ Log: translation fix for some tests diff --git a/rpython/jit/backend/llsupport/rewrite.py b/rpython/jit/backend/llsupport/rewrite.py --- a/rpython/jit/backend/llsupport/rewrite.py +++ b/rpython/jit/backend/llsupport/rewrite.py @@ -321,8 +321,8 @@ # this case means between CALLs or unknown-size mallocs. # self.gcrefs_output_list = gcrefs_output_list - self.gcrefs_map = r_dict(rd_eq, rd_hash) # rdict {gcref: index} - self.gcrefs_recently_loaded = {} + self.gcrefs_map = None + self.gcrefs_recently_loaded = None operations = self.remove_bridge_exception(operations) self._changed_op = None for i in range(len(operations)): @@ -955,9 +955,11 @@ def emit_label(self): self.emitting_an_operation_that_can_collect() self._known_lengths.clear() - self.gcrefs_recently_loaded.clear() + self.gcrefs_recently_loaded = None def _gcref_index(self, gcref): + if self.gcrefs_map is None: + self.gcrefs_map = r_dict(rd_eq, rd_hash) try: return self.gcrefs_map[gcref] except KeyError: @@ -974,6 +976,8 @@ # LABELs. We'd like something better, like "don't spill it", # but that's the wrong level... index = self._gcref_index(c.value) + if self.gcrefs_recently_loaded is None: + self.gcrefs_recently_loaded = {} try: load_op = self.gcrefs_recently_loaded[index] except KeyError: